diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 94ace774..8d9af98b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,14 +2,10 @@ name: Release on: - workflow_dispatch: - inputs: - target: - default: testpypi - description: Deployment target. Can be pypi or testpypi. release: types: - published + workflow_dispatch: jobs: dist: @@ -55,8 +51,16 @@ jobs: needs: dist runs-on: ubuntu-latest environment: - name: publish - url: https://pypi.org/p/glass + name: >- + ${{ (github.event_name == 'release' && + github.event.action == 'published') && + 'publish' || + 'test-publish' }} + url: >- + ${{ (github.event_name == 'release' && + github.event.action == 'published') && + 'https://pypi.org/project/glass' || + 'https://test.pypi.org/project/glass' }} permissions: id-token: write steps: @@ -71,12 +75,11 @@ jobs: - name: Publish to PyPI if: >- - github.event.inputs.target == 'pypi' || (github.event_name == - 'release' && github.event.action == 'published') + github.event_name == 'release' && github.event.action == 'published' uses: pypa/gh-action-pypi-publish@release/v1 - name: Publish to TestPyPI - if: github.event.inputs.target == 'testpypi' + if: github.event_name == 'workflow_dispatch' uses: pypa/gh-action-pypi-publish@release/v1 with: repository-url: https://test.pypi.org/legacy/ diff --git a/glass/lensing.py b/glass/lensing.py index fbb6e91f..981c025f 100644 --- a/glass/lensing.py +++ b/glass/lensing.py @@ -45,6 +45,117 @@ from glass.shells import RadialWindow +@typing.overload +def from_convergence( + kappa: npt.NDArray[np.float64], + lmax: int | None = None, + *, + potential: typing.Literal[True] = True, + deflection: typing.Literal[False] = False, + shear: typing.Literal[False] = False, + discretized: bool = True, +) -> tuple[npt.NDArray[np.float64]]: + # returns psi + ... + + +@typing.overload +def from_convergence( + kappa: npt.NDArray[np.float64], + lmax: int | None = None, + *, + potential: typing.Literal[False] = False, + deflection: typing.Literal[True] = True, + shear: typing.Literal[False] = False, + discretized: bool = True, +) -> tuple[npt.NDArray[np.complex128]]: + # returns alpha + ... + + +@typing.overload +def from_convergence( + kappa: npt.NDArray[np.float64], + lmax: int | None = None, + *, + potential: typing.Literal[False] = False, + deflection: typing.Literal[False] = False, + shear: typing.Literal[True] = True, + discretized: bool = True, +) -> tuple[npt.NDArray[np.complex128]]: + # returns gamma + ... + + +@typing.overload +def from_convergence( + kappa: npt.NDArray[np.float64], + lmax: int | None = None, + *, + potential: typing.Literal[True] = True, + deflection: typing.Literal[True] = True, + shear: typing.Literal[False] = False, + discretized: bool = True, +) -> tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], +]: + # returns psi, alpha + ... + + +@typing.overload +def from_convergence( + kappa: npt.NDArray[np.float64], + lmax: int | None = None, + *, + potential: typing.Literal[True] = True, + deflection: typing.Literal[False] = False, + shear: typing.Literal[True] = True, + discretized: bool = True, +) -> tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], +]: + # returns psi, gamma + ... 
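The `typing.overload` stubs above (and the ones that follow) exist purely for static type checkers: each combination of the `potential`, `deflection` and `shear` flags maps to a fixed tuple of map types, so callers see precise element types instead of the broad union returned by the implementation. A rough caller-side sketch of the intended narrowing, with placeholder map size and `nside` that are not taken from the patch, might look like this:

```python
# Illustrative only: shows how a type checker should narrow the return type
# of from_convergence given the overloads above; the map values are dummies.
import numpy as np
import numpy.typing as npt

from glass.lensing import from_convergence

# placeholder convergence map (nside=16 chosen arbitrarily for this sketch)
kappa: npt.NDArray[np.float64] = np.zeros(12 * 16**2)

# potential only -> tuple[NDArray[float64]]
(psi,) = from_convergence(kappa, lmax=32, potential=True)

# deflection and shear -> tuple[NDArray[complex128], NDArray[complex128]]
alpha, gamma = from_convergence(kappa, lmax=32, deflection=True, shear=True)
```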
+ + +@typing.overload +def from_convergence( + kappa: npt.NDArray[np.float64], + lmax: int | None = None, + *, + potential: typing.Literal[False] = False, + deflection: typing.Literal[True] = True, + shear: typing.Literal[True] = True, + discretized: bool = True, +) -> tuple[ + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], +]: + # returns alpha, gamma + ... + + +@typing.overload +def from_convergence( + kappa: npt.NDArray[np.float64], + lmax: int | None = None, + *, + potential: typing.Literal[True] = True, + deflection: typing.Literal[True] = True, + shear: typing.Literal[True] = True, + discretized: bool = True, +) -> tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], +]: + # returns psi, alpha, gamma + ... + + def from_convergence( # noqa: PLR0913 kappa: npt.NDArray[np.float64], lmax: int | None = None, @@ -53,7 +164,7 @@ def from_convergence( # noqa: PLR0913 deflection: bool = False, shear: bool = False, discretized: bool = True, -) -> tuple[npt.NDArray[np.float64], ...]: +) -> tuple[npt.NDArray[np.float64] | npt.NDArray[np.complex128], ...]: r""" Compute other weak lensing maps from the convergence. @@ -175,7 +286,7 @@ def from_convergence( # noqa: PLR0913 ell = np.arange(lmax + 1) # this tuple will be returned - results: tuple[npt.NDArray[np.float64], ...] = () + results: tuple[npt.NDArray[np.float64] | npt.NDArray[np.complex128], ...] = () # convert convergence to potential fl = np.divide(-2, ell * (ell + 1), where=(ell > 0), out=np.zeros(lmax + 1)) diff --git a/glass/observations.py b/glass/observations.py index a7802364..229099eb 100644 --- a/glass/observations.py +++ b/glass/observations.py @@ -89,7 +89,7 @@ def gaussian_nz( mean: float | npt.NDArray[np.float64], sigma: float | npt.NDArray[np.float64], *, - norm: npt.NDArray[np.float64] | None = None, + norm: float | npt.NDArray[np.float64] | None = None, ) -> npt.NDArray[np.float64]: """ Gaussian redshift distribution. @@ -130,11 +130,11 @@ def gaussian_nz( def smail_nz( z: npt.NDArray[np.float64], - z_mode: npt.NDArray[np.float64], - alpha: npt.NDArray[np.float64], - beta: npt.NDArray[np.float64], + z_mode: float | npt.NDArray[np.float64], + alpha: float | npt.NDArray[np.float64], + beta: float | npt.NDArray[np.float64], *, - norm: npt.NDArray[np.float64] | None = None, + norm: float | npt.NDArray[np.float64] | None = None, ) -> npt.NDArray[np.float64]: r""" Redshift distribution following Smail et al. (1994). 
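The widened annotations on `gaussian_nz` and `smail_nz` make explicit that scalar parameters are accepted alongside arrays, with array-valued parameters producing one distribution per leading entry. A minimal usage sketch, using illustrative redshift grids and parameter values rather than anything from the package docs:

```python
# Sketch only: scalar and array-valued parameters for the redshift
# distributions whose annotations were widened above.
import numpy as np

from glass import gaussian_nz, smail_nz

z = np.linspace(0.0, 2.0, 100)

# scalar parameters, matching how the new tests call these functions
nz = gaussian_nz(z, 0.5, 0.1, norm=1.0)
pz = smail_nz(z, 0.8, 2.0, 1.5)

# array-valued parameters should give one distribution per entry, i.e. a
# (3, 100) array here (assumption based on the multidimensional test below)
nz_multi = gaussian_nz(z, np.array([0.3, 0.6, 0.9]), np.array([0.1, 0.1, 0.1]))
```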
diff --git a/tests/test_fields.py b/tests/test_fields.py index 9f33daee..11f322d5 100644 --- a/tests/test_fields.py +++ b/tests/test_fields.py @@ -1,6 +1,392 @@ +import healpy as hp import numpy as np +import pytest -from glass import getcl +from glass import ( + cls2cov, + discretized_cls, + effective_cls, + generate_gaussian, + generate_lognormal, + getcl, + iternorm, + lognormal_gls, + multalm, + transform_cls, +) + + +def test_iternorm() -> None: + # check output shapes and types + + k = 2 + + generator = iternorm(k, np.array([1.0, 0.5, 0.5, 0.5, 0.2, 0.1, 0.5, 0.1, 0.2])) + result = next(generator) + + j, a, s = result + + assert isinstance(j, int) + assert a.shape == (k,) + assert isinstance(s, float) # type: ignore[unreachable] + assert s.shape == () # type: ignore[unreachable] + + # specify size + + size = 3 + + generator = iternorm( + k, + np.array( + [ + [1.0, 0.5, 0.5], + [0.5, 0.2, 0.1], + [0.5, 0.1, 0.2], + ] + ), + size, + ) + result = next(generator) + + j, a, s = result + + assert isinstance(j, int) + assert a.shape == (size, k) + assert s.shape == (size,) + + # test shape mismatch error + + with pytest.raises(TypeError, match="covariance row 0: shape"): + list(iternorm(k, np.array([[1.0, 0.5], [0.5, 0.2]]))) + + # test positive definite error + + with pytest.raises(ValueError, match="covariance matrix is not positive definite"): + list(iternorm(k, np.array([1.0, 0.5, 0.9, 0.5, 0.2, 0.4, 0.9, 0.4, -1.0]))) + + # test multiple iterations + + size = (3,) + + generator = iternorm( + k, + np.array( + [ + [[1.0, 0.5, 0.5], [0.5, 0.2, 0.1], [0.5, 0.1, 0.2]], + [[2.0, 1.0, 0.8], [1.0, 0.5, 0.3], [0.8, 0.3, 0.6]], + ] + ), + size, + ) + + result1 = next(generator) + result2 = next(generator) + + assert result1 != result2 + assert isinstance(result1, tuple) + assert len(result1) == 3 + assert isinstance(result2, tuple) + assert len(result2) == 3 + + # test k = 0 + + generator = iternorm(0, np.array([1.0])) + + j, a, s = result + + assert j == 1 + assert a.shape == (3, 2) + assert isinstance(s, np.ndarray) + assert s.shape == (3,) + + +def test_cls2cov() -> None: + # check output values and shape + + nl, nf, nc = 3, 2, 2 + + generator = cls2cov( + [np.array([1.0, 0.5, 0.3]), None, np.array([0.7, 0.6, 0.1])], # type: ignore[list-item] + nl, + nf, + nc, + ) + cov = next(generator) + + assert cov.shape == (nl, nc + 1) + + np.testing.assert_array_equal(cov[:, 0], np.array([0.5, 0.25, 0.15])) + np.testing.assert_array_equal(cov[:, 1], 0) + np.testing.assert_array_equal(cov[:, 2], 0) + + # test negative value error + + generator = cls2cov( + np.array( # type: ignore[arg-type] + [ + [-1.0, 0.5, 0.3], + [0.8, 0.4, 0.2], + [0.7, 0.6, 0.1], + ] + ), + nl, + nf, + nc, + ) + with pytest.raises(ValueError, match="negative values in cl"): + next(generator) + + # test multiple iterations + + nl, nf, nc = 3, 3, 2 + + generator = cls2cov( + np.array( # type: ignore[arg-type] + [ + [1.0, 0.5, 0.3], + [0.8, 0.4, 0.2], + [0.7, 0.6, 0.1], + [0.9, 0.5, 0.3], + [0.6, 0.3, 0.2], + [0.8, 0.7, 0.4], + ] + ), + nl, + nf, + nc, + ) + + cov1 = np.copy(next(generator)) + cov2 = np.copy(next(generator)) + cov3 = next(generator) + + assert cov1.shape == (nl, nc + 1) + assert cov2.shape == (nl, nc + 1) + assert cov3.shape == (nl, nc + 1) + + assert not np.array_equal(cov1, cov2) + assert not np.array_equal(cov2, cov3) + + +def test_multalm() -> None: + # check output values and shapes + + alm = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]) + bl = np.array([2.0, 0.5, 1.0]) + alm_copy = np.copy(alm) + + result = 
multalm(alm, bl, inplace=True) + + assert np.array_equal(result, alm) # in-place + expected_result = np.array([2.0, 1.0, 3.0, 2.0, 5.0, 6.0]) + np.testing.assert_allclose(result, expected_result) + assert not np.array_equal(alm_copy, result) + + # multiple with 1s + + bl = np.ones(3) + + result = multalm(alm, bl, inplace=False) + np.testing.assert_array_equal(result, alm) + + # multiple with 0s + + bl = np.array([0.0, 1.0, 0.0]) + + result = multalm(alm, bl, inplace=False) + + expected_result = np.array([0.0, 1.0, 0.0, 2.0, 0.0, 0.0]) + np.testing.assert_allclose(result, expected_result) + + # empty arrays + + alm = np.array([]) + bl = np.array([]) + + result = multalm(alm, bl, inplace=False) + np.testing.assert_array_equal(result, alm) + + +def test_transform_cls() -> None: + tfm = "lognormal" + pars = [2] + sub_cls = np.array([1, 2, 3, 4, 5]) + + # empty cls + + assert transform_cls([], tfm, pars) == [] + + # check output shape + + assert len(transform_cls([sub_cls], tfm, pars)) == 1 + assert len(transform_cls([sub_cls], tfm, pars)[0]) == 5 + + assert len(transform_cls([sub_cls, sub_cls], tfm, pars)) == 2 + assert len(transform_cls([sub_cls, sub_cls], tfm, pars)[0]) == 5 + assert len(transform_cls([sub_cls, sub_cls], tfm, pars)[1]) == 5 + + # one sequence of empty cls + + assert transform_cls([[], sub_cls], tfm, pars)[0] == [] + + # monopole behavior + + assert transform_cls([sub_cls, np.linspace(0, 5, 6)], tfm, pars)[1][0] == 0 + + +def test_lognormal_gls() -> None: + shift = 2 + + # empty cls + + assert lognormal_gls([], shift) == [] + + # check output shape + + assert len(lognormal_gls([np.linspace(1, 5, 5)], shift)) == 1 + assert len(lognormal_gls([np.linspace(1, 5, 5)], shift)[0]) == 5 + + assert len(lognormal_gls([np.linspace(1, 5, 5), np.linspace(1, 5, 5)], shift)) == 2 + assert ( + len(lognormal_gls([np.linspace(1, 5, 5), np.linspace(1, 5, 5)], shift)[0]) == 5 + ) + assert ( + len(lognormal_gls([np.linspace(1, 5, 5), np.linspace(1, 5, 5)], shift)[1]) == 5 + ) + + +def test_discretized_cls() -> None: + # empty cls + + result = discretized_cls([]) + assert result == [] + + # power spectra truncated at lmax + 1 if lmax provided + + result = discretized_cls([np.arange(10), np.arange(10), np.arange(10)], lmax=5) + + for cl in result: + assert len(cl) == 6 + + # check ValueError for triangle number + + with pytest.raises( + ValueError, match="length of cls array is not a triangle number" + ): + discretized_cls([np.arange(10), np.arange(10)], ncorr=1) + + # ncorr not None + + cls = [np.arange(10), np.arange(10), np.arange(10)] + ncorr = 0 + result = discretized_cls(cls, ncorr=ncorr) + + assert len(result[0]) == 10 + assert len(result[1]) == 10 + assert len(result[2]) == 0 # third correlation should be removed + + # check if pixel window function was applied correctly with nside not None + + nside = 4 + + pw = hp.pixwin(nside, lmax=7) + + result = discretized_cls([[], np.ones(10), np.ones(10)], nside=nside) + + for cl in result: + n = min(len(cl), len(pw)) + expected = np.ones(n) * pw[:n] ** 2 + np.testing.assert_allclose(cl[:n], expected) + + +def test_effective_cls() -> None: + # empty cls + + result = effective_cls([], np.array([])) + assert result.shape == (0,) + + # check ValueError for triangle number + + with pytest.raises(ValueError, match="length of cls is not a triangle number"): + effective_cls([np.arange(10), np.arange(10)], np.ones((2, 1))) + + # check ValueError for triangle number + + with pytest.raises(ValueError, match="shape mismatch between fields and weights1"): + 
effective_cls([], np.ones((3, 1))) + + # check with only weights1 + + cls = [np.arange(15), np.arange(15), np.arange(15)] + weights1 = np.ones((2, 1)) + + result = effective_cls(cls, weights1) + assert result.shape == (1, 1, 15) + + # check truncation if lmax provided + + result = effective_cls(cls, weights1, lmax=5) + + assert result.shape == (1, 1, 6) + np.testing.assert_allclose(result[..., 6:], 0) + + # check with weights1 and weights2 and weights1 is weights2 + + result = effective_cls(cls, weights1, weights2=weights1) + assert result.shape == (1, 1, 15) + + +def test_generate_gaussian() -> None: + gls = [np.array([1.0, 0.5, 0.1])] + nside = 4 + ncorr = 1 + + gaussian_fields = list(generate_gaussian(gls, nside)) + + assert gaussian_fields[0].shape == (hp.nside2npix(nside),) + + # requires resetting the RNG for reproducibility + rng = np.random.default_rng(seed=42) + gaussian_fields = list(generate_gaussian(gls, nside, rng=rng)) + + assert gaussian_fields[0].shape == (hp.nside2npix(nside),) + + # requires resetting the RNG for reproducibility + rng = np.random.default_rng(seed=42) + new_gaussian_fields = list(generate_gaussian(gls, nside, ncorr=ncorr, rng=rng)) + + assert new_gaussian_fields[0].shape == (hp.nside2npix(nside),) + + np.testing.assert_allclose(new_gaussian_fields[0], gaussian_fields[0]) + + with pytest.raises(ValueError, match="all gls are empty"): + list(generate_gaussian([], nside)) + + +def test_generate_lognormal(rng: np.random.Generator) -> None: + gls = [np.array([1.0, 0.5, 0.1])] + nside = 4 + + # check shape and values + + lognormal_fields = list(generate_lognormal(gls, nside, shift=1.0, rng=rng)) + + assert len(lognormal_fields) == len(gls) + for m in lognormal_fields: + assert m.shape == (192,) + assert np.all(m >= -1) + + # check shift + + # requires resetting the RNG to obtain exact the same result multiplied by 2 (shift) + rng = np.random.default_rng(seed=42) + lognormal_fields = list(generate_lognormal(gls, nside, shift=1.0, rng=rng)) + + rng = np.random.default_rng(seed=42) + new_lognormal_fields = list(generate_lognormal(gls, nside, shift=2.0, rng=rng)) + + for ms, mu in zip(new_lognormal_fields, lognormal_fields, strict=False): + np.testing.assert_allclose(ms, mu * 2.0) def test_getcl() -> None: @@ -14,3 +400,15 @@ def test_getcl() -> None: result = getcl(cls, i, j) expected = np.array([min(i, j), max(i, j)], dtype=np.float64) np.testing.assert_array_equal(np.sort(result), expected) + + # check slicing + result = getcl(cls, i, j, lmax=0) + expected = np.array([max(i, j)], dtype=np.float64) + assert len(result) == 1 + np.testing.assert_array_equal(result, expected) + + # check padding + result = getcl(cls, i, j, lmax=50) + expected = np.zeros((49,), dtype=np.float64) + assert len(result) == 51 + np.testing.assert_array_equal(result[2:], expected) diff --git a/tests/test_observations.py b/tests/test_observations.py new file mode 100644 index 00000000..d801881f --- /dev/null +++ b/tests/test_observations.py @@ -0,0 +1,144 @@ +import numpy as np +import pytest + +from glass import ( + equal_dens_zbins, + fixed_zbins, + gaussian_nz, + smail_nz, + tomo_nz_gausserr, + vmap_galactic_ecliptic, +) + + +def test_vmap_galactic_ecliptic() -> None: + """Add unit tests for :func:`vmap_galactic_ecliptic`.""" + n_side = 4 + + # check shape + + vmap = vmap_galactic_ecliptic(n_side) + np.testing.assert_array_equal(len(vmap), 12 * n_side**2) + + # no rotation + + vmap = vmap_galactic_ecliptic(n_side, galactic=(0, 0), ecliptic=(0, 0)) + np.testing.assert_array_equal(vmap, 
np.zeros_like(vmap)) + + # check errors raised + + with pytest.raises(TypeError, match="galactic stripe must be a pair of numbers"): + vmap_galactic_ecliptic(n_side, galactic=(1,)) # type: ignore[arg-type] + + with pytest.raises(TypeError, match="ecliptic stripe must be a pair of numbers"): + vmap_galactic_ecliptic(n_side, ecliptic=(1,)) # type: ignore[arg-type] + + with pytest.raises(TypeError, match="galactic stripe must be a pair of numbers"): + vmap_galactic_ecliptic(n_side, galactic=(1, 2, 3)) # type: ignore[arg-type] + + with pytest.raises(TypeError, match="ecliptic stripe must be a pair of numbers"): + vmap_galactic_ecliptic(n_side, ecliptic=(1, 2, 3)) # type: ignore[arg-type] + + +def test_gaussian_nz(rng: np.random.Generator) -> None: + """Add unit tests for :func:`gaussian_nz`.""" + mean = 0 + sigma = 1 + z = np.linspace(0, 1, 11) + + # check passing in the norm + + nz = gaussian_nz(z, mean, sigma, norm=0) + np.testing.assert_array_equal(nz, np.zeros_like(nz)) + + # check the value of each entry is close to the norm + + norm = 1 + nz = gaussian_nz(z, mean, sigma, norm=norm) + np.testing.assert_allclose(nz.sum() / nz.shape, norm, rtol=1e-2) + + # check multidimensionality size + + nz = gaussian_nz( + z, + np.tile(mean, z.shape), + np.tile(sigma, z.shape), + norm=rng.normal(size=z.shape), + ) + np.testing.assert_array_equal(nz.shape, (len(z), len(z))) + + +def test_smail_nz() -> None: + """Add unit tests for :func:`smail_nz`.""" + alpha = 1 + beta = 1 + mode = 1 + z = np.linspace(0, 1, 11) + + # check passing in the norm + + pz = smail_nz(z, mode, alpha, beta, norm=0) + np.testing.assert_array_equal(pz, np.zeros_like(pz)) + + +def test_fixed_zbins() -> None: + """Add unit tests for :func:`fixed_zbins`.""" + zmin = 0 + zmax = 1 + + # check nbins input + + nbins = 5 + expected_zbins = [(0.0, 0.2), (0.2, 0.4), (0.4, 0.6), (0.6, 0.8), (0.8, 1.0)] + zbins = fixed_zbins(zmin, zmax, nbins=nbins) + np.testing.assert_array_equal(len(zbins), nbins) + np.testing.assert_allclose(zbins, expected_zbins, rtol=1e-15) + + # check dz input + + dz = 0.2 + zbins = fixed_zbins(zmin, zmax, dz=dz) + np.testing.assert_array_equal(len(zbins), np.ceil((zmax - zmin) / dz)) + np.testing.assert_allclose(zbins, expected_zbins, rtol=1e-15) + + # check dz for spacing which results in a max value above zmax + + zbins = fixed_zbins(zmin, zmax, dz=0.3) + np.testing.assert_array_less(zmax, zbins[-1][1]) + + # check error raised + + with pytest.raises(ValueError, match="exactly one of nbins and dz must be given"): + fixed_zbins(zmin, zmax, nbins=nbins, dz=dz) + + +def test_equal_dens_zbins() -> None: + """Add unit tests for :func:`equal_dens_zbins`.""" + z = np.linspace(0, 1, 11) + nbins = 5 + + # check expected zbins returned + + expected_zbins = [(0.0, 0.2), (0.2, 0.4), (0.4, 0.6), (0.6, 0.8), (0.8, 1.0)] + zbins = equal_dens_zbins(z, np.ones_like(z), nbins) + np.testing.assert_allclose(zbins, expected_zbins, rtol=1e-15) + + # check output shape + + np.testing.assert_array_equal(len(zbins), nbins) + + +def test_tomo_nz_gausserr() -> None: + """Add unit tests for :func:`tomo_nz_gausserr`.""" + sigma_0 = 0.1 + z = np.linspace(0, 1, 11) + zbins = [(0, 0.2), (0.2, 0.4), (0.4, 0.6), (0.6, 0.8), (0.8, 1.0)] + + # check zeros returned + + binned_nz = tomo_nz_gausserr(z, np.zeros_like(z), sigma_0, zbins) + np.testing.assert_array_equal(binned_nz, np.zeros_like(binned_nz)) + + # check the shape of the output + + np.testing.assert_array_equal(binned_nz.shape, (len(zbins), len(z))) diff --git a/tests/test_fits.py 
b/tests/test_user.py similarity index 84% rename from tests/test_fits.py rename to tests/test_user.py index f2a21186..dfee8353 100644 --- a/tests/test_fits.py +++ b/tests/test_user.py @@ -5,7 +5,7 @@ import numpy.typing as npt import pytest -from glass import write_catalog +from glass import load_cls, save_cls, write_catalog # check if fitsio is available for testing HAVE_FITSIO = importlib.util.find_spec("fitsio") is not None @@ -14,6 +14,25 @@ my_max = 1000 # Typically number of galaxies in loop except_int = 750 # Where test exception occurs in loop filename = "MyFile.Fits" +cls_file = "Cls.npz" + + +def test_read_write_cls(rng: np.random.Generator, tmp_path: pathlib.Path) -> None: + cls = rng.normal(size=(10, 10)) + save_cls(tmp_path / cls_file, cls) + + assert pathlib.Path.exists(tmp_path / cls_file) + + with np.load(tmp_path / cls_file) as npz: + values = npz["values"] + split = npz["split"] + + np.testing.assert_array_equal(values, np.concatenate(cls)) + np.testing.assert_array_equal(split, np.cumsum([len(cl) for cl in cls[:-1]])) + np.testing.assert_array_equal(cls, np.split(values, split)) + + npz = load_cls(tmp_path / cls_file) + np.testing.assert_array_equal(npz, cls) @pytest.mark.skipif(not HAVE_FITSIO, reason="test requires fitsio")
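For reference, the on-disk layout that `test_read_write_cls` asserts — a single `values` array holding the concatenated spectra plus the `split` indices needed to undo the concatenation — also accommodates ragged inputs. A hedged round-trip sketch, assuming `save_cls` accepts any sequence of 1-D spectra (the stored format suggests this, but the test above only exercises a square array):

```python
# Round-trip sketch based on the format asserted in test_read_write_cls;
# the ragged input is an assumption, the test itself uses a (10, 10) array.
import numpy as np

from glass import load_cls, save_cls

cls = [np.arange(5.0), np.arange(3.0), np.arange(7.0)]

save_cls("cls.npz", cls)
loaded = load_cls("cls.npz")

for original, restored in zip(cls, loaded, strict=True):
    np.testing.assert_array_equal(original, restored)
```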