Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

DEV: make sure all priors return float when needed #848

Open
wants to merge 12 commits into
base: main
Choose a base branch
from
120 changes: 68 additions & 52 deletions bilby/core/prior/analytical.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,8 +169,10 @@ def cdf(self, val):
_cdf = (np.log(val / self.minimum) /
np.log(self.maximum / self.minimum))
else:
_cdf = np.atleast_1d(val ** (self.alpha + 1) - self.minimum ** (self.alpha + 1)) / \
(self.maximum ** (self.alpha + 1) - self.minimum ** (self.alpha + 1))
_cdf = (
(val ** (self.alpha + 1) - self.minimum ** (self.alpha + 1))
/ (self.maximum ** (self.alpha + 1) - self.minimum ** (self.alpha + 1))
)
_cdf = np.minimum(_cdf, 1)
_cdf = np.maximum(_cdf, 0)
return _cdf
Expand Down Expand Up @@ -367,16 +369,16 @@ def ln_prob(self, val):
return np.nan_to_num(- np.log(2 * np.abs(val)) - np.log(np.log(self.maximum / self.minimum)))

def cdf(self, val):
    """Return the CDF of the symmetric log-uniform prior at ``val``.

    The support is ``[-maximum, -minimum] U [minimum, maximum]``: the CDF is
    0 below ``-maximum``, rises to 0.5 at ``-minimum``, stays at 0.5 across
    the gap around zero, and rises to 1 at ``maximum``.

    Parameters
    ==========
    val: Union[float, int, array_like]
        The value(s) at which to evaluate the CDF

    Returns
    =======
    Union[float, array_like]: the CDF value(s); a float for scalar input
    """
    norm = 0.5 / np.log(self.maximum / self.minimum)
    v = np.atleast_1d(np.asarray(val, dtype=float))
    # Evaluate both log branches everywhere and let np.select pick the right
    # one per element.  log(0) at val == 0 is harmless here (that element
    # takes the constant 0.5 branch) whereas a purely multiplicative form
    # would produce inf * 0 = nan at val == 0.
    with np.errstate(divide="ignore"):
        lower_branch = -norm * np.log(np.abs(v) / self.maximum)
        upper_branch = 0.5 + norm * np.log(np.abs(v) / self.minimum)
    _cdf = np.select(
        [
            v < -self.maximum,
            v <= -self.minimum,
            v < self.minimum,
            v <= self.maximum,
        ],
        [0.0, lower_branch, 0.5, upper_branch],
        default=1.0,
    )
    if np.ndim(val) == 0:
        # scalar in, float out
        return float(_cdf[0])
    return _cdf


class Cosine(Prior):
Expand Down Expand Up @@ -426,10 +428,12 @@ def prob(self, val):
return np.cos(val) / 2 * self.is_in_prior_range(val)

def cdf(self, val):
    """Return the cumulative distribution of the cosine prior at ``val``.

    Zero below ``minimum``, one above ``maximum``, and the normalised
    difference of sines on the support in between.
    """
    sin_low = np.sin(self.minimum)
    sin_range = np.sin(self.maximum) - sin_low
    # boolean mask keeps the interpolated term only on the support
    in_support = (val >= self.minimum) * (val <= self.maximum)
    _cdf = (np.sin(val) - sin_low) / sin_range * in_support + 1 * (val > self.maximum)
    return _cdf


Expand Down Expand Up @@ -480,10 +484,12 @@ def prob(self, val):
return np.sin(val) / 2 * self.is_in_prior_range(val)

def cdf(self, val):
    """Return the cumulative distribution of the sine prior at ``val``.

    Zero below ``minimum``, one above ``maximum``, and the normalised
    difference of cosines on the support in between.
    """
    cos_low = np.cos(self.minimum)
    cos_range = np.cos(self.maximum) - cos_low
    # boolean mask keeps the interpolated term only on the support
    in_support = (val >= self.minimum) * (val <= self.maximum)
    _cdf = (np.cos(val) - cos_low) / cos_range * in_support + 1 * (val > self.maximum)
    return _cdf


Expand Down Expand Up @@ -625,11 +631,13 @@ def prob(self, val):
/ self.sigma / self.normalisation * self.is_in_prior_range(val)

def cdf(self, val):
    """Return the cumulative distribution of the truncated Gaussian at ``val``.

    Zero below ``minimum``, one above ``maximum``, and the normalised
    difference of error functions on the support.
    """
    # CDF contribution of the lower truncation point
    lower_tail = erf((self.minimum - self.mu) / 2 ** 0.5 / self.sigma)
    raw = (
        erf((val - self.mu) / 2 ** 0.5 / self.sigma) - lower_tail
    ) / 2 / self.normalisation
    in_support = (val >= self.minimum) * (val <= self.maximum)
    return raw * in_support + 1 * (val > self.maximum)


Expand Down Expand Up @@ -1367,6 +1375,8 @@ def __init__(self, sigma, mu=None, r=None, name=None, latex_label=None,
raise ValueError("For the Fermi-Dirac prior the values of sigma and r "
"must be positive.")

self.expr = np.exp(self.r)

def rescale(self, val):
    """
    'Rescale' a sample from the unit line element to the appropriate Fermi-Dirac prior.

    Parameters
    ==========
    val: Union[float, int, array_like]
        A random number between 0 and 1

    Returns
    =======
    Union[float, array_like]: the rescaled sample(s); ``np.inf`` where the
    inversion is undefined (``val`` numerically at or above 1)

    References
    ==========

    .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
       <https://arxiv.org/abs/1705.08978v1>`_, 2017.
    """
    # (1 + e^r)^-val appears twice in the analytic inverse CDF; evaluate once
    decay = (1 + self.expr) ** -val
    inv = decay * (1 + 1 / self.expr) - 1 / self.expr
    # rounding can push inv slightly negative as val -> 1; clamp to zero so
    # the log yields -inf (hence a rescaled value of +inf, matching the exact
    # limit) without emitting a divide-by-zero RuntimeWarning
    with np.errstate(divide="ignore"):
        return -self.sigma * np.log(np.maximum(inv, 0))

def prob(self, val):
    """Return the prior probability of val.

    Parameters
    ==========
    val: Union[float, int, array_like]

    Returns
    =======
    float: Prior probability of val
    """
    # 1 / (exp(val/sigma - r) + 1) written as exp(-logaddexp(x, 0)) so large
    # val cannot overflow np.exp; the probability simply underflows to zero
    logistic = np.exp(-np.logaddexp(val / self.sigma - self.r, 0))
    # normalisation is sigma * log(1 + e^r) (self.expr == e^r); the prior has
    # zero probability below the minimum
    return logistic / (self.sigma * np.log1p(self.expr)) * (val >= self.minimum)

def ln_prob(self, val):
    """Return the log prior probability of val.

    Parameters
    ==========
    val: Union[float, int, array_like]

    Returns
    =======
    Union[float, array_like]: Log prior probability of val; a float for
    scalar input, ``-inf`` below the minimum
    """
    v = np.asarray(val, dtype=float)
    # log of the normalisation sigma * log(1 + e^r) (self.expr == e^r)
    norm = -np.log(self.sigma * np.log1p(self.expr))
    # log(1 / (e^(v/sigma - r) + 1)) = -logaddexp(v/sigma - r, 0); this stays
    # accurate for large v, where np.log(self.prob(val)) would collapse to
    # -inf once the probability underflows, and avoids log(0) warnings below
    # the minimum
    lnp = np.where(
        v >= self.minimum,
        norm - np.logaddexp(v / self.sigma - self.r, 0),
        -np.inf,
    )
    if np.ndim(val) == 0:
        # scalar in, float out
        return float(lnp)
    return lnp
def cdf(self, val):
    """
    Evaluate the CDF of the Fermi-Dirac distribution using a slightly
    modified form of Equation 23 of [1]_.

    Parameters
    ==========
    val: Union[float, int, array_like]
        The value(s) to evaluate the CDF at

    Returns
    =======
    Union[float, array_like]:
        The CDF value(s)

    References
    ==========

    .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
       <https://arxiv.org/abs/1705.08978v1>`_, 2017.
    """
    numerator = np.logaddexp(0, -self.r) - np.logaddexp(-val / self.sigma, -self.r)
    unclipped = numerator / np.logaddexp(0, self.r)
    # rounding can push the ratio marginally outside [0, 1]; clip to the support
    return np.clip(unclipped, 0, 1)


class Categorical(Prior):
Expand Down
7 changes: 5 additions & 2 deletions bilby/core/prior/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,14 @@

import numpy as np
import scipy.stats
from scipy.interpolate import interp1d

from ..utils import (
infer_args_from_method,
BilbyJsonEncoder,
decode_bilby_json,
logger,
get_dict_with_properties,
WrappedInterp1d as interp1d,
)


Expand Down Expand Up @@ -178,7 +178,10 @@ def cdf(self, val):
cdf = cumulative_trapezoid(pdf, x, initial=0)
interp = interp1d(x, cdf, assume_sorted=True, bounds_error=False,
fill_value=(0, 1))
return interp(val)
output = interp(val)
if isinstance(val, (int, float)):
output = float(output)
return output

def ln_prob(self, val):
"""Return the prior ln probability of val, this should be overwritten
Expand Down
39 changes: 27 additions & 12 deletions bilby/core/prior/dict.py
Original file line number Diff line number Diff line change
Expand Up @@ -468,7 +468,9 @@ def sample_subset_constrained(self, keys=iter([]), size=None):
def normalize_constraint_factor(
self, keys, min_accept=10000, sampling_chunk=50000, nrepeats=10
):
if keys in self._cached_normalizations.keys():
if len(self.constraint_keys) == 0:
return 1
elif keys in self._cached_normalizations.keys():
return self._cached_normalizations[keys]
else:
factor_estimates = [
Expand Down Expand Up @@ -530,8 +532,10 @@ def check_prob(self, sample, prob):
return 0.0
else:
constrained_prob = np.zeros_like(prob)
keep = np.array(self.evaluate_constraints(sample), dtype=bool)
constrained_prob[keep] = prob[keep] * ratio
in_bounds = np.isfinite(prob)
subsample = {key: sample[key][in_bounds] for key in sample}
keep = np.array(self.evaluate_constraints(subsample), dtype=bool)
constrained_prob[in_bounds] = prob[in_bounds] * keep * ratio
return constrained_prob

def ln_prob(self, sample, axis=None, normalized=True):
Expand Down Expand Up @@ -572,8 +576,10 @@ def check_ln_prob(self, sample, ln_prob, normalized=True):
return -np.inf
else:
constrained_ln_prob = -np.inf * np.ones_like(ln_prob)
keep = np.array(self.evaluate_constraints(sample), dtype=bool)
constrained_ln_prob[keep] = ln_prob[keep] + np.log(ratio)
in_bounds = np.isfinite(ln_prob)
subsample = {key: sample[key][in_bounds] for key in sample}
keep = np.log(np.array(self.evaluate_constraints(subsample), dtype=bool))
constrained_ln_prob[in_bounds] = ln_prob[in_bounds] + keep + np.log(ratio)
return constrained_ln_prob

def cdf(self, sample):
Expand Down Expand Up @@ -634,9 +640,7 @@ def test_has_redundant_keys(self):
del temp[key]
if temp.test_redundancy(key, disable_logging=True):
logger.warning(
"{} is a redundant key in this {}.".format(
key, self.__class__.__name__
)
f"{key} is a redundant key in this {self.__class__.__name__}."
)
redundant = True
return redundant
Expand Down Expand Up @@ -844,17 +848,28 @@ def rescale(self, keys, theta):
self._check_resolved()
self._update_rescale_keys(keys)
result = dict()
joint = dict()
for key, index in zip(
self.sorted_keys_without_fixed_parameters, self._rescale_indexes
):
result[key] = self[key].rescale(
theta[index], **self.get_required_variables(key)
)
self[key].least_recently_sampled = result[key]
samples = []
for key in keys:
samples += list(np.asarray(result[key]).flatten())
return samples
if isinstance(self[key], JointPrior) and self[key].dist.distname not in joint:
joint[self[key].dist.distname] = [key]
elif isinstance(self[key], JointPrior):
joint[self[key].dist.distname].append(key)
for names in joint.values():
values = list()
for key in names:
values = np.concatenate([values, result[key]])
for key, value in zip(names, values):
result[key] = value
# this is gross but can be removed whenever we switch to returning
# arrays, flatten converts 0-d arrays to 1-d and squeeze converts it
# back
return [np.asarray(result[key]).flatten().squeeze() for key in keys]

def _update_rescale_keys(self, keys):
if not keys == self._least_recently_rescaled_keys:
Expand Down
8 changes: 2 additions & 6 deletions bilby/core/prior/interpolated.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,7 @@
import numpy as np
from scipy.interpolate import interp1d

from .base import Prior
from ..utils import logger
from ..utils import logger, WrappedInterp1d as interp1d


class Interped(Prior):
Expand Down Expand Up @@ -86,10 +85,7 @@ def rescale(self, val):

This maps to the inverse CDF. This is done using interpolation.
"""
rescaled = self.inverse_cumulative_distribution(val)
if rescaled.shape == ():
rescaled = float(rescaled)
return rescaled
return self.inverse_cumulative_distribution(val)

@property
def minimum(self):
Expand Down
25 changes: 24 additions & 1 deletion bilby/core/utils/calculus.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import math

import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import RectBivariateSpline, interp1d
from scipy.special import logsumexp

from .log import logger
Expand Down Expand Up @@ -219,6 +219,29 @@ def __call__(self, x, y, dx=0, dy=0, grid=False):
return result


class WrappedInterp1d(interp1d):
    """
    A wrapper around scipy interp1d which adds value-based equality and
    makes sure that the output is a float if the input is a float or int.

    Note: defining ``__eq__`` without ``__hash__`` makes instances
    unhashable, which is the safe default for a mutable container of arrays.
    """

    def __call__(self, x):
        """Evaluate the interpolant at ``x``.

        Parameters
        ==========
        x: Union[float, int, array_like]

        Returns
        =======
        A python float for scalar input, otherwise the usual interp1d output.
        """
        output = super().__call__(x)
        if isinstance(x, (float, int)):
            output = output.item()
        return output

    def __eq__(self, other):
        """Compare attribute-by-attribute rather than by identity.

        Arrays are compared with :func:`numpy.array_equal`; the private
        ``_spline`` attribute is skipped, as it is derived from the other
        attributes.  Non-``interp1d`` objects return ``NotImplemented``
        rather than raising ``AttributeError`` on missing attributes.
        """
        if not isinstance(other, interp1d):
            return NotImplemented
        for key, value in self.__dict__.items():
            if key == "_spline":
                # derived lazily from the data; not meaningful for equality
                continue
            other_value = other.__dict__.get(key)
            if isinstance(value, np.ndarray):
                if not np.array_equal(value, other_value):
                    return False
            elif value != other_value:
                return False
        return True


def round_up_to_power_of_two(x):
"""Round up to the next power of two

Expand Down
Loading
Loading