From 0df4da013c8e4df61cfdcd467efe66ca539f119c Mon Sep 17 00:00:00 2001
From: Nahuel Espinosa
Date: Tue, 15 Oct 2024 11:37:52 -0600
Subject: [PATCH] Refactor estimation library (#445)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Proposed changes

Related to #385.

As the title says, this refactors the estimation library to reduce compile times, improve consistency, and make it easier to extend.

#### Type of change

- [ ] 🐛 Bugfix (change which fixes an issue)
- [x] 🚀 Feature (change which adds functionality)
- [ ] 📚 Documentation (change which fixes or extends documentation)

💥 **Breaking change!** `beluga::calculate_covariance` is now `beluga::covariance`, and the algorithms are now niebloids.

### Checklist

- [x] Lint and unit tests (if any) pass locally with my changes
- [x] I have added tests that prove my fix is effective or that my feature works
- [x] I have added necessary documentation (if appropriate)
- [x] All commits have been signed for [DCO](https://developercertificate.org/)

### Additional comments

There is room for improvement around this design (e.g. optional return types, more overloads).

---------

Signed-off-by: Nahuel Espinosa
---
 .clang-tidy                                   |   1 +
 beluga/include/beluga/algorithm/amcl_core.hpp |   3 +-
 .../algorithm/cluster_based_estimation.hpp    |   1 +
 .../include/beluga/algorithm/estimation.hpp   | 806 ++++++++++--------
 .../test_cluster_based_estimation.cpp         |   3 +-
 .../test/beluga/algorithm/test_estimation.cpp |  11 +-
 .../test_multivariate_normal_distribution.cpp |   2 +-
 7 files changed, 468 insertions(+), 359 deletions(-)

diff --git a/.clang-tidy b/.clang-tidy
index f746a23a4..38d51e5d3 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -22,6 +22,7 @@ Checks: >
   -performance-move-const-arg,
   -performance-unnecessary-value-param,
   -readability-braces-around-statements,
+  -readability-qualified-auto,
   -readability-identifier-length,
   -readability-magic-numbers,
   -readability-named-parameter,
diff --git a/beluga/include/beluga/algorithm/amcl_core.hpp b/beluga/include/beluga/algorithm/amcl_core.hpp
index 72eaa7436..63f6ac191 100644
--- a/beluga/include/beluga/algorithm/amcl_core.hpp
+++ b/beluga/include/beluga/algorithm/amcl_core.hpp
@@ -89,8 +89,7 @@ class Amcl {
   using map_type = typename SensorModel::map_type;
   using spatial_hasher_type = spatial_hash;
   using random_state_generator_type = RandomStateGenerator;
-  using estimation_type =
-      std::invoke_result_t>), std::vector>;
+  using estimation_type = std::invoke_result_t>;

 public:
   /// Construct a AMCL instance.
diff --git a/beluga/include/beluga/algorithm/cluster_based_estimation.hpp b/beluga/include/beluga/algorithm/cluster_based_estimation.hpp
index 2c60b5764..509c4644c 100644
--- a/beluga/include/beluga/algorithm/cluster_based_estimation.hpp
+++ b/beluga/include/beluga/algorithm/cluster_based_estimation.hpp
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
diff --git a/beluga/include/beluga/algorithm/estimation.hpp b/beluga/include/beluga/algorithm/estimation.hpp
index 7cbd5cf95..182accc0d 100644
--- a/beluga/include/beluga/algorithm/estimation.hpp
+++ b/beluga/include/beluga/algorithm/estimation.hpp
@@ -1,4 +1,4 @@
-// Copyright 2022-2023 Ekumen, Inc.
+// Copyright 2022-2024 Ekumen, Inc.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
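To illustrate the breaking change noted above, here is a minimal usage sketch of the renamed entry points. It is not part of this patch and the sample poses are made up; it only shows that the niebloids are invoked like the previous free functions.

```cpp
#include <beluga/algorithm/estimation.hpp>

#include <Eigen/Core>
#include <sophus/se2.hpp>

#include <vector>

// Usage sketch (hypothetical caller code): beluga::estimate, beluga::mean, and
// beluga::covariance are now callable objects (niebloids), and the former
// beluga::calculate_covariance(values, mean) spelling becomes beluga::covariance(values, mean).
int main() {
  const std::vector<Sophus::SE2d> poses = {
      Sophus::SE2d{Sophus::SO2d{0.0}, Eigen::Vector2d{0.0, 0.0}},
      Sophus::SE2d{Sophus::SO2d{0.1}, Eigen::Vector2d{1.0, 0.0}},
  };
  // Equal-weight overload: returns the mean pose and its 3x3 covariance matrix.
  const auto [mean_pose, cov] = beluga::estimate(poses);
  (void)mean_pose;
  (void)cov;
}
```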
@@ -15,388 +15,498 @@ #ifndef BELUGA_ALGORITHM_ESTIMATION_HPP #define BELUGA_ALGORITHM_ESTIMATION_HPP -#include -#include -#include #include -#include -#include -#include -#include #include #include -#include + #include #include #include #include #include +#include /** * \file - * \brief Implementation of algorithms that allow calculating the estimated state of - * a particle filter. + * \brief Implementation of algorithms that allow calculating the estimated state of a particle filter. */ namespace beluga { namespace detail { -/** - * \brief Given a range with quaternions and a normalized range of weights, compute the weighted average quaternion. - * - * \tparam QuaternionRange A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Eigen::Quaternion`. - * \tparam WeightsRange A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Scalar`. - * \tparam Scalar The scalar type, matching those of the weights. - * \param quaternions Range of quaternions to compute the weighted average from. - * \param normalized_weights Range of normalized weights. With matching indices wrt \c quaternions . Non-normalized - * weights will yield incorrect results. - * \return The average quaternion. - */ -template < - class QuaternionRange, - class WeightsRange, - class Scalar = typename ranges::range_value_t::Scalar> -Eigen::Quaternion weighted_average_quaternion( - const QuaternionRange& quaternions, - const WeightsRange& normalized_weights) { - // Implementation is based on https://github.com/strasdat/Sophus/blob/main/sophus/average.hpp#L133 with the variant of - // non-uniform weights. - // See https://ntrs.nasa.gov/api/citations/20070017872/downloads/20070017872.pdf equation (13). 
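For context, the weighted quaternion average described in the cited report (equation (13)) selects the unit eigenvector of the weighted outer-product matrix associated with its largest eigenvalue:

```latex
% Weighted quaternion average, equation (13) of the cited report:
% the average is the unit eigenvector of M with the largest eigenvalue.
\bar{q} = \arg\max_{\lVert q \rVert = 1} q^{\top} M q,
\qquad
M = \sum_{i} w_i \, q_i q_i^{\top}
```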
- const size_t num_quaternions = std::size(quaternions); - assert(num_quaternions >= 1); - Eigen::Matrix all_quaternions(4, num_quaternions); - - for (const auto& [i, quaternion, normalized_weight] : - ranges::views::zip(ranges::views::iota(0), quaternions, normalized_weights)) { - all_quaternions.col(i) = normalized_weight * quaternion.coeffs(); +/// \cond detail + +struct mean_fn { + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>>, + std::enable_if_t, Value>, int> = 0> + auto operator()(Values&& values, Weights&& normalized_weights, Projection projection = {}) const { + static_assert(ranges::input_range); + static_assert(ranges::input_range); + + constexpr int M = Value::RowsAtCompileTime; + constexpr int N = Value::ColsAtCompileTime; + static_assert(N == 1); + using Scalar = typename Value::Scalar; + auto accumulator = Sophus::Vector::Zero().eval(); + + auto it = ranges::begin(values); + const auto last = ranges::end(values); + auto weights_it = ranges::begin(normalized_weights); + + assert(it != last); + assert(weights_it != ranges::end(normalized_weights)); + + for (; it != last; ++weights_it, ++it) { + accumulator += *weights_it * projection(*it); + } + + assert(weights_it == ranges::end(normalized_weights)); + + return accumulator; + } + + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Scalar = std::decay_t>>, + std::enable_if_t, int> = 0> + auto operator()(Values&& values, Weights&& normalized_weights, Projection projection = {}) const -> Scalar { + return (*this)( + std::forward(values), std::forward(normalized_weights), + [projection = std::move(projection)](const auto& value) { + return Sophus::Vector(projection(value)); + })(0); + } + + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>, + class Scalar = typename Value::Scalar, + std::enable_if_t, Value>, int> = 0> + auto operator()(Values&&, Weights&&, Projection = {}) const -> Value = delete; // not-implemented + + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>>, + class Scalar = typename Value::Scalar, + class = std::enable_if_t, Value>>> + auto operator()(Values&& values, Weights&& normalized_weights, Projection projection = {}) const + -> Sophus::SE2 { + // Compute the average of all the coefficients of the SE2 group elements and construct a new SE2 element. Notice + // that after averaging the complex representation of the orientation the resulting complex is not on the unit + // circle. This is expected and the value will be renormalized before returning. + Sophus::Vector4 mean_vector = (*this)( + std::forward(values), std::forward(normalized_weights), + [projection = std::move(projection)](const auto& value) { + return Eigen::Map>{projection(value).data()}; + }); + + Eigen::Map> mean{mean_vector.data()}; + mean.so2().normalize(); + return mean; } - const Eigen::Matrix qqt = all_quaternions * all_quaternions.transpose(); - const Eigen::EigenSolver> es(qqt); - - Eigen::Index max_eigenvalue_index; - es.eigenvalues().cwiseAbs().maxCoeff(&max_eigenvalue_index); - const Eigen::Matrix, 4, 1> max_eigenvector = es.eigenvectors().col(max_eigenvalue_index); - Eigen::Quaternion quat; - // This is not the same as quat{max_eigenvector.real()}. Eigen's internal coefficient order is different from the - // constructor one. 
- quat.coeffs() << max_eigenvector.real(); - return quat; -} + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>>, + class Scalar = typename Value::Scalar, + std::enable_if_t, Value>, int> = 0> + auto operator()(Values&& values, Weights&& normalized_weights, Projection projection = {}) const + -> Eigen::Quaternion { + static_assert(ranges::input_range); + static_assert(ranges::sized_range); + + static_assert(ranges::input_range); + static_assert(ranges::sized_range); + + // This implementation is based on Sophus' average methods with the variant of non-uniform weights. + // See https://github.com/strasdat/Sophus/blob/d0b7315a0d90fc6143defa54596a3a95d9fa10ec/sophus/average.hpp#L135 + // See https://ntrs.nasa.gov/api/citations/20070017872/downloads/20070017872.pdf equation (13). + + const auto size = static_cast(ranges::size(values)); + auto matrix = Eigen::Matrix(4, size); + + auto weights_it = ranges::begin(normalized_weights); + auto it = ranges::begin(values); + + assert(it != ranges::end(values)); + assert(weights_it != ranges::end(normalized_weights)); + + for (int index = 0; index < size; ++index, ++weights_it, ++it) { + matrix.col(index) = *weights_it * projection(*it).coeffs(); + } + + assert(it == ranges::end(values)); + assert(weights_it == ranges::end(normalized_weights)); + + const auto solver = Eigen::SelfAdjointEigenSolver>{matrix * matrix.transpose()}; + assert(solver.info() == Eigen::Success); + + // This is not the same as `result{solver.eigenvectors().col(3).real()}`. + // Eigen's internal coefficient order is different from the constructor one. + // Eigenvalues are sorted in increasing order, so eigenvalue number 3 is the max. + Eigen::Quaternion result; + result.coeffs() << solver.eigenvectors().col(3).real(); + return result; + } + + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>>, + class Scalar = typename Value::Scalar, + std::enable_if_t, Value>, int> = 0> + auto operator()(Values&& values, Weights&& normalized_weights, Projection projection = {}) const + -> Sophus::SO3 { + return {(*this)( + std::forward(values), std::forward(normalized_weights), + [projection = std::move(projection)](const auto& value) { return projection(value).unit_quaternion(); })}; + } + + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>>, + class Scalar = typename Value::Scalar, + std::enable_if_t, Value>, int> = 0> + auto operator()(Values&& values, Weights&& normalized_weights, Projection projection = {}) const + -> Sophus::SE3 { + static_assert(ranges::forward_range); // must allow multi-pass + static_assert(ranges::forward_range); // must allow multi-pass + + return { + (*this)(values, normalized_weights, [&projection](const auto& v) { return projection(v).unit_quaternion(); }), + (*this)(values, normalized_weights, [&projection](const auto& v) { return projection(v).translation(); })}; + } + + template < + class Values, + class Projection = ranges::identity, + class Value = std::decay_t>>> + auto operator()(Values&& values, Projection projection = {}) const { + using Scalar = typename Value::Scalar; + static_assert(ranges::sized_range); + const auto size = ranges::size(values); + auto weights = ranges::views::repeat_n(1.0 / static_cast(size), static_cast(size)); + return (*this)(std::forward(values), std::move(weights), std::move(projection)); + } +}; + +/// \endcond + } // namespace detail -/// 
Calculates the covariance of a range given its mean and the weights of each element. -/** - * \tparam Range A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Sophus::Vector2`. - * \tparam WeightsRange A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Scalar`. - * \tparam Scalar A scalar type, e.g. double or float. - * \param range Range to be used to calculate the covariance. - * \param normalized_weights Range with the normalized (total weight 1.0) weights of the samples in 'range'. - * \param mean The previously calculated mean of range. The value must be correct for the resulting - * covariance to be correct. - * \return The calculated covariance, as a `Sophus::Matrix2`. - */ -template -Sophus::Matrix2 -calculate_covariance(Range&& range, WeightsRange&& normalized_weights, const Sophus::Vector2& mean) { - auto translations_view = range | ranges::views::common; - auto normalized_weights_view = normalized_weights | ranges::views::common; - - auto calculate_weighted_sample_covariance = [mean](const auto& value, const auto& weight) { - const auto centered = value - mean; - return Sophus::Vector3{ - weight * centered.x() * centered.x(), // weighted sample x autocovariance - weight * centered.x() * centered.y(), // weighted sample xy cross-covariance - weight * centered.y() * centered.y(), // weighted sample y autocovariance - }; - }; - - // calculate the averaging factor for the weighted covariance estimate - // See https://en.wikipedia.org/wiki/Sample_mean_and_covariance#Weighted_samples - const auto squared_weight_sum = std::transform_reduce( - normalized_weights_view.begin(), normalized_weights_view.end(), Scalar{0.0}, std::plus{}, - [](const auto& weight) { return (weight * weight); }); - - // calculate the x autocovariance, xy cross-covariance, and y autocovariance - Sophus::Vector3 coefficients = - std::transform_reduce( - translations_view.begin(), translations_view.end(), normalized_weights_view.begin(), - Sophus::Vector3::Zero().eval(), std::plus{}, calculate_weighted_sample_covariance) / - (1.0 - squared_weight_sum); - - // create the symmetric 2x2 translation covariance matrix from the coefficients - auto covariance_matrix = Sophus::Matrix2{}; - covariance_matrix << coefficients(0), coefficients(1), coefficients(1), coefficients(2); - return covariance_matrix; -} - -/// Convenience overload that calculates the covariance of a range given its mean for the case where all -/// samples have the same weight. +/// Calculate the weighted mean (or average) of a range of values. /** - * \tparam Range A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Sophus::Vector2`. - * \tparam Scalar A scalar type, e.g. double or float. - * \param range Range to be used to calculate the covariance. - * \param mean The previously calculated mean of range. The value must be correct for the resulting - * covariance to be correct. - * \return The calculated covariance, as a `Sophus::Matrix2`. - */ -template -Sophus::Matrix2 calculate_covariance(Range&& range, const Sophus::Vector2& mean) { - const auto sample_count = range.size(); - return calculate_covariance( - range, - ranges::views::repeat_n(1.0 / static_cast(sample_count), static_cast(sample_count)), - mean); -} - -/// Returns a pair consisting of the estimated mean pose and its covariance. -/** - * Given a range of poses, computes the estimated pose by averaging the translation - * and rotation parts. 
- * Computes the covariance matrix of the translation and rotation parts to create a 6x6 covariance matrix. - * NOTE: We represent this estimate as a \c large noiseless SE3 pose and a covariance in se3, - * the tangent space of the SE3 manifold. - * Users may perform the appropriate conversions to get the covariance matrix into their parametrization of interest. - * This reasoning is based on "Characterizing the Uncertainty of Jointly - * Distributed Poses in the Lie Algebra" by Mangelson et al. - * (https://robots.et.byu.edu/jmangelson/pubs/2020/mangelson2020tro.pdf). See section III. E) "Defining Random Variables - * over Poses" for more context. + * The inputs are a range of values, a range of corresponding weights, and an optional projection function + * to convert to the right value-type. * - * \tparam Poses A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Sophus::SE3`. - * \tparam Weights A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Scalar`. - * \tparam Pose The pose value type of the given range. - * \tparam Scalar A scalar type, e.g. double or float. - * \param poses 3D poses to estimate mean and covariances from. - * \param weights Weights for the poses with matching indices. - * \return The estimated pose and its 6x6 covariance matrix. - */ -template < - class Poses, - class Weights, - class Pose = ranges::range_value_t, - class Scalar = typename Pose::Scalar, - typename = std::enable_if_t>>> -std::pair, Sophus::Matrix6> estimate(const Poses& poses, const Weights& weights) { - assert(std::size(poses) == std::size(weights)); - assert(std::size(poses) > 0); - auto poses_view = poses | ranges::views::common; - auto weights_view = weights | ranges::views::common; - - auto quaternion_view = poses | ranges::views::transform([](const Pose& pose) { return pose.unit_quaternion(); }); - - const auto weights_sum = std::accumulate(weights_view.begin(), weights_view.end(), 0.0); - auto normalized_weights_view = - weights_view | // - ranges::views::transform([weights_sum](const auto weight) { return weight / weights_sum; }); - - const auto average_quat = detail::weighted_average_quaternion(quaternion_view, normalized_weights_view); - - const Sophus::Vector3 mean_translation = std::transform_reduce( - poses_view.begin(), // - poses_view.end(), // - normalized_weights_view.begin(), // - Sophus::Vector3::Zero().eval(), // - std::plus{}, // - [](const Pose& pose, const auto& weight) { return pose.translation() * weight; }); - - const Sophus::SE3 mean{Sophus::SO3{average_quat}, mean_translation}; - - // calculate the averaging factor for the weighted covariance estimate - // See https://en.wikipedia.org/wiki/Sample_mean_and_covariance#Weighted_samples - const auto squared_weight_sum = std::transform_reduce( - normalized_weights_view.begin(), normalized_weights_view.end(), Scalar{0.0}, std::plus{}, - [](const auto& weight) { return (weight * weight); }); - - const Eigen::Matrix covariance = - std::transform_reduce( - poses_view.begin(), // - poses_view.end(), // - normalized_weights_view.begin(), // - Eigen::Matrix::Zero().eval(), // - std::plus{}, // - [inverse_mean = mean.inverse()](const Pose& pose, const auto weight) { - // Compute deviation from mean in Lie algebra (logarithm of SE3) - const Pose delta = inverse_mean * pose; - // Accumulate weighted covariance - return Eigen::Matrix{weight * (delta.log() * delta.log().transpose())}; - }) / - (1. 
- squared_weight_sum); - - return std::pair{mean, covariance}; -} - -/// Returns a pair consisting of the estimated mean pose and its covariance. -/** - * Given a range of poses, computes the estimated pose by averaging the translation - * and rotation parts. - * Computes the covariance matrix of the translation parts and the circular variance - * of the rotation angles to create a 3x3 covariance matrix. - * It does not take into account the particle weights. + * It supports floating-point numbers, vectors, quaternions, and Lie group elements like SE2 and SE3. * - * \tparam Poses A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Sophus::SE2`. - * \tparam Weights A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Scalar`. - * \tparam Pose The pose value type of the given range. - * \tparam Scalar A scalar type, e.g. double or float. - * \param poses Poses of equally weighted 2D poses. - * \param weights Poses of equally weighted 2D poses. - * \return The estimated pose and its 3x3 covariance matrix. + * The weights are assumed to already be normalized. Non-normalized weights will yield incorrect results. */ -template < - class Poses, - class Weights, - class Pose = ranges::range_value_t, - class Scalar = typename Pose::Scalar, - typename = std::enable_if_t>>> -std::pair, Sophus::Matrix3> estimate(Poses&& poses, Weights&& weights) { - auto translation_view = poses | ranges::views::transform([](const auto& pose) { return pose.translation(); }); - auto poses_view = poses | ranges::views::common; - auto weights_view = weights | ranges::views::common; - const auto weights_sum = std::accumulate(weights_view.begin(), weights_view.end(), 0.0); - auto normalized_weights_view = - weights_view | ranges::views::transform([weights_sum](const auto& weight) { return weight / weights_sum; }); - - // map sophus pose 2D pose into a 4D Eigen vector. Mapping exploits that Sophus stores the 2D transform - // as two elements for the linear translation, and two more for the orientation (in complex number form) - const auto pose_to_weighted_eigen_vector = [](const auto& pose, const auto& weight) { - return Eigen::Map>{pose.data()} * weight; - }; - - // Compute the average of all the coefficients of the SE2 group elements and construct a new SE2 element. Notice - // that after averaging the complex representation of the orientation the resulting complex is not on the unit circle. - // This is expected and the value will be renormalized after having used the non-normal result to estimate the - // orientation autocovariance. - const Sophus::Vector4 mean_pose_vector = std::transform_reduce( - poses_view.begin(), poses_view.end(), normalized_weights_view.begin(), Sophus::Vector4::Zero().eval(), - std::plus{}, pose_to_weighted_eigen_vector); - - // Calculate the weighted pose estimation - Sophus::SE2 estimated_pose = Eigen::Map>{mean_pose_vector.data()}; - - Sophus::Matrix3 covariance_matrix = Sophus::Matrix3::Zero(); - - // Compute the covariance of the translation part. - covariance_matrix.template topLeftCorner<2, 2>() = - calculate_covariance(translation_view, normalized_weights_view, estimated_pose.translation()); - - // Compute the orientation variance and re-normalize the rotation component. - if (estimated_pose.so2().unit_complex().norm() < std::numeric_limits::epsilon()) { - // Handle the case where both averages are too close to zero. - // Return zero yaw and infinite variance. 
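To make the interface documented above concrete, here is a minimal sketch of calling the `beluga::mean` niebloid declared a few lines below. The sample quaternions, values, and weights are made up, and the weights are pre-normalized as the documentation requires.

```cpp
#include <beluga/algorithm/estimation.hpp>

#include <Eigen/Geometry>

#include <array>
#include <vector>

// Usage sketch (hypothetical data): the same entry point dispatches on the value type.
int main() {
  const std::vector<Eigen::Quaterniond> quaternions = {
      Eigen::Quaterniond{Eigen::AngleAxisd{0.0, Eigen::Vector3d::UnitZ()}},
      Eigen::Quaterniond{Eigen::AngleAxisd{0.1, Eigen::Vector3d::UnitZ()}},
      Eigen::Quaterniond{Eigen::AngleAxisd{0.2, Eigen::Vector3d::UnitZ()}},
  };
  constexpr std::array weights = {0.5, 0.25, 0.25};  // already normalized (sums to one)

  // Quaternion overload: weighted average orientation.
  const Eigen::Quaterniond average_orientation = beluga::mean(quaternions, weights);

  // Scalar overload: plain weighted average of floating-point values.
  const std::vector<double> values = {1.0, 2.0, 3.0};
  const double average_value = beluga::mean(values, weights);

  (void)average_orientation;
  (void)average_value;
}
```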
- covariance_matrix.coeffRef(2, 2) = std::numeric_limits::infinity(); - estimated_pose.so2() = Sophus::SO2{0.0}; - } else { - // See circular standard deviation in - // https://en.wikipedia.org/wiki/Directional_statistics#Dispersion. - covariance_matrix.coeffRef(2, 2) = -2.0 * std::log(estimated_pose.so2().unit_complex().norm()); - estimated_pose.so2().normalize(); +inline constexpr detail::mean_fn mean; + +namespace detail { + +/// \cond detail + +struct covariance_fn { + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>>, + std::enable_if_t, Value>, int> = 0> + auto operator()( + Values&& values, + Weights&& normalized_weights, + const typename Value::PlainMatrix& mean, + Projection projection = {}) const { + static_assert(ranges::input_range); + static_assert(ranges::input_range); + + constexpr int M = Value::RowsAtCompileTime; + constexpr int N = Value::ColsAtCompileTime; + static_assert(N == 1); + using Scalar = typename Value::Scalar; + auto accumulator = Eigen::Matrix::Zero().eval(); + auto squared_weight_sum = Scalar{0.0}; + + auto it = ranges::begin(values); + const auto last = ranges::end(values); + auto weights_it = ranges::begin(normalized_weights); + + assert(it != last); + assert(weights_it != ranges::end(normalized_weights)); + + for (; it != last; ++weights_it, ++it) { + const auto& value = projection(*it); + const auto weight = *weights_it; + const auto centered = value - mean; + accumulator.noalias() += weight * centered * centered.transpose(); + squared_weight_sum += weight * weight; + } + + assert(weights_it == ranges::end(normalized_weights)); + assert(squared_weight_sum < 1.0); + + accumulator /= (1.0 - squared_weight_sum); // apply the correction factor to yield an unbiased estimator + return accumulator; + } + + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>>, + std::enable_if_t, int> = 0> + auto operator()(Values&& values, Weights&& normalized_weights, Value mean, Projection projection = {}) const { + static_assert(ranges::input_range); + static_assert(ranges::input_range); + + auto accumulator = Value{0}; + auto squared_weight_sum = Value{0}; + + auto it = ranges::begin(values); + const auto last = ranges::end(values); + auto weights_it = ranges::begin(normalized_weights); + + assert(it != last); + assert(weights_it != ranges::end(normalized_weights)); + + for (; it != last; ++weights_it, ++it) { + const auto& value = projection(*it); + const auto weight = *weights_it; + const auto centered = value - mean; + accumulator += weight * centered * centered; + squared_weight_sum += weight * weight; + } + + assert(weights_it == ranges::end(normalized_weights)); + assert(squared_weight_sum < 1.0); + + accumulator /= (1.0 - squared_weight_sum); // apply the correction factor to yield an unbiased estimator + return accumulator; + } + + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>>, + std::enable_if_t< + std::disjunction_v< + std::is_base_of, Value>, + std::is_base_of, Value>>, + int> = 0> + auto operator()(Values&& values, Weights&& normalized_weights, const Value& mean, Projection projection = {}) const { + static_assert(ranges::input_range); + static_assert(ranges::input_range); + + // For SE3 (and SE2) estimates, we represent the estimate as a noiseless pose and covariance in se3, + // the tangent space of the SE3 manifold. 
+ // + // Users may perform the appropriate conversions to get the covariance matrix into their parametrization of + // interest. + // + // This reasoning is based on "Characterizing the Uncertainty of Jointly Distributed Poses in the Lie + // Algebra" by Mangelson et al. (https://robots.et.byu.edu/jmangelson/pubs/2020/mangelson2020tro.pdf). + // + // See section III. E) "Defining Random Variables over Poses" for more context. + auto accumulator = Value::Adjoint::Zero().eval(); + auto squared_weight_sum = typename Value::Scalar{0.0}; + + auto it = ranges::begin(values); + const auto last = ranges::end(values); + auto weights_it = ranges::begin(normalized_weights); + + assert(it != last); + assert(weights_it != ranges::end(normalized_weights)); + + const auto inverse_mean = mean.inverse(); + + for (; it != last; ++weights_it, ++it) { + const auto& value = projection(*it); + const auto weight = *weights_it; + const auto centered = (inverse_mean * value).log(); + accumulator.noalias() += weight * centered * centered.transpose(); + squared_weight_sum += weight * weight; + } + + assert(weights_it == ranges::end(normalized_weights)); + assert(squared_weight_sum < 1.0); + + accumulator /= (1.0 - squared_weight_sum); // apply the correction factor to yield an unbiased estimator + return accumulator; + } + + template < + class Values, + class Weights, + class Projection = ranges::identity, + class Value = std::decay_t>, + std::enable_if_t< + std::disjunction_v< + std::is_base_of, Value>, + std::is_base_of, Value>, + std::is_base_of, Value>>, + int> = 0> + auto operator()(Values&&, Weights&&, const Value&, Projection = {}) const -> Value = delete; // not-implemented + + template < + class Values, + class Projection = ranges::identity, + class Value = std::decay_t>>> + auto operator()(Values&& values, const Value& mean, Projection projection = {}) const { + static_assert(ranges::sized_range); + const auto size = ranges::size(values); + auto weights = ranges::views::repeat_n(1.0 / static_cast(size), static_cast(size)); + return (*this)(std::forward(values), std::move(weights), mean, std::move(projection)); } - return std::pair{estimated_pose, covariance_matrix}; -} +}; + +/// \endcond -/// Computes mean and variance of a range of weighted scalars. +} // namespace detail + +/// Calculate the weighted covariance of a range of values. /** - * Given a range of scalars, computes the scalar mean and variance. + * The inputs are a range of values, a range of corresponding weights, the pre-computed mean, and an optional projection + * function to convert to the right value-type. * - * \tparam Scalars A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `std::vector`. - * \tparam Weights A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Scalar`. - * \tparam Scalar The scalar value type of the given range of Scalars. - * \param scalars Range of scalars. - * \param weights Range of weights. - * \return The estimated mean and variance. 
- */ -template < - class Scalars, - class Weights, - class Scalar = ranges::range_value_t, - typename = std::enable_if_t>> -std::pair estimate(Scalars&& scalars, Weights&& weights) { - auto weights_view = weights | ranges::views::common; - const auto weights_sum = ranges::accumulate(weights, 0.0, std::plus<>{}); - auto normalized_weights_view = - weights_view | ranges::views::transform([weights_sum](auto weight) { return weight / weights_sum; }); - - const Scalar weighted_mean = std::transform_reduce( - scalars.begin(), scalars.end(), normalized_weights_view.begin(), 0.0, std::plus<>{}, std::multiplies<>{}); - - const Scalar weighted_squared_deviations = std::transform_reduce( - scalars.begin(), scalars.end(), normalized_weights_view.begin(), 0.0, std::plus<>{}, - [weighted_mean](const auto& scalar, const auto& weight) { - return weight * (scalar - weighted_mean) * (scalar - weighted_mean); - }); - - const auto number_of_non_zero_weights = - static_cast(ranges::count_if(weights_view, [&](auto weight) { return weight > 0; })); - - const Scalar weighted_variance = - weighted_squared_deviations * number_of_non_zero_weights / (number_of_non_zero_weights - 1); - - return std::pair{weighted_mean, weighted_variance}; -} - -/// Returns a pair consisting of the estimated mean pose and its covariance. -/** - * Given a range of poses, computes the estimated pose by averaging the translation - * and rotation parts. - * Computes the covariance matrix of the translation and rotation parts to create a 6x6 covariance matrix, assuming all - * the poses are equally weighted. + * It supports floating-point numbers, vectors, and Lie group elements like SE2 and SE3. + * + * For Lie group elements, the function computes covariance in the tangent space, representing the uncertainty of poses + * on the Lie manifold. Users may perform the appropriate conversions to get the covariance matrix into their + * parametrization of interest. * - * \tparam Poses A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Sophus::SE3`. - * \tparam Weights A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Scalar`. - * \tparam Pose The pose value type of the given range. - * \tparam Scalar A scalar type, e.g. double or float. - * \param poses 3D poses to estimate mean and covariances from, equally. - * \return The estimated pose and its 6x6 covariance matrix. + * The weights are assumed to already be normalized. Non-normalized weights will yield incorrect results. */ -template < - class Poses, - class Pose = ranges::range_value_t, - class Scalar = typename Pose::Scalar, - typename = std::enable_if_t>>> -std::pair, Sophus::Matrix6> estimate(const Poses& poses) { - return beluga::estimate(poses, ranges::views::repeat_n(1.0, static_cast(poses.size()))); -} - -/// Returns a pair consisting of the estimated mean pose and its covariance. 
+inline constexpr detail::covariance_fn covariance; + +namespace detail { + +/// \cond detail + +struct estimate_fn { + template < + class Values, + class Weights, + class Value = std::decay_t>, + class = std::enable_if_t, + std::is_base_of, Value>, + std::is_base_of, Value>>>> + auto operator()(Values&& values, Weights&& weights) const { + static_assert(ranges::forward_range); // must allow multi-pass + static_assert(ranges::forward_range); // must allow multi-pass + + auto normalized_weights = weights | ranges::views::transform([sum = ranges::accumulate(weights, 0.0)](auto weight) { + return weight / sum; + }); + + const auto mean = beluga::mean(values, normalized_weights); + const auto variance = beluga::covariance(values, normalized_weights, mean); + return std::make_pair(mean, variance); + } + + template < + class Values, + class Weights, + class Value = std::decay_t>, + class Scalar = typename Value::Scalar, + class = std::enable_if_t, Value>>> + auto operator()(Values&& values, Weights&& weights) const -> std::pair, Sophus::Matrix3> { + static_assert(ranges::forward_range); // must allow multi-pass + static_assert(ranges::forward_range); // must allow multi-pass + + auto normalized_weights = weights | ranges::views::transform([sum = ranges::accumulate(weights, 0.0)](auto weight) { + return weight / sum; + }); + + // Compute the average of all the coefficients of the SE2 group elements and construct a new SE2 element. Notice + // that after averaging the complex representation of the orientation the resulting complex is not on the unit + // circle. This is expected and the value will be renormalized after having used the non-normal result to estimate + // the orientation autocovariance. + const Sophus::Vector4 mean_vector = beluga::mean(values, normalized_weights, [](const auto& value) { + return Eigen::Map>{value.data()}; + }); + + auto mean = Sophus::SE2{Eigen::Map>{mean_vector.data()}}; + auto covariance = Sophus::Matrix3::Zero().eval(); + + // Compute the covariance of the translation part. + covariance.template topLeftCorner<2, 2>() = beluga::covariance( + values, normalized_weights, mean.translation(), [](const auto& value) { return value.translation(); }); + + // Compute the orientation variance and re-normalize the rotation component (after using the non-normal result). + if (mean.so2().unit_complex().norm() < std::numeric_limits::epsilon()) { + // Handle the case where both averages are too close to zero. + // Return zero yaw and infinite variance. + covariance.coeffRef(2, 2) = std::numeric_limits::infinity(); + mean.so2() = Sophus::SO2{0.0}; + // TODO(nahuel): Consider breaking the existing API and return + // an optional to handle degenerate cases just like Sophus does. + } else { + // See circular standard deviation in + // https://en.wikipedia.org/wiki/Directional_statistics#Dispersion. 
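      // (For reference, assuming the normalized weights above: with mean resultant length
      //  R = |unit_complex| of the averaged, not yet re-normalized rotation, the circular
      //  standard deviation is sqrt(-2 * ln(R)), so the yaw variance entry below is -2 * ln(R).)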
+ covariance.coeffRef(2, 2) = -2.0 * std::log(mean.so2().unit_complex().norm()); + mean.so2().normalize(); + } + + return std::make_pair(mean, covariance); + } + + template < + class Values, + class Weights, + class Value = std::decay_t>, + class = std::enable_if_t, Value>, + std::is_base_of, Value>, + std::is_base_of, Value>>>> + auto operator()(Values&&, Weights&&) const -> Value = delete; // not-implemented + + template + auto operator()(Values&& values) const { + static_assert(ranges::sized_range); + const auto size = ranges::size(values); + auto weights = ranges::views::repeat_n(1.0 / static_cast(size), static_cast(size)); + return (*this)(std::forward(values), std::move(weights)); + } +}; + +/// \endcond + +} // namespace detail + +/// Calculate the estimate (mean and covariance) of a range of values. /** - * Given a range of poses, computes the estimated pose by averaging the translation - * and rotation parts, assuming all poses are equally weighted. - * Computes the covariance matrix of the translation parts and the circular variance - * of the rotation angles to create a 3x3 covariance matrix. - * It does not take into account the particle weights. This is appropriate for use with - * filter update cycles that resample the particle set at every iteration, since - * in that case the belief is fully represented by the spatial distribution of the - * particles, and the particle weights provide no additional information. + * The inputs are a range of values and a range of corresponding weights. + * It supports floating-point numbers, vectors, and Lie group elements like SE2 and SE3. * - * \tparam Poses A [sized range](https://en.cppreference.com/w/cpp/ranges/sized_range) type whose - * value type is `Sophus::SE2`. - * \tparam Pose The pose value type of the given range. - * \tparam Scalar A scalar type, e.g. double or float. - * \param poses Poses of equally weighted 2D poses. - * \return The estimated pose and its 3x3 covariance matrix. + * The function does not assume that the input weights are normalized. It normalizes the weights while iterating, + * which might have a performance impact. Specifically, the function has to iterate over the weights multiple times + * (e.g., once to compute the mean, and again to compute the covariance). The normalization factor is computed once, but + * division for normalizing the weights is performed each time a weight is accessed. 
*/ -template < - class Poses, - class Pose = ranges::range_value_t, - class Scalar = typename Pose::Scalar, - typename = std::enable_if_t>>> -std::pair, Sophus::Matrix3> estimate(Poses&& poses) { - return beluga::estimate(poses, ranges::views::repeat_n(1.0, static_cast(poses.size()))); -} +inline constexpr detail::estimate_fn estimate; } // namespace beluga diff --git a/beluga/test/beluga/algorithm/test_cluster_based_estimation.cpp b/beluga/test/beluga/algorithm/test_cluster_based_estimation.cpp index 64ab2c499..019979390 100644 --- a/beluga/test/beluga/algorithm/test_cluster_based_estimation.cpp +++ b/beluga/test/beluga/algorithm/test_cluster_based_estimation.cpp @@ -333,8 +333,7 @@ TEST_F(ClusterBasedEstimationDetailTesting, ClusterEstimation) { const auto particles = ranges::views::zip(states, weights, clusters); - auto cluster_0_particles = - particles | ranges::views::cache1 | ranges::views::filter([](const auto& p) { return std::get<2>(p) == 0; }); + auto cluster_0_particles = particles | ranges::views::filter([](const auto& p) { return std::get<2>(p) == 0; }); auto cluster_0_states = cluster_0_particles | beluga::views::elements<0>; auto cluster_0_weights = cluster_0_particles | beluga::views::elements<1>; diff --git a/beluga/test/beluga/algorithm/test_estimation.cpp b/beluga/test/beluga/algorithm/test_estimation.cpp index 0a3f945f3..2036ad0f5 100644 --- a/beluga/test/beluga/algorithm/test_estimation.cpp +++ b/beluga/test/beluga/algorithm/test_estimation.cpp @@ -67,7 +67,7 @@ TEST_F(CovarianceCalculation, UniformWeightOverload) { Vector2d{2, 0}, Vector2d{0, 2}, Vector2d{2, 2}, Vector2d{0, 2}, Vector2d{2, 0}, }; const auto translation_mean = Vector2d{1, 1}; - const auto covariance = beluga::calculate_covariance(translation_vector, translation_mean); + const auto covariance = beluga::covariance(translation_vector, translation_mean); ASSERT_NEAR(covariance(0, 0), 1.1111, 0.001); ASSERT_NEAR(covariance(0, 1), 0.2222, 0.001); ASSERT_NEAR(covariance(1, 0), 0.2222, 0.001); @@ -93,7 +93,7 @@ TEST_F(CovarianceCalculation, NonUniformWeightOverload) { const auto total_weight = std::accumulate(weights.begin(), weights.end(), 0.0); std::for_each(weights.begin(), weights.end(), [total_weight](auto& weight) { weight /= total_weight; }); const auto translation_mean = Vector2d{1.1111, 1.1111}; - const auto covariance = beluga::calculate_covariance(translation_vector, weights, translation_mean); + const auto covariance = beluga::covariance(translation_vector, weights, translation_mean); ASSERT_NEAR(covariance(0, 0), 1.1765, 0.001); ASSERT_NEAR(covariance(0, 1), 0.1176, 0.001); ASSERT_NEAR(covariance(1, 0), 0.1176, 0.001); @@ -265,7 +265,7 @@ TEST_F(ScalarEstimation, NonUniformWeightOverload) { const auto standard_deviation = std::sqrt(variance); constexpr double kTolerance = 0.001; ASSERT_NEAR(mean, 4.300, kTolerance); - ASSERT_NEAR(standard_deviation, 2.026, kTolerance); + ASSERT_NEAR(standard_deviation, 2.055, kTolerance); } TEST_F(PoseCovarianceEstimation, MultiVariateNormalSE3) { @@ -311,7 +311,7 @@ TEST(AverageQuaternion, AgainstSophusImpl) { { auto quats_as_so3_view = quaternions | ranges::views::transform([](const Eigen::Quaterniond& q) { return SO3d{q}; }); - const auto avg_quaternion = beluga::detail::weighted_average_quaternion(quaternions, std::array{1., 1., 1.}); + const auto avg_quaternion = beluga::mean(quaternions, std::array{1., 1., 1.}); const auto avg_quat_sophus = Sophus::details::averageUnitQuaternion(quats_as_so3_view | ranges::to); 
ASSERT_TRUE(avg_quaternion.isApprox(avg_quat_sophus)); } @@ -320,8 +320,7 @@ TEST(AverageQuaternion, AgainstSophusImpl) { constexpr double kTolerance = 0.01; auto quats_as_so3_view = quaternions | ranges::views::transform([](const Eigen::Quaterniond& q) { return SO3d{q}; }); - const auto avg_quaternion = - beluga::detail::weighted_average_quaternion(quaternions, std::array{1e-3, 1e-3, 1. - 2e-3}); + const auto avg_quaternion = beluga::mean(quaternions, std::array{1e-3, 1e-3, 1. - 2e-3}); const auto avg_quat_sophus = Sophus::details::averageUnitQuaternion(quats_as_so3_view | ranges::to); ASSERT_FALSE(avg_quaternion.isApprox(avg_quat_sophus)); ASSERT_TRUE(avg_quaternion.isApprox(quaternions.back(), kTolerance)); diff --git a/beluga/test/beluga/random/test_multivariate_normal_distribution.cpp b/beluga/test/beluga/random/test_multivariate_normal_distribution.cpp index 6e1ba2c96..86ddcaaa5 100644 --- a/beluga/test/beluga/random/test_multivariate_normal_distribution.cpp +++ b/beluga/test/beluga/random/test_multivariate_normal_distribution.cpp @@ -82,7 +82,7 @@ TEST_P(MultivariateNormalDistributionWithParam, DistributionCovarianceAndMean) { const auto mean = Eigen::Vector2d{sum / sequence.size()}; ASSERT_NEAR(mean(0), expected_mean(0), kTolerance); ASSERT_NEAR(mean(1), expected_mean(1), kTolerance); - const auto covariance = beluga::calculate_covariance(sequence, mean); + const auto covariance = beluga::covariance(sequence, mean); ASSERT_NEAR(covariance(0, 0), expected_covariance(0, 0), kTolerance); ASSERT_NEAR(covariance(0, 1), expected_covariance(0, 1), kTolerance); ASSERT_NEAR(covariance(1, 0), expected_covariance(1, 0), kTolerance);
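Finally, a minimal sketch of the scalar overload exercised by the `ScalarEstimation` tests above. The values and weights below are hypothetical, and the weights do not need to be pre-normalized since `beluga::estimate` normalizes them internally.

```cpp
#include <beluga/algorithm/estimation.hpp>

#include <cmath>
#include <vector>

// Usage sketch (hypothetical data): beluga::estimate returns the weighted mean and variance.
int main() {
  const std::vector<double> values = {1.0, 2.0, 3.0, 4.0, 5.0};
  const std::vector<double> weights = {0.1, 0.2, 0.4, 0.2, 0.1};  // normalized internally
  const auto [mean, variance] = beluga::estimate(values, weights);
  const double standard_deviation = std::sqrt(variance);
  (void)mean;
  (void)standard_deviation;
}
```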