From 2e2a148ca2f85e89ba7a8cfcec05044366acb6f0 Mon Sep 17 00:00:00 2001 From: Mike Owen Date: Mon, 7 Oct 2024 16:32:41 -0700 Subject: [PATCH] Adding specialized ASPH H advance options (fixShape and radialOnly), and creating a special H time advance policy. Note when using these options the ASPH idealH does not require the Voronoi (big time savings). --- .../SmoothingScale/ASPHSmoothingScale.py | 6 +- src/SmoothingScale/ASPHSmoothingScale.cc | 267 +++++++++++------- src/SmoothingScale/ASPHSmoothingScale.hh | 14 +- src/SmoothingScale/CMakeLists.txt | 1 + src/SmoothingScale/IncrementASPHHtensor.cc | 120 ++++++++ src/SmoothingScale/IncrementASPHHtensor.hh | 66 +++++ .../IncrementASPHHtensorInst.cc.py | 11 + tests/functional/RK/RKInterpolation.py | 2 +- 8 files changed, 375 insertions(+), 112 deletions(-) create mode 100644 src/SmoothingScale/IncrementASPHHtensor.cc create mode 100644 src/SmoothingScale/IncrementASPHHtensor.hh create mode 100644 src/SmoothingScale/IncrementASPHHtensorInst.cc.py diff --git a/src/PYB11/SmoothingScale/ASPHSmoothingScale.py b/src/PYB11/SmoothingScale/ASPHSmoothingScale.py index 40319192a..be9ae7d38 100644 --- a/src/PYB11/SmoothingScale/ASPHSmoothingScale.py +++ b/src/PYB11/SmoothingScale/ASPHSmoothingScale.py @@ -21,7 +21,9 @@ class ASPHSmoothingScale(SmoothingScaleBase): # Constructors def pyinit(self, HUpdate = "HEvolutionType", - W = "const TableKernel<%(Dimension)s>&"): + W = "const TableKernel<%(Dimension)s>&", + fixShape = ("const bool", "false"), + radialOnly = ("const bool", "false")): "ASPHSmoothingScale constructor" #........................................................................... @@ -95,3 +97,5 @@ def label(self): secondMoment = PYB11property("const FieldList<%(Dimension)s, SymTensor>&", "secondMoment", doc="The second moment storage FieldList") cellSecondMoment = PYB11property("const FieldList<%(Dimension)s, SymTensor>&", "cellSecondMoment", doc="The second moment of the Voronoi cells") HidealFilter = PYB11property("std::shared_ptr", "HidealFilter", "HidealFilter", doc="Optional function to manipulate the Hideal calculation") + fixShape = PYB11property("bool", "fixShape", "fixShape", doc="Force the H tensor shape to be fixed -- only adjust volume") + radialOnly = PYB11property("bool", "radialOnly", "radialOnly", doc="Force the H tensor to evolve solely in the radial direction") diff --git a/src/SmoothingScale/ASPHSmoothingScale.cc b/src/SmoothingScale/ASPHSmoothingScale.cc index eca39f527..2b140d9c1 100644 --- a/src/SmoothingScale/ASPHSmoothingScale.cc +++ b/src/SmoothingScale/ASPHSmoothingScale.cc @@ -7,6 +7,7 @@ //----------------------------------------------------------------------------// #include "SmoothingScale/ASPHSmoothingScale.hh" #include "SmoothingScale/polySecondMoment.hh" +#include "SmoothingScale/IncrementASPHHtensor.hh" #include "Geometry/Dimension.hh" #include "Kernel/TableKernel.hh" #include "Field/FieldList.hh" @@ -18,6 +19,7 @@ #include "FileIO/FileIO.hh" #include "Utilities/FastMath.hh" #include "Utilities/GeometricUtilities.hh" +#include "Utilities/rotationMatrix.hh" #include "Utilities/range.hh" #include "Utilities/Timer.hh" @@ -88,6 +90,54 @@ smoothingScaleDerivative(const Dim<3>::SymTensor& H, return result; } +//------------------------------------------------------------------------------ +// Radial evolution/alignment specialized evolution +//------------------------------------------------------------------------------ +// 1-D +inline +Dim<1>::SymTensor +radialEvolution(const Dim<1>::SymTensor& Hi, + const 
Dim<1>::Vector& nhat, + const Dim<1>::Scalar s) { + return Hi / s; +} + +// 2-D +inline +Dim<2>::SymTensor +radialEvolution(const Dim<2>::SymTensor& Hi, + const Dim<2>::Vector& nhat, + const Dim<2>::Scalar s) { + const auto T = rotationMatrix(nhat).Transpose(); + const auto hev0 = Hi.eigenVectors(); + Dim<2>::SymTensor result; + if (abs(hev0.eigenVectors.getColumn(0).dot(nhat)) > abs(hev0.eigenVectors.getColumn(1).dot(nhat))) { + result(0,0) = hev0.eigenValues(0); + result(1,1) = hev0.eigenValues(1); + } else { + result(0,0) = hev0.eigenValues(1); + result(1,1) = hev0.eigenValues(0); + } + result(0,0) /= s; + result.rotationalTransform(T); + return result; +} + +// 3-D +inline +Dim<3>::SymTensor +radialEvolution(const Dim<3>::SymTensor& Hi, + const Dim<3>::Vector& nhat, + const Dim<3>::Scalar s) { + const auto Tprinciple = rotationMatrix(nhat); + const auto Tlab = Tprinciple.Transpose(); + auto result = Hi; + result.rotationalTransform(Tprinciple); + result(0,0) /= s; + result.rotationalTransform(Tlab); + return result; +} + } //------------------------------------------------------------------------------ @@ -96,13 +146,17 @@ smoothingScaleDerivative(const Dim<3>::SymTensor& H, template ASPHSmoothingScale:: ASPHSmoothingScale(const HEvolutionType HUpdate, - const TableKernel& W): + const TableKernel& W, + const bool fixShape, + const bool radialOnly): SmoothingScaleBase(HUpdate), mWT(W), mZerothMoment(FieldStorageType::CopyFields), mSecondMoment(FieldStorageType::CopyFields), mCellSecondMoment(FieldStorageType::CopyFields), - mHidealFilterPtr(std::make_shared>()) { + mHidealFilterPtr(std::make_shared>()), + mFixShape(fixShape), + mRadialOnly(radialOnly) { } //------------------------------------------------------------------------------ @@ -136,12 +190,13 @@ registerState(DataBase& dataBase, for (auto k = 0u; k < numFields; ++k) { auto& Hfield = *Hfields[k]; const auto& nodeList = Hfield.nodeList(); - const auto hmaxInv = 1.0/nodeList.hmax(); - const auto hminInv = 1.0/nodeList.hmin(); + const auto hmin = nodeList.hmin(); + const auto hmax = nodeList.hmax(); + const auto hminratio = nodeList.hminratio(); switch (Hupdate) { case HEvolutionType::IntegrateH: case HEvolutionType::IdealH: - state.enroll(Hfield, make_policy>(hmaxInv, hminInv)); + state.enroll(Hfield, make_policy>(hmin, hmax, hminratio, mFixShape, mRadialOnly)); break; case HEvolutionType::FixedH: @@ -197,37 +252,15 @@ evaluateDerivatives(const typename Dimension::Scalar time, auto DHDt = derivs.fields(IncrementBoundedState::prefix() + HydroFieldNames::H, SymTensor::zero); CHECK(DHDt.size() == numNodeLists); - // Finish up the derivatives now that we've walked all pairs - for (auto nodeListi = 0u; nodeListi < numNodeLists; ++nodeListi) { - const auto& nodeList = H[nodeListi]->nodeList(); - const auto ni = nodeList.numInternalNodes(); + // Set the H time derivatives + for (auto k = 0u; k < numNodeLists; ++k) { + const auto& nodeList = H[k]->nodeList(); + const auto ni = nodeList.numInternalNodes(); #pragma omp parallel for for (auto i = 0u; i < ni; ++i) { - - // Get the state for node i. - const auto& Hi = H(nodeListi, i); - const auto& DvDxi = DvDx(nodeListi, i); - - // Time derivative of H - DHDt(nodeListi, i) = smoothingScaleDerivative(Hi, DvDxi); - - // // Determine the current effective number of nodes per smoothing scale. - // const auto currentNodesPerSmoothingScale = (fuzzyEqual(massZerothMomenti, 0.0) ? // Is this node isolated (no neighbors)? 
- // 0.5*nPerh : - // mWT.equivalentNodesPerSmoothingScale(massZerothMomenti)); - // CHECK2(currentNodesPerSmoothingScale > 0.0, "Bad estimate for nPerh effective from kernel: " << currentNodesPerSmoothingScale); - - // // The ratio of the desired to current nodes per smoothing scale. - // const auto s = std::min(4.0, std::max(0.25, nPerh/(currentNodesPerSmoothingScale + 1.0e-30))); - // CHECK(s > 0.0); - - // // Now determine how to scale the current H to the desired value. - // // We only scale H at this point in setting Hideal, not try to change the shape. - // const auto a = (s < 1.0 ? - // 0.4*(1.0 + s*s) : - // 0.4*(1.0 + 1.0/(s*s*s))); - // CHECK(1.0 - a + a*s > 0.0); - // Hideal(nodeListi, i) = std::max(hmaxInv, std::min(hminInv, Hi / (1.0 - a + a*s))); + const auto& Hi = H(k,i); + const auto& DvDxi = DvDx(k,i); + DHDt(k,i) = smoothingScaleDerivative(Hi, DvDxi); } } TIME_END("ASPHSmoothingScaleDerivs"); @@ -253,37 +286,46 @@ finalize(const Scalar time, // If we're not using the IdealH algorithm we can save a lot of time... const auto Hupdate = this->HEvolution(); if (Hupdate == HEvolutionType::IdealH) { + CHECK(not (mFixShape and mRadialOnly)); // Can't do both simultaneously + const auto voronoi = not (mFixShape or mRadialOnly); // In these special cases we don't need the Voronoi second moment // Grab our state const auto numNodeLists = dataBase.numFluidNodeLists(); const auto& cm = dataBase.connectivityMap(); auto pos = state.fields(HydroFieldNames::position, Vector::zero); - const auto vel = state.fields(HydroFieldNames::velocity, Vector::zero); - const auto cs = state.fields(HydroFieldNames::soundSpeed, 0.0); const auto mass = state.fields(HydroFieldNames::mass, 0.0); const auto rho = state.fields(HydroFieldNames::massDensity, 0.0); const auto cells = state.fields(HydroFieldNames::cells, FacetedVolume()); const auto surfacePoint = state.fields(HydroFieldNames::surfacePoint, 0); auto H = state.fields(HydroFieldNames::H, SymTensor::zero); auto Hideal = derivs.fields(ReplaceBoundedState::prefix() + HydroFieldNames::H, SymTensor::zero); + CHECK(pos.size() == numNodeLists); + CHECK(mass.size() == numNodeLists); + CHECK(rho.size() == numNodeLists); + CHECK2((cells.size() == numNodeLists) or not voronoi, cells.size() << " " << voronoi << " " << mFixShape << " " << mRadialOnly); + CHECK2((surfacePoint.size() == numNodeLists) or not voronoi, cells.size() << " " << voronoi << " " << mFixShape << " " << mRadialOnly); + CHECK(H.size() == numNodeLists); + CHECK(Hideal.size() == numNodeLists); // Pair connectivity const auto& pairs = cm.nodePairList(); const auto npairs = pairs.size(); // Compute the second moments for the Voronoi cells - for (auto k = 0u; k < numNodeLists; ++k) { - const auto n = cells[k]->numInternalElements(); + if (voronoi) { + for (auto k = 0u; k < numNodeLists; ++k) { + const auto n = cells[k]->numInternalElements(); #pragma omp parallel for - for (auto i = 0u; i < n; ++i) { - mCellSecondMoment(k,i) = polySecondMoment(cells(k,i), pos(k,i)).sqrt(); + for (auto i = 0u; i < n; ++i) { + mCellSecondMoment(k,i) = polySecondMoment(cells(k,i), pos(k,i)).sqrt(); + } } - } - // Apply boundary conditions to the cell second moments - for (auto* boundaryPtr: range(this->boundaryBegin(), this->boundaryEnd())) { - boundaryPtr->applyFieldListGhostBoundary(mCellSecondMoment); - boundaryPtr->finalizeGhostBoundary(); + // Apply boundary conditions to the cell second moments + for (auto* boundaryPtr: range(this->boundaryBegin(), this->boundaryEnd())) { + 
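+        // (editorial note) ghost-exchange the freshly computed cell second moments so the
+        // pair summation below sees consistent values across domain boundaries; each
+        // boundary is finalized before the next one is applied.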
boundaryPtr->applyFieldListGhostBoundary(mCellSecondMoment); + boundaryPtr->finalizeGhostBoundary(); + } } // // // Prepare RK correction terms @@ -433,8 +475,10 @@ finalize(const Scalar time, fweightij = sameMatij ? 1.0 : mj*rhoi/(mi*rhoj); massZerothMomenti += fweightij * WSPHi; massZerothMomentj += 1.0/fweightij * WSPHj; - if (surfacePoint(nodeListj, j) == 0) massSecondMomenti += WSPHi * mCellSecondMoment(nodeListj, j); - if (surfacePoint(nodeListi, i) == 0) massSecondMomentj += 1.0/fweightij * WSPHj * mCellSecondMoment(nodeListi, i); + if (voronoi) { + if (surfacePoint(nodeListj, j) == 0) massSecondMomenti += WSPHi * mCellSecondMoment(nodeListj, j); + if (surfacePoint(nodeListi, i) == 0) massSecondMomentj += 1.0/fweightij * WSPHj * mCellSecondMoment(nodeListi, i); + } } // Reduce the thread values to the master. @@ -452,38 +496,20 @@ finalize(const Scalar time, // const auto W0 = mWT.kernelValue(0.0, 1.0); for (auto k = 0u; k < numNodeLists; ++k) { const auto& nodeList = mass[k]->nodeList(); - // const auto hminInv = safeInvVar(nodeList.hmin()); - // const auto hmaxInv = safeInvVar(nodeList.hmax()); - // const auto hminratio = nodeList.hminratio(); + const auto hminInv = safeInvVar(nodeList.hmin()); + const auto hmaxInv = safeInvVar(nodeList.hmax()); + const auto hminratio = nodeList.hminratio(); const auto nPerh = nodeList.nodesPerSmoothingScale(); const auto n = nodeList.numInternalNodes(); #pragma omp parallel for for (auto i = 0u; i < n; ++i) { auto& Hi = H(k,i); auto& Hideali = Hideal(k,i); - auto massZerothMomenti = mZerothMoment(k,i); - auto& massSecondMomenti = mSecondMoment(k,i); // Complete the zeroth moment + auto& massZerothMomenti = mZerothMoment(k,i); massZerothMomenti = Dimension::rootnu(max(0.0, massZerothMomenti)); - // // Complete the second moment - // massSecondMomenti += W0 * polySecondMoment(mCells(k,i), ri).sqrt(); - - // Find the new normalized target shape - auto T = massSecondMomenti; // .sqrt(); - { - const auto detT = T.Determinant(); - if (fuzzyEqual(detT, 0.0)) { - T = SymTensor::one; - } else { - T /= Dimension::rootnu(detT); - } - } - CHECK(fuzzyEqual(T.Determinant(), 1.0)); - T /= Dimension::rootnu(Hi.Determinant()); // T in units of length, now with same volume as the old Hinverse - CHECK(fuzzyEqual(T.Determinant(), 1.0/Hi.Determinant())); - // Determine the current effective number of nodes per smoothing scale. const auto currentNodesPerSmoothingScale = (fuzzyEqual(massZerothMomenti, 0.0) ? // Is this node isolated (no neighbors)? 0.5*nPerh : @@ -494,56 +520,81 @@ finalize(const Scalar time, const auto s = std::min(4.0, std::max(0.25, nPerh/(currentNodesPerSmoothingScale + 1.0e-30))); CHECK(s > 0.0); - // // Determine the desired H determinant using our usual target nperh logic - // auto fscale = 1.0; - // for (auto j = 0u; j < Dimension::nDim; ++j) { - // eigenT.eigenValues[j] = std::max(eigenT.eigenValues[j], hminratio*Tmax); - // fscale *= eigenT.eigenValues[j]; - // } - // CHECK(fscale > 0.0); - // fscale = 1.0/Dimension::rootnu(fscale); - - // Now apply the desired volume scaling from the zeroth moment to fscale - // const auto a = (s < 1.0 ? - // 0.4*(1.0 + s*s) : - // 0.4*(1.0 + 1.0/(s*s*s))); - // CHECK(1.0 - a + a*s > 0.0); - // T *= std::min(4.0, std::max(0.25, 1.0 - a + a*s)); - T *= s; - - // Build the new H tensor - if (surfacePoint(k, i) == 0) { - Hideali = (*mHidealFilterPtr)(k, i, Hi, T.Inverse()); + // Now determine how to scale the current H to the desired value. + const auto a = (s < 1.0 ? 
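+                               // (editorial note) 'a' smoothly blends toward the target nodes per h:
+                               // s < 1 (too many neighbors, so the smoothing scale shrinks) and s >= 1
+                               // (too few, so it grows) are both damped, keeping the factor
+                               // 1 - a + a*s used below positive so H changes gradually.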
+ 0.4*(1.0 + s*s) : + 0.4*(1.0 + 1.0/(s*s*s))); + CHECK(1.0 - a + a*s > 0.0); + + // Now a big branch if we're using the normal IdealH or one of the specialized cases. + if (voronoi) { + + // Find the new normalized target shape + auto T = mSecondMoment(k,i); // .sqrt(); + { + const auto detT = T.Determinant(); + if (fuzzyEqual(detT, 0.0)) { + T = SymTensor::one; + } else { + T /= Dimension::rootnu(detT); + } + } + CHECK(fuzzyEqual(T.Determinant(), 1.0)); + T /= Dimension::rootnu(Hi.Determinant()); // T in units of length, now with same volume as the old Hinverse + CHECK(fuzzyEqual(T.Determinant(), 1.0/Hi.Determinant())); + + // T *= std::min(4.0, std::max(0.25, 1.0 - a + a*s)); + T *= s; + + // Build the new H tensor + if (surfacePoint(k, i) == 0) { + Hideali = (*mHidealFilterPtr)(k, i, Hi, T.Inverse()); + } else { + Hideali = (*mHidealFilterPtr)(k, i, Hi, Hi); // Keep the time evolved version for surface points + } + Hi = Hideali; // Since this is the after all our regular state update gotta update the actual H + + } else if (mFixShape) { + + // We're just scaling the fixed H tensor shape, so very close to the normal SPH IdealH algorithm + Hideali = Hi / (1.0 - a + a*s); + } else { - Hideali = (*mHidealFilterPtr)(k, i, Hi, Hi); // Keep the time evolved version for surface points + + // We scale H in the radial direction only (also force H to be aligned radially). + CHECK(mRadialOnly); + const auto nhat = pos(k, i).unitVector(); + Hideali = radialEvolution(Hi, nhat, 1.0 - a + a*s); + } - Hi = Hideali; // Since this is the after all our regular state update gotta update the actual H - - // // If requested, move toward the cell centroid - // if (mfHourGlass > 0.0 and surfacePoint(k,i) == 0) { - // const auto& vi = vel(k,i); - // const auto ci = cs(k,i); - // const auto vhat = vi*safeInv(vi.magnitude()); // goes to zero when velocity zero - // const auto centi = cells(k,i).centroid(); - // auto dr = mfHourGlass*(centi - ri); - // dr = dr.dot(vhat) * vhat; - // // const auto drmax = mfHourGlass*dt*vi.magnitude(); - // const auto drmax = mfHourGlass*dt*ci; - // // const auto drmax = 0.5*dt*min(ci, vi.magnitude()); - // const auto drmag = dr.magnitude(); - // dr *= min(1.0, drmax*safeInv(drmag)); - // ri += dr; - // } + + // Apply limiting and set the actual H + Hideali = (*mHidealFilterPtr)(k, i, Hi, Hideali); + const auto hev = Hideali.eigenVectors(); + const auto hminEffInv = min(hminInv, max(hmaxInv, hev.eigenValues.minElement())/hminratio); + Hideali = constructSymTensorWithBoundedDiagonal(hev.eigenValues, hmaxInv, hminEffInv); + Hideali.rotationalTransform(hev.eigenVectors); + Hi = Hideali; } } + } else { + // Apply any requested user filtering/alterations to the final H in the case where we're not using the IdealH algorithm const auto numNodeLists = dataBase.numFluidNodeLists(); auto H = state.fields(HydroFieldNames::H, SymTensor::zero); for (auto k = 0u; k < numNodeLists; ++k) { - const auto n = H[k]->numInternalElements(); + const auto& nodeList = H[k]->nodeList(); + const auto hminInv = safeInvVar(nodeList.hmin()); + const auto hmaxInv = safeInvVar(nodeList.hmax()); + const auto hminratio = nodeList.hminratio(); + const auto n = nodeList.numInternalNodes(); for (auto i = 0u; i < n; ++i) { H(k,i) = (*mHidealFilterPtr)(k, i, H(k,i), H(k,i)); + const auto hev = H(k,i).eigenVectors(); + const auto hminEffInv = min(hminInv, max(hmaxInv, hev.eigenValues.minElement())/hminratio); + H(k,i) = constructSymTensorWithBoundedDiagonal(hev.eigenValues, hmaxInv, hminEffInv); + 
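+        // (editorial note) the eigenvalues were clamped just above in the principal frame;
+        // the rotationalTransform below maps the bounded tensor back into the lab frame.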
H(k,i).rotationalTransform(hev.eigenVectors); } } } diff --git a/src/SmoothingScale/ASPHSmoothingScale.hh b/src/SmoothingScale/ASPHSmoothingScale.hh index 0e91ad09c..3f9c24b10 100644 --- a/src/SmoothingScale/ASPHSmoothingScale.hh +++ b/src/SmoothingScale/ASPHSmoothingScale.hh @@ -29,7 +29,9 @@ public: // Constructors, destructor. ASPHSmoothingScale(const HEvolutionType HUpdate, - const TableKernel& W); + const TableKernel& W, + const bool fixShape = false, + const bool radialOnly = false); ASPHSmoothingScale() = delete; virtual ~ASPHSmoothingScale() {} @@ -71,7 +73,8 @@ public: StateDerivatives& derivs) override; // We require the Voronoi-like cells per point - virtual bool requireVoronoiCells() const override { return this->HEvolution() == HEvolutionType::IdealH; } + virtual bool requireVoronoiCells() const override { return (this->HEvolution() == HEvolutionType::IdealH and + not (mFixShape or mRadialOnly)); } // Access our internal data const TableKernel& WT() const { return mWT; } @@ -79,6 +82,12 @@ public: const FieldList& secondMoment() const { return mSecondMoment; } const FieldList& cellSecondMoment() const { return mCellSecondMoment; } + // Special evolution flags + bool fixShape() const { return mFixShape; } + bool radialOnly() const { return mRadialOnly; } + void fixShape(const bool x) { mFixShape = x; } + void radialOnly(const bool x) { mRadialOnly = x; } + // Optional user hook providing a functor to manipulate the ideal H vote std::shared_ptr HidealFilter() const { return mHidealFilterPtr; } void HidealFilter(std::shared_ptr functorPtr) { mHidealFilterPtr = functorPtr; } @@ -94,6 +103,7 @@ private: FieldList mZerothMoment; FieldList mSecondMoment, mCellSecondMoment; std::shared_ptr mHidealFilterPtr; + bool mFixShape, mRadialOnly; }; } diff --git a/src/SmoothingScale/CMakeLists.txt b/src/SmoothingScale/CMakeLists.txt index 747c69120..43de653d5 100644 --- a/src/SmoothingScale/CMakeLists.txt +++ b/src/SmoothingScale/CMakeLists.txt @@ -3,6 +3,7 @@ set(SmoothingScale_inst SmoothingScaleBase SPHSmoothingScale ASPHSmoothingScale + IncrementASPHHtensor ) set(SmoothingScale_sources diff --git a/src/SmoothingScale/IncrementASPHHtensor.cc b/src/SmoothingScale/IncrementASPHHtensor.cc new file mode 100644 index 000000000..3b6bc6fa8 --- /dev/null +++ b/src/SmoothingScale/IncrementASPHHtensor.cc @@ -0,0 +1,120 @@ +//---------------------------------Spheral++----------------------------------// +// IncrementASPHHtensor +// +// Specialized version of FieldUpdatePolicy for time integrating the H tensor. 
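+// Supports three advance modes: the standard full tensor increment, fixShape (the shape is
+// frozen and only the volume responds to DHDt), and radialOnly (H is advanced only along the
+// radial direction, in a frame aligned with the radius vector).  After the update the
+// eigenvalues are bounded by the hmin/hmax/hminratio limits.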
+// +// Created by JMO, Mon Oct 7 13:31:02 PDT 2024 +//----------------------------------------------------------------------------// +#include "IncrementASPHHtensor.hh" +#include "DataBase/State.hh" +#include "DataBase/StateDerivatives.hh" +#include "Field/Field.hh" +#include "Hydro/HydroFieldNames.hh" +#include "Utilities/rotationMatrix.hh" +#include "Utilities/GeometricUtilities.hh" +#include "Utilities/DBC.hh" + +namespace Spheral { + +//------------------------------------------------------------------------------ +// Constructor +//------------------------------------------------------------------------------ +template +IncrementASPHHtensor:: +IncrementASPHHtensor(const Scalar hmin, + const Scalar hmax, + const Scalar hminratio, + const bool fixShape, + const bool radialOnly): + FieldUpdatePolicy(), + mhmin(hmin), + mhmax(hmax), + mhminratio(hminratio), + mFixShape(fixShape), + mRadialOnly(radialOnly) { +} + +//------------------------------------------------------------------------------ +// Update the field. +//------------------------------------------------------------------------------ +template +void +IncrementASPHHtensor:: +update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) { + + // Get the field name portion of the key. + KeyType fieldKey, nodeListKey; + StateBase::splitFieldKey(key, fieldKey, nodeListKey); + CHECK(fieldKey == HydroFieldNames::H); + + const auto hminInv = 1.0/mhmin; + const auto hmaxInv = 1.0/mhmax; + + // Get the state we're updating. + auto& H = state.field(key, SymTensor::zero); + const auto& DHDt = derivs.field(prefix() + StateBase::buildFieldKey(HydroFieldNames::H, nodeListKey), SymTensor::zero); + const auto& pos = state.field(StateBase::buildFieldKey(HydroFieldNames::position, nodeListKey), Vector::zero); // Only needed if we're using radial scaling + + // Walk the nodes and update H (with limiting) + const auto n = H.numInternalElements(); +#pragma omp parallel for + for (auto i = 0u; i < n; ++i) { + + // Check for special update rules + if (mFixShape) { + + // Fix the shape (only volume scaling allowed) + + auto fi = Dimension::rootnu((H(i) + multiplier*DHDt(i)).Determinant()/H(i).Determinant()); + H(i) *= fi; + + } else if (mRadialOnly) { + + // Force only the radial component of H to be scaled + const auto nhat = pos(i).unitVector(); + const auto T = rotationMatrix(nhat); + H(i).rotationalTransform(T); // Should have one eigenvector aligned with the x' axis in this frame + auto DHDti = DHDt(i); + DHDti.rotationalTransform(T); + H(i)[0] += multiplier * DHDti[0]; + H(i).rotationalTransform(T.Transpose()); + + } else { + + H(i) += multiplier * DHDt(i); + + } + + // Apply limiting + const auto hev = H(i).eigenVectors(); + const auto hminEffInv = min(hminInv, max(hmaxInv, hev.eigenValues.minElement())/mhminratio); + H(i) = constructSymTensorWithBoundedDiagonal(hev.eigenValues, hmaxInv, hminEffInv); + H(i).rotationalTransform(hev.eigenVectors); + } +} + +//------------------------------------------------------------------------------ +// Equivalence operator. +//------------------------------------------------------------------------------ +template +inline +bool +IncrementASPHHtensor:: +operator==(const UpdatePolicyBase& rhs) const { + const auto rhsPtr = dynamic_cast*>(&rhs); + if (rhsPtr == nullptr) return false; + + // Ok, now do we agree on min & max? 
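+  // ... as well as on hminratio and the fixShape/radialOnly flags.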
+ return (hmin() == rhsPtr->hmin() and + hmax() == rhsPtr->hmax() and + hminratio() == rhsPtr->hminratio() and + fixShape() == rhsPtr->fixShape() and + radialOnly() == rhsPtr->radialOnly()); +} + +} diff --git a/src/SmoothingScale/IncrementASPHHtensor.hh b/src/SmoothingScale/IncrementASPHHtensor.hh new file mode 100644 index 000000000..abb34f5b6 --- /dev/null +++ b/src/SmoothingScale/IncrementASPHHtensor.hh @@ -0,0 +1,66 @@ +//---------------------------------Spheral++----------------------------------// +// IncrementASPHHtensor +// +// Specialized version of FieldUpdatePolicy for time integrating the H tensor. +// +// Created by JMO, Mon Oct 7 13:31:02 PDT 2024 +//----------------------------------------------------------------------------// +#ifndef __Spheral_IncrementASPHHtensor_hh__ +#define __Spheral_IncrementASPHHtensor_hh__ + +#include "DataBase/FieldUpdatePolicy.hh" + +namespace Spheral { + +// Forward declarations. +template class StateDerivatives; + +template +class IncrementASPHHtensor: public FieldUpdatePolicy { +public: + //--------------------------- Public Interface ---------------------------// + // Useful typedefs + using KeyType = typename FieldUpdatePolicy::KeyType; + using Scalar = typename Dimension::Scalar; + using Vector = typename Dimension::Vector; + using SymTensor = typename Dimension::SymTensor; + + // Constructors, destructor. + IncrementASPHHtensor(const Scalar hmin, + const Scalar hmax, + const Scalar hminratio, + const bool fixShape, + const bool radialOnly); + virtual ~IncrementASPHHtensor() {} + IncrementASPHHtensor(const IncrementASPHHtensor& rhs) = delete; + IncrementASPHHtensor& operator=(const IncrementASPHHtensor& rhs) = delete; + + // Overload the methods describing how to update Fields. + virtual void update(const KeyType& key, + State& state, + StateDerivatives& derivs, + const double multiplier, + const double t, + const double dt) override; + + // Access the min and max's. + Scalar hmin() const { return mhmin; } + Scalar hmax() const { return mhmax; } + Scalar hminratio() const { return mhminratio; } + bool fixShape() const { return mFixShape; } + bool radialOnly() const { return mRadialOnly; } + + // Equivalence. + virtual bool operator==(const UpdatePolicyBase& rhs) const override; + + static const std::string prefix() { return "delta "; } + +private: + //--------------------------- Private Interface ---------------------------// + Scalar mhmin, mhmax, mhminratio; + bool mFixShape, mRadialOnly; +}; + +} + +#endif diff --git a/src/SmoothingScale/IncrementASPHHtensorInst.cc.py b/src/SmoothingScale/IncrementASPHHtensorInst.cc.py new file mode 100644 index 000000000..2c6ece764 --- /dev/null +++ b/src/SmoothingScale/IncrementASPHHtensorInst.cc.py @@ -0,0 +1,11 @@ +text = """ +//------------------------------------------------------------------------------ +// Explicit instantiation. +//------------------------------------------------------------------------------ +#include "SmoothingScale/IncrementASPHHtensor.cc" +#include "Geometry/Dimension.hh" + +namespace Spheral { + template class IncrementASPHHtensor>; +} +""" diff --git a/tests/functional/RK/RKInterpolation.py b/tests/functional/RK/RKInterpolation.py index 5d3187bbd..1a28d2f1c 100644 --- a/tests/functional/RK/RKInterpolation.py +++ b/tests/functional/RK/RKInterpolation.py @@ -232,7 +232,7 @@ # Randomize nodes #------------------------------------------------------------------------------- import random -seed = 2 +seed = 459297849234 rangen = random.Random() rangen.seed(seed)
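Usage note (editorial addition, not part of the patch): a minimal sketch of how the new options
might be wired up on the C++ side.  The ASPHSmoothingScale constructor arguments and the
HEvolutionType::IdealH spelling come straight from the headers changed above; the kernel choice
and the TableKernel construction are assumptions.  Only one of fixShape/radialOnly should be
enabled at a time (finalize() CHECKs that both are not set), and radialOnly measures the radial
direction from the coordinate origin (pos.unitVector() in finalize() and in the update policy).

    #include "SmoothingScale/ASPHSmoothingScale.hh"
    #include "Kernel/TableKernel.hh"
    #include "Kernel/WendlandC4Kernel.hh"        // assumed kernel choice
    #include "Geometry/Dimension.hh"

    using namespace Spheral;

    void buildRadialASPH() {
      using Dim3 = Dim<3>;
      const WendlandC4Kernel<Dim3> Wbase;        // assumed: any tabulated kernel would do
      const TableKernel<Dim3> WT(Wbase);         // assumed TableKernel constructor signature
      // radialOnly = true: H advances only along the radial direction, and the IdealH
      // finalize() skips the Voronoi second-moment work (the time savings noted in the
      // commit message).
      ASPHSmoothingScale<Dim3> smoothingScale(HEvolutionType::IdealH,
                                              WT,
                                              false,   // fixShape
                                              true);   // radialOnly
    }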