diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py
index d36d3044470..5ff7c241bb0 100644
--- a/Python/pywarpx/fields.py
+++ b/Python/pywarpx/fields.py
@@ -1084,6 +1084,14 @@ def Ke_HybridWrapper(level=0, include_ghosts=False):
     )
 
 
+def fluid_weights_HybridWrapper(level=0, include_ghosts=False):
+    return _MultiFABWrapper(
+        mf_name="fluid_qdsmc_weights_electrons_hybrid",
+        level=level,
+        include_ghosts=include_ghosts,
+    )
+
+
 def Hybrid_Pe_Wrapper(level=0, include_ghosts=False):
     return _MultiFABWrapper(
         mf_name="hybrid_electron_pressure_fp",
diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp
index 191d3ff606e..5b690519e5e 100644
--- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp
+++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp
@@ -80,6 +80,18 @@ void WarpX::HybridPICEvolveFields ()
     // 0'th index of `rho_fp`, J_i^{n-1/2} in `current_fp_temp` and J_i^{n+1/2}
     // in `current_fp`.
 
+    // Calculate Ke using rho^{n} in rho_fp_temp
+    if(m_hybrid_pic_model->m_solve_electron_energy_equation)
+    {
+        // copy rho_fp_temp into hybrid_electron_fl->name_mf_N
+        m_fields.get(hybrid_electron_fl->name_mf_N, finest_level)->setVal(0);
+        MultiFab::Copy( *m_fields.get(hybrid_electron_fl->name_mf_N, finest_level),
+            *m_fields.get(FieldType::hybrid_rho_fp_temp, finest_level),
+            0, 0, 1, m_fields.get(hybrid_electron_fl->name_mf_N, finest_level)->nGrowVect());
+        // Calculate Ke
+        hybrid_electron_fl->HybridInitializeKe(m_fields, m_hybrid_pic_model->m_gamma, m_hybrid_pic_model->m_n_floor, finest_level);
+    }
+
     // Note: E^{n} is recalculated with the accurate J_i^{n} since at the end
     //       of the last step we had to "guess" it. It also needs to be
     //       recalculated to include the resistivity before evolving B.
@@ -143,6 +156,15 @@ void WarpX::HybridPICEvolveFields ()
         );
     }
 
+    // Here current_fp_temp holds J_i^{n}
+    // Calculate Ue at step n
+    if(m_hybrid_pic_model->m_solve_electron_energy_equation){
+        hybrid_electron_fl->HybridInitializeUe(m_fields,
+            current_fp_temp[finest_level],
+            m_hybrid_pic_model.get(),
+            finest_level);
+    }
+
     // Extrapolate the ion current density to t=n+1 using
     // J_i^{n+1} = -1/2 * J_i^{n-1/2} + 3/2 * J_i^{n+1/2}, and recalling that
     // now current_fp_temp = J_i^{n} = 1/2 * (J_i^{n-1/2} + J_i^{n+1/2})
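For reference, combining the two relations in the comment above: since `current_fp_temp` holds J_i^{n} = 1/2 (J_i^{n-1/2} + J_i^{n+1/2}), the extrapolation J_i^{n+1} = -1/2 J_i^{n-1/2} + 3/2 J_i^{n+1/2} is equivalent to J_i^{n+1} = 2 J_i^{n+1/2} - J_i^{n}, i.e. one linear combination per component. A minimal sketch of that step using `amrex::MultiFab::LinComb` (names follow the rest of this diff; the exact loop structure in the file may differ):

```cpp
// Sketch: J_i^{n+1} = 2*J_i^{n+1/2} - J_i^{n}, written in place into
// current_fp_temp. Assumes current_fp holds J_i^{n+1/2} and
// current_fp_temp holds J_i^{n} (as stated in the comments above).
for (int idim = 0; idim < 3; ++idim) {
    amrex::MultiFab::LinComb(
        *current_fp_temp[lev][idim],
        -1._rt, *current_fp_temp[lev][idim], 0,   // -1 * J_i^{n}
         2._rt, *current_fp[lev][idim], 0,        // +2 * J_i^{n+1/2}
        0, current_fp_temp[lev][idim]->nComp(),
        current_fp_temp[lev][idim]->nGrowVect());
}
```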
@@ -169,18 +191,6 @@ void WarpX::HybridPICEvolveFields ()
 
     // all the qdsmc solver functions should be in an ElectronEnergyEquationSolver class, as well as other solvers like the Layer method
     if(m_hybrid_pic_model->m_solve_electron_energy_equation){
-        // Initialize Ke = ne^(1-gamma)*Te
-        // Move up, shouldn't Ke be defined with both ne and Te at n instead of ne at n+1 ?
-        hybrid_electron_fl->HybridInitializeKe(m_fields, m_hybrid_pic_model->m_gamma, m_hybrid_pic_model->m_n_floor, finest_level);
-
-        // Update Ue in electron fluid container
-        // check at what step this should be calculated, here Ue is at n+1
-        // maybe move up to be consistent with Ke initialization
-        hybrid_electron_fl->HybridInitializeUe(m_fields,
-            current_fp_temp[finest_level],
-            m_hybrid_pic_model.get(),
-            finest_level);
-
         // Reset qdsmc particles positions to x0,y0,z0 and the rest of the attributes to 0, then redistribute
         qdsmc_hybrid_electron_pc->ResetParticles(finest_level);
 
@@ -193,14 +203,24 @@ void WarpX::HybridPICEvolveFields ()
         // Set fictitious electron particles entropy
         qdsmc_hybrid_electron_pc->SetK(finest_level,
             *m_fields.get(hybrid_electron_fl->name_mf_K, finest_level),
-            *m_fields.get(FieldType::rho_fp, finest_level));
+            *m_fields.get(hybrid_electron_fl->name_mf_N, finest_level));
 
         // Push fictitious electron particles
         qdsmc_hybrid_electron_pc->PushX(finest_level, dt[0]);
 
+        // Deposit the qdsmc particle weights; needed later to normalize the Te update
+        m_fields.get(hybrid_electron_fl->name_mf_weights, finest_level)->setVal(0);
+        qdsmc_hybrid_electron_pc->DepositField(finest_level, *m_fields.get(hybrid_electron_fl->name_mf_weights, finest_level));
+
         // Deposit entropy from qdsmc
         qdsmc_hybrid_electron_pc->DepositK(finest_level, *m_fields.get(hybrid_electron_fl->name_mf_K, finest_level));
 
+        // Update ne to n+1 before updating Te so the calculation is consistent
+        m_fields.get(hybrid_electron_fl->name_mf_N, finest_level)->setVal(0);
+        MultiFab::Copy( *m_fields.get(hybrid_electron_fl->name_mf_N, finest_level),
+            *m_fields.get(FieldType::rho_fp, finest_level),
+            0, 0, 1, m_fields.get(hybrid_electron_fl->name_mf_N, finest_level)->nGrowVect());
+
         // Update Te after QDSMC solver:
         hybrid_electron_fl->HybridUpdateTe(m_fields, m_hybrid_pic_model->m_gamma, m_hybrid_pic_model->m_n_floor, finest_level);
     }
diff --git a/Source/Fluids/QdsmcParticleContainer.H b/Source/Fluids/QdsmcParticleContainer.H
index 3ccc6ce8ab8..d5a8573c06c 100644
--- a/Source/Fluids/QdsmcParticleContainer.H
+++ b/Source/Fluids/QdsmcParticleContainer.H
@@ -110,6 +110,9 @@ public:
     // then calls Redistribute()
     void ResetParticles(int lev);
 
+    // Deposits the qdsmc particle weights into the given nodal MultiFab
+    void DepositField(int lev, amrex::MultiFab &Field);
+
 };
 
 #endif // WARPX_QdsmcParticleContainer_H_
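`DepositField` above fills the new weights MultiFab with the same shape factors used for the entropy deposit in `DepositK`, so the weights act as the normalization when Te is rebuilt from the deposited entropy. The kernel of `HybridUpdateTe` is not fully visible in this diff; the following is a standalone sketch of the per-node update it presumably performs (all names hypothetical), inverting the closure Ke = ne^(1-gamma) * Te:

```cpp
#include <cmath>

// Hypothetical per-node helper illustrating the assumed Te update:
// normalize the deposited entropy by the deposited weights, then invert
// Ke = ne^(1-gamma) * Te. This is NOT taken from the actual kernel.
double update_Te_node (double K_dep,   // deposited entropy at the node
                       double w_dep,   // deposited qdsmc weights at the node
                       double ne,      // electron number density at t = n+1
                       double gamma,   // adiabatic index
                       double n_floor) // density floor
{
    if (ne <= n_floor || w_dep <= 0.0) { return 0.0; }  // safety floors
    const double Ke = K_dep / w_dep;                    // normalized entropy
    return Ke * std::pow(ne, gamma - 1.0);              // Te = Ke * ne^(gamma-1)
}
```

Copying `rho_fp` into `name_mf_N` right before this step (see the hunk above) is what keeps `ne` here consistent with the n+1 density.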
diff --git a/Source/Fluids/QdsmcParticleContainer.cpp b/Source/Fluids/QdsmcParticleContainer.cpp
index c41f5a008e3..91c511de85a 100644
--- a/Source/Fluids/QdsmcParticleContainer.cpp
+++ b/Source/Fluids/QdsmcParticleContainer.cpp
@@ -264,13 +264,11 @@ QdsmcParticleContainer::SetV (int lev,
         const auto &arrUxfield = Ux[pti].array();
         const auto &arrUyfield = Uy[pti].array();
         const auto &arrUzfield = Uz[pti].array();
 
-        // Gather drift velocity directly from nodes since
-        // particles are located at the node positions before PushX
         amrex::ParallelFor( np, [=] AMREX_GPU_DEVICE (long ip)
         {
-            amrex::Real vxp;
-            amrex::Real vyp;
-            amrex::Real vzp;
+            amrex::Real vxp = 0;
+            amrex::Real vyp = 0;
+            amrex::Real vzp = 0;
 
             gather_vector_field_qdsmc(part_x0[ip], part_y0[ip], part_z0[ip],
                 vxp, vyp, vzp, arrUxfield, arrUyfield, arrUzfield,
                 plo, dinv, box);
@@ -317,12 +315,10 @@ QdsmcParticleContainer::SetK (int lev,
         const auto &arrKfield = Kfield[pti].array();
         const auto &arrrhofield = rhofield[pti].array();
 
-        // Gather entropy and density directly from nodes
-        // since particles are located at the node positions before PushX
         amrex::ParallelFor( np, [=] AMREX_GPU_DEVICE (long ip)
         {
-            amrex::Real n_p;
-            amrex::Real kn_p;
+            amrex::Real n_p = 0;
+            amrex::Real kn_p = 0;
 
             gather_density_entropy(part_x0[ip], part_y0[ip], part_z0[ip],
                 n_p, kn_p, arrrhofield, arrKfield,
                 plo, dinv, cell_volume, box);
@@ -479,5 +475,64 @@ QdsmcParticleContainer::DepositK(int lev, amrex::MultiFab &Kfield)
         Kfield, 0, Kfield.nComp(), Kfield.nGrowVect(), Kfield.nGrowVect(),
         WarpX::do_single_precision_comms, period);
-
-    //amrex::Gpu::streamSynchronize();
 }
+
+
+// Auxiliary function; should eventually be generalized (and DepositK
+// refactored to use it) so that the particle attribute to deposit and
+// the destination MultiFab are both passed as arguments
+void
+QdsmcParticleContainer::DepositField(int lev, amrex::MultiFab &Field)
+{
+    const amrex::XDim3 dinv = WarpX::InvCellSize(lev);
+
+    WarpX &warpx = WarpX::GetInstance();
+    const amrex::Geometry &geom = warpx.Geom(lev);
+    const amrex::Periodicity &period = geom.periodicity();
+    auto plo = geom.ProbLoArray();
+
+    const amrex::Real* dx = warpx.Geom(lev).CellSize();
+
+    Field.setVal(0);
+
+    const auto ix_type = Field.ixType().toIntVect();
+
+    for (iterator pti(*this, lev); pti.isValid(); ++pti)
+    {
+        auto const np = pti.numParticles();
+
+        amrex::Box tilebox = pti.tilebox();
+        amrex::Box box = amrex::convert( tilebox, ix_type );
+        box.grow(Field.nGrowVect());
+
+        auto& attribs = pti.GetStructOfArrays().GetRealData();
+
+        amrex::ParticleReal* const AMREX_RESTRICT part_x = attribs[QdsmcPIdx::x].dataPtr();
+        amrex::ParticleReal* const AMREX_RESTRICT part_y = attribs[QdsmcPIdx::y].dataPtr();
+        amrex::ParticleReal* const AMREX_RESTRICT part_z = attribs[QdsmcPIdx::z].dataPtr();
+
+        // should change this so that DepositField receives as argument which value to read from the QdsmcPIdx struct
+        amrex::ParticleReal* const AMREX_RESTRICT part_np_real = attribs[QdsmcPIdx::np_real].dataPtr();
+
+        auto arrField = Field[pti].array();
+
+        // Deposit the particle weight (np_real, normalized by the cell volume)
+        // onto the nodes surrounding each particle
+        amrex::ParallelFor( np, [=] AMREX_GPU_DEVICE (long ip)
+        {
+            // skip particles that carry no weight
+            if(part_np_real[ip]>0)
+            {
+                amrex::Real val = part_np_real[ip]/(dx[0]*dx[1]*dx[2]);
+                do_deposit_scalar(arrField, part_x[ip], part_y[ip], part_z[ip], plo, dinv, val, box);
+            }
+        });
+    }
+
+    ablastr::utils::communication::SumBoundary(
+        Field, 0, Field.nComp(), Field.nGrowVect(), Field.nGrowVect(),
+        WarpX::do_single_precision_comms, period);
+}
\ No newline at end of file
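`do_deposit_scalar` itself is not part of this diff. For the deposited weights to normalize the entropy deposit correctly, it has to be the adjoint of the trilinear gathers added in `Qdsmc_K.H` below: scatter `val` to the eight surrounding nodes with the same CIC shape factors, using atomic adds because several particles can target the same node. A sketch under that assumption (the signature mirrors the call site above; this is not the actual implementation):

```cpp
#include <cmath>
#include <AMReX_Array4.H>
#include <AMReX_Box.H>
#include <AMReX_Geometry.H>
#include <AMReX_GpuAtomic.H>

using namespace amrex::literals;

// Assumed form of do_deposit_scalar: CIC/trilinear scatter of `val`
// onto the 8 nodes around the particle, mirroring the gather kernels.
AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
void deposit_scalar_cic (amrex::Array4<amrex::Real> const& arr,
                         amrex::ParticleReal xp, amrex::ParticleReal yp, amrex::ParticleReal zp,
                         amrex::GpuArray<amrex::Real,AMREX_SPACEDIM> const& plo,
                         amrex::XDim3 const& dinv, amrex::Real val, amrex::Box const& bx)
{
    const amrex::Real x = (xp - plo[0])*dinv.x;
    const amrex::Real y = (yp - plo[1])*dinv.y;
    const amrex::Real z = (zp - plo[2])*dinv.z;

    const int i0p = static_cast<int>(std::floor(x));
    const int j0p = static_cast<int>(std::floor(y));
    const int k0p = static_cast<int>(std::floor(z));

    const amrex::Real sx[] = {1._rt - (x - i0p), x - i0p};
    const amrex::Real sy[] = {1._rt - (y - j0p), y - j0p};
    const amrex::Real sz[] = {1._rt - (z - k0p), z - k0p};

    for (int ioff = 0; ioff < 2; ++ioff){
        for (int joff = 0; joff < 2; ++joff){
            for (int koff = 0; koff < 2; ++koff){
                const amrex::IntVect c(i0p+ioff, j0p+joff, k0p+koff);
                if (bx.contains(c)){
                    // atomic: multiple particles may deposit to the same node
                    amrex::Gpu::Atomic::AddNoRet(
                        &arr(i0p+ioff, j0p+joff, k0p+koff),
                        sx[ioff]*sy[joff]*sz[koff]*val);
                }
            }
        }
    }
}
```

With that form, `DepositField` (weights) and `DepositK` (entropy) use identical stencils, which is exactly what makes the entropy/weights division in the Te update well defined.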
diff --git a/Source/Fluids/Qdsmc_K.H b/Source/Fluids/Qdsmc_K.H
index cb4e9bb5bef..550f010ab0f 100644
--- a/Source/Fluids/Qdsmc_K.H
+++ b/Source/Fluids/Qdsmc_K.H
@@ -20,7 +20,7 @@
  * Qdsmc particles x0,y0,z0 positions should be at nodes positions
  * at the beginning of every step, so the field gathering
  * does not require any interpolation, it only needs to read
- * the values from the nodes.
+ * the values from the nodes. Trilinear interpolation is included below for generality; it reduces to a direct nodal read when a particle sits exactly on a node.
  */
 AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
 void gather_density_entropy(const amrex::ParticleReal x0p,
@@ -43,11 +43,28 @@ void gather_density_entropy(const amrex::ParticleReal x0p,
     int j0p = std::floor(y);
     int k0p = std::floor(z);
 
-    amrex::IntVect c(i0p, j0p, k0p);
-    if (bx.contains(c)){ // Check if the cell is within the tile's box
-        n_p = rho_arr(i0p,j0p,k0p)*cell_volume/PhysConst::q_e;
-        kn_p = K_arr(i0p,j0p,k0p)*n_p;
-    }
+    amrex::Real dip = x - i0p;
+    amrex::Real djp = y - j0p;
+    amrex::Real dkp = z - k0p;
+
+    amrex::Real sx[] = {1.-dip, dip};
+    amrex::Real sy[] = {1.-djp, djp};
+    amrex::Real sz[] = {1.-dkp, dkp};
+
+    for (int ioff = 0; ioff < 2; ++ioff){
+        for (int joff = 0; joff < 2; ++joff){
+            for (int koff = 0; koff < 2; ++koff){
+
+                amrex::IntVect c(i0p+ioff, j0p+joff, k0p+koff);
+                if (bx.contains(c)){ // Check if the node is within the tile's box
+                    n_p += sx[ioff]*sy[joff]*sz[koff]*rho_arr(i0p+ioff,j0p+joff,k0p+koff)*cell_volume/PhysConst::q_e;
+                    kn_p += sx[ioff]*sy[joff]*sz[koff]*K_arr(i0p+ioff,j0p+joff,k0p+koff);
+                }
+            }
+        }
+    }
+    kn_p = kn_p*n_p;
+
 }
 
@@ -55,7 +72,7 @@ void gather_density_entropy(const amrex::ParticleReal x0p,
  * Qdsmc particles x0,y0,z0 positions should be at nodes positions
  * at the beginning of every step, so the field gathering
  * does not require any interpolation, it only needs to read
- * the values from the nodes.
+ * the values from the nodes. Trilinear interpolation is included below for generality; it reduces to a direct nodal read when a particle sits exactly on a node.
  */
 AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
 void gather_vector_field_qdsmc(const amrex::ParticleReal x0p,
@@ -79,13 +96,27 @@ void gather_vector_field_qdsmc(const amrex::ParticleReal x0p,
     int j0p = std::floor(y);
     int k0p = std::floor(z);
 
-    amrex::IntVect c(i0p, j0p, k0p);
-    if (bx.contains(c)){ // Check if the cell is within the tile's box
-        field_x_p = field_x_arr(i0p,j0p,k0p);
-        field_y_p = field_y_arr(i0p,j0p,k0p);
-        field_z_p = field_z_arr(i0p,j0p,k0p);
-    }
+    amrex::Real dip = x - i0p;
+    amrex::Real djp = y - j0p;
+    amrex::Real dkp = z - k0p;
+
+    amrex::Real sx[] = {1.-dip, dip};
+    amrex::Real sy[] = {1.-djp, djp};
+    amrex::Real sz[] = {1.-dkp, dkp};
+
+    for (int ioff = 0; ioff < 2; ++ioff){
+        for (int joff = 0; joff < 2; ++joff){
+            for (int koff = 0; koff < 2; ++koff){
+
+                amrex::IntVect c(i0p+ioff, j0p+joff, k0p+koff);
+                if (bx.contains(c)){ // Check if the node is within the tile's box
+                    field_x_p += sx[ioff]*sy[joff]*sz[koff]*field_x_arr(i0p+ioff,j0p+joff,k0p+koff);
+                    field_y_p += sx[ioff]*sy[joff]*sz[koff]*field_y_arr(i0p+ioff,j0p+joff,k0p+koff);
+                    field_z_p += sx[ioff]*sy[joff]*sz[koff]*field_z_arr(i0p+ioff,j0p+joff,k0p+koff);
+                }
+            }
+        }
+    }
 }
diff --git a/Source/Fluids/WarpXFluidContainer.H b/Source/Fluids/WarpXFluidContainer.H
index 8f8296e6365..5790f758c00 100644
--- a/Source/Fluids/WarpXFluidContainer.H
+++ b/Source/Fluids/WarpXFluidContainer.H
@@ -204,6 +204,7 @@ public:
     std::string name_mf_NU = "fluid_momentum_density_"+species_name;
     std::string name_mf_T = "fluid_temperature_"+species_name;
    std::string name_mf_K = "fluid_entropy_"+species_name;
+    std::string name_mf_weights = "fluid_qdsmc_weights_"+species_name;
 };
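A quick way to see why the interpolation added above preserves the old behavior: the eight CIC weights `sx[ioff]*sy[joff]*sz[koff]` form a partition of unity, and for a particle sitting exactly on a node (dip = djp = dkp = 0) all of the weight lands on that node, so the gather degenerates to the previous direct nodal read. A tiny standalone check (illustrative only):

```cpp
#include <cassert>
#include <initializer_list>

int main ()
{
    // CIC weights sum to 1 for any in-cell offset d in [0,1)
    for (double d : {0.0, 0.25, 0.7}) {
        const double s[] = {1.0 - d, d};
        double sum = 0.0;
        for (int i = 0; i < 2; ++i)
            for (int j = 0; j < 2; ++j)
                for (int k = 0; k < 2; ++k)
                    sum += s[i]*s[j]*s[k];   // isotropic offset for brevity
        assert(sum > 1.0 - 1e-12 && sum < 1.0 + 1e-12);
    }
    // particle exactly on a node: all weight on the (0,0,0) corner
    const double s0[] = {1.0, 0.0};
    assert(s0[0]*s0[0]*s0[0] == 1.0);
    return 0;
}
```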
diff --git a/Source/Fluids/WarpXFluidContainer.cpp b/Source/Fluids/WarpXFluidContainer.cpp
index d36f097298b..68062b9a6b1 100644
--- a/Source/Fluids/WarpXFluidContainer.cpp
+++ b/Source/Fluids/WarpXFluidContainer.cpp
@@ -172,6 +172,10 @@ void WarpXFluidContainer::AllocateLevelMFs(ablastr::fields::MultiFabRegister& fi
         name_mf_K, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm,
         ncomps, nguards, 0.0_rt);
 
+    fields.alloc_init(
+        name_mf_weights, lev, amrex::convert(ba, amrex::IntVect::TheNodeVector()), dm,
+        ncomps, nguards, 0.0_rt);
+
 }
 
 void WarpXFluidContainer::InitData(ablastr::fields::MultiFabRegister& fields, amrex::Box init_box, amrex::Real cur_time, int lev)
@@ -1432,7 +1436,8 @@ void WarpXFluidContainer::HybridInitializeUe (
     const auto ix_type = m_fields.get(name_mf_NU, Direction{0}, lev)->ixType().toIntVect();
 
-    ablastr::fields::ScalarField rho_fp = m_fields.get(FieldType::rho_fp, lev);
+    //ablastr::fields::ScalarField rho_fp = m_fields.get(FieldType::rho_fp, lev);
+
     ablastr::fields::VectorField current_fp_ampere =
         m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev);
 
     // Index type required for interpolating fields from their respective
@@ -1449,9 +1454,9 @@ void WarpXFluidContainer::HybridInitializeUe (
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for ( MFIter mfi(*rho_fp, TilingIfNotGPU()); mfi.isValid(); ++mfi )
+    for ( MFIter mfi(*m_fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi )
     {
-        amrex::Array4<amrex::Real> const& rho = rho_fp->array(mfi);
+        amrex::Array4<amrex::Real> const& rho = m_fields.get(name_mf_N, lev)->array(mfi);
 
         amrex::Array4<amrex::Real> const& Jx = current_fp_ampere[0]->array(mfi);
         amrex::Array4<amrex::Real> const& Jy = current_fp_ampere[1]->array(mfi);
@@ -1517,7 +1522,7 @@ void WarpXFluidContainer::HybridInitializeKe (ablastr::fields::MultiFabRegister&
     const auto ix_type = m_fields.get(name_mf_K, lev)->ixType().toIntVect();
 
-    ablastr::fields::ScalarField rho_fp = m_fields.get(FieldType::rho_fp, lev);
+    //ablastr::fields::ScalarField rho_fp = m_fields.get(FieldType::rho_fp, lev);
 
     // For safety condition (division by rho)
     amrex::Real rho_floor = PhysConst::q_e*n_floor;
@@ -1525,9 +1530,9 @@ void WarpXFluidContainer::HybridInitializeKe (ablastr::fields::MultiFabRegister&
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for ( MFIter mfi(*rho_fp, TilingIfNotGPU()); mfi.isValid(); ++mfi )
+    for ( MFIter mfi(*m_fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi )
     {
-        amrex::Array4<amrex::Real> const& rho = rho_fp->array(mfi);
+        amrex::Array4<amrex::Real> const& rho = m_fields.get(name_mf_N, lev)->array(mfi);
 
         amrex::Array4<amrex::Real> const& Te = m_fields.get(name_mf_T, lev)->array(mfi);
         amrex::Array4<amrex::Real> const& Ke = m_fields.get(name_mf_K, lev)->array(mfi);
@@ -1563,8 +1568,6 @@ void WarpXFluidContainer::HybridUpdateTe (ablastr::fields::MultiFabRegister& m_f
     const auto dx = geom.CellSizeArray();
     const auto cell_volume = dx[0]*dx[1]*dx[2];
 
-    ablastr::fields::ScalarField rho_fp = m_fields.get(FieldType::rho_fp, lev);
-
     // Set Te to 0
     m_fields.get(name_mf_T, lev)->setVal(0);
 
@@ -1573,12 +1576,13 @@ void WarpXFluidContainer::HybridUpdateTe (ablastr::fields::MultiFabRegister& m_f
 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
 #endif
-    for ( MFIter mfi(*rho_fp, TilingIfNotGPU()); mfi.isValid(); ++mfi )
+    for ( MFIter mfi(*m_fields.get(name_mf_N, lev), TilingIfNotGPU()); mfi.isValid(); ++mfi )
     {
-        amrex::Array4<amrex::Real> const& rho = rho_fp->array(mfi);
+        amrex::Array4<amrex::Real> const& rho = m_fields.get(name_mf_N, lev)->array(mfi);
         amrex::Array4<amrex::Real> const& Ke = m_fields.get(name_mf_K, lev)->array(mfi);
-        amrex::Array4<amrex::Real> const& Te = m_fields.get(name_mf_T, lev)->array(mfi);
-
+        amrex::Array4<amrex::Real> const& Te = m_fields.get(name_mf_T, lev)->array(mfi);
+        amrex::Array4<amrex::Real> const& weights = m_fields.get(name_mf_weights, lev)->array(mfi);
+
         const Box& tilebox = mfi.tilebox();
         amrex::Box box = amrex::convert( tilebox, ix_type );
         box.grow(m_fields.get(name_mf_K, lev)->nGrowVect());
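For reference, the forward closure that `HybridInitializeKe` evaluates per node is Ke = ne^(1-gamma) * Te (per the removed comment earlier in this diff), with rho floored at `PhysConst::q_e * n_floor` as set up above; since gamma > 1 the exponent is negative, which is why the floor is needed before taking the power. A standalone sketch with illustrative names (not the actual kernel):

```cpp
#include <algorithm>
#include <cmath>

// Sketch of the per-node entropy-function initialization,
// Ke = Te * ne^(1 - gamma), with the density floor applied first.
double init_Ke_node (double rho, double Te, double gamma,
                     double q_e, double n_floor)
{
    const double rho_floor = q_e * n_floor;        // safety floor (division by rho)
    const double ne = std::max(rho, rho_floor) / q_e;
    return Te * std::pow(ne, 1.0 - gamma);
}
```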
@@ -1590,9 +1594,13 @@ void WarpXFluidContainer::HybridUpdateTe (ablastr::fields::MultiFabRegister& m_f
         if(ne