Merge remote-tracking branch 'upstream/development' into take_Version_and_PicsarVersion_out_of_the_WarpX_class
lucafedeli88 committed Jun 24, 2024
2 parents b657bb9 + 03dc9b7 commit 1da68fa
Showing 93 changed files with 1,216 additions and 1,257 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/cuda.yml
@@ -115,7 +115,7 @@ jobs:
which nvcc || echo "nvcc not in PATH!"
git clone https://github.com/AMReX-Codes/amrex.git ../amrex
cd ../amrex && git checkout --detach 28b010126a1b39297d8a496ba81f171d8563953b && cd -
cd ../amrex && git checkout --detach 1f038e767011e20100af6ab0db02c69cf7ebe55c && cd -
make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4
ccache -s
5 changes: 3 additions & 2 deletions .github/workflows/macos.yml
@@ -44,7 +44,7 @@ jobs:
- name: CCache Cache
uses: actions/cache@v4
with:
path: /Users/runner/Library/Caches/ccache
path: ~/Library/Caches/ccache
key: ccache-${{ github.workflow }}-${{ github.job }}-git-${{ github.sha }}
restore-keys: |
ccache-${{ github.workflow }}-${{ github.job }}-git-
@@ -53,7 +53,7 @@ jobs:
export CCACHE_COMPRESS=1
export CCACHE_COMPRESSLEVEL=10
export CCACHE_MAXSIZE=100M
export CCACHE_DEPEND=1
export CCACHE_SLOPPINESS=time_macros
ccache -z
source py-venv/bin/activate
@@ -76,6 +76,7 @@ jobs:
cmake --build build_sp -j 3
cmake --build build_sp --target pip_install
du -hs ~/Library/Caches/ccache
ccache -s
- name: run pywarpx
39 changes: 38 additions & 1 deletion CMakeLists.txt
@@ -1,7 +1,7 @@
# Preamble ####################################################################
#
cmake_minimum_required(VERSION 3.20.0)
project(WarpX VERSION 24.05)
project(WarpX VERSION 24.06)

include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake)

@@ -79,6 +79,7 @@ option(WarpX_LIB "Build WarpX as a library" OFF)
option(WarpX_MPI "Multi-node support (message-passing)" ON)
option(WarpX_OPENPMD "openPMD I/O (HDF5, ADIOS)" ON)
option(WarpX_FFT "FFT-based solvers" OFF)
option(WarpX_HEFFTE "Multi-node FFT-based solvers" OFF)
option(WarpX_PYTHON "Python bindings" OFF)
option(WarpX_SENSEI "SENSEI in situ diagnostics" OFF)
option(WarpX_QED "QED support (requires PICSAR)" ON)
@@ -136,6 +137,10 @@ mark_as_advanced(WarpX_MPI_THREAD_MULTIPLE)

option(WarpX_amrex_internal "Download & build AMReX" ON)

if(WarpX_HEFFTE AND NOT WarpX_MPI)
message(FATAL_ERROR "WarpX_HEFFTE (${WarpX_HEFFTE}) can only be used if WarpX_MPI is ON.")
endif()

# change the default build type to Release (or RelWithDebInfo) instead of Debug
set_default_build_type("Release")

@@ -174,6 +179,10 @@ option(ABLASTR_FFT "compile AnyFFT wrappers" ${WarpX_FFT})
if(WarpX_FFT)
set(ABLASTR_FFT ON CACHE STRING "FFT-based solvers" FORCE)
endif()
option(ABLASTR_HEFFTE "compile AnyFFT wrappers" ${WarpX_HEFFTE})
if(WarpX_HEFFTE)
set(ABLASTR_HEFFTE ON CACHE STRING "Multi-Node FFT-based solvers" FORCE)
endif()

# this defined the variable BUILD_TESTING which is ON by default
#include(CTest)
@@ -215,6 +224,23 @@ if(WarpX_FFT)
endif()
endif()

# multi-node FFT
if(WarpX_HEFFTE)
if(WarpX_COMPUTE STREQUAL CUDA)
set(_heFFTe_COMPS CUDA)
elseif(WarpX_COMPUTE STREQUAL HIP)
set(_heFFTe_COMPS ROCM)
elseif(WarpX_COMPUTE STREQUAL SYCL)
set(_heFFTe_COMPS ONEAPI)
else() # NOACC, OMP
set(_heFFTe_COMPS FFTW) # or MKL
endif()
# note: we could also enforce GPUAWARE for CUDA and HIP, which can still be
# disabled at runtime

find_package(Heffte REQUIRED COMPONENTS ${_heFFTe_COMPS})
endif()

# Python
if(WarpX_PYTHON)
find_package(Python COMPONENTS Interpreter Development.Module REQUIRED)
@@ -455,6 +481,10 @@ foreach(D IN LISTS WarpX_DIMS)
endif()
endif()

if(ABLASTR_HEFFTE)
target_link_libraries(ablastr_${SD} PUBLIC Heffte::Heffte)
endif()

if(WarpX_PYTHON)
target_link_libraries(pyWarpX_${SD} PRIVATE pybind11::module pybind11::windows_extras)
if(WarpX_PYTHON_IPO)
@@ -539,6 +569,13 @@ foreach(D IN LISTS WarpX_DIMS)
target_compile_definitions(ablastr_${SD} PUBLIC ABLASTR_USE_FFT)
endif()

if(WarpX_HEFFTE)
target_compile_definitions(ablastr_${SD} PUBLIC WARPX_USE_HEFFTE)
endif()
if(ABLASTR_HEFFTE)
target_compile_definitions(ablastr_${SD} PUBLIC ABLASTR_USE_HEFFTE)
endif()

if(WarpX_PYTHON AND pyWarpX_VERSION_INFO)
# for module __version__
target_compile_definitions(pyWarpX_${SD} PRIVATE
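
Taken together, the CMakeLists.txt changes above follow one pattern: map the compute backend to a heFFTe component, find the package, link the imported target, and define a macro to guard the new code paths. A minimal standalone sketch of that pattern (the project name, the ``DEMO_COMPUTE`` cache variable, ``main.cpp``, and the ``DEMO_USE_HEFFTE`` macro are illustrative assumptions; the component names and the ``Heffte::Heffte`` target come from the diff itself):

.. code-block:: cmake

   cmake_minimum_required(VERSION 3.20.0)
   project(heffte_consumer CXX)

   # Compute backend of the consuming project (mirrors WarpX_COMPUTE).
   set(DEMO_COMPUTE "OMP" CACHE STRING "NOACC/OMP/CUDA/HIP/SYCL")

   # Map the compute backend to the matching heFFTe component.
   if(DEMO_COMPUTE STREQUAL CUDA)
       set(_heFFTe_COMPS CUDA)
   elseif(DEMO_COMPUTE STREQUAL HIP)
       set(_heFFTe_COMPS ROCM)
   elseif(DEMO_COMPUTE STREQUAL SYCL)
       set(_heFFTe_COMPS ONEAPI)
   else()  # NOACC, OMP
       set(_heFFTe_COMPS FFTW)  # or MKL
   endif()

   find_package(Heffte REQUIRED COMPONENTS ${_heFFTe_COMPS})

   add_executable(demo main.cpp)
   # Same imported target that ablastr_${SD} links against above.
   target_link_libraries(demo PRIVATE Heffte::Heffte)
   # Same guard pattern as WARPX_USE_HEFFTE / ABLASTR_USE_HEFFTE.
   target_compile_definitions(demo PRIVATE DEMO_USE_HEFFTE)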
8 changes: 4 additions & 4 deletions Docs/source/acknowledge_us.rst
@@ -23,14 +23,14 @@ Please add the following sentence to your publications, it helps contributors keep

**Plain text:**

This research used the open-source particle-in-cell code WarpX https://github.com/ECP-WarpX/WarpX, primarily funded by the US DOE Exascale Computing Project. Primary WarpX contributors are with LBNL, LLNL, CEA-LIDYL, SLAC, DESY, CERN, and TAE Technologies. We acknowledge all WarpX contributors.
This research used the open-source particle-in-cell code WarpX https://github.com/ECP-WarpX/WarpX. Primary WarpX contributors are with LBNL, LLNL, CEA-LIDYL, SLAC, DESY, CERN, and TAE Technologies. We acknowledge all WarpX contributors.

**LaTeX:**

.. code-block:: latex

\usepackage{hyperref}
This research used the open-source particle-in-cell code WarpX \url{https://github.com/ECP-WarpX/WarpX}, primarily funded by the US DOE Exascale Computing Project.
This research used the open-source particle-in-cell code WarpX \url{https://github.com/ECP-WarpX/WarpX}.
Primary WarpX contributors are with LBNL, LLNL, CEA-LIDYL, SLAC, DESY, CERN, and TAE Technologies.
We acknowledge all WarpX contributors.

@@ -55,8 +55,8 @@ If your project uses a specific algorithm or component, please consider citing t

- Sandberg R T, Lehe R, Mitchell C E, Garten M, Myers A, Qiang J, Vay J-L and Huebl A.
**Synthesizing Particle-in-Cell Simulations Through Learning and GPU Computing for Hybrid Particle Accelerator Beamlines**.
Proc. of Platform for Advanced Scientific Computing (PASC'24), *submitted*, 2024.
`preprint <http://arxiv.org/abs/2402.17248>`__
Proc. of Platform for Advanced Scientific Computing (PASC'24), *PASC24 Best Paper Award*, 2024.
`DOI:10.1145/3659914.3659937 <https://doi.org/10.1145/3659914.3659937>`__

- Sandberg R T, Lehe R, Mitchell C E, Garten M, Qiang J, Vay J-L and Huebl A.
**Hybrid Beamline Element ML-Training for Surrogates in the ImpactX Beam-Dynamics Code**.
4 changes: 2 additions & 2 deletions Docs/source/conf.py
@@ -103,9 +103,9 @@ def __init__(self, *args, **kwargs):
# built documents.
#
# The short X.Y version.
version = u'24.05'
version = u'24.06'
# The full version, including alpha/beta/rc tags.
release = u'24.05'
release = u'24.06'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
31 changes: 21 additions & 10 deletions Docs/source/highlights.rst
@@ -14,7 +14,17 @@ Plasma-Based Acceleration

Scientific works in laser-plasma and beam-plasma acceleration.

#. Peng, H. and Huang, T. W. and Jiang, K. and Li, R. and Wu, C. N. and Yu, M. Y. and Riconda, C. and Weber, S. and Zhou, C. T. and Ruan, S. C.
#. Ross AJ, Chappell J, van de Wetering JJ, Cowley J, Archer E, Bourgeois N, Corner L, Emerson DR, Feder L, Gu XJ, Jakobsson O, Jones H, Picksley A, Reid L, Wang W, Walczak R, Hooker SM.
**Resonant excitation of plasma waves in a plasma channel**.
Phys. Rev. Research **6**, L022001, 2024
`DOI:10.1103/PhysRevResearch.6.L022001 <https://doi.org/10.1103/PhysRevResearch.6.L022001>`__

#. Sandberg R T, Lehe R, Mitchell C E, Garten M, Myers A, Qiang J, Vay J-L and Huebl A.
**Synthesizing Particle-in-Cell Simulations Through Learning and GPU Computing for Hybrid Particle Accelerator Beamlines**.
Proc. of Platform for Advanced Scientific Computing (PASC'24), *PASC24 Best Paper Award*, 2024.
`DOI:10.1145/3659914.3659937 <https://doi.org/10.1145/3659914.3659937>`__

#. Peng H, Huang TW, Jiang K, Li R, Wu CN, Yu MY, Riconda C, Weber S, Zhou CT, Ruan SC.
**Coherent Subcycle Optical Shock from a Superluminal Plasma Wake**.
Phys. Rev. Lett. **131**, 145003, 2023
`DOI:10.1103/PhysRevLett.131.145003 <https://doi.org/10.1103/PhysRevLett.131.145003>`__
@@ -24,11 +34,6 @@ Scientific works in laser-plasma and beam-plasma acceleration.
Phys. Rev. Research **5**, 033112, 2023
`DOI:10.1103/PhysRevResearch.5.033112 <https://doi.org/10.1103/PhysRevResearch.5.033112>`__

#. Sandberg R T, Lehe R, Mitchell C E, Garten M, Myers A, Qiang J, Vay J-L and Huebl A.
**Synthesizing Particle-in-Cell Simulations Through Learning and GPU Computing for Hybrid Particle Accelerator Beamlines**.
Proc. of Platform for Advanced Scientific Computing (PASC'24), *submitted*, 2024.
`preprint <http://arxiv.org/abs/2402.17248>`__

#. Sandberg R T, Lehe R, Mitchell C E, Garten M, Qiang J, Vay J-L and Huebl A.
**Hybrid Beamline Element ML-Training for Surrogates in the ImpactX Beam-Dynamics Code**.
14th International Particle Accelerator Conference (IPAC'23), WEPA101, 2023.
@@ -103,14 +108,20 @@ Scientific works in particle and beam modeling.

#. Sandberg R T, Lehe R, Mitchell C E, Garten M, Myers A, Qiang J, Vay J-L and Huebl A.
**Synthesizing Particle-in-Cell Simulations Through Learning and GPU Computing for Hybrid Particle Accelerator Beamlines**.
Proc. of Platform for Advanced Scientific Computing (PASC'24), *submitted*, 2024.
`preprint <http://arxiv.org/abs/2402.17248>`__
Proc. of Platform for Advanced Scientific Computing (PASC'24), *PASC24 Best Paper Award*, 2024.
`DOI:10.1145/3659914.3659937 <https://doi.org/10.1145/3659914.3659937>`__

#. Nguyen B, Formenti A, Lehe R, Vay J-L, Gessner S, and Fedeli L.
**Comparison of WarpX and GUINEA-PIG for electron positron collisions**.
15th International Particle Accelerator Conference (IPAC'24), WEPC84, 2024.
`preprint <https://arxiv.org/abs/2405.09583>`__,
`DOI:10.18429/JACoW-IPAC2024-WEPC84 <https://doi.org/10.18429/JACoW-IPAC2024-WEPC84>`__

#. Sandberg R T, Lehe R, Mitchell C E, Garten M, Qiang J, Vay J-L, Huebl A.
**Hybrid Beamline Element ML-Training for Surrogates in the ImpactX Beam-Dynamics Code**.
14th International Particle Accelerator Conference (IPAC'23), WEPA101, *in print*, 2023.
14th International Particle Accelerator Conference (IPAC'23), WEPA101, 2023.
`preprint <https://www.ipac23.org/preproc/pdf/WEPA101.pdf>`__,
`DOI:10.18429/JACoW-IPAC-23-WEPA101 <https://doi.org/10.18429/JACoW-IPAC-23-WEPA101>`__
`DOI:10.18429/JACoW-IPAC2023-WEPA101 <https://doi.org/10.18429/JACoW-IPAC2023-WEPA101>`__

#. Tan W H, Piot P, Myers A, Zhang W, Rheaume T, Jambunathan R, Huebl A, Lehe R, Vay J-L.
**Simulation studies of drive-beam instability in a dielectric wakefield accelerator**.
2 changes: 2 additions & 0 deletions Docs/source/install/cmake.rst
@@ -97,6 +97,7 @@ CMake Option Default & Values Descr
``WarpX_PRECISION`` SINGLE/**DOUBLE** Floating point precision (single/double)
``WarpX_PARTICLE_PRECISION`` SINGLE/**DOUBLE** Particle floating point precision (single/double), defaults to WarpX_PRECISION value if not set
``WarpX_FFT`` ON/**OFF** FFT-based solvers
``WarpX_HEFFTE`` ON/**OFF** Multi-Node FFT-based solvers
``WarpX_PYTHON`` ON/**OFF** Python bindings
``WarpX_QED`` **ON**/OFF QED support (requires PICSAR)
``WarpX_QED_TABLE_GEN`` ON/**OFF** QED table generation support (requires PICSAR and Boost)
@@ -271,6 +272,7 @@ Environment Variable Default & Values Descr
``WARPX_PRECISION`` SINGLE/**DOUBLE** Floating point precision (single/double)
``WARPX_PARTICLE_PRECISION`` SINGLE/**DOUBLE** Particle floating point precision (single/double), defaults to WarpX_PRECISION value if not set
``WARPX_FFT`` ON/**OFF** FFT-based solvers
``WARPX_HEFFTE`` ON/**OFF** Multi-Node FFT-based solvers
``WARPX_QED`` **ON**/OFF PICSAR QED (requires PICSAR)
``WARPX_QED_TABLE_GEN`` ON/**OFF** QED table generation (requires PICSAR and Boost)
``BUILD_PARALLEL`` ``2`` Number of threads to use for parallel builds
7 changes: 4 additions & 3 deletions Docs/source/install/dependencies.rst
@@ -23,12 +23,13 @@ Optional dependencies include:
- for on-node accelerated compute *one of either*:

- `OpenMP 3.1+ <https://www.openmp.org>`__: for threaded CPU execution or
- `CUDA Toolkit 11.7+ <https://developer.nvidia.com/cuda-downloads>`__: for Nvidia GPU support (see `matching host-compilers <https://gist.github.com/ax3l/9489132>`_) or
- `CUDA Toolkit 11.7+ <https://developer.nvidia.com/cuda-downloads>`__: for Nvidia GPU support (see `matching host-compilers <https://gist.github.com/ax3l/9489132>`__) or
- `ROCm 5.2+ (5.5+ recommended) <https://gpuopen.com/learn/amd-lab-notes/amd-lab-notes-rocm-installation-readme/>`__: for AMD GPU support
- `FFTW3 <http://www.fftw.org>`_: for spectral solver (PSATD) support when running on CPU or SYCL
- `FFTW3 <http://www.fftw.org>`__: for spectral solver (PSATD or IGF) support when running on CPU or SYCL

- also needs the ``pkg-config`` tool on Unix
- `BLAS++ <https://github.com/icl-utk-edu/blaspp>`_ and `LAPACK++ <https://github.com/icl-utk-edu/lapackpp>`_: for spectral solver (PSATD) support in RZ geometry
- `heFFTe 2.4.0+ <https://github.com/icl-utk-edu/heffte>`__: for multi-node spectral solver (IGF) support
- `BLAS++ <https://github.com/icl-utk-edu/blaspp>`__ and `LAPACK++ <https://github.com/icl-utk-edu/lapackpp>`__: for spectral solver (PSATD) support in RZ geometry
- `Boost 1.66.0+ <https://www.boost.org/>`__: for QED lookup tables generation support
- `openPMD-api 0.15.1+ <https://github.com/openPMD/openPMD-api>`__: we automatically download and compile a copy of openPMD-api for openPMD I/O support

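
Because these optional dependencies are all discovered via CMake ``find_package``, one convenient way to pin their install locations is a pre-load cache script passed with ``cmake -C``. A sketch under the assumption that heFFTe sits in a custom prefix (the file name and path are illustrative, not part of this commit):

.. code-block:: cmake

   # deps.cmake -- pass as: cmake -C deps.cmake -S . -B build
   # <Package>_ROOT hints are honored by find_package() since CMake 3.12 (CMP0074).
   set(Heffte_ROOT "$ENV{HOME}/sw/heffte" CACHE PATH "heFFTe install prefix")

   # Turn on the solvers that need these libraries.
   set(WarpX_FFT    ON CACHE BOOL "FFT-based solvers")
   set(WarpX_HEFFTE ON CACHE BOOL "Multi-node FFT-based solvers")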
8 changes: 4 additions & 4 deletions Docs/source/install/hpc/perlmutter.rst
@@ -153,7 +153,7 @@ Use the following :ref:`cmake commands <building-cmake>` to compile the applicat
cd $HOME/src/warpx
rm -rf build_pm_gpu
cmake -S . -B build_pm_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3"
cmake -S . -B build_pm_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3"
cmake --build build_pm_gpu -j 16
The WarpX application executables are now in ``$HOME/src/warpx/build_pm_gpu/bin/``.
@@ -164,7 +164,7 @@ Use the following :ref:`cmake commands <building-cmake>` to compile the applicat
cd $HOME/src/warpx
rm -rf build_pm_gpu_py
cmake -S . -B build_pm_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3"
cmake -S . -B build_pm_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3"
cmake --build build_pm_gpu_py -j 16 --target pip_install
.. tab-item:: CPU Nodes
@@ -174,7 +174,7 @@ Use the following :ref:`cmake commands <building-cmake>` to compile the applicat
cd $HOME/src/warpx
rm -rf build_pm_cpu
cmake -S . -B build_pm_cpu -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3"
cmake -S . -B build_pm_cpu -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3"
cmake --build build_pm_cpu -j 16
The WarpX application executables are now in ``$HOME/src/warpx/build_pm_cpu/bin/``.
@@ -184,7 +184,7 @@ Use the following :ref:`cmake commands <building-cmake>` to compile the applicat
rm -rf build_pm_cpu_py
cmake -S . -B build_pm_cpu_py -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3"
cmake -S . -B build_pm_cpu_py -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3"
cmake --build build_pm_cpu_py -j 16 --target pip_install
Now, you can :ref:`submit Perlmutter compute jobs <running-cpp-perlmutter>` for WarpX :ref:`Python (PICMI) scripts <usage-picmi>` (:ref:`example scripts <usage-examples>`).
25 changes: 14 additions & 11 deletions Docs/source/refs.bib
@@ -205,17 +205,20 @@ @article{Roedel2010
year = {2010}
}

@misc{SandbergPASC24,
address = {Zuerich, Switzerland},
author = {Ryan Sandberg and Remi Lehe and Chad E Mitchell and Marco Garten and Andrew Myers and Ji Qiang and Jean-Luc Vay and Axel Huebl},
booktitle = {Proc. of PASC24},
note = {accepted},
series = {PASC'24 - Platform for Advanced Scientific Computing},
title = {{Synthesizing Particle-in-Cell Simulations Through Learning and GPU Computing for Hybrid Particle Accelerator Beamlines}},
venue = {Zuerich, Switzerland},
year = {2024},
doi = {10.48550/arXiv.2402.17248},
url = {https://arxiv.org/abs/2402.17248}
@inproceedings{SandbergPASC24,
author = {Sandberg, Ryan and Lehe, Remi and Mitchell, Chad and Garten, Marco and Myers, Andrew and Qiang, Ji and Vay, Jean-Luc and Huebl, Axel},
title = {{Synthesizing Particle-In-Cell Simulations through Learning and GPU Computing for Hybrid Particle Accelerator Beamlines}},
series = {PASC '24},
booktitle = {Proceedings of the Platform for Advanced Scientific Computing Conference},
location = {Zuerich, Switzerland},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
articleno = {23},
numpages = {11},
isbn = {9798400706394},
doi = {10.1145/3659914.3659937},
note = {{PASC24 Best Paper Award}},
year = {2024}
}

@inproceedings{SandbergIPAC23,