diff --git a/.gitlab/machines.yml b/.gitlab/machines.yml
index 95b01e765..68c3d2d5b 100644
--- a/.gitlab/machines.yml
+++ b/.gitlab/machines.yml
@@ -4,25 +4,27 @@
 .on_ruby:
   tags:
     - ruby
-    - shell
+    - batch
   variables:
+    SCHEDULER_ACTION: allocate
+    SCHEDULER_PARAMETERS: "--res=ci --exclusive -N 2 -t 120"
+    NPROC: 112
     HOSTNAME: 'ruby'
-    PARTITION: pdebug
-    BUILD_ALLOC: srun -N 1 -c 36 -p pdebug -t 60
-    TEST_ALLOC: ''
-    CLEAN_ALLOC: srun -n 20
+  timeout: 120 minutes
   extends: [.on_toss_4_x86]

 .on_lassen:
   tags:
     - lassen
-    - shell
+    - batch
   variables:
+    SCHEDULER_PARAMETERS: "-nnodes 1 -W 150 -q pci -alloc_flags atsdisable"
+    LSB_JOB_STARTER: "ENVIRONMENT=BATCH /usr/tcetmp/bin/bsub_job_starter %USRCMD"
+    NPROC: 40
+    ENVIRONMENT: "BATCH"
     HOSTNAME: 'lassen'
-    BUILD_ALLOC: lalloc 1 -W 60
-    TEST_ALLOC: $BUILD_ALLOC
-    CLEAN_ALLOC: lalloc 1 lrun -n 20
     LC_MODULES: "cuda/11.1.0"
+  timeout: 150 minutes
   extends: [.on_blueos_3_ppc64]

 # ------------------------------------------------------------------------------
diff --git a/.gitlab/scripts.yml b/.gitlab/scripts.yml
index 5b44f650f..14e48a1f4 100644
--- a/.gitlab/scripts.yml
+++ b/.gitlab/scripts.yml
@@ -14,7 +14,7 @@
     - cd $CI_BUILD_DIR
     - echo $SPEC

-    - $BUILD_ALLOC ./$SCRIPT_DIR/gitlab/build_and_install.py --spec="$SPEC" --tpls-only
+    - ./$SCRIPT_DIR/gitlab/build_and_install.py --spec="$SPEC" --tpls-only
   artifacts:
     paths:
       - ci-dir.txt
@@ -27,7 +27,7 @@
   script:
     - CI_BUILD_DIR=$(cat ci-dir.txt)
     - cd $CI_BUILD_DIR && cat job-name.txt

-    - $BUILD_ALLOC ./$SCRIPT_DIR/devtools/host-config-build.py --host-config gitlab.cmake --build $EXTRA_CMAKE_ARGS
+    - ./$SCRIPT_DIR/devtools/host-config-build.py --no-clean --build --nprocs $NPROC --host-config gitlab.cmake $EXTRA_CMAKE_ARGS
   artifacts:
     paths:
       - ci-dir.txt
@@ -55,7 +55,7 @@
     - CI_BUILD_DIR=$(cat ci-dir.txt)
     - cd $CI_BUILD_DIR && cat job-name.txt

-    - ./build_gitlab/install/spheral $SCRIPT_DIR/gitlab/run_ats.py --test-alloc "$TEST_ALLOC" --ats-file $ATS_FILE --ci-build-dir $CI_BUILD_DIR || exit_code=$?
+    - ./build_gitlab/install/spheral-ats --ciRun ./build_gitlab/install/$ATS_FILE || exit_code=$?
     - cp -r test-logs $CI_PROJECT_DIR
     - exit $exit_code
   artifacts:
@@ -76,7 +76,7 @@
     - ml load mpifileutils
     - cd $SPHERAL_BUILDS_DIR

-    - $CLEAN_ALLOC drm $CI_BUILD_DIR/..
+    - drm $CI_BUILD_DIR/..

 # ------------------------------------------------------------------------------
 # Shared TPL scripts.
@@ -85,7 +85,7 @@
 .update_tpls:
   stage: update_tpls
   script:
-    - $BUILD_ALLOC ./$SCRIPT_DIR/devtools/tpl-manager.py --spec-list="$SCRIPT_DIR/devtools/spec-list.json" --spheral-spack-dir=$UPSTREAM_DIR
+    - ./$SCRIPT_DIR/devtools/tpl-manager.py --no-upstream --spec-list="$SCRIPT_DIR/devtools/spec-list.json" --spheral-spack-dir=$UPSTREAM_DIR

 .toss_update_permissions:
   stage: update_permissions
@@ -93,7 +93,7 @@
     GIT_STRATEGY: none
   script:
     - ml load mpifileutils
-    - srun -N 1 -p $PARTITION -n 20 -t 10 dchmod --mode go+rx $UPSTREAM_DIR
+    - dchmod --mode go+rx $UPSTREAM_DIR

 # ------------------------------------------------------------------------------
 # Production Installation scripts
@@ -117,7 +117,7 @@
     - INSTALL_DIR=/usr/gapps/Spheral/$SYS_TYPE/spheral-$SPHERAL_REV_STR
     - DEV_PKG_NAME=$SYS_TYPE-spheral-dev-pkg-$SPHERAL_REV_STR

-    - env SPHERAL_REV_STR=$SPHERAL_REV_STR INSTALL_DIR=$INSTALL_DIR SPEC=$SPEC SPACK_PKG_NAME=$SPACK_PKG_NAME BUILD_ALLOC="$BUILD_ALLOC" SCRIPT_DIR=$SCRIPT_DIR
+    - env SPHERAL_REV_STR=$SPHERAL_REV_STR INSTALL_DIR=$INSTALL_DIR SPEC=$SPEC SPACK_PKG_NAME=$SPACK_PKG_NAME SCRIPT_DIR=$SCRIPT_DIR
       bash ./$SCRIPT_DIR/lc/generate-buildcache.sh

     - echo $INSTALL_DIR &> install-dir.txt
@@ -131,6 +131,8 @@

 .install_dev_pkg:
   stage: install_production
+  variables:
+    GIT_STRATEGY: none
   script:
     - INSTALL_DIR=$(cat install-dir.txt)
     - DEV_PKG_NAME=$(cat dev-pkg-name.txt)
@@ -139,7 +141,7 @@
     - tar -xzf $DEV_PKG_NAME.tar.gz
     - cd $DEV_PKG_NAME

-    - env INSTALL_DIR=$INSTALL_DIR SPEC=$SPEC SPACK_PKG_NAME=$SPACK_PKG_NAME BUILD_ALLOC="$BUILD_ALLOC" SCRIPT_DIR=$SCRIPT_DIR
+    - env INSTALL_DIR=$INSTALL_DIR SPEC=$SPEC SPACK_PKG_NAME=$SPACK_PKG_NAME BUILD_ALLOC="" SCRIPT_DIR=$SCRIPT_DIR
       bash ./$SCRIPT_DIR/lc/install-from-dev-pkg.sh

   artifacts:
@@ -158,7 +160,7 @@
     - chmod go+r /usr/gapps/Spheral/modulefiles/Spheral/"$ALIAS".lua

     - ml load mpifileutils
-    - srun -N 1 -p $PARTITION -n 20 -t 10 dchmod --mode go+rx $INSTALL_DIR
+    - dchmod --mode go+rx $INSTALL_DIR

     - ln -sfn $INSTALL_DIR /usr/gapps/Spheral/$SYS_TYPE/$ALIAS

@@ -181,7 +183,7 @@
     - echo $DIR_LIST

     - ml load mpifileutils
-    - if [[ $DIR_LIST ]]; then $CLEAN_ALLOC drm $DIR_LIST; else echo "No directories to remove at this time."; fi
+    - if [[ $DIR_LIST ]]; then drm $DIR_LIST; else echo "No directories to remove at this time."; fi
     when: always

 .merge_pr_rule:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 41b049c11..07640e039 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -5,7 +5,7 @@ include(cmake/SpheralVersion.cmake)
 project(spheral LANGUAGES C CXX Fortran VERSION ${SPHERAL_VERSION})

 set(SPHERAL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR} CACHE PATH "Path to Spheral source directory")
-set(SPHERAL_TEST_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
+set(SPHERAL_TEST_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX}/tests)

 include(cmake/SetupSpheral.cmake)
diff --git a/Dockerfile b/Dockerfile
index 75bd32be5..3c8e9bad0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -75,5 +75,13 @@ RUN make install

 # Run ATS testing suite.
 WORKDIR ../install
 ENV MPLBACKEND=agg
-RUN ./spheral-atstest --filter="level<100" tests/integration.ats
+
+# ATS currently does not allow us to run in parallel for regular linux machines
+# If it did, we would need some of the following commands
+#RUN export OMP_NUM_THREADS=1
+#RUN export MACHINE_TYPE="winParallel"
+#RUN ./spheral-ats --level 99 --mpiexe mpiexec --npMax $JCXX tests/integration.ats
+
+# Instead, we will just run it normally
+RUN ./spheral-ats --level 99 tests/integration.ats

 # -----------------------------------------------------------------------------
diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 9e5d37f99..fb739d943 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -25,9 +25,10 @@ Notable changes include:
   * Physics::postStateUpdate now returns a bool indicating if boundary conditions should be enforced again.
   * Physics packages can now have Physics sub-packages, which can be run before or after the main package. The SpheralController now checks for these packages and adds them to the physics package list as needed.
-  * Physics packages can indicate if they require Voronoi cell information be available. If so, a new package which computes and
+  * Physics packages can indicate if they require Voronoi cell information to be available. If so, a new package which computes and
     updates the Voronoi information is automatically added to the package list by the SpheralController (similar to how the
     Reproducing Kernel corrections are handled).
+  * Command line options are now consistent. Default values of the string "None" are no longer allowed; any command line input of "None" is converted to the Python NoneType None.
   * Cleaned up use of std::any in State objects using a visitor pattern to be rigorous ensuring all state entries are handled properly during assignment, equality, and cloning operations. This is intended to help ensure our Physics advance during time integration is correct.
@@ -43,13 +44,16 @@ Notable changes include:
   * ENABLE\_DEV\_BUILD can now export targets properly.
   * Added a GCC flag to prevent building variable tracking symbols when building PYB11 modules. This is unnecessary, and on some platforms trying to build such symbols is very expensive and in some cases fails.
+  * Consolidated lcatstest.in and run\_ats.py into a single spheral\_ats.py script.
+  * SPHERAL\_TEST\_INSTALL\_PREFIX now includes the tests directory.
+  * Removed most configured files and added a SpheralConfigs.py file to use at runtime instead.

 * Bug Fixes / improvements:
   * Wrappers for MPI calls are simplified and improved.
   * Time step estimate due to velocity divergence in RZ space has been fixed.
   * Fixed tolerances for ANEOS equation of state temperature lookup.
   * Clang C++ warnings have been eliminated, so the Clang CI tests have been updated to treat warnings as errors.
-  * Fix for installing libraries when building individual package WITH ENABLE_DEV_BUILD=On.
+  * Fix for installing libraries when building an individual package with ENABLE\_DEV\_BUILD=On.
   * Bugfix for RZ solid CRKSPH with compatible energy.
   * Parsing of the None string now always becomes the None python type. Tests have been updated accordingly.
   * IO for checkpoints and visualization can now be properly turned off through SpheralController input options.
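The release notes above call out the new handling of the string "None" on the command line. A minimal sketch of that kind of conversion follows; the option name and helper are illustrative assumptions, not Spheral's actual `commandLine()` implementation:

```python
# Minimal sketch of the "None"-string conversion described in the notes above.
# The helper and option name are hypothetical, not Spheral's code.
import argparse

def none_as_nonetype(value):
    """Map the literal string "None" to the Python None type."""
    return None if value == "None" else value

parser = argparse.ArgumentParser()
parser.add_argument("--someOption", type=none_as_nonetype, default=None)

args = parser.parse_args(["--someOption", "None"])
assert args.someOption is None   # "None" arrives as NoneType, not a string
```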
diff --git a/cmake/SetupSpheral.cmake b/cmake/SetupSpheral.cmake
index d93c4fd41..3738456e6 100644
--- a/cmake/SetupSpheral.cmake
+++ b/cmake/SetupSpheral.cmake
@@ -153,40 +153,15 @@ endif()
 #-------------------------------------------------------------------------------
 # Build C++ tests and install tests to install directory
 #-------------------------------------------------------------------------------
 if (ENABLE_TESTS)
+  install(DIRECTORY ${SPHERAL_ROOT_DIR}/tests/
+          USE_SOURCE_PERMISSIONS
+          DESTINATION "${SPHERAL_TEST_INSTALL_PREFIX}"
+          PATTERN "*CMakeLists.txt*" EXCLUDE
+          PATTERN "*.cmake" EXCLUDE
+          PATTERN "*.in" EXCLUDE
+          PATTERN "*.pyc" EXCLUDE
+          PATTERN "*~" EXCLUDE)
   add_subdirectory(${SPHERAL_ROOT_DIR}/tests/unit)
-
-  # A macro to preserve directory structure when installing files
-  macro(install_with_directory)
-    set(optionsArgs "")
-    set(oneValueArgs SOURCE DESTINATION)
-    set(multiValueArgs FILES)
-    cmake_parse_arguments(CAS "${optionsArgs}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
-    foreach(FILE ${CAS_FILES})
-      get_filename_component(DIR ${FILE} DIRECTORY)
-      INSTALL(FILES ${CAS_SOURCE}/${FILE} DESTINATION ${CAS_DESTINATION}/${DIR})
-    endforeach()
-  endmacro(install_with_directory)
-
-  # Find the test files we want to install
-  set(test_files1 "")
-  if (EXISTS "${CMAKE_SOURCE_DIR}/.git")
-    execute_process(
-      COMMAND git ls-files tests
-      WORKING_DIRECTORY ${SPHERAL_ROOT_DIR}
-      OUTPUT_VARIABLE test_files1)
-  else()
-    execute_process(
-      COMMAND find tests -type f
-      WORKING_DIRECTORY ${SPHERAL_ROOT_DIR}
-      OUTPUT_VARIABLE test_files1)
-  endif()
-  string(REPLACE "\n" " " test_files ${test_files1})
-  separate_arguments(test_files)
-  list(REMOVE_ITEM test_files tests/unit/CXXTests/runCXXTests.ats)
-  install_with_directory(
-    FILES ${test_files}
-    SOURCE ${SPHERAL_ROOT_DIR}
-    DESTINATION ${SPHERAL_TEST_INSTALL_PREFIX})
 endif()

 include(${SPHERAL_ROOT_DIR}/cmake/SpheralConfig.cmake)
diff --git a/docs/developer/dev/diagnostic_tools.rst b/docs/developer/dev/diagnostic_tools.rst
index f79cc14b2..782bfc72f 100644
--- a/docs/developer/dev/diagnostic_tools.rst
+++ b/docs/developer/dev/diagnostic_tools.rst
@@ -1,95 +1,119 @@
-Code Performance Diagnostics
-############################
+Code Debugging and Diagnostics
+##############################

-Spheral uses Caliper to preform code diagnostics, such as timing. To enable this functionality in the code, Spheral needs to be configured with ``ENABLE_TIMER=ON``. Otherwise, the timing regions are no-ops for improved preformance.
+Valgrind
+========
+
+We advise using Valgrind to check for memory leaks when doing development on Spheral.
+When using Valgrind to check Spheral, be sure to use the provided suppression file
 ::

-   ./scripts/devtools/host-config-build.py -.cmake -DENABLE_TIMER=ON
+   valgrind --suppressions=./scripts/devtools/valgrind_python_suppression ./spheral

-Querying using Caliper
-======================
+Using Caliper
+=============
+
+Spheral uses Caliper to perform code diagnostics, such as timing. To enable this functionality in the code, Spheral needs to be configured with ``ENABLE_TIMER=ON``. Otherwise, the timing regions are no-ops for improved performance.
+::
+
+   ./scripts/devtools/host-config-build.py -.cmake -DENABLE_TIMER=ON

 Caliper is configured and started through the ``cali::ConfigManager``. The ``cali::ConfigManager`` is wrapped in a ``TimerMgr`` singleton class, which has a python interface.

 .. note::
-   ``TimerMgr`` is initialized and started during ``commandLine()`` in ``src/SimulationControl/SpheralOptionParser.py``. This is because ``commandLine()`` is almost always invoked directly near the start of a problem. However, if ``commandLine()`` is not called, the timers would need to be configured and started directly using the ``TimerMgr`` class. See :ref:`below <manual_caliper>` for more details.
+   ``TimerMgr`` is initialized in ``src/SimulationControl/SpheralTimingParser.py``, which is called during ``commandLine()`` in ``src/SimulationControl/SpheralOptionParser.py``. This is because ``commandLine()`` is almost always invoked directly near the start of a problem. However, if ``commandLine()`` is not called, the timer manager would need to be configured and started directly using the ``TimerMgr`` class. See :ref:`below <manual_caliper>` for more details.

 By default, the Caliper configuration is set to ``spot`` and outputs Caliper files (``.cali``).
-For the default configuration, the Caliper files are named based on what file is being run, for example:
-::

-   python Noh-cylindrical-2d.py
+There are many different Caliper configurations for viewing various kinds of information. Here are some extra links for those who want to read or experiment with other features in Caliper that can be incorporated into Spheral:

-will produce a timing file called ``Noh-cylindrical-2d_YEAR_MONTH_DATE_TIME.cali`` where the file name includes the current date and time.
+ * `Configuration basics `_
+ * `Builtin Configuration `_
+ * `Manual Configuration `_
+ * `Output Format `_

-The Caliper file name can be specified using the command line
-::
+Caliper and Adiak Options
+-------------------------

-   python Noh-cylindrical-2d.py --caliperFilename 'new_test_name.cali'
+.. option:: --caliperFilename

-Different Caliper configurations can be set at the command line using ``--caliperConfig`` like so
-::
+   Name of the Caliper timing file. Should include the file extension. Optional, default: ``name_of_file_YEAR_MONTH_DATE_TIME.cali``.

-   python Noh-cylindrical-2d.py --caliperConfig 'runtime-report(output=time.txt),calc.inclusive,region.count'
+.. option:: --caliperConfig CONFIG_STR

-.. note::
-   The above configuration produces timing results similar to the previous ``Spheral::Timer`` method. This results in a file named ``time.txt`` with cumulative times for the nested regions as well as a count of how many times each region ran.
+   Specify a built-in Caliper configuration or turn off timers with ``none``. Optional, default: ``spot``.

-Similarly, a non-default Caliper configuration can be read in from a JSON file using ``--caliperConfigJSON`` and providing the file name.
-Lastly, Caliper timers can be turned off using ``--caliperConfig none``.
+   **Example**:
+   ::

-There are many different Caliper configurations to view various information. Here are some extra links for those who want to read or experiment with other features in Caliper that can be incorporated into Spheral:
+      ./spheral ex_prog.py --caliperConfig 'runtime-report(output=time.txt),calc.inclusive,region.count'

- * `Configuration basics `_
- * `Builtin Configuration `_
- * `Manual Configuration `_
- * `Output Format `_
+.. note::
+   The configuration in the example above produces timing results similar to the previous ``Spheral::Timer`` method. This results in a file named ``time.txt`` with cumulative times for the nested regions as well as a count of how many times each region ran.

+.. option:: --caliperConfigJSON JSON_FILE

-Adding Region Timers in C++
-===========================
+   Specify a JSON file containing a non-default Caliper configuration. Optional.
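Since the default ``spot`` configuration writes ``.cali`` files, results can also be inspected programmatically with Caliper's ``caliperreader`` python package; the new ``scripts/gitlab/performance_analysis.py`` added in this diff uses the same API. A minimal sketch, assuming ``caliperreader`` is importable and using a hypothetical file name:

.. code-block:: python

   # Minimal sketch: read back a .cali file written by the default "spot"
   # configuration. Assumes caliperreader is on PYTHONPATH; the file name
   # below is hypothetical.
   import caliperreader as cr

   r = cr.CaliperReader()
   r.read("Noh-cylindrical-2d_2024_01_01_120000.cali")

   # Adiak metadata recorded for the run
   for key, value in r.globals.items():
       print(key, "=", value)

   # Records carry a region path plus timer attributes
   for rec in r.records:
       if "region" in rec:
           region = rec["region"]
           if isinstance(region, list):   # nested regions arrive as a path
               region = region[-1]
           print(region)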
-So far there are two different types of regions in Spheral, using the following macros:
-::
+.. option:: --adiakData ADIAK_DATA_STR

-   TIME_FUNCTION
+   Specify any Adiak data directly in the command line. Must be a string in key:value format, separated by commas. Optional.

-or
+   **Example**:
+   ::

-::
+      ./spheral ex_prog.py --adiakData "test_name: the_cheat, test_num:10"

-   TIME_BEGIN("timer_name")
-   TIME_END("timer_name")
+.. note::
+   By default, all ``commandLine()`` inputs are added as Adiak metadata. ``--adiakData`` is for metadata that does not come through Spheral command line arguments. Adiak metadata can also be added through the python interface. See :ref:`below <python_adiak>` for more details.

+Adding Region Timers in C++
+---------------------------
+
+The following macros are used to create timing regions in the Spheral C++ interface:
+
 - ``TIME_FUNCTION`` can be added to the very beginning of a function and creates a region for the entire function using the function's name. ``TIME_FUNCTION`` uses just the function name and no class or parameter information, so be careful when using this method with functions that could share names.

 - ``TIME_BEGIN("timer_name")`` and ``TIME_END("timer_name")`` create a region between the two different calls and use the string (in this case ``timer_name``) as the name.

 Adding Region Timers in Python
-==============================
+------------------------------

 Region timers can be added inside the python code using the following function calls:
 ::

+   from SpheralUtilities import TimerMgr
    TimerMgr.timer_start("timer_name")
    some_function_call()
    TimerMgr.timer_end("timer_name")

 .. note::
-   IMPORTANT: All timers must have both a start and end call. Otherwise, memory issues will occur.
+   All timers must have both a start and end call. Otherwise, memory issues will occur.
+
+.. _python_adiak:
+
+Adding Adiak Metadata in Python
+-------------------------------
+
+Adiak metadata can be added inside python code using the following function calls:
+
+.. code-block:: python
+
+   adiak_values("value_name", value)

 .. _manual_caliper:

 Starting Caliper Manually
-========================
+=========================

-As mentioned above, Caliper (not an individual Caliper timer) is normally configured and started in ``commandLine()`` python routine. However, Caliper can be directly configured and started through the python interface, if desired. This can be done by putting the following into the python file:
+As mentioned above, the Caliper timing manager is normally configured and started in the ``commandLine()`` routine. However, Caliper can be directly configured and started through the python interface, if desired. This can be done by putting the following into the python file:
 ::

+   from SpheralUtilities import TimerMgr
    caliper_config = "some_configuration(output=some_filename.txt)"
    TimerMgr.add(caliper_config)
    TimerMgr.start()
diff --git a/docs/developer/dev/docker_dev_env.rst b/docs/developer/dev/docker_dev_env.rst
index 3618b4125..625732622 100644
--- a/docs/developer/dev/docker_dev_env.rst
+++ b/docs/developer/dev/docker_dev_env.rst
@@ -10,9 +10,9 @@ on local machines.
 Creating a Dev Environment
 ===========================

-We will use ``docker dev create`` with our spheral docker image and a
-local repository. This will allow us to skip setting up a linux system with
-external packages, gives us pre-built TPLs and allows us to edit a cloned
+We will use ``docker dev create`` with our Spheral docker image and a
+local repository. This allows us to skip setting up a linux system with
+external packages, gives us pre-built TPLs, and allows us to edit a cloned
 repository from our local machines IDE/text editor.bash::

   > rm /compose-dev.yaml
@@ -46,18 +46,18 @@ repository from our local machines IDE/text editor.bash::
 Connecting to a Dev Container
 =============================

-Once the continaer has ben started you can connect directly through the terminal
+Once the container has been started, you can connect directly through the terminal
 with the **Container** name (**NOT** the **Dev Environment** name).::

   > docker exec -it spheral-recursing_darwin-app-1 /bin/bash
   root@671dab5d0b00:/home/spheral/workspace/build_docker-gcc/install#

 This drops you into the install location of the ``spheral@develop`` build from
-github, this is a fully installed version of the latest ``develop`` spheral.
+GitHub; this is a fully installed version of the latest ``develop`` Spheral.

-.. tip::
+.. tip::
    VSCode & Docker Desktop:

-   * Open **Docker Desktop** and navigate to the **Dev Environment** tab.
+   * Open **Docker Desktop** and navigate to the **Dev Environment** tab.
    * Find the container name and select **OPEN IN VSCODE**.

@@ -68,8 +68,8 @@ Development Work

 Your local Spheral repo is mounted from your local filesystem. You can develop directly from your IDE or text editor of choice. Then you can compile and run from within the container itself.

-- The local Spheral repository will be mounted in the container at ``/com.docker.devenvironments.code/``.
+- The local Spheral repository will be mounted in the container at ``/com.docker.devenvironments.code/``.

-- There already exists a full build and install of Spheral at ``develop`` in ``/home/spheral/workspace/build_docker-gcc/install``.
+- There already exists a full build and install of Spheral at ``develop`` in ``/home/spheral/workspace/build_docker-gcc/install``.

 - An updated host config file can be found at ``/home/spheral/workspace/docker-gcc.cmake``.
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index cca28a506..19001e771 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -8,78 +8,37 @@ if (NOT ENABLE_CXXONLY)
   # our virtual env in spheral-setup-venv.sh
   string(REGEX REPLACE "lib\/python3.9\/site-packages\/?[A-Za-z]*:" ";" VIRTUALENV_PYTHONPATH_COPY "${SPACK_PYTHONPATH}:")

-  set(SPHERAL_ATS_BUILD_CONFIG_ARGS )
-
-  if (CMAKE_BUILD_TYPE STREQUAL "Debug")
-    list(APPEND SPHERAL_ATS_BUILD_CONFIG_ARGS "--filter='\"level<100\"'")
-  endif()
-
-  if (NOT ENABLE_MPI)
-    list(APPEND SPHERAL_ATS_BUILD_CONFIG_ARGS "--filter='\"np<2\"'")
-  endif()
-
-  if (NOT SPHERAL_ENABLE_FSISPH)
-    list(APPEND SPHERAL_ATS_BUILD_CONFIG_ARGS "--filter='\"not fsisph\"'")
-  endif()
-
-  if (NOT SPHERAL_ENABLE_GSPH)
-    list(APPEND SPHERAL_ATS_BUILD_CONFIG_ARGS "--filter='\"not gsph\"'")
-  endif()
-
-  if (NOT SPHERAL_ENABLE_SVPH)
-    list(APPEND SPHERAL_ATS_BUILD_CONFIG_ARGS "--filter='\"not svph\"'")
-  endif()
-
-  if ($ENV{SYS_TYPE} MATCHES ".*blueos.*")
-    list(APPEND SPHERAL_ATS_BUILD_CONFIG_ARGS "--addOp --smpi_off")
-  endif()
-
-  string(REPLACE ";" " " SPHERAL_ATS_BUILD_CONFIG_ARGS_STRING "${SPHERAL_ATS_BUILD_CONFIG_ARGS}")
-
   configure_file(
     "${CMAKE_CURRENT_SOURCE_DIR}/spheral-setup-venv.in"
     "${CMAKE_CURRENT_BINARY_DIR}/spheral-setup-venv.sh"
-    )
+  )

   configure_file(
     "${CMAKE_CURRENT_SOURCE_DIR}/spheral-env.in"
     "${CMAKE_CURRENT_BINARY_DIR}/spheral-env.sh"
-    )
+  )

   configure_file(
     "${CMAKE_CURRENT_SOURCE_DIR}/atstest.in"
     "${CMAKE_CURRENT_BINARY_DIR}/atstest.sh"
-    )
-
-  configure_file(
-    "${CMAKE_CURRENT_SOURCE_DIR}/lcatstest.in"
-    "${CMAKE_CURRENT_BINARY_DIR}/lcatstest.sh"
-    )
-
-  configure_file(
-    "${CMAKE_CURRENT_SOURCE_DIR}/performance/performance.py.in"
-    "${CMAKE_CURRENT_BINARY_DIR}/performance/performance.py"
   )

-  install(FILES
+  install(FILES
     "${CMAKE_CURRENT_BINARY_DIR}/spheral-setup-venv.sh"
     "${CMAKE_CURRENT_BINARY_DIR}/spheral-env.sh"
     "${CMAKE_CURRENT_BINARY_DIR}/atstest.sh"
-    "${CMAKE_CURRENT_BINARY_DIR}/lcatstest.sh"
-    "${CMAKE_CURRENT_SOURCE_DIR}/lc/lcats"
+    "${CMAKE_CURRENT_SOURCE_DIR}/spheral_ats.py"
+    "${CMAKE_CURRENT_SOURCE_DIR}/gitlab/performance_analysis.py"
+    "${CMAKE_CURRENT_SOURCE_DIR}/spheralutils.py"
     DESTINATION "${CMAKE_INSTALL_PREFIX}/scripts"
     )

-  install(FILES
-    "${CMAKE_CURRENT_BINARY_DIR}/performance/performance.py"
-    DESTINATION "${CMAKE_INSTALL_PREFIX}/tests"
-    )
-
   install(CODE "execute_process( \
     COMMAND env PYTHONPATH=${SPACK_PYTHONPATH} ${PYTHON_EXE} -m venv .venv --without-pip --prompt \
     'Spheral>')"
     )

+  # Copy over all of the python TPL files, with a few exceptions
   foreach(_venv_dir ${VIRTUALENV_PYTHONPATH_COPY})
     if(NOT ${_venv_dir} MATCHES "sphinx")
       install(DIRECTORY ${_venv_dir}
diff --git a/scripts/atstest.in b/scripts/atstest.in
index c46f4d31c..e1262dccb 100644
--- a/scripts/atstest.in
+++ b/scripts/atstest.in
@@ -1,3 +1,3 @@
 #!/usr/bin/env bash

-@CMAKE_INSTALL_PREFIX@/.venv/bin/ats -e @CMAKE_INSTALL_PREFIX@/spheral @SPHERAL_ATS_BUILD_CONFIG_ARGS_STRING@ "$@"
+@CMAKE_INSTALL_PREFIX@/spheral @CMAKE_INSTALL_PREFIX@/scripts/spheral_ats.py "$@"
diff --git a/scripts/devtools/host-config-build.py b/scripts/devtools/host-config-build.py
index 0c90c45e8..557b23ae6 100755
--- a/scripts/devtools/host-config-build.py
+++ b/scripts/devtools/host-config-build.py
@@ -31,6 +31,9 @@ def parse_args():
     parser.add_argument('--build', action='store_true',
                         help='Run make -j install after configuring build dirs.')

+    parser.add_argument('--nprocs', type=int, default=48,
+                        help="Set number of procs to use while building. This is not used if --build is not enabled.")
+
     parser.add_argument('--lc-modules', type=str, default="",
                         help='LC Modules to use during build, install and smoke test. This is not used if --build is not enabled.')

@@ -106,8 +109,9 @@ def main():
         print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
         print("~~~~~ Building Spheral")
         print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
+        build_cmd = f"{ml_cmd} {cmake_cmd} --build . --target install -j {args.nprocs}"

-        sexe("{0} {1} --build . -j 48 --target install".format(ml_cmd, cmake_cmd), echo=True, ret_output=False)
+        sexe(build_cmd, echo=True, ret_output=False)

 if __name__ == "__main__":
     main()
diff --git a/scripts/gitlab/performance_analysis.py b/scripts/gitlab/performance_analysis.py
new file mode 100644
index 000000000..49a401b78
--- /dev/null
+++ b/scripts/gitlab/performance_analysis.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python3
+
+import os, sys, shutil, glob
+import argparse
+
+import SpheralConfigs
+
+# Location of benchmark data
+benchmark_dir = "/usr/gapps/Spheral/benchmarks"
+
+caliper_loc = SpheralConfigs.caliper_module_path()
+sys.path.append(caliper_loc)
+import caliperreader as cr
+
+def main():
+    #---------------------------------------------------------------------------
+    # Setup argument parser
+    #---------------------------------------------------------------------------
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--atsOutput", type=str, required=True,
+                        help="Path to atsr.py file produced from running performance.py")
+    args = parser.parse_args()
+
+    atsFile = args.atsOutput
+    if (os.path.isdir(args.atsOutput)):
+        atsFile = os.path.join(args.atsOutput, "atsr.py")
+    if (not os.path.exists(atsFile)):
+        raise Exception("ATS file not found")
+    # Run atsr.py and put values into globals
+    exec(compile(open(atsFile).read(), atsFile, 'exec'), globals())
+    state = globals()["state"]
+    tests = state["testlist"]
+    for test in tests:
+        # Retrieve the Caliper file from run
+        run_dir = test["directory"]
+        options = test["options"]
+        cali_file = options["caliper_filename"]
+        cfile = os.path.join(run_dir, cali_file)
+        install_config = options["install_config"]
+
+        # Grab list of regions and timers
+        ref_regions = options["regions"]
+        ref_timers = options["timers"]
+        # Read Caliper file
+        r = cr.CaliperReader()
+        r.read(cfile)
+        # Get adiak metadata
+        gls = r.globals
+        test_name = gls["test_name"]
+
+        # Extract relevant regions and timers
+        times = {}
+        for rec in r.records:
+            if ("region" in rec):
+                fname = rec["region"]
+                if (type(fname) is list):
+                    fname = fname[-1]
+                if (fname in ref_regions):
+                    if (fname in times):
+                        for t in ref_timers:
+                            times[fname][t] += float(rec[t])
+                    else:
+                        new_dict = {}
+                        for t in ref_timers:
+                            new_dict.update({t: float(rec[t])})
+                        times.update({fname: new_dict})
+        # Get historical timing data
+        cali_ref_dir = os.path.join(benchmark_dir, install_config, test_name)
+        if (not os.path.exists(cali_ref_dir)):
+            os.makedirs(cali_ref_dir)
+            shutil.copyfile(cfile, os.path.join(cali_ref_dir, cali_file))
+
+if __name__=="__main__":
+    main()
diff --git a/scripts/gitlab/run_ats.py b/scripts/gitlab/run_ats.py
deleted file mode 100755
index 0e86c701b..000000000
--- a/scripts/gitlab/run_ats.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python3
-
-import sys, subprocess, argparse, os
-
-sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
-from spheralutils import sexe
-
-# If the number of failed tests exceeds this value, ATS is not rerun
-max_test_failures = 10
-# Number of times to rerun 
the ATS tests -max_reruns = 1 - -#------------------------------------------------------------------------------ - -def parse_args(): - parser = argparse.ArgumentParser() - - # Spec args - parser.add_argument('--test-alloc', type=str, nargs="+", - help='Allocation command for the machine.') - parser.add_argument('--ats-file', type=str, - help='ATS test file to run.') - parser.add_argument('--ci-build-dir', type=str, - help='CI build directory.') - parser.add_argument('--ci-install-dir', type=str, - default="build_gitlab/install", - help="Location of Spheral installation "+\ - "relative to --ci-build-dir") - return parser.parse_args() - -#------------------------------------------------------------------------------ - -# Run ats.py to check results and return the number of failed tests -def report_results(output_dir): - ats_py = os.path.join(output_dir, "atsr.py") - if (not os.path.exists(ats_py)): - print(f"{ats_py} does not exists") - sys.exit(1) - exec(compile(open(ats_py).read(), ats_py, 'exec'), globals()) - state = globals()["state"] - failed_tests = [t for t in state['testlist'] if t['status'] in [FAILED,TIMEDOUT] ] - if len(failed_tests) > 0: - print(f"ATS failed {len(failed_tests)} tests.") - for t in failed_tests: - print(t['name']) - return len(failed_tests) - else: - print("ATS passed all tests.") - return 0 - -#------------------------------------------------------------------------------ - -# Run the tests and check if any failed -def run_and_report(run_command, ci_output, num_runs): - if (num_runs > max_reruns): - print("Exceeded number of ATS reruns") - sys.exit(1) - sexe(run_command) - tests_passed = report_results(ci_output) - if (tests_passed == 0): - if (num_runs > 0): - print("WARNING: Some tests were run multiple times") - sys.exit(0) - # This should be added back in once Jacamar can handle exit codes properly - # if (num_runs == 0): - # sys.exit(0) - # else: - # sys.exit(80) - elif (tests_passed >= max_test_failures): - print("Too many test failures, not rerunning ATS") - sys.exit(1) - else: - rerun_command = run_command - if (num_runs == 0): - ats_cont_file = os.path.join(ci_output, "continue.ats") - if (not os.path.exists(ats_cont_file)): - print(f"{ats_cont_file} not found, ATS cannot be rerun") - sys.exit(1) - rerun_command = f"{run_command} {ats_cont_file}" - print("WARNING: Test failure, rerunning ATS") - run_and_report(rerun_command, ci_output, num_runs + 1) - -#------------------------------------------------------------------------------ - -def run_ats_test(args): - build_gl_dir = os.path.join(args.ci_build_dir, args.ci_install_dir) - ats_file = os.path.join(build_gl_dir, args.ats_file) - if (not os.path.exists(ats_file)): - print(f"{ats_file} does not exists") - sys.exit(1) - lcats_test = os.path.join(build_gl_dir, "spheral-lcatstest") - if (not os.path.exists(lcats_test)): - print(f"{lcats_test} does not exists") - ats_configs = ' --timelimit="45m"' - test_alloc = " ".join(args.test_alloc) - run_command = f"{test_alloc} {lcats_test} --logs test-logs {ats_file} {ats_configs}" - ci_output = os.path.join(args.ci_build_dir, "test-logs") - run_and_report(run_command, ci_output, 0) - -#------------------------------------------------------------------------------ - -def main(): - args = parse_args() - run_ats_test(args) - -if __name__ == "__main__": - main() diff --git a/scripts/lc/lcats b/scripts/lc/lcats deleted file mode 100755 index eb16e4171..000000000 --- a/scripts/lc/lcats +++ /dev/null @@ -1,1022 +0,0 @@ -#!/usr/bin/env python3 - -from builtins import str 
-from builtins import object -import os, string, time, sys -import getopt -import time -import platform -import sys -import optparse, re, copy -import subprocess - -d_debug= 0 - -SYS_TYPE = os.environ.get('SYS_TYPE','') -# This is better than platform.node() some of the time, because it differentiates between jade, jadeita, and jadedev. -LCSCHEDCLUSTER = os.environ.get('LCSCHEDCLUSTER',platform.node()) - -def cpu_count(): - """Reliably return the number of physical cores. - multiprocessing.cpu_count() and psutil.cpu_count() do not do this correctly. - Instead parse the output of 'lscpu'. - """ - if 'rzwhippet' in platform.node(): - return 56 - elif 'rzgenie' in platform.node() or 'ruby' in platform.node(): - return 36 - else: - try: - p = subprocess.run(["lscpu",], stdout=subprocess.PIPE, text=True) - except Exception as e: - print ("Error running lscpu to get cpu count\n") - sys.exit(1) - - out = p.stdout - lines = out.split('\n') - - for line in lines: - key, value = line.split(":") - if key == "Core(s) per socket": - cores_per_socket = int(value) - if key == "Socket(s)": - sockets = int(value) - break - - return (cores_per_socket * sockets) - - -#--------------------------------------------------------------------------- -def createBsubFile(inCommand, inAllOptions): - inFilename= inAllOptions.msubFilename - - FILE= open(inFilename, "w") - - bsubOutputFilename= inFilename + ".out" - - import platform - thisNode= platform.node() - - #BSUB -G guests - #BSUB -o jobRetry.output # output is sent to file job.output - #BSUB -J nightlyBlueosBuild # name of the job - #BSUB -W 240 # alloc time - was defaulting to 30 minutes - FILE.write("#BSUB -G %s \n" % machineSettings.options.group) - FILE.write("#BSUB -o " + bsubOutputFilename + "\n") - FILE.write("#BSUB -J " + inFilename + "\n") - FILE.write("#BSUB -W %d \n" % machineSettings.options.allocTime ) - FILE.write("#BSUB -n " + str(machineSettings.options.numProcs) + "\n") - FILE.write("\n\n") - FILE.write("setenv MACHINE_TYPE " + machineSettings.options.machineType + '\n') - FILE.write("setenv SYS_TYPE " + SYS_TYPE + '\n') - - FILE.write(""+ '\n') - FILE.write("date"+ '\n') - FILE.write("cd " + os.getcwd() + "\n") - FILE.write(inCommand+ '\n') - - FILE.write("date"+ '\n') - FILE.close() - return inFilename - -#--------------------------------------------------------------------------- -def createMsubFile(inCommand, inAllOptions): - - inFilename= inAllOptions.msubFilename - - FILE= open(inFilename, "w") - - msubOutputFilename= inFilename + ".out" - - import platform - thisNode= platform.node() - - FILE.write("#!/bin/tcsh" + '\n') - FILE.write("\n") - FILE.write("#MSUB -N " + inFilename + '\n') - FILE.write("#MSUB -j oe "+ '\n') # directs all err output to stdout ") - FILE.write("#MSUB -o " + msubOutputFilename + '\n') - FILE.write("#MSUB -l nodes=" + str(machineSettings.options.numNodes)+ ":ppn=" + str(cpu_count()) + '\n') - FILE.write("#MSUB -l walltime=%d:00\n" % machineSettings.options.allocTime ) -# FILE.write("#MSUB -V # exports all environment var "+ '\n') - - if machineSettings.options.name != 'cray': - FILE.write("#MSUB -q " + machineSettings.options.partition + '\n') - FILE.write("#MSUB -l gres=ignore "+ '\n') - FILE.write("#MSUB -A " + machineSettings.options.bank + " #bank to use "+ '\n') - - FILE.write(""+ '\n') - - # LLNL specific - if machineSettings.options.name == 'cray': - FILE.write("source " + "/usr/projects/kull/developers/tools/kull_cshrc.csh " + '\n') - - # rzmerl and zin specific - increase limits to avoid pthread_create 
errors. - if machineSettings.options.name == 'chaos5BatchCapable': - FILE.write('limit maxproc 7168'+'\n') - FILE.write('limit descriptors 7168'+'\n') - - FILE.write("setenv MACHINE_TYPE " + machineSettings.options.machineType + '\n') - FILE.write("setenv SYS_TYPE " + SYS_TYPE + '\n') - - FILE.write(""+ '\n') - FILE.write("date"+ '\n') - FILE.write("cd " + os.getcwd() + "\n") - FILE.write(inCommand+ '\n') - - FILE.write("date"+ '\n') - - - FILE.close() - return inFilename - -#--------------------------------------------------------------------------- -def createSbatchFile(inCommand, inAllOptions): - ''' Some clusters don't have msub wrappers so we use sbatch directly. - Options are still the same as those used for the msub just the submission is different - so reusing most of the same variables here. - ''' - inFilename= inAllOptions.msubFilename - - FILE= open(inFilename, "w") - - sbatchOutputFilename= inFilename + ".out" - sbatchErrorFilename= inFilename + ".error" - - import platform - thisNode= platform.node() - - FILE.write("#!/bin/tcsh" + '\n') - FILE.write("\n") - FILE.write("#SBATCH --job-name=" + inFilename + '\n') - FILE.write("#SBATCH --error="+ sbatchErrorFilename + '\n') # directs all err output to stdout ") - FILE.write("#SBATCH --output="+ sbatchOutputFilename + '\n') # directs all other output to stdout ") - FILE.write("#SBATCH --nodes=" + str(machineSettings.options.numNodes)+ "\n") - FILE.write("#SBATCH --ntasks=" + str(cpu_count()) +"\n") # Is this OKay? Not sure if we want to default ntasks. - FILE.write("#SBATCH --time=%d\n" % machineSettings.options.allocTime ) - - if machineSettings.options.name != 'cray': - FILE.write("#SBATCH --partition=" + machineSettings.options.partition + '\n') - FILE.write("#SBATCH --account=" + machineSettings.options.bank + " #bank to use "+ '\n') - - FILE.write(""+ '\n') - - # LLNL specific - FILE.write("setenv MACHINE_TYPE " + machineSettings.options.machineType + '\n') - FILE.write("setenv SYS_TYPE " + SYS_TYPE + '\n') - - FILE.write(""+ '\n') - FILE.write("date"+ '\n') - FILE.write("cd " + os.getcwd() + "\n") - FILE.write(inCommand+ '\n') - - FILE.write("date"+ '\n') - - - FILE.close() - return inFilename - -#--------------------------------------------------------------------------- -from optparse import SUPPRESS_HELP -class NoErrOptionParser(optparse.OptionParser): - # Found this online... 
modified some - def __init__(self,*args,**kwargs): - self.valid_args_cre_list = [] - optparse.OptionParser.__init__(self, *args, **kwargs) - - def error(self,msg): - optparse.OptionParser.error(self,msg) - pass - - def add_option(self,*args,**kwargs): - self.valid_args_cre_list.append(re.compile('^'+args[0] + "[ =]")) - self.valid_args_cre_list.append(re.compile('^' + args[0] + '$')) - optparse.OptionParser.add_option(self, *args, **kwargs) - - def parse_args(self,*args,**kwargs): - # filter out invalid options - args_to_parse = args[0] - # all args are stored in args_to_parse - new_args_to_parse = [] - for a in args_to_parse: - for cre in self.valid_args_cre_list: - if cre.match(a): - new_args_to_parse.append(a) - break - elif not a.startswith("-"): - new_args_to_parse.append(a) - break - - #args that'll be used are stored in new_args_to_parse - # remove old values and insert the new - while len(args_to_parse) > 0: - args_to_parse.pop() - for a in new_args_to_parse: - args_to_parse.append(a) - - return optparse.OptionParser.parse_args(self,*args,**kwargs) - -#------------------------------------------------------------------------ -class AttributeDict (dict): - """A dictionary whose items can be accessed as attributes.""" - def __getattr__(self, name): - return self[name] - def __setattr__(self, name, value): - self[name] = value - def __repr__(self): - from io import StringIO - out = StringIO() - print("AttributeDict(", file=out) - keys = list(self.keys()) - keys.sort() - for key in keys: - print(" ", key, " = ", repr(self[key]), ",", file=out) - print(")", file=out) - s = out.getvalue() - out.close() - return s - __str__ = __repr__ -#------------------------------------------------------------------------ - -class MachineInfo (object): - - def __init__ (self, **options): - "Must not throw an exception -- object must always get created." 
- super(MachineInfo, self).__init__() - - - self.options = AttributeDict( - # Run settings - - name= '', - allocTime= '', - machineType= '', - batch = True, - numNodes=4, - numProcs= None, - group = '', - partition= 'pbatch', - - atsArgs= [], - - ) - try: - self.options.update(options) - except Exception as e: - self.set(INVALID, 'Bad options: ' + e) - return - - -#--------------------------------------------------------------------------- -# MAIN -#--------------------------------------------------------------------------- - -#--------------------------------------------------------------------------- -# Setup option parser -#--------------------------------------------------------------------------- -parser= NoErrOptionParser(add_help_option=False) - -(options, args) = parser.parse_args(sys.argv[:]) - -#--------------------------------------------------------------------------- -useCpu= cpu_count() -#--------------------------------------------------------------------------- - -blueosSettings= MachineInfo( - name='blueos', - machineType='blueos_3_ppc64le_ib_p9', - batch= False, - allocTime = 240, - group = "guests", - partition='pdebug', - numProcs = 128, - numNodes=4, - bank='guests', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=40", - "--continueFreq=15", - "--timelimit=120", - "--glue='noDraco=True'", - "--glue='noVisit=True'", - "--glue='noOpacityServer=True'", - "--glue='noCxxUnitTesting=True'", - ], -) - -rzmantaSettings= MachineInfo( - name='blueos', - machineType='rzmanta', - batch= True, - allocTime = 240, - group = "guests", - partition='pdebug', - numProcs = 128, - #numNodes=, - #bank='science', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=8", - "--continueFreq=15", - "--timelimit=480", - "--glue='noDraco=True'", - "--glue='noVisit=True'", - "--glue='noOpacityServer=True'", - "--glue='noCxxUnitTesting=True'", - ], -) - -bgqSettings= MachineInfo( - name='bgq', - machineType='rzuseq', - batch= False, - allocTime = 480, - - partition='pdebug', - numNodes=64, - bank='science', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=8", - "--continueFreq=15", - "--timelimit=480", - "--glue='noDraco=True'", - "--glue='noVisit=True'", - "--glue='noOpacityServer=True'", - "--glue='noCxxUnitTesting=True'", - ], - -) - -chaos5BatchCapable = MachineInfo( - name='chaos5BatchCapable', - machineType='SlurmProcessorScheduled', - batch = True, - partition='pbatch', - numNodes=8, - bank='wbronze', - allocTime = 180, - atsArgs=[ "--allInteractive", - "--glue='independent=True'", - "--continueFreq=15", - "--timelimit=60", - "--npMax=%s" % cpu_count() - ], - -) - -chaos5NotBatchCapable= MachineInfo( - name='chaos5NotBatchCapable', - machineType='SlurmProcessorScheduled', - batch=False, - numNodes=4, - partition='pdebug', - bank='wbronze', - allocTime = 180, - atsArgs=[ "--allInteractive", - "--continueFreq=15", - "--timelimit=60", - "--glue='independent=True'" - ], - -) - -craySettings= MachineInfo( - name='cray', - machineType='cray', - atsArgs=[ - '--allInteractive', - "--timelimit=60", - "--glue='independent=True'", - ], - -) - -chamaSettings= MachineInfo( - name='chama', - machineType='SlurmProcessorScheduled', - batch = False, - partition='nw', - numNodes=8, - bank='FY140244', # TAG's WC_IC - wcid='FY140244', # TAG's WC_IC - allocTime = 240, - atsArgs=[ "--allInteractive", - "--glue='independent=True'", - "--glue='noDraco=True'", - "--glue='noOverlink=True'", - "--glue='noOpacityServer=True'", - 
"--glue='noTracker=True'", - "--timelimit=60", - "--npMax=%s" % cpu_count() - ], - -) - -glorySettings= MachineInfo( - name='glory', - machineType='SlurmProcessorScheduled', - batch = False, - partition='nw', - numNodes=8, - bank='FY140244', # TAG's WC_IC - wcid='FY140244', # TAG's WC_IC - allocTime = 240, - atsArgs=[ "--allInteractive", - "--glue='independent=True'", - # "--glue='noDraco=True'", - "--glue='noOverlink=True'", - "--glue='noOpacityServer=True'", - "--glue='noTracker=True'", - "--timelimit=60", - "--npMax=%s" % cpu_count() - ], - -) - -toss3Settings= MachineInfo( - name='rzgenie', - machineType='SlurmProcessorScheduled', - batch= False, - #allocTime = 240, - allocTime = 180, - partition='pdebug', - #numNodes=4, - numNodes=2, - bank='wbronze', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=%s"%(useCpu), - "--continueFreq=15", - "--timelimit=120", - ], - -) - -toss3Batch= MachineInfo( - name='rztopaz', - machineType='SlurmProcessorScheduled', - batch= True, - allocTime = 360, - partition='pbatch', - numNodes=4, - bank='wbronze', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=36", - "--continueFreq=15", - "--timelimit=120", - ], - -) - -rztopazSettings= MachineInfo( - name='rztopaz', - machineType='SlurmProcessorScheduled', - batch= False, - allocTime = 60, - partition='pdebug', - numNodes=2, - bank='wbronze', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=36", - "--continueFreq=15", - "--timelimit=60", - ], -) - -toss4Settings= MachineInfo( - name='toss4machine', - machineType='slurm36', - batch= False, - allocTime = 180, - partition='pdebug', - numNodes=2, - bank='wbronze', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=36", - "--continueFreq=15", - "--timelimit 120m", - ], -) - -toss4BatchSettings= MachineInfo( - name='toss4BatchMachine', - machineType='slurm36', - batch= True, - allocTime = 180, - partition='pdebug', - numNodes=2, - bank='wbronze', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=36", - "--continueFreq=15", - "--timelimit 120m", - ], -) - -# Ruby settings (same as TOSS4 interactive without pdebug) -rubySettings= MachineInfo( - name='toss4machine', - machineType='slurm36', - batch= False, - allocTime = 180, - numNodes=2, - bank='wbronze', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=36", - "--continueFreq=15", - "--timelimit 120m", - ], -) - -# settings when rzwhippet is running flux natively -rzwhippetSettings= MachineInfo( - name='rzwhippet', - machineType='flux00', - batch= False, - allocTime = 180, - numNodes=2, - bank='wbronze', - atsArgs=[ - "--glue='independent=True'", - '--allInteractive', - "--npMax=%s"%(useCpu), - "--continueFreq=15", - "--timelimit=120", - ], -) - -# Determine machine settings to use -#----------------------------------------------------------------------- -# Determine machine settings to use -# NOTE: -# The options are obtained from argv, the defaults for the options are from the machineSettings -# options are used later to figure what to do.... 
machine settings are used for non-init options -# -#----------------------------------------------------------------------- - -if platform.processor() == 'ppc64': - machineSettings = bgqSettings - -elif 'PRGENVMODULES' in os.environ: # cray machine - machineSettings = craySettings - -elif 'chama' in SYS_TYPE: - machineSettings = chamaSettings - -elif 'glory' in SYS_TYPE: - machineSettings = glorySettings - -elif 'blue' in SYS_TYPE: - machineSettings = blueosSettings - -elif 'toss_3' in SYS_TYPE: - if 'rzgenie' in LCSCHEDCLUSTER or 'jadedev' == LCSCHEDCLUSTER or 'zindev' == LCSCHEDCLUSTER: - # Developer machines are interactive - machineSettings = toss3Settings - elif '--partition=pdebug' in sys.argv and not '--batch' in sys.argv: - # Need short queue settings - machineSettings = rztopazSettings - else: - # Put it in batch. - machineSettings = toss3Batch -elif 'toss_4' in SYS_TYPE: - if 'ruby' in LCSCHEDCLUSTER: - machineSettings = rubySettings - else: - machineSettings = toss4Settings -else: - print("Could not determine machine settings to use.") - sys.exit(1) - -print("Selected machine settings for: ", machineSettings.options.name) - -#---------------------------------------------------------- -# inits and defaults -#---------------------------------------------------------- -import random -import time -ezatsLocaltime = time.localtime() -ezatsStartTime = time.strftime("%y%m%d%H%M%S",ezatsLocaltime) -msubFilenameDefault= "tmpAts." + ezatsStartTime + ".job" -bsubFilenameDefault= "tmpAts." + ezatsStartTime + ".job" - - -#--------------------------------------------------------------------------- -# options affecting machine settings -#--------------------------------------------------------------------------- - -parser.add_option( "--allocTime", action="store", type="int", metavar="minutes", dest="allocTime", - help = "The amount of time for the batch job (in minutes) .") - -parser.add_option( "--interactive", action="store_true", dest="interactive", - help = "Run ats interactively in SLURM (default is false if batch system detected.)") - -parser.add_option( "--machineType", action="store", type="string", metavar="MACHINE_TYPE", dest="machineType", - help="Sets the MACHINE_TYPE for ats.") - -parser.add_option( "--numNodes", action="store", type="int", metavar="number of nodes", dest="numNodes", - help="Number of nodes to allocate for ats to run in.") - -parser.add_option( "--partition", action="store", type="string", metavar="scheduler partition", dest="partition", - help = "Partition in which to run jobs.") - -parser.add_option( "--bank", action="store", type="string", metavar="account to charge",dest="bank", - help = "Bank to use for batch job.") - -parser.add_option("--wcid", action="store", type="string", metavar="WC-ID to assign", dest='wcid', - #default = machineSettings.options.bank, - help = "HERT WC-ID to use for batch job.") - -parser.add_option( "--nogpu", action="store_true", dest="nogpu", - help = "For blueos. Filters out gpu test. Used in conjunction with threaded option.") - -parser.add_option( "--gpuonly", action="store_true", dest="gpuonly", - help = "For blueos nvcc runs. Filters for gpu tests. Used in conjunction with threaded option.") - -parser.add_option( "--sanitize", action="store_true", dest="sanitize", - help = "Run sanitize tests. NOTE These need a specific build to work. 
") - - -#--------------------------------------------------------------------------- -# other options -#--------------------------------------------------------------------------- -parser.add_option( "--msubFilename", action="store", type="string", metavar="msub file name", dest='msubFilename', - default = msubFilenameDefault, help = "The name of the generated ats msub job script that will be run.") - -parser.add_option( "--bsubFilename", action="store", type="string", metavar="msub file name", dest='bsubFilename', - default = bsubFilenameDefault, help = "The name of the generated ats bsub job script that will be run.") - -parser.add_option( '--timelimit', dest='timelimit', default=30, - help='Set the default time limit on each test. The value may be given as a digit followed by an s, m, or h to give the time in seconds, minutes (the default), or hours.') - -# The P2 version is a sym-link to the latest python 2 version of ATS. There's a P3 when we're ready for Python3 -parser.add_option( "--atsExe", action="store", type="string", dest="atsExe", default="/usr/apps/ats/7.0.P3/bin/ats", help="Sets which ats to use.") - -parser.add_option( "--addOp", action="store", type="string", dest="extraEzatsArgs", default='', - help="Adds extra job scheduler option to ezats.") - -parser.add_option( "--skip", action='store_true', dest='skip', default = False, - help='skip actual execution of the tests, but show filtering results and missing test files.') - -parser.add_option( "--testpath", action="store", type="string", dest="testpath", default="", - help="Specifies a path for ezats to use for unique test output.") - -parser.add_option( "--debug-build", action="store_true", dest="debugbuild", default=False, - help="assume we are testing a debug build and should skip expensive (level>=100) tests.") - -(options, args) = parser.parse_args(sys.argv[:]) - -# If running in SLURM, use defaults of less nodes and pdebug partition -if options.interactive: - machineSettings.options.batch = False - machineSettings.options.numNodes = 4 - - if "muir" in platform.node(): - machineSettings.options.partition = 'views' - else: - machineSettings.options.partition = 'pdebug' - - machineSettings.options.allocTime = 60 - -if options.allocTime: - machineSettings.options.allocTime = options.allocTime - -if options.machineType: - machineSettings.options.machineType = options.machineType - -if options.numNodes: - machineSettings.options.numNodes = options.numNodes - -if options.partition: - machineSettings.options.partition = options.partition - -if options.bank: - machineSettings.options.bank = options.bank - -if options.wcid: - machineSettings.options.wcid = options.wcid - -if (d_debug==1): - print("options= ", options) - - -whichAts= sys.argv[0] -atsArgs = sys.argv[1:] - -if "--help" in atsArgs or "-h" in atsArgs or "-help" in atsArgs: - print("------------------------------------------------------------------") - print("Options available for only ezats: ") - print("------------------------------------------------------------------") - parser.print_help() - print("------------------------------------------------------------------") - print("Options for ats: ") - print("------------------------------------------------------------------") - from subprocess import check_call - check_call([options.atsExe, "-h"]) - - print("\n\n------------------------------------------------------------------") - print("ezats sets these ATS options: ") - print("------------------------------------------------------------------") - 
print('\n'.join(machineSettings.options.atsArgs)) - print('\n\n') - - sys.exit(0) - -# Convert array of strings to a string with spaces for delimiters -atsArgs = " ".join(str(x) for x in atsArgs) - -#--------------------------------------------------------------------------- -# Added this section to allow ezats to determine an appropriate filesystem -# to use for testing for this machine. The filesystem can then be passed -# onto to tests in ats that request a location. -# -# The appropriate filesystem to use for each machine is determined by the -# lustre file system summary and max bandwith tables -# https://computing.llnl.gov/?set=resources&page=lc_lustre -#--------------------------------------------------------------------------- - -#def checkFileSystem(path, timeout=4): -# 04/25/23: SD: Increasing the timeout as they're having lustre problems and we -# suspect this is causing failures. Revisit and change back to 4 when lustre issues -# are resolved -def checkFileSystem(path, timeout=30): - - def timeoutFunction( timeout, timeoutReturn, func, *args): - - res = timeoutReturn - - def handleTimeOut( signum, frame): - raise TimeOutException - - import signal - theOldHandler = signal.signal( signal.SIGALRM, handleTimeOut) - signal.alarm(timeout) - try: - try: - res = func(*args) - finally: - signal.signal(signal.SIGALRM, theOldHandler) - except: - pass #catch the TimeOutException - - signal.alarm(0) - - return res - - def canWriteToFileSystem(path): - - from tempfile import TemporaryFile - from os import makedirs - try: - if not os.path.exists(path): - makedirs( path ) - TemporaryFile(dir=path) - except: - return False - - return True - - - return timeoutFunction( timeout, False, canWriteToFileSystem, path ) - -#--------------------------------------------------------------------------- - -#---------------------------------------------------------- -# Examine options: -#---------------------------------------------------------- - -print("Note: the srun message 'error: ioctl(TIOCGWINSZ)' can be ignored. 
\n[It means the process is trying to do something that requires a tty \nbut it's not doing either a read or write.]\n") - -#---------------------------------------------------------- -# get args to add - added threaded option to the ezatsArgs or it would be passed to ats -#---------------------------------------------------------- -batchArgs= ['partition', 'bank', 'wcid'] -ezatsArgs= ['addOp', 'batch', 'interactive', 'name', 'allocTime', 'atsExe', 'machineType', 'bsubFile', 'msubFile', 'bank', 'testpath' , 'threaded', 'gpuonly' ,'nogpu', 'numProcs', 'group', 'sanitize', 'debug-build'] - -# Add glue arg to pass unique file system test path to ats -toAdd= """ --glue='testpath=str("%s")' """ % options.testpath - -if options.sanitize: - toAdd += """ --filter="sanitize==1" """ - -if options.debugbuild: - toAdd += """ --filter="level<100" """ - -for machineArg in machineSettings.options: - if machineSettings.options[machineArg] == '' or \ - machineSettings.options[machineArg] == None: - continue - if machineArg not in atsArgs and \ - machineArg != 'atsArgs' and \ - machineArg not in batchArgs and \ - machineArg not in ezatsArgs: - toAdd += "--" + machineArg + " " + str(machineSettings.options[machineArg]) + " " -toAdd+= " " - -for machineArg in machineSettings.options.atsArgs: - theArg= machineArg.replace("=", " ").split()[0] - if theArg not in atsArgs: - toAdd += machineArg + " " - -atsArgs= options.atsExe + " " + toAdd + atsArgs -finalCommandToRun= atsArgs -#---------------------------------------------------------- -# clean finalCommandToRun -#---------------------------------------------------------- -listCommandsToRemove= ['--'+x for x in ezatsArgs] - -if machineSettings.options.batch: - listCommandsToRemove.append('--batchHost') - listCommandsToRemove.append('--batchT') - listCommandsToRemove.append('--batchP') - listCommandsToRemove.append('--batch ') - listCommandsToRemove.append('--partition') -if machineSettings.options.machineType=='SlurmProcessorScheduled' or machineSettings.options.machineType=='blueos_3_ppc64le_ib_p9': - listCommandsToRemove.append('--numNodes') - listCommandsToRemove.append('--wcid') - -for machineArg in machineSettings.options.atsArgs: - if 'REMOVE' in machineArg: - listCommandsToRemove.append(machineArg) - -# Remove all extra spaces -finalCommandToRun = re.sub(r"\s+", " ", finalCommandToRun.strip()) - -# Remove extra options for both batch and interactive -if len(listCommandsToRemove) > 0: - for unwantedCommand in listCommandsToRemove[:]: - startPos= finalCommandToRun.find(unwantedCommand) - if d_debug: - print("DEBUG: ", unwantedCommand, "-- found start pos= ", startPos) - if startPos!= -1: #if found - endPos= finalCommandToRun.find(" -", startPos+2) - if d_debug: - print("DEBUG: ", "end pos= ", endPos) - - if endPos== -1: - endPos= finalCommandToRun.find("--", startPos+2) - if d_debug: - print("DEBUG: ", "found end pos= ", endPos) - if endPos== -1: # looking at last option, backtrack to find the last space.. 
- endPos= finalCommandToRun.rfind(' ') - if d_debug: - print("DEBUG: ", "rfind() found end pos= ", endPos) - - if endPos < startPos: - finalCommandToRun= finalCommandToRun[:endPos] - else: - finalCommandToRun= finalCommandToRun[:startPos] + finalCommandToRun[endPos:] - - if d_debug: - print("DEBUG: ", unwantedCommand, "-----> ", finalCommandToRun) - -#finalCommandToRun= finalCommandToRun.replace(options.extraEzatsArgs, '', 1) -realFinalCommandToRun= None - -#---------------------------------------------------------- -# if MSUB, SBATCH or BSUB -#---------------------------------------------------------- -if machineSettings.options.batch: - print("--- ATS COMMAND ---\n", finalCommandToRun) - if "blueos" in SYS_TYPE: - bsubFilename= createBsubFile(finalCommandToRun, options) - batchtype = 'bsub < ' # have to have an input file redirect for bsub - print("\nWritten to %s batch filename: %s " %(batchtype, bsubFilename)) - cmd = batchtype + ' ' + bsubFilename - elif 'magma' in LCSCHEDCLUSTER: - sbatchFilename= createSbatchFile(finalCommandToRun, options) - batchtype = 'sbatch' - print("\nWritten to %s batch filename: %s " %(batchtype, sbatchFilename)) - cmd = batchtype + ' ' + sbatchFilename - elif 'mica' in LCSCHEDCLUSTER: - sbatchFilename= createSbatchFile(finalCommandToRun, options) - batchtype = 'sbatch' - print("\nWritten to %s batch filename: %s " %(batchtype, sbatchFilename)) - cmd = batchtype + ' ' + sbatchFilename - else: - msubFilename= createMsubFile(finalCommandToRun, options) - batchtype = 'msub' - print("\nWritten to %s batch filename: %s " %(batchtype, msubFilename)) - cmd = batchtype + ' ' + msubFilename - - if not options.skip: - from subprocess import check_call - print("Running0: ", cmd) - #check_call( cmd.split() ) - os.system( cmd ) - else: - if 'msub' in batchtype: - print("SKIP option in ats command. ' msub ", msubFilename, "' was not executed.") - elif 'sbatch' in batchtype: - print("SKIP option in ats command. ' sbatch ", sbatchFilename, "' was not executed.") - else: - print("SKIP option in ats command. 
' bsub ", bsubFilename, "' was not executed.") - - - sys.exit() - -#---------------------------------------------------------- -# else SALLOC or threaded w/out salloc -#---------------------------------------------------------- -else: - - os.environ["MACHINE_TYPE"] = machineSettings.options.machineType - if machineSettings.options.name in ['rzwhippet_flux']: - os.environ["MACHINE_TYPE"] = "flux00" - os.environ["BATCH_TYPE"] = "None" - - if platform.processor() == 'ppc64': - numProcsLine = "" - else: - numProcsLine = " -n %d" % ( machineSettings.options.numNodes* cpu_count() ) - - if machineSettings.options.allocTime: - if machineSettings.options.name in ['rzwhippet_flux']: - allocTime = "-t %dm" % machineSettings.options.allocTime - else: - allocTime = "--time=%d:00" % machineSettings.options.allocTime - else: - allocTime = "" - - HERT_WC_ID = '' - if machineSettings.options.name in ['chama', 'glory']: - HERT_WC_ID = ' --account=' + machineSettings.options.wcid - - if machineSettings.options.name in ['rzwhippet_flux']: - finalCommandToRun= "flux alloc --exclusive " \ - + " " + allocTime \ - + HERT_WC_ID \ - + options.extraEzatsArgs \ - + " -N " + str(machineSettings.options.numNodes) \ - + numProcsLine + " " \ - + finalCommandToRun - # + " -p " + machineSettings.options.partition + " " - # Threaded tests under ats should NOT use salloc - elif 'blue' not in os.environ['SYS_TYPE']: - finalCommandToRun= "salloc --exclusive " \ - + " " + allocTime \ - + HERT_WC_ID \ - + options.extraEzatsArgs \ - + " -N " + str(machineSettings.options.numNodes) \ - + numProcsLine \ - + " -p " + machineSettings.options.partition + " " \ - + finalCommandToRun - else: - finalCommandToRun += " --numNodes="+ str(machineSettings.options.numNodes) - #sys.exit() - -if (d_debug==1): - print("whichAts= ", whichAts) - print("finalCommandToRun after= ", finalCommandToRun) - -#---------------------------------------------------------- -# Find filter part and keep whole -# -# [05/30] The problem is from splitting the command into argsToUse. If we ran w/ something other than os.execv, maybe this will work correctly. 
-tagFilter= finalCommandToRun.find('--filter') -comboMark= False -for anArg in sys.argv[1:]: - if '--filter' in anArg and ("'" in anArg): - comboMark= True -if tagFilter != -1 and comboMark==True: - startFilter= finalCommandToRun.find("'", tagFilter) - endFilter= finalCommandToRun.find("'", startFilter+1) - filterPart= finalCommandToRun[tagFilter:endFilter+1] - filterPart= filterPart.replace("'", '') - argsToUse= finalCommandToRun[0:tagFilter].split() + [filterPart] + finalCommandToRun[endFilter+1:].split() -else: - argsToUse= finalCommandToRun.split() - -if realFinalCommandToRun is not None: - print("Running:\n ", realFinalCommandToRun) - if (d_debug==1): - print("Really running:\n ", finalCommandToRun) -else: - print("Running:\n ", finalCommandToRun) -if (d_debug==1): - print("atsExe= ", options.atsExe) - print("atsArgs= ", argsToUse) - -from subprocess import check_call -try: - check_call( finalCommandToRun,shell=True ) -except Exception as e: - print("Caught - non-zero exit status 3 - thrown by final command", e) - print("Tests appear to execute correctly...but this output is here to keep an eye on this.") - - diff --git a/scripts/lcatstest.in b/scripts/lcatstest.in deleted file mode 100644 index c13ae04c1..000000000 --- a/scripts/lcatstest.in +++ /dev/null @@ -1,3 +0,0 @@ -#!/usr/bin/env bash - -@CMAKE_INSTALL_PREFIX@/spheral @CMAKE_INSTALL_PREFIX@/scripts/lcats --atsExe @CMAKE_INSTALL_PREFIX@/.venv/bin/ats -e @CMAKE_INSTALL_PREFIX@/spheral @SPHERAL_ATS_BUILD_CONFIG_ARGS_STRING@ "$@" diff --git a/scripts/performance/performance.py.in b/scripts/performance/performance.py.in deleted file mode 100644 index b337e5042..000000000 --- a/scripts/performance/performance.py.in +++ /dev/null @@ -1,18 +0,0 @@ -#!/user/bin/env python3 - -import sys, os -caliper_loc = "@CONFIG_CALIPER_DIR@" -sys.path.append(os.path.join(caliper_loc, "lib64/caliper")) - -import caliperreader as cr - -# Put some filler functions here -def compare_times(manager): - filtered = [test for test in manager.testlist if test.status is PASSED] - for t in filtered: - print(t) - -onExit(compare_times) -glue(keep=True) -source("functional/Hydro/Noh/Noh-cylindrical-2d.py") - diff --git a/scripts/spack/packages/py-ats/package.py b/scripts/spack/packages/py-ats/package.py index 3d69395a1..e915b197c 100644 --- a/scripts/spack/packages/py-ats/package.py +++ b/scripts/spack/packages/py-ats/package.py @@ -1,9 +1,9 @@ -# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other +# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) -from spack import * +from spack.package import * class PyAts(PythonPackage): @@ -12,19 +12,22 @@ class PyAts(PythonPackage): of high performance computers.""" homepage = "https://github.com/LLNL/ATS" - git = "https://github.com/LLNL/ATS.git" + git = "https://github.com/LLNL/ATS.git" - maintainers = ['white238'] + maintainers("white238") - version('main', branch='main') - version('exit', branch='bugfix/exit-code') - version('7.0.100', tag='7.0.100') - version('7.0.9', tag='7.0.9') - version('7.0.5', tag='7.0.5') + license("MIT") + + version("main", branch="main") + version('7.0.117', commit='1aa5c381d201306d16397cc0e76a81b4450438b2') + version("7.0.105", tag="7.0.105", commit="3a3461061d4493a002018f5bb3715db702212f72") + version("7.0.100", tag="7.0.100", commit="202c18d11b8f1c14f1a3361a6e45c9e4f83a3fa1") + version("7.0.5", tag="7.0.5", commit="86b0b18b96b179f97008393170f5e5bc95118867") # TODO: Add flux variant when Flux functionality works in ATS - depends_on("python@3.8:", type=('build', 'run')) - depends_on("py-numpy", type=('build', 'run')) - depends_on('py-setuptools', type='build') - depends_on('py-poetry-core', type='build') + depends_on("python@3.8:", type=("build", "run")) + depends_on("py-numpy", type=("build", "run")) + depends_on("py-setuptools", type="build") + depends_on("py-poetry-core", type="build") + diff --git a/scripts/spack/packages/spheral/package.py b/scripts/spack/packages/spheral/package.py index 8695500ea..c0a1dede4 100644 --- a/scripts/spack/packages/spheral/package.py +++ b/scripts/spack/packages/spheral/package.py @@ -1,9 +1,10 @@ -# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other +# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. 
# # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack.package import * +import spack import socket import os @@ -82,7 +83,7 @@ class Spheral(CachedCMakePackage, CudaPackage): depends_on('py-h5py@3.9.0', type='build') depends_on('py-docutils@0.18.1', type='build') depends_on('py-scipy@1.12.0', type='build') - depends_on('py-ats@exit', type='build') + depends_on('py-ats@7.0.117', type='build') depends_on('py-mpi4py@3.1.5', type='build', when='+mpi') depends_on('py-sphinx', type='build') @@ -101,6 +102,25 @@ def _get_sys_type(self, spec): sys_type = env["SYS_TYPE"] return sys_type + def _get_arch(self): + host_platform = spack.platforms.host() + host_os = host_platform.operating_system("default_os") + host_target = host_platform.target("default_target") + architecture = spack.spec.ArchSpec((str(host_platform), str(host_os), str(host_target))) + spack_arch = str(architecture) + return spack_arch.strip() + + # Create a name for the specific configuration being built + # This name is used to differentiate timings during performance testing + def _get_config_name(self, spec): + arch = self._get_arch() + config_name = f"{arch}_{spec.compiler.name}_{spec.compiler.version}" + if ("+mpi" in spec): + config_name += "_" + spec.format("{^mpi.name}_{^mpi.version}") + if ("+cuda" in spec): + config_name += "_" + spec.format("{^cuda.name}{^cuda.version}") + return config_name.replace(" ", "_") + @property def cache_name(self): @@ -163,6 +183,9 @@ def initconfig_package_entries(self): entries.append(cmake_cache_option('TPL_VERBOSE', False)) entries.append(cmake_cache_option('BUILD_TPL', True)) + entries.append(cmake_cache_string('SPHERAL_SYS_ARCH', self._get_arch())) + entries.append(cmake_cache_string('SPHERAL_CONFIGURATION', self._get_config_name(spec))) + # TPL locations entries.append(cmake_cache_path('caliper_DIR', spec['caliper'].prefix)) diff --git a/scripts/spheral-setup-venv.in b/scripts/spheral-setup-venv.in index fafd20547..2991641e4 100644 --- a/scripts/spheral-setup-venv.in +++ b/scripts/spheral-setup-venv.in @@ -5,7 +5,7 @@ cp @SPHERAL_SITE_PACKAGES_PATH@/Spheral.pth .venv/@SPHERAL_SITE_PACKAGES_PATH@/ mkdir -p .venv/@SPHERAL_SITE_PACKAGES_PATH@/Spheral cd @CMAKE_INSTALL_PREFIX@/.venv/@SPHERAL_SITE_PACKAGES_PATH@/Spheral cp --symbolic-link @CMAKE_INSTALL_PREFIX@/@SPHERAL_SITE_PACKAGES_PATH@/Spheral/* . > /dev/null 2>&1 -cd - > /dev/null +cd - > /dev/null # We need to reconfigure ATS to use our virtual env python otherwise ats will not be able to launch properly. echo "Reconfigure ATS executing python to virtual env python..." @@ -16,11 +16,9 @@ echo "Creating spheral symlink to spheral-env script ..." cd @CMAKE_INSTALL_PREFIX@ chmod u+x scripts/spheral-env.sh chmod u+x scripts/atstest.sh -chmod u+x scripts/lcatstest.sh cp --symbolic-link scripts/spheral-env.sh spheral &> /dev/null -cp --symbolic-link scripts/atstest.sh spheral-atstest &> /dev/null -cp --symbolic-link scripts/lcatstest.sh spheral-lcatstest &> /dev/null -cd - > /dev/null +cp --symbolic-link scripts/atstest.sh spheral-ats &> /dev/null +cd - > /dev/null echo "Byte-compiling packages in install path ..." 
 @CMAKE_INSTALL_PREFIX@/spheral -m compileall -q @CMAKE_INSTALL_PREFIX@/.venv/@SPHERAL_SITE_PACKAGES_PATH@
diff --git a/scripts/spheral_ats.py b/scripts/spheral_ats.py
new file mode 100644
index 000000000..a3da87e01
--- /dev/null
+++ b/scripts/spheral_ats.py
@@ -0,0 +1,208 @@
+#!/usr/bin/env python3
+
+import os, time, sys
+import argparse
+import ats.util.generic_utils as ats_utils
+import SpheralConfigs
+import mpi
+
+# This is a wrapper for running Spheral through ATS
+
+# Options for running CI
+# If the number of failed tests exceeds this value, ATS is not rerun
+max_test_failures = 10
+# Number of times to rerun the ATS tests
+max_reruns = 1
+
+# Use current path to find spheralutils module
+cur_dir = os.path.dirname(__file__)
+# Resolve the symlink (if any) so paths are relative to the real install location
+if (os.path.islink(__file__)):
+    cur_dir = os.path.join(cur_dir, os.readlink(__file__))
+install_prefix = os.path.join(cur_dir, "..")
+ats_exe = os.path.join(install_prefix, ".venv/bin/ats")
+spheral_exe = os.path.join(install_prefix, "spheral")
+sys.path.append(cur_dir)
+from spheralutils import sexe
+
+#------------------------------------------------------------------------------
+# Run atsr.py to check results and return the number of failed tests
+def report_results(output_dir):
+    ats_py = os.path.join(output_dir, "atsr.py")
+    if (not os.path.exists(ats_py)):
+        raise Exception("atsr.py does not exist. Tests likely did not run.")
+    exec(compile(open(ats_py).read(), ats_py, 'exec'), globals())
+    state = globals()["state"]
+    failed_tests = [t for t in state['testlist'] if t['status'] in [FAILED, TIMEDOUT]]
+    if len(failed_tests) > 0:
+        print(f"ATS failed {len(failed_tests)} tests.")
+        for t in failed_tests:
+            print(t['name'])
+        return len(failed_tests)
+    else:
+        print("ATS passed all tests.")
+        return 0
+
+#------------------------------------------------------------------------------
+# Run the tests and check if any failed
+def run_and_report(run_command, ci_output, num_runs):
+    if (num_runs > max_reruns):
+        raise Exception("Exceeded number of ATS reruns")
+    ats_cont_file = os.path.join(ci_output, "continue.ats")
+    new_run_command = run_command
+    if (os.path.exists(ats_cont_file) and num_runs == 0):
+        new_run_command = f"{run_command} {ats_cont_file}"
+        print("Restarting from previous job")
+    try:
+        sexe(new_run_command)
+    except Exception as e:
+        print(e)
+    num_failed = report_results(ci_output)
+    if (num_failed == 0):
+        if (num_runs > 0):
+            print("WARNING: Some tests were run multiple times")
+        sys.exit(0)
+        # This should be added back in once Jacamar can handle exit codes properly
+        # if (num_runs == 0):
+        #     sys.exit(0)
+        # else:
+        #     sys.exit(80)
+    elif (num_failed >= max_test_failures):
+        raise Exception("Too many test failures, not rerunning ATS")
+    else:
+        rerun_command = run_command
+        if (num_runs == 0):
+            ats_cont_file = os.path.join(ci_output, "continue.ats")
+            if (not os.path.exists(ats_cont_file)):
+                raise Exception(f"{ats_cont_file} not found, ATS cannot be rerun")
+            rerun_command = f"{run_command} {ats_cont_file}"
+        print("WARNING: Test failure, rerunning ATS")
+        run_and_report(rerun_command, ci_output, num_runs + 1)
+
+#------------------------------------------------------------------------------
+# Add any build-specific ATS arguments
+def install_ats_args():
+    install_args = []
+    if (SpheralConfigs.build_type() == "Debug"):
+        install_args.append('--level 99')
+    if (mpi.is_fake_mpi()):
+        install_args.append('--filter="np<2"')
+    comp_configs = SpheralConfigs.component_configs()
+    test_comps = ["FSISPH", "GSPH", "SVPH"]
+    for ts in test_comps:
+        if ts not in comp_configs:
+            install_args.append(f'--filter="not {ts.lower()}"')
+    return install_args
+
+#---------------------------------------------------------------------------
+# Main routine
+#---------------------------------------------------------------------------
+def main():
+    test_log_name = "test-logs"
+    toss_machine_names = ["rzgenie", "rzwhippet", "rzhound", "ruby"]
+    blueos_machine_names = ["rzansel", "lassen"]
+    temp_uname = os.uname()
+    hostname = temp_uname[1]
+    sys_type = os.getenv("SYS_TYPE")
+    # Use ATS for some machine-specific functions
+    if "MACHINE_TYPE" not in os.environ:
+        ats_utils.set_machine_type_based_on_sys_type()
+
+    #---------------------------------------------------------------------------
+    # Set up argument parser
+    #---------------------------------------------------------------------------
+    parser = argparse.ArgumentParser(allow_abbrev=False,
+                                     usage="""
+                                     ./spheral-ats --numNodes 2 tests/integration.ats --filter="level<100"
+                                     """,
+                                     description="""
+                                     Launches and runs Spheral using the ATS system.
+                                     Must provide an ATS file (either python or .ats).
+                                     Any unrecognized arguments are passed as inputs to the ATS file.
+                                     """)
+    parser.add_argument("--numNodes", type=int,
+                        default=None,
+                        help="Number of nodes to allocate.")
+    parser.add_argument("--timeLimit", type=int,
+                        default=None,
+                        help="Time limit for allocation.")
+    parser.add_argument("--ciRun", action="store_true",
+                        help="Option to be used only by the CI")
+    parser.add_argument("--atsHelp", action="store_true",
+                        help="Print the help output for ATS. Useful for seeing ATS options.")
+    options, unknown_options = parser.parse_known_args()
+    if (options.atsHelp):
+        sexe(f"{ats_exe} --help")
+        return
+
+    #---------------------------------------------------------------------------
+    # Set up machine-specific launch info
+    #---------------------------------------------------------------------------
+    ats_args = install_ats_args()
+    numNodes = options.numNodes
+    timeLimit = options.timeLimit
+    launch_cmd = ""
+    blueOS = False
+    # These are environment variables that suggest we are already in an allocation
+    # NOTE: CI runs should already be in an allocation so the launch cmd is
+    # unused in those cases
+    inAllocVars = []
+
+    if hostname:
+        mac_args = []
+        if any(x in hostname for x in toss_machine_names):
+            numNodes = numNodes if numNodes else 2
+            timeLimit = timeLimit if timeLimit else 120
+            mac_args = [f"--numNodes {numNodes}"]
+            inAllocVars = ["SLURM_JOB_NUM_NODES", "SLURM_NNODES"]
+            launch_cmd = f"salloc --exclusive -N {numNodes} -t {timeLimit} "
+            if (options.ciRun):
+                launch_cmd += "-p pdebug "
+        elif any(x in hostname for x in blueos_machine_names):
+            blueOS = True
+            numNodes = numNodes if numNodes else 1
+            timeLimit = timeLimit if timeLimit else 60
+            inAllocVars = ["LSB_MAX_NUM_PROCESSORS"]
+            mac_args = ["--smpi_off", f"--numNodes {numNodes}"]
+            launch_cmd = f"bsub -nnodes {numNodes} -Is -XF -W {timeLimit} -core_isolation 2 "
+        ats_args.extend(mac_args)
+
+    #---------------------------------------------------------------------------
+    # Launch ATS
+    #---------------------------------------------------------------------------
+    # If doing a CI run, set some more options
+    if (options.ciRun):
+        if ("--logs" not in unknown_options):
+            ats_args.append(f"--logs {test_log_name}")
+            log_name = test_log_name
+        else:
+            log_name_indx = unknown_options.index("--logs") + 1
+            log_name = unknown_options[log_name_indx]
+        ats_args.append('--glue="independent=True"')
+        ats_args.append('--continueFreq=15')
+    ats_args = " ".join(str(x) for x in ats_args)
+    other_args = " ".join(str(x) for x in unknown_options)
+    cmd = f"{ats_exe} -e {spheral_exe} {ats_args} {other_args}"
+    # Check if we are already in an allocation
+    inAlloc = any(e in list(os.environ.keys()) for e in inAllocVars)
+    # If already in an allocation, skip the launch command
+    if inAlloc:
+        run_command = cmd
+    else:
+        if blueOS:
+            # Launches using bsub have issues with '<' in the command,
+            # so the entire run statement must be quoted
+            run_command = f"{launch_cmd} '{cmd}'"
+        else:
+            run_command = f"{launch_cmd}{cmd}"
+    print(f"\nRunning: {run_command}\n")
+    if (options.ciRun):
+        run_and_report(run_command, log_name, 0)
+    else:
+        try:
+            sexe(run_command)
+        except Exception as e:
+            print(e)
+
+if __name__ == "__main__":
+    main()
diff --git a/src/SimulationControl/CMakeLists.txt b/src/SimulationControl/CMakeLists.txt
index 32f7ca438..578733220 100644
--- a/src/SimulationControl/CMakeLists.txt
+++ b/src/SimulationControl/CMakeLists.txt
@@ -25,6 +25,22 @@ configure_file(
   ${CMAKE_CURRENT_SOURCE_DIR}/Spheral_banner.py
   ${CMAKE_CURRENT_BINARY_DIR}/Spheral_banner.py)
 
+# Configure SpheralConfigs.py.in
+set(SPHERAL_COMP_CONFIG)
+if (SPHERAL_ENABLE_FSISPH)
+  list(APPEND SPHERAL_COMP_CONFIG "\"FSISPH\"")
+endif()
+if (SPHERAL_ENABLE_GSPH)
+  list(APPEND SPHERAL_COMP_CONFIG "\"GSPH\"")
+endif()
+if (SPHERAL_ENABLE_SVPH)
+  list(APPEND SPHERAL_COMP_CONFIG "\"SVPH\"")
+endif()
+string(REPLACE ";" ", " SPHERAL_COMP_CONFIG_STR "${SPHERAL_COMP_CONFIG}")
+configure_file(
+  ${CMAKE_CURRENT_SOURCE_DIR}/SpheralConfigs.py.in
+  ${CMAKE_CURRENT_BINARY_DIR}/SpheralConfigs.py)
+
 set(_dims 1)
 if(ENABLE_2D)
   list(APPEND _dims 2)
@@ -80,4 +96,5 @@ spheral_install_python_files(
   CaptureStdout.py
   ${CMAKE_CURRENT_BINARY_DIR}/Spheral_banner.py
   ${CMAKE_CURRENT_BINARY_DIR}/spheralDimensions.py
+  ${CMAKE_CURRENT_BINARY_DIR}/SpheralConfigs.py
   )
diff --git a/src/SimulationControl/Spheral.py b/src/SimulationControl/Spheral.py
index 8f5bf2255..4ba881f3c 100644
--- a/src/SimulationControl/Spheral.py
+++ b/src/SimulationControl/Spheral.py
@@ -3,6 +3,11 @@
 
 # Modified version to be compatible with the pybindgen version of Spheral++.
 
+# ------------------------------------------------------------------------------
+# Load up MPI.
+# ------------------------------------------------------------------------------
+import mpi
+
 from SpheralUtilities import BuildData
 
 if not BuildData.cxx_compiler_id == "GNU":
@@ -13,11 +18,6 @@
         print("WARNING: unable to set python dl flags on Spheral import.")
         pass
 
-# ------------------------------------------------------------------------------
-# Load up MPI.
-# ------------------------------------------------------------------------------
-import mpi
-
 # ------------------------------------------------------------------------------
 # Import a scipy module to initialize scipy's shared qhull library before
 # spheral's static qhull library.
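The SimulationControl/CMakeLists.txt hunk above collects the enabled physics options into SPHERAL_COMP_CONFIG_STR, and configure_file substitutes that string into SpheralConfigs.py.in (added next) as a Python list literal. Downstream tooling can then branch on the build configuration at run time, which is what install_ats_args() in spheral_ats.py does. A minimal sketch of the pattern, assuming only the SpheralConfigs module introduced in this patch; the lowercase filter tags mirror the glue variables set in tests/integration.ats:

```python
import SpheralConfigs

# component_configs() returns e.g. ["FSISPH", "GSPH"] once CMake has
# substituted SPHERAL_COMP_CONFIG_STR into SpheralConfigs.py.in.
enabled = SpheralConfigs.component_configs()

# Build ATS filter flags for components compiled out of this install,
# mirroring install_ats_args() in scripts/spheral_ats.py.
ats_filters = [f'--filter="not {comp.lower()}"'
               for comp in ("FSISPH", "GSPH", "SVPH")
               if comp not in enabled]
print(" ".join(ats_filters))
```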
diff --git a/src/SimulationControl/SpheralConfigs.py.in b/src/SimulationControl/SpheralConfigs.py.in
new file mode 100644
index 000000000..0f0c2e747
--- /dev/null
+++ b/src/SimulationControl/SpheralConfigs.py.in
@@ -0,0 +1,28 @@
+
+'''
+This module allows access to the Spheral build configuration information
+'''
+
+import sys, os
+
+def build_type():
+    return "@CMAKE_BUILD_TYPE@"
+
+def sys_arch():
+    return "@SPHERAL_SYS_ARCH@"
+
+def config():
+    return "@SPHERAL_CONFIGURATION@"
+
+def component_configs():
+    return [@SPHERAL_COMP_CONFIG_STR@]
+
+def caliper_module_path():
+    caliper_loc = "@CONFIG_CALIPER_DIR@"
+    if (caliper_loc and os.path.exists(caliper_loc)):
+        return os.path.join(caliper_loc, "lib64/caliper")
+    else:
+        return None
+
+def test_install_path():
+    return "@SPHERAL_TEST_INSTALL_PREFIX@"
diff --git a/tests/CRKSPH.ats b/tests/CRKSPH.ats
index d6a66db00..b0e5c7f78 100644
--- a/tests/CRKSPH.ats
+++ b/tests/CRKSPH.ats
@@ -7,6 +7,7 @@ filter = 0.0
 KernelConstructor = "NBSplineKernel"
 order = 7
 linearInExpansion = False
+glue(independent=True)
 
 #-------------------------------------------------------------------------------
 # Function to add the tests.
diff --git a/tests/PSPH.ats b/tests/PSPH.ats
index f18294446..89a1e1194 100644
--- a/tests/PSPH.ats
+++ b/tests/PSPH.ats
@@ -11,6 +11,7 @@ evolveTotalEnergy = True
 boolCullenViscosity = True
 HopkinsConductivity = True
 resMultiplier = 1
+glue(independent=True)
 
 #-------------------------------------------------------------------------------
 # Function to add the tests.
diff --git a/tests/compSPH.ats b/tests/compSPH.ats
index af0a555e8..580b5e4c0 100644
--- a/tests/compSPH.ats
+++ b/tests/compSPH.ats
@@ -7,6 +7,7 @@ filter = 0.0
 KernelConstructor = "NBSplineKernel"
 order = 5
 linearInExpansion = False
+glue(independent=True)
 
 #-------------------------------------------------------------------------------
 # Function to add the tests.
diff --git a/tests/integration.ats b/tests/integration.ats
index 02efa2297..038960ca3 100644
--- a/tests/integration.ats
+++ b/tests/integration.ats
@@ -7,6 +7,7 @@ glue(fsisph = False)
 glue(gsph = False)
 glue(svph = False)
+glue(independent = True)
 
 # Fail test to make sure tests are working
 source("unit/Utilities/testFails.py")
diff --git a/tests/performance.py b/tests/performance.py
new file mode 100644
index 000000000..66858a5d4
--- /dev/null
+++ b/tests/performance.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python3
+
+# This file runs and compares performance tests through the ats system.
+# Run using: ./spheral-ats tests/performance.py
+
+import sys, shutil, os, time
+import numpy as np
+spheral_path = "../lib/python3.9/site-packages/Spheral"
+sys.path.append(spheral_path)
+import SpheralConfigs
+
+# If desired, set a location to consolidate Caliper files; this is useful
+# when running scaling tests
+# This automatically creates directories based on the install configuration
+# and test names inside output_loc
+# WARNING: Be sure to remove older performance data in the
+# output location beforehand
+#output_loc = "/home/user/scaling/test/files"
+output_loc = None
+
+# Current system architecture from Spack
+spheral_sys_arch = SpheralConfigs.sys_arch()
+# Current install configuration from Spack
+spheral_install_config = SpheralConfigs.config()
+
+def add_timer_cmds(cali_name, test_name):
+    return f"--caliperFilename {cali_name} --adiakData 'test_name: {test_name}, install_config: {spheral_install_config}'"
+
+# Consolidate Caliper files after run
+def gather_files(manager):
+    filtered = [test for test in manager.testlist if test.status is PASSED]
+    for test in filtered:
+        run_dir = test.directory
+        cali_filename = test.options["caliper_filename"]
+        cfile = os.path.join(run_dir, cali_filename)
+        test_name = test.options["label"]
+        outdir = os.path.join(output_loc, spheral_install_config, test_name)
+        if (not os.path.exists(outdir)):
+            log(f"Creating {outdir}")
+            os.makedirs(outdir)
+        outfile = os.path.join(outdir, cali_filename)
+        log(f"Copying {cali_filename} to {outdir}")
+        shutil.copy(cfile, outfile)
+# Set up Spheral performance tests
+def spheral_setup_test(test_path, test_name, test_num, inps, ncores, threads=1):
+    'General method for creating an individual performance test'
+    global regions, timers, spheral_install_config
+    caliper_filename = f"{test_name}_{test_num}_{int(time.time())}.cali"
+    timer_cmds = add_timer_cmds(caliper_filename, test_name)
+    finps = f"{inps} {timer_cmds}"
+    t = test(script=test_path, clas=finps,
+             label=test_name,
+             np=ncores,
+             nt=threads,
+             caliper_filename=caliper_filename,
+             regions=regions,
+             timers=timers,
+             install_config=spheral_install_config)
+    return t
+
+def main():
+    # regions and timers are read through 'global' in spheral_setup_test,
+    # so they must be assigned at module scope
+    global regions, timers
+    if (output_loc):
+        onExit(gather_files)
+    glue(keep=True)
+    if ("power" in spheral_sys_arch):
+        num_nodes = 1
+        num_cores = 40
+    elif ("broadwell" in spheral_sys_arch):
+        num_nodes = 2
+        num_cores = 36
+    # Select which timing regions to compare (for CI)
+    regions = ["CheapRK2",
+               "CheapRK2PreInit",
+               "ConnectivityMap_computeConnectivity",
+               "ConnectivityMap_patch",
+               "CheapRK2EvalDerivs",
+               "CheapRK2EndStep"]
+    # Select which timers to compare (for CI)
+    timers = ["sum#inclusive#sum#time.duration"] # Means the sum of the time from all ranks
+
+    # 3D convection test
+    test_dir = os.path.join(SpheralConfigs.test_install_path(), "unit/Boundary")
+
+    group(name="3D Convection test")
+    test_file = "testPeriodicBoundary-3d.py"
+    test_path = os.path.join(test_dir, test_file)
+    test_name = "3DCONV"
+
+    # Test with varying number of ranks
+    ranks = [1, 2, 4]
+    # We want 20 points per unit length
+    ref_len = 1.
+    sph_point_rho = 20. / ref_len
+    sph_per_core = 300
+    for i, n in enumerate(ranks):
+        ncores = int(num_nodes*num_cores/n)
+        total_sph_nodes = sph_per_core * ncores
+        npd = int(np.cbrt(total_sph_nodes))
+        new_len = npd * ref_len / sph_point_rho
+        inps = f"--nx {npd} --ny {npd} --nz {npd} --x1 {new_len} --y1 {new_len} --z1 {new_len} --steps 100"
+        t = spheral_setup_test(test_path, test_name, i, inps, ncores)
+    endgroup()
+
+    # NOH tests
+    test_dir = os.path.join(SpheralConfigs.test_install_path(), "functional/Hydro/Noh")
+
+    # General input for all Noh tests
+    gen_noh_inps = "--crksph False --cfl 0.25 --Cl 1.0 --Cq 1.0 --xfilter 0.0 "+\
+        "--nPerh 2.01 --graphics False --clearDirectories False --doCompare False "+\
+        "--dataDir None --vizTime None --vizCycle None"
+
+    group(name="NOH 2D tests")
+    test_file = "Noh-cylindrical-2d.py"
+    nRadial = 100
+    test_path = os.path.join(test_dir, test_file)
+    test_name = "NC2D"
+
+    # Test with varying number of ranks
+    ranks = [1, 2, 4]
+    for i, n in enumerate(ranks):
+        inps = f"{gen_noh_inps} --nRadial {nRadial} --steps 10"
+        ncores = int(num_nodes*num_cores/n)
+        t = spheral_setup_test(test_path, test_name, i, inps, ncores)
+
+    endgroup()
+
+    group(name="NOH 3D tests")
+    test_file = "Noh-spherical-3d.py"
+    test_path = os.path.join(test_dir, test_file)
+    test_name = "NS3D"
+
+    # Test with varying number of SPH nodes per rank
+    npcore = [100, 200, 300]
+    for i, n in enumerate(npcore):
+        ncores = int(num_nodes*num_cores)
+        total_sph_nodes = n*ncores
+        npd = int(np.cbrt(total_sph_nodes))
+        node_inps = f"--nx {npd} --ny {npd} --nz {npd}"
+        inps = f"{gen_noh_inps} {node_inps} --steps 10"
+        t = spheral_setup_test(test_path, test_name, i, inps, ncores)
+    # Add a wait to ensure all timer files are done
+    wait()
+
+if __name__=="__main__":
+    main()
diff --git a/tests/unit/Boundary/testPeriodicBoundary-3d.py b/tests/unit/Boundary/testPeriodicBoundary-3d.py
new file mode 100644
index 000000000..d26d2e33e
--- /dev/null
+++ b/tests/unit/Boundary/testPeriodicBoundary-3d.py
@@ -0,0 +1,164 @@
+#ATS:t0 = test(SELF, "", np=10, label="Periodic boundary unit test -- 3-D (parallel)")
+#-------------------------------------------------------------------------------
+# 3D test of periodic boundaries -- we simply allow a pressureless fluid to
+# cycle around a box and check the sum density
+#-------------------------------------------------------------------------------
+from math import *
+from Spheral3d import *
+from SpheralTestUtilities import *
+from SpheralPointmeshSiloDump import dumpPhysicsState
+import mpi
+
+title("3D periodic boundary test.")
+
+#-------------------------------------------------------------------------------
+# Generic problem parameters
+#-------------------------------------------------------------------------------
+commandLine(nx = 20,
+            ny = 20,
+            nz = 20,
+            x0 = 0.0,
+            x1 = 1.0,
+            y0 = 0.0,
+            y1 = 1.0,
+            z0 = 0.0,
+            z1 = 1.0,
+
+            rho1 = 1.0,
+            cs2 = 1.0,
+            mu = 1.0,
+            vx1 = 1.0,
+            vy1 = 1.0,
+            vz1 = 1.0,
+
+            nPerh = 2.01,
+
+            hmin = 0.0001,
+            hmax = 0.5,
+            cfl = 0.5,
+
+            tol = 1.0e-3,
+            steps = 300,
+            dt = 0.0001,
+            dtMin = 1.0e-5,
+            dtMax = 0.1,
+            dtGrowth = 2.0,
+            dtverbose = False,
+            rigorousBoundaries = False,
+            maxSteps = None,
+            statsStep = 1,
+            smoothIters = 0,
+            HEvolution = IdealH,
+            densityUpdate = RigorousSumDensity,
+            compatibleEnergy = True,
+            gradhCorrection = True,
+            linearConsistent = False,
+            domainIndependent = False,
+
+            restoreCycle = None,
+            restartStep = 10000,
+            restartBaseName = None
+            )
+
+#-------------------------------------------------------------------------------
+# Material properties.
+#-------------------------------------------------------------------------------
+eos = IsothermalEquationOfStateMKS(cs2, mu)
+
+#-------------------------------------------------------------------------------
+# Interpolation kernels.
+#-------------------------------------------------------------------------------
+WT = TableKernel(BSplineKernel(), 1000)
+WTPi = TableKernel(BSplineKernel(), 1000)
+
+#-------------------------------------------------------------------------------
+# Make the NodeList.
+#-------------------------------------------------------------------------------
+nodes1 = makeFluidNodeList("nodes1", eos,
+                           hmin = hmin,
+                           hmax = hmax,
+                           nPerh = nPerh)
+
+#-------------------------------------------------------------------------------
+# Set the node properties.
+#-------------------------------------------------------------------------------
+from GenerateNodeDistribution3d import GenerateNodeDistribution3d
+gen1 = GenerateNodeDistribution3d(nx, ny, nz,
+                                  rho = rho1,
+                                  distributionType = "lattice",
+                                  xmin = (x0, y0, z0),
+                                  xmax = (x1, y1, z1),
+                                  nNodePerh = nPerh,
+                                  SPH = True)
+if mpi.procs > 1:
+    from PeanoHilbertDistributeNodes import distributeNodes3d
+else:
+    from DistributeNodes import distributeNodes3d
+distributeNodes3d((nodes1, gen1))
+
+# Set the node velocities.
+nodes1.velocity(VectorField("tmp velocity", nodes1, Vector(vx1, vy1, vz1)))
+
+#-------------------------------------------------------------------------------
+# Construct a DataBase to hold our node list
+#-------------------------------------------------------------------------------
+db = DataBase()
+db.appendNodeList(nodes1)
+
+#-------------------------------------------------------------------------------
+# Construct the artificial viscosity.
+#-------------------------------------------------------------------------------
+q = MonaghanGingoldViscosity(0.0, 0.0)
+
+#-------------------------------------------------------------------------------
+# Construct the hydro physics object.
+#-------------------------------------------------------------------------------
+hydro = SPH(dataBase = db,
+            W = WT,
+            Q = q,
+            cfl = cfl,
+            densityUpdate = RigorousSumDensity,
+            HUpdate = HEvolution)
+
+#-------------------------------------------------------------------------------
+# Create boundary conditions.
+#-------------------------------------------------------------------------------
+loVect = Vector(x0, y0, z0)
+hiVect = Vector(x1, y1, z1)
+bcs = []
+for i in range(0,3):
+    nVect = Vector(0., 0., 0.)
+    nVect[i] = 1.
+    plane0 = Plane(loVect, nVect)
+    plane1 = Plane(hiVect, -nVect)
+    bcs.append(PeriodicBoundary(plane0, plane1))
+# A segfault occurs if hydro.appendBoundary is called directly in the previous loop
+for i in bcs:
+    hydro.appendBoundary(i)
+
+#-------------------------------------------------------------------------------
+# Construct a time integrator.
+#------------------------------------------------------------------------------- +integrator = CheapSynchronousRK2Integrator(db) +integrator.appendPhysicsPackage(hydro) +integrator.lastDt = dt +integrator.dtMin = dtMin +integrator.dtMax = dtMax +integrator.dtGrowth = dtGrowth +integrator.rigorousBoundaries = rigorousBoundaries +integrator.domainDecompositionIndependent = domainIndependent +integrator.verbose = dtverbose + +#------------------------------------------------------------------------------- +# Make the problem controller. +#------------------------------------------------------------------------------- +control = SpheralController(integrator, WT, + statsStep = statsStep, + restartStep = restartStep, + restartBaseName = restartBaseName, + restoreCycle = restoreCycle) + +#------------------------------------------------------------------------------- +# Advance to the end time. +#------------------------------------------------------------------------------- +control.step(steps) diff --git a/tests/unit/CMakeLists.txt b/tests/unit/CMakeLists.txt index 9182c51f9..4f009ae8a 100644 --- a/tests/unit/CMakeLists.txt +++ b/tests/unit/CMakeLists.txt @@ -1,6 +1 @@ add_subdirectory(CXXTests) - -configure_file( - "${CMAKE_CURRENT_SOURCE_DIR}/Utilities/testTimers.py.in" - "${SPHERAL_TEST_INSTALL_PREFIX}/tests/unit/Utilities/testTimers.py" - ) diff --git a/tests/unit/CXXTests/CMakeLists.txt b/tests/unit/CXXTests/CMakeLists.txt index 264480b59..df1182b3c 100644 --- a/tests/unit/CXXTests/CMakeLists.txt +++ b/tests/unit/CXXTests/CMakeLists.txt @@ -33,7 +33,7 @@ foreach(test ${gtest_spheral_tests}) if (NOT ENABLE_CXXONLY) configure_file("${CMAKE_CURRENT_SOURCE_DIR}/pyRunCXXTest.in" - "${SPHERAL_TEST_INSTALL_PREFIX}/tests/unit/CXXTests/${test_name}.py" + "${SPHERAL_TEST_INSTALL_PREFIX}/unit/CXXTests/${test_name}.py" ) endif() @@ -50,6 +50,6 @@ string(REPLACE ";" ", " TEST_LIST "${TESTS}") if (NOT ENABLE_CXXONLY) configure_file("${CMAKE_CURRENT_SOURCE_DIR}/runCXXTests.in" - "${SPHERAL_TEST_INSTALL_PREFIX}/tests/unit/CXXTests/runCXXTests.ats" + "${SPHERAL_TEST_INSTALL_PREFIX}/unit/CXXTests/runCXXTests.ats" ) endif() diff --git a/tests/unit/Utilities/testTimers.py.in b/tests/unit/Utilities/testTimers.py similarity index 91% rename from tests/unit/Utilities/testTimers.py.in rename to tests/unit/Utilities/testTimers.py index 09fc0c9ad..32ff49920 100644 --- a/tests/unit/Utilities/testTimers.py.in +++ b/tests/unit/Utilities/testTimers.py @@ -8,9 +8,9 @@ import Spheral from SpheralTestUtilities import * from SpheralOptionParser import * -from SpheralUtilities import TimerMgr from SpheralUtilities import * import mpi +import SpheralConfigs import sys, os, time @@ -51,11 +51,11 @@ adiak_fini() TimerMgr.fini() mpi.barrier() - caliper_loc = "@CONFIG_CALIPER_DIR@" - sys.path.append(os.path.join(caliper_loc, "lib64/caliper")) + caliper_loc = SpheralConfigs.caliper_module_path() + if (not caliper_loc): + raise FileNotFoundError("Caliper file not found") + sys.path.append(caliper_loc) import caliperreader as cr - if (not os.path.exists(caliper_file)): - raise ValueError("Caliper file not found") r = cr.CaliperReader() r.read(caliper_file) records = r.records @@ -85,4 +85,3 @@ if ("adiakData" in adiak_inp): assert adiak_data_dict.items() <= adiak_inp.items(),\ "incorrect adiakData inputs found in Caliper file Adiak values" -
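Since testTimers.py is no longer generated through configure_file, the Caliper reader location is now resolved at run time via SpheralConfigs.caliper_module_path(), as the final hunk above shows. A minimal sketch of reading timer records that way, assuming a Caliper-enabled build; the output.cali file name is a placeholder:

```python
import sys
import SpheralConfigs

# Locate the caliperreader module shipped with the build's Caliper install.
caliper_loc = SpheralConfigs.caliper_module_path()
if not caliper_loc:
    raise FileNotFoundError("Caliper module path not found in this build")
sys.path.append(caliper_loc)
import caliperreader as cr

# Read the records written during an instrumented run.
r = cr.CaliperReader()
r.read("output.cali")  # placeholder: any .cali file produced by a test run
for record in r.records:
    print(record)      # each record is a dict of Caliper attributes
```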