From af832dedc5535c8da428af2558edb233b1bd1b78 Mon Sep 17 00:00:00 2001
From: juacrumar
Date: Thu, 5 Dec 2024 13:33:31 +0100
Subject: [PATCH] apply comments ; add set_threads from pytorch as well

---
 .github/workflows/pytorch_test.yml            |  1 +
 .../backends/keras_backend/internal_state.py  | 26 ++++++++++---------
 .../backends/keras_backend/operations.py      |  2 +-
 n3fit/src/n3fit/model_gen.py                  |  4 +--
 4 files changed, 18 insertions(+), 15 deletions(-)

diff --git a/.github/workflows/pytorch_test.yml b/.github/workflows/pytorch_test.yml
index 57aa9791d8..1621159b4c 100644
--- a/.github/workflows/pytorch_test.yml
+++ b/.github/workflows/pytorch_test.yml
@@ -16,6 +16,7 @@ jobs:
         shell: bash -l {0}
         run: |
           pip install .[nolha,torch]
+          # Since there is no LHAPDF in the system, initialize the folder and download pdfsets.index
           lhapdf-management update --init
       - name: Test we can run one runcard
         shell: bash -l {0}
diff --git a/n3fit/src/n3fit/backends/keras_backend/internal_state.py b/n3fit/src/n3fit/backends/keras_backend/internal_state.py
index 23d9ed3819..a10e317010 100644
--- a/n3fit/src/n3fit/backends/keras_backend/internal_state.py
+++ b/n3fit/src/n3fit/backends/keras_backend/internal_state.py
@@ -22,6 +22,7 @@
 
 # Prepare Keras-backend dependent functions
 if K.backend() in ("torch", "jax"):
+    import torch
 
     def set_eager(flag=True):
         """Pytorch is eager by default"""
@@ -29,7 +30,8 @@ def set_eager(flag=True):
 
     def set_threading(threads, core):
         """Not implemented"""
-        pass
+        log.info("Setting max number of threads to: %d", threads)
+        torch.set_num_threads(threads)
 
 elif K.backend() == "tensorflow":
     import tensorflow as tf
@@ -43,8 +45,16 @@ def set_eager(flag=True):
 
     def set_threading(threads, cores):
         """Set the Tensorflow inter and intra parallelism options"""
-        tf.config.threading.set_inter_op_parallelism_threads(threads)
-        tf.config.threading.set_intra_op_parallelism_threads(cores)
+        log.info("Setting the number of cores to: %d", cores)
+        try:
+            tf.config.threading.set_inter_op_parallelism_threads(threads)
+            tf.config.threading.set_intra_op_parallelism_threads(cores)
+        except RuntimeError:
+            # If tensorflow has already been initiated, the previous calls might fail.
+            # This may happen for instance if pdfflow is being used
+            log.warning(
+                "Could not set tensorflow parallelism settings from n3fit, maybe tensorflow is already initialized by a third program"
+            )
 
 
 def set_number_of_cores(max_cores=None, max_threads=None):
@@ -80,15 +90,7 @@ def set_number_of_cores(max_cores=None, max_threads=None):
     if max_threads is not None:
         threads = min(max_threads, threads)
 
-    log.info("Setting the number of cores to: %d", cores)
-    try:
-        set_threading(threads, cores)
-    except RuntimeError:
-        # If pdfflow is being used, tensorflow will already be initialized by pdfflow
-        # maybe it would be good to drop completely pdfflow before starting the fit? (TODO ?)
-        log.warning(
-            "Could not set tensorflow parallelism settings from n3fit, maybe has already been initialized?"
-        )
+    set_threading(threads, cores)
 
 
 def clear_backend_state():
diff --git a/n3fit/src/n3fit/backends/keras_backend/operations.py b/n3fit/src/n3fit/backends/keras_backend/operations.py
index 23d412c171..f123e450e3 100644
--- a/n3fit/src/n3fit/backends/keras_backend/operations.py
+++ b/n3fit/src/n3fit/backends/keras_backend/operations.py
@@ -18,7 +18,7 @@
 
     Most of the operations in this module are just aliases to the backend
     (Keras in this case) so that, when implementing new backends, it is clear
-    which operations may needd to be overwritten.
+    which operations may need to be overwritten.
 
     For a few selected operations, a more complicated wrapper to e.g., make them into
    layers or apply some default, is included.
diff --git a/n3fit/src/n3fit/model_gen.py b/n3fit/src/n3fit/model_gen.py
index f7fe7c5608..868409f489 100644
--- a/n3fit/src/n3fit/model_gen.py
+++ b/n3fit/src/n3fit/model_gen.py
@@ -99,10 +99,10 @@ def _generate_experimental_layer(self, pdf):
         the input PDF is evaluated in all points that the experiment needs and needs to be split
         """
         if len(self.dataset_xsizes) > 1:
-            sp_layer = op.tensor_splitter(
+            splitting_layer = op.tensor_splitter(
                 pdf.shape, self.dataset_xsizes, axis=2, name=f"{self.name}_split"
             )
-            sp_pdf = sp_layer(pdf)
+            sp_pdf = splitting_layer(pdf)
             output_layers = [obs(p) for obs, p in zip(self.observables, sp_pdf)]
         else:
             output_layers = [obs(pdf) for obs in self.observables]
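
Note: after this patch, the backend-dependent threading setup in internal_state.py has roughly the following shape. This is a condensed sketch for illustration only, not the full module; the keras import alias and the module-level logger are assumptions carried over from the surrounding file, and torch.set_num_threads / tf.config.threading are the real PyTorch and TensorFlow APIs used in the diff above.

    import logging

    from keras import backend as K  # assumed import alias, as in the real module

    log = logging.getLogger(__name__)

    if K.backend() in ("torch", "jax"):
        import torch

        def set_threading(threads, cores):
            # Limit the number of CPU threads used by the torch backend
            log.info("Setting max number of threads to: %d", threads)
            torch.set_num_threads(threads)

    elif K.backend() == "tensorflow":
        import tensorflow as tf

        def set_threading(threads, cores):
            # TensorFlow raises RuntimeError if these settings are changed after it has
            # already been initialized (e.g. by pdfflow), so only warn in that case
            log.info("Setting the number of cores to: %d", cores)
            try:
                tf.config.threading.set_inter_op_parallelism_threads(threads)
                tf.config.threading.set_intra_op_parallelism_threads(cores)
            except RuntimeError:
                log.warning("Could not set tensorflow parallelism settings from n3fit")

With this layout, set_number_of_cores(max_cores=..., max_threads=...) ends in a single set_threading(threads, cores) call regardless of backend, and the backend-specific error handling lives next to the backend-specific code rather than in the caller.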