Geant4/examples/extended/parameterisations/Par04/training/utils/gpu_limiter.py


import os
from dataclasses import dataclass

import tensorflow as tf


@dataclass
class GPULimiter:
    """
    Class responsible for setting the limits of GPU usage by TensorFlow. Currently, the limiter creates one
    logical device per physical device. This can be changed in the future.

    Attributes:
        _gpu_ids: A string listing the devices visible to the process. Identifiers of physical GPUs should
            be separated by commas (no spaces).
        _max_gpu_memory_allocation: An integer specifying the memory limit (in GB) allocated per logical device.

    """
    _gpu_ids: str
    _max_gpu_memory_allocation: int

    def __call__(self):
        os.environ["CUDA_VISIBLE_DEVICES"] = f"{self._gpu_ids}"
        gpus = tf.config.list_physical_devices('GPU')
        if gpus:
            # Restrict TensorFlow to allocate only max_gpu_memory_allocation*1024 MB of memory on each visible GPU
            try:
                for gpu in gpus:
                    tf.config.set_logical_device_configuration(
                        gpu,
                        [tf.config.LogicalDeviceConfiguration(memory_limit=1024 * self._max_gpu_memory_allocation)])
                logical_gpus = tf.config.list_logical_devices('GPU')
                print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
            except RuntimeError as e:
                # Virtual devices must be set before GPUs have been initialized
                print(e)
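
A minimal usage sketch (not part of gpu_limiter.py; the import path and the argument values below are illustrative assumptions based on the file location): the limiter is instantiated with the visible GPU ids and the per-device memory cap, and called once before any TensorFlow operation initializes the GPUs, since virtual devices cannot be reconfigured afterwards.

# Assumed import path relative to the Par04 training directory.
from utils.gpu_limiter import GPULimiter

# Example values: expose GPUs 0 and 1 to the process and cap each
# logical device at 8 GB (8 * 1024 MB).
GPULimiter(_gpu_ids="0,1", _max_gpu_memory_allocation=8)()

# TensorFlow models built after this point see only the configured logical devices.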