Example #1
  def testGpuInvalidConfig(self):
    gpus = config.list_physical_devices('GPU')
    self.assertNotEqual(len(gpus), 0)

    for gpu in gpus:
      config.set_memory_growth(gpu, True)

    c = context.context().config
    self.assertTrue(c.gpu_options.allow_growth)

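    # Splitting a GPU into multiple virtual devices requires an explicit
    # memory limit for each, so this call is expected to fail.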
    with self.assertRaisesRegexp(ValueError, 'memory limit'):
      config.set_virtual_device_configuration(gpus[-1], [
          context.VirtualDeviceConfiguration(),
          context.VirtualDeviceConfiguration()
      ])

    self.assertIsNone(config.get_virtual_device_configuration(gpus[-1]))
    config.set_virtual_device_configuration(gpus[-1], [
        context.VirtualDeviceConfiguration(memory_limit=10),
        context.VirtualDeviceConfiguration(memory_limit=10)
    ])

    c = context.context().config
    self.assertFalse(c.gpu_options.allow_growth)

    with self.assertRaisesRegexp(ValueError, 'virtual devices'):
      config.set_memory_growth(gpus[-1], False)
Example #3
    def setUp(self):
        super(trt_test.TfTrtIntegrationTestBase, self).setUp()  # pylint: disable=bad-super-call
        os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "True"
        gpus = config.list_physical_devices("GPU")

        logging.info("Found the following GPUs:")
        for gpu in gpus:
            logging.info(f"\t- {gpu}")
            config.set_memory_growth(gpu, True)
Example #4
    def setUp(self):
        super().setUp()
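        # Allow TF-TRT to fall back to native TensorFlow execution when a
        # TensorRT engine cannot be built or run for a segment.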
        os.environ["TF_TRT_ALLOW_ENGINE_NATIVE_SEGMENT_EXECUTION"] = "True"
        gpus = config.list_physical_devices("GPU")

        logging.info("Found the following GPUs:")
        for gpu in gpus:
            logging.info(f"\t- {gpu}")
            config.set_memory_growth(gpu, True)
Example #5
    def testGpuInvalidConfig(self):
        gpus = config.list_physical_devices('GPU')
        self.assertNotEqual(len(gpus), 0)

        if len(gpus) > 1:
            # Enabling growth on only one of several visible GPUs must raise when
            # the config is built, because the setting cannot differ across them
            config.set_memory_growth(gpus[0], True)
            with self.assertRaisesRegex(ValueError, 'cannot differ'):
                c = context.context().config

            # If we limit visibility to GPU 0, growth is fine
            config.set_visible_devices(gpus[0], 'GPU')
            c = context.context().config
            self.assertTrue(c.gpu_options.allow_growth)

            # The second GPU defaults to growth disabled, which works once it is the only visible device
            config.set_visible_devices(gpus[1], 'GPU')
            c = context.context().config
            self.assertFalse(c.gpu_options.allow_growth)

            # Building the config fails again because all GPUs are visible but their growth settings differ
            config.set_visible_devices(gpus, 'GPU')
            with self.assertRaisesRegex(ValueError, 'cannot differ'):
                c = context.context().config

        for gpu in gpus:
            config.set_memory_growth(gpu, True)

        c = context.context().config
        self.assertTrue(c.gpu_options.allow_growth)

        with self.assertRaisesRegex(ValueError, 'memory limit'):
            config.set_logical_device_configuration(gpus[-1], [
                context.LogicalDeviceConfiguration(),
                context.LogicalDeviceConfiguration()
            ])

        self.assertIsNone(config.get_logical_device_configuration(gpus[-1]))
        config.set_logical_device_configuration(gpus[-1], [
            context.LogicalDeviceConfiguration(memory_limit=10),
            context.LogicalDeviceConfiguration(memory_limit=10)
        ])

        c = context.context().config
        self.assertFalse(c.gpu_options.allow_growth)

        with self.assertRaisesRegex(ValueError, 'virtual devices'):
            config.set_memory_growth(gpus[-1], False)
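The test above drives the internal `tensorflow.python.framework.config` and `tensorflow.python.eager.context` modules directly. For reference, a minimal standalone sketch of the same logical-device split through the public `tf.config` API (recent TensorFlow 2.x assumed; the 128 MB limits are arbitrary illustration values):

import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
if gpus:
    # Split the first physical GPU into two fixed-size logical devices.
    # This must be done before the runtime initializes the GPUs.
    tf.config.set_logical_device_configuration(
        gpus[0],
        [tf.config.LogicalDeviceConfiguration(memory_limit=128),
         tf.config.LogicalDeviceConfiguration(memory_limit=128)])
    logical_gpus = tf.config.list_logical_devices('GPU')
    print(len(gpus), "physical GPU(s) ->", len(logical_gpus), "logical GPU(s)")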
Example #6
    def testGpuGrowth(self):
        gpus = config.list_physical_devices('GPU')
        self.assertNotEqual(len(gpus), 0)

        self.assertIsNone(config.get_memory_growth(gpus[-1]))
        for gpu in gpus:
            config.set_memory_growth(gpu, True)

        c = context.context().config
        self.assertTrue(c.gpu_options.allow_growth)

        logical_gpus = config.list_logical_devices('GPU')
        self.assertEqual(len(logical_gpus), len(gpus))

        # Modifying the GPU configuration is not supported
        with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
            for gpu in gpus:
                config.set_memory_growth(gpu, False)

        # Setting the same GPU configuration is fine
        for gpu in gpus:
            config.set_memory_growth(gpu, True)
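For comparison with these tests, the commonly documented application-level pattern uses the public `tf.config` API rather than the internal modules imported here; a minimal sketch, assuming TensorFlow 2.x:

import tensorflow as tf

gpus = tf.config.list_physical_devices('GPU')
try:
    for gpu in gpus:
        # Must run before any GPU has been initialized; once set, the
        # growth flag cannot be changed for the lifetime of the process.
        tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
    # Raised if the GPUs were already initialized, e.g. by an earlier op.
    print(e)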
Example #7
import tensorflow as tf
from keras.backend import tensorflow_backend as K

# Enable GPU memory growth on the TF1-style Keras session.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
# Source: https://mellowlee.tistory.com/entry/Python-Keras-InternalError-GPU-sync-failed [잠토의 잠망경]
old_v = tf.compat.v1.logging.get_verbosity()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

from tensorflow.python.framework.config import set_memory_growth
tf.compat.v1.disable_v2_behavior()
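# Memory growth must be requested before any GPU has been initialized;
# set_memory_growth raises a RuntimeError otherwise, which is caught below.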
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            set_memory_growth(gpu, True)
    except RuntimeError as e:
        print(e)


class SRGAN():
    def __init__(self):
        # Input shape
        self.scale_factor = 2  # 2x
        self.channels = 3
        self.hr_height = 256  # High resolution height
        self.hr_width = 256  # High resolution width
        self.hr_shape = (self.hr_height, self.hr_width, self.channels)
        self.lr_height = int(self.hr_height /
                             self.scale_factor)  # Low resolution height
        self.lr_width = int(self.hr_width /