def testUnsupportedStrategy(self):
  """LossScaleOptimizer must raise under CentralStorageStrategy.

  Checks both failure modes: constructing the optimizer inside the
  strategy scope, and using an optimizer (created outside the scope)
  from within the scope via `experimental_run`.
  """
  strategy = central_storage_strategy.CentralStorageStrategy()
  expected_error = (
      'Loss scaling is not supported with the tf.distribute.Strategy: '
      'CentralStorageStrategy. Try using a different Strategy, e.g. a '
      'MirroredStrategy')
  # Construction inside the scope should fail immediately.
  with strategy.scope(), self.assertRaisesRegex(ValueError, expected_error):
    loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD(), 1.)
  # Construction outside the scope succeeds, but using the optimizer
  # inside the scope should raise the same error.
  opt = loss_scale_optimizer.LossScaleOptimizer(gradient_descent.SGD(), 1.)
  with strategy.scope():
    var = variables.Variable(1.0)

    def run_fn():
      return opt.minimize(lambda: var * 2.0, [var])

    with self.assertRaisesRegex(ValueError, expected_error):
      strategy.experimental_run(run_fn)
    required_gpus=1)
# Named strategy combinations for distribute tests.  Each NamedDistribution
# wraps the strategy constructor in a lambda so the strategy is only built
# when a test combination actually runs, and `required_gpus` lets the test
# framework skip combinations the host cannot satisfy.
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
    "Mirrored2GPUs",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)
# Should call set_virtual_cpus_to_at_least(3) in your test's setUp methods.
mirrored_strategy_with_cpu_1_and_2 = combinations.NamedDistribution(
    "Mirrored2CPU",
    lambda: mirrored_lib.MirroredStrategy(["/cpu:1", "/cpu:2"]))
central_storage_strategy_with_two_gpus = combinations.NamedDistribution(
    "CentralStorage2GPUs",
    lambda: central_storage_strategy.CentralStorageStrategy._from_num_gpus(2),  # pylint: disable=protected-access
    required_gpus=2)
central_storage_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
    "CentralStorageCPUAndGPU",
    lambda: central_storage_strategy.CentralStorageStrategy(
        ["/gpu:0", "/cpu:0"]),
    required_gpus=1)

# V1 optimizer factories for test combinations; the lambda defers optimizer
# construction until the combination runs.
gradient_descent_optimizer_v1_fn = combinations.NamedObject(
    "GradientDescentV1",
    lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = combinations.NamedObject(
    "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = combinations.NamedObject(
    "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = combinations.NamedObject(
    "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))

# TODO(shiningsun): consider adding the other v1 optimizers
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]
# Example 3 (scrape marker; originally "示例#3", score 0)
def create_central_storage_strategy():
    """Create a CentralStorageStrategy, using a GPU if it is available."""
    # Compute on CPU plus the first GPU when one exists; parameters always
    # live on the CPU.
    if context.num_gpus() >= 1:
        compute_devices = ['cpu:0', 'gpu:0']
    else:
        compute_devices = ['cpu:0']
    return central_storage_strategy.CentralStorageStrategy(
        compute_devices, parameter_device='cpu:0')
# Example 4 (scrape marker; originally "示例#4", score 0)
def create_central_storage_strategy():
  """Create a CentralStorageStrategy, using a GPU if it is available."""
  # Include the first GPU as a compute device only when at least one logical
  # GPU is visible; parameters are always placed on the CPU.
  if tf_config.list_logical_devices('GPU'):
    compute_devices = ['cpu:0', 'gpu:0']
  else:
    compute_devices = ['cpu:0']
  return central_storage_strategy.CentralStorageStrategy(
      compute_devices, parameter_device='cpu:0')