Example #1
 def testAllReduceMeanGradientTape(self):
   distribution = parameter_server_strategy.ParameterServerStrategy(
       num_gpus_per_worker=2)
   self._test_all_reduce_mean_gradient_tape(distribution)
Example #2
 def testAllReduceSum(self):
   distribution = parameter_server_strategy.ParameterServerStrategy(
       num_gpus_per_worker=2)
   self._test_all_reduce_sum(distribution)
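The _test_all_reduce_sum and _test_all_reduce_mean_gradient_tape helpers are defined elsewhere in the test library and are not shown on this page. The sketch below is only an illustration of what an all-reduce sum over replicas looks like with the public tf.distribute API; the MirroredStrategy, device list, and per-replica values are assumptions, not the helper's actual code.

import tensorflow as tf

# Hypothetical stand-in for the helper above: run a computation on every
# replica, then all-reduce (sum) the per-replica results.
strategy = tf.distribute.MirroredStrategy(["/cpu:0"])  # assumed device list


def replica_fn():
  # Each replica contributes its replica id + 1, so with N replicas the
  # all-reduced sum is 1 + 2 + ... + N.
  ctx = tf.distribute.get_replica_context()
  return tf.cast(ctx.replica_id_in_sync_group + 1, tf.float32)


per_replica = strategy.run(replica_fn)
total = strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica, axis=None)
print(total.numpy())  # 1.0 with the single CPU replica assumed above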
Example #3
 def testGlobalStepUpdate(self):
   strategy = parameter_server_strategy.ParameterServerStrategy(
       num_gpus_per_worker=context.num_gpus())
   self._test_global_step_update(strategy)
Example #4
 def testDeviceAssignmentLocalTwoGPUs(self):
   distribution = parameter_server_strategy.ParameterServerStrategy(
       num_gpus_per_worker=2)
   self._test_device_assignment_local(
       distribution, compute_device='GPU', variable_device='CPU', num_gpus=2)
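For reference, compute_device='GPU' with variable_device='CPU' describes parameter-server-style placement: variables live on the CPU while per-replica compute runs on the GPUs. Below is a rough, hypothetical illustration of that placement using plain tf.device, not the actual _test_device_assignment_local helper.

import tensorflow as tf

# Fall back to CPU if no GPU is available, so this sketch runs anywhere.
tf.config.set_soft_device_placement(True)

with tf.device("/cpu:0"):
  weights = tf.Variable(tf.zeros([10]))  # variable_device='CPU'
with tf.device("/gpu:0"):
  result = tf.reduce_sum(weights)        # compute_device='GPU'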
Example #5
 def test_num_replicas_in_sync(self):
   distribution = parameter_server_strategy.ParameterServerStrategy(
       num_gpus_per_worker=2)
   # All the devices on a given worker are in sync, so num_replicas_in_sync
   # equals the number of GPUs on each worker.
   self.assertEqual(2, distribution.num_replicas_in_sync)
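A common consumer of num_replicas_in_sync is loss scaling for synchronous training: per-example losses are divided by the global batch size so that summing gradients across replicas produces the correct mean. The snippet below is a generic sketch using the public API; the strategy, device list, and batch size are assumptions.

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(["/cpu:0"])  # assumed devices
per_replica_batch_size = 8                             # assumed
global_batch_size = per_replica_batch_size * strategy.num_replicas_in_sync


def compute_loss(labels, predictions):
  # Divide by the global batch size (not the per-replica one) so that summing
  # the per-replica gradients yields the true average gradient.
  per_example_loss = tf.keras.losses.mse(labels, predictions)
  return tf.nn.compute_average_loss(
      per_example_loss, global_batch_size=global_batch_size)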
Example #6
 def testTrainableVariables(self):
   distribution = parameter_server_strategy.ParameterServerStrategy()
   self._test_trainable_variable(distribution)
Example #7
    "CoreMirrored1CPU", lambda: mirrored_lib.CoreMirroredStrategy(["/cpu:0"]))
core_mirrored_strategy_with_one_gpu = NamedDistribution(
    "CoreMirrored1GPU",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0"]),
    required_gpus=1)
core_mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
    "CoreMirroredCPUAndGPU",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
core_mirrored_strategy_with_two_gpus = NamedDistribution(
    "CoreMirrored2GPUs",
    lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)
parameter_server_strategy_with_two_gpus = NamedDistribution(
    "ParameterServer2GPUs",
    lambda: parameter_server_strategy.ParameterServerStrategy(
        num_gpus_per_worker=2),
    required_gpus=2)

gradient_descent_optimizer_v1_fn = NamedObject(
    "GradientDescentV1",
    lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = NamedObject("AdagradV1",
                                      lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = NamedObject(
    "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = NamedObject("RmsPropV1",
                                      lambda: rmsprop.RMSPropOptimizer(0.001))

optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]
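NamedObject and NamedDistribution come from TensorFlow's test combinations utilities and are not defined on this page. As a rough sketch of the idea (an assumption, not the actual implementation): each wrapper pairs a human-readable case name with a factory, and NamedDistribution additionally records how many GPUs the case needs so the test runner can skip it on smaller machines.

class NamedObject(object):
  """Pairs a test-parameter factory with a human-readable case name."""

  def __init__(self, name, obj):
    self.name = name
    self._obj = obj

  def __repr__(self):
    return self.name


class NamedDistribution(NamedObject):
  """A named strategy factory plus the number of GPUs it requires."""

  def __init__(self, name, distribution_fn, required_gpus=None):
    super(NamedDistribution, self).__init__(name, distribution_fn)
    self.required_gpus = required_gpus

  @property
  def strategy(self):
    # Build the strategy lazily so importing the module does not touch devices.
    return self._obj()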

gradient_descent_optimizer_v2_fn = NamedObject(