示例#1
0
def _distribution_strategies():
  """Build the list of distribution strategies these tests run against."""
  strategies = []
  strategies.append(
      collective_all_reduce_strategy.CollectiveAllReduceStrategy())
  strategies.append(mirrored_strategy.MirroredStrategy())
  # TODO(pulkitb): Add parameter_server
  # parameter_server_strategy.ParameterServerStrategy(),
  strategies.append(one_device_strategy.OneDeviceStrategy('/cpu:0'))
  return strategies
示例#2
0
    def testNotPassingASessionInGraph(self):
        """Monitor must raise when no session is supplied in graph mode."""
        strategy = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
        optimizer_fn = lambda: gradient_descent.GradientDescentOptimizer(0.2)
        step_function, _ = single_loss_example(optimizer_fn, strategy)

        with context.graph_mode(), ops.Graph().as_default():
            with self.assertRaisesRegexp(ValueError, "Should provide"):
                _ = monitor_lib.Monitor(step_function, session=None)
示例#3
0
    def testPassingASessionInEager(self):
        """Monitor must raise when a session is supplied in eager mode."""
        strategy = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
        optimizer_fn = lambda: gradient_descent.GradientDescentOptimizer(0.2)
        step_function, _ = single_loss_example(optimizer_fn, strategy)

        with session.Session() as sess, context.eager_mode():
            with self.assertRaisesRegexp(ValueError, "Should not provide"):
                _ = monitor_lib.Monitor(step_function, sess)
示例#4
0
    def test_loss_class_as_metric_with_distribution(self):
        """A loss object used as a metric updates correctly under a strategy scope."""
        strategy = one_device_strategy.OneDeviceStrategy('/device:CPU:0')
        with strategy.scope():
            container = compile_utils.MetricsContainer(
                losses_mod.MeanSquaredError())
            y_true = array_ops.ones((10, 5))
            y_pred = array_ops.zeros((10, 5))
            container.update_state(y_true, y_pred)

            # Exactly one metric is created, named after the loss class,
            # and ones-vs-zeros gives an MSE of exactly 1.
            self.assertLen(container.metrics, 1)
            mse_metric = container.metrics[0]
            self.assertEqual(mse_metric.name, 'mean_squared_error')
            self.assertEqual(mse_metric.result().numpy(), 1.)
示例#5
0
 def testMakeInputFnIteratorWithDataset(self):
   """Iterator built from a Dataset input_fn yields each element once."""
   strategy = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
   make_dataset = lambda: dataset_ops.Dataset.range(10)
   # One replica, so each step produces a single-element batch.
   expected_values = [[value] for value in range(10)]
   input_fn = self._input_fn_to_test_input_context(
       make_dataset,
       expected_num_replicas_in_sync=1,
       expected_num_input_pipelines=1,
       expected_input_pipeline_id=0)
   iterator = strategy.make_input_fn_iterator(input_fn)
   self._test_input_fn_iterator(
       iterator, strategy.extended.worker_devices, expected_values)
示例#6
0
    def test_cpu_distribution(self):
        """Vocabulary lookup layer built under a CPU strategy maps tokens to ids."""
        vocab_data = ["earth", "wind", "and", "fire"]
        input_array = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "michigan"]])
        # OOV token ("michigan") maps to 1; vocab entries start at 2.
        expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

        strategy = one_device_strategy.OneDeviceStrategy("/cpu:0")

        with strategy.scope():
            inputs = keras.Input(shape=(None, ), dtype=dtypes.string)
            lookup_layer = get_layer_class()()
            lookup_layer.set_vocabulary(vocab_data)
            outputs = lookup_layer(inputs)
            model = keras.Model(inputs=inputs, outputs=outputs)
        predictions = model.predict(input_array)
        self.assertAllEqual(expected_output, predictions)
示例#7
0
 def testMakeInputFnIteratorWithCallable(self):
   """Iterator built from a get-next callable yields every dataset element."""
   strategy = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
   def make_get_next():
     # Return the get_next callable itself, not a dataset.
     one_shot = dataset_ops.Dataset.range(10).make_one_shot_iterator()
     return one_shot.get_next
   expected_values = [[value] for value in range(10)]
   input_fn = self._input_fn_to_test_input_context(
       make_get_next,
       expected_num_replicas_in_sync=1,
       expected_num_input_pipelines=1,
       expected_input_pipeline_id=0)
   iterator = strategy.make_input_fn_iterator(input_fn)
   # Callable-based iterators cannot be reinitialized.
   self._test_input_fn_iterator(
       iterator, strategy.extended.worker_devices, expected_values,
       test_reinitialize=False)
            return tpu_lib.TPUStrategy(resolver, device_assignment, **kwargs)
        else:
            return tpu_lib.TPUStrategyV1(resolver, steps_per_run,
                                         device_assignment, **kwargs)

    return _create_tpu_strategy


# pylint: disable=g-long-lambda
# Named distribution-strategy fixtures for tf.distribute test combinations.
# Each NamedDistribution wraps a zero-arg factory so the strategy is created
# lazily, and declares the GPUs it needs via required_gpus.

# The default (no-op) strategy obtained from the strategy context.
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
# Single-device strategy pinned to the local CPU.
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU",
    lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
# Single-device strategy pinned to the first local GPU; needs one GPU.
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU",
    lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)
# Single-device strategies pinned to worker task 1 (CPU and GPU variants),
# for multi-worker placement tests.
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1CPU",
    lambda: one_device_lib.OneDeviceStrategy(
        "/job:worker/replica:0/task:1/cpu:0"),  # pylint: disable=line-too-long
    required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1GPU",
    lambda: one_device_lib.OneDeviceStrategy(
        "/job:worker/replica:0/task:1/gpu:0"),  # pylint: disable=line-too-long
    required_gpus=1)
示例#9
0
 def _get_distribution_strategy(self):
   """Return the strategy these tests run under: a single local CPU device."""
   device = "/device:CPU:0"
   return one_device_strategy.OneDeviceStrategy(device)
示例#10
0
 def _get_distribution_strategy(self):
     """Pick a GPU strategy when run as a *_gpu test binary, else CPU."""
     # The test binary's name (argv[0]) encodes the target hardware.
     use_gpu = "test_gpu" in sys.argv[0]
     device = "/device:GPU:0" if use_gpu else "/device:CPU:0"
     return one_device_strategy.OneDeviceStrategy(device)
示例#11
0
    strategy = tpu_lib.TPUStrategy(resolver, steps_per_run=steps_per_run,
                                   device_assignment=device_assignment,
                                   **kwargs)
    return strategy
  return _create_tpu_strategy


# pylint: disable=g-long-lambda
# Named distribution-strategy fixtures for tf.distribute test combinations.
# Each NamedDistribution wraps a zero-arg factory so the strategy is created
# lazily, and declares the hardware it needs (required_gpus / required_tpu).

# The default (no-op) strategy obtained from the strategy context.
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
# Single-device strategy pinned to the local CPU.
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU",
    lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
# Single-device strategy pinned to the first local GPU; needs one GPU.
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU",
    lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)
# TPU strategies varying steps_per_run (loop unrolling on device) and
# whether the whole pod or a single core is used.
tpu_strategy = combinations.NamedDistribution(
    "TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
    "TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
    "TPUOneCore",
    _get_tpu_strategy_creator(steps_per_run=2, use_single_core=True),
    required_tpu=True)
tpu_strategy_one_step_one_core = combinations.NamedDistribution(
    "TPUOneStepOneCore",
示例#12
0
def create_one_device_strategy():
    """Factory for a OneDeviceStrategy pinned to the local CPU."""
    cpu_device = 'cpu:0'
    return one_device_strategy.OneDeviceStrategy(cpu_device)