def testInputContextPropertyLocal(self):
    d = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
    input_fn = self._input_fn_to_test_input_context(
        expected_num_replicas_in_sync=1,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    d.make_input_fn_iterator(input_fn)
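For context, the input_fn built by this helper receives a `tf.distribute.InputContext`. A minimal sketch of such a checking input_fn, using the public `InputContext` properties and the same `dataset_ops` alias as the surrounding snippets (the helper name and dataset are illustrative, not the library's code):

def make_checking_input_fn(expected_num_replicas_in_sync,
                           expected_num_input_pipelines,
                           expected_input_pipeline_id):
    # Build an input_fn that asserts on the InputContext handed to it
    # by the strategy before returning a dataset.
    def input_fn(input_context):
        assert input_context.num_replicas_in_sync == expected_num_replicas_in_sync
        assert input_context.num_input_pipelines == expected_num_input_pipelines
        assert input_context.input_pipeline_id == expected_input_pipeline_id
        return dataset_ops.Dataset.range(10)
    return input_fn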
Example #2
    def testNotPassingASessionInGraph(self):
        distribution = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
        step_function, _ = single_loss_example(
            lambda: gradient_descent.GradientDescentOptimizer(0.2),
            distribution)

        with context.graph_mode(), ops.Graph().as_default():
            with self.assertRaisesRegexp(ValueError, "Should provide"):
                _ = monitor_lib.Monitor(step_function, session=None)
Example #3
    def testPassingASessionInEager(self):
        distribution = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
        step_function, _ = single_loss_example(
            lambda: gradient_descent.GradientDescentOptimizer(0.2),
            distribution)

        with session.Session() as sess, context.eager_mode():
            with self.assertRaisesRegexp(ValueError, "Should not provide"):
                _ = monitor_lib.Monitor(step_function, sess)
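Read together, the two tests above pin down the valid pairings. A hedged sketch of constructions that should succeed, reusing the identifiers from the snippets (whether they run as-is depends on Monitor internals not shown here):

# In graph mode, Monitor requires an explicit session.
with context.graph_mode(), ops.Graph().as_default():
    with session.Session() as sess:
        monitor = monitor_lib.Monitor(step_function, sess)

# In eager mode, a session must not be passed.
with context.eager_mode():
    monitor = monitor_lib.Monitor(step_function, session=None)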
Example #4
def testMakeInputFnIterator(self):
    d = one_device_strategy.OneDeviceStrategy("/device:CPU:0")
    dataset_fn = lambda: dataset_ops.Dataset.range(10)
    expected_values = [[i] for i in range(10)]
    input_fn = self._input_fn_to_test_input_context(
        dataset_fn,
        expected_num_replicas_in_sync=1,
        expected_num_input_pipelines=1,
        expected_input_pipeline_id=0)
    iterator = d.make_input_fn_iterator(input_fn)
    self._test_input_fn_iterator(iterator, d.extended.worker_devices,
                                 expected_values)
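For orientation, a loose sketch of the check `_test_input_fn_iterator` performs, assuming the contrib-era iterator API (`initialize()` / `get_next()`); this is an illustration, not the actual test-lib helper:

def _test_input_fn_iterator(self, iterator, devices, expected_values):
    self.evaluate(iterator.initialize())
    for expected_value in expected_values:
        # With a single worker device, each step yields one element;
        # wrap it in a list to match the per-device expected shape.
        computed = self.evaluate(iterator.get_next())
        self.assertEqual(expected_value, [computed])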
Example #5
  @property
  def required_gpus(self):
    return self._required_gpus

  @property
  def required_tpu(self):
    return self._required_tpu


# pylint: disable=g-long-lambda
default_strategy = NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_distribution_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = NamedDistribution(
    "OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
tpu_strategy = NamedDistribution(
    "TPU", lambda: tpu_lib.TPUStrategy(
        TPUClusterResolver(""), steps_per_run=2),
    required_tpu=True)
tpu_strategy_one_step = NamedDistribution(
    "TPUOneStep", lambda: tpu_lib.TPUStrategy(
        TPUClusterResolver(""), steps_per_run=1),
    required_tpu=True)
mirrored_strategy_with_one_cpu = NamedDistribution(
    "Mirrored1CPU",
    lambda: mirrored_lib.MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = NamedDistribution(
    "Mirrored1GPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]),
    required_gpus=1)
Example #6
def _get_distribution_strategy(self):
    return one_device_strategy.OneDeviceStrategy("/device:CPU:0")
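A hypothetical caller for this hook (the test name and body are illustrative): a test fetches the strategy and enters its scope before building anything device-placed.

def testRunsUnderStrategyScope(self):
    distribution = self._get_distribution_strategy()
    with distribution.scope():
        # Variables and layers created here are placed per the strategy.
        ...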
Example #7
    @property
    def required_gpus(self):
        return self._required_gpus

    @property
    def required_tpu(self):
        return self._required_tpu


default_strategy = NamedDistribution(
    "Default",
    distribute_lib._default_distribution_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = NamedDistribution(
    "OneDeviceCPU",
    one_device_strategy.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
tpu_strategy_single_iteration = NamedDistribution(
    "TPUSingleIteration",
    tpu_strategy.TPUStrategy(iterations_per_step=1),
    required_tpu=True)
tpu_strategy = NamedDistribution("TPU",
                                 tpu_strategy.TPUStrategy(),
                                 required_tpu=True)
# Note that we disable prefetching for testing since prefetching makes
# the input non-deterministic.
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
    "MirroredCPUAndGPU",
    mirrored_strategy.MirroredStrategy(["/gpu:0", "/cpu:0"],
                                       prefetch_on_device=False),
    required_gpus=1)
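These named strategies are typically consumed by a test-parameter generator. A hedged usage sketch in the style of the contrib `combinations` module (decorator and argument names follow that module's conventions):

@combinations.generate(combinations.combine(
    distribution=[one_device_strategy, mirrored_strategy_with_gpu_and_cpu],
    mode=["graph", "eager"]))
def testWithDistribution(self, distribution):
    # The generator is assumed to unwrap NamedDistribution via its
    # `strategy` property before invoking the test.
    with distribution.scope():
        ...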
Example #8
        self._required_gpus = required_gpus

    def __repr__(self):
        return self._name

    @property
    def strategy(self):
        return self._distribution

    @property
    def required_gpus(self):
        return self._required_gpus


one_device_strategy = NamedDistribution(
    "OneDeviceCPU", one_device_strategy.OneDeviceStrategy("/cpu:0"), None)
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
    "MirroredCPUAndGPU",
    mirrored_strategy.MirroredStrategy(["/gpu:0", "/cpu:0"]), 1)
mirrored_strategy_without_prefetch = NamedDistribution(
    "MirroredCPUAndGPUNoPrefetch",
    mirrored_strategy.MirroredStrategy(["/gpu:0", "/cpu:0"],
                                       prefetch_on_device=False), 1)
mirrored_strategy_with_two_gpus = NamedDistribution(
    "Mirrored2GPUs", mirrored_strategy.MirroredStrategy(["/gpu:0", "/gpu:1"]),
    2)

adam_optimizer_v1_fn = NamedObject("AdamV1",
                                   lambda: adam.AdamOptimizer(0.2, epsilon=1))
gradient_descent_optimizer_v1_fn = NamedObject(
    "GradientDescentV1",
    lambda: gradient_descent.GradientDescentOptimizer(0.2))