Code example #1
class MultiWorkerCrossDeviceOpsTest(multi_worker_test_base.MultiWorkerTestBase,
                                    CrossDeviceOpsTestBase):

    worker_devices = [
        "/job:worker/replica:0/task:0", "/job:worker/replica:0/task:1"
    ]
    multi_worker_allreduce_combinations = combinations.combine(
        cross_device_ops=[
            combinations.NamedObject(
                "MultiWorkerAllReduce",
                cross_device_ops_lib.MultiWorkerAllReduce(
                    worker_devices, 2, ("pscpu/pscpu", 2, -1), 0, 0, 0)),
            combinations.NamedObject(
                "MultiWorkerAllReducePack",
                cross_device_ops_lib.MultiWorkerAllReduce(
                    worker_devices, 2, ("pscpu/pscpu", 2, -1), 1, 0, 0)),
            combinations.NamedObject(
                "MultiWorkerAllReduceAggregation",
                cross_device_ops_lib.MultiWorkerAllReduce(
                    worker_devices, 2, ("pscpu/pscpu", 2, -1), 0, 100, 10)),
            combinations.NamedObject(
                "MultiWorkerAllReduceMultipleSpecs",
                cross_device_ops_lib.MultiWorkerAllReduce(
                    worker_devices, 2, [("pscpu/pscpu", 2, 100),
                                        ("xring", 2, -1)], 0, 0, 0)),
        ],
        distribution=[
            combinations.NamedDistribution(
                "MirroredCPU",
                lambda: mirrored_strategy.MirroredStrategy(["/device:CPU:0"]),
                required_gpus=0),
            combinations.NamedDistribution(
                "Mirrored1GPU",
                lambda: mirrored_strategy.MirroredStrategy(["/device:GPU:0"]),
                required_gpus=1),
            combinations.NamedDistribution(
                "Mirrored2GPUs",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    ["/device:GPU:0", "/device:GPU:1"]),
                required_gpus=2),
        ],
        mode=["graph"])

    @combinations.generate(multi_worker_allreduce_combinations)
    def testReductionAndBroadcast(self, cross_device_ops, distribution):
        distribution.configure(
            cluster_spec={
                "worker": [
                    "/job:worker/replica:0/task:0",
                    "/job:worker/replica:0/task:1"
                ]
            })
        with distribution.scope():
            self._testReductionAndBroadcast(cross_device_ops, distribution)
Code example #2
class ClusterParametersShouldFailTest(test.TestCase, parameterized.TestCase):

  @framework_combinations.generate(  # pylint: disable=redundant-keyword-arg
      framework_combinations.combine(
          ds1=combinations.NamedDistribution(
              "Strategy1", lambda: None, has_chief=True, num_workers=2),
          ds2=combinations.NamedDistribution(
              "Strategy2", lambda: None, has_chief=True, num_workers=2),
      ),
      test_combinations=(combinations.ClusterCombination(),))
  def testMultipleDistributionMultiWorker(self, ds1, ds2):
    # combinations library should raise an exception.
    pass
Code example #3
class CombinationsExpectedFailureTest(test.TestCase, parameterized.TestCase):

  @combinations.generate(
      combinations.combine(distribution=[
          combinations.NamedDistribution(
              "OneChiefOneWorker", lambda: None, has_chief=True, num_workers=1),
          combinations.NamedDistribution(
              "TwoWorkers", lambda: None, has_chief=False, num_workers=2),
      ]))
  def testMultiWorkerCanFail(self, distribution):
    resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
    # This should fail.
    self.assertIsNone(resolver.task_id)
Code example #4
class ClusterParametersTest(test.TestCase, parameterized.TestCase):
  # For this test we need to use `framework.test_combinations` because our
  # `generate` eats the cluster parameters.
  #
  # Note that we don't have a standalone combination for ClusterParameters, so
  # we should use GPUCombination which contains it.

  @framework_combinations.generate(
      framework_combinations.combine(distribution=[
          combinations.NamedDistribution(
              "HasClusterParams", lambda: None, has_chief=True, num_workers=2),
      ]),
      test_combinations=(combinations.GPUCombination(),))
  def testClusterParams(self, distribution, has_chief, num_workers):
    self.assertTrue(has_chief)
    self.assertEqual(num_workers, 2)

  @framework_combinations.generate(
      framework_combinations.combine(distribution=[
          combinations.NamedDistribution("NoClusterParams", lambda: None),
      ]),
      test_combinations=(combinations.GPUCombination(),))
  def testClusterParamsHasDefault(self, distribution, has_chief, num_workers):
    self.assertFalse(has_chief)
    self.assertEqual(num_workers, 1)

  @framework_combinations.generate(
      framework_combinations.combine(v=1),
      test_combinations=(combinations.GPUCombination(),))
  def testClusterParamsNoStrategy(self, v, has_chief, num_workers):
    self.assertFalse(has_chief)
    self.assertEqual(num_workers, 1)

  @framework_combinations.generate(
      framework_combinations.combine(distribution=[
          combinations.NamedDistribution(
              "WithClusterParams", lambda: None, has_chief=True, num_workers=2),
          combinations.NamedDistribution("WithoutClusterParams", lambda: None),
      ]),
      test_combinations=(combinations.GPUCombination(),))
  def testClusterParamsAreOptional(self, distribution):
    # If combinations library doesn't raise an exception, the test is passed.
    pass

  @framework_combinations.generate(
      framework_combinations.combine(
          ds1=combinations.NamedDistribution(
              "Strategy1", lambda: None, has_chief=True, num_workers=0),
          ds2=combinations.NamedDistribution(
              "Strategy2", lambda: None, has_chief=False, num_workers=1),
          ds3=combinations.NamedDistribution(
              "Strategy3", lambda: None, has_chief=True, num_workers=0),
      ),
      test_combinations=(combinations.GPUCombination(),))
  def testMultipleDistributionSingleWorker(self, ds1, ds2, ds3):
    # If combinations library doesn't raise an exception, the test is passed.
    pass
Code example #5
def parameter_server_strategy_fn(
    name, num_workers, num_ps, required_gpus=0,
    variable_partitioner=DEFAULT_PARTITIONER):
  return combinations.NamedDistribution(
      name,
      _get_ps_strategy_creator(
          num_workers=num_workers, num_ps=num_ps, required_gpus=required_gpus,
          variable_partitioner=variable_partitioner),
      required_gpus=required_gpus,
      num_workers=num_workers,
      has_chief=True,
      num_ps=num_ps)
Code example #6
def parameter_server_strategy_fn(
    name, num_workers, num_ps, required_gpus=0,
    variable_partitioner=sharded_variable.FixedShardsPartitioner(2)):
  return combinations.NamedDistribution(
      name,
      _get_ps_strategy_creator(
          num_workers=num_workers, num_ps=num_ps, required_gpus=required_gpus,
          variable_partitioner=variable_partitioner),
      required_gpus=required_gpus,
      num_workers=num_workers,
      has_chief=True,
      num_ps=num_ps)
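
A minimal usage sketch of the factory above: the returned NamedDistribution is plugged into combinations.combine the same way the other examples do. The test class name, method name, strategy name, worker/ps counts, and the variables import are illustrative assumptions, not taken from the original snippets.

# Hypothetical usage of parameter_server_strategy_fn; names and counts are made up.
ps_strategy = parameter_server_strategy_fn(
    "ParameterServer2Workers1PS", num_workers=2, num_ps=1)


class ParameterServerStrategyUsageTest(test.TestCase, parameterized.TestCase):

  @combinations.generate(
      combinations.combine(distribution=[ps_strategy], mode=["eager"]))
  def testCreateVariableUnderStrategy(self, distribution):
    # Variables created inside the scope are sharded by the
    # variable_partitioner passed to parameter_server_strategy_fn.
    with distribution.scope():
      v = variables.Variable([1.0, 2.0])
    self.assertIsNotNone(v)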
Code example #7
class ClusterParametersShouldFailTest(test.TestCase, parameterized.TestCase):
    @framework_combinations.generate(
        framework_combinations.combine(
            ds1=combinations.NamedDistribution("Strategy1",
                                               lambda: None,
                                               has_chief=True,
                                               num_workers=2),
            ds2=combinations.NamedDistribution("Strategy2",
                                               lambda: None,
                                               has_chief=True,
                                               num_workers=2),
        ),
        test_combinations=(combinations.ClusterCombination(), ))
    def testMultipleDistributionMultiWorker(self, ds1, ds2):
        # combinations library should raise an exception.
        pass

    @combinations.generate(combinations.combine(num_workers=2, ))
    def testUseWithoutStrategy(self):
        # There's no perfect way to check if the test runs in a subprocess. We
        # approximate by checking the presence of TF_CONFIG, which is normally not
        # set in the main process.
        self.assertNotEqual(os.getenv("TF_CONFIG"), "")
        raise ValueError("actually run")
Code example #8
# Two-worker pool where each worker gets its own GPU. Useful for testing MWMS
# on a single host.
_two_worker_pool_noshare = _deferred_pool_runner(
    has_chief=True,
    num_workers=1,
    initializer=_get_multi_worker_mirrored_creator(required_gpus=0),
    share_gpu=False)
_four_worker_pool = _deferred_pool_runner(
    has_chief=True,
    num_workers=3,
    initializer=_get_multi_worker_mirrored_creator(required_gpus=0))

# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU", lambda: OneDeviceStrategy("/cpu:0"), required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU", lambda: OneDeviceStrategy("/gpu:0"), required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1CPU",
    lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1GPU",
    lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/gpu:0"),
    required_gpus=1)
tpu_strategy = combinations.NamedDistribution(
    "TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
Code example #9
    cpus = tf.config.list_physical_devices("CPU")

    tf.config.set_logical_device_configuration(cpus[0], [
        tf.config.LogicalDeviceConfiguration(),
        tf.config.LogicalDeviceConfiguration(),
    ])


@tf.__internal__.distribute.combinations.generate(
    tf.__internal__.test.combinations.combine(
        distribution=[
            tf.__internal__.distribute.combinations.
            mirrored_strategy_with_gpu_and_cpu,
            ds_combinations.NamedDistribution(
                "Collective2CPUs",
                # pylint: disable=g-long-lambda
                lambda: tf.distribute.MultiWorkerMirroredStrategy.
                _from_local_devices(("/device:CPU:0", "/device:CPU:1")),
                required_gpus=0)
        ],
        mode=["graph", "eager"]))
class MirroredVariableCreationTest(tf.test.TestCase):
    """Base class that tests mirrored variable creator.

    Currently it assumes all strategy objects have two replicas.
    """
    @classmethod
    def setUpClass(cls):
        _mimic_two_cpus()

    def assertAllDifferent(self, objs):
        for i in range(len(objs)):
Code example #10
      b0, b1 = distribution.experimental_local_results(result_b)
      c0, c1 = distribution.experimental_local_results(result_c)
      self.assertEqual("main/a:0", a0.name)
      self.assertEqual("main/a/replica_1:0", a1.name)
      self.assertEqual("main/b:0", b0.name)
      self.assertEqual("main/b/replica_1:0", b1.name)
      self.assertEqual("main/foo/c:0", c0.name)
      self.assertEqual("main/foo/c/replica_1:0", c1.name)


@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored3Devices",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    ["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
                required_gpus=2)
        ],
        mode=["graph", "eager"]))
class MirroredThreeDeviceDistributionTest(
    strategy_test_lib.DistributionTestBase,
    parameterized.TestCase):

  def testThreeDevices(self, distribution):
    def model_fn():
      v = variable_scope.variable(1.0, name="foo")
      ds_context.get_replica_context().merge_call(lambda _: _)
      return v

    with distribution.scope():
Code example #11
def get_gpus():
  gpus = context.context().list_logical_devices("GPU")
  actual_gpus = []
  for gpu in gpus:
    if "localhost" not in gpu.name:
      actual_gpus.append(gpu.name)
  return actual_gpus


@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(get_gpus()),
                required_gpus=1)
        ],
        mode=["eager"]))
class RemoteSingleWorkerMirroredStrategyEager(
    multi_worker_test_base.SingleWorkerTestBaseEager,
    strategy_test_lib.RemoteSingleWorkerMirroredStrategyBase):

  def _get_num_gpus(self):
    return len(get_gpus())

  def testNumReplicasInSync(self, distribution):
    self._testNumReplicasInSync(distribution)

  def testMinimizeLoss(self, distribution):
Code example #12
File: combinations_test.py  Project: chrisvon62/AiBot
                "TwoWorkers", lambda: None, has_chief=False, num_workers=2),
        ]))
    def testMultiWorkerCanFail(self, distribution):
        resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
        # This should fail.
        self.assertIsNone(resolver.task_id)


# Tests that we *actually* run the test method in multiple workers instead of
# just passing silently. More importantly, it verifies that the test can fail.
# Note that unittest.expectedFailure doesn't work with parameterized test
# methods, so we have to decorate the class instead.
@unittest.expectedFailure
@combinations.generate(
    combinations.combine(distribution=[
        combinations.NamedDistribution(
            "OneChiefOneWorker", lambda: None, has_chief=True, num_workers=1),
        combinations.NamedDistribution(
            "TwoWorkers", lambda: None, has_chief=False, num_workers=2),
    ]))
class CombinationsOnClassMultiWorkerExpectedFailureTest(
        test.TestCase, parameterized.TestCase):
    def test(self, distribution):
        resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
        # This should fail.
        self.assertIsNone(resolver.task_id)


if __name__ == "__main__":
    test_util.main()
Code example #13
from absl.testing import parameterized
import numpy
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute.single_loss_example import minimize_loss_example
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables

mirrored_strategy_with_gpu_and_cpu = combinations.NamedDistribution(
    "MirroredCPUAndGPU",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
    required_gpus=1)
mirrored_strategy_with_two_gpus = combinations.NamedDistribution(
    "Mirrored2GPUs",
    lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
    required_gpus=2)

# pylint: disable=g-long-lambda
gradient_descent_optimizer_v2_fn = combinations.NamedObject(
    "GradientDescentV2",
    lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
adagrad_optimizer_v2_fn = combinations.NamedObject(
    "AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))

optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
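
The named strategies and optimizers above are typically crossed into a single parameter grid; a minimal sketch of such a combine call (the variable name is illustrative only).

# Hypothetical grid: every (strategy, optimizer) pair, run in graph mode.
distribution_and_optimizer_combinations = combinations.combine(
    distribution=[
        mirrored_strategy_with_gpu_and_cpu,
        mirrored_strategy_with_two_gpus,
    ],
    optimizer_fn=optimizers_v2,
    mode=["graph"])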
Code example #14
from tensorflow.python.ops import lookup_ops

_sixteen_worker_pool = strategy_combinations._deferred_pool_runner(
    has_chief=True,
    num_workers=8,
    initializer=strategy_combinations._get_multi_worker_mirrored_creator(
        required_gpus=0))


@combinations.generate(
    combinations.combine(strategy=[
        combinations.NamedDistribution(
            "MultiWorkerMirrored8x1CPU",
            strategy_combinations._get_multi_worker_mirrored_creator(
                required_gpus=0),
            has_chief=True,
            num_workers=8,
            pool_runner_fn=_sixteen_worker_pool,
            no_xla=True,
        ),
    ],
                         mode=["eager"]))
class SaveModelForMultipleWorkers(test.TestCase, parameterized.TestCase):
    def test_read_sync_on_read_variable(self, strategy):
        # TODO(b/178943315): Enable test when the design in b/178943315 is
        # implemented.
        self.skipTest(
            "This test fails today due to issue in multiple workers trying to write"
            " to same file location: b/178943315")

        class Model(tf.Module):
Code example #15
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized
import numpy as np

from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.eager import test

contrib_mirrored_strategies = [
    combinations.NamedDistribution(
        "ContribMirrored1CPU",
        lambda: mirrored_strategy.MirroredStrategy(["/cpu:0"])),
    combinations.NamedDistribution(
        "ContribMirrored1GPU",
        lambda: mirrored_strategy.MirroredStrategy(["/gpu:0"]),
        required_gpus=1),
    combinations.NamedDistribution(
        "ContribMirroredCPUAndGPU",
        lambda: mirrored_strategy.MirroredStrategy(["/cpu:0", "/gpu:0"]),
        required_gpus=1),
    combinations.NamedDistribution(
        "ContribMirrored2GPU",
        lambda: mirrored_strategy.MirroredStrategy(["/gpu:0", "/gpu:1"]),
        required_gpus=2),
]
Code example #16
        # Steps per run is only supported in TF 1.x
        if tf2.enabled():
            strategy = tpu_lib.TPUStrategy(resolver, device_assignment,
                                           **kwargs)
        else:
            strategy = tpu_lib.TPUStrategyV1(resolver, steps_per_run,
                                             device_assignment, **kwargs)
        strategy._enable_packed_variable_in_eager_mode = enable_packed_variable  # pylint: disable=protected-access
        return strategy

    return _create_tpu_strategy


# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU",
    lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU",
    lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1CPU",
    lambda: one_device_lib.OneDeviceStrategy(
        "/job:worker/replica:0/task:1/cpu:0"),  # pylint: disable=line-too-long
    required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
Code example #17
    cpus = config.list_physical_devices("CPU")

    config.set_logical_device_configuration(cpus[0], [
        context.LogicalDeviceConfiguration(),
        context.LogicalDeviceConfiguration(),
    ])


@combinations.generate(
    combinations.combine(
        distribution=[
            strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
            combinations.NamedDistribution(
                "Collective2CPUs",
                # pylint: disable=g-long-lambda
                lambda: collective_all_reduce_strategy.
                CollectiveAllReduceStrategy._from_local_devices(
                    ("/device:CPU:0", "/device:CPU:1")),
                required_gpus=0)
        ],
        mode=["graph", "eager"]))
class MirroredVariableCreationTest(test.TestCase):
    """Base class that tests mirrored variable creator.

    Currently it assumes all strategy objects have two replicas.
    """
    @classmethod
    def setUpClass(cls):
        _mimic_two_cpus()

    # TODO(priyag): Modify more tests to use this helper and check more
Code example #18
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test

# TODO(rchao): Merge parameter_server_strategy_with_two_gpus into
# third_party/tensorflow/python/distribute/strategy_combinations.py
# pylint: disable=g-long-lambda
parameter_server_strategy_with_two_gpus = combinations.NamedDistribution(
    'ParameterServer2GPUs',
    lambda: parameter_server_strategy.ParameterServerStrategy(
        num_gpus_per_worker=2),
    required_gpus=2)


def get_model():
    x = keras.layers.Input(shape=(3, ), name='input')
    y = keras.layers.Dense(4, name='dense')(x)
    model = keras.Model(x, y)
    return model


class MirroredStrategyOptimizerV2Test(test.TestCase, parameterized.TestCase):
    @combinations.generate(
        combinations.combine(distribution=[
            parameter_server_strategy_with_two_gpus,
Code example #19
File: combinations_test.py  Project: chrisvon62/AiBot
class ClusterCombinationTest(test.TestCase, parameterized.TestCase):
    # For this test we need to use `framework.test_combinations` because our
    # `generate` eats the cluster parameters.
    #
    # Note that we don't have a standalone combination for ClusterParameters, so
    # we should use GPUCombination which contains it.

    @framework_combinations.generate(
        framework_combinations.combine(distribution=[
            combinations.NamedDistribution("HasClusterParams",
                                           lambda: None,
                                           has_chief=True,
                                           num_workers=2),
        ]),
        test_combinations=(combinations.ClusterCombination(), ))
    def testClusterParams(self, distribution, has_chief, num_workers):
        self.assertTrue(has_chief)
        self.assertEqual(num_workers, 2)

    @framework_combinations.generate(
        framework_combinations.combine(distribution=[
            combinations.NamedDistribution("NoClusterParams", lambda: None),
        ]),
        test_combinations=(combinations.ClusterCombination(), ))
    def testClusterParamsHasDefault(self, distribution, has_chief,
                                    num_workers):
        self.assertFalse(has_chief)
        self.assertEqual(num_workers, 1)

    @framework_combinations.generate(
        framework_combinations.combine(v=1),
        test_combinations=(combinations.ClusterCombination(), ))
    def testClusterParamsNoStrategy(self, v, has_chief, num_workers):
        self.assertFalse(has_chief)
        self.assertEqual(num_workers, 1)

    @framework_combinations.generate(
        framework_combinations.combine(distribution=[
            combinations.NamedDistribution("WithClusterParams",
                                           lambda: None,
                                           has_chief=True,
                                           num_workers=2),
            combinations.NamedDistribution("WithoutClusterParams",
                                           lambda: None),
        ]),
        test_combinations=(combinations.ClusterCombination(), ))
    def testClusterParamsAreOptional(self, distribution):
        # If combinations library doesn't raise an exception, the test is passed.
        pass

    @framework_combinations.generate(
        framework_combinations.combine(
            ds1=combinations.NamedDistribution("Strategy1",
                                               lambda: None,
                                               has_chief=True,
                                               num_workers=0),
            ds2=combinations.NamedDistribution("Strategy2",
                                               lambda: None,
                                               has_chief=False,
                                               num_workers=1),
            ds3=combinations.NamedDistribution("Strategy3",
                                               lambda: None,
                                               has_chief=True,
                                               num_workers=0),
        ),
        test_combinations=(combinations.ClusterCombination(), ))
    def testMultipleDistributionSingleWorker(self, ds1, ds2, ds3):
        # If combinations library doesn't raise an exception, the test is passed.
        pass

    @combinations.generate(combinations.combine(num_workers=2, ))
    def testUseWithoutStrategy(self):
        # There's no perfect way to check if the test runs in a subprocess. We
        # approximate by checking the presence of TF_CONFIG, which is normally not
        # set in the main process.
        self.assertNotEqual(os.getenv("TF_CONFIG"), "")
Code example #20
    # creates the strategy.
    try:
      multi_process_runner.get_barrier().wait()
    except ValueError:
      # If the creator is called in the main process,
      # multi_process_runner.get_barrier() raises ValueError, which is safe to
      # ignore.
      pass
    return strategy

  return _create_multi_worker_mirrored


# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU", lambda: OneDeviceStrategy("/cpu:0"), required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU", lambda: OneDeviceStrategy("/gpu:0"), required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1CPU",
    lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1GPU",
    lambda: OneDeviceStrategy("/job:worker/replica:0/task:1/gpu:0"),
    required_gpus=1)
tpu_strategy = combinations.NamedDistribution(
    "TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
Code example #21
                topology,
                core_assignment=device_assignment_lib.SINGLE_CORE_ASSIGNMENT)

        # Steps per run is only supported in TF 1.x
        if tf2.enabled():
            return tpu_lib.TPUStrategy(resolver, device_assignment, **kwargs)
        else:
            return tpu_lib.TPUStrategyV1(resolver, steps_per_run,
                                         device_assignment, **kwargs)

    return _create_tpu_strategy


# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU",
    lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU",
    lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)
one_device_strategy_on_worker_1 = combinations.NamedDistribution(
    "OneDeviceOnWorker1CPU",
    lambda: one_device_lib.OneDeviceStrategy(
        "/job:worker/replica:0/task:1/cpu:0"),  # pylint: disable=line-too-long
    required_gpus=None)
one_device_strategy_gpu_on_worker_1 = combinations.NamedDistribution(
Code example #22
    device_assignment = None
    if use_single_core:
      device_assignment = device_assignment_lib.DeviceAssignment(
          topology, core_assignment=device_assignment_lib.
          SINGLE_CORE_ASSIGNMENT)

    strategy = tpu_lib.TPUStrategy(resolver, steps_per_run=steps_per_run,
                                   device_assignment=device_assignment,
                                   **kwargs)
    return strategy
  return _create_tpu_strategy


# pylint: disable=g-long-lambda
default_strategy = combinations.NamedDistribution(
    "Default",
    distribution_strategy_context._get_default_strategy,  # pylint: disable=protected-access
    required_gpus=None)
one_device_strategy = combinations.NamedDistribution(
    "OneDeviceCPU",
    lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
    required_gpus=None)
one_device_strategy_gpu = combinations.NamedDistribution(
    "OneDeviceGPU",
    lambda: one_device_lib.OneDeviceStrategy("/gpu:0"),
    required_gpus=1)
tpu_strategy = combinations.NamedDistribution(
    "TPU", _get_tpu_strategy_creator(steps_per_run=2), required_tpu=True)
tpu_strategy_one_step = combinations.NamedDistribution(
    "TPUOneStep", _get_tpu_strategy_creator(steps_per_run=1), required_tpu=True)
tpu_strategy_one_core = combinations.NamedDistribution(
    "TPUOneCore",