  def testSummaryForReplicaZeroOnly(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_summary_for_replica_zero_only(strategy)

  def testMinimizeLossGraphMirroredStrategy(self):
    with context.graph_mode():
      strategy = mirrored_strategy.MirroredStrategy(
          mirrored_strategy.all_local_devices(),
          cross_device_ops=self._make_cross_device_ops())
      strategy.configure(cluster_spec=self._cluster_spec)
      self._test_minimize_loss_graph(strategy, learning_rate=0.05)
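
Both tests build a multi-worker MirroredStrategy over all local devices with an explicit cross-device-ops object, then configure it with the cluster spec. For comparison, a minimal sketch of the same pairing through the public tf.distribute API (assuming TF 2.x; the reduce-to-one-device op below stands in for the all-reduce used by the tests):

# Sketch only, assuming TF 2.x; uses the public tf.distribute API rather than
# the internal mirrored_strategy test modules shown above.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(
    cross_device_ops=tf.distribute.ReductionToOneDevice())
print("replicas in sync:", strategy.num_replicas_in_sync)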
Example #3
def get_strategy_object(strategy_cls):
    if strategy_cls == mirrored_strategy.MirroredStrategy:
        return strategy_cls(
            mirrored_strategy.all_local_devices(),
            cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce(
                ['/job:worker/task:0', '/job:worker/task:1'],
                context.num_gpus()))
    else:
        # CollectiveAllReduceStrategy and ParameterServerStrategy.
        return strategy_cls()
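
A minimal usage sketch for this helper (the TF-internal import paths below are assumptions and may differ between TensorFlow versions):

# Usage sketch for get_strategy_object; both imports are TF-internal modules.
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import mirrored_strategy

# MirroredStrategy takes the multi-worker all-reduce branch above.
mirrored = get_strategy_object(mirrored_strategy.MirroredStrategy)

# Any other strategy class falls through to its default constructor.
collective = get_strategy_object(
    collective_all_reduce_strategy.CollectiveAllReduceStrategy)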
  def __init__(self,
              container_strategy,
              devices=None,
              num_gpus_per_worker=None,
              cross_device_ops=None,
              auto_shard_dataset=False):
   if devices is None:
     devices = mirrored_strategy.all_local_devices(num_gpus_per_worker)
   elif num_gpus_per_worker is not None:
     raise ValueError(
         "Must only specify one of `devices` and `num_gpus_per_worker`.")
   super(MirroredExtended, self).__init__(container_strategy, devices,
                                          cross_device_ops)
   self._auto_shard_dataset = auto_shard_dataset
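
The constructor takes either an explicit devices list or num_gpus_per_worker, never both; with neither, all_local_devices() selects every local GPU and falls back to CPU. A minimal sketch of the same choice through the public API (assuming TF 2.x tf.distribute, not the internal MirroredExtended class above):

# Sketch only, assuming TF 2.x.
import tensorflow as tf

# Explicit device list: variables are mirrored onto exactly these devices.
cpu_strategy = tf.distribute.MirroredStrategy(devices=["/cpu:0"])

# No devices given: all local GPUs are used, or the CPU if none are available.
default_strategy = tf.distribute.MirroredStrategy()

print(cpu_strategy.num_replicas_in_sync, default_strategy.num_replicas_in_sync)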
Example #5
 def __init__(self,
              container_strategy,
              devices=None,
              num_gpus_per_worker=None,
              cross_device_ops=None,
              auto_shard_dataset=False):
     if devices is None:
         devices = mirrored_strategy.all_local_devices(num_gpus_per_worker)
     elif num_gpus_per_worker is not None:
         raise ValueError(
             "Must only specify one of `devices` and `num_gpus_per_worker`."
         )
     super(MirroredExtended, self).__init__(container_strategy, devices,
                                            cross_device_ops)
     self._auto_shard_dataset = auto_shard_dataset
def get_strategy_object(strategy_cls):
    if strategy_cls == mirrored_strategy.MirroredStrategy:
        return strategy_cls(mirrored_strategy.all_local_devices())
    else:
        # CollectiveAllReduceStrategy and ParameterServerStrategy.
        return strategy_cls()
Example #7
        self.evaluate(update_ops)

      updated_var_values = self.evaluate(mock_model.variables)
      # All variables start at 1.0 and get two updates of 0.25.
      self.assertAllEqual(0.5 * np.ones([10, 1]), updated_var_values[0])
      self.assertAllEqual([0.5], updated_var_values[1])


@combinations.generate(
    combinations.combine(
        distribution=[
            combinations.NamedDistribution(
                "Mirrored",
                # pylint: disable=g-long-lambda
                lambda: mirrored_strategy.MirroredStrategy(
                    devices=mirrored_strategy.all_local_devices(),
                    cross_device_ops=cross_device_ops_lib.MultiWorkerAllReduce([
                        "/job:worker/task:0", "/job:worker/task:1"
                    ], context.num_gpus())),
                required_gpus=1)
        ],
        mode=["graph"]))
class MultiWorkerMirroredStrategyTest(
    multi_worker_test_base.MultiWorkerTestBase,
    strategy_test_lib.DistributionTestBase):

  def _configure_distribution_strategy(self, distribution):
    cluster_spec = server_lib.ClusterSpec({
        "worker": ["/job:worker/task:0", "/job:worker/task:1"]
    })
    distribution.configure(cluster_spec=cluster_spec)
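
The test configures the strategy with a two-worker ClusterSpec. A minimal sketch of the same cluster shape via the public alias tf.train.ClusterSpec (the host:port addresses below are hypothetical placeholders):

# Sketch only; tf.train.ClusterSpec is the public alias of server_lib.ClusterSpec,
# and the worker addresses are hypothetical placeholders.
import tensorflow as tf

cluster_spec = tf.train.ClusterSpec({
    "worker": ["localhost:12345", "localhost:23456"],  # two worker tasks
})
print(cluster_spec.num_tasks("worker"))        # 2
print(cluster_spec.task_address("worker", 0))  # localhost:12345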