Example #1
  def testMirroredStratUnseedSync(self):
    """Tests RNG/MirrorStrategy interaction #2c.

    If the RNG created in situation #2 is unseeded, the replicas' random-number
    streams are still the same.

    If the RNG created in situation #2b is unseeded, the replicas' random-number
    streams will be different. We can't test this for now because the op
    'NonDeterministicInts' is not implemented on GPU yet.
    """
    shape = [3, 4]
    dtype = dtypes.int32
    strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()])
    # TODO(wangpeng): support calling `random.Generator()` inside `f` (i.e.
    #   inside `call_for_each_replica` so that each replica can get a
    #   different random-number stream. The only obstacle is that op
    #   'NonDeterministicInts' is not implemented on GPU.)
    with strat.scope():
      gen = random.Generator()
      def f():
        t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
        t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
        t = array_ops.stack([t1, t2])
        return t
      results = strat.extended.call_for_each_replica(fn=f)
      values = results.values
      self.assertAllEqual(2, len(values))
      self.assertAllEqual(values[0], values[1])
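
The test above constructs an unseeded generator via the internal `random.Generator()` API. In the public TF 2.x API, the closest equivalent is `tf.random.Generator.from_non_deterministic_state()`; a minimal stand-alone sketch (the shape and dtype merely mirror the test):

import tensorflow as tf

# Unseeded generator: its state is seeded non-deterministically,
# so every run of this script produces a different stream.
gen = tf.random.Generator.from_non_deterministic_state()
print(gen.uniform_full_int(shape=[3, 4], dtype=tf.int32))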
Example #2
  def testMirroredStratParaAsync(self):
    """Tests RNG/MirrorStrategy interaction #3.

    The user can create n independent RNGs outside strategy.scope(), where n
    is the number of replicas, and give one to each replica. The replicas can
    thus get different random-number streams.
    """
    shape = [3, 4]
    dtype = dtypes.int32
    gens = random.get_global_generator().split(count=2)
    devices = ["/cpu:0", test_util.gpu_device_name()]
    strat = MirroredStrategy(devices=devices)
    # Use `PerReplica` to specify which `gen` is sent to which replica
    gens = dist_values.PerReplica(
        device_map=dist_values.ReplicaDeviceMap(devices),
        values=[[g] for g in gens])
    with strat.scope():
      def f(gen):
        t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
        t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
        t = array_ops.stack([t1, t2])
        return t
      results = strat.extended.call_for_each_replica(
          fn=f, args=gens)
      values = results.values
      self.assertAllEqual(2, len(values))
      self.assertAllDifferent(values)
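
`split(count=n)` is also part of the public `tf.random.Generator` API: it advances the parent generator and returns n new, statistically independent generators, which is exactly how the test above produces one stream per replica. A minimal sketch of the same idea outside a strategy:

import tensorflow as tf

# Derive two independent generators from the global one; their streams
# differ both from each other and from the parent's subsequent output.
g1, g2 = tf.random.get_global_generator().split(count=2)
print(g1.uniform_full_int(shape=[3, 4], dtype=tf.int32))
print(g2.uniform_full_int(shape=[3, 4], dtype=tf.int32))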
Example #3
    def testMirroredVarAsFunctionArg(self):
        """Tests that an RNG with a MirroredVariable can be used as a tf.function's arg."""
        shape = [3, 4]
        dtype = dtypes.int32
        strat = MirroredStrategy(
            devices=["/cpu:0", test_util.gpu_device_name()])
        with strat.scope():
            gen = random.Generator.from_seed(1234)

            @def_function.function
            def f(gen):
                t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
                t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
                t = array_ops.stack([t1, t2])
                return t

            def g():
                return f(gen)

            for _ in range(2):
                results = strat.extended.call_for_each_replica(fn=g)
                values = results.values
                self.assertAllEqual(2, len(values))
                self.assertAllEqual(values[0], values[1])
Example #4
    def testMirroredStratParaSync(self):
        """Tests RNG/MirrorStrategy interaction #2.

        If an RNG is created (either seeded or unseeded) inside strategy.scope(),
        each replica gets a mirror of this RNG. If they access their RNGs in the
        same manner, their random-number streams are the same.
        """
        creators = [
            lambda: random.Generator.from_seed(1234),
            random.Generator.from_non_deterministic_state,
        ]
        shape = [3, 4]
        dtype = dtypes.int32
        strat = MirroredStrategy(devices=["cpu:0", "cpu:1"])
        for creator in creators:
            with strat.scope():
                gen = creator()

                def f():
                    t1 = gen.uniform_full_int(shape=shape, dtype=dtype)  # pylint: disable=cell-var-from-loop
                    t2 = gen.uniform_full_int(shape=shape, dtype=dtype)  # pylint: disable=cell-var-from-loop
                    t = array_ops.stack([t1, t2])
                    return t

                results = strat.extended.call_for_each_replica(fn=f)
                values = results.values
                self.assertAllEqual(2, len(values))
                self.assertAllEqual(values[0], values[1])
Example #5
    def testMirroredStratParaSyncWithinFun(self):
        """Tests RNG/MirrorStrategy interaction #2b.

        If the RNG creation is within `f` in situation #2, the replicas'
        random-number streams are still the same. Note that whether the RNG
        creation is within strategy.scope() or not doesn't affect the result in
        this case (putting it inside strategy.scope() will cause unnecessary
        mirror creation and waste memory, though).
        """
        shape = [3, 4]
        dtype = dtypes.int32
        strat = MirroredStrategy(
            devices=["/cpu:0", test_util.gpu_device_name()])

        def f():
            gen = random.Generator.from_seed(1234)
            t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
            t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
            t = array_ops.stack([t1, t2])
            return t

        results = strat.extended.call_for_each_replica(fn=f)
        values = results.values
        self.assertAllEqual(2, len(values))
        self.assertAllEqual(values[0], values[1])
Example #6
    def testMirroredStratParaSyncWithinFun(self):
        """Tests RNG/MirrorStrategy interaction #2b.

        This is a slight variation of case #2 above. If the RNG is created
        within `f`, its behavior is the same as when it is created outside `f`
        but within the strategy scope.
        """
        creators = [
            lambda: random.Generator.from_seed(1234),
            random.Generator.from_non_deterministic_state,
        ]
        shape = [3, 4]
        dtype = dtypes.int32
        strat = MirroredStrategy(devices=["cpu:0", "cpu:1"])
        for creator in creators:

            def f():
                gen = creator()  # pylint: disable=cell-var-from-loop
                t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
                t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
                t = array_ops.stack([t1, t2])
                return t

            results = strat.extended.call_for_each_replica(fn=f)
            values = results.values
            self.assertAllEqual(2, len(values))
            self.assertAllEqual(values[0], values[1])
Example #7
    def __init__(self):
        logger.info(
            f"Found devices: {[x.name for x in tf.config.list_logical_devices()]}"
        )

        if GPUS_COUNT == 1:
            self.model = self.get_compiled_model()
        else:
            strategy = MirroredStrategy()
            logger.info(
                f'Number of devices: {strategy.num_replicas_in_sync}')

            with strategy.scope():
                self.model = self.get_compiled_model()

        # self.model.summary()
        self.loss_callback = LossCallback()
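
This snippet follows the standard Keras pattern: every variable-creating step (model construction and compile) happens under strategy.scope(), while training itself may be called outside it. A minimal sketch, where get_compiled_model() and dataset stand in for the user's own factory and input pipeline:

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    # Layers and optimizer slots created here are mirrored across replicas.
    model = get_compiled_model()

# fit() takes care of splitting each batch across the replicas.
model.fit(dataset, epochs=2)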
Example #8
  def testMirroredStratParaSync(self):
    """Tests RNG/MirrorStrategy interaction #2.

    If an RNG is created inside strategy.scope(), each replica gets a
    mirror of this RNG. If they access their RNGs in the same
    manner, their random-number streams are the same.
    """
    shape = [3, 4]
    dtype = dtypes.int32
    strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()])
    with strat.scope():
      gen = random.Generator(seed=1234)
      def f():
        t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
        t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
        t = array_ops.stack([t1, t2])
        return t
      results = strat.extended.call_for_each_replica(fn=f)
      values = results.values
      self.assertAllEqual(2, len(values))
      self.assertAllEqual(values[0], values[1])
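
The same interaction can be reproduced with public APIs: a generator created with tf.random.Generator.from_seed inside strategy.scope() is mirrored to every replica, and strategy.run plays the role of call_for_each_replica. A minimal sketch (the single-device list is only for illustration):

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(["/cpu:0"])
with strategy.scope():
  gen = tf.random.Generator.from_seed(1234)

def f():
  # Each replica advances its own mirror of `gen` in lockstep,
  # so all replicas observe the same stream.
  return gen.uniform_full_int(shape=[3, 4], dtype=tf.int32)

per_replica = strategy.run(f)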
Example #9
  def testMirroredStratSeq(self):
    """Tests RNG/MirrorStrategy interaction #1.

    If an RNG is created outside strategy.scope(), all replicas will access the
    same RNG object, and accesses are serialized.
    """
    shape = [3, 4]
    dtype = dtypes.int32
    gen = random.Generator(seed=1234)
    strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()])
    with strat.scope():
      def f():
        t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
        t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
        t = array_ops.stack([t1, t2])
        return t
      results = strat.extended.call_for_each_replica(
          fn=f)
      values = results.values
      self.assertAllEqual(2, len(values))
      self.assertAllDifferent(values)
Example #10
  def testMirroredStratParaSyncDisallowed(self):
    """Tests that generator creation in MirroredStrategy is disallowed."""
    creators = [
        lambda: random.Generator.from_seed(1234),
        random.Generator.from_non_deterministic_state,
    ]
    shape = [3, 4]
    dtype = dtypes.int32
    strat = MirroredStrategy(devices=["cpu:0", "cpu:1"])
    for creator in creators:
      with strat.scope():
        with self.assertRaisesWithPredicateMatch(
            ValueError, "disallowed"):
          creator()  # pylint: disable=cell-var-from-loop
      def f():
        gen = creator()  # pylint: disable=cell-var-from-loop
        return gen.uniform_full_int(shape=shape, dtype=dtype)
      with self.assertRaisesWithPredicateMatch(
          ValueError, "disallowed"):
        strat.extended.call_for_each_replica(fn=f)
Example #11
    def _create_model(self, inference_status, gpus):
        """Create model.

        Parameters:
            inference_status: bool,
            if true the model will be trained,
             else the model will be trained and inferred.
            gpus: int, Number of GPU used.

        Returns:
            train model or train and inference model
        """
        if gpus >= 2:
            strategy = MirroredStrategy()
            with strategy.scope():
                train_model = CRNN(self.image_width, self.image_height,
                                   self.image_channels, len(self.characters),
                                   True, self.cnn_backbone, self.rnn_backbone)
                # Compile model
                train_model.compile(optimizer=Adam(
                    learning_rate=self.learning_rate))
        else:
            train_model = CRNN(self.image_width,
                               self.image_height, self.image_channels,
                               len(self.characters), True, self.cnn_backbone,
                               self.rnn_backbone)
            # Compile model
            train_model.compile(optimizer=Adam(
                learning_rate=self.learning_rate))
        if inference_status:
            inference_model = CRNN(self.image_width,
                                   self.image_height, self.image_channels,
                                   len(self.characters), False,
                                   self.cnn_backbone, self.rnn_backbone)
            return train_model, inference_model
        else:
            return train_model
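
Callers are presumably expected to branch on the tuple-vs-single return value; a hypothetical usage (the `trainer` instance is not shown in the snippet):

# Training-only model on a single GPU:
train_model = trainer._create_model(inference_status=False, gpus=1)

# Training and inference models mirrored across two GPUs:
train_model, inference_model = trainer._create_model(inference_status=True, gpus=2)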