Code example #1
File: tpu_feed.py  Project: 1000sprites/tensorflow
  def generate_dequeue_op(self):
    """Generates the device-side Op to dequeue a tuple from the queue.

    Implicitly freezes the queue configuration if it is not already
    frozen, which will raise errors if the shapes and types have not
    been fully specified.

    Returns:
      A list of Outputs corresponding to a shard of infeed dequeued
      into XLA, suitable for use within a replicated block.

    Raises:
      ValueError: if the types or shapes of the tuple elements have not been
      set; or if a dequeue op has already been generated.
    """
    self.freeze()
    if self._generated_dequeue_op:
      raise ValueError("Can't generate two dequeue Ops from the same queue")
    self._generated_dequeue_op = True
    full_name = "%s/dequeue" % self._name
    sharded_shapes = [
        policy.get_sharded_shape(shape)
        for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
    ]
    return tpu_ops.infeed_dequeue_tuple(
        dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
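The shard-based dequeue above is normally driven through tpu.replicate, which calls generate_dequeue_op() internally and hands each replica its shard of the tuple. Below is a minimal host/device pairing sketch, assuming the TF 1.x contrib API (tf.contrib.tpu); the tensor names and shapes are illustrative only.

import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_feed

# Illustrative inputs: a batch of images and labels fed from the host.
features = tf.placeholder(tf.float32, [8, 224, 224, 3])
labels = tf.placeholder(tf.int32, [8])

# Configure the queue; it is frozen implicitly by the first generate_*_op call.
infeed_queue = tpu_feed.InfeedQueue(
    tuple_types=[features.dtype, labels.dtype],
    tuple_shapes=[features.shape, labels.shape])
infeed_queue.set_number_of_shards(2)

# Host side: split the batch across the shards and build the enqueue ops.
enqueue_ops = infeed_queue.split_inputs_and_generate_enqueue_ops(
    [features, labels])

def computation(features, labels):
  # Device side: each replica receives one shard of the dequeued tuple.
  return tf.reduce_mean(features)

# tpu.replicate appends the dequeued tensors to each replica's inputs.
outputs = tpu.replicate(computation, inputs=[[]] * 2,
                        infeed_queue=infeed_queue)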
Code example #2
File: tpu_feed.py  Project: AnishShah/tensorflow
  def generate_dequeue_op(self, tpu_device=0):
    """Generate TPU dequeue ops.

    Args:
      tpu_device: The TPU device ordinal where the infeed instruction should be
        placed.

    Returns:
      A list of Outputs corresponding to a partition of infeed dequeued
      into XLA, suitable for use within a replicated block.

    Raises:
      ValueError: if the types or shapes of the tuple elements have not been
      set; or if a dequeue op has already been generated.
    """
    self.freeze()
    if self._generated_dequeue_op:
      raise ValueError("Can't generate two dequeue Ops from the same queue")
    self._generated_dequeue_op = True
    full_name = "%s/dequeue" % self._name
    sharded_shapes = [
        policy.get_sharded_shape(shape)
        for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
    ]
    with ops.device(tpu.core(tpu_device)):
      values = tpu_ops.infeed_dequeue_tuple(
          dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
    return self._tag_sharding_attribute_for_dequeued_tensors(
        values, self._input_partition_dims)
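For context on the placement in this partitioned variant: tpu.core() simply maps a logical core ordinal to a replicated-core device string, and the final tagging step attaches an XLA sharding attribute describing _input_partition_dims to each dequeued tensor. A small sketch of what the device scope resolves to, assuming the TF 1.x module layout:

import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import tpu

# tpu.core(0) resolves to "device:TPU_REPLICATED_CORE:0", a device name that
# is only meaningful inside a replicated TPU computation.
with tf.device(tpu.core(0)):
  zero = tf.constant(0.0)  # pinned to core 0 of each replica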
Code example #3
File: tpu_feed.py  Project: AnishShah/tensorflow
  def generate_dequeue_op(self, tpu_device=0):
    """Generates the device-side Op to dequeue a tuple from the queue.

    Implicitly freezes the queue configuration if it is not already
    frozen, which will raise errors if the shapes and types have not
    been fully specified.

    Args:
      tpu_device: The TPU device ordinal where the infeed instruction should be
        placed. If None, no explicit placement will be performed, and it is up
        to the user to call this API from within a proper TPU device scope.
        The XLA code will fail if the TPU dequeue instruction is not bound to
        any device.

    Returns:
      A list of Outputs corresponding to a shard of infeed dequeued
      into XLA, suitable for use within a replicated block.

    Raises:
      ValueError: if the types or shapes of the tuple elements have not been
      set; or if a dequeue op has already been generated.
    """
    self.freeze()
    if self._generated_dequeue_op:
      raise ValueError("Can't generate two dequeue Ops from the same queue")
    self._generated_dequeue_op = True
    full_name = "%s/dequeue" % self._name
    sharded_shapes = [
        policy.get_sharded_shape(shape)
        for (shape, policy) in zip(self._tuple_shapes, self._sharding_policies)
    ]
    if tpu_device is not None:
      with ops.device(tpu.core(tpu_device)):
        return tpu_ops.infeed_dequeue_tuple(
            dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
    else:
      return tpu_ops.infeed_dequeue_tuple(
          dtypes=self._tuple_types, shapes=sharded_shapes, name=full_name)
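With tpu_device=None, placement becomes the caller's responsibility; a sketch of providing the scope explicitly, reusing the hypothetical infeed_queue from the earlier sketch:

from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.python.framework import ops

# The dequeue inherits the enclosing device scope instead of placing itself.
with ops.device(tpu.core(0)):
  dequeued = infeed_queue.generate_dequeue_op(tpu_device=None)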
Code example #4
        def _model_fn():
            """Compute fit/eval/predict for the TPU."""
            is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
            is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
            is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT

            # During train/eval, we infeed our features as well as labels.
            if is_training or is_test:
                infeed_layers = self.model._input_layers + self.model._output_layers
            else:
                infeed_layers = self.model._input_layers

            # Generate our infeed operation to read features & labels.
            infeed_tensors = tpu_ops.infeed_dequeue_tuple(
                dtypes=[spec.dtype for spec in input_specs],
                shapes=[spec.shape for spec in input_specs],
                name='infeed-%s' % self.execution_mode)

            assert len(infeed_tensors) == len(infeed_layers), (
                'Infeed inputs did not match model: %s vs %s' %
                (infeed_layers, infeed_tensors))

            tpu_targets = []
            tpu_input_map = {}

            # Sort infeed outputs into inputs and labels for calling our Keras model.
            for tensor, layer in zip(infeed_tensors, infeed_layers):
                if layer in self.model._input_layers:
                    tpu_input_map[layer.name] = tensor
                if layer in self.model._output_layers:
                    tpu_targets.append(tensor)

            # Clone our CPU model, running within the TPU device context.
            with TPURewriteContext(tpu_input_map):
                # TODO(power): Replicate variables.
                with ops.device('/device:TPU:0'):
                    self._cloned_model = models.clone_model(self.model)

            # Create a copy of the optimizer for this graph.
            if isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
                cloned_optimizer = keras_optimizers.TFOptimizer(
                    self.model.optimizer.optimizer)
            else:
                logging.info('Cloning %s %s',
                             self.model.optimizer.__class__.__name__,
                             self._optimizer_config)
                cloned_optimizer = self.model.optimizer.__class__.from_config(
                    self._optimizer_config)

            if is_training or is_test:
                self._cloned_model.compile(
                    optimizer=_replicated_optimizer(cloned_optimizer),
                    loss=self.model.loss,
                    loss_weights=self.model.loss_weights,
                    metrics=self.model.metrics,
                    weighted_metrics=self.model.weighted_metrics,
                    target_tensors=tpu_targets,
                )

            # Compute our outfeed depending on the execution mode
            if is_training:
                self._cloned_model._make_train_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in self._cloned_model.train_function.outputs
                ]
                return [
                    self._cloned_model.train_function.updates_op,
                    tpu_ops.outfeed_enqueue_tuple(
                        self._cloned_model.train_function.outputs,
                        name='outfeed-enqueue-train')
                ]
            elif is_test:
                self._cloned_model._make_test_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in self._cloned_model.test_function.outputs
                ]
                return [
                    tpu_ops.outfeed_enqueue_tuple(
                        self._cloned_model.test_function.outputs,
                        name='outfeed-enqueue-test')
                ]
            elif is_predict:
                self._cloned_model._make_predict_function()
                self._outfeed_spec = [
                    tensor_spec.TensorSpec(tensor.shape, tensor.dtype,
                                           tensor.name)
                    for tensor in self._cloned_model.predict_function.outputs
                ]
                return [
                    tpu_ops.outfeed_enqueue_tuple(
                        self._cloned_model.predict_function.outputs,
                        name='outfeed-enqueue-predict',
                    )
                ]
            else:
                assert False, 'Unexpected execution mode: %s' % self.execution_mode
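Each outfeed_enqueue_tuple above has a host-side counterpart that drains results after every step with matching dtypes and shapes. A sketch of the training case, assuming an outfeed_spec built the way self._outfeed_spec is in the example:

from tensorflow.contrib.tpu.python.ops import tpu_ops

# Host side: dequeue one step's outputs; dtypes/shapes must match the enqueue.
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
    dtypes=[spec.dtype for spec in outfeed_spec],
    shapes=[spec.shape for spec in outfeed_spec],
    name='outfeed-dequeue-train')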
Code example #5
def dequeue_fn():
  dequeued = tpu_ops.infeed_dequeue_tuple(dtypes=types, shapes=shapes)
  return nest.pack_sequence_as(iterator.output_shapes, dequeued)
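This dequeue_fn has a host-side twin that flattens the iterator element and enqueues it with the same types and shapes; a sketch, with iterator, types, and shapes assumed from the example:

from tensorflow.python.data.util import nest
from tensorflow.contrib.tpu.python.ops import tpu_ops

def enqueue_fn():
  # Flatten the structured element so it matches the flat dequeue tuple.
  flattened = nest.flatten(iterator.get_next())
  return tpu_ops.infeed_enqueue_tuple(
      inputs=flattened, shapes=shapes, device_ordinal=0)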
Code example #6
File: keras_support.py  Project: Eagle732/tensorflow
    def _model_fn():
      """Compute fit/eval/predict for the TPU."""
      is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
      is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
      is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT

      # During train/eval, we infeed our features as well as labels.
      if is_training or is_test:
        infeed_layers = self.model._input_layers + self.model._output_layers
      else:
        infeed_layers = self.model._input_layers

      # Generate our infeed operation to read features & labels.
      infeed_tensors = tpu_ops.infeed_dequeue_tuple(
          dtypes=[spec.dtype for spec in input_specs],
          shapes=[spec.shape for spec in input_specs],
          name='infeed-%s' % self.execution_mode)

      assert len(infeed_tensors) == len(infeed_layers), (
          'Infeed inputs did not match model: %s vs %s' %
          (infeed_layers, infeed_tensors))

      tpu_targets = []
      tpu_input_map = {}

      # Sort infeed outputs into inputs and labels for calling our Keras model.
      for tensor, layer in zip(infeed_tensors, infeed_layers):
        if layer in self.model._input_layers:
          tpu_input_map[layer.name] = tensor
        if layer in self.model._output_layers:
          tpu_targets.append(tensor)

      # Clone our CPU model, running within the TPU device context.
      with TPURewriteContext(tpu_input_map):
        self._cloned_model = models.clone_model(self.model)

      # Create a copy of the optimizer for this graph.
      if isinstance(self.model.optimizer, keras_optimizers.TFOptimizer):
        cloned_optimizer = keras_optimizers.TFOptimizer(
            self.model.optimizer.optimizer)
      else:
        logging.info('Cloning %s %s', self.model.optimizer.__class__.__name__,
                     self._optimizer_config)
        cloned_optimizer = self.model.optimizer.__class__.from_config(
            self._optimizer_config)

      if is_training or is_test:
        self._cloned_model.compile(
            optimizer=_replicated_optimizer(cloned_optimizer),
            loss=self.model.loss,
            loss_weights=self.model.loss_weights,
            metrics=self.model.metrics,
            weighted_metrics=self.model.weighted_metrics,
            target_tensors=tpu_targets,
        )

      # Compute our outfeed depending on the execution mode
      if is_training:
        self._cloned_model._make_train_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model.train_function.outputs
        ]
        return [
            self._cloned_model.train_function.updates_op,
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model.train_function.outputs,
                name='outfeed-enqueue-train')
        ]
      elif is_test:
        self._cloned_model._make_test_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model.test_function.outputs
        ]
        return [
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model.test_function.outputs,
                name='outfeed-enqueue-test')
        ]
      elif is_predict:
        self._cloned_model._make_predict_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in self._cloned_model.predict_function.outputs
        ]
        return [
            tpu_ops.outfeed_enqueue_tuple(
                self._cloned_model.predict_function.outputs,
                name='outfeed-enqueue-predict',
            )
        ]
      else:
        assert False, 'Unexpected execution mode: %s' % self.execution_mode
Code example #7
File: keras_support.py  Project: LiuCKind/tensorflow
    def _model_fn():
      """Compute fit/eval/predict for the TPU."""
      is_training = self.execution_mode == model_fn_lib.ModeKeys.TRAIN
      is_test = self.execution_mode == model_fn_lib.ModeKeys.EVAL
      is_predict = self.execution_mode == model_fn_lib.ModeKeys.PREDICT

      # During train/eval, we infeed our features as well as labels.
      if is_training or is_test:
        infeed_layers = self.model._input_layers + self.model._output_layers
      else:
        infeed_layers = self.model._input_layers

      # Generate our infeed operation to read features & labels.
      infeed_tensors = tpu_ops.infeed_dequeue_tuple(
          dtypes=[spec.dtype for spec in input_specs],
          shapes=[spec.shape for spec in input_specs],
          name='infeed-%s' % self.execution_mode)

      assert len(infeed_tensors) == len(infeed_layers), (
          'Infeed inputs did not match model: %s vs %s' %
          (infeed_layers, infeed_tensors))

      tpu_targets = []
      tpu_inputs = []

      # Sort infeed outputs into inputs and labels for calling our Keras model.
      for tensor, layer in zip(infeed_tensors, infeed_layers):
        if layer in self.model._input_layers:
          tpu_inputs.append(layers.Input(name=layer.name, tensor=tensor))
        if layer in self.model._output_layers:
          tpu_targets.append(tensor)

      # Call our model with our infeed inputs (re-using the weights).
      model_outputs = self.model(tpu_inputs)
      child_model = models.Model(inputs=tpu_inputs, outputs=model_outputs)

      if is_training or is_test:
        child_model.compile(
            optimizer=_replicated_optimizer(self.model.optimizer,
                                            self.num_replicas),
            loss=self.model.loss,
            loss_weights=self.model.loss_weights,
            metrics=self.model.metrics,
            weighted_metrics=self.model.weighted_metrics,
            target_tensors=tpu_targets,
        )

      # Compute our outfeed depending on the execution mode
      if is_training:
        child_model._make_train_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in child_model.train_function.outputs
        ]
        return [
            child_model.train_function.updates_op,
            tpu_ops.outfeed_enqueue_tuple(
                child_model.train_function.outputs,
                name='outfeed-enqueue-train')
        ]
      elif is_test:
        child_model._make_test_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in child_model.test_function.outputs
        ]
        return [
            tpu_ops.outfeed_enqueue_tuple(
                child_model.test_function.outputs,
                name='outfeed-enqueue-test')
        ]
      elif is_predict:
        child_model._make_predict_function()
        self._outfeed_spec = [
            tensor_spec.TensorSpec(tensor.shape, tensor.dtype, tensor.name)
            for tensor in child_model.predict_function.outputs
        ]
        return [
            tpu_ops.outfeed_enqueue_tuple(
                child_model.predict_function.outputs,
                name='outfeed-enqueue-predict',
            )
        ]
      else:
        assert False, 'Unexpected execution mode: %s' % self.execution_mode
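Finally, all of the examples above bottom out in the same raw op; a minimal, self-contained use of it inside a TPU computation (a sketch, TF 1.x graph mode assumed; the shapes are illustrative):

import tensorflow as tf
from tensorflow.contrib.tpu.python.ops import tpu_ops

def computation():
  # Dequeues one (features, labels) tuple previously enqueued by the host.
  features, labels = tpu_ops.infeed_dequeue_tuple(
      dtypes=[tf.float32, tf.int32],
      shapes=[[128, 28, 28, 1], [128]],
      name='infeed_dequeue')
  return tf.reduce_mean(features)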