Code Example #1
 def testDeferredTensorAttributes(self):
     x = base_layer.DeferredTensor(shape=(None, 2),
                                   dtype='float32',
                                   name='x')
     self.assertEqual(str(x),
                      'DeferredTensor(\'x\', shape=(?, 2), dtype=float32)')
     self.assertEqual(repr(x),
                      '<DeferredTensor \'x\' shape=(?, 2) dtype=float32>')
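This test only exercises DeferredTensor's printable forms. A minimal standalone sketch of the same check, assuming the old TF 1.x internal layout where DeferredTensor lived in tensorflow.python.keras.engine.base_layer (the class was removed in later releases):

    from tensorflow.python.keras.engine import base_layer  # assumed TF 1.x path

    # Build a deferred (shape-and-dtype-only) tensor and inspect its printable forms.
    x = base_layer.DeferredTensor(shape=(None, 2), dtype='float32', name='x')
    print(str(x))   # DeferredTensor('x', shape=(?, 2), dtype=float32)
    print(repr(x))  # <DeferredTensor 'x' shape=(?, 2) dtype=float32>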
Code Example #2
File: model.py  Project: Allensmile/ModelZoo
 def set_inputs(self, inputs):
     """
     set inputs and output shape according to inputs
     :param inputs: inputs data or data piece
     :return:
     """
     if isinstance(inputs, (list, tuple)):
         if tensor_util.is_tensor(inputs[0]):
             dummy_output_values = self.call(
                 training_utils.cast_if_floating_dtype(inputs[:1]))
         else:
             dummy_output_values = self.call([
                 ops.convert_to_tensor(v, dtype=K.floatx())
                 for v in inputs[:1]
             ])
         dummy_input_values = list(inputs[:1])
     else:
         if tensor_util.is_tensor(inputs):
             dummy_output_values = self.call(
                 training_utils.cast_if_floating_dtype(inputs[:1]))
         else:
             dummy_output_values = self.call(
                 ops.convert_to_tensor(inputs[:1], dtype=K.floatx()))
         dummy_input_values = [inputs[:1]]
     if isinstance(dummy_output_values, (list, tuple)):
         dummy_output_values = list(dummy_output_values)
     else:
         dummy_output_values = [dummy_output_values]
     self.outputs = [
         base_layer.DeferredTensor(shape=(None for _ in v.shape),
                                   dtype=v.dtype)
         for v in dummy_output_values
     ]
     self.inputs = [
         base_layer.DeferredTensor(shape=(None for _ in v.shape),
                                   dtype=v.dtype)
         for v in dummy_input_values
     ]
     self.input_names = [
         'input_%d' % (i + 1) for i in range(len(dummy_input_values))
     ]
     self.output_names = [
         'output_%d' % (i + 1) for i in range(len(dummy_output_values))
     ]
     self.built = True
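The method runs the model's call() once on a one-element slice of the input, records the resulting shapes and dtypes as DeferredTensors, and auto-generates input/output names. A hypothetical usage sketch (MyModel and x_train are illustrative names, not taken from the ModelZoo project):

    model = MyModel()            # a subclassed model that defines call()
    model.set_inputs(x_train)    # internally uses x_train[:1] as a dummy batch
    print(model.input_names)     # ['input_1']
    print(model.output_names)    # ['output_1'], one entry per model output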
Code Example #3
  def get_symbolic_inputs(self, return_single_as_list=False):
    """Returns inputs to be set as self.inputs for a model."""
    for i in range(len(self._flattened_inputs)):
      k = self._input_names[i]
      v = self._flattened_inputs[i]
      if context.executing_eagerly():
        v = base_layer.DeferredTensor(
            shape=(None for _ in v.shape), dtype=v.dtype)
      else:
        if isinstance(v, list):
          v = np.asarray(v)
          if v.ndim == 1:
            v = np.expand_dims(v, 1)
        if isinstance(v, (np.ndarray)):
          # We fix the placeholder shape except the batch size.
          # This is suboptimal, but it is the best we can do with the info
          # we have. The user should call `model._set_inputs(placeholders)`
          # to specify custom placeholders if the need arises.
          shape = (None,) + v.shape[1:]
          v = K.placeholder(shape=shape, name=k)
      self._flattened_inputs[i] = v

    return self._get(return_single_as_list)
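In eager mode every input becomes a DeferredTensor with a fully unknown shape; in graph mode a NumPy array becomes a placeholder whose shape is fixed except for the batch dimension. A small illustrative sketch of that shape rule (the array here is made up):

    import numpy as np

    v = np.zeros((32, 28, 28), dtype='float32')  # a batch of 32 samples
    shape = (None,) + v.shape[1:]                # -> (None, 28, 28): batch size left free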
Code Example #4
File: input_layer.py  Project: Shinepans/tensorflow-2
    def __init__(self,
                 input_shape=None,
                 batch_size=None,
                 dtype=None,
                 input_tensor=None,
                 sparse=False,
                 name=None,
                 **kwargs):
        if 'batch_input_shape' in kwargs:
            batch_input_shape = kwargs.pop('batch_input_shape')
            if input_shape and batch_input_shape:
                raise ValueError('Only provide the input_shape OR '
                                 'batch_input_shape argument to '
                                 'InputLayer, not both at the same time.')
            batch_size = batch_input_shape[0]
            input_shape = batch_input_shape[1:]
        if kwargs:
            raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

        if not name:
            prefix = 'input'
            name = prefix + '_' + str(K.get_uid(prefix))

        if not dtype:
            if input_tensor is None:
                dtype = K.floatx()
            else:
                dtype = K.dtype(input_tensor)
        super(InputLayer, self).__init__(dtype=dtype, name=name)
        self.built = True
        self.sparse = sparse
        self.batch_size = batch_size

        if isinstance(input_shape, tensor_shape.TensorShape):
            input_shape = tuple(input_shape.as_list())

        if input_tensor is None:
            if input_shape is not None:
                batch_input_shape = (batch_size, ) + tuple(input_shape)
            else:
                batch_input_shape = None

            if context.executing_eagerly():
                # In eager mode, create a temporary placeholder to call the layer on.
                input_tensor = base_layer.DeferredTensor(  # pylint: disable=protected-access
                    shape=batch_input_shape,
                    dtype=dtype,
                    name=self.name)
            else:
                # In graph mode, create a graph placeholder to call the layer on.
                if sparse:
                    input_tensor = array_ops.sparse_placeholder(
                        shape=batch_input_shape, dtype=dtype, name=self.name)
                else:
                    input_tensor = array_ops.placeholder(
                        shape=batch_input_shape, dtype=dtype, name=self.name)

            # For compatibility with Keras API.
            self.is_placeholder = True
            self._batch_input_shape = batch_input_shape
        else:
            # For compatibility with Keras API.
            self.is_placeholder = False
            self._batch_input_shape = tuple(input_tensor.get_shape().as_list())

            if context.executing_eagerly():
                raise ValueError(
                    'You should not pass an input tensor when executing '
                    'in eager mode. For example, instead of creating an '
                    'InputLayer, you should instantiate your model and '
                    'directly call it on your input.')

        # Create an input node to add to self.outbound_node
        # and set output_tensors' _keras_history.
        input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
        base_layer.Node(self,
                        inbound_layers=[],
                        node_indices=[],
                        tensor_indices=[],
                        input_tensors=[input_tensor],
                        output_tensors=[input_tensor])
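When only input_shape is given, the constructor builds batch_input_shape as (batch_size,) + input_shape with batch_size defaulting to None, then creates a DeferredTensor (eager mode) or a placeholder (graph mode) of that shape. A hedged usage sketch, assuming this InputLayer class is imported from the file above; in practice it is usually created indirectly through the functional-API Input() helper:

    # Hypothetical direct instantiation; the argument values are illustrative.
    layer = InputLayer(input_shape=(784,), dtype='float32', name='pixels')
    # equivalent to InputLayer(batch_input_shape=(None, 784), dtype='float32', name='pixels')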
Code Example #5
File: model.py  Project: zzzz123321/ModelZoo
    def construct(self, inputs):
        """
        Set inputs and output shape according to inputs
        :param inputs: inputs data or data piece
        :return:
        """
        if not self.multiple_inputs:
            if isinstance(inputs, (list, tuple)):
                if tensor_util.is_tensor(inputs[0]):
                    dummy_output_values = self.call(
                        training_utils.cast_if_floating_dtype(inputs[:1]))
                else:
                    dummy_output_values = self.call([
                        ops.convert_to_tensor(v, dtype=K.floatx())
                        for v in inputs[:1]
                    ])
                # set dummy input values for inputs
                dummy_input_values = list(inputs[:1])
            else:
                if tensor_util.is_tensor(inputs):
                    dummy_output_values = self.call(
                        training_utils.cast_if_floating_dtype(inputs[:1]))
                else:
                    dummy_output_values = self.call(
                        ops.convert_to_tensor(inputs[:1], dtype=K.floatx()))
                # set dummy input values for inputs
                dummy_input_values = [inputs[:1]]
            # set output values
            if isinstance(dummy_output_values, (list, tuple)):
                dummy_output_values = list(dummy_output_values)
            else:
                dummy_output_values = [dummy_output_values]
        else:
            first_inputs = copy.copy(inputs)[0]
            if tensor_util.is_tensor(inputs):
                inputs = training_utils.cast_if_floating_dtype(inputs)
            else:
                inputs = ops.convert_to_tensor(inputs, dtype=K.floatx())
            inputs = tf.unstack(inputs[:, :1, :], axis=0)
            dummy_output_values = self.call(inputs, training=True)
            dummy_input_values = [first_inputs[:1]]
            # set output values
            if isinstance(dummy_output_values, (list, tuple)):
                dummy_output_values = list(dummy_output_values)
            else:
                dummy_output_values = [dummy_output_values]
        self.outputs = [
            base_layer.DeferredTensor(shape=(None for _ in v.shape),
                                      dtype=v.dtype)
            for v in dummy_output_values
        ]
        self.inputs = [
            base_layer.DeferredTensor(shape=(None for _ in v.shape),
                                      dtype=v.dtype)
            for v in dummy_input_values
        ]
        self.input_names = [
            'input_%d' % (i + 1) for i in range(len(dummy_input_values))
        ]
        self.output_names = [
            'output_%d' % (i + 1) for i in range(len(dummy_output_values))
        ]

        # self.call(tensor, training=True)
        self.built = True
        self.init()
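The multiple_inputs branch assumes the separate inputs were stacked along axis 0; it takes one sample per input and unstacks them back into a list before calling the model. An illustrative sketch of that slicing step (the shapes are made up):

    import tensorflow as tf

    inputs = tf.zeros((2, 32, 10))                 # 2 stacked inputs, batch 32, feature dim 10
    pieces = tf.unstack(inputs[:, :1, :], axis=0)  # -> list of 2 tensors
    print(len(pieces), pieces[0].shape)            # 2 (1, 10)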