    def __init__(self,
                 in_shape=None,
                 bit_per_sub_pixel_factor=None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.in_shape = in_shape
        self.bit_per_sub_pixel_factor = bit_per_sub_pixel_factor
        self.conv = Conv2D(filters=self.in_shape[-1],
                           kernel_size=3,
                           padding="same",
                           kernel_initializer='zero',
                           bias_initializer='zero')

        # ------------ workarounds -----------------
        # (1) Avoid error in __call__()
        self.outputs = []
        # (2) Avoid Model.get_config() -> from_config() infinite loop (by pushing dummy node)
        # Create the node linking internal inputs to internal outputs.
        self.in_x = Input(shape=self.in_shape)
        base_layer.Node(outbound_layer=self,
                        inbound_layers=[],
                        node_indices=[],
                        tensor_indices=[],
                        input_tensors=[self.in_x],
                        output_tensors=self.outputs)
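The zero-initialized final convolution above is a common trick in flow-based models such as Glow: at the start of training the layer outputs exactly zero, so the surrounding transform begins as an identity map. A minimal, self-contained sketch of that effect, assuming TensorFlow 2.x with eager execution (names and shapes here are illustrative, not taken from the original class):

import numpy as np
import tensorflow as tf

# A zero-initialized Conv2D produces an all-zero output at initialization,
# so a block that adds or scales by this output starts out as a no-op.
conv = tf.keras.layers.Conv2D(filters=4, kernel_size=3, padding="same",
                              kernel_initializer="zeros",
                              bias_initializer="zeros")
x = np.random.randn(2, 8, 8, 4).astype("float32")
print(np.allclose(conv(x).numpy(), 0.0))  # True

The next snippet applies the same pattern, placing two hidden convolutions and ActNorm layers in front of the zero-initialized output convolution.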
    def __init__(self,
                 in_shape=None,
                 hidden_channel_size=None,
                 bit_per_sub_pixel_factor=None,
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.in_shape = in_shape
        self.hidden_channel_size = hidden_channel_size
        self.bit_per_sub_pixel_factor = bit_per_sub_pixel_factor
        self.conv1 = Conv2D(filters=self.hidden_channel_size,
                            name=f'{self.name}/conv1',
                            kernel_size=3,
                            strides=1,
                            padding="same",
                            use_bias=False)
        self.actnorm1 = ActNorm(use_loss=False, name=f'{self.name}/actnorm1')

        self.conv2 = Conv2D(filters=self.hidden_channel_size,
                            name=f'{self.name}/conv2',
                            kernel_size=1,
                            strides=1,
                            padding="same",
                            use_bias=False)
        self.actnorm2 = ActNorm(use_loss=False, name=f'{self.name}/actnorm2')

        self.last_conv = Conv2D(filters=self.in_shape[-1],
                                kernel_size=3,
                                padding="same",
                                name=f'{self.name}/last_conv',
                                kernel_initializer='zero',
                                bias_initializer='zero')
        self.actnorm3 = ActNorm(use_loss=False, name=f'{self.name}/actnorm3')

        # ------------ workarounds -----------------
        # (1) Avoid error in __call__()
        self.outputs = []
        # (2) Avoid Model.get_config() -> from_config() infinite loop (by pushing dummy node)
        # Create the node linking internal inputs to internal outputs.
        self.in_x = Input(shape=self.in_shape)
        base_layer.Node(outbound_layer=self,
                        inbound_layers=[],
                        node_indices=[],
                        tensor_indices=[],
                        input_tensors=[self.in_x],
                        output_tensors=self.outputs)
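For comparison, here is a rough functional-API sketch of the same stack, assuming TensorFlow 2.x. ActNorm (a data-dependent per-channel affine normalization defined elsewhere) is replaced by BatchNormalization purely as a stand-in, and any activations applied in the original call() are omitted because that method is not shown. Building the block as a functional Model creates the input and output nodes automatically, which is roughly what the dummy base_layer.Node workaround above arranges by hand:

import tensorflow as tf

def make_hidden_block(in_channels, hidden_channels, name="hidden_block"):
    # Illustrative stand-in for the layer stack above; layer choices are assumptions.
    inputs = tf.keras.Input(shape=(None, None, in_channels))
    h = tf.keras.layers.Conv2D(hidden_channels, 3, padding="same", use_bias=False)(inputs)
    h = tf.keras.layers.BatchNormalization()(h)  # stand-in for ActNorm
    h = tf.keras.layers.Conv2D(hidden_channels, 1, padding="same", use_bias=False)(h)
    h = tf.keras.layers.BatchNormalization()(h)  # stand-in for ActNorm
    outputs = tf.keras.layers.Conv2D(in_channels, 3, padding="same",
                                     kernel_initializer="zeros",
                                     bias_initializer="zeros")(h)
    return tf.keras.Model(inputs, outputs, name=name)

block = make_hidden_block(in_channels=6, hidden_channels=64)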
Example #3
    def __init__(self,
                 input_shape=None,
                 batch_size=None,
                 dtype=None,
                 input_tensor=None,
                 sparse=False,
                 name=None,
                 **kwargs):
        if 'batch_input_shape' in kwargs:
            batch_input_shape = kwargs.pop('batch_input_shape')
            if input_shape and batch_input_shape:
                raise ValueError('Only provide the input_shape OR '
                                 'batch_input_shape argument to '
                                 'InputLayer, not both at the same time.')
            batch_size = batch_input_shape[0]
            input_shape = batch_input_shape[1:]
        if kwargs:
            raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

        if not name:
            prefix = 'input'
            name = prefix + '_' + str(K.get_uid(prefix))

        if not dtype:
            if input_tensor is None:
                dtype = K.floatx()
            else:
                dtype = K.dtype(input_tensor)
        super(InputLayer, self).__init__(dtype=dtype, name=name)
        self.built = True
        self.sparse = sparse
        self.batch_size = batch_size

        if isinstance(input_shape, tensor_shape.TensorShape):
            input_shape = tuple(input_shape.as_list())

        if input_tensor is None:
            if input_shape is not None:
                batch_input_shape = (batch_size, ) + tuple(input_shape)
            else:
                batch_input_shape = None

            if context.executing_eagerly():
                # In eager mode, create a temporary placeholder to call the layer on.
                input_tensor = base_layer.DeferredTensor(  # pylint: disable=protected-access
                    shape=batch_input_shape,
                    dtype=dtype,
                    name=self.name)
            else:
                # In graph mode, create a graph placeholder to call the layer on.
                if sparse:
                    input_tensor = array_ops.sparse_placeholder(
                        shape=batch_input_shape, dtype=dtype, name=self.name)
                else:
                    input_tensor = array_ops.placeholder(
                        shape=batch_input_shape, dtype=dtype, name=self.name)

            # For compatibility with Keras API.
            self.is_placeholder = True
            self._batch_input_shape = batch_input_shape
        else:
            # For compatibility with Keras API.
            self.is_placeholder = False
            self._batch_input_shape = tuple(input_tensor.get_shape().as_list())

            if context.executing_eagerly():
                raise ValueError(
                    'You should not pass an input tensor when executing '
                    'in eager mode. For example, instead of creating an '
                    'InputLayer, you should instantiate your model and '
                    'directly call it on your input.')

        # Create an input node to add to self.outbound_node
        # and set output_tensors' _keras_history.
        input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
        base_layer.Node(self,
                        inbound_layers=[],
                        node_indices=[],
                        tensor_indices=[],
                        input_tensors=[input_tensor],
                        output_tensors=[input_tensor])
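Usage sketch for an InputLayer like the one above, assuming current tf.keras: Input() constructs the InputLayer internally, records the batch input shape, and hands back its placeholder-like output tensor, which downstream layers are then called on.

import tensorflow as tf

x = tf.keras.Input(shape=(28, 28, 1), batch_size=32, name="img")  # builds an InputLayer
model = tf.keras.Model(x, tf.keras.layers.Flatten()(x))
print(type(model.layers[0]).__name__)  # InputLayer
print(model.input_shape)               # (32, 28, 28, 1)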
Example #4
    def __init__(self,
                 input_shape=None,
                 batch_size=None,
                 dtype=None,
                 input_tensor=None,
                 sparse=False,
                 name=None,
                 **kwargs):
        if 'batch_input_shape' in kwargs:
            batch_input_shape = kwargs.pop('batch_input_shape')
            if input_shape and batch_input_shape:
                raise ValueError('Only provide the input_shape OR '
                                 'batch_input_shape argument to '
                                 'InputLayer, not both at the same time.')
            batch_size = batch_input_shape[0]
            input_shape = batch_input_shape[1:]
        if kwargs:
            raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

        if not name:
            prefix = 'input'
            name = prefix + '_' + str(backend.get_uid(prefix))

        if not dtype:
            if input_tensor is None:
                dtype = backend.floatx()
            else:
                dtype = backend.dtype(input_tensor)
        super(InputLayer, self).__init__(dtype=dtype, name=name)
        self.built = True
        self.sparse = sparse
        self.batch_size = batch_size
        self.supports_masking = True
        self._can_use_graph_functions = True

        if isinstance(input_shape, tensor_shape.TensorShape):
            input_shape = tuple(input_shape.as_list())

        if input_tensor is None:
            if input_shape is not None:
                batch_input_shape = (batch_size, ) + tuple(input_shape)
            else:
                batch_input_shape = None
            graph = backend.get_graph()
            with context.graph_mode():
                with graph.as_default():
                    # In graph mode, create a graph placeholder to call the layer on.
                    if sparse:
                        input_tensor = array_ops.sparse_placeholder(
                            shape=batch_input_shape,
                            dtype=dtype,
                            name=self.name)
                    else:
                        input_tensor = array_ops.placeholder(
                            shape=batch_input_shape,
                            dtype=dtype,
                            name=self.name)

            self.is_placeholder = True
            self._batch_input_shape = batch_input_shape
        else:
            if not tf_utils.is_symbolic_tensor(input_tensor):
                raise ValueError(
                    'You should not pass an EagerTensor to `Input`. '
                    'For example, instead of creating an '
                    'InputLayer, you should instantiate your model and '
                    'directly call it on your input.')
            self.is_placeholder = False
            self._batch_input_shape = tuple(input_tensor.get_shape().as_list())

        # Create an input node to add to self.outbound_node
        # and set output_tensors' _keras_history.
        input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
        base_layer.Node(self,
                        inbound_layers=[],
                        node_indices=[],
                        tensor_indices=[],
                        input_tensors=[input_tensor],
                        output_tensors=[input_tensor])
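A short sketch of the input_shape / batch_input_shape distinction that the check at the top of this variant enforces, assuming a tf.keras InputLayer with the same constructor arguments (the shapes here are arbitrary):

from tensorflow.keras.layers import InputLayer

InputLayer(input_shape=(10,), batch_size=16)  # shape and batch size given separately
InputLayer(batch_input_shape=(16, 10))        # folded into one argument

try:
    InputLayer(input_shape=(10,), batch_input_shape=(16, 10))  # both at once is rejected
except ValueError as err:
    print(err)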
Example #5
  def __init__(self,
               input_shape=None,
               batch_size=None,
               dtype=None,
               input_tensor=None,
               sparse=False,
               name=None,
               **kwargs):
    strategy = distribution_strategy_context.get_strategy()
    if strategy and batch_size is not None and \
        distributed_training_utils.global_batch_size_supported(strategy):
      if batch_size % strategy.num_replicas_in_sync != 0:
        raise ValueError('The `batch_size` argument ({}) must be divisible '
                         'by the number of replicas ({}).'.format(
                             batch_size, strategy.num_replicas_in_sync))
      batch_size = batch_size // strategy.num_replicas_in_sync

    if 'batch_input_shape' in kwargs:
      batch_input_shape = kwargs.pop('batch_input_shape')
      if input_shape and batch_input_shape:
        raise ValueError('Only provide the input_shape OR '
                         'batch_input_shape argument to '
                         'InputLayer, not both at the same time.')
      batch_size = batch_input_shape[0]
      input_shape = batch_input_shape[1:]
    if kwargs:
      raise ValueError('Unrecognized keyword arguments:', kwargs.keys())

    if not name:
      prefix = 'input'
      name = prefix + '_' + str(backend.get_uid(prefix))

    if not dtype:
      if input_tensor is None:
        dtype = backend.floatx()
      else:
        dtype = backend.dtype(input_tensor)
    elif input_tensor is not None and input_tensor.dtype != dtype:
      raise ValueError('`input_tensor.dtype` differs from `dtype`: %s vs. %s' %
                       (input_tensor.dtype, dtype))
    super(InputLayer, self).__init__(dtype=dtype, name=name)
    self.built = True
    self.sparse = sparse
    self.batch_size = batch_size
    self.supports_masking = True

    if isinstance(input_shape, tensor_shape.TensorShape):
      input_shape = tuple(input_shape.as_list())
    elif isinstance(input_shape, int):
      input_shape = (input_shape,)

    if input_tensor is None:
      if input_shape is not None:
        batch_input_shape = (batch_size,) + tuple(input_shape)
      else:
        batch_input_shape = None
      graph = backend.get_graph()
      with graph.as_default():
        # In graph mode, create a graph placeholder to call the layer on.
        if sparse:
          input_tensor = backend.placeholder(
              shape=batch_input_shape,
              dtype=dtype,
              name=self.name,
              sparse=True)
        else:
          input_tensor = backend.placeholder(
              shape=batch_input_shape,
              dtype=dtype,
              name=self.name)

      self.is_placeholder = True
      self._batch_input_shape = batch_input_shape
    else:
      if not tf_utils.is_symbolic_tensor(input_tensor):
        raise ValueError('You should not pass an EagerTensor to `Input`. '
                         'For example, instead of creating an '
                         'InputLayer, you should instantiate your model and '
                         'directly call it on your input.')
      self.is_placeholder = False
      self._batch_input_shape = tuple(input_tensor.shape.as_list())

    # Create an input node to add to self.outbound_node
    # and set output_tensors' _keras_history.
    input_tensor._keras_history = (self, 0, 0)  # pylint: disable=protected-access
    input_tensor._keras_mask = None
    base_layer.Node(
        self,
        inbound_layers=[],
        node_indices=[],
        tensor_indices=[],
        input_tensors=[input_tensor],
        output_tensors=[input_tensor])
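The distribution-strategy handling at the top of this variant reduces to a divisibility check plus an integer division to obtain the per-replica batch size; a small self-contained sketch of that logic (the function name is illustrative):

def per_replica_batch_size(global_batch_size, num_replicas_in_sync):
    # The global batch size must split evenly across the replicas in sync.
    if global_batch_size % num_replicas_in_sync != 0:
        raise ValueError(
            'The `batch_size` argument ({}) must be divisible by the number '
            'of replicas ({}).'.format(global_batch_size, num_replicas_in_sync))
    return global_batch_size // num_replicas_in_sync

print(per_replica_batch_size(64, 8))  # 8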