def compress(self, inputs):
    """Compress inputs and store their binary representations into strings.

    Args:
      inputs: `Tensor` with values to be compressed.

    Returns:
      String `Tensor` vector containing the compressed representation of each
      batch element of `inputs`.
    """
    with ops.name_scope(self._name_scope()):
      inputs = ops.convert_to_tensor(inputs)
      if not self.built:
        # Check input assumptions set before layer building, e.g. input rank.
        input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
        if self.dtype is None:
          self._dtype = inputs.dtype.base_dtype.name
        self.build(inputs.shape)

      # Check input assumptions set after layer building, e.g. input shape.
      if not context.executing_eagerly():
        input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)

      ndim = self.input_spec.ndim
      channel_axis = self._channel_axis(ndim)
      # Tuple of slices for expanding dimensions of tensors below.
      slices = ndim * [None] + [slice(None)]
      slices[channel_axis] = slice(None)
      slices = tuple(slices)

      # Expand dimensions of CDF to input dimensions, keeping the channels along
      # the right dimension.
      cdf = self._quantized_cdf[slices[1:]]
      num_levels = array_ops.shape(cdf)[-1] - 1

      # Bring inputs to the right range by centering the range on the medians.
      half = constant_op.constant(.5, dtype=self.dtype)
      medians = array_ops.squeeze(self._medians, [1, 2])
      offsets = (math_ops.cast(num_levels // 2, self.dtype) + half) - medians
      # Expand offsets to input dimensions and add to inputs.
      values = inputs + offsets[slices[:-1]]

      # Clip to range and cast to integers. Because we have added .5 above, and
      # all values are positive, the cast effectively implements rounding.
      values = math_ops.maximum(values, half)
      values = math_ops.minimum(
          values, math_ops.cast(num_levels, self.dtype) - half)
      values = math_ops.cast(values, dtypes.int16)

      def loop_body(tensor):
        return coder_ops.range_encode(
            tensor, cdf, precision=self.range_coder_precision)
      strings = functional_ops.map_fn(
          loop_body, values, dtype=dtypes.string, back_prop=False)

      if not context.executing_eagerly():
        strings.set_shape(inputs.shape[:1])

      return strings
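The `slices` construction above is a compact way to expand a 1-D per-channel tensor to the input's rank: indexing with `None` inserts singleton axes, while `slice(None)` keeps the channel axis intact. A standalone NumPy sketch of the same indexing trick (shapes here are illustrative, not taken from the snippet):

import numpy as np

ndim, channel_axis = 4, 3                   # e.g. NHWC input
slices = ndim * [None] + [slice(None)]
slices[channel_axis] = slice(None)          # keep channels on their own axis
slices = tuple(slices)                      # (None, None, None, :, :)

offsets = np.arange(32, dtype=np.float32)   # one offset per channel
print(offsets[slices[:-1]].shape)           # (1, 1, 1, 32): broadcasts over N, H, W
cdf = np.zeros((32, 257))                   # (channels, levels + 1)
print(cdf[slices[1:]].shape)                # (1, 1, 32, 257)

For context, a minimal usage sketch of the method itself. It assumes this `compress` is the method of `tfc.EntropyBottleneck` from the tensorflow_compression 1.x package; everything outside the snippet (package, layer name, shapes) is an assumption for illustration:

import tensorflow as tf
import tensorflow_compression as tfc

entropy_bottleneck = tfc.EntropyBottleneck()
y = tf.random.uniform((4, 16, 16, 32))                        # (batch, H, W, channels)
y_tilde, likelihoods = entropy_bottleneck(y, training=False)  # builds the layer
strings = entropy_bottleneck.compress(y)                      # one string per batch element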
Example #2
  def compress(self, inputs, input_stddev):
    """Compress inputs and store their binary representations into strings.

    Args:
      inputs: `Tensor` with values to be compressed.
      input_stddev: `Tensor` of standard deviations matching `inputs`, used to
        build the conditional (Gaussian) CDFs before encoding.

    Returns:
      String `Tensor` vector containing the compressed representation of each
      batch element of `inputs`.
    """
    with ops.name_scope(self._name_scope()):
      inputs = ops.convert_to_tensor(inputs)
      if not self.built:
        # Check input assumptions set before layer building, e.g. input rank.
        input_spec.assert_input_compatibility(self.input_spec, inputs,
                                              self.name)
        if self.dtype is None:
          self._dtype = inputs.dtype.base_dtype.name
        self.build(inputs.shape)
      input_stddev = ops.convert_to_tensor(input_stddev)
      inputs = array_ops.expand_dims(inputs, axis=4)
      input_stddev = array_ops.expand_dims(input_stddev, axis=4)
      self.build_gauss(input_stddev)

      # Expand dimensions of CDF to input dimensions, keeping the channels along
      # the right dimension.
      cdf = self._quantized_cdf
      num_levels = array_ops.shape(cdf)[-1] - 1
      half_num_levels = math_ops.cast(num_levels // 2, self.dtype)

      # Bring inputs to the right range by centering the range on the medians.
      half = constant_op.constant(.5, dtype=self.dtype)
      offsets = (half_num_levels + half) - self._medians
      # Expand offsets to input dimensions and add to inputs.
      values = inputs + offsets

      # Clip to range and cast to integers. Because we have added .5 above, and
      # all values are positive, the cast effectively implements rounding.
      values = math_ops.maximum(values, half)
      values = math_ops.minimum(
          values, math_ops.cast(num_levels, self.dtype) - half)
      values = math_ops.cast(values, dtypes.int16)

      values = array_ops.squeeze(values, [-1])

      def loop_body(tensor):
        return coder_ops.range_encode(
            tensor, cdf, precision=self.range_coder_precision)
      strings = functional_ops.map_fn(
          loop_body, values, dtype=dtypes.string, back_prop=False)

      if not context.executing_eagerly():
        strings.set_shape(inputs.shape[:1])

      return strings
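The offset/clip/cast sequence above rounds without an explicit rounding op: after adding the 0.5 offset, every value is positive, so truncation by the integer cast lands on the nearest quantization level. A standalone NumPy sketch of the same arithmetic (all numbers illustrative):

import numpy as np

num_levels = 255
median = 0.2
half = 0.5
offset = (num_levels // 2 + half) - median        # center the range on the median
values = np.array([-1.3, 0.0, 0.7]) + offset
values = np.clip(values, half, num_levels - half)
symbols = values.astype(np.int16)                 # truncating x + .5 == floor-rounding x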
Example #3
    def _maybe_build(self, inputs):
        # Check input assumptions set before layer building, e.g. input rank.
        if not self.built:
            input_spec.assert_input_compatibility(self.input_spec, inputs,
                                                  self.name)
            input_list = nest.flatten(inputs)

            input_shapes = None
            if all(hasattr(x, 'shape') for x in input_list):
                input_shapes = nest.map_structure(lambda x: x.shape, inputs)
            # Only call `build` if the user has manually overridden the build method.
            if not hasattr(self.build, '_is_default'):
                # Any setup work performed only once should happen in an `init_scope`
                # to avoid creating symbolic Tensors that will later pollute any eager
                # operations.
                with tf_utils.maybe_init_scope(self):
                    self.build(input_shapes)
            # We must set self.built since user defined build functions are not
            # constrained to set self.built.
            self.built = True
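The `hasattr(self.build, '_is_default')` check above works because Keras tags its own no-op `build` with a marker attribute; a user-defined override does not carry it, so only overridden builds are invoked. A standalone sketch of the mechanism (the `default` decorator here mirrors Keras's internal one in spirit; it is not the Keras source):

def default(method):
    """Mark a method as a do-nothing default implementation."""
    method._is_default = True
    return method

class Base:
    @default
    def build(self, input_shape):
        pass

class WithCustomBuild(Base):
    def build(self, input_shape):                # override: no marker attribute
        self.last_shape = input_shape

print(hasattr(Base().build, '_is_default'))             # True  -> skip build
print(hasattr(WithCustomBuild().build, '_is_default'))  # False -> call build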
Example #4
    def compress(self, inputs):
        """Compress inputs and store their binary representations into strings.

        Arguments:
          inputs: `Tensor` with values to be compressed.

        Returns:
          compressed: String `Tensor` vector containing the compressed
            representation of each batch element of `inputs`.

        Raises:
          ValueError: if `inputs` has an integral or inconsistent `DType`, or
            inconsistent number of channels.
        """
        with tf.name_scope(self._name_scope()):
            inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
            if not self.built:
                # Check input assumptions set before layer building, e.g. input rank.
                input_spec.assert_input_compatibility(self.input_spec, inputs,
                                                      self.name)
                if self.dtype is None:
                    self._dtype = inputs.dtype.base_dtype.name
                self.build(inputs.shape)

            # Check input assumptions set after layer building, e.g. input shape.
            if not tf.executing_eagerly():
                input_spec.assert_input_compatibility(self.input_spec, inputs,
                                                      self.name)
                if inputs.dtype.is_integer:
                    raise ValueError("{} can't take integer inputs.".format(
                        type(self).__name__))

            symbols = self._quantize(inputs, "symbols")
            assert symbols.dtype == tf.int32

            ndim = self.input_spec.ndim
            indexes = self._prepare_indexes(shape=tf.shape(symbols)[1:])
            broadcast_indexes = (indexes.shape.ndims != ndim)
            if broadcast_indexes:
                # We can't currently broadcast over anything else but the batch axis.
                assert indexes.shape.ndims == ndim - 1
                args = (symbols, )
            else:
                args = (symbols, indexes)

            def loop_body(args):
                string = range_coding_ops.unbounded_index_range_encode(
                    args[0],
                    indexes if broadcast_indexes else args[1],
                    self._quantized_cdf,
                    self._cdf_length,
                    self._offset,
                    precision=self.range_coder_precision,
                    overflow_width=4,
                    debug_level=0)
                return string

            strings = tf.map_fn(loop_body,
                                args,
                                dtype=tf.string,
                                back_prop=False,
                                name="compress")

            if not tf.executing_eagerly():
                strings.set_shape(inputs.shape[:1])

            return strings
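A note on the `tf.map_fn` call above: `dtype=tf.string` declares that each per-batch-element output is a string rather than matching the input's dtype. In current TF 2.x releases the `dtype` and `back_prop` arguments are deprecated in favor of `fn_output_signature` (and `tf.stop_gradient` around the call). A minimal standalone sketch of the same batch-iteration pattern, with a stand-in for the range encoder:

import tensorflow as tf

symbols = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int32)

def encode_one(row):
    # Stand-in for range encoding: serialize one batch element to a string.
    return tf.io.serialize_tensor(row)

strings = tf.map_fn(encode_one, symbols, fn_output_signature=tf.string)
print(strings.shape)  # (2,): one string per batch element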
Example #5
  def compress(self, inputs):
    """Compress inputs and store their binary representations into strings.

    Arguments:
      inputs: `Tensor` with values to be compressed.

    Returns:
      compressed: String `Tensor` vector containing the compressed
        representation of each batch element of `inputs`.

    Raises:
      ValueError: if `inputs` has an integral or inconsistent `DType`, or
        inconsistent number of channels.
    """
    with tf.name_scope(self._name_scope()):
      inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
      if not self.built:
        # Check input assumptions set before layer building, e.g. input rank.
        input_spec.assert_input_compatibility(
            self.input_spec, inputs, self.name)
        if self.dtype is None:
          self._dtype = inputs.dtype.base_dtype.name
        self.build(inputs.shape)

      # Check input assumptions set after layer building, e.g. input shape.
      if not tf.executing_eagerly():
        input_spec.assert_input_compatibility(
            self.input_spec, inputs, self.name)
        if inputs.dtype.is_integer:
          raise ValueError(
              "{} can't take integer inputs.".format(type(self).__name__))

      symbols = self._quantize(inputs, "symbols")
      assert symbols.dtype == tf.int32

      ndim = self.input_spec.ndim
      indexes = self._prepare_indexes(shape=tf.shape(symbols)[1:])
      broadcast_indexes = (indexes.shape.ndims != ndim)
      if broadcast_indexes:
        # We can't currently broadcast over anything else but the batch axis.
        assert indexes.shape.ndims == ndim - 1
        args = (symbols,)
      else:
        args = (symbols, indexes)

      def loop_body(args):
        string = range_coding_ops.unbounded_index_range_encode(
            args[0], indexes if broadcast_indexes else args[1],
            self._quantized_cdf, self._cdf_length, self._offset,
            precision=self.range_coder_precision, overflow_width=4,
            debug_level=0)
        return string

      strings = tf.map_fn(
          loop_body, args, dtype=tf.string,
          back_prop=False, name="compress")

      if not tf.executing_eagerly():
        strings.set_shape(inputs.shape[:1])

      return strings
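For context, a sketch of the round trip this method is one half of. It assumes tensorflow_compression 1.x, where conditional entropy models such as `tfc.GaussianConditional` pair `compress` with a matching `decompress`; the scale table, shapes, and exact signatures are assumptions for illustration:

import tensorflow as tf
import tensorflow_compression as tfc

scale_table = [0.11, 0.5, 1.0, 2.0, 8.0]
sigma = tf.ones((1, 8, 8, 16))                 # per-element scale predictions
conditional = tfc.GaussianConditional(sigma, scale_table)
y = tf.random.normal((1, 8, 8, 16))
strings = conditional.compress(y)              # as defined above
y_hat = conditional.decompress(strings)        # inverse of compress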
Example #6
    def inference(self, inputs, *args, **kwargs):

        call_context = base_layer_utils.call_context()
        input_list = nest.flatten(inputs)

        # We will attempt to build a TF graph if & only if all inputs are symbolic.
        # This is always the case in graph mode. It can also be the case in eager
        # mode when all inputs can be traced back to `keras.Input()` (when building
        # models using the functional API).
        build_graph = tf_utils.are_all_symbolic_tensors(input_list)

        # Accept NumPy and scalar inputs by converting to Tensors.
        if any(isinstance(x, (np.ndarray, float, int)) for x in input_list):
            def _convert_non_tensor(x):
                # Don't call `ops.convert_to_tensor` on all `inputs` because
                # `SparseTensors` can't be converted to `Tensor`.
                if isinstance(x, (np.ndarray, float, int)):
                    return ops.convert_to_tensor(x)
                return x
            inputs = nest.map_structure(_convert_non_tensor, inputs)
            input_list = nest.flatten(inputs)

        # Handle `mask` propagation from previous layer to current layer. Masks can
        # be propagated explicitly via the `mask` argument, or implicitly via
        # setting the `_keras_mask` attribute on the inputs to a Layer. Masks passed
        # explicitly take priority.
        mask_arg_passed_by_framework = False
        input_masks = self._collect_input_masks(inputs, args, kwargs)
        if (self._expects_mask_arg and input_masks is not None and
                not self._call_arg_was_passed('mask', args, kwargs)):
            mask_arg_passed_by_framework = True
            kwargs['mask'] = input_masks

        # If `training` argument was not explicitly passed, propagate `training`
        # value from this layer's calling layer.
        training_arg_passed_by_framework = False
        # Priority 1: `training` was explicitly passed.
        if self._call_arg_was_passed('training', args, kwargs):
            training_value = self._get_call_arg_value('training', args, kwargs)
            if not self._expects_training_arg:
                kwargs.pop('training')
        else:
            training_value = None
            # Priority 2: `training` was passed to a parent layer.
            if call_context.training is not None:
                training_value = call_context.training
            # Priority 3a: `learning_phase()` has been set.
            elif backend.global_learning_phase_is_set():
                training_value = backend.learning_phase()
            # Priority 3b: Pass the `learning_phase()` if in the Keras FuncGraph.
            elif build_graph:
                with backend.get_graph().as_default():
                    if base_layer_utils.is_in_keras_graph():
                        training_value = backend.learning_phase()

            if self._expects_training_arg and training_value is not None:
                # Force the training_value to be bool type which matches to the contract
                # for layer/model call args.
                if tensor_util.is_tensor(training_value):
                    training_value = math_ops.cast(training_value, dtypes.bool)
                else:
                    training_value = bool(training_value)
                kwargs['training'] = training_value
                training_arg_passed_by_framework = True

        # Only create Keras history if at least one tensor originates from a
        # `keras.Input`. Otherwise this Layer may be being used outside the Keras
        # framework.
        if build_graph and base_layer_utils.needs_keras_history(inputs):
            base_layer_utils.create_keras_history(inputs)

        # Clear eager losses on top level model call.
        # We are clearing the losses only on the top level model call and not on
        # every layer/model call because layer/model may be reused.
        if (base_layer_utils.is_in_eager_or_tf_function() and
                not call_context.in_call):
            self._clear_losses()

        with call_context.enter(self, inputs, build_graph, training_value):
            # Check input assumptions set after layer building, e.g. input shape.
            if build_graph:
                # Symbolic execution on symbolic tensors. We will attempt to build
                # the corresponding TF subgraph inside `backend.get_graph()`
                # TODO(reedwm): We should assert input compatibility after the inputs
                # are casted, not before.
                input_spec.assert_input_compatibility(self.input_spec, inputs,
                                                      self.name)
                if (any(isinstance(x, ragged_tensor.RaggedTensor) for x in input_list)
                        and self._supports_ragged_inputs is False):  # pylint: disable=g-bool-id-comparison
                    raise ValueError('Layer %s does not support RaggedTensors as input. '
                                     'Inputs received: %s. You can try converting your '
                                     'input to a uniform tensor.' % (self.name, inputs))

                graph = backend.get_graph()
                with graph.as_default(), backend.name_scope(self._name_scope()):
                    # Build layer if applicable (if the `build` method has been
                    # overridden).
                    self._maybe_build(inputs)
                    cast_inputs = self._maybe_cast_inputs(inputs)

                    # Wrapping `call` function in autograph to allow for dynamic control
                    # flow and control dependencies in call. We are limiting this to
                    # subclassed layers as autograph is strictly needed only for
                    # subclassed layers and models.
                    # tf_convert will respect the value of autograph setting in the
                    # enclosing tf.function, if any.
                    if (base_layer_utils.is_subclassed(self) and
                            not base_layer_utils.from_saved_model(self)):
                        call_fn = autograph.tf_convert(
                                self._inference, ag_ctx.control_status_ctx())
                    else:
                        call_fn = self._inference

                    if not self.dynamic:
                        try:
                            with base_layer_utils.autocast_context_manager(
                                    self._compute_dtype):
                                # Add auto_control_deps in V2 when they are not already added by
                                # a `tf.function`.
                                if (ops.executing_eagerly_outside_functions() and
                                        not base_layer_utils.is_in_eager_or_tf_function()):
                                    with auto_control_deps.AutomaticControlDependencies() as acd:
                                        outputs = call_fn(cast_inputs, *args, **kwargs)
                                        # Wrap Tensors in `outputs` in `tf.identity` to avoid
                                        # circular dependencies.
                                        outputs = base_layer_utils.mark_as_return(outputs, acd)
                                else:
                                    outputs = call_fn(cast_inputs, *args, **kwargs)

                        except errors.OperatorNotAllowedInGraphError as e:
                            raise TypeError('You are attempting to use Python control '
                                            'flow in a layer that was not declared to be '
                                            'dynamic. Pass `dynamic=True` to the class '
                                            'constructor.\nEncountered error:\n"""\n' +
                                            str(e) + '\n"""')
                    else:
                        # We will use static shape inference to return symbolic tensors
                        # matching the specifications of the layer outputs.
                        # Since `self.dynamic` is True, we will never attempt to
                        # run the underlying TF graph (which is disconnected).
                        # TODO(fchollet): consider py_func as an alternative, which
                        # would enable us to run the underlying graph if needed.
                        outputs = self._symbolic_call(inputs)

                    if outputs is None:
                        raise ValueError('A layer\'s `call` method should return a '
                                         'Tensor or a list of Tensors, not None '
                                         '(layer: ' + self.name + ').')
                    if base_layer_utils.have_all_keras_metadata(inputs):
                        if training_arg_passed_by_framework:
                            kwargs.pop('training')
                        if mask_arg_passed_by_framework:
                            kwargs.pop('mask')
                        inputs, outputs = self._set_connectivity_metadata_(
                                inputs, outputs, args, kwargs)
                    self._handle_activity_regularization(inputs, outputs)
                    self._set_mask_metadata(inputs, outputs, input_masks)
                    if hasattr(self, '_set_inputs') and not self.inputs:
                        # Subclassed network: explicitly set metadata normally set by
                        # a call to self._set_inputs().
                        # TODO(b/120997007): This should be done in Eager as well, but
                        # causes garbage collection issues because of the placeholders
                        # created on the default Keras graph.
                        self._set_inputs(inputs, outputs)
            else:
                # Eager execution on data tensors.
                with backend.name_scope(self._name_scope()):
                    self._maybe_build(inputs)
                    cast_inputs = self._maybe_cast_inputs(inputs)
                    with base_layer_utils.autocast_context_manager(
                            self._compute_dtype):
                        outputs = self._inference(cast_inputs, *args, **kwargs)
                    self._handle_activity_regularization(inputs, outputs)
                    self._set_mask_metadata(inputs, outputs, input_masks)

        return outputs
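The `training` handling above resolves the flag by a strict priority chain: an explicitly passed argument wins, then a value propagated from the calling layer, then the global learning phase. A compact standalone restatement of that chain (illustrative, not the Keras source):

def resolve_training(explicit=None, parent=None, learning_phase=None):
    """Mirror of the priority chain in `inference` above."""
    if explicit is not None:          # Priority 1: passed directly in the call
        return bool(explicit)
    if parent is not None:            # Priority 2: the calling layer's value
        return bool(parent)
    if learning_phase is not None:    # Priority 3: global learning phase, if set
        return bool(learning_phase)
    return None                       # undetermined: leave it to the layer

assert resolve_training(explicit=False, parent=True) is False
assert resolve_training(parent=True) is True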