def testReadmeExample(self):
    # 128 * 128 = 2**14 samples, so the cumulative histogram lines up with
    # the coder's probability scale at precision=14 below.
    data = random_ops.random_uniform((128, 128), 0, 10, dtype=dtypes.int32)
    histogram = math_ops.bincount(data, minlength=10, maxlength=10)
    cdf = math_ops.cumsum(histogram, exclusive=False)
    # Prepend a zero and reshape so the CDF broadcasts over both data
    # dimensions: shape [1, 1, num_symbols + 1].
    cdf = array_ops.pad(cdf, [[1, 0]])
    cdf = array_ops.reshape(cdf, [1, 1, -1])

    data = math_ops.cast(data, dtypes.int16)
    encoded = coder_ops.range_encode(data, cdf, precision=14)
    decoded = coder_ops.range_decode(
        encoded, array_ops.shape(data), cdf, precision=14)

    with self.test_session() as sess:
      self.assertAllEqual(*sess.run((data, decoded)))
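The test above is set up so that the raw cumulative counts land exactly on the coder's probability scale: 128 * 128 = 2**14 samples encoded with precision=14. A quick check of that property (a sketch, not part of the original test) could look like:

    # Sanity-check sketch: the final CDF entry equals 2**precision because the
    # histogram covers exactly 128 * 128 = 2**14 samples.
    with self.test_session() as sess:
      cdf_value = sess.run(cdf)
      assert cdf_value[0, 0, -1] == 2 ** 14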
Example #2
  def compress(self, inputs, device='cpu'):
    """Compress inputs and store their binary representations into strings.

    Arguments:
      inputs: `Tensor` with values to be compressed. Must have shape 
      [points, channels] (torch)
    Returns:
      compressed: String vector containing the compressed
        representation of each batch element of `inputs`. (numpy)
      min_v & max_v (numpy).
    """   
    # get symbols
    values = self._quantize(inputs, "symbols")
    values = values.detach()
    self._channels = values.shape[-1]

    # get symbol range [min_v, max_v]
    min_v = torch.floor(torch.min(values.data)).short()
    max_v = torch.ceil(torch.max(values.data)).short()
    min_v_data = min_v.cpu().numpy()
    max_v_data = max_v.cpu().numpy()

    # early stop: all symbols are identical, so return a single placeholder byte
    if min_v_data == max_v_data:
      strings = bytes(1)  # b'\x00'
      return strings, min_v_data, max_v_data

    # get cdf
    cdf = self._get_cdf(min_v_data, max_v_data, device)

    # Hand the symbols to TensorFlow for range coding.
    values_data = values.detach().cpu().numpy()
    values_tf = tf.convert_to_tensor(values_data)  # shape=[points, channels]
    values_tf = tf.reshape(values_tf, [-1, self._channels])  # TODO: delete.
    values_tf = tf.cast(values_tf, tf.int16)
    values_tf -= min_v_data  # shift symbols so the smallest value maps to 0
    
    # range encode.
    strings = coder_ops.range_encode(
        values_tf, cdf, precision=self._range_coder_precision)

    # TODO: check numpy, torch, tf.
    return strings.numpy(), min_v_data, max_v_data
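A minimal usage sketch for the PyTorch/TensorFlow hybrid compress above; the `entropy_bottleneck` instance is a placeholder for whatever object defines this method, and eager TensorFlow execution is assumed so that `.numpy()` works:

# Hypothetical usage; names are illustrative only.
y = torch.randn(1024, 8) * 4  # [points, channels] latent features
strings, min_v, max_v = entropy_bottleneck.compress(y)
print(len(strings), 'bytes, symbol range', min_v, max_v)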
Example #3
    def compress(self, inputs, loc, scale):
        """Compress inputs and store their binary representations into strings.

    Arguments:
      inputs: `Tensor` with values to be compressed. Must have shape 
      [**batch size**, length, width, height, channels]
      locs & scales: same shape like inputs.
    Returns:
      compressed: String `Tensor` vector containing the compressed
        representation of each batch element of `inputs`.
    """

        inputs = tf.convert_to_tensor(inputs, dtype=self.dtype)
        loc = tf.convert_to_tensor(loc, dtype=self.dtype)
        scale = tf.convert_to_tensor(scale, dtype=self.dtype)

        datashape = tf.shape(inputs)
        channels = datashape[-1]

        # reshape.
        loc = tf.reshape(loc, [-1, channels])
        scale = tf.reshape(scale, [-1, channels])
        inputs = tf.reshape(inputs, [-1, channels])

        # quantize.
        values = self._quantize(inputs, "symbols")
        # get cdf
        min_v = tf.cast(tf.floor(tf.reduce_min(values)), dtype=tf.int32)
        max_v = tf.cast(tf.ceil(tf.reduce_max(values)), dtype=tf.int32)
        cdf = self._get_cdf(loc, scale, min_v, max_v,
                            datashape)  # [BatchSizexHxWxD, C, N]

        # range encode: shift symbols so the smallest value maps to 0,
        # then narrow to int16 as expected by the range coder.
        values = tf.cast(values, "int32")
        values -= min_v
        values = tf.cast(values, "int16")
        strings = coder_ops.range_encode(values,
                                         cdf,
                                         precision=self._range_coder_precision)

        return strings, min_v, max_v
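A matching decode step would undo the shift applied at the end of `compress`; the following is a rough sketch under the assumption that the same `cdf` can be rebuilt from `loc`, `scale`, `min_v` and `max_v` (the `decompress` wrapper itself is hypothetical):

    def decompress(self, strings, loc, scale, min_v, max_v, datashape):
        # Hypothetical inverse of compress above; helper names and shapes
        # mirror the encode path and are assumptions.
        channels = datashape[-1]
        loc = tf.reshape(loc, [-1, channels])
        scale = tf.reshape(scale, [-1, channels])
        cdf = self._get_cdf(loc, scale, min_v, max_v, datashape)
        flat_shape = tf.stack([tf.reduce_prod(datashape[:-1]), channels])
        values = coder_ops.range_decode(
            strings, flat_shape, cdf, precision=self._range_coder_precision)
        # Undo the min_v shift applied during encoding.
        values = tf.cast(values, tf.int32) + min_v
        return tf.reshape(tf.cast(values, self.dtype), datashape)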
Example #4
    def compress(self, inputs):
        """Compress inputs and store their binary representations into strings.

    Arguments:
      inputs: `Tensor` with values to be compressed. Must have shape 
      [**batch size**, length, width, height, channels]
    Returns:
      compressed: String `Tensor` vector containing the compressed
        representation of each batch element of `inputs`.
    """

        with tf.name_scope(self._name_scope()):
            inputs = tf.convert_to_tensor(inputs)
            # Build the layer on its first call so the variables used by
            # `_get_cdf` exist.
            if not self.built:
                if self.dtype is None:
                    self._dtype = inputs.dtype.base_dtype.name
                self.build(inputs.shape)

            ndim = self.input_spec.ndim
            channel_axes = ndim - 1
            channels = self.input_spec.axes[channel_axes]

            # quantize.
            values = self._quantize(inputs, "symbols")

            # get cdf
            min_v = tf.cast(tf.floor(tf.reduce_min(values)), dtype=tf.int32)
            max_v = tf.cast(tf.ceil(tf.reduce_max(values)), dtype=tf.int32)
            cdf = self._get_cdf(min_v, max_v)

            # range encode.
            values = tf.reshape(values, [-1, channels])
            values = tf.cast(values, tf.int32)
            values = values - min_v
            values = tf.cast(values, tf.int16)
            strings = coder_ops.range_encode(
                values, cdf, precision=self._range_coder_precision)

            return strings, min_v, max_v
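The `_get_cdf` helper used above is not shown. As a rough, hypothetical sketch of the table layout `range_encode` expects (integer CDFs on a `2**precision` scale, one more entry than the number of symbols, broadcastable over the data), a CDF can also be built from per-symbol probabilities with the companion op `pmf_to_quantized_cdf` available in the same `coder_ops` module (values shown are illustrative):

# Hypothetical sketch, not taken from the class above.
pmf = tf.constant([[0.1, 0.2, 0.4, 0.2, 0.1]])           # [1, num_symbols]
cdf = coder_ops.pmf_to_quantized_cdf(pmf, precision=14)   # [1, num_symbols + 1]
cdf = tf.reshape(cdf, [1, 1, -1])                         # broadcast over data
data = tf.constant([[0], [2], [4], [2]], dtype=tf.int16)  # [points, channels]
strings = coder_ops.range_encode(data, cdf, precision=14)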
Example #5
 def loop_body(tensor):
     return coder_ops.range_encode(
         tensor, cdf, precision=self.range_coder_precision)
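This fragment is presumably the body passed to a per-batch-element loop. A sketch of how it might be driven, assuming `values` holds int16 symbols shaped [batch, ..., channels] and that `cdf` and `self.range_coder_precision` are defined in the surrounding class (the `tf.map_fn` wiring is an assumption):

 # Hypothetical driver: encode each batch element into its own string.
 strings = tf.map_fn(
     loop_body, values, dtype=tf.string, back_prop=False)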