def compress(self, bottleneck, indexes):
  """Compresses a floating-point tensor.

  Compresses the tensor to bit strings. `bottleneck` is first quantized
  as in `quantize()`, and then compressed using the probability tables derived
  from `indexes`. The quantized tensor can later be recovered by calling
  `decompress()`.

  The innermost `self.coding_rank` dimensions are treated as one coding unit,
  i.e. are compressed into one string each. Any additional dimensions to the
  left are treated as batch dimensions.

  Args:
    bottleneck: `tf.Tensor` containing the data to be compressed.
    indexes: `tf.Tensor` specifying the scalar distribution for each element
      in `bottleneck`. See class docstring for examples.

  Returns:
    A `tf.Tensor` having the same shape as `bottleneck` without the
    `self.coding_rank` innermost dimensions, containing a string for each
    coding unit.
  """
  bottleneck = tf.convert_to_tensor(bottleneck, dtype=self.bottleneck_dtype)
  # Augment the indexes with the noise-level (offset) index, then map them
  # into the flat index space of the CDF table.
  indexes = _add_offset_indexes(indexes, self._num_noise_levels)
  indexes = self._normalize_indexes(indexes)
  flat_indexes = self._flatten_indexes(indexes)
  symbols_shape = tf.shape(flat_indexes)
  batch_shape = symbols_shape[:-self.coding_rank]
  # Subtract the per-element quantization offset, round to integers, and shift
  # by the per-distribution CDF offset to obtain symbols for the range coder.
  offset = self._offset_from_indexes(indexes)
  symbols = tf.cast(tf.round(bottleneck - offset), tf.int32)
  symbols -= tf.gather(self.cdf_offset, flat_indexes)
  # Range encode one string per batch element, using the distribution selected
  # by `flat_indexes` for each symbol.
  handle = gen_ops.create_range_encoder(batch_shape, self.cdf)
  handle = gen_ops.entropy_encode_index(handle, flat_indexes, symbols)
  return gen_ops.entropy_encode_finalize(handle)
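
# Illustrative usage sketch (not from the source): assumes an indexed entropy
# model along the lines of `tfc.ContinuousIndexedEntropyModel` (or its universal
# variant), where `indexes` selects one scalar distribution per element.
# Constructor arguments, parameter functions, and shapes below are hypothetical.
import tensorflow as tf
import tensorflow_compression as tfc

em = tfc.ContinuousIndexedEntropyModel(
    tfc.NoisyNormal, index_ranges=64,
    parameter_fns=dict(loc=lambda _: 0., scale=lambda i: tf.exp(i / 8. - 5.)),
    coding_rank=1, compression=True)
bottleneck = tf.random.normal((4, 32))
indexes = tf.random.uniform((4, 32), maxval=64.)  # one index per element
strings = em.compress(bottleneck, indexes)        # shape (4,), one string per row
bottleneck_hat = em.decompress(strings, indexes)  # same shape as `bottleneck`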
def compress(self, bottleneck):
  """Compresses a floating-point tensor.

  Compresses the tensor to bit strings. `bottleneck` is first quantized
  as in `quantize()`, and then compressed using the probability tables in
  `self.cdf` (derived from `self.prior`). The quantized tensor can later be
  recovered by calling `decompress()`.

  The innermost `self.coding_rank` dimensions are treated as one coding unit,
  i.e. are compressed into one string each. Any additional dimensions to the
  left are treated as batch dimensions.

  Args:
    bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
      least `self.coding_rank` dimensions, and the innermost dimensions must
      be broadcastable to `self.prior_shape`.

  Returns:
    A `tf.Tensor` having the same shape as `bottleneck` without the
    `self.coding_rank` innermost dimensions, containing a string for each
    coding unit.
  """
  input_shape = tf.shape(bottleneck)
  all_but_last_n_elems = lambda t, n: t[:-n] if n else t
  # The innermost `coding_rank` dimensions form one coding unit each; the
  # dimensions outside `prior_shape` are modeled i.i.d. under the prior.
  batch_shape = all_but_last_n_elems(input_shape, self.coding_rank)
  iid_shape = all_but_last_n_elems(input_shape, self.prior_shape.rank)
  # Quantize: subtract the quantization offset (if any) and round to integers.
  offset = self.quantization_offset
  if offset is not None:
    bottleneck -= offset
  symbols = tf.cast(tf.round(bottleneck), tf.int32)
  # Flatten the prior dimensions into one channel axis and shift the symbols
  # by the per-channel CDF offset.
  symbols = tf.reshape(symbols, tf.concat([iid_shape, [-1]], 0))
  symbols -= self.cdf_offset
  # Range encode one string per batch element, selecting the CDF by channel.
  handle = gen_ops.create_range_encoder(batch_shape, self.cdf)
  handle = gen_ops.entropy_encode_channel(handle, symbols)
  return gen_ops.entropy_encode_finalize(handle)
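
# Illustrative usage sketch (not from the source): assumes a batched entropy
# model roughly like `tfc.ContinuousBatchedEntropyModel`, where the probability
# tables come from the prior and no per-element indexes are passed. The prior
# and shapes are hypothetical; with `coding_rank=3`, the innermost three
# dimensions form one coding unit and the leading dimension is a batch dimension.
import tensorflow as tf
import tensorflow_compression as tfc

prior = tfc.NoisyDeepFactorized(batch_shape=(16,))
em = tfc.ContinuousBatchedEntropyModel(prior, coding_rank=3, compression=True)
bottleneck = tf.random.normal((2, 8, 8, 16))  # (batch, height, width, channels)
strings = em.compress(bottleneck)             # shape (2,), dtype tf.string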
def compress(self, bottleneck):
  """Compresses a floating-point tensor.

  Compresses the tensor to bit strings. `bottleneck` is first quantized
  as in `quantize()`, and then compressed using the probability tables in
  `self.cdf` derived from `self.prior`. The quantized tensor can later be
  recovered by calling `decompress()`.

  The innermost `self.coding_rank` dimensions are treated as one coding unit,
  i.e. are compressed into one string each. Any additional dimensions to the
  left are treated as batch dimensions.

  Args:
    bottleneck: `tf.Tensor` containing the data to be compressed. Must have at
      least `self.coding_rank` dimensions, and the innermost dimensions must
      be broadcastable to `self.prior_shape`.

  Returns:
    A `tf.Tensor` having the same shape as `bottleneck` without the
    `self.coding_rank` innermost dimensions, containing a string for each
    coding unit.
  """
  bottleneck = tf.convert_to_tensor(bottleneck, dtype=self.bottleneck_dtype)
  input_shape = tf.shape(bottleneck)
  input_rank = tf.shape(input_shape)[0]
  # Split the shape into batch dimensions and the coding unit, and compute the
  # broadcast shape over which the prior is tiled.
  batch_shape, coding_shape = tf.split(
      input_shape, [input_rank - self.coding_rank, self.coding_rank])
  broadcast_shape = coding_shape[:self.coding_rank - len(self.prior_shape)]
  # Look up the CDF indexes and quantization offset for each element, then
  # quantize and shift by the per-distribution CDF offset.
  indexes, offset = self._compute_indexes_and_offset(broadcast_shape)
  bottleneck -= offset
  symbols = tf.cast(tf.round(bottleneck), tf.int32)
  symbols -= tf.gather(self.cdf_offset, indexes)
  # Range encode one string per batch element, using the distribution selected
  # by `indexes` for each symbol.
  handle = gen_ops.create_range_encoder(batch_shape, self.cdf)
  encode_indexes = tf.broadcast_to(indexes, tf.shape(symbols))
  handle = gen_ops.entropy_encode_index(handle, encode_indexes, symbols)
  return gen_ops.entropy_encode_finalize(handle)
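
# Illustrative round-trip sketch (not from the source): per the docstring,
# decompressing the strings recovers the quantized bottleneck, i.e. the values
# `quantize()` would produce. Assumes a `tfc.ContinuousBatchedEntropyModel` whose
# `decompress()` takes the broadcast shape between the batch dimensions and
# `self.prior_shape`; the prior and shapes below are hypothetical.
import tensorflow as tf
import tensorflow_compression as tfc

prior = tfc.NoisyDeepFactorized(batch_shape=(16,))
em = tfc.ContinuousBatchedEntropyModel(prior, coding_rank=3, compression=True)
bottleneck = tf.random.normal((2, 8, 8, 16))
strings = em.compress(bottleneck)
bottleneck_hat = em.decompress(strings, (8, 8))  # broadcast shape: dims not in prior_shape
# `bottleneck_hat` should match `em.quantize(bottleneck)`.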