Example #1
    def testWrapper_CreatesProperCompressorOption1(self, low_rank_mock):
        hparams = self._create_compression_op_spec(1)
        mock_compressor = MatrixCompressorInterfaceMock(
            self._default_compressor_spec(hparams))
        low_rank_mock.side_effect = [mock_compressor]

        with mock.patch.object(comp_op, 'ApplyCompression') as apply_mock:
            compression_wrapper.get_apply_compression(hparams, _GLOBAL_STEP)
            apply_mock.assert_called_with(scope='default_scope',
                                          compression_spec=hparams,
                                          compressor=mock_compressor,
                                          global_step=_GLOBAL_STEP)
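The low_rank_mock argument indicates that this test method is wrapped with a
mock.patch.object decorator targeting the low-rank compressor class, the same
patch target that Example #4 installs through enter_context. A minimal sketch of
that presumed wiring (the decorator itself is not shown in the source and is an
assumption):

    # Hedged sketch: how low_rank_mock is presumably injected into the test.
    # The patch target mirrors Example #4's comp_op.LowRankDecompMatrixCompressor.
    @mock.patch.object(comp_op, 'LowRankDecompMatrixCompressor')
    def testWrapper_CreatesProperCompressorOption1(self, low_rank_mock):
        ...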
Example #2
    def testWrapper_CreatesProperCompressorOption2(self, sim_hash_mock):
        hparams = self._create_compression_op_spec(
            compression_wrapper.CompressionOptions.SIMHASH_MATRIX_COMPRESSION,
            compression_wrapper.UpdateOptions.PYTHON_UPDATE)
        mock_compressor = MatrixCompressorInterfaceMock(
            self._default_compressor_spec(hparams))
        sim_hash_mock.side_effect = [mock_compressor]

        with mock.patch.object(compression_wrapper,
                               'ApplyCompression') as apply_mock:
            compression_wrapper.get_apply_compression(hparams, _GLOBAL_STEP)
            apply_mock.assert_called_with(scope='default_scope',
                                          compression_spec=hparams,
                                          compressor=mock_compressor,
                                          global_step=_GLOBAL_STEP)
Example #3
    def testCompressedConv2DLayer(self):
        hparams = ("name=mnist_compression,"
                   "compress_input=True,"
                   "input_block_size=16,"
                   "input_compression_factor=2,"
                   "compression_option=9")

        compression_hparams = compression_op.InputOutputCompressionOp.get_default_hparams(
        ).parse(hparams)
        compression_obj = compression_wrapper.get_apply_compression(
            compression_hparams, global_step=0)

        val = np.random.random((10, 4, 10, 10))
        x = tf.Variable(val, dtype=tf.float32)
        y_compressed = compression_layers.CompressedConv2D(
            20,
            3,
            padding="valid",
            data_format="channels_last",
            compression_obj=compression_obj)(x)
        y = tf.keras.layers.Conv2D(20,
                                   3,
                                   padding="valid",
                                   data_format="channels_last")(x)

        self.assertAllEqual(y.shape.as_list(), y_compressed.shape.as_list())
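The assertion only compares output shapes, so it helps to spell out what both
layers produce. With data_format="channels_last", the input of shape
(10, 4, 10, 10) is batch 10, height 4, width 10, and 10 channels; a 3x3 kernel
with valid padding shrinks each spatial dimension by 2, and both layers emit 20
filters. An extra assertion that makes this expectation explicit (not part of
the original test) would be:

        # valid padding with a 3x3 kernel removes kernel_size - 1 = 2 from each
        # spatial dimension: (10, 4, 10, 10) -> (10, 2, 8, 20).
        expected_shape = [10, 2, 8, 20]
        self.assertAllEqual(y_compressed.shape.as_list(), expected_shape)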
Example #4
    def testWrapper_CreatesProperCompressorOption1(self):
        hparams = self._create_compression_op_spec(
            compression_wrapper.CompressionOptions.LOWRANK_MATRIX_COMPRESSION,
            compression_wrapper.UpdateOptions.PYTHON_UPDATE)
        mock_compressor = MatrixCompressorInterfaceMock(
            self._default_compressor_spec(hparams))
        self.enter_context(
            mock.patch.object(comp_op,
                              'LowRankDecompMatrixCompressor',
                              side_effect=[mock_compressor]))

        with mock.patch.object(compression_wrapper,
                               'ApplyCompression') as apply_mock:
            compression_wrapper.get_apply_compression(hparams, _GLOBAL_STEP)
            apply_mock.assert_called_with(scope='default_scope',
                                          compression_spec=hparams,
                                          compressor=mock_compressor,
                                          global_step=_GLOBAL_STEP)
Example #5
def main(argv):
    del argv  # unused

    tf.enable_v2_behavior()

    # Load MNIST data.
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (_, _) = mnist.load_data()
    x_train = x_train / 255.0

    if not FLAGS.use_lennet:
        x_train = x_train.reshape(60000, 784).astype('float32')

    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    train_dataset = train_dataset.shuffle(buffer_size=1024).batch(64)

    # Define model.

    num_hidden_nodes = 64
    num_classes = 10

    hparams = ('name=mnist_compression,'
               'prune_option=compression,'
               'input_block_size=20,'
               'rank=2,'
               'compression_option=9')

    compression_hparams = compression.InputCompressionOp.get_default_hparams(
    ).parse(hparams)
    compression_obj = compression_wrapper.get_apply_compression(
        compression_hparams, global_step=0)

    if not FLAGS.use_lennet:
        compressed_model = CompressedModelV2(num_hidden_nodes, num_classes,
                                             compression_obj)
    else:
        compressed_model = compressed_lenet5([28, 28, 1], num_classes,
                                             compression_obj)

    optimizer = tf.keras.optimizers.SGD(learning_rate=1e-3)
    loss = tf.keras.losses.SparseCategoricalCrossentropy()
    epochs = 10

    step_number = 0
    for epoch in range(epochs):
        for x, y in train_dataset:
            with tf.GradientTape() as t:
                loss_value = loss(y, compressed_model(x))
            grads = t.gradient(loss_value,
                               compressed_model.trainable_variables)
            optimizer.apply_gradients(
                zip(grads, compressed_model.trainable_variables))

            # compressed_model.run_alpha_update(step_number)

            step_number += 1
        print('Training loss at epoch {} is {}.'.format(epoch, loss_value))
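The main(argv) signature with del argv and the FLAGS.use_lennet lookups point to
the usual absl entry point and flag definition around this function. A minimal
sketch of that scaffolding (the flag default and help text are assumptions; only
the flag name use_lennet comes from the example):

from absl import app
from absl import flags

FLAGS = flags.FLAGS
# Assumed flag definition; only the name use_lennet is taken from the code above.
flags.DEFINE_bool('use_lennet', False,
                  'If True, train the compressed LeNet-5 model instead of the '
                  'dense CompressedModelV2 model.')


if __name__ == '__main__':
  app.run(main)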
Example #6
def create_compressor(hparams='', global_step=None):
  """Creates an ApplyCompression object to use during the inference step."""
  # Parse compression hyperparameters
  compression_hparams = compression.CompressionOp.get_default_hparams().parse(
      hparams)

  # Create a compression object using the compression hyperparameters
  compression_obj = compression_wrapper.get_apply_compression(
      compression_hparams, global_step=global_step)
  return compression_obj
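A possible call site for this helper, shown only as a usage sketch: the hparams
string is illustrative (its keys are assumed to be accepted by CompressionOp's
default hparams), and global_step=0 matches how the other examples call
get_apply_compression directly.

# Illustrative usage; the hparams values are examples, not library defaults.
compression_obj = create_compressor(
    hparams='name=my_model_compression,compression_option=9', global_step=0)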
Example #7
    def testCompressedDenseLayer(self):
        hparams = ("name=mnist_compression,"
                   "compress_input=True,"
                   "input_block_size=16,"
                   "input_compression_factor=4,")

        compression_hparams = compression_op.InputOutputCompressionOp.get_default_hparams(
        ).parse(hparams)
        # Create a compression object using the compression hyperparameters
        compression_obj = compression_wrapper.get_apply_compression(
            compression_hparams, global_step=0)
        val = np.random.random((10, 48))
        x = tf.Variable(val, dtype=tf.float32)
        y_compressed = compression_layers.CompressedDense(
            20, compression_obj=compression_obj)(x)
        y = tf.keras.layers.Dense(20)(x)

        self.assertAllEqual(y.shape.as_list(), y_compressed.shape.as_list())
Example #8
def get_matrix_compression_object(hparams,
                                  global_step=None,
                                  sparsity=None):
  """Returns a pruning/compression object.

  Args:
    hparams: Pruning spec as defined in pruning.py.
    global_step: A TensorFlow variable used for scheduling pruning/compression.
    sparsity: A TensorFlow scalar variable storing the sparsity.

  Returns:
    A Pruning or compression_lib.compression_op.ApplyCompression object.
  """
  if not global_step:
    global_step = tf.cast(tf.train.get_global_step(), tf.int32)
  if hparams.prune_option in [
      'weight', 'first_order_gradient', 'second_order_gradient']:
    return pruning.Pruning(hparams, global_step, sparsity)
  else:
    return compression_wrapper.get_apply_compression(hparams,
                                                     global_step=global_step)
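A rough usage sketch for the compression branch (the hparams values are
assumptions, and the spec is only required to carry the prune_option field that
this function reads): with prune_option set to 'compression' the call returns
the ApplyCompression wrapper, while 'weight', 'first_order_gradient', or
'second_order_gradient' would return a pruning.Pruning object instead.

# Illustrative usage; the hparams string below is an assumption, not a default.
# Note that a falsy global_step (None or 0) is treated as unset by the
# "if not global_step" check, so a non-zero step is passed here.
hparams = compression.CompressionOp.get_default_hparams().parse(
    'name=my_compression,prune_option=compression')
compression_obj = get_matrix_compression_object(hparams, global_step=100)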