Example #1
def create_compressed_model_and_algo_for_test(model,
                                              config,
                                              compression_state=None,
                                              force_no_init=False):
    """Wrap a Keras model with NNCF compression for use in tests.

    With force_no_init, an empty builder state is passed so that
    algorithm initialization (e.g. statistics collection) is skipped.
    Note that the return order is the reverse of create_compressed_model's:
    the compressed model comes first, the controller second.
    """
    assert isinstance(config, NNCFConfig)
    tf.keras.backend.clear_session()
    if force_no_init:
        compression_state = {
            BaseCompressionAlgorithmController.BUILDER_STATE: {}
        }
    algo, model = create_compressed_model(model, config, compression_state)
    return model, algo
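A minimal usage sketch for this helper (the model and config below are illustrative assumptions, not taken from the original tests):

import tensorflow as tf
from nncf import NNCFConfig

# Hypothetical one-layer model and config, for illustration only.
model = tf.keras.Sequential([tf.keras.layers.Conv2D(3, 3, input_shape=(4, 4, 1))])
config = NNCFConfig.from_dict({
    'input_info': {'sample_size': [1, 4, 4, 1]},
    'compression': {'algorithm': 'magnitude_sparsity'}
})

# Skip algorithm initialization, as a unit test typically would.
compressed_model, algo = create_compressed_model_and_algo_for_test(
    model, config, force_no_init=True)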
Example #2
def od_checkpoint_saver(config):
    """
    Load object detection checkpoint and re-save it without optimizer (memory footprint is reduced).
    """
    model_builder = get_model_od_builder(config)
    model = model_builder.build_model()

    compression_state = load_compression_state(config.ckpt_path)
    compression_ctrl, compress_model = create_compressed_model(
        model, config.nncf_config, compression_state)

    checkpoint = tf.train.Checkpoint(
        model=compress_model,
        compression_state=TFCompressionState(compression_ctrl))
    load_and_save_checkpoint(checkpoint, config)
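load_compression_state is a helper from the NNCF example code that pulls only the compression state out of a saved checkpoint. A reconstructed sketch (the import path and exact signature are assumptions):

from nncf.tensorflow.utils.state import TFCompressionStateLoader

def load_compression_state(ckpt_path):
    # TFCompressionStateLoader is a trackable object that captures the
    # compression_state entry of the checkpoint on restore.
    checkpoint = tf.train.Checkpoint(compression_state=TFCompressionStateLoader())
    path = tf.train.latest_checkpoint(ckpt_path) or ckpt_path
    checkpoint.restore(path).expect_partial()
    return checkpoint.compression_state.state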
Example #3
def seg_checkpoint_saver(config):
    """
    Load segmentation checkpoint and re-save it without optimizer (memory footprint is reduced).
    """
    model_builder = get_model_seg_builder(config)
    model = model_builder.build_model()

    compression_state = load_compression_state(config.ckpt_path)
    compression_ctrl, compress_model = create_compressed_model(
        model, config.nncf_config, compression_state)

    variables = get_variables(compress_model)
    checkpoint = tf.train.Checkpoint(
        variables=variables,
        compression_state=TFCompressionState(compression_ctrl),
        step=tf.Variable(0))
    load_and_save_checkpoint(checkpoint, config)
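This saver follows the same pattern as Example #2, but checkpoints an explicit collection of variables (obtained via get_variables) plus a step counter, instead of attaching the whole Keras model object to the checkpoint.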
Example #4
def test_distributed_masks_are_equal(quantization):
    # Clean output file
    try:
        os.remove(MASKS_SEEDS_PATH)
    except OSError:
        pass
    # Fill file with seeds
    num_of_replicas = 3
    strategy = tf.distribute.MirroredStrategy(
        [f'GPU:{i}' for i in range(num_of_replicas)])
    with strategy.scope():
        config = NNCFConfig.from_json(CONF)
        if quantization:
            config.update({
                'compression':
                [config['compression'], {
                    'algorithm': 'quantization'
                }]
            })
        model = TEST_MODELS['Conv2D']()
        compression_state_to_skip_init = {
            BaseCompressionAlgorithmController.BUILDER_STATE: {}
        }
        algo, model = create_compressed_model(model, config,
                                              compression_state_to_skip_init)
        model.add_loss(algo.loss)
        compression_callbacks = create_compression_callbacks(
            algo, log_tensorboard=False)

        model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
                      optimizer=tf.keras.optimizers.Adam(5e-4),
                      metrics=["accuracy"])

        dataset_len_per_replica = 10
        dataset_len = dataset_len_per_replica * num_of_replicas

        dummy_x = tf.random.normal((dataset_len, ) + model.input_shape[1:])
        dummy_y = tf.random.normal((dataset_len, ) + model.output_shape[1:])
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        dataset = tf.data.Dataset.from_tensor_slices(
            (dummy_x, dummy_y)).batch(num_of_replicas).with_options(options)

        model.fit(dataset,
                  epochs=1,
                  callbacks=[compression_callbacks])
    # Check seeds in file
    with open(MASKS_SEEDS_PATH, 'r', encoding='utf8') as f:
        seeds = f.readlines()
    seeds_per_replica = defaultdict(list)
    for row in seeds:
        replica_id, *seed = row.split()
        seeds_per_replica[replica_id].append(
            (int(seed[0][1:]), int(seed[1][:-1])))

    # Check seeds are equal for all replicas
    for key, other_key in combinations(seeds_per_replica, 2):
        assert seeds_per_replica[key] == seeds_per_replica[other_key]
    # Check seeds differs during training
    assert len(set(seeds_per_replica['0'])) > 1
    # Remove temporary file
    os.remove(MASKS_SEEDS_PATH)
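The test spreads work across three GPU replicas; on a machine with a single physical GPU this is typically arranged by splitting it into logical devices before the strategy is created. A minimal sketch of that setup (an assumption about the test environment, not part of the test itself):

gpus = tf.config.list_physical_devices('GPU')
if gpus:
    # Carve three logical devices out of the first physical GPU so that
    # MirroredStrategy(['GPU:0', 'GPU:1', 'GPU:2']) has replicas to place on.
    tf.config.set_logical_device_configuration(
        gpus[0],
        [tf.config.LogicalDeviceConfiguration(memory_limit=1024)] * 3)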
Example #5
def test_rb_sparse_target_lenet(distributed, quantized):
    if not os.path.exists(MODEL_PATH):
        train_lenet()

    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_test, y_test = x_test[:2], y_test[:2]

    x_train = tf.transpose(tf.reshape(x_train, (-1, 1, 28, 28)), (0, 2, 3, 1))
    x_test = tf.transpose(tf.reshape(x_test, (-1, 1, 28, 28)), (0, 2, 3, 1))

    x_train = x_train / 255
    x_test = x_test / 255

    batch_size = 128
    if distributed:
        num_of_replicas = 3
        strategy = tf.distribute.MirroredStrategy(
            [f'GPU:{i}' for i in range(num_of_replicas)])
    else:
        strategy = tf.distribute.OneDeviceStrategy('device:CPU:0')

    tf.keras.backend.clear_session()
    with strategy.scope():
        dataset_train = tf.data.Dataset.from_tensor_slices(
            (x_train, y_train)).batch(batch_size)
        dataset_test = tf.data.Dataset.from_tensor_slices(
            (x_test, y_test)).batch(batch_size)
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        dataset_train = dataset_train.with_options(options)
        dataset_test = dataset_test.with_options(options)

        model = get_lenet_model()
        model.load_weights(MODEL_PATH)

        freeze_epoch = 4
        config = get_basic_sparsity_config(sparsity_init=0.04,
                                           sparsity_target=0.3,
                                           sparsity_target_epoch=3,
                                           sparsity_freeze_epoch=freeze_epoch,
                                           scheduler='exponential')
        if quantized:
            config.update({
                'compression':
                [config['compression'], {
                    'algorithm': 'quantization'
                }]
            })

        compression_state_to_skip_init = {
            BaseCompressionAlgorithmController.BUILDER_STATE: {}
        }
        compress_algo, compress_model = create_compressed_model(
            model, config, compression_state_to_skip_init)
        compression_callbacks = create_compression_callbacks(
            compress_algo, log_tensorboard=True, log_dir='logdir/')

        sparse_algo = compress_algo.child_ctrls[0] \
            if isinstance(compress_algo, CompositeCompressionAlgorithmController) else compress_algo

        class SparsityRateTestCallback(tf.keras.callbacks.Callback):
            def on_epoch_end(self, epoch, logs=None):
                target = sparse_algo.loss.target_sparsity_rate
                nncf_stats = sparse_algo.statistics()
                actual = nncf_stats.rb_sparsity.model_statistics.sparsity_level_for_layers
                print(f'target {target}, actual {actual}')
                if epoch + 1 <= freeze_epoch:
                    assert abs(actual - target) < 0.05
                else:
                    assert tf.cast(sparse_algo.loss.disabled, tf.bool)
                    assert tf.equal(sparse_algo.loss.calculate(),
                                    tf.constant(0.))

        loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()

        metrics = [
            tf.keras.metrics.CategoricalAccuracy(name='acc@1'),
            tf.keras.metrics.TopKCategoricalAccuracy(k=5, name='acc@5'),
            tfa.metrics.MeanMetricWrapper(loss_obj, name='ce_loss'),
            tfa.metrics.MeanMetricWrapper(compress_algo.loss, name='cr_loss')
        ]

        compress_model.add_loss(compress_algo.loss)

        compress_model.compile(
            loss=loss_obj,
            optimizer=tf.keras.optimizers.Adam(5e-3),
            metrics=metrics,
        )

    compress_model.fit(
        dataset_train,
        validation_data=dataset_test,
        epochs=5,
        callbacks=[
            tf.keras.callbacks.ReduceLROnPlateau(),
            get_progress_bar(stateful_metrics=['loss'] +
                             [metric.name for metric in metrics]),
            *get_callbacks(include_tensorboard=True,
                           track_lr=False,
                           profile_batch=0,
                           initial_step=0,
                           log_dir='logdir/',
                           ckpt_dir='logdir/cpt/'), compression_callbacks,
            SparsityRateTestCallback()
        ])
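get_basic_sparsity_config is a test helper whose shape can be inferred from this call site. A hedged reconstruction (field names follow the NNCF rb_sparsity config schema; the helper body itself is an assumption):

def get_basic_sparsity_config(sparsity_init=0.02, sparsity_target=0.5,
                              sparsity_target_epoch=2, sparsity_freeze_epoch=3,
                              scheduler='polynomial'):
    config = NNCFConfig()
    config.update({
        'model': 'basic_sparse_model',
        'input_info': {'sample_size': [1, 28, 28, 1]},  # LeNet/MNIST-sized input
        'compression': {
            'algorithm': 'rb_sparsity',
            'sparsity_init': sparsity_init,
            'params': {
                'schedule': scheduler,
                'sparsity_target': sparsity_target,
                'sparsity_target_epoch': sparsity_target_epoch,
                'sparsity_freeze_epoch': sparsity_freeze_epoch
            }
        }
    })
    return config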
Example #6

# Do not remove - these imports are for testing purposes.
#pylint:disable=unused-import
import nncf

import tensorflow as tf

from nncf.common.compression import BaseCompressionAlgorithmController
from nncf.tensorflow.helpers.model_creation import create_compressed_model
from tests.tensorflow.quantization.utils import get_basic_quantization_config

inputs = tf.keras.Input(shape=(3, 3, 1))
outputs = tf.keras.layers.Conv2D(filters=3, kernel_size=3)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)

config = get_basic_quantization_config()
compression_state_to_skip_init = {
    BaseCompressionAlgorithmController.BUILDER_STATE: {}
}
compression_ctrl, compression_model = create_compressed_model(
    model, config, compression_state_to_skip_init)
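For context, get_basic_quantization_config from tests.tensorflow.quantization.utils builds a minimal NNCFConfig along these lines (a sketch; the exact defaults are an assumption):

def get_basic_quantization_config(model_size=4):
    config = NNCFConfig()
    config.update({
        'model': 'basic_quant_model',
        'input_info': {'sample_size': [1, model_size, model_size, 1]},
        'compression': {'algorithm': 'quantization'}
    })
    return config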
Example #7
def get_simple_compressed_model(compression_state=None):
    model = get_basic_conv_test_model()
    config = get_empty_config()
    config.update({'compression': {'algorithm': 'magnitude_sparsity'}})
    compression_ctrl, model = create_compressed_model(model, config, compression_state=compression_state)
    return compression_ctrl, model
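A typical use of the compression_state parameter above is a state round-trip: build once to run initialization, capture the state from the controller via the public get_compression_state() method, then rebuild without re-initializing. A minimal sketch:

# First build runs algorithm initialization and yields a serializable state.
ctrl, _ = get_simple_compressed_model()
state = ctrl.get_compression_state()

# Second build consumes the saved state, skipping initialization.
restored_ctrl, restored_model = get_simple_compressed_model(compression_state=state)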