Example 1
def channel_pruning_auto_mode():

    # Load trained MNIST model
    model = torch.load(os.path.join('../', 'data', 'mnist_trained_on_GPU.pth'))

    # Specify the necessary parameters
    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.8),
                                              num_comp_ratio_candidates=10)
    auto_params = ChannelPruningParameters.AutoModeParams(
        greedy_params, modules_to_ignore=[model.conv1])

    data_loader = mnist_torch_model.DataLoaderMnist(cuda=True,
                                                    seed=1,
                                                    shuffle=True)
    params = ChannelPruningParameters(data_loader=data_loader.train_loader,
                                      num_reconstruction_samples=500,
                                      allow_custom_downsample_ops=True,
                                      mode=ChannelPruningParameters.Mode.auto,
                                      params=auto_params)

    # Single call to compress the model
    results = ModelCompressor.compress_model(
        model,
        eval_callback=evaluate_model,
        eval_iterations=1000,
        input_shape=(1, 1, 28, 28),
        compress_scheme=CompressionScheme.channel_pruning,
        cost_metric=CostMetric.mac,
        parameters=params)

    compressed_model, stats = results
    print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
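The PyTorch examples on this page (Examples 1, 2 and 4) have their imports stripped. Below is a sketch of what they appear to rely on, based on the aimet_common / aimet_torch package layout; the exact module paths and the mnist_torch_model helper are assumptions that may differ between AIMET releases.

import os
from decimal import Decimal
from functools import partial

import torch

# AIMET model-compression API (PyTorch variant); module paths may vary by AIMET release
from aimet_common.defs import CompressionScheme, CostMetric, GreedySelectionParameters, RankSelectScheme
from aimet_torch.defs import ChannelPruningParameters, WeightSvdParameters
from aimet_torch.compress import ModelCompressor

# MNIST helper used in Example 1; assumed to come from AIMET's bundled example code
from aimet_torch.examples import mnist_torch_model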
Example 2
def weight_svd_auto_mode():

    # Load trained MNIST model
    model = torch.load(os.path.join('../', 'data', 'mnist_trained_on_GPU.pth'))

    # Specify the necessary parameters
    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.8),
                                              num_comp_ratio_candidates=10)
    rank_select = RankSelectScheme.greedy
    auto_params = WeightSvdParameters.AutoModeParams(
        rank_select_scheme=rank_select,
        select_params=greedy_params,
        modules_to_ignore=[model.conv1])

    params = WeightSvdParameters(mode=WeightSvdParameters.Mode.auto,
                                 params=auto_params)

    # Single call to compress the model
    results = ModelCompressor.compress_model(
        model,
        eval_callback=evaluate_model,
        eval_iterations=1000,
        input_shape=(1, 1, 28, 28),
        compress_scheme=CompressionScheme.weight_svd,
        cost_metric=CostMetric.mac,
        parameters=params)

    compressed_model, stats = results
    print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
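Every example on this page passes an evaluate_model callback that is not defined in the snippet. A minimal sketch for the PyTorch examples, assuming AIMET's documented callback signature (model, eval_iterations, use_cuda) -> float and a hypothetical val_loader that yields (image, label) batches; the accuracy metric is only an illustration.

def evaluate_model(model, eval_iterations, use_cuda=True):
    """Return top-1 accuracy measured over at most `eval_iterations` validation batches."""
    device = torch.device('cuda' if use_cuda and torch.cuda.is_available() else 'cpu')
    model = model.to(device).eval()
    correct = total = 0
    with torch.no_grad():
        for i, (images, labels) in enumerate(val_loader):  # val_loader: hypothetical DataLoader
            if eval_iterations is not None and i >= eval_iterations:
                break
            preds = model(images.to(device)).argmax(dim=1)
            correct += (preds == labels.to(device)).sum().item()
            total += labels.size(0)
    return correct / max(total, 1)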
Example 3
def channel_pruning_auto_mode():

    sess = tf.compat.v1.Session()
    # Construct graph
    with sess.graph.as_default():
        _ = VGG16(weights=None, input_shape=(224, 224, 3))
        init = tf.compat.v1.global_variables_initializer()
    sess.run(init)

    # ignore first Conv2D op
    conv2d = sess.graph.get_operation_by_name('block1_conv1/Conv2D')
    modules_to_ignore = [conv2d]

    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.8),
                                              num_comp_ratio_candidates=2,
                                              use_monotonic_fit=True,
                                              saved_eval_scores_dict=None)

    auto_params = ChannelPruningParameters.AutoModeParams(
        greedy_select_params=greedy_params,
        modules_to_ignore=modules_to_ignore)

    # Create random dataset
    batch_size = 1
    input_data = np.random.rand(100, 224, 224, 3)
    dataset = tf.data.Dataset.from_tensor_slices(input_data)
    dataset = dataset.batch(batch_size=batch_size)

    params = ChannelPruningParameters(input_op_names=['input_1'],
                                      output_op_names=['predictions/Softmax'],
                                      data_set=dataset,
                                      batch_size=32,
                                      num_reconstruction_samples=50,
                                      allow_custom_downsample_ops=False,
                                      mode=ChannelPruningParameters.Mode.auto,
                                      params=auto_params,
                                      multiplicity=8)

    # Single call to compress the model
    results = ModelCompressor.compress_model(
        sess,
        working_dir=None,
        eval_callback=evaluate_model,
        eval_iterations=10,
        input_shape=(32, 224, 224, 3),
        compress_scheme=CompressionScheme.channel_pruning,
        cost_metric=CostMetric.mac,
        parameters=params)

    compressed_model, stats = results
    print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
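The TensorFlow examples (Examples 3 and 5) hand ModelCompressor.compress_model a tf.compat.v1.Session, so their evaluate_model callback receives a session rather than a torch module. Below is a sketch of the imports they appear to assume plus a placeholder callback, assuming a (sess, iterations, use_cuda) -> float signature; the random input batches and the score computed here are illustrative only.

import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16

# AIMET model-compression API (TensorFlow variant); module paths may vary by AIMET release
from aimet_common.defs import CompressionScheme, CostMetric, GreedySelectionParameters
from aimet_tensorflow.compress import ModelCompressor
from aimet_tensorflow.defs import ChannelPruningParameters, SpatialSvdParameters


def evaluate_model(sess, iterations, use_cuda=True):
    """Feed `iterations` random batches through the VGG16 graph and return a placeholder score."""
    input_tensor = sess.graph.get_tensor_by_name('input_1:0')
    output_tensor = sess.graph.get_tensor_by_name('predictions/Softmax:0')
    score = 0.0
    for _ in range(iterations):
        batch = np.random.rand(1, 224, 224, 3)              # placeholder data
        probs = sess.run(output_tensor, feed_dict={input_tensor: batch})
        score += float(np.max(probs))                        # placeholder metric
    return score / max(iterations, 1)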
Example 4
def weight_svd_auto_mode(model, comp_ratio=0.8, retrain=False):
    input_shape = coord_dataset.mgrid.shape

    # Specify the necessary parameters

    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(comp_ratio),
                                              num_comp_ratio_candidates=20)
    # tar_params = TarRankSelectionParameters(num_rank_indices=2)
    # rank_select = RankSelectScheme.tar
    rank_select = RankSelectScheme.greedy
    auto_params = WeightSvdParameters.AutoModeParams(
        rank_select_scheme=rank_select,
        select_params=greedy_params)  # modules_to_ignore=[model.conv1]

    params = WeightSvdParameters(mode=WeightSvdParameters.Mode.auto,
                                 params=auto_params)

    # Single call to compress the model
    results = ModelCompressor.compress_model(model,
                                             eval_callback=evaluate_model,
                                             eval_iterations=1,
                                             input_shape=input_shape,
                                             compress_scheme=CompressionScheme.weight_svd,
                                             cost_metric=CostMetric.memory,
                                             parameters=params)

    compressed_model, stats = results
    # torch.save(compressed_model,
    #            os.path.join(exp_folder, image_name + '/checkpoints/model_aimet_' + str(comp_ratio) + '.pth'))
    # print(compressed_model)
    print(stats)  # Stats object can be pretty-printed easily
    # print(os.path.join(exp_folder, image_name + '/checkpoints/model_aimet_.pth'))
    # res = check_metrics(dataloader, compressed_model, image_resolution)
    # print(res)
    loss_fn = partial(loss_functions.image_mse, None)
    if retrain:
        compressed_model = retrain_model(compressed_model, dataloader, 2000, loss_fn,
                                         0.00005, TRAINING_FLAGS['l1_reg'])
        torch.save(compressed_model,
                   os.path.join(exp_folder,
                                image_name + '/checkpoints/model_aimet_' + str(comp_ratio) + '_retrained.pth'))
        res = check_metrics(dataloader, compressed_model, image_resolution)
        print(res)
    return compressed_model
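Example 4 depends on several project-specific helpers (retrain_model, check_metrics, loss_functions.image_mse, coord_dataset, TRAINING_FLAGS) that are not part of AIMET and are not shown. The following is a generic fine-tuning loop in the spirit of retrain_model, with a hypothetical signature matching the call above; how the loss is computed and reduced is an assumption.

def retrain_model(model, dataloader, num_steps, loss_fn, lr, l1_reg=0.0):
    """Fine-tune the compressed model for `num_steps` optimizer steps."""
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    model.train()
    step = 0
    while step < num_steps:
        for model_input, gt in dataloader:
            if step >= num_steps:
                break
            loss = loss_fn(model(model_input), gt)
            if isinstance(loss, dict):           # some loss helpers return a dict of terms
                loss = sum(loss.values())
            if l1_reg:
                loss = loss + l1_reg * sum(p.abs().sum() for p in model.parameters())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            step += 1
    return model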
Example 5
def spatial_svd_auto_mode():

    sess = tf.compat.v1.Session()
    # Construct graph
    with sess.graph.as_default():
        _ = VGG16(weights=None, input_shape=(224, 224, 3))
        init = tf.compat.v1.global_variables_initializer()
    sess.run(init)

    # ignore first Conv2D op
    conv2d = sess.graph.get_operation_by_name('block1_conv1/Conv2D')
    modules_to_ignore = [conv2d]

    greedy_params = GreedySelectionParameters(target_comp_ratio=Decimal(0.8),
                                              num_comp_ratio_candidates=10,
                                              use_monotonic_fit=True,
                                              saved_eval_scores_dict=None)

    auto_params = SpatialSvdParameters.AutoModeParams(
        greedy_select_params=greedy_params,
        modules_to_ignore=modules_to_ignore)

    params = SpatialSvdParameters(input_op_names=['input_1'],
                                  output_op_names=['predictions/Softmax'],
                                  mode=SpatialSvdParameters.Mode.auto,
                                  params=auto_params,
                                  multiplicity=8)
    input_shape = (1, 3, 224, 224)

    # Single call to compress the model
    compr_model_sess, stats = ModelCompressor.compress_model(
        sess=sess,
        working_dir='./',
        eval_callback=evaluate_model,
        eval_iterations=10,
        input_shape=input_shape,
        compress_scheme=CompressionScheme.spatial_svd,
        cost_metric=CostMetric.mac,
        parameters=params,
        trainer=None)

    print(stats)  # Stats object can be pretty-printed easily
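In the TensorFlow flow the first element of the returned tuple (compr_model_sess above) is the session holding the compressed graph. One way to persist it, sketched with a standard tf.compat.v1 Saver; the output path is illustrative.

with compr_model_sess.graph.as_default():
    saver = tf.compat.v1.train.Saver()
saver.save(compr_model_sess, './compressed_vgg16/model')

compr_model_sess.close()
sess.close()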