Ejemplo n.º 1
0
    def runTest(self):
        """Prune MulNet with three pruner variants and verify that restore()
        leaves every parameter shape unchanged across iterations."""
        with fluid.unique_name.guard():
            net = MulNet()
            ratios = {'conv2d_0.w_0': 0.5}
            pruners = [
                L1NormFilterPruner(net, [2, 6, 3, 3], skip_leaves=False),
                FPGMFilterPruner(net, [2, 6, 3, 3], skip_leaves=False),
                L2NormFilterPruner(net, [2, 6, 3, 3], skip_leaves=False),
            ]

            # Expected post-prune shapes; any parameter not listed here is
            # recorded the first time it is seen and must stay stable after.
            expected = {
                'b': [3, 18],
                'conv2d_0.w_0': [3, 6, 1, 1],
                'conv2d_0.b_0': [3],
            }
            for pruner in pruners:
                pruner.prune_vars(ratios, 0)
                for param in net.parameters():
                    expected.setdefault(param.name, param.shape)
                    self.assertTrue(expected[param.name] == param.shape)
                pruner.restore()
Ejemplo n.º 2
0
 def runTest(self):
     """Prune conv2d_0.w_0 by 20% and check the downstream linear weight shape."""
     net = Net()
     sample = np.random.uniform(-1, 1, (1, 3, 32, 32)).astype('float32')
     pruner = L1NormFilterPruner(net, [paddle.to_tensor(sample)])
     pruner.prune_vars({"conv2d_0.w_0": 0.2}, [0])
     self.assertTrue(net.linear.weight.shape == [5400, 5])
Ejemplo n.º 3
0
    def dygraph_sen(self):
        """Train LeNet for one epoch, run sensitivity analysis in dygraph
        mode, and return the sensitivity result plus a snapshot of all
        parameter values (name -> numpy array)."""
        paddle.disable_static()
        net = paddle.vision.models.LeNet()
        optimizer = paddle.optimizer.Adam(learning_rate=0.001,
                                          parameters=net.parameters())
        inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
        labels = [Input([None, 1], 'int64', name='label')]
        model = paddle.Model(net, inputs, labels)
        model.prepare(optimizer, paddle.nn.CrossEntropyLoss(),
                      paddle.metric.Accuracy(topk=(1, 5)))

        model.fit(self.train_dataset, epochs=1, batch_size=128, verbose=1)
        model.evaluate(self.val_dataset, batch_size=128, verbose=1)

        # Pick the pruner implementation requested by the fixture.
        pruner = None
        pruner_cls = {
            'l1norm': L1NormFilterPruner,
            'fpgm': FPGMFilterPruner,
        }.get(self._pruner)
        if pruner_cls is not None:
            pruner = pruner_cls(net, [1, 1, 28, 28])

        def eval_fn():
            return model.evaluate(self.val_dataset, batch_size=128)['acc_top1']

        # Unique file name per run so repeated runs don't reuse stale results.
        sen_file = "_".join(["./dygraph_sen_", str(time.time())])
        sen = pruner.sensitive(eval_func=eval_fn,
                               sen_file=sen_file,
                               target_vars=self._param_names)
        params = {
            param.name: np.array(param.value().get_tensor())
            for param in net.parameters()
        }
        print(f'dygraph sen: {sen}')
        return sen, params
Ejemplo n.º 4
0
 def dygraph_prune(self, net, ratios):
     """Instantiate `net`, prune it along axis 0 by `ratios`, and return a
     mapping of parameter name -> resulting shape."""
     paddle.disable_static()
     model = net(pretrained=False)
     pruner = L1NormFilterPruner(model, [1, 3, 16, 16])
     pruner.prune_vars(ratios, [0])
     return {param.name: param.shape for param in model.parameters()}
Ejemplo n.º 5
0
    def runTest(self):
        """For each pruner variant: run sensitivity analysis, check that
        prune+restore is accuracy-neutral, and that align=4 pruning leaves
        channel counts divisible by 4."""
        with fluid.unique_name.guard():
            net = paddle.vision.models.LeNet()
            optimizer = paddle.optimizer.Adam(learning_rate=0.001,
                                              parameters=net.parameters())
            inputs = [Input([None, 1, 28, 28], 'float32', name='image')]
            labels = [Input([None, 1], 'int64', name='label')]
            model = paddle.Model(net, inputs, labels)
            model.prepare(optimizer, paddle.nn.CrossEntropyLoss(),
                          paddle.metric.Accuracy(topk=(1, 5)))
            model.fit(self.train_dataset, epochs=1, batch_size=128, verbose=1)

            pruners = [
                cls(net, [1, 1, 28, 28], opt=optimizer)
                for cls in (L1NormFilterPruner, FPGMFilterPruner,
                            L2NormFilterPruner)
            ]

            def eval_fn():
                return model.evaluate(self.val_dataset,
                                      batch_size=128,
                                      verbose=1)['acc_top1']

            # Shared sensitivity file: later pruners reuse the cached result.
            sen_file = "_".join(["./dygraph_sen_", str(time.time())])
            for pruner in pruners:
                pruner.sensitive(eval_func=eval_fn,
                                 sen_file=sen_file,
                                 target_vars=self._param_names)
                model.fit(self.train_dataset,
                          epochs=1,
                          batch_size=128,
                          verbose=1)
                base_acc = eval_fn()
                pruner.sensitive_prune(0.01)
                pruner.restore()
                restore_acc = eval_fn()
                # restore() must return the model to its exact pre-prune state.
                self.assertTrue(restore_acc == base_acc)

                pruner.sensitive_prune(0.01, align=4)
                for param in net.parameters():
                    if param.name in self._param_names:
                        print(f"name: {param.name}; shape: {param.shape}")
                        self.assertTrue(param.shape[0] % 4 == 0)
                pruner.restore()
Ejemplo n.º 6
0
    def runTest(self):
        """Prune every 4-D (conv) weight of MobileNetV1 at ratio 0.5 with
        three pruner variants and verify shapes are identical each round."""
        with fluid.unique_name.guard():
            net = paddle.vision.models.mobilenet_v1()
            # Only convolution kernels (rank-4 weights) are pruned.
            ratios = {
                param.name: 0.5
                for param in net.parameters() if len(param.shape) == 4
            }
            pruners = [
                cls(net, [1, 3, 128, 128])
                for cls in (L1NormFilterPruner, FPGMFilterPruner,
                            L2NormFilterPruner)
            ]

            baseline = {}
            for pruner in pruners:
                pruner.prune_vars(ratios, 0)
                for param in net.parameters():
                    # First pruner records the expected shapes; the rest must match.
                    baseline.setdefault(param.name, param.shape)
                    assert (baseline[param.name] == param.shape)
                pruner.restore()
Ejemplo n.º 7
0
def main(args):
    """Run the three-step pruning pipeline: sensitivity analysis, pruning,
    and retraining/evaluation/export of a segmentation model.

    Args:
        args: parsed CLI namespace; uses cfg, pruning_ratio, save_dir,
            retraining_iters, batch_size, learning_rate, model_path and
            num_workers.

    Raises:
        RuntimeError: if pruning_ratio is outside (0, 1) or the config is
            missing a train/val dataset.
    """
    env_info = get_sys_env()

    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)

    if not (0.0 < args.pruning_ratio < 1.0):
        raise RuntimeError(
            'The model pruning rate must be in the range of (0, 1).')

    # exist_ok avoids the check-then-create race of exists()+makedirs().
    os.makedirs(args.save_dir, exist_ok=True)

    cfg = Config(args.cfg,
                 iters=args.retraining_iters,
                 batch_size=args.batch_size,
                 learning_rate=args.learning_rate)

    train_dataset = cfg.train_dataset
    if not train_dataset:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')

    val_dataset = cfg.val_dataset
    if not val_dataset:
        # Fixed typo in the original message ('c;onfiguration').
        raise RuntimeError(
            'The validation dataset is not specified in the configuration file.'
        )
    os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'
    net = cfg.model

    if args.model_path:
        para_state_dict = paddle.load(args.model_path)
        net.set_dict(para_state_dict)
        logger.info('Loaded trained params of model successfully')

    logger.info(
        'Step 1/3: Start calculating the sensitivity of model parameters...')
    # One real sample determines the network's input shape (batch size 1).
    sample_shape = [1] + list(train_dataset[0][0].shape)
    sen_file = os.path.join(args.save_dir, 'sen.pickle')
    pruner = L1NormFilterPruner(net, sample_shape)
    pruner.sensitive(eval_func=partial(eval_fn, net, val_dataset,
                                       args.num_workers),
                     sen_file=sen_file)
    logger.info(
        f'The sensitivity calculation of model parameters is complete. The result is saved in {sen_file}.'
    )

    flops = dygraph_flops(net, sample_shape)
    logger.info(
        f'Step 2/3: Start to prune the model, the ratio of pruning is {args.pruning_ratio}. FLOPs before pruning: {flops}.'
    )

    # Avoid the bug when pruning conv2d with small channel number.
    # Remove this code after PaddleSlim 2.1 is available.
    # Related issue: https://github.com/PaddlePaddle/PaddleSlim/issues/674.
    skips = []
    for param in net.parameters():
        if param.shape[0] <= 8:
            skips.append(param.name)

    pruner.sensitive_prune(args.pruning_ratio, skip_vars=skips)
    flops = dygraph_flops(net, sample_shape)
    logger.info(f'Model pruning completed. FLOPs after pruning: {flops}.')

    logger.info('Step 3/3: Start retraining the model.')
    train(net,
          train_dataset,
          optimizer=cfg.optimizer,
          save_dir=args.save_dir,
          num_workers=args.num_workers,
          iters=cfg.iters,
          batch_size=cfg.batch_size,
          losses=cfg.loss)

    evaluate(net, val_dataset)

    # Only the rank-0 process exports and cleans up checkpoints.
    if paddle.distributed.get_rank() == 0:
        export_model(net, cfg, args.save_dir)

        ckpt = os.path.join(args.save_dir, f'iter_{args.retraining_iters}')
        if os.path.exists(ckpt):
            shutil.rmtree(ckpt)

    logger.info(f'Model retraining finish. Model is saved in {args.save_dir}')