def test_sensitivity(self):
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            input = fluid.data(name="image", shape=[None, 1, 28, 28])
            label = fluid.data(name="label", shape=[None, 1], dtype="int64")
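            # Small conv-bn test network; the two elementwise-add shortcuts
            # (sum1, sum2) couple channels across the conv layers they join.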
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            sum1 = conv1 + conv2
            conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
            conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
            sum2 = conv4 + sum1
            conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
            conv6 = conv_bn_layer(conv5, 8, 3, "conv6")
            out = fluid.layers.fc(conv6, size=10, act='softmax')
            acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
        eval_program = main_program.clone(for_test=True)

        place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        exe.run(startup_program)

        val_reader = paddle.fluid.io.batch(paddle.dataset.mnist.test(),
                                           batch_size=128)

        def eval_func(program):
            feeder = fluid.DataFeeder(feed_list=['image', 'label'],
                                      place=place,
                                      program=program)
            acc_set = []
            for data in val_reader():
                acc_np = exe.run(program=program,
                                 feed=feeder.feed(data),
                                 fetch_list=[acc_top1])
                acc_set.append(float(acc_np[0]))
            acc_val_mean = numpy.array(acc_set).mean()
            print("acc_val_mean: {}".format(acc_val_mean))
            return acc_val_mean

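        # Compute sensitivities for conv4_weights in two runs with disjoint pruned
        # ratios; below they are merged and compared against a single run over all
        # four ratios.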
        sensitivity(eval_program,
                    place, ["conv4_weights"],
                    eval_func,
                    "./sensitivities_file_0",
                    pruned_ratios=[0.1, 0.2])

        sensitivity(eval_program,
                    place, ["conv4_weights"],
                    eval_func,
                    "./sensitivities_file_1",
                    pruned_ratios=[0.3, 0.4])

        sens_0 = load_sensitivities('./sensitivities_file_0')
        sens_1 = load_sensitivities('./sensitivities_file_1')
        sens = merge_sensitive([sens_0, sens_1])
        origin_sens = sensitivity(eval_program,
                                  place, ["conv4_weights"],
                                  eval_func,
                                  "./sensitivities_file_1",
                                  pruned_ratios=[0.1, 0.2, 0.3, 0.4])
        self.assertEqual(sens, origin_sens)
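        # A possible follow-up, mirroring Examples #4 and #5 (assumed usage, not part
        # of the check above): convert the merged sensitivities into per-parameter
        # pruning ratios for a given accuracy-loss budget, e.g.
        #     ratios = get_ratios_by_loss(sens, 0.01)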
Example #2
    def static_sen(self, params):
        paddle.enable_static()
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.unique_name.guard():
            with fluid.program_guard(main_program, startup_program):
                input = fluid.data(name="image", shape=[None, 1, 28, 28])
                label = fluid.data(name="label",
                                   shape=[None, 1],
                                   dtype="int64")
                model = paddle.vision.models.LeNet()
                out = model(input)
                acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
        eval_program = main_program.clone(for_test=True)
        place = fluid.CUDAPlace(0)
        scope = fluid.global_scope()
        exe = fluid.Executor(place)
        exe.run(startup_program)

        val_reader = paddle.fluid.io.batch(self.val_reader, batch_size=128)

        def eval_func(program):
            feeder = fluid.DataFeeder(feed_list=['image', 'label'],
                                      place=place,
                                      program=program)
            acc_set = []
            for data in val_reader():
                acc_np = exe.run(program=program,
                                 feed=feeder.feed(data),
                                 fetch_list=[acc_top1])
                acc_set.append(float(acc_np[0]))
            acc_val_mean = np.array(acc_set).mean()
            return acc_val_mean

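        # Copy the provided parameter values into the global scope before evaluating.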
        for _name, _value in params.items():
            t = scope.find_var(_name).get_tensor()
            t.set(_value, place)
        print(f"static base: {eval_func(eval_program)}")
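        # Map the test's pruner name to the criterion string passed to sensitivity() below.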
        criterion = None
        if self._pruner == 'l1norm':
            criterion = 'l1_norm'
        elif self._pruner == 'fpgm':
            criterion = 'geometry_median'
        sen = sensitivity(eval_program,
                          place,
                          self._param_names,
                          eval_func,
                          sensitivities_file="_".join(
                              ["./sensitivities_file",
                               str(time.time())]),
                          criterion=criterion)
        return sen
Example #3
def main():
    env = os.environ

    print("FLAGS.config: {}".format(FLAGS.config))
    cfg = load_config(FLAGS.config)
    merge_config(FLAGS.opt)
    check_config(cfg)
    check_version()

    main_arch = cfg.architecture

    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)

    # build program
    startup_prog = fluid.Program()
    eval_prog = fluid.Program()
    with fluid.program_guard(eval_prog, startup_prog):
        with fluid.unique_name.guard():
            model = create(main_arch)
            inputs_def = cfg['EvalReader']['inputs_def']
            feed_vars, eval_loader = model.build_inputs(**inputs_def)
            fetches = model.eval(feed_vars)
    eval_prog = eval_prog.clone(True)
    if FLAGS.print_params:
        print(
            "-------------------------All parameters in current graph----------------------"
        )
        for block in eval_prog.blocks:
            for param in block.all_parameters():
                print("parameter name: {}\tshape: {}".format(param.name,
                                                             param.shape))
        print(
            "------------------------------------------------------------------------------"
        )
        return

    eval_reader = create_reader(cfg.EvalReader)
    # If the loader is iterable, use eval_loader.set_sample_list_generator(eval_reader, place) instead.
    eval_loader.set_sample_list_generator(eval_reader)

    # parse eval fetches
    extra_keys = []
    if cfg.metric == 'COCO':
        extra_keys = ['im_info', 'im_id', 'im_shape']
    if cfg.metric == 'VOC':
        extra_keys = ['gt_bbox', 'gt_class', 'is_difficult']
    if cfg.metric == 'WIDERFACE':
        extra_keys = ['im_id', 'im_shape', 'gt_box']
    eval_keys, eval_values, eval_cls = parse_fetches(fetches, eval_prog,
                                                     extra_keys)

    exe.run(startup_prog)

    fuse_bn = getattr(model.backbone, 'norm_type', None) == 'affine_channel'

    ignore_params = cfg.finetune_exclude_pretrained_params \
                 if 'finetune_exclude_pretrained_params' in cfg else []

    start_iter = 0

    if cfg.weights:
        checkpoint.load_params(exe, eval_prog, cfg.weights)
    else:
        logger.warning("Please set cfg.weights to load a trained model.")

    # whether output bbox is normalized in model output layer
    is_bbox_normalized = False
    if hasattr(model, 'is_bbox_normalized') and \
            callable(model.is_bbox_normalized):
        is_bbox_normalized = model.is_bbox_normalized()

    # if map_type not set, use default 11point, only use in VOC eval
    map_type = cfg.map_type if 'map_type' in cfg else '11point'

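    # Evaluation callback for sensitivity(): runs detection eval on the compiled
    # program and returns the first box AP statistic.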
    def test(program):

        compiled_eval_prog = fluid.CompiledProgram(program)

        results = eval_run(
            exe,
            compiled_eval_prog,
            eval_loader,
            eval_keys,
            eval_values,
            eval_cls,
            cfg=cfg)
        resolution = None
        if 'mask' in results[0]:
            resolution = model.mask_head.resolution
        dataset = cfg['EvalReader']['dataset']
        box_ap_stats = eval_results(
            results,
            cfg.metric,
            cfg.num_classes,
            resolution,
            is_bbox_normalized,
            FLAGS.output_eval,
            map_type,
            dataset=dataset)
        return box_ap_stats[0]

    assert (
        FLAGS.pruned_params is not None
    ), "FLAGS.pruned_params is not set. Please set it with the '--pruned_params' option."
    pruned_params = FLAGS.pruned_params.strip().split(",")
    logger.info("pruned params: {}".format(pruned_params))
    pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(" ")]
    logger.info("pruned ratios: {}".format(pruned_ratios))
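    # Measure how the eval metric changes as each listed parameter is pruned at
    # each ratio; results are saved to FLAGS.sensitivities_file.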
    sensitivity(
        eval_prog,
        place,
        pruned_params,
        test,
        sensitivities_file=FLAGS.sensitivities_file,
        pruned_ratios=pruned_ratios)
Example #4
def compress(args):
    test_reader = None
    if args.data == "mnist":
        import paddle.dataset.mnist as reader
        val_reader = reader.test()
        class_dim = 10
        image_shape = "1,28,28"
    elif args.data == "imagenet":
        import imagenet_reader as reader
        val_reader = reader.val()
        class_dim = 1000
        image_shape = "3,224,224"
    else:
        raise ValueError("{} is not supported.".format(args.data))
    image_shape = [int(m) for m in image_shape.split(",")]
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    # model definition
    model = models.__dict__[args.model]()
    out = model.net(input=image, class_dim=class_dim)
    acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
    acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
    val_program = fluid.default_main_program().clone(for_test=True)
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    if args.pretrained_model:

        def if_exist(var):
            return os.path.exists(
                os.path.join(args.pretrained_model, var.name))

        fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)

    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)

    val_feeder = fluid.DataFeeder(
        [image, label], place, program=val_program)

    def test(program):
        batch_id = 0
        acc_top1_ns = []
        acc_top5_ns = []
        for data in val_reader():
            start_time = time.time()
            acc_top1_n, acc_top5_n = exe.run(
                program,
                feed=val_feeder.feed(data),
                fetch_list=[acc_top1.name, acc_top5.name])
            end_time = time.time()
            if batch_id % args.log_period == 0:
                _logger.info(
                    "Eval batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".
                    format(batch_id,
                           np.mean(acc_top1_n),
                           np.mean(acc_top5_n), end_time - start_time))
            acc_top1_ns.append(np.mean(acc_top1_n))
            acc_top5_ns.append(np.mean(acc_top5_n))
            batch_id += 1

        _logger.info("Final eva - acc_top1: {}; acc_top5: {}".format(
            np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
        return np.mean(np.array(acc_top1_ns))

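    # Collect parameters whose names contain "_sep_weights" as pruning candidates.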
    params = []
    for param in fluid.default_main_program().global_block().all_parameters():
        if "_sep_weights" in param.name:
            params.append(param.name)

    sensitivity(
        val_program,
        place,
        params,
        test,
        sensitivities_file="sensitivities_0.data",
        pruned_ratios=[0.1, 0.2, 0.3, 0.4])

    sensitivity(
        val_program,
        place,
        params,
        test,
        sensitivities_file="sensitivities_1.data",
        pruned_ratios=[0.5, 0.6, 0.7])

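    # Merge the two partial sensitivity files and derive per-parameter pruning
    # ratios for an accuracy-loss budget of 0.01.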
    sens = merge_sensitive(
        ["./sensitivities_0.data", "./sensitivities_1.data"])

    ratios = get_ratios_by_loss(sens, 0.01)

    print(ratios)
Example #5
def compress(args):
    test_reader = None
    if args.data == "mnist":
        val_dataset = paddle.vision.datasets.MNIST(mode='test')
        class_dim = 10
        image_shape = "1,28,28"
    elif args.data == "imagenet":
        import imagenet_reader as reader
        val_dataset = reader.ImageNetDataset(mode='val')
        class_dim = 1000
        image_shape = "3,224,224"
    else:
        raise ValueError("{} is not supported.".format(args.data))
    image_shape = [int(m) for m in image_shape.split(",")]
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    image = paddle.static.data(name='image',
                               shape=[None] + image_shape,
                               dtype='float32')
    label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
    # model definition
    model = models.__dict__[args.model]()
    out = model.net(input=image, class_dim=class_dim)
    acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
    acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)
    val_program = paddle.static.default_main_program().clone(for_test=True)
    places = paddle.static.cuda_places(
    ) if args.use_gpu else paddle.static.cpu_places()
    place = places[0]
    exe = paddle.static.Executor(place)
    exe.run(paddle.static.default_startup_program())

    if args.pretrained_model:

        def if_exist(var):
            return os.path.exists(os.path.join(args.pretrained_model,
                                               var.name))

        paddle.fluid.io.load_vars(exe,
                                  args.pretrained_model,
                                  predicate=if_exist)

    valid_loader = paddle.io.DataLoader(val_dataset,
                                        places=place,
                                        feed_list=[image, label],
                                        drop_last=False,
                                        batch_size=args.batch_size,
                                        use_shared_memory=True,
                                        shuffle=False)

    def test(program):
        acc_top1_ns = []
        acc_top5_ns = []
        for batch_id, data in enumerate(valid_loader):
            start_time = time.time()
            acc_top1_n, acc_top5_n = exe.run(
                program, feed=data, fetch_list=[acc_top1.name, acc_top5.name])
            end_time = time.time()
            if batch_id % args.log_period == 0:
                _logger.info(
                    "Eval batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".
                    format(batch_id, np.mean(acc_top1_n), np.mean(acc_top5_n),
                           end_time - start_time))
            acc_top1_ns.append(np.mean(acc_top1_n))
            acc_top5_ns.append(np.mean(acc_top5_n))

        _logger.info("Final eva - acc_top1: {}; acc_top5: {}".format(
            np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
        return np.mean(np.array(acc_top1_ns))

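    # Collect parameters whose names contain "weights" as pruning candidates.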
    params = []
    for param in paddle.static.default_main_program().global_block(
    ).all_parameters():
        if "weights" in param.name:
            print(param.name)
            params.append(param.name)

    sensitivity(val_program,
                place,
                params,
                test,
                sensitivities_file="sensitivities_0.data",
                pruned_ratios=[0.1, 0.2, 0.3, 0.4])

    sensitivity(val_program,
                place,
                params,
                test,
                sensitivities_file="sensitivities_1.data",
                pruned_ratios=[0.5, 0.6, 0.7])

    sens = merge_sensitive(
        ["./sensitivities_0.data", "./sensitivities_1.data"])

    ratios = get_ratios_by_loss(sens, 0.01)

    print(sens)
    print(ratios)