def test_sensitivity(self):
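        # Build a small conv-BN network with two element-wise skip connections
        # so the sensitivity analysis below runs against a non-trivial graph.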
        main_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(main_program, startup_program):
            input = fluid.data(name="image", shape=[None, 1, 28, 28])
            label = fluid.data(name="label", shape=[None, 1], dtype="int64")
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            sum1 = conv1 + conv2
            conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
            conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
            sum2 = conv4 + sum1
            conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
            conv6 = conv_bn_layer(conv5, 8, 3, "conv6")
            out = fluid.layers.fc(conv6, size=10, act='softmax')
            acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
        eval_program = main_program.clone(for_test=True)

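        # Running the startup program initializes the parameters in
        # fluid.global_scope(), which is the scope sensitivity() prunes and
        # restores between evaluations.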
        place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        exe.run(startup_program)

        val_reader = paddle.fluid.io.batch(paddle.dataset.mnist.test(),
                                           batch_size=128)

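        # eval_func takes a program and returns a single scalar metric (top-1
        # accuracy here); sensitivity() calls it once for the baseline and once
        # for every pruned ratio.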
        def eval_func(program):
            feeder = fluid.DataFeeder(feed_list=['image', 'label'],
                                      place=place,
                                      program=program)
            acc_set = []
            for data in val_reader():
                acc_np = exe.run(program=program,
                                 feed=feeder.feed(data),
                                 fetch_list=[acc_top1])
                acc_set.append(float(acc_np[0]))
            acc_val_mean = numpy.array(acc_set).mean()
            print("acc_val_mean: {}".format(acc_val_mean))
            return acc_val_mean

        sensitivity(eval_program,
                    place, ["conv4_weights"],
                    eval_func,
                    "./sensitivities_file_0",
                    pruned_ratios=[0.1, 0.2])

        sensitivity(eval_program,
                    place, ["conv4_weights"],
                    eval_func,
                    "./sensitivities_file_1",
                    pruned_ratios=[0.3, 0.4])

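        # Sensitivities computed over disjoint ratio sets in separate files can
        # be merged; the merged result should equal a single run covering all
        # four ratios.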
        sens_0 = load_sensitivities('./sensitivities_file_0')
        sens_1 = load_sensitivities('./sensitivities_file_1')
        sens = merge_sensitive([sens_0, sens_1])
        origin_sens = sensitivity(eval_program,
                                  place, ["conv4_weights"],
                                  eval_func,
                                  "./sensitivities_file_1",
                                  pruned_ratios=[0.1, 0.2, 0.3, 0.4])
        self.assertTrue(sens == origin_sens)
Example #2
def sensitivity(program,
                place,
                param_names,
                eval_func,
                sensitivities_file=None,
                pruned_ratios=None):
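    """Measure how much the metric returned by ``eval_func`` drops when each
    parameter in ``param_names`` is pruned at each ratio in ``pruned_ratios``.

    The result maps ``param_name -> {ratio: relative_metric_loss}`` and is
    dumped to ``sensitivities_file`` (when given) after every evaluation, so an
    interrupted run can resume from the cached file.
    """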
    scope = fluid.global_scope()
    graph = GraphWrapper(program)
    sensitivities = load_sensitivities(sensitivities_file)

    if pruned_ratios is None:
        pruned_ratios = np.arange(0.1, 1, step=0.1)

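    # Count only the (param, ratio) pairs that are not already cached so the
    # progress/ETA logging below reflects the remaining work.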
    total_evaluate_iters = 0
    for name in param_names:
        if name not in sensitivities:
            sensitivities[name] = {}
            total_evaluate_iters += len(list(pruned_ratios))
        else:
            total_evaluate_iters += (len(list(pruned_ratios)) -
                                     len(sensitivities[name]))
    start_time = time.time()
    baseline = eval_func(graph.program)
    cost = time.time() - start_time
    current_iter = 1
    for name in sensitivities:
        for ratio in pruned_ratios:
            if ratio in sensitivities[name]:
                logging.debug('{}, {} has been computed.'.format(name, ratio))
                continue

            progress = float(current_iter) / total_evaluate_iters
            progress = "%.2f%%" % (progress * 100)
            logging.info(
                "Total evaluate iters={}, current={}, progress={}, eta={}".format(
                    total_evaluate_iters, current_iter, progress,
                    seconds_to_hms(
                        int(cost * (total_evaluate_iters - current_iter)))))
            current_iter += 1

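            # lazy=True zeroes out the pruned weights instead of removing them,
            # and param_backup=True returns the original values so they can be
            # restored after the evaluation below.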
            pruner = Pruner()
            logging.info("sensitive - param: {}; ratios: {}".format(
                name, ratio))
            pruned_program, param_backup, _ = pruner.prune(
                program=graph.program,
                scope=scope,
                params=[name],
                ratios=[ratio],
                place=place,
                lazy=True,
                only_graph=False,
                param_backup=True)
            pruned_metric = eval_func(pruned_program)
            loss = (baseline - pruned_metric) / baseline
            logging.info("pruned param: {}; {}; loss={}".format(
                name, ratio, loss))

            sensitivities[name][ratio] = loss

            if sensitivities_file is not None:
                with open(sensitivities_file, 'wb') as f:
                    pickle.dump(sensitivities, f)

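            # Restore the original weights so the next ratio starts from the
            # unpruned parameters.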
            for param_name in param_backup.keys():
                param_t = scope.find_var(param_name).get_tensor()
                param_t.set(param_backup[param_name], place)
    return sensitivities
Example #3
def main():
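    """Prune a PaddleOCR model with ratios derived from precomputed
    sensitivities, then train and evaluate the pruned programs."""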
    config = program.load_config(FLAGS.config)
    program.merge_config(FLAGS.opt)
    logger.info(config)

    # check if set use_gpu=True in paddlepaddle cpu version
    use_gpu = config['Global']['use_gpu']
    program.check_gpu(use_gpu)

    alg = config['Global']['algorithm']
    assert alg in ['EAST', 'DB', 'Rosetta', 'CRNN', 'STARNet', 'RARE']
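    # Recognition algorithms need a character/index converter; the detection
    # algorithms (EAST, DB) do not.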
    if alg in ['Rosetta', 'CRNN', 'STARNet', 'RARE']:
        config['Global']['char_ops'] = CharacterOps(config['Global'])

    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    startup_program = fluid.Program()
    train_program = fluid.Program()
    train_build_outputs = program.build(config,
                                        train_program,
                                        startup_program,
                                        mode='train')
    train_loader = train_build_outputs[0]
    train_fetch_name_list = train_build_outputs[1]
    train_fetch_varname_list = train_build_outputs[2]
    train_opt_loss_name = train_build_outputs[3]

    eval_program = fluid.Program()
    eval_build_outputs = program.build(config,
                                       eval_program,
                                       startup_program,
                                       mode='eval')
    eval_fetch_name_list = eval_build_outputs[1]
    eval_fetch_varname_list = eval_build_outputs[2]
    eval_program = eval_program.clone(for_test=True)

    train_reader = reader_main(config=config, mode="train")
    train_loader.set_sample_list_generator(train_reader, places=place)

    eval_reader = reader_main(config=config, mode="eval")

    exe = fluid.Executor(place)
    exe.run(startup_program)

    # load pretrained weights / checkpoint into the train program
    init_model(config, train_program, exe)

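    # Load precomputed sensitivities, drop parameters that must not be pruned
    # (the explicit skip_list plus the conv1-conv4 backbone blocks), and pick a
    # per-parameter ratio that keeps the estimated accuracy loss within 0.03.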
    sen = load_sensitivities("sensitivities_0.data")
    for i in skip_list:
        if i in sen.keys():
            sen.pop(i)
    back_bone_list = ['conv' + str(x) for x in range(1, 5)]
    for i in back_bone_list:
        for key in list(sen.keys()):
            if i + '_' in key:
                sen.pop(key)
    ratios = get_ratios_by_loss(sen, 0.03)
    logger.info("FLOPs before pruning: {}".format(flops(eval_program)))
    pruner = Pruner(criterion='geometry_median')
    print("ratios: {}".format(ratios))
    pruned_val_program, _, _ = pruner.prune(eval_program,
                                            fluid.global_scope(),
                                            params=ratios.keys(),
                                            ratios=ratios.values(),
                                            place=place,
                                            only_graph=True)

    pruned_program, _, _ = pruner.prune(train_program,
                                        fluid.global_scope(),
                                        params=ratios.keys(),
                                        ratios=ratios.values(),
                                        place=place)
    logger.info("FLOPs after pruning: {}".format(flops(pruned_val_program)))
    train_compile_program = program.create_multi_devices_program(
        pruned_program, train_opt_loss_name)


    train_info_dict = {
        'compile_program': train_compile_program,
        'train_program': pruned_program,
        'reader': train_loader,
        'fetch_name_list': train_fetch_name_list,
        'fetch_varname_list': train_fetch_varname_list
    }

    eval_info_dict = {
        'program': pruned_val_program,
        'reader': eval_reader,
        'fetch_name_list': eval_fetch_name_list,
        'fetch_varname_list': eval_fetch_varname_list
    }

    if alg in ['EAST', 'DB']:
        program.train_eval_det_run(config,
                                   exe,
                                   train_info_dict,
                                   eval_info_dict,
                                   is_slim="prune")
    else:
        program.train_eval_rec_run(config, exe, train_info_dict,
                                   eval_info_dict)