Example #1
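# Build the evaluation/export program, load trained weights (a checkpoint if one is
# configured, otherwise the pretrained weights), and save them as an inference model.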
def main():
    startup_prog, eval_program, place, config, _ = program.preprocess()

    feeded_var_names, target_vars, fetches_var_name = program.build_export(
        config, eval_program, startup_prog)
    eval_program = eval_program.clone(for_test=True)
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if config['Global']['checkpoints'] is not None:
        path = config['Global']['checkpoints']
    else:
        path = config['Global']['pretrain_weights']

    load_model(exe, eval_program, path)

    save_inference_dir = config['Global']['save_inference_dir']
    if not os.path.exists(save_inference_dir):
        os.makedirs(save_inference_dir)
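    # save_inference_model writes the serialized program and parameters to
    # <save_inference_dir>/model and <save_inference_dir>/params.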
    fluid.io.save_inference_model(dirname=save_inference_dir,
                                  feeded_var_names=feeded_var_names,
                                  main_program=eval_program,
                                  target_vars=target_vars,
                                  executor=exe,
                                  model_filename='model',
                                  params_filename='params')
    print("inference model saved in {}/model and {}/params".format(
        save_inference_dir, save_inference_dir))
    print("save success, output_name_list:", fetches_var_name)
Example #2
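# Evaluate a classification model (MNIST or ImageNet) in static-graph mode: build the
# network, load weights with load_model, and report top-1/top-5 accuracy per batch.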
def eval(args):
    train_reader = None
    test_reader = None
    if args.data == "mnist":
        val_reader = paddle.dataset.mnist.test()
        class_dim = 10
        image_shape = "1,28,28"
    elif args.data == "imagenet":
        import imagenet_reader as reader
        train_reader = reader.train()
        val_reader = reader.val()
        class_dim = 1000
        image_shape = "3,224,224"
    else:
        raise ValueError("{} is not supported.".format(args.data))
    image_shape = [int(m) for m in image_shape.split(",")]
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    image = paddle.static.data(name='image',
                               shape=[None] + image_shape,
                               dtype='float32')
    label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
    # model definition
    model = models.__dict__[args.model]()
    out = model.net(input=image, class_dim=class_dim)
    acc_top1 = paddle.metric.accuracy(input=out, label=label, k=1)
    acc_top5 = paddle.metric.accuracy(input=out, label=label, k=5)
    val_program = paddle.static.default_main_program().clone(for_test=True)
    place = paddle.CUDAPlace(0) if args.use_gpu else paddle.CPUPlace()
    exe = paddle.static.Executor(place)
    exe.run(paddle.static.default_startup_program())

    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)

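    # Feed the batched reader through a DataLoader bound to the image and label variables.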
    valid_loader = paddle.io.DataLoader.from_generator(
        feed_list=[image, label],
        capacity=64,
        use_double_buffer=True,
        iterable=True)
    valid_loader.set_sample_list_generator(val_reader, place)

    load_model(exe, val_program, args.model_path)

    acc_top1_ns = []
    acc_top5_ns = []
    for batch_id, data in enumerate(valid_loader):
        start_time = time.time()
        acc_top1_n, acc_top5_n = exe.run(
            val_program, feed=data, fetch_list=[acc_top1.name, acc_top5.name])
        end_time = time.time()
        if batch_id % args.log_period == 0:
            _logger.info(
                "Eval batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".format(
                    batch_id, np.mean(acc_top1_n), np.mean(acc_top5_n),
                    end_time - start_time))
        acc_top1_ns.append(np.mean(acc_top1_n))
        acc_top5_ns.append(np.mean(acc_top5_n))

    _logger.info("Final eval - acc_top1: {}; acc_top5: {}".format(
        np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
Example #3
    def test_prune(self):
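        # Build a small residual conv network, prune "conv4_weights" by 50%, run one
        # training step, save the pruned parameters, then rebuild the network in fresh
        # programs, reload the weights, and verify the parameter shapes.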
        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            input = fluid.data(name="image", shape=[None, 3, 16, 16])
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            sum1 = conv1 + conv2
            conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
            conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
            sum2 = conv4 + sum1
            conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
            conv6 = conv_bn_layer(conv5, 8, 3, "conv6")
            feature = fluid.layers.reshape(conv6, [-1, 128, 16])
            predict = fluid.layers.fc(input=feature, size=10, act='softmax')
            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
            print(label.shape)
            print(predict.shape)
            cost = fluid.layers.cross_entropy(input=predict, label=label)
            avg_cost = fluid.layers.mean(cost)
            adam_optimizer = fluid.optimizer.AdamOptimizer(0.01)
            adam_optimizer.minimize(avg_cost)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        scope = fluid.global_scope()
        exe.run(startup_program, scope=scope)
        criterion = 'bn_scale'
        pruner = Pruner(criterion)
        main_program, _, _ = pruner.prune(train_program,
                                          scope,
                                          params=["conv4_weights"],
                                          ratios=[0.5],
                                          place=place,
                                          lazy=False,
                                          only_graph=False,
                                          param_backup=None,
                                          param_shape_backup=None)

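        # Run one training step on the original program, then save the pruned program's parameters.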
        x = numpy.random.random(size=(10, 3, 16, 16)).astype('float32')
        label = numpy.random.random(size=(10, 1)).astype('int64')
        loss_data, = exe.run(train_program,
                             feed={
                                 "image": x,
                                 "label": label
                             },
                             fetch_list=[cost.name])

        save_model(exe, main_program, 'model_file')
        pruned_program = fluid.Program()
        pruned_startup_program = fluid.Program()
        with fluid.program_guard(pruned_program, pruned_startup_program):
            input = fluid.data(name="image", shape=[None, 3, 16, 16])
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            sum1 = conv1 + conv2
            conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
            conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
            sum2 = conv4 + sum1
            conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
            conv6 = conv_bn_layer(conv5, 8, 3, "conv6")
        pruned_test_program = pruned_program.clone(for_test=True)
        exe.run(pruned_startup_program)
        load_model(exe, pruned_program, 'model_file')
        load_model(exe, pruned_test_program, 'model_file')
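        # Expected shapes after pruning conv4's output channels from 8 to 4; the cut
        # propagates through the element-wise adds, so conv1/conv2 outputs and
        # conv3/conv5 inputs shrink as well.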
        shapes = {
            "conv1_weights": (4, 3, 3, 3),
            "conv2_weights": (4, 4, 3, 3),
            "conv3_weights": (8, 4, 3, 3),
            "conv4_weights": (4, 8, 3, 3),
            "conv5_weights": (8, 4, 3, 3),
            "conv6_weights": (8, 8, 3, 3)
        }

        for param in pruned_program.global_block().all_parameters():
            if "weights" in param.name:
                print("param: {}; param shape: {}".format(
                    param.name, param.shape))
                self.assertTrue(param.shape == shapes[param.name])
        for param in pruned_test_program.global_block().all_parameters():
            if "weights" in param.name:
                print("param: {}; param shape: {}".format(
                    param.name, param.shape))
                self.assertTrue(param.shape == shapes[param.name])
Example #4
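# Evaluate a segmentation model: stream the validation set through a DataLoader, load
# the checkpoint with load_model, and report mIoU, accuracy, and kappa computed from a
# streaming confusion matrix.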
def evaluate(cfg, ckpt_dir=None, use_gpu=False, use_mpio=False, **kwargs):
    np.set_printoptions(precision=5, suppress=True)

    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    dataset = SegDataset(file_list=cfg.DATASET.VAL_FILE_LIST,
                         mode=ModelPhase.EVAL,
                         data_dir=cfg.DATASET.DATA_DIR)

    def data_generator():
        # TODO: check whether the batch reader is compatible with Windows
        if use_mpio:
            data_gen = dataset.multiprocess_generator(
                num_processes=cfg.DATALOADER.NUM_WORKERS,
                max_queue_size=cfg.DATALOADER.BUF_SIZE)
        else:
            data_gen = dataset.generator()

        for b in data_gen:
            yield b[0], b[1], b[2]

    data_loader, avg_loss, pred, grts, masks = build_model(
        test_prog, startup_prog, phase=ModelPhase.EVAL)

    data_loader.set_sample_generator(data_generator,
                                     drop_last=False,
                                     batch_size=cfg.BATCH_SIZE)

    # Get device environment
    places = fluid.cuda_places() if use_gpu else fluid.cpu_places()
    place = places[0]
    dev_count = len(places)
    print("#Device count: {}".format(dev_count))

    exe = fluid.Executor(place)
    exe.run(startup_prog)

    test_prog = test_prog.clone(for_test=True)

    ckpt_dir = cfg.TEST.TEST_MODEL if not ckpt_dir else ckpt_dir

    if not os.path.exists(ckpt_dir):
        raise ValueError(
            'The TEST.TEST_MODEL {} is not found'.format(ckpt_dir))

    if ckpt_dir is not None:
        print('load test model:', ckpt_dir)
        load_model(exe, test_prog, ckpt_dir)

    # Use streaming confusion matrix to calculate mean_iou
    np.set_printoptions(precision=4,
                        suppress=True,
                        linewidth=160,
                        floatmode="fixed")
    conf_mat = ConfusionMatrix(cfg.DATASET.NUM_CLASSES, streaming=True)
    fetch_list = [avg_loss.name, pred.name, grts.name, masks.name]
    num_images = 0
    step = 0
    all_step = cfg.DATASET.TEST_TOTAL_IMAGES // cfg.BATCH_SIZE + 1
    timer = Timer()
    timer.start()
    data_loader.start()
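    # Pull batches until the DataLoader signals the end of the validation set with EOFException.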
    while True:
        try:
            step += 1
            loss, pred, grts, masks = exe.run(test_prog,
                                              fetch_list=fetch_list,
                                              return_numpy=True)

            loss = np.mean(np.array(loss))

            num_images += pred.shape[0]
            conf_mat.calculate(pred, grts, masks)
            _, iou = conf_mat.mean_iou()
            _, acc = conf_mat.accuracy()

            speed = 1.0 / timer.elapsed_time()

            print(
                "[EVAL]step={} loss={:.5f} acc={:.4f} IoU={:.4f} step/sec={:.2f} | ETA {}"
                .format(step, loss, acc, iou, speed,
                        calculate_eta(all_step - step, speed)))
            timer.restart()
            sys.stdout.flush()
        except fluid.core.EOFException:
            break

    category_iou, avg_iou = conf_mat.mean_iou()
    category_acc, avg_acc = conf_mat.accuracy()
    print("[EVAL]#image={} acc={:.4f} IoU={:.4f}".format(
        num_images, avg_acc, avg_iou))
    print("[EVAL]Category IoU:", category_iou)
    print("[EVAL]Category Acc:", category_acc)
    print("[EVAL]Kappa:{:.4f}".format(conf_mat.kappa()))

    return category_iou, avg_iou, category_acc, avg_acc
Example #5
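# Evaluate a classification model with the older fluid DataFeeder API: load pruned
# weights (here from ./model/mobilenetv1_prune_50) with load_model and report
# top-1/top-5 accuracy per batch.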
def eval(args):
    train_reader = None
    test_reader = None
    if args.data == "mnist":
        import paddle.dataset.mnist as reader
        train_reader = reader.train()
        val_reader = reader.test()
        class_dim = 10
        image_shape = "1,28,28"
    elif args.data == "imagenet":
        import imagenet_reader as reader
        train_reader = reader.train()
        val_reader = reader.val()
        class_dim = 1000
        image_shape = "3,224,224"
    else:
        raise ValueError("{} is not supported.".format(args.data))
    image_shape = [int(m) for m in image_shape.split(",")]
    assert args.model in model_list, "{} is not in lists: {}".format(
        args.model, model_list)
    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    # model definition
    model = models.__dict__[args.model]()
    out = model.net(input=image, class_dim=class_dim)
    acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
    acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
    val_program = fluid.default_main_program().clone(for_test=True)
    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    val_reader = paddle.batch(val_reader, batch_size=args.batch_size)

    val_feeder = fluid.DataFeeder([image, label],
                                  place,
                                  program=val_program)

    load_model(val_program, "./model/mobilenetv1_prune_50")

    batch_id = 0
    acc_top1_ns = []
    acc_top5_ns = []
    for data in val_reader():
        start_time = time.time()
        acc_top1_n, acc_top5_n = exe.run(
            val_program,
            feed=val_feeder.feed(data),
            fetch_list=[acc_top1.name, acc_top5.name])
        end_time = time.time()
        if batch_id % args.log_period == 0:
            _logger.info(
                "Eval batch[{}] - acc_top1: {}; acc_top5: {}; time: {}".format(
                    batch_id, np.mean(acc_top1_n), np.mean(acc_top5_n),
                    end_time - start_time))
        acc_top1_ns.append(np.mean(acc_top1_n))
        acc_top5_ns.append(np.mean(acc_top5_n))
        batch_id += 1

    _logger.info("Final eval - acc_top1: {}; acc_top5: {}".format(
        np.mean(np.array(acc_top1_ns)), np.mean(np.array(acc_top5_ns))))
Example #6
    def test_prune(self):
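        # Same pruning round trip as the earlier test_prune example, but without labels
        # or an optimizer: prune "conv4_weights", save, rebuild, reload, and verify shapes.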
        train_program = fluid.Program()
        startup_program = fluid.Program()
        with fluid.program_guard(train_program, startup_program):
            input = fluid.data(name="image", shape=[None, 3, 16, 16])
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            sum1 = conv1 + conv2
            conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
            conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
            sum2 = conv4 + sum1
            conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
            conv6 = conv_bn_layer(conv5, 8, 3, "conv6")

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        scope = fluid.global_scope()
        exe.run(startup_program, scope=scope)
        criterion = 'bn_scale'
        pruner = Pruner(criterion)
        main_program, _, _ = pruner.prune(train_program,
                                          scope,
                                          params=["conv4_weights"],
                                          ratios=[0.5],
                                          place=place,
                                          lazy=False,
                                          only_graph=False,
                                          param_backup=None,
                                          param_shape_backup=None)

        x = numpy.random.random(size=(10, 3, 16, 16)).astype('float32')
        loss_data, = exe.run(train_program,
                             feed={"image": x},
                             fetch_list=[conv6.name])

        save_model(exe, main_program, 'model_file')
        pruned_program = fluid.Program()
        pruned_startup_program = fluid.Program()
        with fluid.program_guard(pruned_program, pruned_startup_program):
            input = fluid.data(name="image", shape=[None, 3, 16, 16])
            conv1 = conv_bn_layer(input, 8, 3, "conv1")
            conv2 = conv_bn_layer(conv1, 8, 3, "conv2")
            sum1 = conv1 + conv2
            conv3 = conv_bn_layer(sum1, 8, 3, "conv3")
            conv4 = conv_bn_layer(conv3, 8, 3, "conv4")
            sum2 = conv4 + sum1
            conv5 = conv_bn_layer(sum2, 8, 3, "conv5")
            conv6 = conv_bn_layer(conv5, 8, 3, "conv6")
        exe.run(pruned_startup_program)
        load_model(exe, pruned_program, 'model_file')
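        # Expected parameter shapes after pruning, identical to the earlier test_prune example.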
        shapes = {
            "conv1_weights": (4, 3, 3, 3),
            "conv2_weights": (4, 4, 3, 3),
            "conv3_weights": (8, 4, 3, 3),
            "conv4_weights": (4, 8, 3, 3),
            "conv5_weights": (8, 4, 3, 3),
            "conv6_weights": (8, 8, 3, 3)
        }

        for param in pruned_program.global_block().all_parameters():
            if "weights" in param.name:
                print("param: {}; param shape: {}".format(
                    param.name, param.shape))
                self.assertTrue(param.shape == shapes[param.name])