Code example #1
File: test.py  Project: zhong110020/models
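
The examples on this page are entry points from PaddlePaddle's Fluid-era human pose estimation sample (Simple Baselines). The listing omits the module-level imports; a sketch of what each file assumes, with the repository-local module paths inferred from usage, is:

import os

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from tqdm import tqdm  # used by code example #4

from lib import pose_resnet  # model definition (path is an assumption)
# Helpers such as print_arguments, print_immediately, flip_back, accuracy,
# get_final_preds, save_batch_heatmaps, save_predict_results, AverageMeter,
# optimizer_setting, mpii_evaluate and print_name_value come from the
# repository's utils package.
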
def test(args):
    if args.dataset == 'coco':
        import lib.coco_reader as reader
        IMAGE_SIZE = [288, 384]
        FLIP_PAIRS = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
                      [13, 14], [15, 16]]
        args.kp_dim = 17
    elif args.dataset == 'mpii':
        import lib.mpii_reader as reader
        IMAGE_SIZE = [384, 384]
        FLIP_PAIRS = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
        args.kp_dim = 16
    else:
        raise ValueError('The dataset {} is not supported yet.'.format(
            args.dataset))

    print_arguments(args)

    # Image and target
    image = layers.data(name='image',
                        shape=[3, IMAGE_SIZE[1], IMAGE_SIZE[0]],
                        dtype='float32')
    file_id = layers.data(name='file_id', shape=[1], dtype='int')

    # Build model
    model = pose_resnet.ResNet(layers=50, kps_num=args.kp_dim, test_mode=True)

    # Output
    output = model.net(input=image, target=None, target_weight=None)

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    if args.checkpoint is not None:
        fluid.io.load_persistables(exe, args.checkpoint)

    # Dataloader
    test_reader = paddle.batch(reader.test(), batch_size=args.batch_size)
    feeder = fluid.DataFeeder(place=place, feed_list=[image, file_id])

    test_exe = fluid.ParallelExecutor(
        use_cuda=args.use_gpu,
        main_program=fluid.default_main_program().clone(for_test=True),
        loss_name=None)

    fetch_list = [image.name, output.name]

    for batch_id, data in enumerate(test_reader()):
        print_immediately("Processing batch #%d" % batch_id)
        num_images = len(data)

        file_ids = []
        for i in range(num_images):
            file_ids.append(data[i][1])

        input_image, out_heatmaps = test_exe.run(fetch_list=fetch_list,
                                                 feed=feeder.feed(data))

        if args.flip_test:
            # Flip all the images in the same batch horizontally
            data_flipped = []
            for i in range(num_images):
                data_flipped.append((data[i][0][:, :, ::-1], data[i][1]))

            # Run inference again on the flipped batch
            _, output_flipped = test_exe.run(fetch_list=fetch_list,
                                             feed=feeder.feed(data_flipped))

            # Flip the heatmaps back and swap left/right keypoint channels
            output_flipped = flip_back(output_flipped, FLIP_PAIRS)

            # Features are not aligned; shift the flipped heatmaps one pixel
            # to the right for higher accuracy
            if args.shift_heatmap:
                output_flipped[:, :, :, 1:] = \
                        output_flipped.copy()[:, :, :, 0:-1]

            # Average the original and flipped predictions
            out_heatmaps = (out_heatmaps + output_flipped) * 0.5
            save_predict_results(input_image,
                                 out_heatmaps,
                                 file_ids,
                                 fold_name='results')
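
flip_back itself is not shown on this page. A minimal NumPy sketch consistent with how it is called above (undo the horizontal flip along the heatmap width axis, then swap each left/right keypoint channel pair) would be:

def flip_back(output_flipped, flip_pairs):
    # output_flipped: heatmaps of shape [N, C, H, W] predicted on
    # horizontally flipped input images
    output_flipped = output_flipped[:, :, :, ::-1]  # undo the mirror
    for left, right in flip_pairs:
        # Swap the heatmap channels of each left/right keypoint pair
        tmp = output_flipped[:, left].copy()
        output_flipped[:, left] = output_flipped[:, right]
        output_flipped[:, right] = tmp
    return output_flipped
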
Code example #2
def valid(args):
    if args.dataset == 'coco':
        import lib.coco_reader as reader
        IMAGE_SIZE = [288, 384]
        HEATMAP_SIZE = [72, 96]
        FLIP_PAIRS = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
                      [13, 14], [15, 16]]
        args.kp_dim = 17
        args.total_images = 144406  # 149813
    elif args.dataset == 'mpii':
        import lib.mpii_reader as reader
        IMAGE_SIZE = [384, 384]
        HEATMAP_SIZE = [96, 96]
        FLIP_PAIRS = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
        args.kp_dim = 16
        args.total_images = 2958  # validation
    else:
        raise ValueError('The dataset {} is not supported yet.'.format(
            args.dataset))

    print_arguments(args)

    # Image and target
    image = layers.data(name='image',
                        shape=[3, IMAGE_SIZE[1], IMAGE_SIZE[0]],
                        dtype='float32')
    target = layers.data(name='target',
                         shape=[args.kp_dim, HEATMAP_SIZE[1], HEATMAP_SIZE[0]],
                         dtype='float32')
    target_weight = layers.data(name='target_weight',
                                shape=[args.kp_dim, 1],
                                dtype='float32')
    center = layers.data(name='center', shape=[2], dtype='float32')
    scale = layers.data(name='scale', shape=[2], dtype='float32')
    score = layers.data(name='score', shape=[1], dtype='float32')

    # Build model
    model = pose_resnet.ResNet(layers=50, kps_num=args.kp_dim)

    # Output
    loss, output = model.net(input=image,
                             target=target,
                             target_weight=target_weight)

    # Parameters from model and arguments
    params = {}
    params["total_images"] = args.total_images
    params["lr"] = args.lr
    params["num_epochs"] = args.num_epochs
    params["learning_strategy"] = {}
    params["learning_strategy"]["batch_size"] = args.batch_size
    params["learning_strategy"]["name"] = args.lr_strategy

    if args.with_mem_opt:
        fluid.memory_optimize(
            fluid.default_main_program(),
            skip_opt_set=[loss.name, output.name, target.name])

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # NOTE: this hard-codes the pretrained model path, overriding any
    # value passed on the command line
    args.pretrained_model = './pretrained/resnet_50/115'
    if args.pretrained_model:

        def if_exist(var):
            exist_flag = os.path.exists(
                os.path.join(args.pretrained_model, var.name))
            return exist_flag

        fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)

    if args.checkpoint is not None:
        fluid.io.load_persistables(exe, args.checkpoint)

    # Dataloader
    valid_reader = paddle.batch(reader.valid(), batch_size=args.batch_size)
    feeder = fluid.DataFeeder(
        place=place,
        feed_list=[image, target, target_weight, center, scale, score])

    valid_exe = fluid.ParallelExecutor(
        use_cuda=args.use_gpu,
        main_program=fluid.default_main_program().clone(for_test=False),
        loss_name=loss.name)

    fetch_list = [image.name, loss.name, output.name, target.name]

    # For validation
    acc = AverageMeter()
    idx = 0

    num_samples = args.total_images
    all_preds = np.zeros((num_samples, args.kp_dim, 3), dtype=np.float32)
    all_boxes = np.zeros((num_samples, 6))

    for batch_id, data in enumerate(valid_reader()):
        num_images = len(data)

        centers = []
        scales = []
        scores = []
        for i in range(num_images):
            centers.append(data[i][3])
            scales.append(data[i][4])
            scores.append(data[i][5])

        input_image, loss, out_heatmaps, target_heatmaps = valid_exe.run(
            fetch_list=fetch_list, feed=feeder.feed(data))

        if args.flip_test:
            # Flip all the images in the same batch horizontally
            data_flipped = []
            for i in range(num_images):
                # Input, target, target_weight, center, scale, score
                data_flipped.append((
                    data[i][0][:, :, ::-1],
                    data[i][1],
                    data[i][2],
                    data[i][3],
                    data[i][4],
                    data[i][5]))

            # Run inference again on the flipped batch
            _, _, output_flipped, _ = valid_exe.run(
                fetch_list=fetch_list, feed=feeder.feed(data_flipped))

            # Flip the heatmaps back and swap left/right keypoint channels
            output_flipped = flip_back(output_flipped, FLIP_PAIRS)

            # Features are not aligned; shift the flipped heatmaps one pixel
            # to the right for higher accuracy
            if args.shift_heatmap:
                output_flipped[:, :, :, 1:] = \
                        output_flipped.copy()[:, :, :, 0:-1]

            # Average the original and flipped predictions
            # out_heatmaps shape: [batch, args.kp_dim, 96, 96]
            out_heatmaps = (out_heatmaps + output_flipped) * 0.5

        loss = np.mean(np.array(loss))

        # Accuracy
        _, avg_acc, cnt, pred = accuracy(out_heatmaps, target_heatmaps)
        acc.update(avg_acc, cnt)

        # Current center, scale, score
        centers = np.array(centers)
        scales = np.array(scales)
        scores = np.array(scores)

        preds, maxvals = get_final_preds(args, out_heatmaps, centers, scales)

        all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
        all_preds[idx:idx + num_images, :, 2:3] = maxvals
        # TODO: double-check the all_boxes bookkeeping below
        all_boxes[idx:idx + num_images, 0:2] = centers[:, 0:2]
        all_boxes[idx:idx + num_images, 2:4] = scales[:, 0:2]
        all_boxes[idx:idx + num_images, 4] = np.prod(scales * 200, 1)
        all_boxes[idx:idx + num_images, 5] = scores
        # image_path.extend(meta['image'])

        idx += num_images

        print('Batch [{:4d}] '
              'Loss = {:.5f} '
              'Acc = {:.5f}'.format(batch_id, loss, acc.avg))

        if batch_id % 10 == 0:
            save_batch_heatmaps(input_image,
                                out_heatmaps,
                                file_name='visualization@val.jpg',
                                normalize=True)

    # Evaluate (data root and split below are hard-coded for the MPII layout)
    args.DATAROOT = 'data/mpii'
    args.TEST_SET = 'valid'
    output_dir = ''
    filenames = []
    imgnums = []
    image_path = []
    name_values, perf_indicator = mpii_evaluate(args, all_preds, output_dir,
                                                all_boxes, image_path,
                                                filenames, imgnums)

    print_name_value(name_values, perf_indicator)
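
Two helpers used in valid() are easy to reconstruct. AverageMeter is a running average weighted by sample count (matching acc.update(avg_acc, cnt) and acc.avg), and the heart of get_final_preds is an argmax over each heatmap. Both sketches below are assumptions based only on the call sites above:

class AverageMeter(object):
    """Running average weighted by the number of samples per update."""

    def __init__(self):
        self.sum, self.count, self.avg = 0.0, 0, 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count if self.count > 0 else 0.0


def get_max_preds(heatmaps):
    # heatmaps: [N, C, H, W] -> preds [N, C, 2] in (x, y) order plus
    # maxvals [N, C, 1]; get_final_preds additionally maps the coordinates
    # back to the original image space using center and scale
    n, c, h, w = heatmaps.shape
    flat = heatmaps.reshape((n, c, -1))
    idx = np.argmax(flat, axis=2)
    maxvals = np.amax(flat, axis=2).reshape((n, c, 1))
    preds = np.zeros((n, c, 2), dtype=np.float32)
    preds[:, :, 0] = idx % w   # x
    preds[:, :, 1] = idx // w  # y
    # Zero out locations whose peak response is not positive
    preds *= (maxvals > 0.0).astype(np.float32)
    return preds, maxvals
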
Code example #3
File: train.py  Project: sunqiang25/models-develop
def train(args):
    if args.dataset == 'coco':
        import lib.coco_reader as reader
        IMAGE_SIZE = [288, 384]
        HEATMAP_SIZE = [72, 96]
        args.kp_dim = 17
        args.total_images = 144406  # 149813
    elif args.dataset == 'mpii':
        import lib.mpii_reader as reader
        IMAGE_SIZE = [384, 384]
        HEATMAP_SIZE = [96, 96]
        args.kp_dim = 16
        args.total_images = 22246
    else:
        raise ValueError('The dataset {} is not supported yet.'.format(
            args.dataset))

    print_arguments(args)

    # Image and target
    image = layers.data(name='image',
                        shape=[3, IMAGE_SIZE[1], IMAGE_SIZE[0]],
                        dtype='float32')
    target = layers.data(name='target',
                         shape=[args.kp_dim, HEATMAP_SIZE[1], HEATMAP_SIZE[0]],
                         dtype='float32')
    target_weight = layers.data(name='target_weight',
                                shape=[args.kp_dim, 1],
                                dtype='float32')

    # Build model
    model = pose_resnet.ResNet(layers=50, kps_num=args.kp_dim)

    # Output
    loss, output = model.net(input=image,
                             target=target,
                             target_weight=target_weight)

    # Parameters from model and arguments
    params = {}
    params["total_images"] = args.total_images
    params["lr"] = args.lr
    params["num_epochs"] = args.num_epochs
    params["learning_strategy"] = {}
    params["learning_strategy"]["batch_size"] = args.batch_size
    params["learning_strategy"]["name"] = args.lr_strategy

    # Initialize optimizer
    optimizer = optimizer_setting(args, params)
    optimizer.minimize(loss)

    if args.with_mem_opt:
        fluid.memory_optimize(
            fluid.default_main_program(),
            skip_opt_set=[loss.name, output.name, target.name])

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    if args.pretrained_model:

        def if_exist(var):
            exist_flag = os.path.exists(
                os.path.join(args.pretrained_model, var.name))
            return exist_flag

        fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)

    if args.checkpoint is not None:
        fluid.io.load_persistables(exe, args.checkpoint)

    # Dataloader
    train_reader = paddle.batch(reader.train(), batch_size=args.batch_size)
    feeder = fluid.DataFeeder(place=place,
                              feed_list=[image, target, target_weight])

    train_exe = fluid.ParallelExecutor(use_cuda=args.use_gpu,
                                       loss_name=loss.name)
    fetch_list = [image.name, loss.name, output.name]

    for pass_id in range(params["num_epochs"]):
        for batch_id, data in enumerate(train_reader()):
            current_lr = np.array(paddle.fluid.global_scope().find_var(
                'learning_rate').get_tensor())

            input_image, loss, out_heatmaps = train_exe.run(
                fetch_list, feed=feeder.feed(data))

            loss = np.mean(np.array(loss))

            print_immediately('Pass [{:3d}] Batch [{:4d}] LR: {:.10f} '
                              'Loss = {:.5f}'.format(pass_id, batch_id,
                                                     current_lr[0], loss))

            if batch_id % 10 == 0:
                save_batch_heatmaps(input_image,
                                    out_heatmaps,
                                    file_name='visualization@train.jpg',
                                    normalize=True)

        model_path = os.path.join(args.model_save_dir,
                                  'simplebase-{}'.format(args.dataset),
                                  str(pass_id))
        if not os.path.isdir(model_path):
            os.makedirs(model_path)
        fluid.io.save_persistables(exe, model_path)
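
optimizer_setting builds the optimizer from params. The exact schedule lives elsewhere in the repository; a plausible sketch of a piecewise-decay Momentum optimizer (the decay epochs 90/120 and the weight decay are assumptions) is:

def optimizer_setting(args, params):
    batch_size = params["learning_strategy"]["batch_size"]
    steps_per_pass = int(params["total_images"] / batch_size) + 1
    # Drop the base LR by 10x at the assumed epoch boundaries
    boundaries = [steps_per_pass * epoch for epoch in (90, 120)]
    base_lr = params["lr"]
    values = [base_lr, base_lr * 0.1, base_lr * 0.01]
    return fluid.optimizer.Momentum(
        learning_rate=fluid.layers.piecewise_decay(boundaries=boundaries,
                                                   values=values),
        momentum=0.9,
        regularization=fluid.regularizer.L2Decay(1e-4))
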
Code example #4
File: test.py  Project: wi-code/Python3Notes
def test(args):
    if args.dataset == 'coco':
        import lib.coco_reader as reader
        IMAGE_SIZE = [288, 384]
        # HEATMAP_SIZE = [72, 96]
        FLIP_PAIRS = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12],
                      [13, 14], [15, 16]]
        args.kp_dim = 17
        args.total_images = 144406  # 149813
    elif args.dataset == 'mpii':
        import lib.mpii_reader as reader
        IMAGE_SIZE = [384, 384]
        # HEATMAP_SIZE = [96, 96]
        FLIP_PAIRS = [[0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13]]
        args.kp_dim = 16
        args.total_images = 2958  # validation
    else:
        raise ValueError('The dataset {} is not supported yet.'.format(
            args.dataset))

    print_arguments(args)

    # Image and target
    image = layers.data(name='image',
                        shape=[3, IMAGE_SIZE[1], IMAGE_SIZE[0]],
                        dtype='float32')
    file_id = layers.data(name='file_id', shape=[1], dtype='int')

    # Build model
    model = pose_resnet.ResNet(layers=50, kps_num=args.kp_dim, test_mode=True)

    # Output
    output = model.net(input=image, target=None, target_weight=None)

    # Parameters from model and arguments
    params = {}
    params["total_images"] = args.total_images
    params["lr"] = args.lr
    params["num_epochs"] = args.num_epochs
    params["learning_strategy"] = {}
    params["learning_strategy"]["batch_size"] = args.batch_size
    params["learning_strategy"]["name"] = args.lr_strategy

    if args.with_mem_opt:
        fluid.memory_optimize(fluid.default_main_program(),
                              skip_opt_set=[output.name])

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # NOTE: this hard-codes the pretrained model path, overriding any
    # value passed on the command line
    args.pretrained_model = './pretrained/resnet_50/115'
    if args.pretrained_model:

        def if_exist(var):
            exist_flag = os.path.exists(
                os.path.join(args.pretrained_model, var.name))
            return exist_flag

        fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)

    if args.checkpoint is not None:
        fluid.io.load_persistables(exe, args.checkpoint)

    # Dataloader
    test_reader = paddle.batch(reader.test(), batch_size=args.batch_size)
    feeder = fluid.DataFeeder(place=place, feed_list=[image, file_id])

    test_exe = fluid.ParallelExecutor(
        use_cuda=args.use_gpu,
        # Clone the program for inference (there is no loss in test mode)
        main_program=fluid.default_main_program().clone(for_test=True),
        loss_name=None)

    fetch_list = [image.name, output.name]

    for batch_id, data in tqdm(enumerate(test_reader())):
        num_images = len(data)

        file_ids = []
        for i in range(num_images):
            file_ids.append(data[i][1])

        input_image, out_heatmaps = test_exe.run(fetch_list=fetch_list,
                                                 feed=feeder.feed(data))

        if args.flip_test:
            # Flip all the images in the same batch horizontally
            data_flipped = []
            for i in range(num_images):
                data_flipped.append((data[i][0][:, :, ::-1], data[i][1]))

            # Run inference again on the flipped batch
            _, output_flipped = test_exe.run(fetch_list=fetch_list,
                                             feed=feeder.feed(data_flipped))

            # Flip the heatmaps back and swap left/right keypoint channels
            output_flipped = flip_back(output_flipped, FLIP_PAIRS)

            # Features are not aligned; shift the flipped heatmaps one pixel
            # to the right for higher accuracy
            if args.shift_heatmap:
                output_flipped[:, :, :, 1:] = \
                        output_flipped.copy()[:, :, :, 0:-1]

            # Average the original and flipped predictions
            out_heatmaps = (out_heatmaps + output_flipped) * 0.5
            save_predict_results(input_image,
                                 out_heatmaps,
                                 file_ids,
                                 fold_name='results')
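
None of the examples show how args is produced. A minimal hypothetical driver (flag names inferred from the args.* attributes used above) could look like:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='mpii', choices=['coco', 'mpii'])
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--use_gpu', action='store_true')
    parser.add_argument('--with_mem_opt', action='store_true')
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--lr_strategy', default='piecewise_decay')
    parser.add_argument('--num_epochs', type=int, default=140)
    parser.add_argument('--model_save_dir', default='output')
    parser.add_argument('--pretrained_model', default=None)
    parser.add_argument('--checkpoint', default=None)
    parser.add_argument('--flip_test', action='store_true')
    parser.add_argument('--shift_heatmap', action='store_true')
    args = parser.parse_args()
    test(args)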