def main():
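    """Run pose estimation on 30 random test images, save the drawn results
    as results/result{i}.png, and collect ground-truth keypoints, predictions,
    and boxes for the mAP computation done in gene_json()."""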
    config = configparser.ConfigParser()
    config.read('config.ini', 'UTF-8')
    dataset_type = config.get('dataset', 'type')
    logger.info('loading {}'.format(dataset_type))
    if dataset_type == 'mpii':
        _, test_set = get_mpii_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'images'),
            annotations=config.get(dataset_type, 'annotations'),
            train_size=config.getfloat(dataset_type, 'train_size'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
            seed=config.getint('training_param', 'seed'),
        )
    elif dataset_type == 'coco':
        # the original images have already been resized to a fixed size
        test_set = get_coco_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'val_images'),
            annotations=config.get(dataset_type, 'val_annotations'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
        )
    else:
        raise Exception('Unknown dataset {}'.format(dataset_type))

    model = create_model(config)

    ## collect gt_KPs, pred_KPs and gt_bbox for computing mAP
    mAP = [[], [], []]
    # evaluate multiple test images
    for i in range(30):
        idx = random.choice(range(len(test_set)))
        example = test_set.get_example(idx)
        image = example['image']
        gt_kps = example['keypoints']
        # bbox is the person box for COCO and the head box for MPII
        gt_bboxs = example['bbox']
        humans = estimate(model, image.astype(np.float32))
        mAP[0].append(gt_kps)
        mAP[1].append(humans)
        mAP[2].append(gt_bboxs)
        pil_image = Image.fromarray(image.transpose(1, 2, 0).astype(np.uint8))
        pil_image = draw_humans(
            keypoint_names=model.keypoint_names,
            edges=model.edges,
            pil_image=pil_image,
            humans=humans
        )

        pil_image.save('results/result{}.png'.format(i), 'PNG')

    gene_json(mAP)
Example 2
def predict(args):
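    """Run inference on a single test-set image, draw the detected humans,
    and save the result with the input size, sample index, and inference
    time encoded in the file name."""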
    config = load_config(args)
    detection_thresh = config.getfloat('predict', 'detection_thresh')
    min_num_keypoints = config.getint('predict', 'min_num_keypoints')
    dataset_type = config.get('dataset', 'type')
    logger.info('loading {}'.format(dataset_type))
    if dataset_type == 'mpii':
        _, test_set = get_mpii_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'images'),
            annotations=config.get(dataset_type, 'annotations'),
            train_size=config.getfloat(dataset_type, 'train_size'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
            seed=config.getint('training_param', 'seed'),
        )
    elif dataset_type == 'coco':
        test_set = get_coco_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'val_images'),
            annotations=config.get(dataset_type, 'val_annotations'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
        )
    else:
        raise Exception('Unknown dataset {}'.format(dataset_type))

    model = create_model(args, config)

    # choose specific image
    idx = 50  # fixed sample index; use random.choice(range(len(test_set))) for a random image
    image = test_set.get_example(idx)['image']
    start = time.time()  # assumes `time` is imported with the other module-level imports
    humans = estimate(
        model,
        image.astype(np.float32),
        detection_thresh,
        min_num_keypoints,
    )
    inference_time = time.time() - start
    pil_image = Image.fromarray(image.transpose(1, 2, 0).astype(np.uint8))
    pil_image = draw_humans(keypoint_names=model.keypoint_names,
                            edges=model.edges,
                            pil_image=pil_image,
                            humans=humans,
                            visbbox=config.getboolean('predict', 'visbbox'))

    insize = parse_size(config.get('model_param', 'insize'))
    pil_image.save(
        'result_{}x{}_idx_{}_time_{}s.png'.format(
            insize[0], insize[1], idx, round(inference_time, 3)), 'PNG')
Example 3
def main():
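    """Minimal inference demo: pick one random test image, estimate poses,
    and save the visualization to result.png."""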
    config = configparser.ConfigParser()
    config.read('config.ini', 'UTF-8')
    dataset_type = config.get('dataset', 'type')
    logger.info('loading {}'.format(dataset_type))
    if dataset_type == 'mpii':
        _, test_set = get_mpii_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'images'),
            annotations=config.get(dataset_type, 'annotations'),
            train_size=config.getfloat(dataset_type, 'train_size'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
            seed=config.getint('training_param', 'seed'),
        )
    elif dataset_type == 'coco':
        test_set = get_coco_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'val_images'),
            annotations=config.get(dataset_type, 'val_annotations'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
        )
    else:
        raise Exception('Unknown dataset {}'.format(dataset_type))

    model = create_model(config)

    idx = random.choice(range(len(test_set)))
    image = test_set.get_example(idx)['image']
    humans = estimate(model,
                      image.astype(np.float32))
    pil_image = Image.fromarray(image.transpose(1, 2, 0).astype(np.uint8))
    pil_image = draw_humans(
        keypoint_names=model.keypoint_names,
        edges=model.edges,
        pil_image=pil_image,
        humans=humans
    )

    pil_image.save('result.png', 'PNG')
Example 4
def main():
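    """Training entry point: build the train/val datasets, set up the Chainer
    trainer with a linearly decaying learning rate, and keep snapshots of the
    best model (lowest validation loss)."""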
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='config.ini')
    parser.add_argument('--resume')
    parser.add_argument('--plot_samples', type=int, default=0)
    args = parser.parse_args()

    config = configparser.ConfigParser()
    config.read(args.config_path, 'UTF-8')

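    # speed up cuDNN: enable autotuning and raise the workspace size limit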
    chainer.global_config.autotune = True
    chainer.cuda.set_max_workspace_size(11388608)

    # create result dir and copy file
    logger.info('> store file to result dir %s', config.get('result', 'dir'))
    save_files(config.get('result', 'dir'))

    logger.info('> set up devices')
    devices = setup_devices(config.get('training_param', 'gpus'))
    set_random_seed(devices, config.getint('training_param', 'seed'))

    logger.info('> get dataset')
    dataset_type = config.get('dataset', 'type')
    if dataset_type == 'coco':
        # force to set `use_cache = False`
        train_set = get_coco_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'train_images'),
            annotations=config.get(dataset_type, 'train_annotations'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
            use_cache=False,
            do_augmentation=True,
        )
        test_set = get_coco_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'val_images'),
            annotations=config.get(dataset_type, 'val_annotations'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
            use_cache=False,
        )
    elif dataset_type == 'mpii':
        train_set, test_set = get_mpii_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'images'),
            annotations=config.get(dataset_type, 'annotations'),
            train_size=config.getfloat(dataset_type, 'train_size'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
            use_cache=config.getboolean(dataset_type, 'use_cache'),
            seed=config.getint('training_param', 'seed'),
        )
    else:
        raise Exception('Unknown dataset {}'.format(dataset_type))
    logger.info('dataset type: %s', dataset_type)
    logger.info('training images: %d', len(train_set))
    logger.info('validation images: %d', len(test_set))

    if args.plot_samples > 0:
        for i in range(args.plot_samples):
            data = train_set[i]
            visualize.plot('train-{}.png'.format(i), data['image'],
                           data['keypoints'], data['bbox'], data['is_labeled'],
                           data['edges'])
            data = test_set[i]
            visualize.plot('val-{}.png'.format(i), data['image'],
                           data['keypoints'], data['bbox'], data['is_labeled'],
                           data['edges'])

    logger.info('> load model')
    model = create_model(config, train_set)

    logger.info('> transform dataset')
    train_set = TransformDataset(train_set, model.encode)
    test_set = TransformDataset(test_set, model.encode)

    logger.info('> create iterators')
    train_iter = chainer.iterators.MultiprocessIterator(
        train_set,
        config.getint('training_param', 'batchsize'),
        n_processes=config.getint('training_param', 'num_process'))
    test_iter = chainer.iterators.SerialIterator(
        test_set,
        config.getint('training_param', 'batchsize'),
        repeat=False,
        shuffle=False)

    logger.info('> setup optimizer')
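    # MomentumSGD with an L2 weight-decay hook; the learning rate itself is
    # driven by the LinearShift extension registered below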
    optimizer = chainer.optimizers.MomentumSGD()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005))

    logger.info('> setup trainer')
    updater = training.updaters.ParallelUpdater(train_iter,
                                                optimizer,
                                                devices=devices)
    trainer = training.Trainer(
        updater, (config.getint('training_param', 'train_iter'), 'iteration'),
        config.get('result', 'dir'))

    logger.info('> setup extensions')
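    # decay the learning rate linearly from its initial value down to 0 over
    # the full number of training iterations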
    trainer.extend(
        extensions.LinearShift(
            'lr',
            value_range=(config.getfloat('training_param', 'learning_rate'), 0),
            time_range=(0, config.getint('training_param', 'train_iter'))),
        trigger=(1, 'iteration'))

    trainer.extend(
        extensions.Evaluator(test_iter, model, device=devices['main']))
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(
                ['main/loss', 'validation/main/loss'],
                'epoch',
                file_name='loss.png'))
    trainer.extend(extensions.LogReport())
    trainer.extend(extensions.observe_lr())
    trainer.extend(
        extensions.PrintReport([
            'epoch',
            'elapsed_time',
            'lr',
            'main/loss',
            'validation/main/loss',
            'main/loss_resp',
            'validation/main/loss_resp',
            'main/loss_iou',
            'validation/main/loss_iou',
            'main/loss_coor',
            'validation/main/loss_coor',
            'main/loss_size',
            'validation/main/loss_size',
            'main/loss_limb',
            'validation/main/loss_limb',
        ]))
    trainer.extend(extensions.ProgressBar())

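    # keep the trainer state and the model weights that achieve the lowest
    # validation loss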
    trainer.extend(
        extensions.snapshot(filename='best_snapshot'),
        trigger=training.triggers.MinValueTrigger('validation/main/loss'))
    trainer.extend(
        extensions.snapshot_object(model, filename='bestmodel.npz'),
        trigger=training.triggers.MinValueTrigger('validation/main/loss'))

    if args.resume:
        serializers.load_npz(args.resume, trainer)

    logger.info('> start training')
    trainer.run()
Example 5
def main():
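    """Evaluate a trained model: run pose estimation on args.test_num random
    test images and pass ground truths, predictions, boxes, and visibility
    flags to evaluation() to compute the PCK metric."""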

    import argparse
    parser = argparse.ArgumentParser()

    parser.add_argument("-m",
                        "--modelname",
                        help="model full name",
                        default='',
                        dest='modelName')
    parser.add_argument("-n",
                        "--testnum",
                        help="the number of test image",
                        type=int,
                        default=1000,
                        dest='test_num')
    args = parser.parse_args()
    modelName = args.modelName

    config = configparser.ConfigParser()
    config.read('config.ini', 'UTF-8')
    dataset_type = config.get('dataset', 'type')
    logger.info('loading {}'.format(dataset_type))
    if dataset_type == 'mpii':
        _, test_set = get_mpii_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'images'),
            annotations=config.get(dataset_type, 'annotations'),
            train_size=config.getfloat(dataset_type, 'train_size'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
            seed=config.getint('training_param', 'seed'),
        )
    elif dataset_type == 'coco':
        test_set = get_coco_dataset(
            insize=parse_size(config.get('model_param', 'insize')),
            image_root=config.get(dataset_type, 'val_images'),
            annotations=config.get(dataset_type, 'val_annotations'),
            min_num_keypoints=config.getint(dataset_type, 'min_num_keypoints'),
        )
    else:
        raise Exception('Unknown dataset {}'.format(dataset_type))

    model = create_model(config, modelName)

    ## collect gt_KPs, gt_bboxs, humans (pred_KPs, pred_bboxs) and is_visible for computing the PCK metric
    pck_object = [[], [], [], []]

    modelName = modelName if modelName else 'trained/bestmodel.npz'
    test_num = args.test_num
    print('model name: {}\t test image number: {}'.format(modelName, test_num))
    # evaluate multiple test images
    for i in tqdm(range(test_num)):
        idx = random.choice(range(len(test_set)))
        example = test_set.get_example(idx)
        image = example['image']
        gt_kps = example['keypoints']
        gt_bboxs = example['bbox']  # (left-down point, w, h)
        is_visible = example['is_visible']

        # humans contains the predicted keypoints and bounding boxes
        humans = estimate(model, image.astype(np.float32), 0.15)  # 0.15: detection threshold
        pck_object[0].append(gt_kps)
        pck_object[1].append(humans)
        pck_object[2].append(gt_bboxs)
        pck_object[3].append(is_visible)
    mylog.info('model name: {}\t test image number: {}'.format(
        modelName, test_num))
    evaluation(config, pck_object)