Example #1
def visualizer(test_model, img):

    # annotation load
    d = Dataset()
    annot = d.load_annot(cfg.testset)
    gt_img_id = d.load_imgid(annot)

    # human bbox load
    if cfg.useGTbbox and cfg.testset in ['train', 'val']:
        if cfg.testset == 'train':
            dets = d.load_train_data(score=True)
        else:
            dets = d.load_val_data_with_annot()
        dets.sort(key=lambda x: (x['image_id']))
    else:
        with open(cfg.human_det_path, 'r') as f:
            dets = json.load(f)
        dets = [i for i in dets if i['image_id'] in gt_img_id]
        dets = [i for i in dets if i['category_id'] == 1]
        dets = [i for i in dets if i['score'] > 0]
        dets.sort(key=lambda x: (x['image_id'], x['score']), reverse=True)

        img_id = []
        for i in dets:
            img_id.append(i['image_id'])
        imgname = d.imgid_to_imgname(annot, img_id, cfg.testset)
        for i in range(len(dets)):
            dets[i]['imgpath'] = imgname[i]

    # keep only the first detection whose image path contains the requested image
    dets_new = []
    for det in dets:
        if img in det['imgpath']:
            dets_new.append(det)
            break
    dets = dets_new

    # job assign (multi-gpu)
    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    img_num = len(np.unique([i['image_id'] for i in dets]))
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        # advance img_end past all detections that belong to the current image
        img_end = img_start + 1
        while img_end < len(dets) and dets[img_end]['image_id'] == dets[
                img_start]['image_id']:
            img_end += 1
        # close a per-GPU chunk every images_per_gpu images and at the last image
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(gpu_id):
        cfg.set_args(args.gpu_ids.split(',')[gpu_id])
        tester = Tester(Model(), cfg)
        tester.load_weights(test_model)
        # slice of dets for this GPU; renamed to avoid shadowing the builtin range()
        test_range = [ranges[gpu_id], ranges[gpu_id + 1]]
        return test_net(tester, dets, test_range, gpu_id, True)

    MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    result = MultiGPUFunc.work()
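
The loop labelled "job assign (multi-gpu)" above (and repeated in the examples below) splits a detection list that is sorted by image_id into contiguous index ranges, one per GPU, without splitting the detections of a single image across GPUs. A minimal, self-contained sketch of just that partitioning step, using made-up detections and a hypothetical two-GPU setup:

import numpy as np

dets = [{'image_id': i} for i in (1, 1, 2, 3, 3, 3, 4)]   # already sorted by image_id
num_gpus = 2                                               # hypothetical GPU count

img_num = len(np.unique([d['image_id'] for d in dets]))
images_per_gpu = int(img_num / num_gpus) + 1

ranges = [0]
img_start = 0
for run_img in range(img_num):
    # advance img_end past all detections of the current image
    img_end = img_start + 1
    while img_end < len(dets) and dets[img_end]['image_id'] == dets[img_start]['image_id']:
        img_end += 1
    # close a chunk every images_per_gpu images and at the last image
    if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
        ranges.append(img_end)
    img_start = img_end

print(ranges)   # [0, 6, 7]: dets[0:6] go to GPU 0, dets[6:7] to GPU 1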
Example #2
def test(test_model, logger):
    import csv
    anns = []
    info = []
    test_dir = '/home/lanhao/FashionAI/test3/'
    test_ann_path = '/home/lanhao/FashionAI/test3/test_skirt.csv'
    # csv.reader needs text mode under Python 3 ('rb' only works under Python 2)
    with open(test_ann_path, 'r') as f:
        reader = csv.reader(f)
        for row in reader:
            anns.append(row)
    info.append(anns[0])
    anns = anns[1:]
    dets = []
    for ann in anns:
        imgname = ann[0]
        imgpath = test_dir + imgname  # test_dir already ends with '/'
        bbox = ann[2:]
        dets.append(dict(imgpath=imgpath, bbox=bbox))

    img_num = len(np.unique([i['imgpath'] for i in dets]))

    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        img_end = img_start + 1
        while img_end < len(dets) and dets[img_end]['imgpath'] == dets[
                img_start]['imgpath']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(gpu_id):
        cfg.set_args(args.gpu_ids.split(',')[gpu_id])
        tester = Tester(Network(), cfg)
        tester.load_weights(test_model)
        test_range = [ranges[gpu_id], ranges[gpu_id + 1]]
        return test_net(tester, logger, dets, test_range)

    MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    all_res, dump_results = MultiGPUFunc.work()

    # evaluation
    results = []
    results.append(info)
    for i in range(len(anns)):
        ann = anns[i]
        keypoints = np.array(dump_results[i]['keypoints']).reshape(
            (cfg.nr_skeleton, 3))
        row = prepare_row(ann, keypoints)
        results.append(row)
    write_csv('result_skirt_test_0504.csv', results)
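
prepare_row and write_csv are helpers from this example's own repository and are not shown here. For orientation only, a plain-stdlib sketch of the final write step (the helper name write_csv_rows is hypothetical, and treating each entry of results as one flat CSV row is an assumption):

import csv

def write_csv_rows(path, rows):
    # rows: a list of flat lists, e.g. the header row followed by one row per
    # test image (image name, category, and the predicted keypoints)
    with open(path, 'w', newline='') as f:
        csv.writer(f).writerows(rows)

# write_csv_rows('result_skirt_test_0504.csv', results)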
Example #3
def validate(test_model, logger):
    eval_gt = COCO(cfg.gt_path)

    # load person detections for the validation set
    import json
    with open(cfg.det_path, 'r') as f:
        dets = json.load(f)["annotations"]

    root_data_dir = "/home/data/COCO/MSCOCO/"
    dets = [i for i in dets if i['image_id'] in eval_gt.imgs]
    dets = [i for i in dets if i['category_id'] == 1]
    dets.sort(key=lambda x: x['image_id'], reverse=True)
    for i in dets:
        i['imgpath'] = root_data_dir + 'val2017/' + str(
            i['image_id']).zfill(12) + '.jpg'
        i['score'] = 1.
    img_num = len(np.unique([i['image_id'] for i in dets]))

    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        img_end = img_start + 1
        while img_end < len(dets) and dets[img_end]['image_id'] == dets[
                img_start]['image_id']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(gpu_id):
        cfg.set_args(args.gpu_ids.split(',')[gpu_id])
        tester = Tester(Network(), cfg)
        tester.load_weights(test_model)
        test_range = [ranges[gpu_id], ranges[gpu_id + 1]]
        return test_net(tester, logger, dets, test_range)

    MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    all_res, dump_results = MultiGPUFunc.work()

    # evaluation
    result_path = osp.join(cfg.output_dir, 'results.json')
    with open(result_path, 'w') as f:
        json.dump(dump_results, f)

    eval_dt = eval_gt.loadRes(result_path)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='keypoints')

    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
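
COCOeval with iouType='keypoints' scores a results file in the standard COCO keypoint result format: one entry per detected person, with a flat list of 17 * (x, y, v) values. A minimal sketch of one such entry (the image id, coordinates and scores are made up):

import json

# One detection in COCO keypoint result format: 17 joints, each (x, y, v).
example_result = {
    'image_id': 397133,          # must be an image id present in the ground truth
    'category_id': 1,            # person
    'keypoints': [v for _ in range(17) for v in (100.0, 200.0, 1.0)],
    'score': 0.9,                # instance-level confidence
}

with open('results.json', 'w') as f:
    json.dump([example_result], f)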
Example #4
def test(test_model):

    # annotation load
    d = Dataset()
    annot = d.load_annot(cfg.testset)
    # input pose load
    #input_pose = d.coco_pose_load(annot)
    #d.evaluation(input_pose, annot, cfg.result_dir, cfg.testset)

    input_pose = d.input_pose_load(annot, cfg.testset)
    # job assign (multi-gpu)
    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    img_num = len(np.unique([i['image_id'] for i in input_pose]))
    print(img_num)
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        img_end = img_start + 1
        while img_end < len(input_pose) and input_pose[img_end][
                'image_id'] == input_pose[img_start]['image_id']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(gpu_id):
        cfg.set_args(args.gpu_ids.split(',')[gpu_id])
        tester = Tester(Model(), cfg)
        tester.load_weights(test_model)
        test_range = [ranges[gpu_id], ranges[gpu_id + 1]]
        return test_net(tester, input_pose, test_range, gpu_id)

    MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    result = MultiGPUFunc.work()
    # func(0)
    # evaluation
    d.evaluation(result, annot, cfg.result_dir, cfg.testset)
Example #5
def test(test_model, logger):
    eval_gt = COCO(cfg.gt_path)
    import json
    with open(cfg.det_path, 'r') as f:
        dets = json.load(f)

    test_subset = False
    if test_subset:
        eval_gt.imgs = dict(list(eval_gt.imgs.items())[:100])
        anns = dict()
        for i in eval_gt.imgs:
            for j in eval_gt.getAnnIds(i):
                anns[j] = eval_gt.anns[j]
        eval_gt.anns = anns
    dets = [i for i in dets if i['image_id'] in eval_gt.imgs]

    dets = [i for i in dets if i['category_id'] == 1]
    dets.sort(key=lambda x: (x['image_id'], x['score']), reverse=True)
    for i in dets:
        i['imgpath'] = '/home/dx/data/coco/val2014/COCO_val2014_000000%06d.jpg' % i['image_id']
    img_num = len(np.unique([i['image_id'] for i in dets]))

    use_gtboxes = False
    if use_gtboxes:
        d = COCOJoints()
        coco_train_data, coco_test_data = d.load_data()
        coco_test_data.sort(key=lambda x: x['imgid'])
        for i in coco_test_data:
            i['image_id'] = i['imgid']
            i['score'] = 1.
        dets = coco_test_data
        # img_num above was computed from the detector results; refresh it here
        img_num = len(np.unique([i['image_id'] for i in dets]))

    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        img_end = img_start + 1
        while img_end < len(dets) and dets[img_end]['image_id'] == dets[img_start]['image_id']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(gpu_id):
        cfg.set_args(args.gpu_ids.split(',')[gpu_id])
        tester = Tester(Network(), cfg)
        tester.load_weights(test_model)
        test_range = [ranges[gpu_id], ranges[gpu_id + 1]]
        return test_net(tester, logger, dets, test_range)

    MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    all_res, dump_results = MultiGPUFunc.work()

    # evaluation
    result_path = osp.join(cfg.output_dir, 'results.json')
    with open(result_path, 'w') as f:
        json.dump(dump_results, f)

    eval_dt = eval_gt.loadRes(result_path)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='keypoints')

    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
def test(test_model):
    # annotation load
    d = Dataset()
    d.setup_paths(cfg)
    annot = d.load_annot(cfg.TEST.testset)
    gt_img_id = d.load_imgid(annot)

    if args.use_dets:
        print("loading detections from human detector")

        with open(cfg.human_det_path, 'r') as f:
            dets = json.load(f)

        dets = [i for i in dets if i['image_id'] in gt_img_id]
        dets = [i for i in dets if i['category_id'] == 1]
        dets = [i for i in dets if i['score'] > 0]
        dets.sort(key=lambda x: (x['image_id'], x['score']), reverse=True)
        img_id = []
        for i in dets:
            img_id.append(i['image_id'])
        imgname = d.imgid_to_imgname(annot, img_id, cfg.TEST.testset)
        for i in range(len(dets)):
            dets[i]['imgpath'] = imgname[i]
    else:
        print("loading ground truth detections")
        if cfg.TEST.testset == 'train':
            dets = d.load_train_data(score=True)
        else:
            dets = d.load_val_data_with_annot(cfg.TEST.testset)
        dets.sort(key=lambda x: (x['image_id']))

    # job assign (multi-gpu)
    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    img_num = len(np.unique([i['image_id'] for i in dets]))
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        img_end = img_start + 1
        while img_end < len(dets) and dets[img_end]['image_id'] == dets[
                img_start]['image_id']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(gpu_id):
        cfg.set_args(args.gpu_ids.split(',')[gpu_id])
        tester = Tester(Model(), cfg)
        tester.load_weights(test_model)
        test_range = [ranges[gpu_id], ranges[gpu_id + 1]]
        if cfg.MODEL.occluded_detection:
            return test_net_occ(tester, dets, test_range, gpu_id, d.sigmas)
        else:
            return test_net(tester, dets, test_range, gpu_id, d.sigmas)

    MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    result = MultiGPUFunc.work()

    # evaluation
    d.evaluation(result, annot, cfg.result_dir, cfg.TEST.testset,
                 cfg.EXPERIMENT_NAME, args.test_epoch)
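
All of the examples rely on module-level objects (cfg, args, the Dataset/Model/Network classes, Tester, test_net, ...) that come from their respective repositories and are not part of the snippets. A hypothetical entry point sketching the kind of args object the gpu_ids handling expects; the flag names, defaults, and the way test_model is passed are illustrative, not taken from the source:

import argparse

parser = argparse.ArgumentParser()
# comma-separated GPU list; the examples split it with args.gpu_ids.split(',')
parser.add_argument('--gpu', dest='gpu_ids', type=str, default='0')
parser.add_argument('--test_epoch', type=str, default='140')
args = parser.parse_args()

if __name__ == '__main__':
    # test_model is typically an epoch number or checkpoint path consumed by
    # tester.load_weights(test_model)
    test(args.test_epoch)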