Example #1
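    # Constructor excerpt: creates a SIFT feature extractor, merges the config
    # file given on the command line, builds the matching model on the
    # configured device (wrapped in DataParallel), and restores its weights
    # from the checkpoint directory.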
    def __init__(self):
        self.sift = cv2.xfeatures2d.SIFT_create(nfeatures=1000)

        parser = argparse.ArgumentParser(description="Dense Correspondence")
        parser.add_argument(
            "--config-file",
            default="",
            metavar="FILE",
            help="path to config file",
        )
        parser.add_argument(
            "opts",
            help="Modify config options using the command-line",
            default=None,
            nargs=argparse.REMAINDER,
        )

        args = parser.parse_args()

        cfg.merge_from_file(args.config_file)
        cfg.merge_from_list(args.opts)
        # cfg.freeze()

        model = build_matching_model(cfg)
        model.to(cfg.MODEL.DEVICE)
        self.model = torch.nn.DataParallel(model)

        model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
        checkpointer = Checkpointer(cfg, self.model, save_dir=model_dir)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)

        self.transform = build_transforms(cfg, is_train=False)
Example #2
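# Training setup: resolves the config from a YAML file plus command-line
# overrides, then builds the ReID model with a hard-coded class count.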
def main():
    device = torch.device('cpu')

    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="./configs/debug.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # build network
    # model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK,
    #                  cfg.TEST.NECK_FEAT, cfg.MODEL.NAME, cfg.MODEL.PRETRAIN_CHOICE,
    #                  cfg)

    num_classes = 6 * 5  # 6 cameras * 5 backgrounds per camera
    model = build_model(cfg, num_classes)
Example #3
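# Entry point that either evaluates a checkpointed matching model
# (when cfg.MODEL.TEST is set) or trains one, testing afterwards unless
# --skip-test is given.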
def main():
    parser = argparse.ArgumentParser(description="Dense Correspondence")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="Path to config file",
        type=str,
    )
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        '--test_model',
        dest='test_model',
        action='store_true'
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    if cfg.MODEL.TEST:
        # build model
        model = build_matching_model(cfg)
        model.to(cfg.MODEL.DEVICE)
        model = torch.nn.DataParallel(model)

        # load pretrained parameters
        model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
        checkpointer = Checkpointer(cfg, model, save_dir=model_dir)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)

        test(cfg, model)
    else:
        model = train(cfg)

        if not args.skip_test:
            test(cfg, model)
Example #4
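# Inference entry point: restores a trained ReID model, runs retrieval on the
# validation loader, and writes track-aware results next to the weights file.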
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file",
        default="/media/bi/Data/Mycode/car_demo/ai_city/configs/submit.yml",
        help="path to config file",
        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    # re-read meta information (this overrides the dataset returned by make_data_loader)
    dataset = VeRi(cfg.DATASETS.ROOT_DIR)
    print("test_tracks", dataset.test_tracks)
    indices_np = inference(cfg, model, val_loader, num_query, dataset)
    #write_result(indices_np, os.path.dirname(cfg.TEST.WEIGHT), topk=100)

    write_result_with_track(indices_np, os.path.dirname(cfg.TEST.WEIGHT),
                            dataset.test_tracks)
Example #5
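# Feature extraction: restores a trained ReID model, extracts features for the
# validation loader, and saves them to 'feats_extract.npy'.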
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="./configs/debug.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        # with open(args.config_file, 'r') as cf:
        #     config_str = "\n" + cf.read()
        #     logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    imgs, feats = inference_to_get_feats(cfg, model, val_loader, num_query,
                                         dataset)

    import numpy as np
    np.save('feats_extract.npy', feats.cpu().detach().numpy())
    print(imgs[:50])
Example #6
def main():
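    # Restores a trained ReID model, runs vis_actmap over the train and
    # validation loaders, and writes the collected results to 'detection.json'.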
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="./configs/debug.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    results = []
    out_dir = os.path.join('AIC_crop')
    results += vis_actmap(model, cfg, train_loader, out_dir)
    results += vis_actmap(model, cfg, val_loader, out_dir)

    with open(os.path.join(out_dir, 'detection.json'), 'w') as f:
        json.dump(results, f)
Example #7
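# Minimal launcher: load the config file, apply command-line overrides, train.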
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file',
                        default='',
                        help='Path to config file',
                        type=str)
    parser.add_argument('opts',
                        help="Modify config options using the command line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    train_net(cfg)
Example #8
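# Training entry point with logging: prepares the output directory and logger,
# restricts the visible GPUs according to the config, and starts training.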
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Training")

    parser.add_argument(
        # "--config_file", default="/media/bi/Data/Mycode/car_demo/AICity2020-VOC-ReID-7c453723e6e9179d175772921f93441cfa621dc1/configs/aicity20.yml", help="path to config file", type=str
        "--config_file",
        default="data/veri.yml",
        help="path to config file",
        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        # with open(args.config_file, 'r') as cf:
        #     config_str = "\n" + cf.read()
        #     logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # added by gu
    cudnn.benchmark = True
    train(cfg)
Example #9
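# Evaluation entry point: restores a matching model from its checkpoint and
# runs inference on the configured test dataset, saving results under the
# model directory.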
def main():
    parser = argparse.ArgumentParser(description="Dense Correspondence")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()

    model = build_matching_model(cfg)
    model.to(cfg.MODEL.DEVICE)
    model = torch.nn.DataParallel(model)

    model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
    checkpointer = Checkpointer(cfg, model, save_dir=model_dir)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    dataset_name = cfg.DATASET.TEST
    output_folder = os.path.join(model_dir, "inference", dataset_name)
    os.makedirs(output_folder, exist_ok=True)
    data_loader_val = make_data_loader(cfg, is_train=False)
    inference(
        cfg,
        model,
        data_loader_val,
        device=cfg.MODEL.DEVICE,
        output_folder=output_folder,
    )
Example #10
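# Launcher with a --test flag that dispatches to test_net or train_net.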
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file',
                        default='',
                        help='Path to config file',
                        type=str)
    parser.add_argument('--test',
                        help='Test or train model',
                        action='store_true')
    parser.add_argument('opts',
                        help="Modify config options using the command line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if args.test:
        test_net(cfg)
    else:
        train_net(cfg)
Example #11
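# Detection test entry point: initializes distributed state when launched with
# multiple processes, logs the environment and config, builds the detection
# model in eval mode, and runs the test pass unless --skip-test is given.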
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default="data/occlusion_net_train.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)

    parser.add_argument(
        "--cometml-tag",
        dest="cometml_tag",
        default="occlusion-net",
    )

    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    #model = train(cfg, args.local_rank, args.distributed)
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device).eval()

    if not args.skip_test:
        run_test(cfg, model, args.distributed)
Example #12
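# Batch detection over a list of image paths: runs COCODemo on every image,
# records boxes/scores/labels in a timestamped JSON file, and optionally saves
# keypoint visualizations.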
def main():
    """ main function """
    args = parse_args()

    config_file = args.config_file
    assert config_file

    assert args.url_list or args.url_txt or args.image_dir
    if len(args.url_list) > 0:
        url_list = args.url_list
    elif args.url_txt:
        url_list = list(np.loadtxt(args.url_txt, dtype=str))
    else:
        image_dir = args.image_dir
        url_list = [
            os.path.join(image_dir, item) for item in os.listdir(image_dir)
        ]
    save_image = bool(args.visualize)

    target = args.target
    vis_color = args.vis_color

    # update the config options with the config file
    cfg.merge_from_file(config_file)
    # manual override some options
    cfg.merge_from_list(["MODEL.DEVICE", "cuda"])

    coco_demo = COCODemo(
        cfg,
        min_image_size=args.min_test_size,
        confidence_threshold=args.confidence_threshold,
    )
    if target == 'person':
        coco_demo.CATEGORIES = ["__background", "person"]

    output_dir = cfg.OUTPUT_DIR
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    output_path = os.path.join(output_dir, now + '.json')

    record_dict = {'model': cfg.MODEL.WEIGHT, 'time': now, 'results': []}

    for url in url_list:
        if not os.path.exists(url):
            print('Image {} does not exist!'.format(url))
            continue
        img = cv2.imread(url)

        #predictions = coco_demo.compute_prediction(img)
        #top_predictions = coco_demo.select_top_predictions(predictions)
        #print(top_predictions.get_field("keypoints").Keypoints[0])
        try:
            predictions = coco_demo.compute_prediction(img)
            top_predictions = coco_demo.select_top_predictions(predictions)

            scores = top_predictions.get_field("scores")
            labels = top_predictions.get_field("labels")
            boxes = top_predictions.bbox  # take boxes from the same filtered predictions as scores/labels

            infer_result = {
                'url': url,
                'boxes': [],
                'scores': [],
                'labels': []
            }
            for box, score, label in zip(boxes, scores, labels):
                boxpoints = [item for item in box.tolist()]
                infer_result['boxes'].append(boxpoints)
                infer_result['scores'].append(score.item())
                infer_result['labels'].append(label.item())
            record_dict['results'].append(infer_result)
            # visualize the results
            if save_image:
                result = np.copy(img)
                #result = coco_demo.overlay_boxes(result, top_predictions)
                #result = coco_demo.overlay_class_names(result, top_predictions)
                if cfg.MODEL.KEYPOINT_ON:
                    if target == 'person':
                        result = coco_demo.overlay_keypoints_graph(
                            result, top_predictions, target='person')
                    if target == 'car':
                        result = coco_demo.overlay_keypoints_graph(
                            result, top_predictions, vis_color, target='car')
                cv2.imwrite(os.path.join(output_dir,
                                         url.split('/')[-1]), result)
                print(os.path.join(output_dir, url.split('/')[-1]))
        except Exception:
            print('Fail to infer for image {}. Skipped.'.format(url))
            continue
    print(now)
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    print(now)

    with open(output_path, 'w') as f:
        json.dump(record_dict, f)
Example #13
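# Dispatcher excerpt: merges the config, then looks up and calls the
# run_<args.type> function from the module globals.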
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="Path to config file",
        type=str,
    )
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER
    )
    parser.add_argument(
        "--type",
        type=str,
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    globals()["run_" + args.type](cfg)
Example #14
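# Helper that assembles the final config from a file, command-line overrides,
# and post-processing via parse_cfg.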
def make_cfg(args):
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    parse_cfg(cfg, args)
    return cfg
Example #15
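# Keypoint inference over images or a whole video: runs COCODemo frame by
# frame, records boxes/scores/labels/keypoints to JSON, and optionally writes
# an annotated video or annotated images.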
def main():
    """ main function """
    args = parse_args()
    video = False
    config_file = args.config_file
    assert config_file

    assert args.url_list or args.url_txt or args.image_dir or args.video_dir
    if len(args.url_list) > 0:
        url_list = args.url_list
    elif args.url_txt:
        url_list = list(np.loadtxt(args.url_txt, dtype=str))
    elif args.image_dir:
        image_dir = args.image_dir
        url_list = [
            os.path.join(image_dir, item) for item in os.listdir(image_dir)
        ]

    elif args.video_dir:
        video_dir = args.video_dir
        cap = cv2.VideoCapture(video_dir)
        video = True
    else:
        image_dir = args.image_dir
        url_list = [
            os.path.join(image_dir, item) for item in os.listdir(image_dir)
        ]
    save_image = bool(args.visualize)

    target = args.target
    vis_color = args.vis_color

    # update the config options with the config file
    cfg.merge_from_file(config_file)
    # manual override some options
    cfg.merge_from_list(["MODEL.DEVICE", "cuda"])

    coco_demo = COCODemo(
        cfg,
        min_image_size=args.min_test_size,
        confidence_threshold=args.confidence_threshold,
    )
    if target == 'person':
        coco_demo.CATEGORIES = ["__background", "person"]

    output_dir = cfg.OUTPUT_DIR
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    output_path = os.path.join(output_dir, now + '.json')

    record_dict = {'model': cfg.MODEL.WEIGHT, 'time': now, 'results': []}

    if (video):
        print("Processing Video")
        video_name = video_dir.split('/')[-1].split('.')[0]
        output_path = os.path.join(output_dir, video_name + '.json')
        skipped_frames_dir = output_dir + '/' + video_name + '_skipped_frames'
        if os.path.exists(skipped_frames_dir):
            shutil.rmtree(skipped_frames_dir)

        os.makedirs(skipped_frames_dir)
        width, height = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                         int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        print(width, height)
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        out = cv2.VideoWriter()
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        out.open(os.path.join(output_dir, video_name + '.mp4'), fourcc, fps,
                 (width, height), True)
        while (cap.isOpened()):
            ret, curr_frame = cap.read()
            curr_frame_number = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
            if (ret):
                try:
                    predictions = coco_demo.compute_prediction(curr_frame)
                    top_predictions = coco_demo.select_top_predictions(
                        predictions)
                    scores = top_predictions.get_field("scores")
                    labels = top_predictions.get_field("labels")

                    #take boxes from top_predictions
                    boxes = top_predictions.bbox

                    #predictions.fields() - ' ['labels', 'scores', 'keypoints']'
                    keypoints = top_predictions.get_field("keypoints")
                    scores_keypoints = keypoints.get_field("logits")

                    #kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob)
                    kps = keypoints.keypoints

                    #replaces third column of KPS with confidence value for each keypoint
                    kps_cat = torch.cat(
                        (kps[:, :, 0:2], scores_keypoints[:, :, None]),
                        dim=2).numpy()

                    infer_result = {
                        'url': curr_frame_number,
                        'boxes': [],
                        'scores': [],
                        'labels': [],
                        'keypoints': []
                    }
                    for box, score, label, keypts in zip(
                            boxes, scores, labels, kps_cat):
                        boxpoints = [item for item in box.tolist()]
                        infer_result['boxes'].append(boxpoints)
                        infer_result['scores'].append(score.item())
                        infer_result['labels'].append(label.item())
                        infer_result['keypoints'].append(keypts.tolist())
                    record_dict['results'].append(infer_result)

                    # visualize the results
                    if save_image:
                        result = np.copy(curr_frame)
                        #result = coco_demo.overlay_boxes(result, top_predictions)
                        #result = coco_demo.overlay_class_names(result, top_predictions)
                        if cfg.MODEL.KEYPOINT_ON:
                            if target == 'person':
                                result = coco_demo.overlay_keypoints_graph(
                                    result, top_predictions, target='person')
                            if target == 'car':
                                result = coco_demo.overlay_keypoints_graph(
                                    result,
                                    top_predictions,
                                    vis_color,
                                    target='car')
                                result = coco_demo.overlay_boxes(
                                    result, top_predictions)
                                result = coco_demo.overlay_class_names(
                                    result, top_predictions)
                        out.write(result)
                        print('Processed frame ' + str(curr_frame_number))
                except Exception:
                    print('Fail to infer for image {}. Skipped.'.format(
                        str(curr_frame_number)))
                    cv2.imwrite(
                        os.path.join(skipped_frames_dir,
                                     str(curr_frame_number)) + '.jpg',
                        curr_frame)
                    continue
            else:
                print('Could not read frame', str(curr_frame_number))
                #cap.set(cv2.CAP_PROP_POS_FRAMES, curr_frame_number + 1)
                break

        # release the capture and writer once the loop exits
        cap.release()
        out.release()

    print(now)
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    print(now)
    if (video):
        with open(output_path, 'w') as f:
            json.dump(record_dict, f)
            exit()

    for url in url_list:
        if not os.path.exists(url):
            print('Image {} does not exist!'.format(url))
            continue
        img = cv2.imread(url)

        #predictions = coco_demo.compute_prediction(img)
        #top_predictions = coco_demo.select_top_predictions(predictions)
        #print(top_predictions.get_field("keypoints").Keypoints[0])
        try:
            predictions = coco_demo.compute_prediction(img)
            top_predictions = coco_demo.select_top_predictions(predictions)

            scores = top_predictions.get_field("scores")
            labels = top_predictions.get_field("labels")

            #take boxes from top_predictions
            boxes = top_predictions.bbox

            keypoints = top_predictions.get_field("keypoints")
            scores_keypoints = keypoints.get_field("logits")

            #kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob)
            kps = keypoints.keypoints

            #replaces third column of KPS with confidence value for each keypoint
            kps_cat = torch.cat((kps[:, :, 0:2], scores_keypoints[:, :, None]),
                                dim=2).numpy()

            infer_result = {
                'url': url,
                'boxes': [],
                'scores': [],
                'labels': [],
                'keypoints': []
            }
            for box, score, label, keypts in zip(boxes, scores, labels,
                                                 kps_cat):
                boxpoints = [item for item in box.tolist()]
                infer_result['boxes'].append(boxpoints)
                infer_result['scores'].append(score.item())
                infer_result['labels'].append(label.item())
                infer_result['keypoints'].append(keypts.tolist())
            record_dict['results'].append(infer_result)
            # visualize the results
            if save_image:
                result = np.copy(img)
                #result = coco_demo.overlay_boxes(result, top_predictions)
                #result = coco_demo.overlay_class_names(result, top_predictions)
                if cfg.MODEL.KEYPOINT_ON:
                    if target == 'person':
                        result = coco_demo.overlay_keypoints_graph(
                            result, top_predictions, target='person')
                    if target == 'car':
                        result = coco_demo.overlay_keypoints_graph(
                            result, top_predictions, vis_color, target='car')
                        result = coco_demo.overlay_boxes(
                            result, top_predictions)
                        result = coco_demo.overlay_class_names(
                            result, top_predictions)
                cv2.imwrite(os.path.join(output_dir,
                                         url.split('/')[-1]), result)
                print(os.path.join(output_dir, url.split('/')[-1]))
        except Exception:
            print('Fail to infer for image {}. Skipped.'.format(url))
            continue
    print(now)
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    print(now)

    with open(output_path, 'w') as f:
        json.dump(record_dict, f)
Example #16
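# Cropping pipeline: restores a ReID model, uses vis_actmap to crop the
# train/query/gallery splits into --crop_path, and pickles the crop metadata.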
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="./configs/submit.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)


    parser.add_argument(
        "--pretrain_model_path", default="./dianshang/crop_half_model.pth", help="path to pretrained model weights", type=str
    )

    parser.add_argument(
        "--crop_path", default=" ", help="output directory for cropped images", type=str
    )

    args = parser.parse_args()
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    cfg.TEST.WEIGHT = args.pretrain_model_path

    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))



    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(cfg, shuffle_train=False)
    model = build_model(cfg, num_classes)
    print("load pretained weight",cfg.TEST.WEIGHT)
    # try:
    print("加载单卡训练权重")
    model.load_param(cfg.TEST.WEIGHT)


    results = []
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_query_new'
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_gallery_new'
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_query'
    out_dir = args.crop_path
    if os.path.exists(os.path.join(out_dir, 'crop_train')):
        print("output folders already exist")
    else:
        os.makedirs(os.path.join(out_dir, 'crop_train'))
        os.makedirs(os.path.join(out_dir, 'crop_query'))
        os.makedirs(os.path.join(out_dir, 'crop_gallery'))

    crop_train, crop_query, crop_gallery = vis_actmap(model, cfg, train_loader, out_dir)
    pickle.dump(crop_train, open(os.path.join(out_dir, 'train_crop_img_add.pkl'), 'wb'))
    crop_train, crop_query, crop_gallery = vis_actmap(model, cfg, val_loader, out_dir)
    pickle.dump((crop_query, crop_gallery), open(os.path.join(out_dir, 'test_crop_img_add.pkl'), 'wb'))