Example #1
    def __init__(self):
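        # note: OpenCV 4.4+ moved SIFT out of the contrib module; there it is cv2.SIFT_create(...)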
        self.sift = cv2.xfeatures2d.SIFT_create(nfeatures=1000)

        parser = argparse.ArgumentParser(description="Dense Correspondence")
        parser.add_argument(
            "--config-file",
            default="",
            metavar="FILE",
            help="path to config file",
        )
        parser.add_argument(
            "opts",
            help="Modify config options using the command-line",
            default=None,
            nargs=argparse.REMAINDER,
        )

        args = parser.parse_args()

        cfg.merge_from_file(args.config_file)
        cfg.merge_from_list(args.opts)
        # cfg.freeze()

        model = build_matching_model(cfg)
        model.to(cfg.MODEL.DEVICE)
        self.model = torch.nn.DataParallel(model)

        model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
        checkpointer = Checkpointer(cfg, self.model, save_dir=model_dir)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)

        self.transform = build_transforms(cfg, is_train=False)
Example #2
def main():
    device = torch.device('cpu')

    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="./configs/debug.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # build network
    # model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK,
    #                  cfg.TEST.NECK_FEAT, cfg.MODEL.NAME, cfg.MODEL.PRETRAIN_CHOICE,
    #                  cfg)

    num_classes = 6 * 5  # 6 cameras x 5 backgrounds per camera
    model = build_model(cfg, num_classes)
Example #3
def main():
    ''' parse config file '''
    parser = argparse.ArgumentParser(
        description="Graph Reasoning Machine for Visual Question Answering")
    parser.add_argument("--config-file",
                        default="configs/baseline_res101.yaml")
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--session", type=int, default=0)
    parser.add_argument("--resume", type=int, default=0)
    parser.add_argument("--batchsize", type=int, default=0)
    parser.add_argument("--inference", action='store_true')
    parser.add_argument("--produce", action='store_true')
    parser.add_argument("--instance", type=int, default=-1)
    parser.add_argument("--use_freq_prior", action='store_true')
    parser.add_argument("--visualize", action='store_true')
    parser.add_argument("--algorithm", type=str, default='sg_baseline')
    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.resume = args.resume
    cfg.instance = args.instance
    cfg.inference = args.inference
    cfg.MODEL.USE_FREQ_PRIOR = args.use_freq_prior
    cfg.MODEL.ALGORITHM = args.algorithm
    if args.batchsize > 0:
        cfg.DATASET.TRAIN_BATCH_SIZE = args.batchsize
    if args.session > 0:
        cfg.MODEL.SESSION = str(args.session)
    # cfg.freeze()

    if not os.path.exists("logs") and get_rank() == 0:
        os.mkdir("logs")
    logger = setup_logger("scene_graph_generation",
                          "logs",
                          get_rank(),
                          filename="{}_{}.txt".format(args.algorithm,
                                                      get_timestamp()))
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    output_config_path = os.path.join("logs", 'config.yml')
    logger.info("Saving config into: {}".format(output_config_path))
    save_config(cfg, output_config_path)

    if args.produce:
        produce(cfg, args)
    elif not args.inference:
        model = train(cfg, args)
    else:
        test(cfg, args)
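
The WORLD_SIZE / --local_rank handshake above is the standard torch.distributed.launch setup. A minimal self-contained sketch of just that part (the helper name init_distributed is ours; everything else is plain PyTorch):

import argparse
import os

import torch

def init_distributed():
    # torch.distributed.launch (or torchrun) exports WORLD_SIZE and passes
    # --local_rank; a plain "python script.py" run stays single-process
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()
    args.distributed = int(os.environ.get("WORLD_SIZE", 1)) > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    return args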
Example #4
def main():
    parser = argparse.ArgumentParser(description="Dense Correspondence")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="Path to config file",
        type=str,
    )
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        '--test_model',
        dest='test_model',
        action='store_true'
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    if cfg.MODEL.TEST:
        # build model
        model = build_matching_model(cfg)
        model.to(cfg.MODEL.DEVICE)
        model = torch.nn.DataParallel(model)

        # load pretrained parameters
        model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
        checkpointer = Checkpointer(cfg, model, save_dir=model_dir)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)

        test(cfg, model)
    else:
        model = train(cfg)

        if not args.skip_test:
            test(cfg, model)
Example #5
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file",
        default="/media/bi/Data/Mycode/car_demo/ai_city/configs/submit.yml",
        help="path to config file",
        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    ## read meta information
    dataset = VeRi(cfg.DATASETS.ROOT_DIR)
    print("test_tracks", dataset.test_tracks)
    indices_np = inference(cfg, model, val_loader, num_query, dataset)
    #write_result(indices_np, os.path.dirname(cfg.TEST.WEIGHT), topk=100)

    write_result_with_track(indices_np, os.path.dirname(cfg.TEST.WEIGHT),
                            dataset.test_tracks)
Example #6
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="./configs/debug.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        # with open(args.config_file, 'r') as cf:
        #     config_str = "\n" + cf.read()
        #     logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    imgs, feats = inference_to_get_feats(cfg, model, val_loader, num_query,
                                         dataset)

    import numpy as np
    np.save('feats_extract.npy', feats.cpu().detach().numpy())
    print(imgs[:50])
Example #7
def parse_config_args(exp_name):
    parser = argparse.ArgumentParser(description=exp_name)
    parser.add_argument(
        '--cfg',
        type=str,
        default='../experiments/workspace/retrain/retrain.yaml',
        help='configuration of cream')
    parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
    args = parser.parse_args()

    cfg.merge_from_file(args.cfg)
    converted_cfg = convert_lowercase(cfg)

    return args, converted_cfg
Example #8
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="./configs/debug.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    results = []
    out_dir = 'AIC_crop'
    results += vis_actmap(model, cfg, train_loader, out_dir)
    results += vis_actmap(model, cfg, val_loader, out_dir)

    with open(os.path.join(out_dir, 'detection.json'), 'w') as f:
        json.dump(results, f)
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file',
                        default='',
                        help='Path to config file',
                        type=str)
    parser.add_argument('opts',
                        help="Modify config options using the command line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    train_net(cfg)
Example #10
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Training")

    parser.add_argument(
        # "--config_file", default="/media/bi/Data/Mycode/car_demo/AICity2020-VOC-ReID-7c453723e6e9179d175772921f93441cfa621dc1/configs/aicity20.yml", help="path to config file", type=str
        "--config_file",
        default="data/veri.yml",
        help="path to config file",
        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        # with open(args.config_file, 'r') as cf:
        #     config_str = "\n" + cf.read()
        #     logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # added by gu
    cudnn.benchmark = True
    train(cfg)
Example #11
def main():
    parser = argparse.ArgumentParser(description="Dense Correspondence")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()

    model = build_matching_model(cfg)
    model.to(cfg.MODEL.DEVICE)
    model = torch.nn.DataParallel(model)

    model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
    checkpointer = Checkpointer(cfg, model, save_dir=model_dir)
    _ = checkpointer.load(cfg.MODEL.WEIGHT)

    dataset_name = cfg.DATASET.TEST
    output_folder = os.path.join(model_dir, "inference", dataset_name)
    os.makedirs(output_folder, exist_ok=True)
    data_loader_val = make_data_loader(cfg, is_train=False)
    inference(
        cfg,
        model,
        data_loader_val,
        device=cfg.MODEL.DEVICE,
        output_folder=output_folder,
    )
Example #12
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file',
                        default='',
                        help='Path to config file',
                        type=str)
    parser.add_argument('--test',
                        help='Test or train model',
                        action='store_true')
    parser.add_argument('opts',
                        help="Modify config options using the command line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if args.test:
        test_net(cfg)
    else:
        train_net(cfg)
Example #13
import _init_paths

from lib.config import cfg

parser = argparse.ArgumentParser(description='Cream of the Crop')
parser.add_argument('mode',
                    type=str,
                    default='train',
                    help='Mode in ["train", "retrain", "test"]')
parser.add_argument('cfg',
                    type=str,
                    default='../experiments/configs/baseline.yaml',
                    help='configuration of cream')
args = parser.parse_args()
cfg.merge_from_file(args.cfg)


def main():
    date = datetime.date.today().strftime('%m%d')
    save_path = os.path.join(cfg.SAVE_PATH, "{}-{}".format(date, cfg.MODEL))
    if not os.path.exists(save_path):
        os.mkdir(save_path)

    os.system("cp {} {}".format(args.cfg, os.path.join(save_path,
                                                       'config.yaml')))

    if args.mode == 'train':
        os.system("python -m "
                  "torch.distributed.launch "
                  "--nproc_per_node={} "
Example #14
def collect_sgg_features(dataset, buffer_size=1):
    sgg_cfg.merge_from_file(os.path.join(sgg_cfg_file))
    sgg_cfg.inference = True
    sgg_cfg.instance = -1
    sgg_cfg.resume = 1
    trans = build_transforms(sgg_cfg, is_train=False)
    scene_parser = build_scene_parser(sgg_cfg)
    scene_parser.to(device)
    scene_parser.rel_heads.rel_predictor.obj_predictor.register_forward_hook(get_input_hook)
    scene_parser.rel_heads.rel_predictor.pred_predictor.register_forward_hook(get_input_hook)
    checkpoint = torch.load(sgg_weight_file)
    if "model" not in checkpoint:
        checkpoint = dict(model=checkpoint)
    load_state_dict(scene_parser, checkpoint.pop("model"))
    scene_parser.eval()
    # create dataloader to loop over the dataset
    start_ = 0
    for _ in range(int(np.ceil(len(dataset)/buffer_size))):
        bs = len(dataset)-start_ if start_+buffer_size > len(dataset) else buffer_size
        buffer = {
            'object_features': np.zeros((bs, 100, 512), dtype=np.float32),
            'relation_features': np.zeros((bs, 2500, 512),
                                          dtype=np.float32),
            # np.int was removed in NumPy 1.24; use an explicit integer dtype
            'object_mask': np.zeros((bs, 100), dtype=np.int64),
            'relation_mask': np.zeros((bs, 2500), dtype=np.int64),
            'object_labels': np.zeros((bs, 100), dtype=np.int64),
            'relation_labels': np.zeros((bs, 2500), dtype=np.int64),
            'object_boxes': np.zeros((bs, 100, 4), dtype=np.float32),
            'relation_boxes': np.zeros((bs, 2500, 8), dtype=np.float32),
            'relation_pairs': np.zeros((bs, 2500, 2), dtype=np.int64),
            'ids': [],
            'num_rels': 0
        }
        max_rels = 0
        for b in range(bs):
            image_data = dataset[start_+b]
            if image_data['image'].mode == 'L':
                image_data['image'] = image_data['image'].convert("RGB")
            image, _ = trans(image_data['image'], image_data['image'])
            boxes, rel_boxes = scene_parser(image.to(device))
            boxes, rel_boxes = boxes[0], rel_boxes[0]
            rel_labels = rel_boxes.get_field('scores').argmax(dim=1)
            indices = rel_labels.nonzero(as_tuple=True)
            object_features = scene_parser.rel_heads.rel_predictor.obj_predictor._input_value_hook[0]\
                .squeeze().detach().cpu().numpy()
            relation_features = scene_parser.rel_heads.rel_predictor.pred_predictor._input_value_hook[0][indices]\
                .squeeze().detach().cpu().numpy()
            num_obj = object_features.shape[0]
            num_rels = relation_features.shape[0]
            if num_rels > max_rels:
                max_rels = num_rels
            # add features to buffer
            buffer['object_features'][b, :num_obj] = object_features
            buffer['relation_features'][b, :num_rels] = relation_features
            buffer['object_labels'][b, :num_obj] = boxes.get_field('labels').detach().cpu().numpy()
            buffer['relation_labels'][b, :num_rels] = rel_labels[indices].detach().cpu().numpy()
            buffer['object_mask'][b, :num_obj] = 1
            buffer['relation_mask'][b, :num_rels] = 1
            buffer['object_boxes'][b, :num_obj] = boxes.bbox.detach().cpu().numpy()
            buffer['relation_boxes'][b, :num_rels] = rel_boxes.bbox[indices].detach().cpu().numpy()
            buffer['relation_pairs'][b, :num_rels] = rel_boxes.get_field('idx_pairs')[indices].detach().cpu().numpy()
            buffer['ids'].append(image_data['id'])
        # when buffer_obj is full, return it
        start_ += bs
        buffer['num_rels'] = max_rels
        yield buffer
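
Example #14 registers get_input_hook as a forward hook but never shows its definition. Given the later reads of module._input_value_hook[0], a minimal plausible definition (an assumption, not the original code) would be:

def get_input_hook(module, inputs, output):
    # forward hooks receive (module, inputs, output); stash the positional
    # inputs on the module so they can be read back after the forward pass
    module._input_value_hook = inputs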
Example #15
        "--config-file",
        default="",
        metavar="FILE",
        help="Path to config file",
        type=str,
    )
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER
    )
    parser.add_argument(
        "--type",
        type=str,
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    globals()["run_" + args.type](cfg)
Example #16
def make_cfg(args):
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    parse_cfg(cfg, args)
    return cfg
Example #17
def update_config(cfg, yamlfilename):
    cfg.defrost()
    cfg.merge_from_file(yamlfilename)
    cfg.TEST.MODEL_FILE = osp.join(cfg.DATA_DIR, cfg.TEST.MODEL_FILE)
    cfg.freeze()
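
Every example here assumes a module-level yacs CfgNode named cfg. A minimal self-contained sketch of the merge/override/freeze pattern they share (the node names and the get_cfg helper are illustrative, not from any specific project):

from yacs.config import CfgNode as CN

cfg = CN()
cfg.OUTPUT_DIR = "./output"
cfg.MODEL = CN()
cfg.MODEL.DEVICE = "cuda"
cfg.MODEL.WEIGHT = ""

def get_cfg(config_file="", opts=None):
    c = cfg.clone()                     # keep the module-level defaults pristine
    if config_file:
        c.merge_from_file(config_file)  # YAML file overrides the defaults
    if opts:
        c.merge_from_list(opts)         # e.g. ["MODEL.DEVICE", "cpu"]
    c.freeze()                          # later writes raise; defrost() re-enables them
    return c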
Example #18
def main():
    """ main function """
    args = parse_args()

    config_file = args.config_file
    assert config_file

    assert args.url_list or args.url_txt or args.image_dir
    if len(args.url_list) > 0:
        url_list = args.url_list
    elif args.url_txt:
        url_list = list(np.loadtxt(args.url_txt, dtype=str))
    else:
        image_dir = args.image_dir
        url_list = [
            os.path.join(image_dir, item) for item in os.listdir(image_dir)
        ]
    save_image = bool(args.visualize)

    target = args.target
    vis_color = args.vis_color

    # update the config options with the config file
    cfg.merge_from_file(config_file)
    # manual override some options
    cfg.merge_from_list(["MODEL.DEVICE", "cuda"])

    coco_demo = COCODemo(
        cfg,
        min_image_size=args.min_test_size,
        confidence_threshold=args.confidence_threshold,
    )
    if target == 'person':
        coco_demo.CATEGORIES = ["__background", "person"]

    output_dir = cfg.OUTPUT_DIR
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    output_path = os.path.join(output_dir, now + '.json')

    record_dict = {'model': cfg.MODEL.WEIGHT, 'time': now, 'results': []}

    for url in url_list:
        if not os.path.exists(url):
            print('Image {} does not exist!'.format(url))
            continue
        img = cv2.imread(url)

        #predictions = coco_demo.compute_prediction(img)
        #top_predictions = coco_demo.select_top_predictions(predictions)
        #print(top_predictions.get_field("keypoints").Keypoints[0])
        try:
            #if 2>1:
            predictions = coco_demo.compute_prediction(img)
            top_predictions = coco_demo.select_top_predictions(predictions)

            scores = top_predictions.get_field("scores")
            labels = top_predictions.get_field("labels")
            boxes = top_predictions.bbox

            infer_result = {
                'url': url,
                'boxes': [],
                'scores': [],
                'labels': []
            }
            for box, score, label in zip(boxes, scores, labels):
                boxpoints = box.tolist()
                infer_result['boxes'].append(boxpoints)
                infer_result['scores'].append(score.item())
                infer_result['labels'].append(label.item())
            record_dict['results'].append(infer_result)
            # visualize the results
            if save_image:
                result = np.copy(img)
                #result = coco_demo.overlay_boxes(result, top_predictions)
                #result = coco_demo.overlay_class_names(result, top_predictions)
                if cfg.MODEL.KEYPOINT_ON:
                    if target == 'person':
                        result = coco_demo.overlay_keypoints_graph(
                            result, top_predictions, target='person')
                    if target == 'car':
                        result = coco_demo.overlay_keypoints_graph(
                            result, top_predictions, vis_color, target='car')
                cv2.imwrite(os.path.join(output_dir,
                                         url.split('/')[-1]), result)
                print(os.path.join(output_dir, url.split('/')[-1]))
        except Exception:
            print('Fail to infer for image {}. Skipped.'.format(url))
            continue
    print(now)
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    print(now)

    with open(output_path, 'w') as f:
        json.dump(record_dict, f)
Example #19
def main():
    """ main function """
    args = parse_args()
    video = False
    config_file = args.config_file
    assert config_file

    assert args.url_list or args.url_txt or args.image_dir or args.video_dir
    if len(args.url_list) > 0:
        url_list = args.url_list
    elif args.url_txt:
        url_list = list(np.loadtxt(args.url_txt, dtype=str))
    elif args.image_dir:
        image_dir = args.image_dir
        url_list = [
            os.path.join(image_dir, item) for item in os.listdir(image_dir)
        ]

    elif args.video_dir:
        video_dir = args.video_dir
        cap = cv2.VideoCapture(video_dir)
        video = True
    else:
        image_dir = args.image_dir
        url_list = [
            os.path.join(image_dir, item) for item in os.listdir(image_dir)
        ]
    save_image = bool(args.visualize)

    target = args.target
    vis_color = args.vis_color

    # update the config options with the config file
    cfg.merge_from_file(config_file)
    # manual override some options
    cfg.merge_from_list(["MODEL.DEVICE", "cuda"])

    coco_demo = COCODemo(
        cfg,
        min_image_size=args.min_test_size,
        confidence_threshold=args.confidence_threshold,
    )
    if target == 'person':
        coco_demo.CATEGORIES = ["__background", "person"]

    output_dir = cfg.OUTPUT_DIR
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    output_path = os.path.join(output_dir, now + '.json')

    record_dict = {'model': cfg.MODEL.WEIGHT, 'time': now, 'results': []}

    if (video):
        print("Processing Video")
        video_name = video_dir.split('/')[-1].split('.')[0]
        output_path = os.path.join(output_dir, video_name + '.json')
        skipped_frames_dir = output_dir + '/' + video_name + '_skipped_frames'
        if os.path.exists(skipped_frames_dir):
            shutil.rmtree(skipped_frames_dir)

        os.makedirs(skipped_frames_dir)
        width, height = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                         int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        print(width, height)
        fps = int(cap.get(cv2.CAP_PROP_FPS))
        out = cv2.VideoWriter()
        fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
        out.open(os.path.join(output_dir, video_name + '.mp4'), fourcc, fps,
                 (width, height), True)
        while (cap.isOpened()):
            ret, curr_frame = cap.read()
            curr_frame_number = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
            if (ret):
                try:
                    #if 2>1:
                    predictions = coco_demo.compute_prediction(curr_frame)
                    top_predictions = coco_demo.select_top_predictions(
                        predictions)
                    scores = top_predictions.get_field("scores")
                    labels = top_predictions.get_field("labels")

                    #take boxes from top_predictions
                    boxes = top_predictions.bbox

                    #predictions.fields() - ' ['labels', 'scores', 'keypoints']'
                    keypoints = top_predictions.get_field("keypoints")
                    scores_keypoints = keypoints.get_field("logits")

                    # kps has shape (N, K, 4); the last dim is (x, y, logit, prob)
                    kps = keypoints.keypoints

                    #replaces third column of KPS with confidence value for each keypoint
                    kps_cat = torch.cat(
                        (kps[:, :, 0:2], scores_keypoints[:, :, None]),
                        dim=2).numpy()

                    infer_result = {
                        'url': curr_frame_number,
                        'boxes': [],
                        'scores': [],
                        'labels': [],
                        'keypoints': []
                    }
                    for box, score, label, keypts in zip(
                            boxes, scores, labels, kps_cat):
                        boxpoints = box.tolist()
                        infer_result['boxes'].append(boxpoints)
                        infer_result['scores'].append(score.item())
                        infer_result['labels'].append(label.item())
                        infer_result['keypoints'].append(keypts.tolist())
                    record_dict['results'].append(infer_result)

                    # visualize the results
                    if save_image:
                        result = np.copy(curr_frame)
                        #result = coco_demo.overlay_boxes(result, top_predictions)
                        #result = coco_demo.overlay_class_names(result, top_predictions)
                        if cfg.MODEL.KEYPOINT_ON:
                            if target == 'person':
                                result = coco_demo.overlay_keypoints_graph(
                                    result, top_predictions, target='person')
                            if target == 'car':
                                result = coco_demo.overlay_keypoints_graph(
                                    result,
                                    top_predictions,
                                    vis_color,
                                    target='car')
                                result = coco_demo.overlay_boxes(
                                    result, top_predictions)
                                result = coco_demo.overlay_class_names(
                                    result, top_predictions)
                        out.write(result)
                        print('Processed frame ' + str(curr_frame_number))
                except Exception:
                    print('Fail to infer for image {}. Skipped.'.format(
                        str(curr_frame_number)))
                    cv2.imwrite(
                        os.path.join(skipped_frames_dir,
                                     str(curr_frame_number)) + '.jpg',
                        curr_frame)
                    continue
            else:
                print('Could not read frame', str(curr_frame_number))
                #cap.set(cv2.CAP_PROP_POS_FRAMES, curr_frame_number + 1)
                break
        # release the capture and writer once the loop ends; in the original
        # these calls sat after the break and were never reached
        cap.release()
        out.release()

    print(now)
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    print(now)
    if (video):
        with open(output_path, 'w') as f:
            json.dump(record_dict, f)
            exit()

    for url in url_list:
        if not os.path.exists(url):
            print('Image {} does not exist!'.format(url))
            continue
        img = cv2.imread(url)

        #predictions = coco_demo.compute_prediction(img)
        #top_predictions = coco_demo.select_top_predictions(predictions)
        #print(top_predictions.get_field("keypoints").Keypoints[0])
        try:
            #if 2>1:
            predictions = coco_demo.compute_prediction(img)
            top_predictions = coco_demo.select_top_predictions(predictions)

            scores = top_predictions.get_field("scores")
            labels = top_predictions.get_field("labels")

            #take boxes from top_predictions
            boxes = top_predictions.bbox

            keypoints = top_predictions.get_field("keypoints")
            scores_keypoints = keypoints.get_field("logits")

            # kps has shape (N, K, 4); the last dim is (x, y, logit, prob)
            kps = keypoints.keypoints

            #replaces third column of KPS with confidence value for each keypoint
            kps_cat = torch.cat((kps[:, :, 0:2], scores_keypoints[:, :, None]),
                                dim=2).numpy()

            infer_result = {
                'url': url,
                'boxes': [],
                'scores': [],
                'labels': [],
                'keypoints': []
            }
            for box, score, label, keypts in zip(boxes, scores, labels,
                                                 kps_cat):
                boxpoints = box.tolist()
                infer_result['boxes'].append(boxpoints)
                infer_result['scores'].append(score.item())
                infer_result['labels'].append(label.item())
                infer_result['keypoints'].append(keypts.tolist())
            record_dict['results'].append(infer_result)
            # visualize the results
            if save_image:
                result = np.copy(img)
                #result = coco_demo.overlay_boxes(result, top_predictions)
                #result = coco_demo.overlay_class_names(result, top_predictions)
                if cfg.MODEL.KEYPOINT_ON:
                    if target == 'person':
                        result = coco_demo.overlay_keypoints_graph(
                            result, top_predictions, target='person')
                    if target == 'car':
                        result = coco_demo.overlay_keypoints_graph(
                            result, top_predictions, vis_color, target='car')
                        result = coco_demo.overlay_boxes(
                            result, top_predictions)
                        result = coco_demo.overlay_class_names(
                            result, top_predictions)
                cv2.imwrite(os.path.join(output_dir,
                                         url.split('/')[-1]), result)
                print(os.path.join(output_dir, url.split('/')[-1]))
        except Exception:
            print('Fail to infer for image {}. Skipped.'.format(url))
            continue
    print(now)
    now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    print(now)

    with open(output_path, 'w') as f:
        json.dump(record_dict, f)
Example #20
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default="data/occlusion_net_train.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)

    parser.add_argument(
        "--cometml-tag",
        dest="cometml_tag",
        default="occlusion-net",
    )

    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    #model = train(cfg, args.local_rank, args.distributed)
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device).eval()

    if not args.skip_test:
        run_test(cfg, model, args.distributed)
Example #21
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="./configs/submit.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)


    parser.add_argument(
        "--pretrain_model_path", default="./dianshang/crop_half_model.pth", help="path to pretrained model weights", type=str
    )

    parser.add_argument(
        "--crop_path", default=" ", help="output directory for cropped images", type=str
    )

    args = parser.parse_args()
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    cfg.TEST.WEIGHT = args.pretrain_model_path

    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))



    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(cfg, shuffle_train=False)
    model = build_model(cfg, num_classes)
    print("load pretained weight",cfg.TEST.WEIGHT)
    # try:
    print("加载单卡训练权重")
    model.load_param(cfg.TEST.WEIGHT)


    results = []
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_query_new'
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_gallery_new'
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_query'
    out_dir = args.crop_path
    if os.path.exists(os.path.join(out_dir, 'crop_train')):
        print("directory already exists")
    else:
        os.makedirs(os.path.join(out_dir, 'crop_train'))
        os.makedirs(os.path.join(out_dir, 'crop_query'))
        os.makedirs(os.path.join(out_dir, 'crop_gallery'))

    crop_train, crop_query, crop_gallery = vis_actmap(model, cfg, train_loader, out_dir)
    pickle.dump(crop_train, open(os.path.join(out_dir, 'train_crop_img_add.pkl'), 'wb'))
    crop_train, crop_query, crop_gallery = vis_actmap(model, cfg, val_loader, out_dir)
    pickle.dump((crop_query, crop_gallery), open(os.path.join(out_dir, 'test_crop_img_add.pkl'), 'wb'))