Example #1
def main():
    device = torch.device('cpu')

    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="./configs/debug.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # build network
    # model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK,
    #                  cfg.TEST.NECK_FEAT, cfg.MODEL.NAME, cfg.MODEL.PRETRAIN_CHOICE,
    #                  cfg)

    num_classes = 6 * 5   # 6 cameras x 5 backgrounds per camera
    model = build_model(cfg, num_classes)
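
All of these examples share the same yacs-style configuration flow: a global cfg node is merged with a YAML file and command-line overrides, then frozen. A minimal self-contained sketch of that pattern, assuming the yacs package (the keys and "demo.yml" are illustrative, not from the snippets):

# Minimal yacs configuration flow; keys and "demo.yml" are assumptions.
from yacs.config import CfgNode as CN

_C = CN()
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"
_C.OUTPUT_DIR = "./output"

cfg = _C.clone()
# cfg.merge_from_file("demo.yml")               # overlay a YAML config if present
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])    # command-line style overrides
cfg.freeze()                                    # config is read-only from here on
print(cfg.MODEL.DEVICE)                         # -> "cpu"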
Example #2
def main():
    parser = argparse.ArgumentParser(description="Dense Correspondence")
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="Path to config file",
        type=str,
    )
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        '--test_model',
        dest='test_model',
        action='store_true'
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)

    if cfg.MODEL.TEST:
        # build model
        model = build_matching_model(cfg)
        model.to(cfg.MODEL.DEVICE)
        model = torch.nn.DataParallel(model)

        # load pretrained parameters
        model_dir = os.path.join(cfg.MODEL_DIR, cfg.MODEL.NAME)
        checkpointer = Checkpointer(cfg, model, save_dir=model_dir)
        _ = checkpointer.load(cfg.MODEL.WEIGHT)

        test(cfg, model)
    else:
        model = train(cfg)

        if not args.skip_test:
            test(cfg, model)
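
Example #2 wraps the model in torch.nn.DataParallel before loading weights. A minimal sketch of the wrap/unwrap pattern, with a stand-in module (Net is illustrative):

import torch
import torch.nn as nn

class Net(nn.Module):
    # Stand-in for the real matching model.
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(8, 2)

    def forward(self, x):
        return self.fc(x)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = Net().to(device)
model = nn.DataParallel(model)      # replicates across visible GPUs per forward
out = model(torch.randn(4, 8).to(device))
state = model.module.state_dict()   # .module recovers the wrapped network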
Example #3
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file",
        default="/media/bi/Data/Mycode/car_demo/ai_city/configs/submit.yml",
        help="path to config file",
        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    ## read meta information
    dataset = VeRi(cfg.DATASETS.ROOT_DIR)
    print("test_tracks", dataset.test_tracks)
    indices_np = inference(cfg, model, val_loader, num_query, dataset)
    #write_result(indices_np, os.path.dirname(cfg.TEST.WEIGHT), topk=100)

    write_result_with_track(indices_np, os.path.dirname(cfg.TEST.WEIGHT),
                            dataset.test_tracks)
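
The num_gpus computation repeated across these examples reads the WORLD_SIZE variable set by the distributed launcher; an equivalent, slightly tighter form:

import os

# WORLD_SIZE is set by torch.distributed.launch / torchrun; default to 1.
num_gpus = int(os.environ.get("WORLD_SIZE", "1"))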
Example #4
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="./configs/debug.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        # with open(args.config_file, 'r') as cf:
        #     config_str = "\n" + cf.read()
        #     logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    imgs, feats = inference_to_get_feats(cfg, model, val_loader, num_query,
                                         dataset)

    import numpy as np
    np.save('feats_extract.npy', feats.cpu().detach().numpy())
    print(imgs[:50])
Example #5
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="./configs/debug.yml",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(
        cfg)
    model = build_model(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    results = []
    out_dir = 'AIC_crop'
    results += vis_actmap(model, cfg, train_loader, out_dir)
    results += vis_actmap(model, cfg, val_loader, out_dir)

    with open(os.path.join(out_dir, 'detection.json'), 'w') as f:
        json.dump(results, f)
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file',
                        default='',
                        help='Path to config file',
                        type=str)
    parser.add_argument('opts',
                        help="Modify config options using the command line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    train_net(cfg)
Example #7
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Training")

    parser.add_argument(
        # "--config_file", default="/media/bi/Data/Mycode/car_demo/AICity2020-VOC-ReID-7c453723e6e9179d175772921f93441cfa621dc1/configs/aicity20.yml", help="path to config file", type=str
        "--config_file",
        default="data/veri.yml",
        help="path to config file",
        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        # with open(args.config_file, 'r') as cf:
        #     config_str = "\n" + cf.read()
        #     logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # added by gu
    cudnn.benchmark = True
    train(cfg)
Example #8
def main():
    ''' parse config file '''
    parser = argparse.ArgumentParser(description="Graph Reasoning Machine for Visual Question Answering")
    parser.add_argument("--config-file", default="configs/baseline_res101.yaml")
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--resume", type=int, default=0)
    parser.add_argument("--inference", action='store_true')
    parser.add_argument("--instance", type=int, default=-1)
    parser.add_argument("--use_freq_prior", action='store_true')
    parser.add_argument("--visualize", action='store_true')
    parser.add_argument("--algorithm", type=str, default='sg_baseline')
    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.resume = args.resume
    cfg.instance = args.instance
    cfg.inference = args.inference
    cfg.MODEL.USE_FREQ_PRIOR = args.use_freq_prior
    cfg.MODEL.ALGORITHM = args.algorithm
    cfg.freeze()

    if not os.path.exists("logs") and get_rank() == 0:
        os.mkdir("logs")
    logger = setup_logger("scene_graph_generation", "logs", get_rank())
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    output_config_path = os.path.join("logs", 'config.yml')
    logger.info("Saving config into: {}".format(output_config_path))
    save_config(cfg, output_config_path)

    if not args.inference:
        model = train(cfg, args)
    else:
        test(cfg, args)
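
Example #8 shows the standard multi-process bootstrap: pin the process to its local GPU, then initialize the NCCL process group from environment variables. A minimal sketch of the same calls (reading LOCAL_RANK from the environment is an assumption; the original passes it as --local_rank):

import os
import torch
import torch.distributed as dist

# Only initialize when launched by a distributed launcher.
if int(os.environ.get("WORLD_SIZE", "1")) > 1:
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)           # one process per GPU
    dist.init_process_group(backend="nccl", init_method="env://")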
Example #9
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file',
                        default='',
                        help='Path to config file',
                        type=str)
    parser.add_argument('--test',
                        help='Test or train model',
                        action='store_true')
    parser.add_argument('opts',
                        help="Modify config options using the command line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if args.test:
        test_net(cfg)
    else:
        train_net(cfg)
Example #10
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Training")
    parser.add_argument(
        "--config-file",
        default="data/occlusion_net_train.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--local_rank", type=int, default=0)

    parser.add_argument(
        "--cometml-tag",
        dest="cometml_tag",
        default="occlusion-net",
    )

    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )
    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir:
        mkdir(output_dir)

    logger = setup_logger("maskrcnn_benchmark", output_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    logger.info("Loaded configuration file {}".format(args.config_file))
    with open(args.config_file, "r") as cf:
        config_str = "\n" + cf.read()
        logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    #model = train(cfg, args.local_rank, args.distributed)
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device).eval()

    if not args.skip_test:
        run_test(cfg, model, args.distributed)
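
Example #10 puts the freshly built model into evaluation mode before testing; disabling gradient tracking is the usual companion. A self-contained sketch of that inference pattern (the tiny model and random batch are placeholders):

import torch
import torch.nn as nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2)).to(device)
model.eval()                        # eval-mode behavior for dropout/batchnorm
with torch.no_grad():               # no autograd bookkeeping during testing
    batch = torch.randn(16, 8, device=device)
    preds = model(batch).argmax(dim=1)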
Example #11
def main():
    print('Load config...')
    args = parse_args()
    update_config(cfg, args)

    # cfg.defrost()
    # cfg.RANK = args.ranka
    # cfg.freeze()
    # device allocation

    print('Set device...')
    #print(cfg.GPUS)
    #os.environ["CUDA_VISIBLE_DEVICES"] = cfg.GPUS
    #device = torch.device('cuda')
    torch.cuda.set_device(cfg.GPUS[0])
    device = torch.device('cuda:' + str(cfg.GPUS[0]))

    print("Build dataloader ...")
    # load texture
    if cfg.DATASET.TEX_PATH:
        texture_init = cv2.cvtColor(cv2.imread(cfg.DATASET.TEX_PATH),
                                    cv2.COLOR_BGR2RGB)
        texture_init_resize = cv2.resize(
            texture_init,
            (cfg.MODEL.TEX_MAPPER.NUM_SIZE, cfg.MODEL.TEX_MAPPER.NUM_SIZE),
            interpolation=cv2.INTER_AREA).astype(np.float32) / 255.0
        texture_init_use = torch.from_numpy(texture_init_resize).to(device)
    # dataset for training views
    view_dataset = dataio.ViewDataset(
        cfg=cfg,
        root_dir=cfg.DATASET.ROOT,
        calib_path=cfg.DATASET.CALIB_PATH,
        calib_format=cfg.DATASET.CALIB_FORMAT,
        sampling_pattern=cfg.TRAIN.SAMPLING_PATTERN,
        precomp_high_dir=cfg.DATASET.PRECOMP_DIR,
        precomp_low_dir=cfg.DATASET.PRECOMP_DIR,
        preset_uv_path=cfg.DATASET.UV_PATH,
    )
    # dataset for validation views
    view_val_dataset = dataio.ViewDataset(
        cfg=cfg,
        root_dir=cfg.DATASET.ROOT,
        calib_path=cfg.DATASET.CALIB_PATH,
        calib_format=cfg.DATASET.CALIB_FORMAT,
        sampling_pattern=cfg.TRAIN.SAMPLING_PATTERN_VAL,
        precomp_high_dir=cfg.DATASET.PRECOMP_DIR,
        precomp_low_dir=cfg.DATASET.PRECOMP_DIR,
    )
    num_view_val = len(view_val_dataset)

    print('Build Network...')
    # Rasterizer
    cur_obj_path = ''
    if not cfg.DATASET.LOAD_PRECOMPUTE:
        view_data = view_dataset.read_view(0)
        cur_obj_path = view_data['obj_path']
        frame_idx = view_data['f_idx']
        obj_data = view_dataset.objs[frame_idx]
        rasterizer = network.Rasterizer(
            cfg,
            obj_fp=cur_obj_path,
            img_size=cfg.DATASET.OUTPUT_SIZE[0],
            camera_mode=cfg.DATASET.CAM_MODE,
            obj_data=obj_data,
            # preset_uv_path = cfg.DATASET.UV_PATH,
            global_RT=view_dataset.global_RT)
    # texture mapper
    texture_mapper = network.TextureMapper(
        texture_size=cfg.MODEL.TEX_MAPPER.NUM_SIZE,
        texture_num_ch=cfg.MODEL.TEX_MAPPER.NUM_CHANNELS,
        mipmap_level=cfg.MODEL.TEX_MAPPER.MIPMAP_LEVEL,
        apply_sh=cfg.MODEL.TEX_MAPPER.SH_BASIS)
    # render net
    render_net = network.RenderingNet(
        nf0=cfg.MODEL.RENDER_NET.NF0,
        in_channels=cfg.MODEL.TEX_MAPPER.NUM_CHANNELS,
        out_channels=3,
        num_down_unet=5,
        use_gcn=False)
    # interpolater
    interpolater = network.Interpolater()

    # L1 loss
    criterionL1 = nn.L1Loss(reduction='mean').to(device)
    # Optimizer
    optimizerG = torch.optim.Adam(list(texture_mapper.parameters()) +
                                  list(render_net.parameters()),
                                  lr=cfg.TRAIN.LR)

    print('Loading Model...')
    iter = 0
    dir_name = os.path.join(datetime.datetime.now().strftime('%m-%d') + '_' +
                            datetime.datetime.now().strftime('%H-%M-%S') +
                            '_' + cfg.TRAIN.SAMPLING_PATTERN + '_' +
                            cfg.DATASET.ROOT.strip('/').split('/')[-1])
    if cfg.TRAIN.EXP_NAME != '':
        dir_name += '_' + cfg.TRAIN.EXP_NAME
    checkpoint_path = ''  # defined up front so the check below never raises NameError
    if cfg.AUTO_RESUME:
        if cfg.TRAIN.RESUME and cfg.TRAIN.CHECKPOINT:
            checkpoint_path = cfg.TRAIN.CHECKPOINT
            dir_name = cfg.TRAIN.CHECKPOINT_DIR
            nums = [
                int(s) for s in cfg.TRAIN.CHECKPOINT_NAME.split('_')
                if s.isdigit()
            ]
            cfg.defrost()
            cfg.TRAIN.BEGIN_EPOCH = nums[0] + 1
            cfg.freeze()
            iter = nums[1] + 1
        elif cfg.MODEL.PRETRAINED:
            checkpoint_path = cfg.MODEL.PRETRAIN
    if checkpoint_path:
        print(' Checkpoint_path : %s' % (checkpoint_path))
        util.custom_load([texture_mapper, render_net],
                         ['texture_mapper', 'render_net'], checkpoint_path)
    else:
        print(' No parameters loaded. ')

    texture_mapper.to(device)
    render_net.to(device)
    interpolater.to(device)
    rasterizer.to(device)  # built above; assumes cfg.DATASET.LOAD_PRECOMPUTE is False

    texture_mapper_module = texture_mapper
    render_net_module = render_net
    # use multi-GPU
    if len(cfg.GPUS) > 1:
        texture_mapper = nn.DataParallel(texture_mapper, device_ids=cfg.GPUS)
        render_net = nn.DataParallel(render_net, device_ids=cfg.GPUS)
        interpolater = nn.DataParallel(interpolater, device_ids=cfg.GPUS)
        rasterizer = nn.DataParallel(rasterizer, device_ids=cfg.GPUS)
        rasterizer = rasterizer.module

    # set to training mode
    texture_mapper.train()
    render_net.train()
    interpolater.train()
    rasterizer.eval()  # the rasterizer is not trained for now

    part_list = [texture_mapper_module,
                 render_net_module]  # collect all networks
    part_name_list = ['texture_mapper', 'render_net']
    print("*" * 100)
    print("Number of generator parameters:")
    cfg.defrost()
    cfg.MODEL.TEX_MAPPER.NUM_PARAMS = util.print_network(texture_mapper).item()
    cfg.MODEL.RENDER_NET.NUM_PARAMS = util.print_network(render_net).item()
    cfg.freeze()
    print("*" * 100)

    print("Setup Log ...")
    log_dir = os.path.join(cfg.LOG.LOGGING_ROOT, dir_name)
    data_util.cond_mkdir(log_dir)
    val_out_dir = os.path.join(log_dir, 'val_out')
    val_gt_dir = os.path.join(log_dir, 'val_gt')
    val_err_dir = os.path.join(log_dir, 'val_err')
    data_util.cond_mkdir(val_out_dir)
    data_util.cond_mkdir(val_gt_dir)
    data_util.cond_mkdir(val_err_dir)
    util.custom_copy(args.cfg, os.path.join(log_dir, cfg.LOG.CFG_NAME))

    print('Start buffering data for training and validation...')
    view_dataloader = DataLoader(view_dataset,
                                 batch_size=cfg.TRAIN.BATCH_SIZE,
                                 shuffle=cfg.TRAIN.SHUFFLE,
                                 num_workers=8)
    view_val_dataloader = DataLoader(view_val_dataset,
                                     batch_size=cfg.TRAIN.BATCH_SIZE,
                                     shuffle=False,
                                     num_workers=8)
    #view_dataset.buffer_all()
    #view_val_dataset.buffer_all()

    # Save all command line arguments into a txt file in the logging directory for later reference.
    writer = SummaryWriter(log_dir)
    # iter = cfg.TRAIN.BEGIN_EPOCH * len(view_dataset) # pre model is batch-1

    print('Begin training...')
    # init value
    val_log_batch_id = 0
    first_val = True
    img_h, img_w = cfg.DATASET.OUTPUT_SIZE
    for epoch in range(cfg.TRAIN.BEGIN_EPOCH, cfg.TRAIN.END_EPOCH):
        for view_trgt in view_dataloader:
            start = time.time()

            # get image
            img_gt = []
            img_gt.append(view_trgt[0]['img_gt'].to(device))
            ROI = view_trgt[0]['ROI'].to(device)
            # get uvmap alpha
            uv_map = []
            alpha_map = []
            if not cfg.DATASET.LOAD_PRECOMPUTE:
                # raster module
                frame_idxs = view_trgt[0]['f_idx'].numpy()
                for batch_idx, frame_idx in enumerate(frame_idxs):
                    obj_path = view_trgt[0]['obj_path'][batch_idx]
                    if cur_obj_path != obj_path:
                        cur_obj_path = obj_path
                        obj_data = view_dataset.objs[frame_idx]
                        rasterizer.update_vs(obj_data['v_attr'])
                    proj = view_trgt[0]['proj'].to(device)[batch_idx, ...]
                    pose = view_trgt[0]['pose'].to(device)[batch_idx, ...]
                    dist_coeffs = view_trgt[0]['dist_coeffs'].to(device)[
                        batch_idx, ...]
                    uv_map_single, alpha_map_single, _, _, _, _, _, _, _, _, _, _, _, _ = \
                        rasterizer(proj = proj[None, ...],
                                    pose = pose[None, ...],
                                    dist_coeffs = dist_coeffs[None, ...],
                                    offset = None,
                                    scale = None,
                                    )
                    uv_map.append(uv_map_single[0, ...].clone().detach())
                    alpha_map.append(alpha_map_single[0, ...].clone().detach())
                # fix alpha map
                uv_map = torch.stack(uv_map, dim=0)
                alpha_map = torch.stack(alpha_map, dim=0)[:, None, :, :]
                # alpha_map = alpha_map * torch.tensor(img_gt[0][:,0,:,:][:,None,:,:] <= (2.0 * 255)).permute(0,2,1,3).to(alpha_map.dtype).to(alpha_map.device)

                # check per iter image
                for batch_idx, frame_idx in enumerate(frame_idxs):
                    if cfg.DEBUG.SAVE_TRANSFORMED_IMG:
                        save_dir_img_gt = './Debug/image_mask'
                        save_path_img_gt = os.path.join(
                            save_dir_img_gt,
                            '%06d_%03d.png' % (iter, frame_idx))
                        cv2.imwrite(
                            save_path_img_gt,
                            cv2.cvtColor(
                                img_gt[0][batch_idx, ...].cpu().detach().numpy(
                                ).transpose(1, 2, 0) * 255.0,
                                cv2.COLOR_RGB2BGR))
                        #cv2.imwrite(os.path.join(save_dir_img_gt, '%03d_'%frame_idx + img_fn), cv2.cvtColor(img_gt*255.0, cv2.COLOR_BGR2RGB))
                        print(' Save img: ' + save_path_img_gt)

                    if cfg.DEBUG.SAVE_TRANSFORMED_MASK:
                        save_alpha_map = alpha_map.permute(
                            0, 2, 3, 1).cpu().detach().numpy()
                        save_dir_mask = './Debug/image_mask'
                        save_path_mask = os.path.join(
                            save_dir_mask,
                            '%06d_%03d_mask.png' % (iter, frame_idx))
                        cv2.imwrite(save_path_mask,
                                    save_alpha_map[batch_idx, ...] * 255.0)
                        print(' Save mask: ' + save_path_mask)

            else:
                # get view data
                uv_map = view_trgt[0]['uv_map'].to(device)  # [N, H, W, 2]
                # sh_basis_map = view_trgt[0]['sh_basis_map'].to(device) # [N, H, W, 9]
                alpha_map = view_trgt[0]['alpha_map'][:, None, :, :].to(
                    device)  # [N, 1, H, W]

            # sample texture
            # neural_img = texture_mapper(uv_map, sh_basis_map)
            neural_img = texture_mapper(uv_map)

            # rendering net
            outputs = render_net(neural_img, None)
            img_max_val = 2.0
            outputs = (outputs * 0.5 +
                       0.5) * img_max_val  # map to [0, img_max_val]
            if not isinstance(outputs, list):
                outputs = [outputs]

            # # We don't enforce a loss on the outermost 5 pixels to alleviate boundary errors, also weight loss by alpha
            # alpha_map_central = alpha_map[:, :, 5:-5, 5:-5]
            # for i in range(len(view_trgt)):
            #     outputs[i] = outputs[i][:, :, 5:-5, 5:-5] * alpha_map_central
            #     img_gt[i] = img_gt[i][:, :, 5:-5, 5:-5] * alpha_map_central

            # ignore loss outside ROI
            for i in range(len(view_trgt)):
                outputs[i] = outputs[i] * ROI * alpha_map
                img_gt[i] = img_gt[i] * ROI * alpha_map

            # loss on final image
            loss_rn = list()
            for idx in range(len(view_trgt)):
                loss_rn.append(
                    criterionL1(outputs[idx].contiguous().view(-1).float(),
                                img_gt[idx].contiguous().view(-1).float()))
            loss_rn = torch.stack(loss_rn, dim=0).mean()

            # total loss for generator
            loss_g = loss_rn

            optimizerG.zero_grad()
            loss_g.backward()
            optimizerG.step()

            # error metrics
            with torch.no_grad():
                err_metrics_batch_i = metric.compute_err_metrics_batch(
                    outputs[0] * 255.0,
                    img_gt[0] * 255.0,
                    alpha_map,
                    compute_ssim=False)
                # err_metrics_batch_i = metric.compute_err_metrics_batch(outputs[0] * 255.0, img_gt[0] * 255.0, alpha_map_central, compute_ssim = False)

            # tensorboard scalar logs of training data
            writer.add_scalar("loss_g", loss_g, iter)
            writer.add_scalar("loss_rn", loss_rn, iter)
            writer.add_scalar("final_mae_valid",
                              err_metrics_batch_i['mae_valid_mean'], iter)
            writer.add_scalar("final_psnr_valid",
                              err_metrics_batch_i['psnr_valid_mean'], iter)

            end = time.time()
            print(
                "Iter %07d   Epoch %03d   loss_g %0.4f   mae_valid %0.4f   psnr_valid %0.4f   t_total %0.4f"
                % (iter, epoch, loss_g, err_metrics_batch_i['mae_valid_mean'],
                   err_metrics_batch_i['psnr_valid_mean'], end - start))

            # tensorboard figure logs of training data
            if not iter % cfg.LOG.PRINT_FREQ:
                output_final_vs_gt = []
                for i in range(len(view_trgt)):
                    output_final_vs_gt.append(outputs[i].clamp(min=0., max=1.))
                    output_final_vs_gt.append(img_gt[i].clamp(min=0., max=1.))
                    output_final_vs_gt.append(
                        (outputs[i] - img_gt[i]).abs().clamp(min=0., max=1.))

                output_final_vs_gt = torch.cat(output_final_vs_gt, dim=0)
                raster_uv_maps = torch.cat(
                    (
                        uv_map.permute(0, 3, 1, 2),  # N H W 2 -> N 2 H W
                        torch.zeros(uv_map.shape[0],
                                    1,
                                    img_h,
                                    img_w,
                                    dtype=uv_map.dtype,
                                    device=uv_map.device)),
                    dim=1)
                writer.add_image(
                    "raster_uv_vis",
                    torchvision.utils.make_grid(
                        raster_uv_maps,
                        nrow=raster_uv_maps[0].shape[0],
                        range=(0, 1),
                        scale_each=False,
                        normalize=False).cpu().detach().numpy()
                    [::-1, :, :],  # uv0 -> 0vu (rgb)
                    iter)
                writer.add_image(
                    "output_final_vs_gt",
                    torchvision.utils.make_grid(
                        output_final_vs_gt,
                        nrow=outputs[0].shape[0],  # 3 
                        range=(0, 1),
                        scale_each=False,
                        normalize=False).cpu().detach().numpy(),
                    iter)

            # validation
            if not iter % cfg.TRAIN.VAL_FREQ:
                start_val = time.time()
                with torch.no_grad():
                    # error metrics
                    err_metrics_val = {}
                    err_metrics_val['mae_valid'] = []
                    err_metrics_val['mse_valid'] = []
                    err_metrics_val['psnr_valid'] = []
                    err_metrics_val['ssim_valid'] = []
                    # loop over batches
                    batch_id = 0
                    for view_val_trgt in view_val_dataloader:
                        start_val_i = time.time()

                        # get image
                        img_gt = []
                        img_gt.append(view_val_trgt[0]['img_gt'].to(device))
                        ROI = view_val_trgt[0]['ROI'].to(device)
                        # get uvmap alpha
                        uv_map = []
                        alpha_map = []
                        if not cfg.DATASET.LOAD_PRECOMPUTE:
                            # build raster module
                            frame_idxs = view_val_trgt[0]['f_idx'].numpy()
                            for batch_idx, frame_idx in enumerate(frame_idxs):
                                obj_path = view_val_trgt[0]['obj_path'][
                                    batch_idx]
                                if cur_obj_path != obj_path:
                                    cur_obj_path = obj_path
                                    obj_data = view_val_dataset.objs[frame_idx]
                                    rasterizer.update_vs(obj_data['v_attr'])
                                proj = view_val_trgt[0]['proj'].to(device)[
                                    batch_idx, ...]
                                pose = view_val_trgt[0]['pose'].to(device)[
                                    batch_idx, ...]
                                dist_coeffs = view_val_trgt[0][
                                    'dist_coeffs'].to(device)[batch_idx, ...]
                                uv_map_single, alpha_map_single, _, _, _, _, _, _, _, _, _, _, _, _ = \
                                    rasterizer(proj = proj[None, ...],
                                                pose = pose[None, ...],
                                                dist_coeffs = dist_coeffs[None, ...],
                                                offset = None,
                                                scale = None,
                                                )
                                uv_map.append(
                                    uv_map_single[0, ...].clone().detach())
                                alpha_map.append(
                                    alpha_map_single[0, ...].clone().detach())
                            # fix alpha map
                            uv_map = torch.stack(uv_map, dim=0)
                            alpha_map = torch.stack(alpha_map,
                                                    dim=0)[:, None, :, :]
                            # alpha_map = alpha_map * torch.tensor(img_gt[0][:,0,:,:][:,None,:,:] <= (2.0 * 255)).permute(0,2,1,3).to(alpha_map.dtype).to(alpha_map.device)
                        else:
                            uv_map = view_val_trgt[0]['uv_map'].to(
                                device)  # [N, H, W, 2]
                            # sh_basis_map = view_val_trgt[0]['sh_basis_map'].to(device)  # [N, H, W, 9]
                            alpha_map = view_val_trgt[0][
                                'alpha_map'][:, None, :, :].to(
                                    device)  # [N, 1, H, W]

                        view_idx = view_val_trgt[0]['idx']
                        num_view = len(view_val_trgt)
                        img_gt = []
                        for i in range(num_view):
                            img_gt.append(
                                view_val_trgt[i]['img_gt'].to(device))

                        # sample texture
                        # neural_img = texture_mapper(uv_map, sh_basis_map)
                        neural_img = texture_mapper(uv_map)

                        # rendering net
                        outputs = render_net(neural_img, None)
                        img_max_val = 2.0
                        outputs = (outputs * 0.5 + 0.5
                                   ) * img_max_val  # map to [0, img_max_val]
                        if not isinstance(outputs, list):
                            outputs = [outputs]

                        # apply alpha and ROI
                        for i in range(num_view):
                            outputs[i] = outputs[i] * alpha_map * ROI
                            img_gt[i] = img_gt[i] * alpha_map * ROI

                        # tensorboard figure logs of validation data
                        if batch_id == val_log_batch_id:
                            output_final_vs_gt = []
                            for i in range(num_view):
                                output_final_vs_gt.append(outputs[i].clamp(
                                    min=0., max=1.))
                                output_final_vs_gt.append(img_gt[i].clamp(
                                    min=0., max=1.))
                                output_final_vs_gt.append(
                                    (outputs[i] - img_gt[i]).abs().clamp(
                                        min=0., max=1.))

                            output_final_vs_gt = torch.cat(output_final_vs_gt,
                                                           dim=0)
                            writer.add_image(
                                "output_final_vs_gt_val",
                                torchvision.utils.make_grid(
                                    output_final_vs_gt,
                                    nrow=outputs[0].shape[0],  # 3
                                    range=(0, 1),
                                    scale_each=False,
                                    normalize=False).cpu().detach().numpy(),
                                iter)

                        # error metrics
                        err_metrics_batch_i_final = metric.compute_err_metrics_batch(
                            outputs[0] * 255.0,
                            img_gt[0] * 255.0,
                            alpha_map,
                            compute_ssim=True)
                        batch_size = view_idx.shape[0]
                        for i in range(batch_size):
                            for key in list(err_metrics_val.keys()):
                                if key in err_metrics_batch_i_final.keys():
                                    err_metrics_val[key].append(
                                        err_metrics_batch_i_final[key][i])

                        # save images
                        for i in range(batch_size):
                            cv2.imwrite(
                                os.path.join(
                                    val_out_dir,
                                    str(iter).zfill(8) +
                                    '_' + str(view_idx[i].cpu().detach().numpy(
                                    )).zfill(5) + '.png'),
                                outputs[0][i, :].permute(
                                    (1, 2,
                                     0)).cpu().detach().numpy()[:, :, ::-1] *
                                255.)
                            cv2.imwrite(
                                os.path.join(
                                    val_err_dir,
                                    str(iter).zfill(8) +
                                    '_' + str(view_idx[i].cpu().detach().numpy(
                                    )).zfill(5) + '.png'),
                                (outputs[0] - img_gt[0]).abs().clamp(
                                    min=0., max=1.)[i, :].permute(
                                        (1, 2,
                                         0)).cpu().detach().numpy()[:, :, ::-1]
                                * 255.)
                            if first_val:
                                cv2.imwrite(
                                    os.path.join(
                                        val_gt_dir,
                                        str(view_idx[i].cpu().detach().numpy()
                                            ).zfill(5) + '.png'),
                                    img_gt[0][i, :].permute(
                                        (1, 2,
                                         0)).cpu().detach().numpy()[:, :, ::-1]
                                    * 255.)

                        end_val_i = time.time()
                        print(
                            "Val   batch %03d   mae_valid %0.4f   psnr_valid %0.4f   ssim_valid %0.4f   t_total %0.4f"
                            % (batch_id,
                               err_metrics_batch_i_final['mae_valid_mean'],
                               err_metrics_batch_i_final['psnr_valid_mean'],
                               err_metrics_batch_i_final['ssim_valid_mean'],
                               end_val_i - start_val_i))

                        batch_id += 1

                    for key in list(err_metrics_val.keys()):
                        if err_metrics_val[key]:
                            err_metrics_val[key] = np.vstack(
                                err_metrics_val[key])
                            err_metrics_val[
                                key + '_mean'] = err_metrics_val[key].mean()
                        else:
                            err_metrics_val[key + '_mean'] = np.nan

                    # tensorboard scalar logs of validation data
                    writer.add_scalar("final_mae_valid_val",
                                      err_metrics_val['mae_valid_mean'], iter)
                    writer.add_scalar("final_psnr_valid_val",
                                      err_metrics_val['psnr_valid_mean'], iter)
                    writer.add_scalar("final_ssim_valid_val",
                                      err_metrics_val['ssim_valid_mean'], iter)

                    first_val = False
                    val_log_batch_id = (val_log_batch_id + 1) % batch_id

                    end_val = time.time()
                    print(
                        "Val   mae_valid %0.4f   psnr_valid %0.4f   ssim_valid %0.4f   t_total %0.4f"
                        % (err_metrics_val['mae_valid_mean'],
                           err_metrics_val['psnr_valid_mean'],
                           err_metrics_val['ssim_valid_mean'],
                           end_val - start_val))

            iter += 1

            if iter % cfg.LOG.CHECKPOINT_FREQ == 0:
                util.custom_save(
                    os.path.join(log_dir, 'model_epoch_%d_iter_%s_.pth' %
                                 (epoch, iter)), part_list, part_name_list)

    util.custom_save(
        os.path.join(log_dir, 'model_epoch_%d_iter_%s_.pth' % (epoch, iter)),
        part_list, part_name_list)
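
util.custom_save above persists several sub-networks under string names; in plain PyTorch the same idea is a dict of state_dicts written with torch.save. A sketch under that assumption (util.custom_save itself is project-specific, and the linear layers are stand-ins):

import torch
import torch.nn as nn

texture_mapper = nn.Linear(4, 4)    # stand-ins for the real networks
render_net = nn.Linear(4, 3)

# Save both sub-networks in one checkpoint file, keyed by name.
ckpt = {
    "texture_mapper": texture_mapper.state_dict(),
    "render_net": render_net.state_dict(),
}
torch.save(ckpt, "model_epoch_0_iter_0_.pth")

# Restore by name.
loaded = torch.load("model_epoch_0_iter_0_.pth", map_location="cpu")
texture_mapper.load_state_dict(loaded["texture_mapper"])
render_net.load_state_dict(loaded["render_net"])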
Example #12
        "--config-file",
        default="",
        metavar="FILE",
        help="Path to config file",
        type=str,
    )
    parser.add_argument(
        "--skip-test",
        dest="skip_test",
        help="Do not test the final model",
        action="store_true",
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER
    )
    parser.add_argument(
        "--type",
        type=str,
    )

    args = parser.parse_args()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    globals()["run_" + args.type](cfg)
Example #13
def update_config(cfg, yamlfilename):
    cfg.defrost()
    cfg.merge_from_file(yamlfilename)
    cfg.TEST.MODEL_FILE = osp.join(cfg.DATA_DIR, cfg.TEST.MODEL_FILE)
    cfg.freeze()
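
A self-contained sketch of what update_config does to the config, with keys mirroring the snippet (the values and the commented YAML overlay are illustrative):

import os.path as osp
from yacs.config import CfgNode as CN

cfg = CN()
cfg.DATA_DIR = "/data"
cfg.TEST = CN()
cfg.TEST.MODEL_FILE = "models/best.pth"

cfg.defrost()
# cfg.merge_from_file("experiments/test.yaml")  # would overlay a real YAML here
cfg.TEST.MODEL_FILE = osp.join(cfg.DATA_DIR, cfg.TEST.MODEL_FILE)
cfg.freeze()
print(cfg.TEST.MODEL_FILE)  # -> /data/models/best.pth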
Example #14
def main():
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="./configs/submit.yml", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)


    parser.add_argument(
        "--pretrain_model_path", default="./dianshang/crop_half_model.pth", help="path to pretrained model weights", type=str
    )

    parser.add_argument(
        "--crop_path", default=" ", help="path to the crop output directory", type=str
    )

    args = parser.parse_args()
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    cfg.TEST.WEIGHT = args.pretrain_model_path

    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))



    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(cfg, shuffle_train=False)
    model = build_model(cfg, num_classes)
    print("load pretained weight",cfg.TEST.WEIGHT)
    # try:
    print("加载单卡训练权重")
    model.load_param(cfg.TEST.WEIGHT)


    results = []
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_query_new'
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_gallery_new'
    # out_dir = '/home/lab3/bi/0716/Veri/ai_city/tools/output/crop/crop_query'
    out_dir = args.crop_path
    if os.path.exists(os.path.join(out_dir, 'crop_train')):
        print("Output folders already exist")
    else:
        os.makedirs(os.path.join(out_dir, 'crop_train'))
        os.makedirs(os.path.join(out_dir, 'crop_query'))
        os.makedirs(os.path.join(out_dir, 'crop_gallery'))

    crop_train, crop_query, crop_gallery = vis_actmap(model, cfg, train_loader, out_dir)
    pickle.dump(crop_train, open(os.path.join(out_dir, 'train_crop_img_add.pkl'), 'wb'))
    crop_train, crop_query, crop_gallery = vis_actmap(model, cfg, val_loader, out_dir)
    pickle.dump((crop_query, crop_gallery), open(os.path.join(out_dir, 'test_crop_img_add.pkl'), 'wb'))