示例#1
0
def train(args):
    """Build the frozen config from CLI args and run the ReID training loop.

    :param args: parsed CLI namespace; ``args.config_file`` optionally points
        to a YAML config and ``args.opts`` holds key/value overrides.
    :return: None
    """
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()  # prevent any further modification of the config

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # BUG FIX: only archive the config file when one was actually supplied;
    # the original unconditionally called shutil.copy("") and crashed when
    # no --config_file was given.
    if args.config_file != "":
        shutil.copy(args.config_file, cfg.OUTPUT_DIR)

    num_gpus = torch.cuda.device_count()

    logger = setup_logger('reid_baseline', output_dir, 0)
    logger.info('Using {} GPUS'.format(num_gpus))
    logger.info(args)
    logger.info('Running with config:\n{}'.format(cfg))

    train_dl, val_dl, num_query, num_classes = make_dataloader(cfg, num_gpus)

    model = build_model(cfg, num_classes)
    # print(model)
    loss_func = make_loss(cfg, num_classes)

    trainer = BaseTrainer(cfg, model, train_dl, val_dl, loss_func, num_query,
                          num_gpus)

    # Epoch loop: per-batch step + batch/epoch bookkeeping hooks.
    for epoch in range(trainer.epochs):
        for batch in trainer.train_dl:
            trainer.step(batch)
            trainer.handle_new_batch()
        trainer.handle_new_epoch()
示例#2
0
def main():
    """Entry point: build the config from CLI args, set up logging and hand
    off to ``main_worker`` on the configured GPUs."""
    args = parse_args()
    update_config(cfg, args)
    # Config must be unfrozen to record the distributed rank, then re-frozen.
    cfg.defrost()
    cfg.RANK = args.rank
    cfg.freeze()

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'train')

    logger.info(pprint.pformat(args))
    #logger.info(cfg)

    # Ask for explicit interactive confirmation when evaluation is disabled.
    if cfg.WITHOUT_EVAL:
        input(
            "[WARNING] According to the configuration, there will be no evaluation. If evaluation is necessary, please terminate this process. [press Enter to continue]"
        )
        logger.info("=> Training without evaluation")

    ngpus_per_node = len(cfg.GPUS)
    if ngpus_per_node == 1:
        warnings.warn(
            'You have chosen a specific GPU. This will completely disable data parallelism.'
        )

    # Simply call main_worker function
    # GPU ids are passed as a comma-separated string, e.g. "0,1,2".
    main_worker(','.join([str(i) for i in cfg.GPUS]), ngpus_per_node, args,
                final_output_dir, tb_log_dir)
def main(args):
    """Build a generator from the config, load its weights, and render the
    truncation-trick figure.

    :param args: parsed command line arguments
    :return: None
    """
    from config import cfg as conf

    conf.merge_from_file(args.config)
    conf.freeze()

    # Instantiate the generator network as described by the config.
    print("Creating generator object ...")
    generator = Generator(resolution=conf.dataset.resolution,
                          num_channels=conf.dataset.channels,
                          structure=conf.structure,
                          **conf.model.gen)

    # Restore the pretrained weights.
    print("Loading the generator weights from:", args.generator_file)
    generator.load(args.generator_file)

    # Render the truncation-trick grid for a fixed set of seeds / psi values.
    draw_truncation_trick_figure('figure08-truncation-trick.png',
                                 generator,
                                 out_depth=5,
                                 seeds=[1, 32, 44, 86, 91, 388],
                                 psis=[1, 0.7, 0.5, 0, -0.5, -1])

    print('Done.')
示例#4
0
def main():
    """Entry point: seed RNGs, parse CLI options, build the frozen config,
    set up logging and launch training."""
    setup_seed(1)
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # Layer the optional YAML file, then any CLI overrides, onto the defaults.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info(args)

    # Echo the raw config file into the log for reproducibility.
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True
    train(cfg)
def test_cross_dataset(config_file, test_dataset, **kwargs):
    """Evaluate a model trained on ``cfg.DATASETS.NAMES`` on a different dataset.

    :param config_file: path to the yacs YAML config
    :param test_dataset: name of the dataset to evaluate on
    :param kwargs: extra config overrides, merged as alternating key/value pairs
    :return: None (mAP/CMC results are written to the logger)
    """
    cfg.merge_from_file(config_file)
    if kwargs:
        # yacs expects a flat [key1, val1, key2, val2, ...] list.
        opts = []
        for k, v in kwargs.items():
            opts.append(k)
            opts.append(v)
        cfg.merge_from_list(opts)
    cfg.freeze()

    # The training dataset is only loaded to recover the number of classes.
    PersonReID_Dataset_Downloader('./datasets', cfg.DATASETS.NAMES)
    _, _, _, num_classes = data_loader(cfg, cfg.DATASETS.NAMES)

    PersonReID_Dataset_Downloader('./datasets', test_dataset)
    _, val_loader, num_query, _ = data_loader(cfg, test_dataset)

    re_ranking = cfg.RE_RANKING

    if not re_ranking:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR,
                             cfg.DATASETS.NAMES+'->'+test_dataset)
        logger.info("Test Results:")
    else:
        logger = make_logger("Reid_Baseline", cfg.OUTPUT_DIR,
                             cfg.DATASETS.NAMES+'->'+test_dataset+'_re-ranking')
        logger.info("Re-Ranking Test Results:")

    device = torch.device(cfg.DEVICE)

    model = getattr(models, cfg.MODEL.NAME)(num_classes)
    model.load(cfg.OUTPUT_DIR, cfg.TEST.LOAD_EPOCH)
    model = model.eval()
    # PERF FIX: move the model to the device once up front; the original
    # redundantly called model.eval() and model.to(device) on every batch.
    if device:
        model.to(device)

    all_feats = []
    all_pids = []
    all_camids = []

    since = time.time()
    for data in tqdm(val_loader, desc='Feature Extraction', leave=False):
        with torch.no_grad():
            images, pids, camids = data
            if device:
                images = images.to(device)

            feats = model(images)

        all_feats.append(feats)
        all_pids.extend(np.asarray(pids))
        all_camids.extend(np.asarray(camids))

    cmc, mAP = evaluation(all_feats, all_pids, all_camids, num_query, re_ranking)

    logger.info("mAP: {:.1%}".format(mAP))
    for r in [1, 5, 10]:
        logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1]))

    test_time = time.time() - since
    logger.info('Testing complete in {:.0f}m {:.0f}s'.format(test_time // 60, test_time % 60))
def main():
    """Entry point: parse CLI options, build the frozen config, set up
    logging and launch training."""
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger(name="merlin.baseline", output=output_dir)
    logger.info(args)

    # Echo the raw config file into the log for reproducibility.
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    # Restrict visible GPUs before any CUDA context is created.
    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True
    # NOTE(review): num_gpus is computed but unused since the log line below
    # was commented out.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    # logger.info("Using {} GPUS".format(num_gpus))
    train(cfg)
示例#7
0
def main():
    """Entry point: parse CLI options, assemble the frozen config, set up
    logging and start training."""
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; default to a single GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # Layer the optional YAML file, then CLI overrides, onto the defaults.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if not os.path.exists(cfg.OUTPUT_DIR):
        os.makedirs(cfg.OUTPUT_DIR)

    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR, 0)
    logger.info("Using {} GPUs.".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True
    train(cfg)
示例#8
0
def main(args):
    """
    Main function for the script: build a generator from the config, load
    its weights via the module-level ``load`` helper, and render the
    style-mixing figure.
    :param args: parsed command line arguments
    :return: None
    """

    from config import cfg as opt

    opt.merge_from_file(args.config)
    opt.freeze()

    print("Creating generator object ...")
    # create the generator object
    gen = Generator(resolution=opt.dataset.resolution,
                    num_channels=opt.dataset.channels,
                    structure=opt.structure,
                    **opt.model.gen)

    print("Loading the generator weights from:", args.generator_file)
    # load the weights into it
    # gen.load_state_dict(torch.load(args.generator_file))
    gen = load(gen, args.generator_file)

    # path for saving the files:
    # generate the images:
    # src_seeds = [639, 701, 687, 615, 1999], dst_seeds = [888, 888, 888],
    # Style ranges split the layers into coarse / middle / fine groups.
    draw_style_mixing_figure(os.path.join('figure03-style-mixing.png'),
                             gen,
                             out_depth=4,
                             src_seeds=[639, 1995, 687, 615, 1999],
                             dst_seeds=[888, 888, 888],
                             style_ranges=[range(0, 2)] * 1 +
                             [range(2, 8)] * 1 + [range(8, 14)] * 1)
    print('Done.')
def main(args):
    """Load a generator from config plus checkpoint and build the
    truncation-trick image sequence.

    :param args: parsed command line arguments
    :return: None
    """
    from config import cfg as conf

    conf.merge_from_file(args.config)
    conf.freeze()

    # Make sure the output directory exists before anything is written.
    out_dir = args.output_dir
    os.makedirs(out_dir, exist_ok=True)

    # Instantiate the generator network as described by the config.
    print("Creating generator object ...")
    generator = Generator(resolution=conf.dataset.resolution,
                          num_channels=conf.dataset.channels,
                          structure=conf.structure,
                          **conf.model.gen)

    # Restore the pretrained weights.
    print("Loading the generator weights from:", args.generator_file)
    generator.load_state_dict(torch.load(args.generator_file))

    build_truncation_trick_seq(out_dir,
                               generator,
                               out_depth=5,
                               num_samles=args.num_samples)

    print('Done.')
示例#10
0
def main():
    """Entry point: load an HRNet pose model from a fixed checkpoint and run
    pose prediction on the images given on the command line."""
    args = parse_args()
    update_config(cfg, args)
    # Override a few test-time options on top of the file-based config.
    cfg.defrost()
    cfg.TEST.MODEL_FILE = HRNET_PATH + '/models/pytorch/pose_coco/pose_hrnet_w32_256x192.pth'
    cfg.TEST.USE_GT_BBOX = False
    cfg.GPUS = (0, )
    cfg.freeze()

    logger, final_output_dir, tb_log_dir = create_logger(
        cfg, args.cfg, 'valid')
    logger.info(pprint.pformat(args))
    logger.info(cfg)

    # cudnn related setting
    cudnn.benchmark = cfg.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED

    logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
    # Model class is selected by name from the models package.
    model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(cfg,
                                                               is_train=False)
    model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
    model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    # NOTE: 'normalize' is rebound here to the full ToTensor+Normalize
    # pipeline that is passed to predict_imgs below.
    normalize = transforms.Compose([transforms.ToTensor(), normalize])

    predict_imgs(model, args.imgs, args.bbox, args.out, normalize, 0.85)
def main():
    """Entry point: parse CLI options, assemble the frozen config, prepare
    the output directory and GPU environment, then start training."""
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # Layer the optional YAML file, then CLI overrides, onto the defaults.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    out_dir = cfg.OUTPUT_DIR
    if out_dir and not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Restrict visible GPUs before any CUDA context is created.
    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train(cfg)
示例#12
0
def main():
    """Entry point: parse CLI options, build the frozen config, set up
    logging and launch wheat-detection training."""
    parser = argparse.ArgumentParser(description="PyTorch Template MNIST Training")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; default to one GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("wheatdetection", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    # Echo the raw config file into the log for reproducibility.
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    train(cfg, logger)
示例#13
0
def main(args):
    """Evaluate a pretrained model on the low-resolution MVOR test set.

    :param args: parsed CLI namespace with ``use_cpu``, ``config_file`` and
        ``opts`` attributes
    :return: None
    """
    # Fall back to CPU when requested or when CUDA is unavailable;
    # pinned memory only helps for GPU transfers.
    if args.use_cpu or not torch.cuda.is_available():
        device = torch.device("cpu")
        pin_memory = False
    else:
        device = torch.device("cuda")
        pin_memory = True

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    logger = setup_logger(cfg, args.config_file, "test")
    cfg.freeze()
    test_dataset = MVORDatasetTest(ann_file=cfg.DATASET.TEST.ANNO_FILE,
                                   root=cfg.DATASET.TEST.ROOT_DIR)
    test_loader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=cfg.DATASET.TEST.BATCH_SIZE,
        shuffle=cfg.DATASET.TEST.SHUFFLE,
        num_workers=cfg.DATASET.TEST.WORKERS,
        pin_memory=pin_memory,
    )
    logger.info(pprint.pformat(cfg))
    logger.info(pprint.pformat(args))

    # strict=True: the checkpoint must match the model definition exactly.
    model = get_model()
    model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=True)
    model.to(device)

    eval_on_lowres_mvor(cfg=cfg,
                        test_loader=test_loader,
                        model=model,
                        logger=logger,
                        device=device)
示例#14
0
def main(args):
    """Render a latent-interpolation figure from a pretrained generator.

    :param args: parsed command line arguments
    :return: None
    """
    from config import cfg as conf

    conf.merge_from_file(args.config)
    conf.freeze()

    # Instantiate the generator network as described by the config.
    print("Creating generator object ...")
    generator = Generator(resolution=conf.dataset.resolution,
                          num_channels=conf.dataset.channels,
                          structure=conf.structure,
                          **conf.model.gen)

    # Restore the pretrained weights.
    print("Loading the generator weights from:", args.generator_file)
    generator.load(args.generator_file)

    # Fixed seeds for the source and destination latents of the figure.
    source_seeds = [1, 32, 44, 86]
    target_seeds = [231, 415, 1515, 16]
    draw_interp_figure(args.output, generator, out_depth=5,
                       src_seeds=source_seeds, dst_seeds=target_seeds,
                       psis=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])

    print('Done.')
示例#15
0
def main():
    """Entry point: build the frozen config, then run inference with either a
    single checkpoint or every 'resnet50_model*' checkpoint in the weight
    directory, depending on ``args.flag``."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    # NOTE(review): a positional argument with action='store_false' consumes
    # no CLI tokens, so args.flag appears to always be False here — confirm
    # whether the multi-model branch below is ever taken.
    parser.add_argument("flag",
                        action='store_false',
                        help="whether to test multiple models")
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; default to one GPU.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    # Echo the raw config file into the log for reproducibility.
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    # Restrict visible GPUs before any CUDA context is created.
    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    if args.flag:
        # Evaluate every checkpoint named 'resnet50_model*' in the directory
        # containing cfg.TEST.WEIGHT.
        path, _ = os.path.split(cfg.TEST.WEIGHT)
        model_list = []
        for root, dirs, files in os.walk(path):
            for i in files:
                if i.startswith('resnet50_model'):
                    model_list.append(i)
        for i in model_list:
            print(i)
            model.load_param(os.path.join(path, i))
            inference(cfg, model, val_loader, num_query)
    else:
        # Evaluate the single configured checkpoint.
        model.load_param(cfg.TEST.WEIGHT)
        inference(cfg, model, val_loader, num_query)
示例#16
0
 def __prepare_fine_tune(self):
     """Copy the fine-tuning hyper-parameters over the TRAIN section of the
     global config so the regular training loop can be reused for fine-tuning.
     """
     # Config must be unfrozen to mutate, then re-frozen.
     cfg.defrost()
     cfg.TRAIN.ANNO_FILE = cfg.FINE_TUNE.ANNO_FILE
     cfg.TRAIN.YOLO_EPOCHS = cfg.FINE_TUNE.YOLO_EPOCHS
     cfg.TRAIN.LR_INIT = cfg.FINE_TUNE.LR_INIT
     cfg.TRAIN.LR_END = cfg.FINE_TUNE.LR_END
     cfg.TRAIN.WARMUP_EPOCHS = cfg.FINE_TUNE.WARMUP_EPOCHS
     cfg.freeze()
示例#17
0
def main():
    """Entry point: build the frozen config, construct the multi-task model,
    load its weights and run inference on the validation set."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; default to one GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    # Echo the raw config file into the log for reproducibility.
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    # Restrict visible GPUs before any CUDA context is created.
    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    # Multi-task model: all branch options come straight from cfg.mt.
    model = build_mt_model(
                           num_features=cfg.mt.num_features,
                         last_stride=cfg.mt.last_conv_stride,
                         num_classes=1, #not used since clf is not loaded
                         num_classes_seg=cfg.mt.num_classes_seg,
                         global_branch=cfg.mt.global_branch,
                         mask_branch=cfg.mt.mask_branch,
                         part_branch=cfg.mt.part_branch,
                         mask_dim=cfg.mt.mask_dim,
                         part_dim=cfg.mt.part_dim,
                         part_info=cfg.mt.part_info,
                         attr_mask_weight=cfg.mt.attr_mask_weight,
                         use_attr=cfg.mt.use_attr,
                         part_layer=cfg.mt.part_layer,
                         part_abla=cfg.mt.part_abla
)

    print(cfg.TEST.WEIGHT)
    model.load_param(cfg.TEST.WEIGHT)

    inference(cfg, model, val_loader, num_query)
示例#18
0
def train(train_loader, num_classes):
    """Parse CLI options, build the frozen config, construct model/loss/
    optimizer/scheduler and run the training loop.

    :param train_loader: data loader yielding training batches
    :param num_classes: number of identity classes for the classifier head
    :return: None
    """
    # NOTE(review): this function parses command-line arguments itself
    # despite taking data arguments — callers cannot pass a config directly.
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    # Seed all RNGs for reproducibility.
    set_seed(cfg.SOLVER.SEED)

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, if_train=True)
    logger.info("Saving model in the path :{}".format(cfg.OUTPUT_DIR))
    logger.info(args)

    # Echo the raw config file into the log for reproducibility.
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID


    # Either warm-start from a finetune checkpoint or build from scratch.
    if cfg.MODEL.PRETRAIN_CHOICE == 'finetune':
        model = make_model(cfg, num_class=num_classes)
        model.load_param_finetune(cfg.MODEL.PRETRAIN_PATH)
        print('Loading pretrained model for finetuning......')
    else:
        model = make_model(cfg, num_class=num_classes)

    loss_func = make_loss(cfg, num_classes=num_classes)

    optimizer = make_optimizer(cfg, model)
    
    # Cosine annealing with warmup, all hyper-parameters from the SOLVER section.
    scheduler = WarmupCosineAnnealingLR(optimizer, cfg.SOLVER.MAX_EPOCHS,  cfg.SOLVER.DELAY_ITERS, cfg.SOLVER.ETA_MIN_LR, 
                                cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_EPOCHS, cfg.SOLVER.WARMUP_METHOD)
    logger.info("use WarmupCosineAnnealingLR, delay_step:{}".format(cfg.SOLVER.DELAY_ITERS))

    do_train(
        cfg,
        model,
        train_loader,
        optimizer,
        scheduler,  # modify for using self trained model
        loss_func
    )
示例#19
0
def main():
    """Entry point: parse CLI options, derive batch sizes, build the frozen
    config, set up logging and launch classification training."""
    # Parse the command-line arguments; see the argparse module for details.
    parser = argparse.ArgumentParser(
        description="Classification Baseline Training")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER
                        )  # nargs=argparse.REMAINDER collects all remaining CLI tokens into a list for this option

    args = parser.parse_args()

    # os.environ exposes system environment variables, e.g. environ['HOME'] is the current user's home directory.
    ## WORLD_SIZE is set by torch.distributed.launch; its value is nproc_per_node * number_of_nodes (1 host here).
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # If a YAML override file is supplied, merge it in; keeping parameters
    # separate from code is the main advantage of the rbgirshick/yacs module.
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Derived batch sizes: images per batch = categories * instances-per-category.
    cfg.TRAIN.DATALOADER.IMS_PER_BATCH = cfg.TRAIN.DATALOADER.CATEGORIES_PER_BATCH * cfg.TRAIN.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.VAL.DATALOADER.IMS_PER_BATCH = cfg.VAL.DATALOADER.CATEGORIES_PER_BATCH * cfg.VAL.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.TEST.DATALOADER.IMS_PER_BATCH = cfg.TEST.DATALOADER.CATEGORIES_PER_BATCH * cfg.TEST.DATALOADER.INSTANCES_PER_CATEGORY_IN_BATCH
    cfg.freeze(
    )  # Finally freeze the config to prevent further modification; parameter setup is complete and fixed from here on.

    output_dir = cfg.SOLVER.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # The logger records run-time output; it has some advantages over plain print.
    logger = setup_logger("classification", output_dir, "training", 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    # print the configuration file
    '''
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    #'''
    logger.info("Running with config:\n{}".format(cfg))

    # Restrict visible GPUs before any CUDA context is created.
    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = ",".join(
            "%s" % i for i in
            cfg.MODEL.DEVICE_ID)  # int tuple -> str # cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train(cfg)
def main(args):
    """
    Main function for the script: load a generator (Jittor backend), sample a
    grid of latents and save the generated image grid.
    :param args: parsed command line arguments
    :return: None
    """

    from config import cfg as opt

    opt.merge_from_file(args.config)
    opt.freeze()

    print("Creating generator object ...")
    # create the generator object
    gen = Generator(resolution=opt.dataset.resolution,
                    num_channels=opt.dataset.channels,
                    structure=opt.structure,
                    **opt.model.gen)

    print("Loading the generator weights from:", args.generator_file)
    # load the weights into it
    # gen.load_state_dict(torch.load(args.generator_file))
    gen.load(args.generator_file)

    # path for saving the files:
    save_path = args.output_dir
    os.makedirs(save_path, exist_ok=True)
    latent_size = opt.model.gen.latent_size
    # Output depth derived from resolution: log2(res) - 2 progressive stages.
    out_depth = int(np.log2(opt.dataset.resolution)) - 2

    print("Generating scale synchronized images ...")
    # generate the images:
    # with torch.no_grad():
    with jt.no_grad():
        # point = torch.randn(args.n_row * args.n_col, latent_size)
        # Fixed seed so the sampled latent grid is reproducible.
        np.random.seed(1000)
        point = np.random.randn(args.n_row * args.n_col, latent_size)
        # point = (point / point.norm()) * (latent_size ** 0.5)
        # Normalize the latents to the hypersphere of radius sqrt(latent_size).
        point = (point / np.linalg.norm(point)) * (latent_size**0.5)
        point = jt.array(point, dtype='float32')
        ss_image = gen(point, depth=out_depth, alpha=1)
        # color adjust the generated image:
        ss_image = adjust_dynamic_range(ss_image)
    print("gen done")
    # save the ss_image in the directory
    # ss_image = torch.from_numpy(ss_image.data)
    # save_image(ss_image, os.path.join(save_path, "grid.png"), nrow=args.n_row,
    #             normalize=True, scale_each=True, pad_value=128, padding=1)
    jt.save_image_my(ss_image,
                     os.path.join(save_path, "grid.png"),
                     nrow=args.n_row,
                     normalize=True,
                     scale_each=True,
                     pad_value=128,
                     padding=1)

    print('Done.')
示例#21
0
def main():
    """Average several saved distance matrices and run inference with the
    resulting ensemble distance matrix.

    :return: None
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file", default="", help="path to config file", type=str
                        )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; default to one GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))


    test_dataloader, num_query, _ = get_test_dataloader(cfg, test_phase=False)

    distmat_paths = [cfg.TEST.DISTMAT1, cfg.TEST.DISTMAT2, cfg.TEST.DISTMAT3,
                     cfg.TEST.DISTMAT4, cfg.TEST.DISTMAT5, cfg.TEST.DISTMAT6]
    # Load every distance matrix that exists on disk.
    dist_mats = []
    cnt = 0
    for distmat_path in distmat_paths:
        if os.path.isfile(distmat_path):
            # BUG FIX: the original branched on cnt < thresh but read
            # 'dist_mat1' in both branches — the condition was dead code.
            # Read the dataset unconditionally and close the file via `with`.
            with h5py.File(distmat_path, 'r') as f:
                mat = f['dist_mat1'][()]
            # Add a leading axis so the matrices can be stacked and averaged.
            dist_mats.append(mat[np.newaxis, ...])
            cnt += 1

    logger.info(f'Average {cnt} results')
    # Element-wise mean over all loaded distance matrices.
    dist_mat = np.concatenate(dist_mats, axis=0).mean(axis=0)

    inference_with_distmat(cfg, test_dataloader, num_query, dist_mat)
示例#22
0
def main():
    """Entry point: build the frozen config, set up per-experiment output and
    logging, run training, then plot the training curves."""
    parser = argparse.ArgumentParser(
        description="Image Classification Training")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed.launch; default to one GPU.
    num_gpu = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    # Experiment name is derived from the config file's base name.
    experiment_name = 'no_config'
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
        experiment_name = args.config_file.split('/')[-1].split('.')[0]
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = os.path.join(cfg.MODEL.OUTPUT_PATH, experiment_name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger, log_path = setup_logger('{}'.format(cfg.PROJECT.NAME), output_dir,
                                    experiment_name)
    # NOTE(review): this line logs only the project name despite the
    # "config" wording — confirm whether cfg was intended here.
    logger.info("Running with config:\n{}".format(cfg.PROJECT.NAME))

    logger.info("Using {} GPU".format(num_gpu))
    logger.info(args)
    # Echo the raw config file into the log for reproducibility.
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    # Restrict visible GPUs before any CUDA context is created.
    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train(cfg, experiment_name=experiment_name)

    # Curve plotting is best-effort: a failure here must not fail the run.
    try:
        logger.info("Drawing curve ......")
        plot_curve(log_path=log_path,
                   experiment_name=experiment_name,
                   output=output_dir)
        logger.info("The curve is saved in {}".format(output_dir))
    except Exception as e:
        print(e)
示例#23
0
def main():
    """Entry point for the AGW re-id baseline: assemble the config, set up
    logging and the device, build data/model, and optionally evaluate only."""
    parser = argparse.ArgumentParser(description="AGW Re-ID Baseline")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is exported by distributed launchers; fall back to one GPU.
    num_gpus = int(os.environ.get("WORLD_SIZE", 1))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        # Echo the raw config file into the log for reproducibility.
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            logger.info("\n" + cf.read())
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    data_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)

    if 'cpu' not in cfg.MODEL.DEVICE:
        # Wrap in DataParallel only when more than one GPU is visible.
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        model.to(device=cfg.MODEL.DEVICE)

    if cfg.TEST.EVALUATE_ONLY == 'on':
        logger.info("Evaluate Only")
        model.load_param(cfg.TEST.WEIGHT)
        do_test(cfg, model, data_loader, num_query)
        return
示例#24
0
def main():
    """Run the pre-selection stage: compute a candidate-gallery index for
    every query with a pretrained model and dump it as JSON."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is exported by distributed launchers; fall back to one GPU.
    num_gpus = int(os.environ.get("WORLD_SIZE", 1))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model_pre(cfg, num_classes)
    model.load_param(cfg.TEST.WEIGHT)

    device = cfg.MODEL.DEVICE

    # Evaluate the validation set once; the metric collects, per query, the
    # top-ranked gallery indices (up to max_rank).
    selection_metric = pre_selection_index(num_query,
                                           max_rank=100,
                                           feat_norm=cfg.TEST.FEAT_NORM)
    evaluator = create_supervised_evaluator(
        model,
        metrics={'pre_selection_index': selection_metric},
        device=device)
    evaluator.run(val_loader)

    index = evaluator.state.metrics['pre_selection_index']

    # Persist the selection index for the downstream ranking stage.
    with open(cfg.Pre_Index_DIR, 'w+') as f:
        json.dump(index.tolist(), f)

    print("Pre_Selection_Done")
示例#25
0
def main():
    """Train a classification model with an ArcFace margin head, resuming
    from the checkpoint at ``args.train_dir`` when one exists."""
    config.load_cfg_fom_args("Train a cls model.")
    cfg.freeze()
    # Module-level state assigned below; presumably also read/updated by the
    # training helpers (e.g. trainclassification) — TODO confirm.
    global args
    global min_loss
    global step
    args = parser.parse_args()
    cuda_gpu = torch.cuda.is_available()

    mytraindata = generalclsDataset(args.data_dir, cfg)
    mytrainloader = DataLoaderX(mytraindata,
                                batch_size=args.batch_size,
                                shuffle=True,
                                num_workers=0)
    mymodel = builGraph.getModel(args.backbone,
                                 args.classnum,
                                 args.gpu,
                                 'retrieval',
                                 cuda_gpu=cuda_gpu,
                                 pretrained=True)
    #mymodel=models.resnet50(pretrained=True).cuda()
    # SGD for 'gd', Adam otherwise; both use the same initial learning rate.
    if args.optimizer == 'gd':
        optimizer = torch.optim.SGD(mymodel.parameters(), lr=args.LR)
    else:
        optimizer = torch.optim.Adam(mymodel.parameters(), lr=args.LR)

    # ArcFace margin loss wrapped in DataParallel across args.gpu devices.
    Arcloss = torch.nn.DataParallel(ArcMarginLoss(
        args.classnum, in_features=OUTPUT_DIM[args.backbone]),
                                    device_ids=args.gpu).cuda()
    thisloss = nn.CrossEntropyLoss()
    startepoch = 0

    # Resume path: restore model / ArcFace head / optimizer state plus the
    # bookkeeping counters, then continue from the next epoch.
    if os.path.exists(args.train_dir):
        print(args.train_dir, flush=True)
        checkpoint = torch.load(args.train_dir, map_location='cpu')
        # NOTE(review): printing named_parameters() shows only a generator
        # object, not the weights — presumably leftover debugging output.
        print(mymodel.named_parameters(), flush=True)
        mymodel.load_state_dict(checkpoint['model_state_dict'])
        Arcloss.load_state_dict(checkpoint['arcface_state_dict'])
        print(mymodel.named_parameters(), flush=True)
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        startepoch = checkpoint['epoch'] + 1
        min_loss = checkpoint['loss']
        # 'step' may be absent from older checkpoints.
        if 'step' in checkpoint:
            step = checkpoint['step']

    for epoch in range(startepoch, args.maxepoch):
        # Adjust the learning rate before each epoch.
        _learning_rate_schedule(optimizer, epoch, args.maxepoch, args.LR)

        trainclassification(mymodel, epoch, optimizer, thisloss, mytrainloader,
                            Arcloss)
        #_learning_rate_schedule(optimizer,epoch,args.maxepoch,args.LR)
    #test(mymodel,mytrainloader)
    writer.close()
示例#26
0
def main():
    """Load a trained model (skipping its FC head) and run the aligned,
    flip-augmented inference pass over the test dataloader."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is exported by distributed launchers; fall back to one GPU.
    num_gpus = int(os.environ.get("WORLD_SIZE", 1))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.DATASETS.PRELOAD_IMAGE = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    # num_classes is irrelevant here (no classifier weights are loaded).
    model = build_model(cfg, 0)
    model = model.cuda()
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

    test_dataloader, num_query, _ = get_test_dataloader(cfg, test_phase=False)

    # Flip-augmented aligned inference; global features only, with re-ranking.
    inference_aligned_flipped(cfg,
                              model,
                              test_dataloader,
                              num_query,
                              use_local_feature=False,
                              use_rerank=True,
                              use_cross_feature=True)
示例#27
0
def main(args):
    """
    Main function for the script
    :param args: parsed command line arguments
    :return: None
    """

    from config import cfg as opt

    opt.merge_from_file(args.config)
    opt.freeze()

    print("Creating generator object ...")
    print(opt.model.gen)
    # create the generator object
    gen = Generator(resolution=opt.dataset.resolution,
                    num_channels=opt.dataset.channels,
                    structure=opt.structure,
                    **opt.model.gen)

    print("Loading the generator weights from:", args.generator_file)
    # load the weights into it
    gen = load(gen, args.generator_file)

    # path for saving the files:
    save_path = args.output_dir
    os.makedirs(save_path, exist_ok=True)
    latent_size = opt.model.gen.latent_size
    # depth index of the output resolution block (4x4 corresponds to depth 0)
    out_depth = int(np.log2(opt.dataset.resolution)) - 2

    if args.input is None:
        print("Generating scale synchronized images ...")
        for img_num in tqdm(range(1, args.num_samples + 1)):
            # generate the images:
            with torch.no_grad():
                point = torch.randn(1, latent_size)
                # project the latent onto the sphere of radius sqrt(latent_size)
                point = (point / point.norm()) * (latent_size ** 0.5)
                ss_image = gen(point, depth=out_depth, alpha=1)
                # color adjust the generated image:
                ss_image = adjust_dynamic_range(ss_image)

            # save the ss_image in the directory
            save_image(ss_image, os.path.join(save_path, str(img_num) + ".png"))

        print("Generated %d images at %s" % (args.num_samples, save_path))
    else:
        # Synthesize from a pre-computed dlatent code instead of sampling.
        code = np.load(args.input)
        dlatent_in = torch.unsqueeze(torch.from_numpy(code), 0)
        # BUG FIX: mirror the torch.no_grad() used in the sampling branch —
        # inference needs no autograd graph, so this avoids wasted memory.
        with torch.no_grad():
            ss_image = gen.g_synthesis(dlatent_in, depth=out_depth, alpha=1)
            # color adjust the generated image:
            ss_image = adjust_dynamic_range(ss_image)
        # NOTE(review): this branch saves to args.output while the sampling
        # branch uses args.output_dir — confirm both flags exist in the parser.
        save_image(ss_image, args.output)
示例#28
0
def main():
    """CLI entry point: assemble the config, prepare the log directory, and
    launch ReidSystem training with a TensorBoard writer attached."""
    parser = argparse.ArgumentParser(description="ReID Model Training")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        metavar="FILE",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()
    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    # Count GPUs from CUDA_VISIBLE_DEVICES; a single device '0' by default.
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", '0')
    num_gpus = len([int(g) for g in visible_devices.split(',')])

    cfg.freeze()

    # Logs go under OUTPUT_DIR/<joined-test-set-names>/<model-version>.
    log_save_dir = os.path.join(cfg.OUTPUT_DIR,
                                '-'.join(cfg.DATASETS.TEST_NAMES),
                                cfg.MODEL.VERSION)
    os.makedirs(log_save_dir, exist_ok=True)

    logger = setup_logger("reid_baseline.train", log_save_dir, 0)
    logger.info("Using {} GPUs.".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    logger.info('start training')
    cudnn.benchmark = True

    writer = SummaryWriter(os.path.join(log_save_dir, 'tf'))
    reid_system = ReidSystem(cfg, logger, writer)
    reid_system.train()
示例#29
0
def main():
    """Extract query and gallery features with a trained model and save them
    as .npy files for the feature-expansion experiments."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # WORLD_SIZE is exported by distributed launchers; fall back to one GPU.
    num_gpus = int(os.environ.get("WORLD_SIZE", 1))

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        # Echo the raw config file into the log for reproducibility.
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            logger.info("\n" + cf.read())
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT))
    model = model.cuda()
    model = model.eval()

    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")
    with torch.no_grad():
        qf, gf = extract_feature(model, val_loader, num_query)

    # Persist the query (qf) and gallery (gf) feature matrices.
    np.save('../data/feature_expansion/' + cfg.TEST.QF_NAME, qf.cpu().numpy())
    np.save('../data/feature_expansion/' + cfg.TEST.GF_NAME, gf.cpu().numpy())
    '''
示例#30
0
def main():
    """Load a trained model (without its FC head) and run the full
    inference/visualization pass over every configured test set."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # Infer GPU count from CUDA_VISIBLE_DEVICES (single device '0' default).
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", '0')
    num_gpus = len([int(g) for g in visible_devices.split(',')])

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrian = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    # num_classes of 0: the classifier head is not restored from the weights.
    model = build_model(cfg, 0)
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()

    print('prepare test set ...')
    test_dataloader_collection, num_query_collection, test_items_collection = get_test_dataloader(
        cfg)

    inference(cfg,
              model,
              test_dataloader_collection,
              num_query_collection,
              is_vis=True,
              test_collection=test_items_collection)