Exemple #1
0
def train(cfg):
    """Build data loaders, model, optimizer and warmup LR scheduler from
    ``cfg`` and hand everything to ``do_train``.

    Args:
        cfg: experiment configuration node (yacs-style).
    """
    # prepare dataset

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)
    if cfg.SOLVER.FINETUNE:
        # NOTE(review): this assumes cfg.TEST.WEIGHT stores a whole
        # DataParallel-wrapped model (hence ``.module``), not a plain
        # state dict — confirm against the code that saves the checkpoint.
        model.load_state_dict(torch.load(cfg.TEST.WEIGHT).module.state_dict())
    model = nn.DataParallel(model)


    optimizer = make_optimizer(cfg, model)
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                  cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)
    # scheduler = WarmupStepLR(optimizer,3, 9, cfg.SOLVER.WARMUP_FACTOR,
    #                               cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)

    loss_func = make_loss(cfg)

    # NOTE(review): `arguments` is never used after this point.
    arguments = {}

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,
        loss_func,
        num_query
    )
Exemple #2
0
def main():
    """Entry point: prepare the output directory and logging, then build
    data, model, optimizer and scheduler and run training."""
    out_path = cfg.MODEL.OUTPUT_DIR
    if out_path and not os.path.exists(out_path):
        os.makedirs(out_path)

    log = setup_logger("tracker", out_path, 0)
    log.info("Running with config:\n{}".format(cfg))
    torch.backends.cudnn.benchmark = True

    dl_train, dl_val = make_data_loader(cfg)

    net = build_model(cfg)

    opt = make_optimizer(cfg, net)
    lr_sched = WarmupMultiStepLR(
        opt,
        cfg.SOLVER.STEPS,
        cfg.SOLVER.GAMMA,
        cfg.SOLVER.WARMUP_FACTOR,
        cfg.SOLVER.WARMUP_ITERS,
        cfg.SOLVER.WARMUP_METHOD,
    )

    criterion = make_loss(cfg)

    do_train(cfg, net, dl_train, dl_val, opt, lr_sched, criterion)
Exemple #3
0
def main(cfg):
    """Extract features for every dataset image, compute pairwise distances,
    and upload the results to storage and the database.

    Args:
        cfg: nested dict with at least ``datasets``, ``dataloader`` and
            ``model`` sections.
    """
    # data loader
    print("start DataLoader")
    print(f"dataset dir: {cfg['datasets']['dir_path']}")
    dataset = ImageDataset(cfg)
    data_loader = DataLoader(dataset,
                             batch_size=cfg["dataloader"]["batch_size"],
                             shuffle=False,
                             num_workers=cfg["dataloader"]["num_workers"],
                             collate_fn=dataloader_collate_fn)
    print(f"There are {len(dataset)} images to be processed")

    # feature extractor model
    print("start initialize model")
    model = build_model(cfg)
    model.load_param(cfg["model"]["weight"])
    # BUG FIX: the CPU fallback branch referenced the undefined name
    # `img_batch`; on a host without CUDA the model itself must be kept.
    model = model.cuda(0) if torch.cuda.device_count() >= 1 else model
    model.eval()

    # Free each large intermediate as soon as the next stage no longer
    # needs it, to keep peak memory down.
    feature_bank = process_featerbank(data_loader, dataset, model, cfg)
    img_paths = list(feature_bank.keys())
    distance_mat = process_distancing(feature_bank, cfg)
    del feature_bank
    upload_to_storage(img_paths, cfg)
    argsorted_distance = process_argsort(distance_mat, cfg)
    del distance_mat
    upload_to_database(argsorted_distance, img_paths, cfg)
    del argsorted_distance
    print("processing has been completed")
Exemple #4
0
def train(cfg, logger):
    """Train with an EMA shadow model, optionally warm-starting from
    CMIP-pretrained weights and resuming from the last checkpoint.

    Args:
        cfg: experiment configuration node.
        logger: logger handed through to the Fitter.
    """
    seed_everything(cfg.SEED)
    model = build_model(cfg)
    if cfg.SOLVER.TRAIN_SODA and cfg.MODEL.PRETRAINED_CMIP != '':
        model.load_state_dict(
            torch.load(cfg.MODEL.PRETRAINED_CMIP)['ema_state_dict'])
        # Freeze the stem and first stage when fine-tuning from CMIP weights.
        for k, v in model.named_parameters():
            if k.startswith('model.conv1') or k.startswith(
                    'model.bn1') or k.startswith('model.layer1'):
                v.requires_grad = False
    ema = ModelEMA(model)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    check = cfg.SOLVER.TRAIN_CHECKPOINT

    train_loader, val_loader = make_data_loader(cfg, is_train=True)

    fitter = Fitter(model=model,
                    ema=ema,
                    device=device,
                    cfg=cfg,
                    train_loader=train_loader,
                    val_loader=val_loader,
                    logger=logger)
    if check:
        # Removed an unused `curPath` computation; the checkpoint path is
        # built from cfg.OUTPUT_DIR directly.
        fitter.load(f'{cfg.OUTPUT_DIR}/last-checkpoint.bin')
    fitter.fit()
 def __init__(self, cfg, logger, tng_loader, val_loader, num_classes, num_query):
     """Store the training context and build the ReID model and losses.

     Args:
         cfg: experiment configuration node.
         logger: logger instance.
         tng_loader: training data loader.
         val_loader: validation data loader.
         num_classes: number of identity classes.
         num_query: number of query samples in the validation set.
     """
     super().__init__()
     # Define networks
     self.cfg,self.logger,self.tng_loader,self.val_loader,self.num_classes,self.num_query = \
         cfg,logger,tng_loader,val_loader,num_classes,num_query
     self.model = build_model(cfg, num_classes)
     self.loss_fns = reidLoss(cfg.SOLVER.LOSSTYPE, cfg.SOLVER.MARGIN, num_classes)
Exemple #6
0
def train(cfg):
    """Wire up model, loss, optimizer, data and checkpointing, then run
    the training loop via ``do_train``."""
    log = setup_logger(name='Train', level=cfg.LOGGER.LEVEL)
    log.info(cfg)

    net = build_model(cfg)
    dev = torch.device(cfg.MODEL.DEVICE)
    net.to(dev)

    loss_fn = build_loss(cfg)

    opt = build_optimizer(cfg, net)
    lr_sched = build_lr_scheduler(cfg, opt)

    loader_train = build_data(cfg, is_train=True)
    loader_val = build_data(cfg, is_train=False)

    log.info(loader_train.dataset)
    log.info(loader_val.dataset)

    # Mutable training state shared with the train loop.
    arguments = {"iteration": 0}

    period = cfg.SOLVER.CHECKPOINT_PERIOD
    saver = Checkpointer(net, opt, lr_sched, cfg.SAVE_DIR)

    do_train(cfg, net, loader_train, loader_val, opt, lr_sched, loss_fn,
             saver, dev, period, arguments, log)
Exemple #7
0
 def __init__(self, cfg, num_classes):
     """Build model, loss, optimizer and warmup LR scheduler; optionally
     wrap model/optimizer with NVIDIA apex mixed precision.

     Args:
         cfg: experiment configuration node.
         num_classes: number of identity classes for the classifier head.
     """
     self.device = cfg.MODEL.DEVICE
     self.model = build_model(cfg, num_classes)
     self.loss = Loss(cfg, num_classes, self.model.in_planes)
     self.optimizer = make_optimizer(cfg, self.model)
     self.scheduler = WarmupMultiStepLR(self.optimizer,
                                        cfg.WARMUP.STEPS,
                                        cfg.WARMUP.GAMMA,
                                        cfg.WARMUP.FACTOR,
                                        cfg.WARMUP.MAX_EPOCHS,
                                        cfg.WARMUP.METHOD)
     if cfg.APEX.IF_ON:
         logger.info("Using apex")
         try:
             import apex
         except ImportError:
             raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
         assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
     # BUG FIX: the original used `self.device is 'cuda'`, which compares
     # object identity and may be False even when the string equals 'cuda';
     # string values must be compared with `==`.
     if self.device == 'cuda':
         self.model = self.model.cuda()
         if cfg.APEX.IF_ON:
             from apex import amp
             self.model, self.optimizer = amp.initialize(self.model,
                                                         self.optimizer,
                                                         opt_level=cfg.APEX.OPT_LEVEL,
                                                         keep_batchnorm_fp32=None if cfg.APEX.OPT_LEVEL == 'O1' else True,
                                                         loss_scale=cfg.APEX.LOSS_SCALE[0])
def main():
    """Extract a feature vector for every image in the input folder and
    save each one as a ``.npy`` file in the output folder."""
    parser = argparse.ArgumentParser(description='cityAI Vehicle ReID')
    parser.add_argument('--input', default="../input", help='folder to the input image')
    parser.add_argument('--output', default="../output", help='folder where the output featurevectors should be saved')
    parser.add_argument('--config', default="../config.yaml", help='path to config file')
    parser.add_argument('--weights', default="../weights.pth", help='path to model weights')
    args = parser.parse_args()

    if not os.path.exists(args.output):
        os.makedirs(args.output)

    cfg = wandb_config.Config(args.config)
    device = cfg['MODEL.DEVICE']

    net = build_model(cfg, num_classes=cfg['DATASETS.NUM_CLASSES'])
    net.load_param(args.weights)
    net.to(device)
    net.eval()

    aug = Augmenation(cfg=cfg)

    with torch.no_grad():
        for path in glob.glob(os.path.join(args.input, '*')):
            image = aug.read_and_augment_image(path)
            if torch.cuda.device_count() >= 1:
                image = image.to(device)
            feature = net(image.unsqueeze(dim=0))
            stem = os.path.basename(path).split('.')[0]
            np.save(os.path.join(args.output, stem + '.npy'),
                    np.array(feature[0].cpu()))
Exemple #9
0
def train(cfg, logger):
    """Assemble model, losses, optimizer and data loaders, then delegate
    the actual loop to ``do_train``."""
    net = build_model(cfg)
    device = cfg.MODEL.DEVICE

    opt, schedule = make_optimizer(cfg, net)
    metric_head = None  # no metric-learning head is used here
    primary_loss = get_loss_fn(cfg, logger)
    logger.info("----------------------------------------------")
    loader_train = make_data_loader(cfg, is_train=True)
    loader_val = make_data_loader(cfg, is_train=False)

    # Auxiliary regression criterion.
    secondary_loss = torch.nn.MSELoss()

    do_train(
        cfg,
        net,
        metric_head,
        loader_train,
        loader_val,
        opt,
        schedule,
        primary_loss,
        secondary_loss,
        logger,
    )
Exemple #10
0
    def build_teacher_model(cls, cfg):
        """Build, load and freeze the teacher model(s) for knowledge
        distillation.

        Args:
            cfg: student config; teacher config paths and weight paths come
                from ``cfg.MODEL.CENTERNET.KD``.

        Returns:
            list: frozen, eval-mode teacher models, one per config/weight pair.
        """
        teacher_cfgs = cfg.MODEL.CENTERNET.KD.TEACHER_CFG
        # NOTE(review): 'TEACHER_WEIGTHS' is misspelled in the config schema;
        # kept as-is to match the config definition.
        teacher_weights = cfg.MODEL.CENTERNET.KD.TEACHER_WEIGTHS
        assert len(teacher_cfgs) == len(teacher_weights)
        model_ts = []

        for t_cfg,t_weight in zip(teacher_cfgs, teacher_weights):
            # Each teacher gets its own fresh detectron2 config.
            teacher_cfg = get_cfg()
            add_centernet_config(teacher_cfg)
            teacher_cfg.merge_from_file(t_cfg)

            model_t = build_model(teacher_cfg)
            # Teachers are never trained: freeze all parameters.
            for param in model_t.parameters():
                param.requires_grad = False

            # Load pre-trained teacher model
            logger = logging.getLogger("detectron2")
            logger.info("Loading teacher model ...")
            DetectionCheckpointer(model_t).load(t_weight)

            # Load pre-trained student model
            # logger.info("Loading student model ...")
            # Checkpointer(self.model, self.data_loader.dataset).load(cfg.MODEL.STUDENT_WEIGHTS)

            # Keep BatchNorm layers in eval mode so running stats stay fixed.
            def set_bn_eval(m):
                classname = m.__class__.__name__
                if classname.find('BatchNorm') != -1:
                    m.eval()

            model_t.apply(set_bn_eval)
            model_t.eval()

            model_ts.append(model_t)

        return model_ts
Exemple #11
0
def main():
    """CLI entry point: load config, set up logging and CUDA, then run
    inference either on every saved 'resnet50_model*' checkpoint in the
    weight directory or on the single cfg.TEST.WEIGHT file."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    # NOTE(review): a positional argument combined with store_false consumes
    # no tokens, so args.flag appears to be False on every invocation and the
    # multi-model branch below looks unreachable — confirm intent.
    parser.add_argument("flag",
                        action='store_false',
                        help="whether to test multiple models")
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # GPU count under torch.distributed launch; defaults to 1.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    if args.flag:
        # Evaluate every checkpoint named 'resnet50_model*' found under the
        # directory that contains cfg.TEST.WEIGHT.
        path, _ = os.path.split(cfg.TEST.WEIGHT)
        model_list = []
        for root, dirs, files in os.walk(path):
            for i in files:
                if i.startswith('resnet50_model'):
                    model_list.append(i)
        for i in model_list:
            print(i)
            model.load_param(os.path.join(path, i))
            inference(cfg, model, val_loader, num_query)
    else:
        model.load_param(cfg.TEST.WEIGHT)
        inference(cfg, model, val_loader, num_query)
Exemple #12
0
def train(cfg):
    """Train a ReID model, with or without center loss, plus a clustering
    loader for pseudo-label refinement.

    Args:
        cfg: experiment configuration node; cfg.MODEL.IF_WITH_CENTER selects
            the center-loss variant, cfg.MODEL.PRETRAIN_CHOICE the warmup.
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, clustering_loader = make_data_loader(
        cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'on':
        # Separate center criteria for part / global / foreground branches,
        # each with its own optimizer alongside the model optimizer.
        loss_func, center_criterion_part, center_criterion_global, center_criterion_fore = make_loss_with_center(
            cfg, num_classes)
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion_part, center_criterion_global,
            center_criterion_fore)
    else:
        loss_func = make_loss(cfg, num_classes)
        optimizer = make_optimizer(cfg, model)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        # NOTE(review): this branch leaves start_epoch/scheduler undefined,
        # so the do_train* call below would raise NameError — confirm whether
        # an unsupported choice should abort instead of just printing.
        print('Only support pretrain_choice for imagenet, but got {}'.format(
            cfg.MODEL.PRETRAIN_CHOICE))

    if cfg.MODEL.IF_WITH_CENTER == 'on':
        do_train_with_center(
            cfg,
            model,
            center_criterion_part,
            center_criterion_global,
            center_criterion_fore,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,  # add for using self trained model
            clustering_loader)
    else:
        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,  # add for using self trained model
            clustering_loader)
Exemple #13
0
def train(cfg):
    """Train a ReID model on CamStyle-augmented data.

    Restores training state according to cfg.MODEL.PRETRAIN_CHOICE
    ('resume' | 'self' | 'imagenet'), then runs ``do_train``.
    """

    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # prepare camstyle dataset
    train_loader, train_camstyle_loader, val_loader, num_query, num_classes = make_camstyle_data_loader(
        cfg)
    # NOTE(review): appending -1 presumably marks the camstyle domain with a
    # placeholder class count — confirm against build_model's expectations.
    num_classes.append(-1)

    # prepare model
    model = build_model(cfg, num_classes)

    optimizer, _ = make_optimizer(cfg, model)
    loss_fn = make_loss(cfg, num_classes)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'resume':
        # Recover the epoch number from the checkpoint filename
        # (e.g. '.../model_120.pth' -> 120).
        start_epoch = eval(
            cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')
            [-1])
        logger.info('Start epoch:%d' % start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
            'model', 'optimizer')
        logger.info('Path to the checkpoint of optimizer:%s' %
                    path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch)
    elif cfg.MODEL.PRETRAIN_CHOICE == 'self' or cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        model.load_param(cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.PRETRAIN_CHOICE)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        # NOTE(review): this branch leaves start_epoch/scheduler undefined, so
        # the do_train call below would raise NameError — confirm whether an
        # unsupported choice should abort instead of just logging.
        logger.info(
            'Only support pretrain_choice for imagenet and self, but got {}'.
            format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,  # modify for using self trained model
        loss_fn,
        num_query,
        start_epoch,  # add for using self trained model
        0,
        train_camstyle_loader)
Exemple #14
0
    def __init__(self, cfg):
        """Build the model, load weights without the FC head, and compute
        the distance matrix immediately.

        Args:
            cfg: experiment configuration node; cfg.TEST.WEIGHT points at
                the checkpoint to load.
        """
        self.cfg = cfg
        # num_classes=0: the classifier head is discarded anyway.
        self.model = build_model(cfg, 0)
        self.tng_dataloader, self.val_dataloader, self.num_classes, self.num_query = get_dataloader(
            cfg)
        self.model = self.model.cuda()
        self.model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

        self.get_distmat()
Exemple #15
0
def train(cfg):
    """Train a ReID model with the triplet metric loss.

    Restores model/optimizer state for PRETRAIN_CHOICE == 'self', starts
    fresh for 'imagenet', then runs ``do_train``.
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    # Report the parameter count in millions.
    total = sum([param.nelement() for param in model.parameters()])
    print("Number of parameter: %.2fM" % (total / 1e6))

    # NOTE(review): only 'triplet' is handled; any other METRIC_LOSS_TYPE
    # makes this function a no-op after the parameter count — confirm intent.
    if cfg.MODEL.METRIC_LOSS_TYPE == 'triplet':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        loss_func = make_loss(cfg, num_classes)

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Recover the epoch number from the checkpoint filename
            # (e.g. '.../model_120.pth' -> 120).
            start_epoch = eval(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')
                [-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD,
                                          start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            # NOTE(review): this branch leaves start_epoch/scheduler
            # undefined, so do_train below would raise NameError.
            print(
                'Only support pretrain_choice for imagenet and self, but got {}'
                .format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch  # add for using self trained model
        )
Exemple #16
0
def main():
    """Load the test checkpoint into the model and run validation once."""
    torch.backends.cudnn.benchmark = True

    _train_dl, val_dl = make_data_loader(cfg)

    net = build_model(cfg)
    state = torch.load(cfg.MODEL.TEST_MODEL)
    net.load_state_dict(state)
    do_validate(cfg, net, val_dl)
Exemple #17
0
 def build_model(cls, cfg):
     """
     Returns:
         torch.nn.Module:
     Delegates to the module-level :func:`detectron2.modeling.build_model`;
     override this classmethod to plug in a different model.
     """
     net = build_model(cfg)
     logging.getLogger(__name__).info("Model:\n{}".format(net))
     return net
Exemple #18
0
def main():
    """Evaluate the best checkpoint of each given W&B run and report
    mAP and CMC (Rank-1/5/10), per run and averaged."""
    parser = argparse.ArgumentParser(description='cityAI Vehicle ReID')
    parser.add_argument('-u', '--user', help='username', default='corner')
    parser.add_argument('-p',
                        '--project',
                        help='project name',
                        default='cityai2019')
    parser.add_argument('-r',
                        '--run_id',
                        nargs='+',
                        help='list of run ides, use -r xxx xyxy ...',
                        # BUG FIX: with nargs='+' the default must be a list;
                        # a bare string default is iterated character by
                        # character in the loop below.
                        default=['6qpihpn8'])
    args = parser.parse_args()

    api = wandb.Api()
    runs = []
    for run_id in args.run_id:
        runs.append(api.run(args.user + '/' + args.project + '/' + run_id))

    print(
        'Das Skript nimmt die besten Models jedes runs und berechnet die mAP, Rank-1 usw..'
    )

    cmcs = []
    mAPs = []
    for run in runs:
        if run.state != "finished":
            print("training didn't finish yet")

        cfg = copy.deepcopy(run.config)
        mAP_best = run.summary['mAP_best']
        epoch_best = run.summary['epoch_best']
        fold = 1  #cfg['fold']
        train_loader, val_loader, num_query, num_classes = make_data_loader(
            cfg, fold)
        model = build_model(cfg, num_classes)
        # Checkpoint naming convention: <MODEL.NAME>_model_<epoch>.pth
        weights_path = os.path.join(
            cfg['OUTPUT_DIR'],
            cfg['MODEL.NAME'] + '_model_' + str(epoch_best) + '.pth')
        model.load_param(weights_path)
        cmc, mAP = predict(cfg, model, val_loader, num_query)
        cmcs.append(cmc)
        mAPs.append(mAP)

    # Per-run report, then the average over all runs.
    for run_id, cmc, mAP in zip(args.run_id, cmcs, mAPs):
        print('=======')
        print(run_id)
        print("mAP: {:.2%}".format(mAP))
        for r in [1, 5, 10]:
            print("CMC curve, Rank-{:<3}:{:.2%}".format(r, cmc[r - 1]))
    print('')
    print('mAP, Average: {:.2%}'.format(np.mean(mAPs)))
    for r in [1, 5, 10]:
        print("Rank-{:<3}:{:.2%}".format(r, np.mean(np.array(cmcs)[:, r - 1])))
Exemple #19
0
 def get_component(self, cfg, num_classes):
     """Instantiate model, loss, optimizer and warmup LR scheduler onto self.

     Args:
         cfg: experiment configuration node.
         num_classes: number of identity classes for the classifier head.
     """
     self.device = cfg.MODEL.DEVICE
     self.model = build_model(cfg, num_classes)
     self.loss = Loss(cfg, num_classes, self.model.in_planes)
     self.optimizer = make_optimizer(cfg, self.model)
     self.scheduler = WarmupMultiStepLR(self.optimizer,
                                        cfg.WARMUP.STEPS,
                                        cfg.WARMUP.GAMMA,
                                        cfg.WARMUP.FACTOR,
                                        cfg.WARMUP.MAX_EPOCHS,
                                        cfg.WARMUP.METHOD)
Exemple #20
0
def main():
    """CLI entry point for the AGW ReID baseline: parse config, set up
    logging and CUDA, build data/model, and run test-only evaluation when
    cfg.TEST.EVALUATE_ONLY == 'on' (otherwise the function simply returns)."""
    parser = argparse.ArgumentParser(description="AGW Re-ID Baseline")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # GPU count under torch.distributed launch; defaults to 1.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ[
            'CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID  # new add by gu
    cudnn.benchmark = True

    data_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)

    if 'cpu' not in cfg.MODEL.DEVICE:
        # Wrap in DataParallel only when more than one GPU is visible.
        if torch.cuda.device_count() > 1:
            model = torch.nn.DataParallel(model)
        model.to(device=cfg.MODEL.DEVICE)

    if cfg.TEST.EVALUATE_ONLY == 'on':
        logger.info("Evaluate Only")
        model.load_param(cfg.TEST.WEIGHT)
        do_test(cfg, model, data_loader, num_query)
        return
Exemple #21
0
def train(cfg, logger):
    """Seed, build the model, and run the Fitter training loop, optionally
    resuming from the last saved checkpoint."""
    seed_everything(cfg.SEED)
    net = build_model(cfg)
    dev = cfg.MODEL.DEVICE
    resume = cfg.SOLVER.TRAIN_CHECKPOINT

    dl_train, dl_val = make_data_loader(cfg, is_train=True)

    fitter = Fitter(model=net,
                    device=dev,
                    cfg=cfg,
                    train_loader=dl_train,
                    val_loader=dl_val,
                    logger=logger)
    if resume:
        fitter.load(f'{cfg.OUTPUT_DIR}/last-checkpoint.bin')
    fitter.fit()
Exemple #22
0
def main():
    """CLI entry point: build the model with pretraining disabled, load test
    weights (without the FC head), and run flipped/aligned inference on the
    test dataloader."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # GPU count under torch.distributed launch; defaults to 1.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # set pretrain = False to avoid loading weight repeatedly
    cfg.MODEL.PRETRAIN = False
    cfg.DATASETS.PRELOAD_IMAGE = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    # num_classes=0: the classifier head is not loaded anyway.
    model = build_model(cfg, 0)
    #print('model', model)
    model = model.cuda()
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))

    test_dataloader, num_query, _ = get_test_dataloader(cfg, test_phase=False)

    # Alternative inference variants kept for reference:
    #inference_no_rerank(cfg, model, test_dataloader, num_query)
    #inference(cfg, model, test_dataloader, num_query)
    #inference_aligned(cfg, model, test_dataloader, num_query) # using flipped image
    inference_aligned_flipped(cfg,
                              model,
                              test_dataloader,
                              num_query,
                              use_local_feature=False,
                              use_rerank=True,
                              use_cross_feature=True)
Exemple #23
0
def main():
    """CLI entry point: load config and test weights, extract query/gallery
    features from the validation loader, and save them as .npy files for
    later feature expansion."""
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument("--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # GPU count under torch.distributed launch; defaults to 1.
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    model = build_model(cfg, num_classes)
    model.load_state_dict(torch.load(cfg.TEST.WEIGHT))
    model = model.cuda()
    model = model.eval()

    # Reuse the inference sub-logger for the extraction phase.
    logger = logging.getLogger("reid_baseline.inference")
    logger.info("Start inferencing")
    with torch.no_grad():
        # First num_query items are queries, the rest form the gallery.
        qf, gf = extract_feature(model, val_loader, num_query)

    # save feature
    np.save('../data/feature_expansion/' + cfg.TEST.QF_NAME, qf.cpu().numpy())
    np.save('../data/feature_expansion/' + cfg.TEST.GF_NAME, gf.cpu().numpy())
    '''
Exemple #24
0
def init_extractor(cfg, num_classes=8368):
    """Build an eval-mode feature extractor loaded with cfg.TEST.WEIGHT.

    Pass ``num_classes=None`` to infer the class count from the dataset
    instead of using the default.
    """
    cudnn.benchmark = True
    if num_classes is None:
        # Derive the identity count from the training split.
        num_classes = init_dataset(cfg.DATASETS.NAMES,
                                   root=cfg.DATASETS.ROOT_DIR).num_train_pids

    extractor = build_model(cfg, num_classes)
    extractor.load_param(cfg.TEST.WEIGHT)
    extractor.eval()
    return extractor
Exemple #25
0
def main(config):
    """Train a text-recognition model (CTC or attention head) described by
    ``config``.

    Args:
        config: nested dict with 'dataset', 'arch' and 'loss' sections;
            mutated in place to fill in derived values (alphabet contents,
            input channels, class count).
    """
    import torch

    from modeling import build_model, build_loss
    from data_loader import get_dataloader
    from trainer import Trainer
    from utils import CTCLabelConverter, AttnLabelConverter, load
    # If 'alphabet' points at a file, read the character set from it.
    if os.path.isfile(config['dataset']['alphabet']):
        config['dataset']['alphabet'] = ''.join(
            load(config['dataset']['alphabet']))

    prediction_type = config['arch']['head']['type']

    # loss setup
    criterion = build_loss(config['loss'])
    if prediction_type == 'CTC':
        converter = CTCLabelConverter(config['dataset']['alphabet'])
    elif prediction_type == 'Attn':
        converter = AttnLabelConverter(config['dataset']['alphabet'])
    else:
        raise NotImplementedError
    # Grayscale input uses a single channel, otherwise three.
    img_channel = 3 if config['dataset']['train']['dataset']['args'][
        'img_mode'] != 'GRAY' else 1
    config['arch']['backbone']['in_channels'] = img_channel
    config['arch']['head']['n_class'] = len(converter.character)
    # model = get_model(img_channel, len(converter.character), config['arch']['args'])
    model = build_model(config['arch'])
    # Default input size; overridden by any 'Resize' pre-process step below.
    img_h, img_w = 32, 100
    for process in config['dataset']['train']['dataset']['args'][
            'pre_processes']:
        if process['type'] == "Resize":
            img_h = process['args']['img_h']
            img_w = process['args']['img_w']
            break
    # Probe the model with a dummy batch to determine the max label length.
    sample_input = torch.zeros((2, img_channel, img_h, img_w))
    num_label = model.get_batch_max_length(sample_input)
    train_loader = get_dataloader(config['dataset']['train'], num_label)
    assert train_loader is not None
    # A validation loader is optional; only build it when a data path is set.
    if 'validate' in config['dataset'] and config['dataset']['validate'][
            'dataset']['args']['data_path'][0] is not None:
        validate_loader = get_dataloader(config['dataset']['validate'],
                                         num_label)
    else:
        validate_loader = None

    trainer = Trainer(config=config,
                      model=model,
                      criterion=criterion,
                      train_loader=train_loader,
                      validate_loader=validate_loader,
                      sample_input=sample_input,
                      converter=converter)
    trainer.train()
Exemple #26
0
def main():
    """Entry point for ReID baseline inference.

    Parses CLI arguments, assembles the frozen config, restores the model
    weights (without the FC head), and runs `inference` over every test set.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument('-cfg',
                        "--config_file",
                        default="",
                        help="path to config file",
                        type=str)
    parser.add_argument("opts",
                        help="Modify config options using the command-line",
                        default=None,
                        nargs=argparse.REMAINDER)
    args = parser.parse_args()

    # GPU ids come from the environment; default to a single GPU "0".
    visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", '0')
    device_ids = [int(tok) for tok in visible_devices.split(',')]
    num_gpus = len(device_ids)

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Disable pretraining so the backbone weights are not loaded twice.
    cfg.MODEL.PRETRAIN = False
    cfg.freeze()

    logger = setup_logger("reid_baseline", False, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
    logger.info("Running with config:\n{}".format(cfg))

    cudnn.benchmark = True

    model = build_model(cfg, 0)
    model.load_params_wo_fc(torch.load(cfg.TEST.WEIGHT))
    if num_gpus > 1:
        model = nn.DataParallel(model)
    model = model.cuda()

    print('prepare test set ...')
    loader_collection, query_collection, item_collection = get_test_dataloader(
        cfg)

    inference(cfg,
              model,
              loader_collection,
              query_collection,
              is_vis=True,
              test_collection=item_collection)
Exemple #27
0
def train(cfg):
    """Build the model, optimizer and data loaders from *cfg*, then train.

    Training uses plain cross-entropy and no LR scheduler: `do_train` is
    called without one, so the unused locals the original assigned
    (`device`, `scheduler`, `arguments`) have been removed.
    """
    model = build_model(cfg)

    optimizer = make_optimizer(cfg, model)

    train_loader = make_data_loader(cfg, is_train=True)
    val_loader = make_data_loader(cfg, is_train=False)

    do_train(cfg, model, train_loader, val_loader, optimizer, F.cross_entropy)
Exemple #28
0
def test(cfg):
    """Run the trained classifier over the test split and print a
    `(prediction, label)` pair for every sample.

    Fixes vs. the original: ``model.train(False)`` is set once before the
    loop instead of every iteration, the deprecated ``Variable`` wrapper is
    dropped (tensors and Variables merged in torch >= 0.4), and dead locals
    (``score`` and the never-read ``cat`` string, which was also undefined
    for predictions outside {0, 1}) are removed.
    """
    # transform
    transform_test_list = [
        transforms.Resize(size=cfg.INPUT.SIZE_TEST, interpolation=3),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]

    # prepare dataset
    # NOTE(review): `root` is not defined in this function — presumably a
    # module-level constant; confirm against the surrounding file.
    test_dataset = MyDataset(root=root,
                             transform=transforms.Compose(transform_test_list),
                             type='test')
    # NOTE(review): shuffle=True on a test loader is unusual — confirm intent
    # before changing it; kept to preserve behavior.
    test_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=True,
                             num_workers=8,
                             pin_memory=False)
    num_classes = cfg.MODEL.HEADS.NUM_CLASSES

    # prepare model
    def load_network(network):
        """Load weights from cfg.LOAD_FROM into `network`, accepting both a
        raw state dict and a checkpoint wrapped under a 'model' key."""
        checkpoint = torch.load(cfg.LOAD_FROM)
        state = checkpoint['model'] if 'model' in checkpoint else checkpoint
        network.load_state_dict(state)
        return network

    model = build_model(cfg, num_classes)
    model = load_network(model)
    model = model.cuda()
    model.train(False)  # evaluation mode; was re-set on every batch

    for inputs, labels in test_loader:
        inputs = inputs.cuda()
        with torch.no_grad():
            out = model(inputs)
            _, preds = torch.max(out['pred_class_logits'], 1)
            print(preds.cpu().numpy().item(), labels.cpu().numpy().item())

    return
Exemple #29
0
def train(cfg, cfg_hr):
    """Train a ReID model, with or without center loss.

    Branches on ``cfg.MODEL.IF_WITH_CENTER``: 'no' trains with the plain
    metric loss via `do_train`, 'yes' adds a center-loss criterion and a
    second optimizer via `do_train_with_center`; any other value is
    reported as unsupported.

    Fix vs. the original: the unused ``arguments = {}`` locals in both
    branches are removed.
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    # prepare model
    model = build_model(cfg, cfg_hr, num_classes)
    model = nn.DataParallel(model)
    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        print(cfg.SOLVER.MARGIN)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)

        loss_func = make_loss(cfg, num_classes)     # modified by gu

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,
            loss_func,
            num_query
        )
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(cfg, num_classes)  # modified by gu
        # Center loss needs its own optimizer for the class-center parameters.
        optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,
            loss_func,
            num_query
        )
    else:
        print("Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n".format(cfg.MODEL.IF_WITH_CENTER))
Exemple #30
0
def main():
    """Entry point for ReID baseline inference on the two-branch model.

    Parses CLI arguments, builds the frozen config and output directory,
    constructs the model for both label spaces, restores weights from
    ``cfg.MODEL.PRETRAIN_PATH2`` and runs `inference` on the validation set.

    Fix vs. the original: the commented-out alternative loading path was
    dead code and has been deleted.
    """
    parser = argparse.ArgumentParser(description="ReID Baseline Inference")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line", default=None,
                        nargs=argparse.REMAINDER)

    args = parser.parse_args()

    # WORLD_SIZE is set by torch.distributed launchers; default to 1 GPU.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        mkdir(output_dir)

    logger = setup_logger("reid_baseline", output_dir, 0)
    logger.info("Using {} GPUS".format(num_gpus))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DEVICE == "cuda":
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True

    train_loader, val_loader, num_query, num_classes, num_classes2, image_map_label2 = make_data_loader(cfg)
    model = build_model(cfg, num_classes, num_classes2)
    print('--- resume from ', cfg.MODEL.PRETRAIN_PATH2)
    if cfg.MODEL.ONCE_LOAD == 'yes':
        print('\n---ONCE_LOAD...\n')
        # map_location keeps the load on CPU regardless of where it was saved.
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH2, map_location=lambda storage, loc: storage))
    else:
        functions.load_state_dict(model, cfg.MODEL.PRETRAIN_PATH2, cfg.MODEL.ONLY_BASE, cfg.MODEL.WITHOUT_FC)

    inference(cfg, model, val_loader, num_query)