# Project helpers (setup_logger, the data-loader/model/loss/optimizer factories,
# WarmupMultiStepLR, do_train, validator) come from the repo's own modules.
import numpy as np
import torch


def train(cfg):
    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # Prepare the datasets: validation loader plus the labeled source set.
    val_data_loader, num_query = make_val_data_loader(cfg)
    # One class count per data source; -1 marks "not set yet".
    num_classes = np.full(len(cfg.DATALOADER.SAMPLER_PROB), -1, dtype=int)
    source_dataset = init_dataset(cfg.SRC_DATA.NAMES,
                                  root_train=cfg.SRC_DATA.TRAIN_DIR,
                                  transfered=cfg.SRC_DATA.TRANSFERED)
    num_classes[0] = source_dataset.num_train_pids
    # Pseudo-label classes for the unsupervised target data.
    num_classes[1] = cfg.TGT_UNSUPDATA.CLUSTER_TOPK
    if cfg.MODEL.FINETUNE:
        num_classes[1] += 200

    # Prepare the model, optimizer, and loss.
    model = build_model(cfg, num_classes)
    optimizer, fixed_lr_idxs = make_optimizer(cfg, model)
    loss_fn = make_loss(cfg, num_classes)

    if cfg.MODEL.PRETRAIN_CHOICE == 'resume':
        # Resume from a self-trained checkpoint; the epoch is parsed from a
        # filename of the form <prefix>_model_<epoch>.pth, with the optimizer
        # state stored alongside as <prefix>_optimizer_<epoch>.pth.
        start_epoch = int(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        logger.info('Start epoch: %d' % start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
        logger.info('Path to the checkpoint of optimizer: %s' % path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch, fixed_lr_idxs)
        # do_train needs the camera model in every branch, so build and load
        # it on the resume path as well.
        camera_model = build_camera_model(cfg, num_classes=5)
        camera_model.load_param(cfg.TEST.CAMERA_WEIGHT, 'self')
    elif cfg.MODEL.PRETRAIN_CHOICE in ('self', 'imagenet'):
        start_epoch = 0
        model.load_param(cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.PRETRAIN_CHOICE)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, -1, fixed_lr_idxs)
        camera_model = build_camera_model(cfg, num_classes=5)
        camera_model.load_param(cfg.TEST.CAMERA_WEIGHT, cfg.MODEL.PRETRAIN_CHOICE)
    else:
        raise ValueError('Only support pretrain_choice of resume, self and imagenet, '
                         'but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(cfg, model, camera_model, val_data_loader, optimizer, scheduler,
             loss_fn, num_query, start_epoch, 0)
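# WarmupMultiStepLR is constructed above with (optimizer, STEPS, GAMMA,
# WARMUP_FACTOR, WARMUP_ITERS, WARMUP_METHOD, last_epoch[, fixed_lr_idxs]).
# A minimal sketch of the usual warmup + multi-step rule such schedulers
# apply per epoch; this mirrors the common reid-strong-baseline version and is
# an assumption about this repo's class, not its actual code. All default
# parameter values below are illustrative, not taken from the config.
from bisect import bisect_right

def _sketch_lr(base_lr, epoch, milestones=(40, 70), gamma=0.1,
               warmup_factor=0.01, warmup_iters=10, warmup_method='linear'):
    # Ramp the LR for the first warmup_iters epochs, then decay it by gamma
    # at each milestone epoch.
    factor = 1.0
    if epoch < warmup_iters:
        if warmup_method == 'constant':
            factor = warmup_factor
        else:  # 'linear'
            alpha = epoch / warmup_iters
            factor = warmup_factor * (1 - alpha) + alpha
    return base_lr * factor * gamma ** bisect_right(list(milestones), epoch)

# e.g. _sketch_lr(3.5e-4, 0) -> 3.5e-6 (warmed-down start),
#      _sketch_lr(3.5e-4, 50) -> 3.5e-5 (past the first milestone).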
def test(cfg):
    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # Prepare the validation dataset.
    val_data_loader, num_query = make_val_data_loader(cfg)

    # Prepare the re-ID model; the hard-coded class counts only size the
    # classifier heads so the checkpoint can be loaded.
    model = build_model(cfg, num_classes=[700, 500])
    logger.info('Path to the checkpoint of the model: %s' % cfg.TEST.WEIGHT)
    model.load_param(cfg.TEST.WEIGHT, 'self')

    camera_model = build_camera_model(cfg, num_classes=5)
    logger.info('Path to the checkpoint of the camera model: %s' % cfg.TEST.CAMERA_WEIGHT)
    camera_model.load_param(cfg.TEST.CAMERA_WEIGHT, 'self')

    validator(cfg, model, camera_model, val_data_loader, num_query)
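# In the usual re-ID evaluation protocol, the validation loader concatenates
# the query set followed by the gallery set, and num_query marks the split
# point in the extracted features. A minimal sketch assuming that convention;
# `feats` is a hypothetical (N, D) tensor of all extracted features, not a
# name from this repo.
def _split_query_gallery(feats, num_query):
    qf = feats[:num_query]   # query features
    gf = feats[num_query:]   # gallery features
    # Euclidean query-to-gallery distance matrix, as typically fed to the
    # mAP / CMC computation.
    distmat = torch.cdist(qf, gf)
    return qf, gf, distmat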
def train(cfg):
    # Camera-classifier variant of train(): fits a five-way camera model on
    # the CamStyle data.
    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # Prepare the CamStyle dataset.
    train_loader, val_loader, num_query = make_camera_data_loader(cfg)
    num_classes = 5

    # Prepare the camera model, optimizer, and loss.
    model = build_camera_model(cfg, num_classes)
    optimizer, _ = make_optimizer(cfg, model)
    loss_fn = make_camera_loss(cfg, num_classes)

    if cfg.MODEL.PRETRAIN_CHOICE == 'resume':
        # Resume from a self-trained checkpoint named <prefix>_model_<epoch>.pth.
        start_epoch = int(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        logger.info('Start epoch: %d' % start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
        logger.info('Path to the checkpoint of optimizer: %s' % path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch)
    elif cfg.MODEL.PRETRAIN_CHOICE in ('self', 'imagenet', 'camera'):
        start_epoch = 0
        model.load_param(cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.PRETRAIN_CHOICE)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        raise ValueError('Only support pretrain_choice of resume, self, imagenet '
                         'and camera, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(cfg, model, train_loader, val_loader, optimizer, scheduler,
             loss_fn, num_query, start_epoch, 0)
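# A hypothetical command-line entry point, showing how a tools/ script would
# typically dispatch these train()/test() functions from a yacs config.
# Assumes each train()/test() pair lives in its own script; the `config`
# module, its default `cfg`, and the --test flag are illustrative assumptions,
# not part of this repo.
if __name__ == '__main__':
    import argparse
    from config import cfg  # assumed: the project's default yacs CfgNode

    parser = argparse.ArgumentParser(description="ReID baseline training/testing")
    parser.add_argument("--config_file", default="", type=str,
                        help="path to a YAML config file")
    parser.add_argument("--test", action="store_true",
                        help="run evaluation instead of training")
    parser.add_argument("opts", nargs=argparse.REMAINDER,
                        help="config overrides as KEY VALUE pairs")
    args = parser.parse_args()

    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    if args.test:
        test(cfg)
    else:
        train(cfg)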