def train(train_loader, num_classes):
    parser = argparse.ArgumentParser(description="ReID Baseline Training")
    parser.add_argument(
        "--config_file", default="", help="path to config file", type=str
    )
    parser.add_argument("opts", help="Modify config options using the command-line",
                        default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    if args.config_file != "":
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    set_seed(cfg.SOLVER.SEED)

    output_dir = cfg.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("reid_baseline", output_dir, if_train=True)
    logger.info("Saving model in the path: {}".format(cfg.OUTPUT_DIR))
    logger.info(args)

    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID

    if cfg.MODEL.PRETRAIN_CHOICE == 'finetune':
        model = make_model(cfg, num_class=num_classes)
        model.load_param_finetune(cfg.MODEL.PRETRAIN_PATH)
        print('Loading pretrained model for finetuning......')
    else:
        model = make_model(cfg, num_class=num_classes)

    loss_func = make_loss(cfg, num_classes=num_classes)
    optimizer = make_optimizer(cfg, model)

    scheduler = WarmupCosineAnnealingLR(optimizer, cfg.SOLVER.MAX_EPOCHS, cfg.SOLVER.DELAY_ITERS,
                                        cfg.SOLVER.ETA_MIN_LR, cfg.SOLVER.WARMUP_FACTOR,
                                        cfg.SOLVER.WARMUP_EPOCHS, cfg.SOLVER.WARMUP_METHOD)
    logger.info("use WarmupCosineAnnealingLR, delay_step:{}".format(cfg.SOLVER.DELAY_ITERS))

    do_train(
        cfg,
        model,
        train_loader,
        optimizer,
        scheduler,  # modify for using self trained model
        loss_func
    )
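# WarmupCosineAnnealingLR above is project code, not a stock PyTorch scheduler.
# Below is a minimal sketch of the same warmup-then-cosine idea built on
# torch.optim.lr_scheduler.LambdaLR; it omits the DELAY_ITERS/WARMUP_METHOD
# options, and the epoch counts and factors are illustrative assumptions,
# not values from the config above.
import math

import torch
from torch.optim.lr_scheduler import LambdaLR

def warmup_cosine_lambda(max_epochs, warmup_epochs, warmup_factor, eta_min_ratio):
    # Per-epoch multiplicative LR factor: linear warmup from warmup_factor
    # up to 1.0, then cosine decay toward eta_min_ratio * base_lr.
    def fn(epoch):
        if epoch < warmup_epochs:
            alpha = epoch / max(1, warmup_epochs)
            return warmup_factor * (1 - alpha) + alpha
        progress = (epoch - warmup_epochs) / max(1, max_epochs - warmup_epochs)
        return eta_min_ratio + (1 - eta_min_ratio) * 0.5 * (1 + math.cos(math.pi * progress))
    return fn

_demo_opt = torch.optim.SGD(torch.nn.Linear(8, 8).parameters(), lr=0.01)
_demo_sched = LambdaLR(_demo_opt, lr_lambda=warmup_cosine_lambda(120, 10, 0.01, 0.002))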
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID

    train_loader, val_loader, num_query, num_classes = make_dataloader(cfg)

    if cfg.MODEL.PRETRAIN_CHOICE == 'finetune':
        model = make_model(cfg, num_class=num_classes)
        model.load_param_finetune(cfg.MODEL.PRETRAIN_PATH)
        print('Loading pretrained model for finetuning......')
    else:
        model = make_model(cfg, num_class=num_classes)

    loss_func, center_criterion = make_loss(cfg, num_classes=num_classes)
    optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion)

    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                  cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_EPOCHS,
                                  cfg.SOLVER.WARMUP_METHOD)

    do_train(
        cfg,
        model,
        center_criterion,
        train_loader,
        val_loader,
        optimizer,
        optimizer_center,
        scheduler,  # modify for using self trained model
        loss_func,
        num_query
    )
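# make_loss here returns the center-loss criterion separately because its
# class centers are trained by their own optimizer. A self-contained sketch
# of that dual-optimizer step follows; the 0.0005 weight and toy dimensions
# are assumptions mirroring the common strong-baseline recipe, not values
# from this repo's config.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyCenterLoss(nn.Module):
    # Pulls each feature toward a learned per-class center.
    def __init__(self, num_classes, feat_dim):
        super().__init__()
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))

    def forward(self, feats, labels):
        return ((feats - self.centers[labels]) ** 2).sum(dim=1).mean()

backbone = nn.Linear(16, 8)                      # stand-in feature extractor
classifier = nn.Linear(8, 4)                     # stand-in ID head
center_criterion_sketch = ToyCenterLoss(num_classes=4, feat_dim=8)
opt_model = torch.optim.SGD(
    list(backbone.parameters()) + list(classifier.parameters()), lr=0.01)
opt_center = torch.optim.SGD(center_criterion_sketch.parameters(), lr=0.5)
center_weight = 0.0005                           # assumed CENTER_LOSS_WEIGHT

x, labels = torch.randn(32, 16), torch.randint(0, 4, (32,))
opt_model.zero_grad()
opt_center.zero_grad()
feats = backbone(x)
loss = F.cross_entropy(classifier(feats), labels) \
    + center_weight * center_criterion_sketch(feats, labels)
loss.backward()
opt_model.step()
for p in center_criterion_sketch.parameters():
    p.grad.data *= 1.0 / center_weight           # undo the loss weighting on center grads
opt_center.step()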
    os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.MODEL.DEVICE_ID
    cudnn.benchmark = True  # enable the inbuilt cudnn auto-tuner to find the best algorithm for this hardware

    train_loader, val_loader = make_dataloader(Cfg)
    model_G, model_Dip, model_Dii, model_D_reid = make_model(Cfg)

    optimizerG = make_optimizer(Cfg, model_G)
    optimizerDip = make_optimizer(Cfg, model_Dip)
    optimizerDii = make_optimizer(Cfg, model_Dii)

    schedulerG = WarmupMultiStepLR(optimizerG, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA,
                                   Cfg.SOLVER.WARMUP_FACTOR, Cfg.SOLVER.WARMUP_EPOCHS,
                                   Cfg.SOLVER.WARMUP_METHOD)
    schedulerDip = WarmupMultiStepLR(optimizerDip, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA,
                                     Cfg.SOLVER.WARMUP_FACTOR, Cfg.SOLVER.WARMUP_EPOCHS,
                                     Cfg.SOLVER.WARMUP_METHOD)
    schedulerDii = WarmupMultiStepLR(optimizerDii, Cfg.SOLVER.STEPS, Cfg.SOLVER.GAMMA,
                                     Cfg.SOLVER.WARMUP_FACTOR, Cfg.SOLVER.WARMUP_EPOCHS,
                                     Cfg.SOLVER.WARMUP_METHOD)

    GAN_loss, L1_loss, ReID_loss = make_loss(Cfg)

    do_train(Cfg,
             model_G, model_Dip, model_Dii, model_D_reid,
             train_loader, val_loader,
             optimizerG, optimizerDip, optimizerDii,
             GAN_loss, L1_loss, ReID_loss,
             schedulerG, schedulerDip, schedulerDii)
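# The WarmupMultiStepLR constructed three times above combines a linear warmup
# with milestone-based step decay. A minimal equivalent on top of LambdaLR is
# sketched below; the milestones, gamma, and warmup settings are illustrative,
# not Cfg.SOLVER values.
import bisect

import torch
from torch.optim.lr_scheduler import LambdaLR

def warmup_multistep_lambda(milestones, gamma, warmup_factor, warmup_epochs):
    milestones = sorted(milestones)
    def fn(epoch):
        warmup = 1.0
        if epoch < warmup_epochs:
            alpha = epoch / max(1, warmup_epochs)        # linear warmup ramp
            warmup = warmup_factor * (1 - alpha) + alpha
        # decay by gamma once for each milestone already passed
        return warmup * gamma ** bisect.bisect_right(milestones, epoch)
    return fn

_opt = torch.optim.Adam(torch.nn.Linear(4, 4).parameters(), lr=2e-4)
_sched = LambdaLR(_opt, warmup_multistep_lambda([40, 70], 0.1, 0.01, 10))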
import os

from torch.backends import cudnn

from config import Configuration     # assumed location of the Configuration class
from datasets import make_dataloader  # assumed project-local module
from model import make_model          # assumed project-local module
from loss import make_loss
from processor import do_train
from solver import make_optimizer, WarmupMultiStepLR
from utils.logger import setup_logger

if __name__ == '__main__':
    Cfg = Configuration()
    log_dir = Cfg.DATALOADER.LOG_DIR
    logger = setup_logger('{}'.format(Cfg.PROJECT_NAME), log_dir)
    logger.info("Running with config:\n{}".format(Cfg))

    os.environ['CUDA_VISIBLE_DEVICES'] = Cfg.DEVICE_ID
    cudnn.benchmark = True  # enable the inbuilt cudnn auto-tuner to find the best algorithm for this hardware

    train_loader, val_loader = make_dataloader(Cfg)
    model = make_model(Cfg)
    optimizer = make_optimizer(Cfg, model)
    scheduler = WarmupMultiStepLR(Cfg, optimizer)
    loss_func = make_loss(Cfg)

    do_train(
        Cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,  # modify for using self trained model
        loss_func,
    )
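# utils.logger.setup_logger is project code; below is a minimal stand-in
# showing the usual behavior (console handler plus an optional file handler),
# under the assumption that it follows the common ReID-baseline logger. The
# file names are assumptions.
import logging
import sys

def setup_logger_sketch(name, save_dir, if_train=False):
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setFormatter(fmt)
    logger.addHandler(ch)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
        log_name = "train_log.txt" if if_train else "test_log.txt"
        fh = logging.FileHandler(os.path.join(save_dir, log_name), mode='w')
        fh.setFormatter(fmt)
        logger.addHandler(fh)
    return logger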
    if args.config_file != "":
        logger.info("Loaded configuration file {}".format(args.config_file))
        with open(args.config_file, 'r') as cf:
            config_str = "\n" + cf.read()
            logger.info(config_str)
    logger.info("Running with config:\n{}".format(cfg))

    if cfg.MODEL.DIST_TRAIN:
        torch.distributed.init_process_group(backend='nccl', init_method='env://')

    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.MODEL.DEVICE_ID
    train_loader, train_loader_normal, val_loader, num_query, num_classes, camera_num, view_num = make_dataloader(cfg)

    model = make_model(cfg, num_class=num_classes, camera_num=camera_num, view_num=view_num)

    loss_func, center_criterion = make_loss(cfg, num_classes=num_classes)
    optimizer, optimizer_center = make_optimizer(cfg, model, center_criterion)
    scheduler = create_scheduler(cfg, optimizer)

    do_train(
        cfg,
        model,
        center_criterion,
        train_loader,
        val_loader,
        optimizer,
        optimizer_center,
        scheduler,
        loss_func,
        num_query,
        args.local_rank
    )
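# When cfg.MODEL.DIST_TRAIN is on, each process joins the NCCL group above and
# the model is expected to be moved onto its own GPU and wrapped in
# DistributedDataParallel somewhere inside do_train. A sketch of that wrapping
# follows; reading LOCAL_RANK from the environment assumes a torchrun-style
# launcher and is not taken from this repo.
import os

import torch
from torch.nn.parallel import DistributedDataParallel as DDP

def wrap_distributed(model):
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    torch.cuda.set_device(local_rank)   # one GPU per process
    model = model.cuda(local_rank)
    return DDP(model, device_ids=[local_rank])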