def main():
    output_dir = cfg.MODEL.OUTPUT_DIR
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)

    logger = setup_logger("tracker", output_dir, 0)
    logger.info("Running with config:\n{}".format(cfg))

    torch.backends.cudnn.benchmark = True

    train_loader, val_loader = make_data_loader(cfg)
    model = build_model(cfg)
    optimizer = make_optimizer(cfg, model)
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                  cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                  cfg.SOLVER.WARMUP_METHOD)
    loss_func = make_loss(cfg)

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,
        loss_func
    )
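# A minimal entry-point sketch (an assumption, not part of the original file):
# it shows how main() above might be wired to a YACS-style config. The
# --config_file flag is hypothetical; merge_from_file/freeze are the standard
# yacs CfgNode calls, assuming cfg is such a node.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description="Tracker training")
    parser.add_argument("--config_file", default="", type=str,
                        help="path to a YACS config file (hypothetical flag)")
    args = parser.parse_args()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.freeze()
    main()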
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)
    if cfg.SOLVER.FINETUNE:
        model.load_state_dict(torch.load(cfg.TEST.WEIGHT).module.state_dict())
    model = nn.DataParallel(model)

    optimizer = make_optimizer(cfg, model)
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                  cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                  cfg.SOLVER.WARMUP_METHOD)
    # scheduler = WarmupStepLR(optimizer, 3, 9, cfg.SOLVER.WARMUP_FACTOR,
    #                          cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD)

    loss_func = make_loss(cfg)

    arguments = {}

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,
        loss_func,
        num_query
    )
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, clustering_loader = \
        make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'on':
        loss_func, center_criterion_part, center_criterion_global, center_criterion_fore = \
            make_loss_with_center(cfg, num_classes)
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion_part, center_criterion_global,
            center_criterion_fore)
    else:
        loss_func = make_loss(cfg, num_classes)
        optimizer = make_optimizer(cfg, model)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        print('Only support pretrain_choice for imagenet, but got {}'.format(
            cfg.MODEL.PRETRAIN_CHOICE))

    if cfg.MODEL.IF_WITH_CENTER == 'on':
        do_train_with_center(
            cfg,
            model,
            center_criterion_part,
            center_criterion_global,
            center_criterion_fore,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,    # add for using self trained model
            clustering_loader)
    else:
        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,    # add for using self trained model
            clustering_loader)
def train(cfg):
    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # prepare camstyle dataset
    train_loader, train_camstyle_loader, val_loader, num_query, num_classes = \
        make_camstyle_data_loader(cfg)
    num_classes.append(-1)

    # prepare model
    model = build_model(cfg, num_classes)
    optimizer, _ = make_optimizer(cfg, model)
    loss_fn = make_loss(cfg, num_classes)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'resume':
        start_epoch = eval(
            cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        logger.info('Start epoch: %d' % start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
        logger.info('Path to the checkpoint of optimizer: %s' % path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch)
    elif cfg.MODEL.PRETRAIN_CHOICE in ('self', 'imagenet'):
        start_epoch = 0
        model.load_param(cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.PRETRAIN_CHOICE)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        logger.info('Only support pretrain_choice for imagenet and self, but got {}'
                    .format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,      # modify for using self trained model
        loss_fn,
        num_query,
        start_epoch,    # add for using self trained model
        0,
        train_camstyle_loader)
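# Several variants in this file recover the resume epoch with eval() on a
# filename fragment (e.g. '.../resnet50_model_120.pth' -> 120). That works but
# evaluates arbitrary text. A hedged alternative sketch (hypothetical helper,
# not in the original code) doing the same parse with a plain int conversion:
import os

def epoch_from_checkpoint_path(path):
    """Return the integer suffix of a checkpoint name like 'xxx_model_120.pth'."""
    stem = os.path.splitext(os.path.basename(path))[0]
    return int(stem.split('_')[-1])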
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)
    total = sum([param.nelement() for param in model.parameters()])
    print("Number of parameters: %.2fM" % (total / 1e6))

    if cfg.MODEL.METRIC_LOSS_TYPE == 'triplet':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
    optimizer = make_optimizer(cfg, model)
    loss_func = make_loss(cfg, num_classes)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'self':
        start_epoch = eval(
            cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        print('Start epoch:', start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
        print('Path to the checkpoint of optimizer:', path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch)
    elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        print('Only support pretrain_choice for imagenet and self, but got {}'
              .format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,      # modify for using self trained model
        loss_func,
        num_query,
        start_epoch     # add for using self trained model
    )
def train(cfg, cfg_hr):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, cfg_hr, num_classes)
    model = nn.DataParallel(model)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        print(cfg.SOLVER.MARGIN)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
        loss_func = make_loss(cfg, num_classes)  # modified by gu

        arguments = {}

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,
            loss_func,
            num_query
        )
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(cfg, num_classes)  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)

        arguments = {}

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,
            loss_func,
            num_query
        )
    else:
        print("Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n"
              .format(cfg.MODEL.IF_WITH_CENTER))
def train(cfg):
    logger = setup_logger("reid_baseline", cfg.OUTPUT_DIR)
    logger.info("Running with config:\n{}".format(cfg))

    # prepare dataset
    val_data_loader, num_query = make_val_data_loader(cfg)
    num_classes = np.zeros(len(cfg.DATALOADER.SAMPLER_PROB)).astype(int) - 1
    source_dataset = init_dataset(cfg.SRC_DATA.NAMES,
                                  root_train=cfg.SRC_DATA.TRAIN_DIR,
                                  transfered=cfg.SRC_DATA.TRANSFERED)
    num_classes[0] = source_dataset.num_train_pids
    num_classes[1] = cfg.TGT_UNSUPDATA.CLUSTER_TOPK
    if cfg.MODEL.FINETUNE:
        num_classes[1] += 200

    # prepare model
    model = build_model(cfg, num_classes)
    optimizer, fixed_lr_idxs = make_optimizer(cfg, model)
    loss_fn = make_loss(cfg, num_classes)

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'resume':
        start_epoch = eval(
            cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        logger.info('Start epoch: %d' % start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
        logger.info('Path to the checkpoint of optimizer: %s' % path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch,
                                      fixed_lr_idxs)
    elif cfg.MODEL.PRETRAIN_CHOICE in ('self', 'imagenet'):
        start_epoch = 0
        model.load_param(cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.PRETRAIN_CHOICE)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, -1, fixed_lr_idxs)
        camera_model = build_camera_model(cfg, num_classes=5)
        camera_model.load_param(cfg.TEST.CAMERA_WEIGHT, cfg.MODEL.PRETRAIN_CHOICE)
    else:
        logger.info('Only support pretrain_choice for imagenet and self, but got {}'
                    .format(cfg.MODEL.PRETRAIN_CHOICE))

    do_train(cfg,
             model,
             camera_model,
             val_data_loader,
             optimizer,
             scheduler,     # modify for using self trained model
             loss_fn,
             num_query,
             start_epoch,   # add for using self trained model
             0)
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)
    print('Train without center loss, the loss type is',
          cfg.MODEL.METRIC_LOSS_TYPE)
    optimizer = make_optimizer(cfg, model)
    loss_func = make_loss(cfg, num_classes)

    if cfg.MODEL.PRETRAIN_CHOICE == 'self':
        start_epoch = eval(
            cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        print('Start epoch:', start_epoch)
        path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
        print('Path to the checkpoint of optimizer:', path_to_optimizer)
        model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        optimizer.load_state_dict(torch.load(path_to_optimizer))
        if cfg.MODEL.DEVICE == "cuda":
            # move the resumed optimizer state onto the GPU
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch)
    elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        start_epoch = 0
        print('Only support pretrain_choice for imagenet and self, but got {}'
              .format(cfg.MODEL.PRETRAIN_CHOICE))

    arguments = {}

    do_train(cfg, model, train_loader, val_loader, optimizer, scheduler,
             loss_func, num_query, start_epoch)
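# The device-move loop above is the standard fix when resuming on GPU:
# torch.load restores optimizer state (e.g. momentum buffers) on CPU, and the
# next optimizer.step() fails if those tensors live on a different device than
# the parameters. A reusable sketch of the same pattern (hypothetical helper
# name, not part of the original code):
def optimizer_state_to(optimizer, device):
    """Move every tensor in an optimizer's state dict to `device`."""
    for state in optimizer.state.values():
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                state[k] = v.to(device)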
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, dataset = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)
    print('Train without center loss, the loss type is',
          cfg['MODEL.METRIC_LOSS_TYPE'])
    optimizer = make_optimizer(cfg, model)
    loss_func = make_loss(cfg, num_classes, model.in_planes)  # modified by gu

    # Add for using self trained model
    if cfg['MODEL.PRETRAIN_CHOICE'] == 'continue':
        # start_epoch = eval(cfg['MODEL.PRETRAIN_PATH'].split('/')[-1].split('.')[0].split('_')[-1])
        start_epoch = 0
        print('Start epoch:', start_epoch)
        path_to_optimizer = cfg['MODEL.PRETRAIN_PATH'].replace('model', 'optimizer')
        print('Path to the checkpoint of optimizer:', path_to_optimizer)
        model.load_param(cfg['MODEL.PRETRAIN_PATH'])
        # optimizer = make_optimizer(cfg, model)
        scheduler = WarmupMultiStepLR(optimizer, cfg['SOLVER.STEPS'], cfg['SOLVER.GAMMA'],
                                      cfg['SOLVER.WARMUP_FACTOR'], cfg['SOLVER.WARMUP_ITERS'],
                                      cfg['SOLVER.WARMUP_METHOD'])
    elif cfg['MODEL.PRETRAIN_CHOICE'] == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg['SOLVER.STEPS'], cfg['SOLVER.GAMMA'],
                                      cfg['SOLVER.WARMUP_FACTOR'], cfg['SOLVER.WARMUP_ITERS'],
                                      cfg['SOLVER.WARMUP_METHOD'])
    elif cfg['MODEL.PRETRAIN_CHOICE'] in ('self', 'self-no-head'):
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg['SOLVER.STEPS'], cfg['SOLVER.GAMMA'],
                                      cfg['SOLVER.WARMUP_FACTOR'], cfg['SOLVER.WARMUP_ITERS'],
                                      cfg['SOLVER.WARMUP_METHOD'])
    else:
        print('Only support pretrain_choice for imagenet and self, but got {}'
              .format(cfg['MODEL.PRETRAIN_CHOICE']))

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,      # modify for using self trained model
        loss_func,
        num_query,
        start_epoch,    # add for using self trained model
        dataset
    )
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                  cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                  cfg.SOLVER.WARMUP_METHOD)
    loss_func = make_loss(cfg)

    arguments = {}

    do_train(cfg, model, train_loader, val_loader, optimizer, scheduler,
             loss_func, num_query)
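# Every variant in this file constructs WarmupMultiStepLR with the same
# positional arguments (milestones, gamma, warmup_factor, warmup_iters,
# warmup_method, and optionally a start epoch). A minimal sketch of the
# schedule it is assumed to implement -- linear or constant warmup followed by
# multi-step decay -- not necessarily the project's exact implementation:
from bisect import bisect_right

import torch


class WarmupMultiStepLRSketch(torch.optim.lr_scheduler._LRScheduler):
    def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3,
                 warmup_iters=500, warmup_method="linear", last_epoch=-1):
        self.milestones = list(milestones)
        self.gamma = gamma
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        warmup_factor = 1.0
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == "constant":
                warmup_factor = self.warmup_factor
            else:  # "linear": ramp from warmup_factor up to 1.0
                alpha = self.last_epoch / self.warmup_iters
                warmup_factor = self.warmup_factor * (1 - alpha) + alpha
        # after warmup: decay by gamma at each milestone already passed
        return [base_lr * warmup_factor *
                self.gamma ** bisect_right(self.milestones, self.last_epoch)
                for base_lr in self.base_lrs]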
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
        #                               cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
        #                               cfg.SOLVER.WARMUP_METHOD)

        loss_func = make_loss(cfg, num_classes)  # modified by gu

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            start_epoch = eval(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD, start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            print('Only support pretrain_choice for imagenet and self, but got {}'
                  .format(cfg.MODEL.PRETRAIN_CHOICE))

        arguments = {}

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch     # add for using self trained model
        )
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(cfg, num_classes)  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion)
        # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
        #                               cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
        #                               cfg.SOLVER.WARMUP_METHOD)

        arguments = {}

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            start_epoch = eval(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace('model', 'center_param')
            print('Path to the checkpoint of center_param:', path_to_center_param)
            path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer_center')
            print('Path to the checkpoint of optimizer_center:', path_to_optimizer_center)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            center_criterion.load_state_dict(torch.load(path_to_center_param))
            optimizer_center.load_state_dict(torch.load(path_to_optimizer_center))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD, start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            print('Only support pretrain_choice for imagenet and self, but got {}'
                  .format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch     # add for using self trained model
        )
    else:
        print("Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n"
              .format(cfg.MODEL.IF_WITH_CENTER))
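# How the two optimizers above typically interact inside do_train_with_center:
# a hedged sketch of one common scheme (used, for example, by the
# reid-strong-baseline project); CENTER_LOSS_WEIGHT and the exact loss
# composition are assumptions, not taken from this file.
#
#   optimizer.zero_grad()
#   optimizer_center.zero_grad()
#   loss = id_loss + triplet_loss + cfg.SOLVER.CENTER_LOSS_WEIGHT * center_loss
#   loss.backward()
#   optimizer.step()
#   # undo the loss weight so the centers themselves move at full learning rate
#   for param in center_criterion.parameters():
#       param.grad.data *= (1.0 / cfg.SOLVER.CENTER_LOSS_WEIGHT)
#   optimizer_center.step()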
val_loader, dataset_val = make_data_loader_view(cfg, is_train=True)
model = build_model(cfg).cuda()
maxs = torch.max(dataset.bbox[0], dim=0).values.cuda() + 0.5
mins = torch.min(dataset.bbox[0], dim=0).values.cuda() - 0.5
model.set_max_min(maxs, mins)
optimizer = make_optimizer(cfg, model)
scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                              cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                              cfg.SOLVER.WARMUP_METHOD)
loss_fn = make_loss(cfg)

model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

beg = time.time()
for batch in train_loader:
    beg = time.time()
    model.train()
    optimizer.zero_grad()
    rays, rgbs, bboxes = batch
    rays = rays[0].cuda()
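# The fragment above is cut off mid-loop. For context, apex amp at opt_level
# "O1" requires the backward pass to go through its loss scaler; a hedged
# sketch of the step that would typically follow (the forward call and
# loss_fn's signature here are assumptions, not the original code):
#
#   loss = loss_fn(model(rays), rgbs)
#   with amp.scale_loss(loss, optimizer) as scaled_loss:
#       scaled_loss.backward()
#   optimizer.step()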
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)
    print('Train with the loss type:', cfg.MODEL.METRIC_LOSS_TYPE)
    optimizer = make_optimizer(cfg, model)
    # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
    #                               cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
    #                               cfg.SOLVER.WARMUP_METHOD)

    loss_func = make_loss(cfg, num_classes)  # modified by gu

    # Add for using self trained model
    if cfg.MODEL.PRETRAIN_CHOICE == 'self':
        # start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
        # print('Start epoch:', start_epoch)
        # path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
        # print('Path to the checkpoint of optimizer:', path_to_optimizer)
        # model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
        # optimizer.load_state_dict(torch.load(path_to_optimizer))
        # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
        #                               cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
        #                               cfg.SOLVER.WARMUP_METHOD, start_epoch)
        start_epoch = 120
        print('Start epoch:', start_epoch)
        model.load_state_dict(
            torch.load('work_space_tri/se_resnet101_ibn_a_model_120.pth'))
        optimizer.load_state_dict(
            torch.load('work_space_tri/se_resnet101_ibn_a_optimizer_120.pth'))
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD, start_epoch)
    elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        print('Only support pretrain_choice for imagenet and self, but got {}'
              .format(cfg.MODEL.PRETRAIN_CHOICE))

    arguments = {}

    do_train(
        cfg,
        model,
        train_loader,
        val_loader,
        optimizer,
        scheduler,      # modify for using self trained model
        loss_func,
        num_query,
        start_epoch     # add for using self trained model
    )
def train(cfg):
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, num_classes2, image_map_label2 = \
        make_data_loader(cfg)
    # print('\n\n*** image_map_label2:')

    # prepare model
    model = build_model(cfg, num_classes, num_classes2)
    # print(list(model.children()))
    # print(model.state_dict().keys())
    # exit(0)
    # print('model.named_children(): \n\n', model.named_children())
    '''
    kk = 1
    for name, child in model.base.named_children():
        print(kk, name)
        kk += 1
    print(len(list(model.base.children())))
    exit(0)
    for i in range(len(list(model.base.children()))):
        print(' +++', i + 1)
        print(list(model.base.children())[i])
    exit(0)
    '''

    if len(cfg.MODEL.PRETRAIN_PATH2) > 5:
        print('--- resume from ', cfg.MODEL.PRETRAIN_PATH2)
        # model.load_param(cfg.MODEL.PRETRAIN_PATH)
        # model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH2, map_location=lambda storage, loc: storage))
        if cfg.MODEL.ONCE_LOAD == 'yes':
            print('\n--- ONCE_LOAD...\n')
            model.load_state_dict(
                torch.load(cfg.MODEL.PRETRAIN_PATH2,
                           map_location=lambda storage, loc: storage))
            # if cfg.MODEL.FREEZE_BASE == 'yes':
            #     functions.freeze_layer(model, 'base', False)
            # functions.freeze_global_model(model, False)
        else:
            functions.load_state_dict_distill(model, cfg.MODEL.PRETRAIN_PATH2,
                                              cfg.MODEL.ONLY_BASE,
                                              cfg.MODEL.WITHOUT_FC)
        print('**** Successfully loaded ', cfg.MODEL.PRETRAIN_PATH2)

    if cfg.MODEL.FREEZE_BASE:
        # functions.freeze_layer(model, 'base', False)
        functions.freeze_global_model(model, False)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)

        if cfg.SOLVER.MY_OPTIMIZER == "yes":
            print('---* my optimizer:', cfg.SOLVER.MY_OPTIMIZER_NAME)
            other_params = [p for n, p in model.named_parameters()
                            if not n.startswith('base')]
            optimizer = optim.SGD(
                [{'params': model.base.parameters(), 'lr': cfg.SOLVER.LR / 10},
                 {'params': other_params, 'lr': cfg.SOLVER.LR}],
                momentum=0.9, weight_decay=5e-4, nesterov=True)
        else:
            print('---* not my optimizer')
            optimizer = make_optimizer(cfg, model)
        # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
        #                               cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
        #                               cfg.SOLVER.WARMUP_METHOD)
        # _C.SOLVER.MY_SCHEDULER = "no"
        # _C.SOLVER.MY_WARMUP = "no"

        loss_func = make_loss(cfg, num_classes)  # modified by gu

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            start_epoch = eval(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD, start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            if cfg.SOLVER.MY_SCHEDULER == "yes":
                print('cfg.SOLVER.MY_SCHEDULER_STEP:', cfg.SOLVER.MY_SCHEDULER_STEP)
                print('---* my scheduler: ', cfg.SOLVER.MY_SCHEDULER_NAME)
                if cfg.SOLVER.MY_SCHEDULER_NAME == 'SL':
                    scheduler = lr_scheduler.StepLR(
                        optimizer, step_size=cfg.SOLVER.MY_SCHEDULER_STEP[0], gamma=0.1)
                elif cfg.SOLVER.MY_SCHEDULER_NAME == 'MSL':
                    scheduler = lr_scheduler.MultiStepLR(
                        optimizer, cfg.SOLVER.MY_SCHEDULER_STEP, gamma=0.1)
                else:
                    print(cfg.SOLVER.MY_SCHEDULER_NAME, ' not found!')
                    exit(0)
            else:
                print('---* not my scheduler')
                scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                              cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                              cfg.SOLVER.WARMUP_METHOD)
        else:
            print('Only support pretrain_choice for imagenet and self, but got {}'
                  .format(cfg.MODEL.PRETRAIN_CHOICE))

        arguments = {}

        print('************ do_train')
        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,    # add for using self trained model
            image_map_label2,
            num_classes2)

    # elif cfg.MODEL.IF_WITH_CENTER == 'yes':
    #     print('Train with center loss, the loss type is', cfg.MODEL.METRIC_LOSS_TYPE)
    #     loss_func, center_criterion = make_loss_with_center(cfg, num_classes)  # modified by gu
    #     optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion)
    #     # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
    #     #                               cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
    #     #                               cfg.SOLVER.WARMUP_METHOD)
    #
    #     arguments = {}
    #
    #     # Add for using self trained model
    #     if cfg.MODEL.PRETRAIN_CHOICE == 'self':
    #         start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
    #         print('Start epoch:', start_epoch)
    #         path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
    #         print('Path to the checkpoint of optimizer:', path_to_optimizer)
    #         path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer_center')
    #         print('Path to the checkpoint of optimizer_center:', path_to_optimizer_center)
    #         model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
    #         optimizer.load_state_dict(torch.load(path_to_optimizer))
    #         optimizer_center.load_state_dict(torch.load(path_to_optimizer_center))
    #         scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
    #                                       cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
    #                                       cfg.SOLVER.WARMUP_METHOD, start_epoch)
    #     elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
    #         start_epoch = 0
    #         scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
    #                                       cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
    #                                       cfg.SOLVER.WARMUP_METHOD)
    #     else:
    #         print('Only support pretrain_choice for imagenet and self, but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))
    #
    #     do_train_with_center(
    #         cfg,
    #         model,
    #         center_criterion,
    #         train_loader,
    #         val_loader,
    #         optimizer,
    #         optimizer_center,
    #         scheduler,      # modify for using self trained model
    #         loss_func,
    #         num_query,
    #         start_epoch     # add for using self trained model
    #     )
    else:
        print("Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n"
              .format(cfg.MODEL.IF_WITH_CENTER))
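# functions.freeze_global_model / functions.freeze_layer are project helpers
# whose source is not shown in this file; a hedged sketch of what such a
# freeze helper conventionally does (an assumption, not the project's actual
# implementation):
def freeze_layer_sketch(model, prefix, requires_grad):
    """Toggle gradients for every parameter whose name starts with `prefix`."""
    for name, param in model.named_parameters():
        if name.startswith(prefix):
            param.requires_grad = requires_grad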
def train(cfg):
    # prepare dataset
    # train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)
    train_loader, val_loader, num_query, num_classes = make_data_loader_train(cfg)

    # prepare model
    if 'prw' in cfg.DATASETS.NAMES:
        num_classes = 483
    elif "market1501" in cfg.DATASETS.NAMES:
        num_classes = 751
    elif "duke" in cfg.DATASETS.NAMES:
        num_classes = 702
    elif "cuhk" in cfg.DATASETS.NAMES:
        num_classes = 5532
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
        #                               cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
        #                               cfg.SOLVER.WARMUP_METHOD)

        loss_func = make_loss(cfg, num_classes)  # modified by gu

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # start_epoch = eval(cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
            start_epoch = 0
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            pretrained_dic = torch.load(cfg.MODEL.PRETRAIN_PATH).state_dict()
            model_dict = model.state_dict()
            model_dict.update(pretrained_dic)
            model.load_state_dict(model_dict)
            if cfg.MODEL.WHOLE_MODEL_TRAIN == "no":
                # train only the attention branches; freeze everything else
                for name, value in model.named_parameters():
                    if ("Query_Guided_Attention" not in name
                            and "non_local" not in name
                            and "classifier_attention" not in name):
                        value.requires_grad = False
                optimizer = make_optimizer(cfg, model)
            # else:
            #     cfg.SOLVER.BASE_LR = 0.0000035
            #     optimizer.load_state_dict(torch.load(path_to_optimizer))
            #     for state in optimizer.state.values():
            #         for k, v in state.items():
            #             if isinstance(v, torch.Tensor):
            #                 state[k] = v.cuda()
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            print('Only support pretrain_choice for imagenet and self, but got {}'
                  .format(cfg.MODEL.PRETRAIN_CHOICE))

        arguments = {}

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch     # add for using self trained model
        )
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(cfg, num_classes)  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(cfg, model, center_criterion)
        # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
        #                               cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
        #                               cfg.SOLVER.WARMUP_METHOD)

        arguments = {}

        # Add for using self trained model
        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            start_epoch = eval(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0].split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace('model', 'center_param')
            print('Path to the checkpoint of center_param:', path_to_center_param)
            path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace('model', 'optimizer_center')
            print('Path to the checkpoint of optimizer_center:', path_to_optimizer_center)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            # move the resumed optimizer state onto the GPU
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
            center_criterion.load_state_dict(torch.load(path_to_center_param))
            optimizer_center.load_state_dict(torch.load(path_to_optimizer_center))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD, start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            print('Only support pretrain_choice for imagenet and self, but got {}'
                  .format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,      # modify for using self trained model
            loss_func,
            num_query,
            start_epoch     # add for using self trained model
        )
    else:
        print("Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n"
              .format(cfg.MODEL.IF_WITH_CENTER))
def train(cfg): # prepare dataset train_loader, val_loader, num_query, num_classes = make_data_loader(cfg) # prepare model model = build_model(cfg, num_classes) if cfg.MODEL.IF_WITH_CENTER == "no": print("Train without center loss, the loss type is", cfg.MODEL.METRIC_LOSS_TYPE) optimizer = make_optimizer(cfg, model) # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR, # cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD) loss_func = make_loss(cfg, num_classes) # modified by gu # Add for using self trained model if cfg.MODEL.PRETRAIN_CHOICE == "self": start_epoch = eval( cfg.MODEL.PRETRAIN_PATH.split("/")[-1].split(".")[0].split("_")[-1] ) print("Start epoch:", start_epoch) path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace("model", "optimizer") print("Path to the checkpoint of optimizer:", path_to_optimizer) model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH)) optimizer.load_state_dict(torch.load(path_to_optimizer)) scheduler = WarmupMultiStepLR( optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, cfg.SOLVER.MODE, cfg.SOLVER.MAX_EPOCHS, start_epoch ) else: start_epoch = 0 scheduler = WarmupMultiStepLR( optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, cfg.SOLVER.MODE, cfg.SOLVER.MAX_EPOCHS ) do_train( cfg, model, train_loader, val_loader, optimizer, scheduler, # modify for using self trained model loss_func, num_query, start_epoch, # add for using self trained model ) elif cfg.MODEL.IF_WITH_CENTER == "yes": print("Train with center loss, the loss type is", cfg.MODEL.METRIC_LOSS_TYPE) loss_func, center_criterion = make_loss_with_center( cfg, num_classes ) # modified by gu optimizer, optimizer_center = make_optimizer_with_center( cfg, model, center_criterion ) # scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR, # cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD) # Add for using self trained model if cfg.MODEL.PRETRAIN_CHOICE == "self": start_epoch = eval( cfg.MODEL.PRETRAIN_PATH.split("/")[-1].split(".")[0].split("_")[-1] ) print("Start epoch:", start_epoch) path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace("model", "optimizer") print("Path to the checkpoint of optimizer:", path_to_optimizer) path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace( "model", "center_param" ) print("Path to the checkpoint of center_param:", path_to_center_param) path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace( "model", "optimizer_center" ) print( "Path to the checkpoint of optimizer_center:", path_to_optimizer_center ) model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH)) optimizer.load_state_dict(torch.load(path_to_optimizer)) center_criterion.load_state_dict(torch.load(path_to_center_param)) optimizer_center.load_state_dict(torch.load(path_to_optimizer_center)) scheduler = WarmupMultiStepLR( optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, cfg.SOLVER.MODE, cfg.SOLVER.MAX_EPOCHS, start_epoch, ) else: start_epoch = 0 scheduler = WarmupMultiStepLR( optimizer, cfg.SOLVER.STEPS, cfg.SOLVER.GAMMA, cfg.SOLVER.WARMUP_FACTOR, cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.WARMUP_METHOD, cfg.SOLVER.MODE, cfg.SOLVER.MAX_EPOCHS, ) do_train_with_center( cfg, model, center_criterion, train_loader, val_loader, optimizer, optimizer_center, scheduler, # modify for using self trained model 
loss_func, num_query, start_epoch, # add for using self trained model ) else: print( "Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n".format( cfg.MODEL.IF_WITH_CENTER ) )