def train(cfg):
    """Train a re-ID model with an auxiliary clustering loader.

    Builds loaders/model/losses/optimizers from ``cfg`` and dispatches to
    the center-loss training loop when ``cfg.MODEL.IF_WITH_CENTER == 'on'``,
    otherwise to the plain loop.

    Raises:
        ValueError: if ``cfg.MODEL.PRETRAIN_CHOICE`` is not ``'imagenet'``.
            (The original code only printed here and then crashed later with
            a NameError on ``start_epoch`` / ``scheduler``.)
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, clustering_loader = \
        make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'on':
        # Center-loss training uses one center criterion per feature branch
        # (part / global / foreground) plus a dedicated center optimizer.
        loss_func, center_criterion_part, center_criterion_global, \
            center_criterion_fore = make_loss_with_center(cfg, num_classes)
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion_part, center_criterion_global,
            center_criterion_fore)
    else:
        loss_func = make_loss(cfg, num_classes)
        optimizer = make_optimizer(cfg, model)

    if cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
        start_epoch = 0
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
    else:
        # Fail fast: continuing would hit a NameError on `scheduler` below.
        raise ValueError(
            'Only support pretrain_choice for imagenet, but got {}'.format(
                cfg.MODEL.PRETRAIN_CHOICE))

    if cfg.MODEL.IF_WITH_CENTER == 'on':
        do_train_with_center(
            cfg, model, center_criterion_part, center_criterion_global,
            center_criterion_fore, train_loader, val_loader, optimizer,
            optimizer_center, scheduler, loss_func, num_query, start_epoch,
            clustering_loader)
    else:
        do_train(cfg, model, train_loader, val_loader, optimizer, scheduler,
                 loss_func, num_query, start_epoch, clustering_loader)
def train(cfg, cfg_hr):
    """Train an HR re-ID model.

    Builds loaders from ``cfg`` and the model from ``cfg`` plus ``cfg_hr``
    (high-resolution branch settings -- TODO confirm against build_model),
    wraps it in ``nn.DataParallel``, then dispatches to the plain or
    center-loss training loop based on ``cfg.MODEL.IF_WITH_CENTER``.

    Unsupported IF_WITH_CENTER values are reported and the function returns
    without training (original behavior preserved).
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, cfg_hr, num_classes)
    model = nn.DataParallel(model)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        print(cfg.SOLVER.MARGIN)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
        loss_func = make_loss(cfg, num_classes)  # modified by gu
        # NOTE: the unused `arguments = {}` locals from the original were
        # dead code and have been removed.
        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,
            loss_func,
            num_query
        )
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(
            cfg, num_classes)  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion)
        scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                      cfg.SOLVER.GAMMA,
                                      cfg.SOLVER.WARMUP_FACTOR,
                                      cfg.SOLVER.WARMUP_ITERS,
                                      cfg.SOLVER.WARMUP_METHOD)
        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,
            loss_func,
            num_query
        )
    else:
        print("Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n".format(cfg.MODEL.IF_WITH_CENTER))
def train(cfg):
    """Train a re-ID model according to ``cfg``.

    Supports resuming from a self-trained checkpoint
    (``cfg.MODEL.PRETRAIN_CHOICE == 'self'``) or starting from ImageNet
    weights (``'imagenet'``), with or without center loss.

    Raises:
        ValueError: if ``cfg.MODEL.PRETRAIN_CHOICE`` is neither 'imagenet'
            nor 'self'.  (The original code only printed and then crashed
            later with a NameError on ``start_epoch`` / ``scheduler``.)
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        loss_func = make_loss(cfg, num_classes)  # modified by gu

        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # The epoch index is encoded in the checkpoint filename
            # '<prefix>_model_<epoch>.pth'.  int() replaces the original
            # eval(), which is unsafe on a path component and unnecessary.
            start_epoch = int(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0]
                .split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD,
                                          start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            raise ValueError(
                'Only support pretrain_choice for imagenet and self, '
                'but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch  # add for using self trained model
        )
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(
            cfg, num_classes)  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion)

        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Same filename convention; also restore the center criterion
            # and its optimizer from sibling checkpoint files.
            start_epoch = int(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0]
                .split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'center_param')
            print('Path to the checkpoint of center_param:',
                  path_to_center_param)
            path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer_center')
            print('Path to the checkpoint of optimizer_center:',
                  path_to_optimizer_center)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            center_criterion.load_state_dict(torch.load(path_to_center_param))
            optimizer_center.load_state_dict(
                torch.load(path_to_optimizer_center))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD,
                                          start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            raise ValueError(
                'Only support pretrain_choice for imagenet and self, '
                'but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch  # add for using self trained model
        )
    else:
        print(
            "Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n"
            .format(cfg.MODEL.IF_WITH_CENTER))
def train(cfg):
    """Train the multi-task model (re-ID + attribute/mask/part branches).

    Builds the multi-task loader and model from ``cfg`` / ``cfg.mt`` and
    dispatches to ``do_mt_train`` (no center loss) or the center-loss loop
    based on ``cfg.MODEL.IF_WITH_CENTER``.

    Raises:
        ValueError: if ``cfg.MODEL.PRETRAIN_CHOICE`` is neither 'imagenet'
            nor 'self'.  (The original code only printed and later crashed
            with a NameError on ``start_epoch`` / ``scheduler``.)
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes, attr_labels = \
        make_mt_data_loader(cfg)
    print(f'attribute labels: {attr_labels}')

    # prepare model
    model = build_mt_model(num_features=cfg.mt.num_features,
                           dropout=cfg.mt.dropout,
                           last_stride=cfg.mt.last_conv_stride,
                           num_classes=num_classes,
                           num_classes_seg=cfg.mt.num_classes_seg,
                           global_branch=cfg.mt.global_branch,
                           mask_branch=cfg.mt.mask_branch,
                           part_branch=cfg.mt.part_branch,
                           mask_dim=cfg.mt.mask_dim,
                           part_dim=cfg.mt.part_dim,
                           part_info=cfg.mt.part_info,
                           attr_label_number=len(attr_labels),
                           attr_mask_weight=cfg.mt.attr_mask_weight,
                           wavp=cfg.mt.attr_wavp,
                           use_attr=cfg.mt.use_attr,
                           part_layer=cfg.mt.part_layer,
                           part_abla=cfg.mt.part_abla)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        print('Builidng criterions')
        loss_func = make_mt_loss(cfg, cfg.mt.normalize_size,
                                 cfg.mt.num_classes_seg, cfg.mt.weight[2],
                                 cfg.mt.triplet_margin,
                                 num_classes)  # modified by gu

        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Epoch index comes from the checkpoint filename
            # '<prefix>_model_<epoch>.pth'.  int() replaces the original
            # eval(), which is unsafe on a path component.
            start_epoch = int(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0]
                .split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            # Move the model to the target device before restoring the
            # optimizer state (order preserved from the original).
            model.to(cfg.MODEL.DEVICE)
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD,
                                          start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            raise ValueError(
                'Only support pretrain_choice for imagenet and self, '
                'but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

        print('Runing Trainer...')
        do_mt_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,  # add for using self trained model
            cfg.mt.weight)
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(
            cfg, num_classes)  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion)

        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Restore model, optimizer, center parameters and the center
            # optimizer from sibling checkpoint files.
            start_epoch = int(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0]
                .split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'center_param')
            print('Path to the checkpoint of center_param:',
                  path_to_center_param)
            path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer_center')
            print('Path to the checkpoint of optimizer_center:',
                  path_to_optimizer_center)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            center_criterion.load_state_dict(torch.load(path_to_center_param))
            optimizer_center.load_state_dict(
                torch.load(path_to_optimizer_center))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD,
                                          start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            raise ValueError(
                'Only support pretrain_choice for imagenet and self, '
                'but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch  # add for using self trained model
        )
    else:
        print(
            "Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n"
            .format(cfg.MODEL.IF_WITH_CENTER))
def train(cfg):
    """Train a query-guided-attention re-ID model.

    Uses ``make_data_loader_train`` and overrides ``num_classes`` with a
    hard-coded identity count for known datasets.  Supports fine-tuning
    from a self-trained checkpoint (optionally freezing everything except
    the attention modules) or training from ImageNet weights.

    Raises:
        ValueError: if ``cfg.MODEL.PRETRAIN_CHOICE`` is neither 'imagenet'
            nor 'self'.  (The original code only printed and then crashed
            later with a NameError on ``start_epoch`` / ``scheduler``.)
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader_train(
        cfg)

    # Hard-coded training-identity counts per dataset, overriding the value
    # reported by the loader.  Order matters: first substring match wins,
    # mirroring the original if/elif chain.
    for key, count in (('prw', 483), ('market1501', 751), ('duke', 702),
                       ('cuhk', 5532)):
        if key in cfg.DATASETS.NAMES:
            num_classes = count
            break

    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == 'no':
        print('Train without center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        loss_func = make_loss(cfg, num_classes)  # modified by gu

        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Fine-tuning restarts at epoch 0 on purpose: the original left
            # the epoch-from-filename parsing commented out.
            start_epoch = 0
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            # The checkpoint stores a whole module object; merge its weights
            # into the freshly built model so new parameters keep their init.
            pretrained_dic = torch.load(cfg.MODEL.PRETRAIN_PATH).state_dict()
            model_dict = model.state_dict()
            model_dict.update(pretrained_dic)
            model.load_state_dict(model_dict)
            if cfg.MODEL.WHOLE_MODEL_TRAIN == "no":
                # Freeze everything except the query-guided attention,
                # non-local, and attention-classifier parameters, then
                # rebuild the optimizer over the reduced trainable set.
                for name, value in model.named_parameters():
                    if ("Query_Guided_Attention" not in name
                            and "non_local" not in name
                            and "classifier_attention" not in name):
                        value.requires_grad = False
                optimizer = make_optimizer(cfg, model)
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            raise ValueError(
                'Only support pretrain_choice for imagenet and self, '
                'but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch  # add for using self trained model
        )
    elif cfg.MODEL.IF_WITH_CENTER == 'yes':
        print('Train with center loss, the loss type is',
              cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(
            cfg, num_classes)  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion)

        if cfg.MODEL.PRETRAIN_CHOICE == 'self':
            # Epoch index from the checkpoint filename
            # '<prefix>_model_<epoch>.pth'; int() replaces the unsafe eval().
            start_epoch = int(
                cfg.MODEL.PRETRAIN_PATH.split('/')[-1].split('.')[0]
                .split('_')[-1])
            print('Start epoch:', start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer')
            print('Path to the checkpoint of optimizer:', path_to_optimizer)
            path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'center_param')
            print('Path to the checkpoint of center_param:',
                  path_to_center_param)
            path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace(
                'model', 'optimizer_center')
            print('Path to the checkpoint of optimizer_center:',
                  path_to_optimizer_center)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            # Restored optimizer state may live on CPU; move its tensors to
            # the GPU so optimizer steps do not mix devices.
            for state in optimizer.state.values():
                for k, v in state.items():
                    if isinstance(v, torch.Tensor):
                        state[k] = v.cuda()
            center_criterion.load_state_dict(torch.load(path_to_center_param))
            optimizer_center.load_state_dict(
                torch.load(path_to_optimizer_center))
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD,
                                          start_epoch)
        elif cfg.MODEL.PRETRAIN_CHOICE == 'imagenet':
            start_epoch = 0
            scheduler = WarmupMultiStepLR(optimizer, cfg.SOLVER.STEPS,
                                          cfg.SOLVER.GAMMA,
                                          cfg.SOLVER.WARMUP_FACTOR,
                                          cfg.SOLVER.WARMUP_ITERS,
                                          cfg.SOLVER.WARMUP_METHOD)
        else:
            raise ValueError(
                'Only support pretrain_choice for imagenet and self, '
                'but got {}'.format(cfg.MODEL.PRETRAIN_CHOICE))

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch  # add for using self trained model
        )
    else:
        print(
            "Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n"
            .format(cfg.MODEL.IF_WITH_CENTER))
def train(cfg):
    """Train a re-ID model according to ``cfg``.

    This variant passes ``cfg.SOLVER.MODE`` and ``cfg.SOLVER.MAX_EPOCHS``
    through to ``WarmupMultiStepLR``.  Resumes from a self-trained
    checkpoint when ``cfg.MODEL.PRETRAIN_CHOICE == 'self'``; any other
    choice starts from epoch 0.
    """
    # prepare dataset
    train_loader, val_loader, num_query, num_classes = make_data_loader(cfg)

    # prepare model
    model = build_model(cfg, num_classes)

    if cfg.MODEL.IF_WITH_CENTER == "no":
        print("Train without center loss, the loss type is", cfg.MODEL.METRIC_LOSS_TYPE)
        optimizer = make_optimizer(cfg, model)
        loss_func = make_loss(cfg, num_classes)  # modified by gu

        if cfg.MODEL.PRETRAIN_CHOICE == "self":
            # The epoch index is encoded in the checkpoint filename
            # '<prefix>_model_<epoch>.pth'.  int() replaces the original
            # eval(), which is unsafe on a path component and unnecessary.
            start_epoch = int(
                cfg.MODEL.PRETRAIN_PATH.split("/")[-1].split(".")[0].split("_")[-1]
            )
            print("Start epoch:", start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace("model", "optimizer")
            print("Path to the checkpoint of optimizer:", path_to_optimizer)
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            scheduler = WarmupMultiStepLR(
                optimizer,
                cfg.SOLVER.STEPS,
                cfg.SOLVER.GAMMA,
                cfg.SOLVER.WARMUP_FACTOR,
                cfg.SOLVER.WARMUP_ITERS,
                cfg.SOLVER.WARMUP_METHOD,
                cfg.SOLVER.MODE,
                cfg.SOLVER.MAX_EPOCHS,
                start_epoch,
            )
        else:
            start_epoch = 0
            scheduler = WarmupMultiStepLR(
                optimizer,
                cfg.SOLVER.STEPS,
                cfg.SOLVER.GAMMA,
                cfg.SOLVER.WARMUP_FACTOR,
                cfg.SOLVER.WARMUP_ITERS,
                cfg.SOLVER.WARMUP_METHOD,
                cfg.SOLVER.MODE,
                cfg.SOLVER.MAX_EPOCHS,
            )

        do_train(
            cfg,
            model,
            train_loader,
            val_loader,
            optimizer,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,  # add for using self trained model
        )
    elif cfg.MODEL.IF_WITH_CENTER == "yes":
        print("Train with center loss, the loss type is", cfg.MODEL.METRIC_LOSS_TYPE)
        loss_func, center_criterion = make_loss_with_center(
            cfg, num_classes
        )  # modified by gu
        optimizer, optimizer_center = make_optimizer_with_center(
            cfg, model, center_criterion
        )

        if cfg.MODEL.PRETRAIN_CHOICE == "self":
            # Restore model, optimizer, center parameters, and the center
            # optimizer from sibling checkpoint files.
            start_epoch = int(
                cfg.MODEL.PRETRAIN_PATH.split("/")[-1].split(".")[0].split("_")[-1]
            )
            print("Start epoch:", start_epoch)
            path_to_optimizer = cfg.MODEL.PRETRAIN_PATH.replace("model", "optimizer")
            print("Path to the checkpoint of optimizer:", path_to_optimizer)
            path_to_center_param = cfg.MODEL.PRETRAIN_PATH.replace(
                "model", "center_param"
            )
            print("Path to the checkpoint of center_param:", path_to_center_param)
            path_to_optimizer_center = cfg.MODEL.PRETRAIN_PATH.replace(
                "model", "optimizer_center"
            )
            print(
                "Path to the checkpoint of optimizer_center:", path_to_optimizer_center
            )
            model.load_state_dict(torch.load(cfg.MODEL.PRETRAIN_PATH))
            optimizer.load_state_dict(torch.load(path_to_optimizer))
            center_criterion.load_state_dict(torch.load(path_to_center_param))
            optimizer_center.load_state_dict(torch.load(path_to_optimizer_center))
            scheduler = WarmupMultiStepLR(
                optimizer,
                cfg.SOLVER.STEPS,
                cfg.SOLVER.GAMMA,
                cfg.SOLVER.WARMUP_FACTOR,
                cfg.SOLVER.WARMUP_ITERS,
                cfg.SOLVER.WARMUP_METHOD,
                cfg.SOLVER.MODE,
                cfg.SOLVER.MAX_EPOCHS,
                start_epoch,
            )
        else:
            start_epoch = 0
            scheduler = WarmupMultiStepLR(
                optimizer,
                cfg.SOLVER.STEPS,
                cfg.SOLVER.GAMMA,
                cfg.SOLVER.WARMUP_FACTOR,
                cfg.SOLVER.WARMUP_ITERS,
                cfg.SOLVER.WARMUP_METHOD,
                cfg.SOLVER.MODE,
                cfg.SOLVER.MAX_EPOCHS,
            )

        do_train_with_center(
            cfg,
            model,
            center_criterion,
            train_loader,
            val_loader,
            optimizer,
            optimizer_center,
            scheduler,  # modify for using self trained model
            loss_func,
            num_query,
            start_epoch,  # add for using self trained model
        )
    else:
        print(
            "Unsupported value for cfg.MODEL.IF_WITH_CENTER {}, only support yes or no!\n".format(
                cfg.MODEL.IF_WITH_CENTER
            )
        )