def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    print('load the model from: ' + args.save_path)
    exp_dir = os.path.join(args.save_path, args.dataset, args.dataset, 'img_model/ckpt_max.pth')

    train_tsfm, valid_tsfm = get_transform(args)
    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm,
                            target_transform=None, Type='val')
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        drop_last=True,
        pin_memory=True,
    )
    print('have generated dataset')

    if args.model_name == 'resnet18_conv':
        backbone = resnet18_conv()

    classifier = BaseClassifier(nattr=valid_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    model.load_state_dict(torch.load(exp_dir)['state_dicts'])

    labels = valid_set.label
    sample_weight = labels.mean(0)
    criterion = CEL_Sigmoid(sample_weight)

    valid_loss, valid_gt, valid_probs = valid_trainer(
        model=model,
        valid_loader=valid_loader,
        criterion=criterion,
    )

    valid_result = get_pedestrian_metrics(valid_gt, valid_probs)

    # print result
    print(f'Evaluation on test set, \n',
          'ma: {:.4f}, pos_recall: {:.4f} , neg_recall: {:.4f} \n'.format(
              valid_result.ma, np.mean(valid_result.label_pos_recall),
              np.mean(valid_result.label_neg_recall)),
          'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
              valid_result.instance_acc, valid_result.instance_prec,
              valid_result.instance_recall, valid_result.instance_f1))

    for index in range(len(valid_set.attr_name)):
        print(f'{valid_set.attr_name[index]}')
        print(f'pos recall: {valid_result.label_pos_recall[index]} '
              f'neg_recall: {valid_result.label_neg_recall[index]} '
              f'ma: {valid_result.label_ma[index]}')
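# NOTE (added sketch): the eval/train functions in this section all read fields such as
# args.device, args.save_path, args.dataset, args.valid_split, args.batchsize and
# args.model_name, but the original argument parser is not included here. The snippet below
# is only a minimal, hypothetical sketch of how such an `args` object could be built and
# passed to main(); the flag names and defaults are assumptions, not the repo's actual CLI.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='pedestrian attribute recognition (sketch)')
    parser.add_argument('--device', type=str, default='0')           # value for CUDA_VISIBLE_DEVICES
    parser.add_argument('--save_path', type=str, default='exp_result')
    parser.add_argument('--dataset', type=str, default='PETA')
    parser.add_argument('--valid_split', type=str, default='test')
    parser.add_argument('--batchsize', type=int, default=64)
    parser.add_argument('--model_name', type=str, default='resnet18_conv')
    main(parser.parse_args())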
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join(args.save_path, args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    # train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm,
                            target_transform=None, Type='train')
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        drop_last=True,
        pin_memory=True,
    )

    # valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm,
                            target_transform=None, Type='val')
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        drop_last=True,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    # backbone = resnet50()
    if args.model_name == 'resnet50': backbone = resnet50()
    if args.model_name == 'resnet18': backbone = resnet18()
    if args.model_name == 'resnet18_stn': backbone = resnet18_stn()
    if args.model_name == 'resnet_depth': backbone = resnet_depth()
    if args.model_name == 'resnet18_transformer': backbone = resnet18_transformer()
    if args.model_name == 'resnet50_dynamic_se': backbone = resnet50_dynamic_se()
    if args.model_name == 'resnet18_dynamic_se': backbone = resnet18_dynamic_se()
    if args.model_name == 'resnet18_replace_se': backbone = resnet18_replace_se()
    if args.model_name == 'resnet18_se': backbone = resnet18_se()
    if args.model_name == 'resnet34': backbone = resnet34()
    if args.model_name == 'resnet18_group_se': backbone = resnet18_group_se()
    if args.model_name == 'resnet18_vit': backbone = resnet18_vit()
    if args.model_name == 'resnet18_vit_v2': backbone = resnet18_vit_v2()
    if args.model_name == 'resnet18_vit_v3': backbone = resnet18_vit_v3()
    if args.model_name == 'resnet18_vit_v5': backbone = resnet18_vit_v5()
    if args.model_name == 'resnet18_energy_vit': backbone = resnet18_energy_vit()
    if args.model_name == 'resnet18_vit_split': backbone = resnet18_vit_split(num_classes=train_set.attr_num)
    if args.model_name == 'inception_self': backbone = inception_self()
    if args.model_name == 'spatial_modulator': backbone = spatial_modulator()
    if args.model_name == 'fusion_concat': backbone = fusion_concat()

    print('have generated the model')
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    print('')

    # if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda()

    # for k, v in model.state_dict().items():
    #     print(k)
    '''
    model_dict = {}
    state_dict = model.state_dict()
    pretrain_dict = torch.load('/home/pengqy/paper/resnet18_2/PETA/PETA/img_model/ckpt_max.pth')['state_dicts']
    for k, v in pretrain_dict.items():
        # print('%%%%% ', k)
        if k in state_dict:
            if k.startswith('module.backbone.conv1'):
                # pdb.set_trace()
                model_dict[k] = v
            elif k.startswith('module.backbone.bn1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.layer'):
                model_dict[k] = v
            elif k.startswith('module.classifier'):
                model_dict[k] = v
            # elif k.startswith('module.backbone.spa_conv_0'):
            #     model_dict[k] = v
            # elif k.startswith('module.backbone.spa_bn_0'):
            #     model_dict[k] = v
    # pdb.set_trace()
    state_dict.update(model_dict)
    model.load_state_dict(state_dict)
    '''

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [{'params': model.module.finetune_params(), 'lr': 0.01},
                    {'params': model.module.fresh_params(), 'lr': 0.1}]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    loss = args.loss

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss=loss)

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join(args.save_path, args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    # backbone = resnet50()
    if args.model_name == 'resnet50': backbone = resnet50()
    if args.model_name == 'resnet18': backbone = resnet18()
    if args.model_name == 'resnet50_dynamic_se': backbone = resnet50_dynamic_se()
    if args.model_name == 'resnet18_dynamic_se': backbone = resnet18_dynamic_se()
    if args.model_name == 'resnet18_replace_se': backbone = resnet18_replace_se()
    if args.model_name == 'resnet18_se': backbone = resnet18_se()
    if args.model_name == 'resnet34': backbone = resnet34()
    if args.model_name == 'acnet': backbone = resnet18_acnet(num_classes=train_set.attr_num)

    print('have generated the model')
    classifier = BaseClassifier(nattr=train_set.attr_num)
    classifier_depth = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    print('')

    # if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                    {'params': model.module.fresh_params(), 'lr': args.lr_new}]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    loss = args.loss

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss=loss)

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    log_name = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    stdout_file = os.path.join(log_dir, f'stdout_{log_name}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=16,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    # backbone = resnet50()
    # classifier = BaseClassifier(nattr=train_set.attr_num)
    backbone = osnet_ain_x1_0(num_classes=56, pretrained=True, loss='softmax')
    classifier = BaseClassifier_osnet(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()  # changed

    criterion = CEL_Sigmoid(sample_weight)
    # criterion = multilabel_categorical_crossentropy

    param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                    {'params': model.module.fresh_params(), 'lr': args.lr_new}]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    if args.training:
        # train, evaluating on the validation set after every epoch
        best_metric, epoch = trainer(epoch=args.train_epoch,
                                     model=model,
                                     train_loader=train_loader,
                                     valid_loader=valid_loader,
                                     criterion=criterion,
                                     optimizer=optimizer,
                                     lr_scheduler=lr_scheduler,
                                     path=save_model_path,
                                     dataset=train_set)
        print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')
    else:
        # evaluation only, no training
        model_path = args.best_model_path
        saved_state_dict = torch.load(model_path)['state_dicts']
        # checkpoints saved from nn.DataParallel carry a 'module.' prefix; strip it before loading
        saved_state_dict = {
            k.replace('module.', ''): v
            for k, v in saved_state_dict.items()
        }
        backbone = osnet_ain_x1_0(num_classes=valid_set.attr_num, pretrained=True, loss='softmax')
        print('make model for only test')
        classifier = BaseClassifier_osnet(nattr=valid_set.attr_num)
        test_model = FeatClassifier(backbone, classifier)
        print("loading model")
        test_model.load_state_dict(saved_state_dict)
        test_model.cuda()
        test_alm(valid_loader,
                 test_model,
                 attr_num=valid_set.attr_num,
                 description=valid_set.attr_id,
                 set='test',
                 threshold=0.5)
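# NOTE (added sketch): the eval branch above strips the 'module.' prefix because checkpoints
# written from a torch.nn.DataParallel wrapper store keys as 'module.<...>'. The snippet below
# is a minimal, self-contained illustration of that key remapping; the toy model here is a
# placeholder, not part of this repo.
import torch
import torch.nn as nn

toy_net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
wrapped = nn.DataParallel(toy_net)                        # keys become 'module.0.weight', ...
ckpt = wrapped.state_dict()
stripped = {k.replace('module.', '', 1): v for k, v in ckpt.items()}
toy_net.load_state_dict(stripped)                         # loads cleanly into the unwrapped model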
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    print('load the model from: ' + args.save_path)
    # exp_dir = os.path.join(args.save_path, args.dataset, args.dataset, 'img_model/ckpt_max.pth')
    exp_dir = 'src/ckpt_max_23.pth'
    # exp_dir = '/home/pengqy/paper/resnet18_part_detector/PETA/PETA/img_model/ckpt_max.pth'

    train_tsfm, train_tsfm_resize, valid_tsfm, valid_tsfm_resize = get_transform(args)
    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm,
                            transform_resize=valid_tsfm_resize)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )
    print('have generated dataset')

    if args.model_name == 'resnet18_consistent':
        backbone = resnet18_consistent()

    classifier = BaseClassifier(nattr=valid_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
        # model = model.cuda()

    # loading state_dict from the model
    # model.load_state_dict(torch.load(exp_dir)['state_dicts'])
    model_dict = {}
    state_dict = model.state_dict()
    pretrain_dict = torch.load(exp_dir)['state_dicts']
    for k, v in pretrain_dict.items():
        if k.startswith('module.backbone.'):
            model_dict[k] = v
        # graft classifier conv/bn weights onto the matching backbone keys
        # (the slices strip the 'module.classifier.conv' / 'module.classifier.bn' prefixes)
        if k.startswith('module.classifier.conv'):
            model_dict['module.backbone.conv' + k[22:]] = v
        elif k.startswith('module.classifier.bn'):
            model_dict['module.backbone.bn' + k[20:]] = v
    state_dict.update(model_dict)
    model.load_state_dict(state_dict)
    # pdb.set_trace()

    labels = valid_set.label
    sample_weight = labels.mean(0)
    criterion = CEL_Sigmoid(sample_weight)

    valid_loss, valid_gt, valid_probs = valid_trainer(
        model=model,
        valid_loader=valid_loader,
        criterion=criterion,
    )

    valid_result = get_pedestrian_metrics(valid_gt, valid_probs)

    # print result
    print(f'Evaluation on test set, \n',
          'ma: {:.4f}, pos_recall: {:.4f} , neg_recall: {:.4f} \n'.format(
              valid_result.ma, np.mean(valid_result.label_pos_recall),
              np.mean(valid_result.label_neg_recall)),
          'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
              valid_result.instance_acc, valid_result.instance_prec,
              valid_result.instance_recall, valid_result.instance_f1))

    for index in range(len(valid_set.attr_name)):
        print(f'{valid_set.attr_name[index]}')
        print(f'pos recall: {valid_result.label_pos_recall[index]} '
              f'neg_recall: {valid_result.label_neg_recall[index]} '
              f'ma: {valid_result.label_ma[index]}')
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    print('load the model from: ' + args.save_path)
    exp_dir = os.path.join(args.save_path, args.dataset, args.dataset, 'img_model/ckpt_max.pth')

    train_tsfm, valid_tsfm = get_transform(args)
    # valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm,
                            target_transform=None, Type='val')
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )
    print('have generated dataset')

    if args.model_name == 'resnet50': backbone = resnet50()
    if args.model_name == 'resnet18': backbone = resnet18()
    if args.model_name == 'resnet18_consistent': backbone = resnet18_consistent()
    if args.model_name == 'resnet18_stn': backbone = resnet18_stn()
    if args.model_name == 'resnet18_autoencoder': backbone = resnet18_autoencoder()
    if args.model_name == 'resnet50_dynamic_se': backbone = resnet50_dynamic_se()
    if args.model_name == 'resnet18_dynamic_se': backbone = resnet18_dynamic_se()
    if args.model_name == 'resnet18_group_se': backbone = resnet18_group_se()
    if args.model_name == 'resnet18_vit': backbone = resnet18_vit()
    if args.model_name == 'resnet18_vit_v2': backbone = resnet18_vit_v2()
    if args.model_name == 'resnet18_vit_v3': backbone = resnet18_vit_v3()
    if args.model_name == 'resnet18_vit_v4': backbone = resnet18_vit_v4()
    if args.model_name == 'resnet34': backbone = resnet34()
    if args.model_name == 'resnet18_vit_split': backbone = resnet18_vit_split(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_energy_vit': backbone = resnet18_energy_vit(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet_depth': backbone = resnet_depth(num_classes=valid_set.attr_num)
    if args.model_name == 'spatial_modulator': backbone = spatial_modulator()
    if args.model_name == 'fusion_concat': backbone = fusion_concat()

    classifier = BaseClassifier(nattr=valid_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
        # model = model.cuda()

    # loading state_dict from the model
    model.load_state_dict(torch.load(exp_dir)['state_dicts'])
    # load_ckpt(model, exp_dir)
    print('have load from the pretrained model')

    # start eval
    labels = valid_set.label
    sample_weight = labels.mean(0)
    criterion = CEL_Sigmoid(sample_weight)

    valid_loss, valid_gt, valid_probs = valid_trainer(
        model=model,
        valid_loader=valid_loader,
        criterion=criterion,
    )

    valid_result = get_pedestrian_metrics(valid_gt, valid_probs)

    # print result
    print(f'Evaluation on test set, \n',
          'ma: {:.4f}, pos_recall: {:.4f} , neg_recall: {:.4f} \n'.format(
              valid_result.ma, np.mean(valid_result.label_pos_recall),
              np.mean(valid_result.label_neg_recall)),
          'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
              valid_result.instance_acc, valid_result.instance_prec,
              valid_result.instance_recall, valid_result.instance_f1))

    # for index in range(5, 35):
    for index in range(len(valid_set.attr_name)):
        print(f'{valid_set.attr_name[index]}')
        print(f'pos recall: {valid_result.label_pos_recall[index]} '
              f'neg_recall: {valid_result.label_neg_recall[index]} '
              f'ma: {valid_result.label_ma[index]}')
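# NOTE (added sketch): get_pedestrian_metrics() is defined elsewhere in the repo. The helper
# below only illustrates the usual definitions behind the numbers printed above (per-attribute
# positive/negative recall and mA as their average), assuming hard predictions at a 0.5
# threshold; the repo's implementation may differ in details such as epsilon handling.
import numpy as np

def sketch_label_metrics(gt, probs, threshold=0.5, eps=1e-20):
    pred = (probs > threshold).astype(float)
    tp = ((gt == 1) & (pred == 1)).sum(0)
    tn = ((gt == 0) & (pred == 0)).sum(0)
    pos = (gt == 1).sum(0)
    neg = (gt == 0).sum(0)
    label_pos_recall = tp / (pos + eps)                    # recall on positives, per attribute
    label_neg_recall = tn / (neg + eps)                    # recall on negatives, per attribute
    label_ma = (label_pos_recall + label_neg_recall) / 2
    return label_pos_recall, label_neg_recall, label_ma.mean()   # last value is mA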
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join(args.save_path, args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    # backbone = resnet50()
    if args.model_name == 'resnet18': backbone = resnet18()
    if args.model_name == 'acnet': backbone = resnet18_acnet(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_attention_depth': backbone = resnet18_attention_depth(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_no_attention_depth': backbone = resnet18_no_attention_depth(num_classes=train_set.attr_num)
    if args.model_name == 'resnet_depth_selective_fusion': backbone = resnet_depth_selective_fusion(num_classes=train_set.attr_num)
    if args.model_name == 'resnet_attention_depth_spatial': backbone = resnet_attention_depth_spatial(num_classes=train_set.attr_num)
    if args.model_name == 'resnet_attention_depth_cbam_spatial': backbone = resnet_attention_depth_cbam_spatial(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_inception_depth_4': backbone = resnet18_inception_depth_4(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_self_attention_depth_34': backbone = resnet18_self_attention_depth_34(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_resnet18_resnet18_34': backbone = resnet18_resnet18_resnet18_34(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_self_attention_depth_34_version2': backbone = resnet18_self_attention_depth_34_version2(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_inception_depth_4_wrap': backbone = resnet18_inception_depth_4_wrap(num_classes=train_set.attr_num)
    if args.model_name == 'ours': backbone = ours(num_classes=train_set.attr_num)
    if args.model_name == 'resnet50_ours': backbone = resnet50_ours(num_classes=train_set.attr_num)
    if args.model_name == 'resnet_attention': backbone = resnet_attention(num_classes=train_set.attr_num)
    if args.model_name == 'resnet18_self_mutual_attention': backbone = resnet18_self_mutual_attention(num_classes=train_set.attr_num)

    print('have generated the model')
    classifier = BaseClassifier(nattr=train_set.attr_num)
    # classifier_depth = BaseClassifier(nattr=train_set.attr_num)
    # model = FeatClassifier(backbone, classifier, classifier_depth)
    model = FeatClassifier(backbone, classifier)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    print('')

    # if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda()
    # pdb.set_trace()

    # merge weights from an RGB checkpoint and a depth checkpoint into one state dict
    model_dict = {}
    state_dict = model.state_dict()
    pretrain_dict = torch.load(
        '/media/data1/pengqy/Strong_Baseline_of_Pedestrian_Attribute_Recognition/resnet18_rgb/PETA/PETA/img_model/ckpt_max.pth'
    )['state_dicts']
    for k, v in pretrain_dict.items():
        # print('%%%%% ', k)
        if k in state_dict:
            if k.startswith('module.backbone.conv1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.bn1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.layer'):
                model_dict[k] = v
            elif k.startswith('module.classifier'):
                model_dict[k] = v
    # pdb.set_trace()

    pretrain_dict = torch.load(
        '/media/data1/pengqy/Strong_Baseline_of_Pedestrian_Attribute_Recognition/resnet_depth/PETA/PETA/img_model/ckpt_max.pth'
    )['state_dicts']
    for k, v in pretrain_dict.items():
        # print('%%%%% ', k)
        if k in state_dict:
            if k.startswith('module.backbone.conv1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.bn1'):
                model_dict[k] = v
            elif k.startswith('module.backbone.layer'):
                model_dict[k] = v
    # pdb.set_trace()

    state_dict.update(model_dict)
    model.load_state_dict(state_dict)  # apply the merged pretrained weights to the model

    criterion = CEL_Sigmoid(sample_weight)

    '''
    param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                    {'params': model.module.fresh_params(), 'lr': args.lr_new},
                    {'params': model.module.fresh_depth_params(), 'lr': args.lr_new}]
    '''
    param_groups = [{'params': model.module.finetune_params(), 'lr': 0.01},
                    {'params': model.module.fresh_params(), 'lr': 0.01}]
    '''
    param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                    {'params': model.module.fresh_params(), 'lr': args.lr_new},
                    {'params': model.module.depth_params(), 'lr': 0.005}]
    '''
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    loss = args.loss

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss=loss)

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))

    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.device)

    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    backbone = resnet50()
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                    {'params': model.module.fresh_params(), 'lr': args.lr_new}]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = MultiStepLR(optimizer, args.lr_scheduler_steps, gamma=0.1)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')

    torch.save(model.state_dict(),
               os.path.join(model_dir, f'{time_str()}_model.pth.tar'))
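# NOTE (added sketch): unlike the other variants in this section, the script above uses
# MultiStepLR instead of ReduceLROnPlateau. The two schedulers are stepped differently inside
# the epoch loop, which the shared trainer() presumably has to account for; the helper below
# is only an illustration of that difference, not the repo's trainer code.
from torch.optim.lr_scheduler import MultiStepLR, ReduceLROnPlateau

def step_scheduler(lr_scheduler, valid_metric=None):
    if isinstance(lr_scheduler, ReduceLROnPlateau):
        # the plateau scheduler needs the monitored validation metric each epoch
        lr_scheduler.step(valid_metric)
    else:
        # milestone-based schedulers are stepped unconditionally once per epoch
        lr_scheduler.step()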
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=0,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    # sample_weight = labels.mean(0)
    # sample_weight = labels[labels!=2].reshape((labels.shape[0], labels.shape[1])).mean(0)
    # treat label value 2 as "unknown" and exclude it from the per-attribute positive ratio
    sample_weight = np.nanmean(np.where(labels != 2, labels, np.nan), axis=0)

    backbone = getattr(sys.modules[__name__], args.model)()
    if "dpn68" in args.model:
        net_parameter = 832
    elif "dpn" in args.model:
        net_parameter = 2688
    elif "densenet" in args.model:
        net_parameter = 1024
    else:
        net_parameter = 2048

    classifier = BaseClassifier(netpara=net_parameter, nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    if torch.cuda.is_available():
        param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                        {'params': model.module.fresh_params(), 'lr': args.lr_new}]
    else:
        param_groups = [{'params': model.finetune_params(), 'lr': args.lr_ft},
                        {'params': model.fresh_params(), 'lr': args.lr_new}]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')
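# NOTE (added sketch): the sample_weight line above masks out label value 2 ("unknown") before
# averaging, so unknown entries do not inflate the positive ratio. A tiny worked example of
# that masking with a toy label matrix (values 0/1/2):
import numpy as np

toy_labels = np.array([[1, 0, 2],
                       [0, 1, 2],
                       [1, 1, 0]], dtype=float)
plain_mean = toy_labels.mean(0)
# plain_mean -> [0.667, 0.667, 1.333]: the 2s distort the third column
masked_mean = np.nanmean(np.where(toy_labels != 2, toy_labels, np.nan), axis=0)
# masked_mean -> [0.667, 0.667, 0.0]: unknown entries no longer count as positives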
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    if args.model == 'resnet18':
        backbone = resnet18()
    elif args.model == 'resnet34':
        backbone = resnet34()
    elif args.model == 'resnet50':
        backbone = resnet50()
    elif args.model == 'resnet101':
        backbone = resnet101()
    elif args.model == 'resnet152':
        backbone = resnet152()
    else:
        raise ValueError('No Defined Model!')

    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                    {'params': model.module.fresh_params(), 'lr': args.lr_new}]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')
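# NOTE (added sketch): CEL_Sigmoid(sample_weight) is the repo's loss and its exact weighting is
# not shown in this section. The function below illustrates one common way a per-attribute
# positive ratio is turned into element-wise weights for sigmoid cross-entropy in pedestrian
# attribute work (rare positives weighted up, frequent positives weighted down); it is a sketch
# of the idea, not a copy of the repo's implementation.
import torch
import torch.nn.functional as F

def weighted_sigmoid_bce(logits, targets, pos_ratio):
    # pos_ratio: per-attribute positive frequency, e.g. labels.mean(0) as computed above
    # targets:   float tensor of 0/1 ground-truth attributes, same shape as logits
    ratio = torch.as_tensor(pos_ratio, dtype=logits.dtype, device=logits.device)
    weights = torch.exp(targets * (1 - ratio) + (1 - targets) * ratio)
    loss = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    return (loss * weights).mean()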
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)

    # Added for logging purposes
    user = getpass.getuser()
    fixed_time_str = time_str()
    stdout_file = os.path.join(log_dir, "_".join(['stdout', user, f'{fixed_time_str}.txt']))
    save_model_path = os.path.join(model_dir, "_".join(['ckpt_max', user, f'{fixed_time_str}.pth']))
    trackitems_dir = os.path.join(log_dir, "_".join(['trackitems', user, f'{fixed_time_str}.txt']))

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    # train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_set = AttrDataset_new(args=args, split=args.train_split,
                                transformation_dict=args.train_transform)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=8,
        pin_memory=True,
    )

    # valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset_new(args=args, split=args.valid_split,
                                transformation_dict=args.valid_transform)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=8,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    # sample_weight = labels.mean(0)
    # treat label value 2 as "unknown" and exclude it from the positive ratio
    sample_weight = np.nanmean(np.where(labels != 2, labels, np.nan), axis=0)

    backbone = getattr(sys.modules[__name__], args.model)()
    if "dpn68" in args.model:
        net_parameter = 832
    elif "dpn" in args.model:
        net_parameter = 2688
    elif "densenet" in args.model:
        net_parameter = 1024
    else:
        net_parameter = 2048

    classifier = BaseClassifier(netpara=net_parameter, nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    if torch.cuda.is_available():
        param_groups = [{'params': model.module.finetune_params(), 'lr': args.lr_ft},
                        {'params': model.module.fresh_params(), 'lr': args.lr_new}]
    else:
        param_groups = [{'params': model.finetune_params(), 'lr': args.lr_ft},
                        {'params': model.fresh_params(), 'lr': args.lr_new}]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    # Added for logging purposes
    with open(trackitems_dir, "a") as f:
        code, line_no = inspect.getsourcelines(get_transform)
        for line in code:
            f.write(str(line))
        f.write(str("\n\n"))

        f.write(str(args.__dict__))
        f.write(str("\n\n"))
        f.write(str(lr_scheduler.__dict__))
        f.write(str("\n\n"))

        model_str = str(model).lower()
        have_dropout = 'dropout' in model_str
        f.write('dropout: %s' % (have_dropout))
        f.write(str("\n\n"))
        have_leaky_relu = 'leaky_relu' in model_str
        f.write('leaky_relu: %s' % (have_leaky_relu))
        f.write(str("\n\n"))

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 measure="f1")

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')

    # Added for logging purposes
    with open(trackitems_dir, "a") as f:
        f.write(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')
        f.write("\n\n")
def main(args):
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    print('load the model from: ' + args.save_path)
    exp_dir = os.path.join(args.save_path, args.dataset, args.dataset, 'img_model/ckpt_max.pth')

    train_tsfm, valid_tsfm = get_transform(args)
    # pdb.set_trace()
    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )
    print('have generated dataset')

    if args.model_name == 'resnet50': backbone = resnet50()
    if args.model_name == 'resnet18': backbone = resnet18()
    if args.model_name == 'resnet18_depth': backbone = resnet18_depth()
    if args.model_name == 'acnet': backbone = resnet18_acnet(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_attention_depth': backbone = resnet18_attention_depth(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_no_attention_depth': backbone = resnet18_no_attention_depth(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet_depth_selective_fusion': backbone = resnet_depth_selective_fusion(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet_attention_depth_spatial': backbone = resnet_attention_depth_spatial(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet_attention_depth_cbam_spatial': backbone = resnet_attention_depth_cbam_spatial(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_inception_depth_4': backbone = resnet18_inception_depth_4(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_self_attention_depth_34': backbone = resnet18_self_attention_depth_34(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet_attention': backbone = resnet_attention(num_classes=valid_set.attr_num)
    if args.model_name == 'ours': backbone = ours(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_self_attention_depth_34_version2': backbone = resnet18_self_attention_depth_34_version2(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_inception_depth_4_wrap': backbone = resnet18_inception_depth_4_wrap(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet18_self_mutual_attention': backbone = resnet18_self_mutual_attention(num_classes=valid_set.attr_num)
    if args.model_name == 'resnet_depth': backbone = resnet_depth(num_classes=valid_set.attr_num)

    classifier = BaseClassifier(nattr=valid_set.attr_num)
    classifier_depth = BaseClassifier(nattr=valid_set.attr_num)
    model = FeatClassifier(backbone, classifier)
    # model = FeatClassifier(backbone, classifier, classifier_depth)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()
        # model = model.cuda()

    # loading state_dict from the model
    model.load_state_dict(torch.load(exp_dir)['state_dicts'])
    # load_ckpt(model, exp_dir)
    print('have load from the pretrained model')

    # start eval
    labels = valid_set.label
    sample_weight = labels.mean(0)
    criterion = CEL_Sigmoid(sample_weight)

    valid_loss, valid_gt, valid_probs = valid_trainer(
        model=model,
        valid_loader=valid_loader,
        criterion=criterion,
    )

    valid_result = get_pedestrian_metrics(valid_gt, valid_probs)

    # print result
    print(f'Evaluation on test set, \n',
          'ma: {:.4f}, pos_recall: {:.4f} , neg_recall: {:.4f} \n'.format(
              valid_result.ma, np.mean(valid_result.label_pos_recall),
              np.mean(valid_result.label_neg_recall)),
          'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
              valid_result.instance_acc, valid_result.instance_prec,
              valid_result.instance_recall, valid_result.instance_f1))

    for index in range(len(valid_set.attr_name)):
        print(f'{valid_set.attr_name[index]}')
        print(f'pos recall: {valid_result.label_pos_recall[index]} '
              f'neg_recall: {valid_result.label_neg_recall[index]} '
              f'ma: {valid_result.label_ma[index]}')
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join(args.save_path, args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    # train_tsfm, valid_tsfm = get_transform(args)
    train_tsfm, train_tsfm_resize, valid_tsfm, valid_tsfm_resize = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm,
                            transform_resize=train_tsfm_resize)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=0,
        pin_memory=True,
    )

    # valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm,
                            transform_resize=valid_tsfm_resize)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    if args.model_name == 'resnet18_consistent':
        backbone = resnet18_consistent()

    print('have generated the model')
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    print('')

    # if torch.cuda.is_available():
    model = torch.nn.DataParallel(model).cuda()

    # for k, v in model.state_dict().items():
    #     print(k)

    # warm-start from an earlier checkpoint, remapping classifier conv/bn weights
    # onto the matching backbone keys
    model_dict = {}
    state_dict = model.state_dict()
    # pretrain_dict = model_zoo.load_url(model_urls['resnet18'])
    pretrain_dict = torch.load('ckpt_max_23.pth')['state_dicts']
    for k, v in pretrain_dict.items():
        # print(k)
        # if k.startswith('module.backbone.fix'):
        #     model_dict['module.backbone' + k[19:]] = v
        #     model_dict['module.backbone.fix' + k[15:]] = v
        if k.startswith('module.backbone.'):
            # pdb.set_trace()
            model_dict[k] = v
        if k.startswith('module.classifier.conv'):
            # pdb.set_trace()
            model_dict['module.backbone.conv' + k[22:]] = v
        elif k.startswith('module.classifier.bn'):
            # pdb.set_trace()
            model_dict['module.backbone.bn' + k[20:]] = v
        # elif k.startswith('layer3'):
        #     model_dict['module.backbone.fix.' + k] = v
        # elif k.startswith('layer4'):
        #     model_dict['module.backbone.fix.' + k] = v
        # elif k.startswith('module.classifier.conv_3'):
        #     model_dict['module.backbone.fix' + k[17:]] = v
        # elif k.startswith('module.classifier'):
        #     model_dict[k] = v

    # model_dict['module.backbone.fix.keypoints_0'] = part_init_0
    # model_dict['module.backbone.fix.keypoints_1'] = part_init_1
    # model_dict['module.backbone.fix.keypoints_2'] = part_init_2
    # for k, v in state_dict.items():
    #     print(k)
    # pdb.set_trace()
    state_dict.update(model_dict)
    model.load_state_dict(state_dict)

    # keep the 'fix' sub-module of the backbone trainable
    for name, child in model.module.backbone.named_children():
        # print(name)
        if name == 'fix':
            # pdb.set_trace()
            for param in child.parameters():
                param.requires_grad = True

    criterion = CEL_Sigmoid(sample_weight)
    # model.load_state_dict(torch.load('/home/pengqy/paper/resnet18_consist/PETA/PETA/img_model/ckpt_max.pth')['state_dicts'])

    param_groups = [
        {'params': model.module.finetune_params(), 'lr': 0.0001},
        # {'params': model.module.stn_params(), 'lr': 0.0001},
        {'params': model.module.fresh_params(), 'lr': 0.1},
    ]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)
    loss = args.loss

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path,
                                 loss=loss)

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(model_dir, 'ckpt_max.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    labels = train_set.label
    sample_weight = labels.mean(0)

    backbone = resnet50()
    classifier = BaseClassifier(nattr=train_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    # warm-start from a previous checkpoint and freeze the backbone for fine-tuning
    checkpoint = torch.load(
        '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/exp_result/custom/custom/img_model/ckpt_max.pth'
    )
    model.load_state_dict({
        k.replace('module.', ''): v
        for k, v in checkpoint['state_dicts'].items()
    })

    for param in model.backbone.parameters():
        param.requires_grad = False

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    criterion = CEL_Sigmoid(sample_weight)

    param_groups = [{
        'params': filter(lambda p: p.requires_grad, model.module.finetune_params()),
        'lr': args.lr_ft
    }, {
        'params': filter(lambda p: p.requires_grad, model.module.fresh_params()),
        'lr': args.lr_new
    }]
    optimizer = torch.optim.SGD(param_groups,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay,
                                nesterov=False)
    lr_scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=4)

    best_metric, epoch = trainer(epoch=args.train_epoch,
                                 model=model,
                                 train_loader=train_loader,
                                 valid_loader=valid_loader,
                                 criterion=criterion,
                                 optimizer=optimizer,
                                 lr_scheduler=lr_scheduler,
                                 path=save_model_path)

    print(f'{visenv_name}, best_metric : {best_metric} in epoch {epoch}')