def model_init_par():
    # model
    backbone = resnet50()
    classifier = BaseClassifier(nattr=6)
    model = FeatClassifier(backbone, classifier)

    # load
    checkpoint = torch.load(
        '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/exp_result/custom/custom/img_model/ckpt_max.pth'
    )

    # unfolded load
    # state_dict = checkpoint['state_dicts']
    # new_state_dict = OrderedDict()
    # for k, v in state_dict.items():
    #     name = k[7:]
    #     new_state_dict[name] = v
    # model.load_state_dict(new_state_dict)

    # one-liner load
    # if torch.cuda.is_available():
    #     model = torch.nn.DataParallel(model).cuda()
    #     model.load_state_dict(checkpoint['state_dicts'])
    # else:
    model.load_state_dict({
        k.replace('module.', ''): v
        for k, v in checkpoint['state_dicts'].items()
    })

    # cuda eval
    model.cuda()
    model.eval()

    # valid_transform
    height, width = 256, 192
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_transform = T.Compose(
        [T.Resize((height, width)), T.ToTensor(), normalize])

    return model, valid_transform
def model_init_par():
    # model
    backbone = resnet50()
    classifier = BaseClassifier(nattr=6)
    model = FeatClassifier(backbone, classifier)

    # load
    checkpoint = torch.load(
        '/home/deep/PycharmProjects/pedestrian-attribute-recognition/exp_result/custom/custom/img_model/ckpt_max.pth')
    model.load_state_dict({k.replace('module.', ''): v
                           for k, v in checkpoint['state_dicts'].items()})

    # cuda eval
    model.cuda()
    model.eval()

    # valid_transform
    height, width = 256, 192
    normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    valid_transform = T.Compose([
        T.Resize((height, width)),
        T.ToTensor(),
        normalize
    ])

    return model, valid_transform
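# A minimal usage sketch for model_init_par(), not part of the original code:
# it assumes a single test image on disk (the path, the helper name, and the
# 0.5 decision threshold are illustrative assumptions). FeatClassifier returns
# raw logits, so sigmoid is applied before thresholding.
from PIL import Image

def predict_single_image_sketch(img_path='test.jpg'):
    model, valid_transform = model_init_par()
    img = Image.open(img_path).convert('RGB')
    batch = valid_transform(img).unsqueeze(0).cuda()  # shape (1, 3, 256, 192)
    with torch.no_grad():
        logits = model(batch)
        probs = torch.sigmoid(logits).cpu().numpy()[0]  # one probability per attribute
    return probs > 0.5  # boolean vector over the 6 custom attributes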
def main(args):
    visenv_name = args.dataset
    exp_dir = os.path.join('exp_result', args.dataset)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')
    print(f'train set: {args.dataset} {args.train_split}, test set: {args.valid_split}')

    train_tsfm, valid_tsfm = get_transform(args)
    print(train_tsfm)

    train_set = AttrDataset(args=args, split=args.train_split, transform=train_tsfm)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batchsize,
        shuffle=True,
        num_workers=4,
        pin_memory=True,
    )

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=valid_tsfm)
    valid_loader = DataLoader(
        dataset=valid_set,
        batch_size=args.batchsize,
        shuffle=False,
        num_workers=4,
        pin_memory=True,
    )

    print(f'{args.train_split} set: {len(train_loader.dataset)}, '
          f'{args.valid_split} set: {len(valid_loader.dataset)}, '
          f'attr_num : {train_set.attr_num}')

    backbone = resnet50()
    classifier = BaseClassifier(nattr=35)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available():
        model = torch.nn.DataParallel(model).cuda()

    print("reloading pretrained models")
    exp_dir = os.path.join('exp_result', args.dataset)
    model_path = os.path.join(exp_dir, args.dataset, 'img_model')
    model.load_state_dict(
        torch.load(
            '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/pedestrian_model/rap2_ckpt_max.pth'
        )['state_dicts'])
    # model = get_reload_weight(model_path, model)
    model.eval()

    preds_probs = []
    gt_list = []
    with torch.no_grad():
        for step, (imgs, gt_label, imgname) in enumerate(tqdm(valid_loader)):
            imgs = imgs.cuda()
            gt_label = gt_label.cuda()
            gt_list.append(gt_label.cpu().numpy())
            gt_label[gt_label == -1] = 0
            valid_logits = model(imgs)
            valid_probs = torch.sigmoid(valid_logits)
            preds_probs.append(valid_probs.cpu().numpy())

    gt_label = np.concatenate(gt_list, axis=0)
    preds_probs = np.concatenate(preds_probs, axis=0)

    valid_result = get_pedestrian_metrics(gt_label, preds_probs)

    print(f'Evaluation on test set, \n',
          'ma: {:.4f}, pos_recall: {:.4f} , neg_recall: {:.4f} \n'.format(
              valid_result.ma, np.mean(valid_result.label_pos_recall),
              np.mean(valid_result.label_neg_recall)),
          'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
              valid_result.instance_acc, valid_result.instance_prec,
              valid_result.instance_recall, valid_result.instance_f1))
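# A minimal entry-point sketch for main(), assuming the repo's argument_parser()
# is imported at the top of the file (it is used the same way in __init__ below);
# the command-line arguments are supplied by the user at run time.
if __name__ == '__main__':
    parser = argument_parser()
    args = parser.parse_args()
    main(args)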
def __init__(self):
    device = torch.device('cpu')
    FORCE_TO_CPU = True

    parser = argument_parser()
    args = parser.parse_args(['PETA', '--model=dpn107'])
    visenv_name = 'PETA'
    exp_dir = os.path.join('exp_result', visenv_name)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(
        model_dir, 'ckpt_max_e0384293_2020-09-17_18-35-21.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')

    _, predict_tsfm = get_transform(args)

    valid_set = AttrDataset(args=args, split=args.valid_split, transform=predict_tsfm)
    args.att_list = valid_set.attr_id

    backbone = getattr(sys.modules[__name__], args.model)()
    if "dpn68" in args.model:
        net_parameter = 832
    elif "dpn" in args.model:
        net_parameter = 2688
    elif "densenet" in args.model:
        net_parameter = 1024
    else:
        net_parameter = 2048

    classifier = BaseClassifier(netpara=net_parameter, nattr=valid_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    if torch.cuda.is_available() and not FORCE_TO_CPU:
        model = torch.nn.DataParallel(model).cuda()
        ckpt = torch.load(save_model_path)
        print('Model is served with GPU')
    else:
        model = torch.nn.DataParallel(model)
        ckpt = torch.load(save_model_path, map_location=torch.device('cpu'))
        print('Model is served with CPU')

    model.load_state_dict(ckpt['state_dicts'])
    model.eval()

    # from torchsummary import summary
    # summary(model, input_size=(3, 256, 192))
    print('Total number of parameters: ',
          sum(p.numel() for p in model.parameters() if p.requires_grad))

    self.args = args
    self.predict_tsfm = predict_tsfm
    self.model = model
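# A hedged inference sketch for this predictor class; the method name, the
# threshold, and the (attribute, probability) return format are assumptions
# and not part of the original class. It reuses predict_tsfm, the DataParallel
# model (which falls back to plain CPU execution when no GPUs are visible),
# and the attribute names stored in args.att_list.
def predict_sketch(self, pil_image, threshold=0.5):
    batch = self.predict_tsfm(pil_image).unsqueeze(0)  # (1, 3, 256, 192), on CPU
    with torch.no_grad():
        probs = torch.sigmoid(self.model(batch))[0].cpu().numpy()
    return [(attr, float(p))
            for attr, p in zip(self.args.att_list, probs) if p > threshold]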
# Fragment of a FLOPs/params benchmarking loop. It assumes an enclosing loop
# over model names (bound to `trained_model`), a `data = []` accumulator
# declared before the loop, and per-model setup of `args`, `backbone`,
# `valid_set`, `save_model_path`, and `FORCE_TO_CPU` as in __init__ above.
# The leading `if "dpn68"` branch was truncated in the source and is restored
# here from the identical mapping used in __init__.
if "dpn68" in args.model:
    net_parameter = 832
elif "dpn" in args.model:
    net_parameter = 2688
elif "densenet" in args.model:
    net_parameter = 1024
else:
    net_parameter = 2048

classifier = BaseClassifier(netpara=net_parameter, nattr=valid_set.attr_num)
model = FeatClassifier(backbone, classifier)

if torch.cuda.is_available() and not FORCE_TO_CPU:
    model = torch.nn.DataParallel(model).cuda()
    ckpt = torch.load(save_model_path)
    print('Model is served with GPU')
else:
    model = torch.nn.DataParallel(model)
    ckpt = torch.load(save_model_path, map_location=torch.device('cpu'))
    print('Model is served with CPU')

model.load_state_dict(ckpt['state_dicts'])
model.eval()

# ptflops: measure multiply-accumulates and parameter count for this model
macs, params = get_model_complexity_info(model, (3, 256, 192), as_strings=True,
                                          print_per_layer_stat=False, verbose=False)
data.append([trained_model, macs, params])

df = pd.DataFrame(data, columns=['model', 'macs', 'params'])
df.to_csv('flops.csv')
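# A minimal, standalone sketch of the ptflops call used above. It substitutes
# a plain torchvision resnet50 for the repo's FeatClassifier (an assumption,
# so the numbers will differ from the benchmarked models) and shows the
# expected input-size tuple and string-formatted outputs.
import torchvision
from ptflops import get_model_complexity_info

net = torchvision.models.resnet50()
macs, params = get_model_complexity_info(net, (3, 256, 192), as_strings=True,
                                          print_per_layer_stat=False, verbose=False)
print(f'MACs: {macs}, params: {params}')  # strings such as '... GMac' and '... M'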