    # Run validation and compute pedestrian-attribute metrics
    valid_loss, valid_gt, valid_probs = valid_trainer(
        model=model,
        valid_loader=valid_loader,
        criterion=criterion,
    )
    valid_result = get_pedestrian_metrics(valid_gt, valid_probs)

    # Print label-based (ma, per-label recalls) and instance-based metrics
    print(
        'Evaluation on test set,\n',
        'ma: {:.4f}, pos_recall: {:.4f}, neg_recall: {:.4f}\n'.format(
            valid_result.ma,
            np.mean(valid_result.label_pos_recall),
            np.mean(valid_result.label_neg_recall)),
        'Acc: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}'.format(
            valid_result.instance_acc, valid_result.instance_prec,
            valid_result.instance_recall, valid_result.instance_f1))

    # Per-attribute breakdown
    for index in range(len(valid_set.attr_name)):
        print(valid_set.attr_name[index])
        print(
            f'pos_recall: {valid_result.label_pos_recall[index]} '
            f'neg_recall: {valid_result.label_neg_recall[index]} '
            f'ma: {valid_result.label_ma[index]}')


if __name__ == '__main__':
    parser = argument_parser()
    args = parser.parse_args()

    main(args)
def __init__(self):
    FORCE_TO_CPU = False

    # Rebuild the training-time arguments for the PETA dataset and the DPN-107 backbone
    parser = argument_parser()
    args = parser.parse_args(['PETA', '--model=dpn107'])

    visenv_name = 'PETA'
    exp_dir = os.path.join('exp_result', visenv_name)
    model_dir, log_dir = get_model_log_path(exp_dir, visenv_name)
    stdout_file = os.path.join(log_dir, f'stdout_{time_str()}.txt')
    save_model_path = os.path.join(
        model_dir, 'ckpt_max_e0384293_2020-09-17_18-35-21.pth')

    if args.redirector:
        print('redirector stdout')
        ReDirectSTD(stdout_file, 'stdout', False)

    pprint.pprint(OrderedDict(args.__dict__))
    print('-' * 60)
    print(f'use GPU{args.device} for training')

    # The validation transform doubles as the prediction-time preprocessing
    _, predict_tsfm = get_transform(args)
    valid_set = AttrDataset(args=args,
                            split=args.valid_split,
                            transform=predict_tsfm)
    args.att_list = valid_set.attr_id

    # Build the backbone by name and pick its output feature dimensionality
    backbone = getattr(sys.modules[__name__], args.model)()
    if 'dpn68' in args.model:
        net_parameter = 832
    elif 'dpn' in args.model:
        net_parameter = 2688
    elif 'densenet' in args.model:
        net_parameter = 1024
    else:
        net_parameter = 2048

    classifier = BaseClassifier(netpara=net_parameter,
                                nattr=valid_set.attr_num)
    model = FeatClassifier(backbone, classifier)

    # Load the checkpoint on GPU when available, otherwise fall back to CPU
    if torch.cuda.is_available() and not FORCE_TO_CPU:
        model = torch.nn.DataParallel(model).cuda()
        ckpt = torch.load(save_model_path)
        print('Model is served with GPU')
    else:
        model = torch.nn.DataParallel(model)
        ckpt = torch.load(save_model_path,
                          map_location=torch.device('cpu'))
        print('Model is served with CPU')

    model.load_state_dict(ckpt['state_dicts'])
    model.eval()

    # from torchsummary import summary
    # summary(model, input_size=(3, 256, 192))
    print('Total number of parameters: ',
          sum(p.numel() for p in model.parameters() if p.requires_grad))

    self.args = args
    self.predict_tsfm = predict_tsfm
    self.model = model
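
# Illustrative sketch (not part of the original file): one way the stored
# model, transform, and attribute list could be used for single-image
# inference. The PIL-based loading, the sigmoid + 0.5 thresholding, and the
# handling of a list-valued forward output are assumptions, not taken from
# the original code.
def predict(self, image_path, threshold=0.5):
    from PIL import Image

    img = Image.open(image_path).convert('RGB')
    img_tensor = self.predict_tsfm(img).unsqueeze(0)  # add batch dimension

    with torch.no_grad():
        logits = self.model(img_tensor)
        # Some FeatClassifier variants return a list/tuple of logits;
        # keep only the first element in that case (assumption).
        if isinstance(logits, (list, tuple)):
            logits = logits[0]
        probs = torch.sigmoid(logits).squeeze(0).cpu().numpy()

    # Map probabilities back to the attribute names stored on args and
    # return only the attributes above the threshold
    return {name: float(p)
            for name, p in zip(self.args.att_list, probs)
            if p > threshold}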