import os
import torch
import torchvision.transforms as trn
from collections import defaultdict
# Project-specific helpers (get_dataset, get_model, domain_dict, data_utils,
# Generalmodels, test, test_range) are assumed to be imported from elsewhere in
# this repository and are not redefined here.


def main():
    args = parse_args()
    torch.cuda.set_device(args.gpu)

    # Dataset-specific number of domains and classes
    if args.dataset == 'domainnet':
        num_domain = 6
        num_classes = 345
    elif args.dataset == 'officehome':
        num_domain = 4
        num_classes = 65
    print(args.dataset, num_classes, num_domain)

    # Only the target domain's supervised validation split is needed for evaluation
    _, trg_sup_val = get_dataset(dataset=args.dataset, dataset_root=args.data_root,
                                 domain=args.trg_domain, ssl=False)
    trg_num = domain_dict[args.dataset][args.trg_domain]

    # Build the model, restore the checkpoint, and move it to the selected GPU
    model = get_model(args.model_name, in_features=num_classes, num_classes=num_classes,
                      num_domains=num_domain, pretrained=True)
    model.load_state_dict(torch.load(args.model_path)['model'])
    model = model.cuda(args.gpu)

    # Evaluate on the target domain's validation set
    _, acc = test(args, model, trg_sup_val, trg_num)
    print('acc: %0.3f' % acc)
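
# The repo's parse_args() is not shown in this file; if it is not defined
# elsewhere, a minimal sketch could look like the one below. It is reconstructed
# only from the attributes main() reads (gpu, dataset, data_root, trg_domain,
# model_name, model_path); the defaults and choices are illustrative guesses,
# not the project's actual CLI.
import argparse


def parse_args():
    parser = argparse.ArgumentParser(
        description='Evaluate a trained checkpoint on a target domain')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--dataset', type=str, choices=['domainnet', 'officehome'],
                        default='domainnet')
    parser.add_argument('--data_root', type=str, required=True)
    parser.add_argument('--trg_domain', type=str, required=True)
    parser.add_argument('--model_name', type=str, default='resnet50')
    parser.add_argument('--model_path', type=str, required=True)
    return parser.parse_args()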
def test_model(test_path, model_name, trained_model, save_path, traineddataset,
               testdataset, opt, lr, pretrain, num_classes, test_split=False):
    state = defaultdict()
    test_transform = trn.Compose([
        trn.ToTensor(),
        trn.Normalize([0.566, 0.496, 0.469], [0.266, 0.256, 0.258])
    ])
    img_pixels = (224, 224)

    # Load test data
    test_set_X, test_set_y, _ = data_utils.process_data(test_path)
    test_loader = data_utils.make_dataloader_iter(
        test_set_X, test_set_y, img_size=img_pixels, batch_size=10,
        transform_test=test_transform)

    # Load model
    if model_name == 'alexnet':
        net = Generalmodels.alexnet(num_classes, pretrain, trained_model, if_test=True)
    elif model_name == 'VGG':
        net = Generalmodels.VGG16(num_classes, pretrain, trained_model, if_test=True)
    elif model_name == 'densenet121':
        net = Generalmodels.densenet121(num_classes, pretrain, trained_model, if_test=True)
    elif model_name == 'resnet50':
        net = Generalmodels.resnet50(num_classes, pretrain, trained_model, if_test=True)
    else:
        raise ValueError('Unsupported model name: {}'.format(model_name))

    device = torch.device("cuda")
    net.load_state_dict(torch.load(trained_model))
    net = net.to(device)

    # Test overall performance
    if not test_split:
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        test(net, test_loader, state)
        with open(
                os.path.join(
                    save_path,
                    '{}_{}_{}_{}_{}_results.csv'.format(
                        traineddataset, model_name, opt, lr, testdataset)),
                'a') as f:
            f.write('%s,%0.6f\n' % ('(test)', state['test_accuracy']))

    # Test performance per age
    if test_split:
        new_path = os.path.join(save_path, 'split')
        if not os.path.exists(new_path):
            os.makedirs(new_path)
        if testdataset == 'FineTuneData':
            testdataset = 'testing'
        gt, tp, mae, prediction, classpredicts, regrepredicts, labels = \
            test_range(net, test_loader, state, num_classes)

        # Write per-sample predictions
        with open(
                os.path.join(
                    new_path,
                    '{}_{}_{}_{}_{}_allpredictions.csv'.format(
                        traineddataset, model_name, opt, lr, testdataset)),
                'w') as f:
            f.write('%s,%s,%s\n' % ('label', 'classpred', 'regreepred'))
            for i in range(len(classpredicts)):
                f.write('%d,%d,%0.2f\n' %
                        (labels[i], classpredicts[i], regrepredicts[i]))

        # Write per-age performance
        with open(
                os.path.join(
                    new_path,
                    '{}_{}_{}_{}_{}_results.csv'.format(
                        traineddataset, model_name, opt, lr, testdataset)),
                'w') as f:
            f.write('%s,%s,%s,%s,%s\n' % ('age', 'number', 'accuracy', 'mae', 'perceived'))
            for i in range(len(gt)):
                f.write('%d,%d,%0.6f,%0.3f,%0.3f\n' %
                        (i, gt[i], tp[i] / gt[i], mae[i] / gt[i], prediction[i] / gt[i]))
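
# A hedged entry-point sketch, assuming this module is run directly as an
# evaluation script: main() performs the target-domain evaluation defined above,
# while test_model() appears to be a utility intended to be called from other
# scripts with its own arguments.
if __name__ == '__main__':
    main()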