import os

import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

# Project-level objects (config, Net, model_name, device, MyDataset,
# test_transform, WholeFaceDatasets) are assumed to be defined/imported
# elsewhere in this project.


def predict():
    # Restore the trained weights saved by the training loop.
    model = Net(model_name).to(device)
    model_save_path = os.path.join(config.model_path, '{}.bin'.format(model_name))
    model.load_state_dict(torch.load(model_save_path, map_location=device))

    # Test images are named 0.jpg, 1.jpg, ... so the index doubles as the id.
    data_len = len(os.listdir(config.image_test_path))
    test_path_list = [
        '{}/{}.jpg'.format(config.image_test_path, x)
        for x in range(data_len)
    ]
    test_data = np.array(test_path_list)
    test_dataset = MyDataset(test_data, test_transform, 'test')
    test_loader = DataLoader(test_dataset,
                             batch_size=config.batch_size,
                             shuffle=False)

    model.eval()
    pred_list = []
    with torch.no_grad():
        for batch_x, _ in tqdm(test_loader):
            batch_x = batch_x.to(device)
            # Compute class logits and take the arg-max as the predicted label.
            logits = model(batch_x)
            preds = torch.argmax(logits, dim=1)
            pred_list.extend(preds.cpu().numpy().tolist())

    # Write the submission file: one "id,label" row per test image.
    submission = pd.DataFrame({
        "id": range(len(pred_list)),
        "label": pred_list
    })
    submission.to_csv('submission.csv', index=False, header=False)
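# ---------------------------------------------------------------------------
# Hedged sketch (not this repo's real implementation): `MyDataset` and
# `test_transform` are defined elsewhere in the project. The hypothetical
# `_TestImageDatasetSketch` below only illustrates the interface predict()
# relies on: in test mode each item is (image_tensor, dummy_label), which is
# why the loop above can discard the label with `for batch_x, _ in loader`.
# ---------------------------------------------------------------------------
from PIL import Image
from torch.utils.data import Dataset


class _TestImageDatasetSketch(Dataset):
    """Minimal test-time dataset: load an image from a path, return a dummy label."""

    def __init__(self, image_paths, transform):
        self.image_paths = image_paths
        self.transform = transform

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        img = Image.open(self.image_paths[idx]).convert('RGB')
        # No ground-truth label at test time; return 0 as a placeholder.
        return self.transform(img), 0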
def test(**kwargs):
    # Override config entries with any keyword arguments passed in.
    config.parse(kwargs)

    test_loader = DataLoader(
        WholeFaceDatasets(config.whole_csv_test,
                          config.whole_imgs_base_dir,
                          train=False,
                          transform=transforms.Compose([
                              transforms.Resize((224, 224)),
                              transforms.ToTensor(),
                              transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                   std=[0.229, 0.224, 0.225])
                          ])),
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers
    )

    model = Net()
    if config.using_pretrain_model:
        model.load_state_dict(torch.load(config.pretrain_model_path))
    model.cuda()
    model.eval()

    test_loss = 0.0
    tables = None
    with torch.no_grad():  # replaces the deprecated Variable(..., volatile=True)
        for data, target in test_loader:
            data = data.cuda()
            # The model outputs shape (N, 1), so reshape the target to match
            # before computing the loss.
            target = target.float().view(-1, 1).cuda()

            output = model(data)
            loss = F.l1_loss(output, target)

            # Collect (predicted, real) pairs for later inspection.
            pair = np.hstack((output.cpu().numpy(), target.cpu().numpy()))
            tables = pair if tables is None else np.vstack((tables, pair))

            # loss.item() replaces the deprecated loss.data[0]; weight by the
            # actual batch size so the last (possibly smaller) batch is correct.
            test_loss += loss.item() * data.size(0)

    test_loss /= len(test_loader.dataset)
    np.save('predicted_and_real.npy', tables)
    print(f'Test L1 loss (mean absolute error): {test_loss}')
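# ---------------------------------------------------------------------------
# Hedged usage sketch (assumption: this module is run as a script and the
# `fire` package is available; the repo's actual entry point may differ).
# fire.Fire exposes the functions as subcommands, so keyword overrides reach
# test() via config.parse(kwargs), e.g.:
#   python main.py test --batch_size=32 --pretrain_model_path=checkpoints/net.pth
#   python main.py predict
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import fire
    fire.Fire({'predict': predict, 'test': test})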