# NOTE(review): this fragment is the tail of a test/eval routine -- the
# enclosing `def` and the imports/definitions of `data_utils`, `model`,
# `args`, `test_data`, `parse_arguments` and `sys` are outside this view.
test_loader = data_utils.DataLoader(test_data, batch_size=10, shuffle=False)
model.eval()  # switch dropout / batch-norm layers to inference behaviour

# Fix: the original duplicated six Variable-wrapping lines in an if/else on
# GPU availability and re-tested `args.use_gpu and torch.cuda.is_available()`
# on every batch.  The decision is loop-invariant, so hoist it once and wrap
# each field through a single helper.
use_cuda = args.use_gpu and torch.cuda.is_available()

for sample_batched in test_loader:

    def _to_var(key):
        """Wrap one batch field as a Variable, moved to GPU when enabled."""
        tensor = sample_batched[key]
        return torch.autograd.Variable(tensor.cuda() if use_cuda else tensor)

    # Same fields, same order as the original if/else branches.
    x_1, x_2, x_3, x_4, x_5 = (
        _to_var(k) for k in ('glove', 'res', 'bitrate', 'gender', 'age'))
    target = _to_var('label')

    output, _ = model(x_1, x_2, x_3, x_4, x_5)
    # Index of the max logit along dim 1; keepdim mirrors the original call.
    pred = output.data.max(1, keepdim=True)[1]
    print("True: {}".format(target))
    print("Predict: {}".format(pred))


if __name__ == '__main__':
    test_confusion_matrix(parse_arguments(sys.argv[1:]))
# NOTE(review): mid-function fragment -- the enclosing routine
# (`train_test_SVM` per the guard below) starts before this view; names such
# as `sample_batched`, `features_test`, `start_deep`, `test_data`, `clf`,
# `x_train`/`y_train`/`x_test`/`y_test`, `cfg` and `args` are defined there.
# Code tokens are unchanged; only formatting and comments are added.

# Wrap one batch's input fields as autograd Variables (pre-0.4 PyTorch idiom).
x_1 = torch.autograd.Variable(sample_batched['glove'])
x_2 = torch.autograd.Variable(sample_batched['res'])
x_3 = torch.autograd.Variable(sample_batched['bitrate'])
x_4 = torch.autograd.Variable(sample_batched['gender'])
x_5 = torch.autograd.Variable(sample_batched['age'])

# Forward pass; only the second output (an intermediate fc2 feature tensor,
# presumably the penultimate-layer embedding -- confirm against the model) is
# kept for the downstream classical classifier.
_, fc2_test = model(x_1, x_2, x_3, x_4, x_5)
features_test.append(fc2_test.data.cpu().numpy())

# Stack per-batch feature arrays into one (num_samples, feat_dim) matrix.
test_features = np.concatenate(features_test, 0)

# Average per-sample wall-clock cost of the deep feature extraction.
total_deep = float(
    (datetime.datetime.now() - start_deep).total_seconds()) / float(
    len(test_data))
print("DeepQoE total cost {}s".format(total_deep))
print(len(test_data))

# Accuracy of the (already fitted) classifier on DeepQoE features.
prediction = clf.predict(test_features)
acc = accuracy_score(prediction, y_test)
print("{} uses DeepQoE features can get {}%".format(
    cfg.CLASSIFIER_NAME[args.classifier], acc * 100.0))

# Baseline: train the same classifier type directly on the raw features.
clf_ori = cfg.CLASSIFIER[args.classifier]
clf_ori.fit(x_train.astype(float), y_train)
prediction_ori = clf_ori.predict(x_test.astype(float))
acc_ori = accuracy_score(prediction_ori, y_test)
print("{} uses original features can get {}%".format(
    cfg.CLASSIFIER_NAME[args.classifier], acc_ori * 100.0))


if __name__ == '__main__':
    train_test_SVM(parse_arguments(sys.argv[1:]))
# NOTE(review): tail of an MSE-regression eval routine -- the enclosing `def`
# and the definitions of `data_utils`, `model`, `args`, `test_data`, `F`,
# `main`, `parse_arguments` and `sys` are outside this view.
test_loader = data_utils.DataLoader(test_data, batch_size=18, shuffle=False)
model.eval()  # inference mode for dropout / batch-norm layers
test_loss = 0

# Fix: the original duplicated the Variable-wrapping lines in a per-batch
# if/else and re-checked GPU availability on every iteration; the check is
# loop-invariant, so hoist it and wrap fields through one helper.
use_cuda = args.use_gpu and torch.cuda.is_available()

for sample_batched in test_loader:

    def _to_var(key):
        """Wrap one batch field as a Variable, moved to GPU when enabled."""
        tensor = sample_batched[key]
        return torch.autograd.Variable(tensor.cuda() if use_cuda else tensor)

    # Same fields, same order as the original if/else branches.
    x_1, x_2, x_3 = (_to_var(k) for k in ('glove', 'res', 'bitrate'))
    target = _to_var('label')

    output, _ = model(x_1, x_2, x_3)
    # Sum-reduced squared error accumulated over the whole test set.
    # NOTE(review): `size_average=False` and `.data[0]` are pre-0.4 PyTorch
    # idioms (today: reduction='sum' and .item()); kept verbatim so the
    # script still runs on the old torch version it was written for.
    test_loss += F.mse_loss(output, target.float(),
                            size_average=False).data[0]
    print(target.float())

# Per-sample average loss over the full dataset.
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}'.format(test_loss))


if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))