def test_net(test_loader=None, path=None, batch_size=128): #run test loop here n_batches = len(test_loader) model = torch.load(path) net = model['model'] net.load_state_dict(model['state_dict']) for par in net.parameters(): par.requires_grad = False net.eval() #writing results to spreadsheet fname = "test_pred.csv" f_out = open(fname, "w") wrt = csv.writer(f_out) net = net.cpu() #testing metrics corr_cnt = 0 total_iter = 0 for data in test_loader: [inputs, labels, snr] = data snr = snr.numpy() #inputs, labels,snr = Variable(inputs), Variable(labels), Variable(snr) pred = net(inputs.float()).numpy() pred = np.argmax(pred, axis=1) labels = np.argmax(labels.numpy(), axis=1) for s, p, l in zip(snr, pred, labels): if (p == l): corr_cnt += 1 wrt.writerow([s, p, l]) total_iter += 1 print("Test done, accr = :" + str(corr_cnt / total_iter)) f_out.close()
def test_net(test_loader=None, path='model.pt', batch_size=128, fname=None, a=8, b=12, c=20): n_batches = len(test_loader) model = torch.load(path) net = model['model'] net.load_state_dict(model['state_dict']) for par in net.parameters(): par.requires_grad = False net.eval() net = net.float() net = net.to('cuda') #writing results to spreadsheet if fname is None: fname = 'test_pred.csv' f_out = open(fname, "w") wrt = csv.writer(f_out) #testing metrics corr_cnt = 0 total_iter = 0 run_max = 0 for i in range(20, 30): for j in range(40, 60): for k in range(70, 80): for data in test_loader: [inputs, labels, snr] = data inputs, labels = Variable(inputs).to('cuda'), Variable( labels) pred = net(inputs.float()) snr = snr.numpy() pred = pred.cpu().numpy() labels = np.argmax(labels.numpy(), axis=1) for s, p, l in zip(snr, pred, labels): #wrt.writerow([s,p,l]) #wrt.writerow([p,l]) #p = bisect.bisect_left([0.25,0.5,0.75],p) p = bisect.bisect_left( [float(i / 100), float(j / 100), float(k / 100)], p) if (p == l): corr_cnt += 1 total_iter += 1 acc = corr_cnt / total_iter if (run_max < acc): run_max = acc print("Test done, accr = :" + str(acc)) #print("i" + str(float(i/100))) #print("j" + str(float(j/100))) #print("k" + str(float(k/100))) wrt.writerow([i, j, k, acc]) print(run_max) f_out.close()
import torch import dataload from net import net net.load_state_dict(torch.load('./try/model_trained.pth')) # net.load_state_dict(torch.load('./try/model_trained_2.pth')) # outputs = net(images) # _, predicted = torch.max(outputs, 1) # print('Predicted: ', ' '.join('%5s' % load.classes[predicted[j]] for j in range(4))) class_correct = list(0. for i in range(6)) class_total = list(0. for i in range(6)) # print(class_correct) # [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] with torch.no_grad(): for data in dataload.val_dataloader: images, labels = data outputs = net(images) _, predicted = torch.max(outputs, 1) # print(predicted) # tensor([3, 3, 3, 3, 0, 0, 3, 3]) # situation of batch=8, also apply to below # tensor([0, 3, 4, 3, 4, 3, 3, 3]) # tensor([0, 3, 3, 3, 4, 0, 3, 0]) # .... c = (predicted == labels).squeeze() # print(c) # tensor([False, False, True, True, True, False, False, False]) # tensor([ True, True, False, True, True, False, True, True]) # tensor([False, True, False, False, False, True, False, True]) # .... for i in range( 4): # ratio[0.1]时,test总数=251,除4余3,因此会报错index out of range.
import os, torch, pandas as pd
from sklearn.metrics import mean_squared_error
from find_token import find_token
from test_set import test_set
from net import net

# Evaluate every saved checkpoint in ../diff_batch_sizes on the shared test
# set, collecting the predictions for each model.
# NOTE(review): this chunk appears truncated — `test_losses` is initialised
# (and `mean_squared_error` / `pandas` imported) but never used in the
# visible code, and `y` is built but unread; presumably the per-model loss
# computation follows past the cut. Confirm against the full file.
net.eval()
batch_sizes, test_losses = [], []
for model in os.listdir('../diff_batch_sizes'):
    # Checkpoint filenames encode hyper-parameters, e.g.
    # "...batch_size=64__lr=..."; extract the batch size between the markers.
    batch_size = int(find_token(model, 'batch_size=', '__lr='))
    # Load the weights onto CPU so no GPU is required for evaluation.
    net.load_state_dict(
        torch.load('../diff_batch_sizes/' + model,
                   map_location=torch.device('cpu')))
    test_predictions = []
    # Walk the test set in full batches; the range bound drops any trailing
    # partial batch (< batch_size rows).
    for start_index in range(0, test_set.shape[0] - batch_size + 1,
                             batch_size):
        # Split each row: all but the last 5 columns are features, reshaped
        # to (batch, 1, 285) — assumes 285 feature columns; TODO confirm.
        # The last 5 columns are the regression targets.
        X = test_set[start_index:start_index + batch_size, :-5].view(
            batch_size, 1, 285)
        y = test_set[start_index:start_index + batch_size, -5:]
        # Forward pass; detach because gradients are not needed here.
        outputs = net(X).view(batch_size, 5).detach()
        test_predictions.append(outputs)
    predictions = torch.cat(test_predictions, dim=0).numpy()
    batch_sizes.append(batch_size)