Example #1
import csv

import numpy as np
import torch


def test_net(test_loader=None, path=None, batch_size=128):
    #run test loop here
    n_batches = len(test_loader)

    #load the checkpoint: it stores the full model object plus its weights
    model = torch.load(path)
    net = model['model']
    net.load_state_dict(model['state_dict'])
    for par in net.parameters():
        par.requires_grad = False
    net.eval()
    net = net.cpu()

    #writing results to spreadsheet (one row per correctly classified sample)
    fname = "test_pred.csv"
    f_out = open(fname, "w")
    wrt = csv.writer(f_out)

    #testing metrics
    corr_cnt = 0
    total_iter = 0
    for data in test_loader:
        inputs, labels, snr = data
        snr = snr.numpy()
        pred = net(inputs.float()).numpy()
        pred = np.argmax(pred, axis=1)
        labels = np.argmax(labels.numpy(), axis=1)
        for s, p, l in zip(snr, pred, labels):
            if p == l:
                corr_cnt += 1
                wrt.writerow([s, p, l])
            total_iter += 1

    print("Test done, accuracy = " + str(corr_cnt / total_iter))
    f_out.close()
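
For context, here is one way the helper above could be exercised end to end. Everything below (TinyNet, the dummy data shapes, checkpoint.pt) is an illustrative assumption, not part of the original project; note that newer PyTorch releases may require torch.load(..., weights_only=False) to unpickle a full model object.

#hypothetical usage sketch: TinyNet, checkpoint.pt and the dummy shapes are placeholders
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

class TinyNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(16, 4)

    def forward(self, x):
        return self.fc(x)

#save a checkpoint in the {'model': ..., 'state_dict': ...} layout test_net expects
net = TinyNet()
torch.save({'model': net, 'state_dict': net.state_dict()}, 'checkpoint.pt')

#dummy dataset yielding (inputs, one-hot labels, snr) tuples, matching the test loop
inputs = torch.randn(256, 16)
labels = torch.nn.functional.one_hot(torch.randint(0, 4, (256,)), num_classes=4)
snr = torch.randint(-20, 20, (256,))
loader = DataLoader(TensorDataset(inputs, labels, snr), batch_size=128)

test_net(test_loader=loader, path='checkpoint.pt', batch_size=128)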
Example #2
import bisect
import csv

import numpy as np
import torch


def test_net(test_loader=None,
             path='model.pt',
             batch_size=128,
             fname=None,
             a=8,
             b=12,
             c=20):

    n_batches = len(test_loader)

    #load the checkpoint and freeze the network for evaluation
    model = torch.load(path)
    net = model['model']
    net.load_state_dict(model['state_dict'])
    for par in net.parameters():
        par.requires_grad = False
    net.eval()
    net = net.float()
    net = net.to('cuda')

    #writing results to spreadsheet (one row per threshold combination)
    if fname is None:
        fname = 'test_pred.csv'
    f_out = open(fname, "w")
    wrt = csv.writer(f_out)

    #testing metrics: grid-search the three decision thresholds
    run_max = 0
    for i in range(20, 30):
        for j in range(40, 60):
            for k in range(70, 80):
                #reset the counters for this (i, j, k) combination
                corr_cnt = 0
                total_iter = 0
                for data in test_loader:
                    inputs, labels, snr = data
                    inputs = inputs.to('cuda')
                    pred = net(inputs.float())

                    snr = snr.numpy()
                    pred = pred.cpu().numpy()
                    labels = np.argmax(labels.numpy(), axis=1)

                    for s, p, l in zip(snr, pred, labels):
                        #map the continuous prediction to a class index
                        #using the thresholds i/100, j/100, k/100
                        p = bisect.bisect_left(
                            [i / 100, j / 100, k / 100], p)
                        if p == l:
                            corr_cnt += 1
                        total_iter += 1

                acc = corr_cnt / total_iter
                if run_max < acc:
                    run_max = acc

                print("Test done, accuracy = " + str(acc))
                wrt.writerow([i, j, k, acc])

    print(run_max)
    f_out.close()
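
The bisect_left call in Example #2 converts a scalar network output into a class index by counting how many of the three sorted thresholds lie strictly below it. A standalone illustration (the cut-points below are arbitrary):

import bisect

thresholds = [0.25, 0.55, 0.75]   #arbitrary example cut-points
for p in (0.1, 0.3, 0.6, 0.9):
    #bisect_left returns the number of thresholds strictly below p,
    #i.e. the index of the bin p falls into
    print(p, '-> class', bisect.bisect_left(thresholds, p))
#prints: 0.1 -> class 0, 0.3 -> class 1, 0.6 -> class 2, 0.9 -> class 3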
Example #3

#training loop: train_loader, n_epochs, net, criterion and optimizer are
#assumed to be defined earlier in the original script
losses = []
for epoch in range(n_epochs):
    running_loss = 0.0
    for X, y in train_loader:
        optimizer.zero_grad()

        #forward pass
        outputs = net(X)

        #get loss
        loss = criterion(outputs, y)

        #backward pass
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    if epoch % 50 == 49 or epoch == 0:  # print every 50 epochs (and the first)
        print('epoch {} loss: {}'.format(
            epoch if epoch > len(losses) else len(losses), running_loss))
    losses.append(running_loss)

from plot_losses import plot_losses

plot_losses(losses)
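
plot_losses is imported from a local module that is not shown here. A minimal sketch of such a helper, assuming it simply plots the per-epoch running loss with matplotlib (labels and figure handling are illustrative):

import matplotlib.pyplot as plt

def plot_losses(losses):
    #hypothetical sketch: draw the running loss recorded after each epoch
    plt.figure()
    plt.plot(range(len(losses)), losses)
    plt.xlabel('epoch')
    plt.ylabel('running loss')
    plt.title('training loss')
    plt.show()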

from sklearn.metrics import mean_squared_error

from predict import predict
from plot_execution_times import plot_execution_times
from test_set import test_set

#`net` and `device` are assumed to be defined earlier in the original script
test_set = test_set.to(device)
net.eval()

#run batched inference (batch size 32) over the held-out set
predictions = predict(net, 32, test_set)

#the last five columns of test_set hold the regression targets; move them to
#the CPU before handing them to sklearn
targets = test_set[:, -5:].cpu()
print('Test Loss:', mean_squared_error(targets, predictions))
plot_execution_times(predictions, targets)
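
predict is also a local helper that is not included here. Below is a hedged sketch of a batched-inference function with the signature predict(net, batch_size, data) used above; the real module may instead slice feature columns out of test_set before the forward pass, so treat this only as an assumption.

import torch

def predict(net, batch_size, data):
    #hypothetical sketch: run `data` through `net` in batches with gradients
    #disabled and return the stacked outputs on the CPU
    net.eval()
    outputs = []
    with torch.no_grad():
        for start in range(0, len(data), batch_size):
            batch = data[start:start + batch_size]
            outputs.append(net(batch).cpu())
    return torch.cat(outputs, dim=0)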