Example 1
import csv

import numpy as np
import torch


def test_net(test_loader=None, path=None, batch_size=128):
    # Run the test loop here
    n_batches = len(test_loader)

    # Restore the checkpoint: it holds the pickled model object plus its weights
    model = torch.load(path)
    net = model['model']
    net.load_state_dict(model['state_dict'])
    for par in net.parameters():
        par.requires_grad = False
    net.eval()
    net = net.cpu()

    # Write results to a CSV file
    fname = "test_pred.csv"
    f_out = open(fname, "w")
    wrt = csv.writer(f_out)

    # Testing metrics
    corr_cnt = 0
    total_iter = 0
    for data in test_loader:
        inputs, labels, snr = data
        snr = snr.numpy()
        pred = net(inputs.float()).numpy()
        pred = np.argmax(pred, axis=1)
        labels = np.argmax(labels.numpy(), axis=1)
        for s, p, l in zip(snr, pred, labels):
            if p == l:
                corr_cnt += 1
                # Only correctly classified samples are written out, with their SNR
                wrt.writerow([s, p, l])
            total_iter += 1
    print("Test done, accuracy = " + str(corr_cnt / total_iter))
    f_out.close()
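Note that test_net expects the checkpoint to hold both the pickled model object and its weights. A save-side sketch matching that format, with net and path as placeholder names not taken from the source:

torch.save({'model': net, 'state_dict': net.state_dict()}, path)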
Example 2
import torch
import geoopt


def get_loss_optimizer(net, learning_rate=0.01):
    # Loss: standard cross-entropy for classification
    loss = torch.nn.CrossEntropyLoss()

    # Optimizer: Riemannian Adam from geoopt; parameters that are plain
    # tensors (not manifold parameters) receive ordinary Euclidean Adam updates
    # optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    optimizer = geoopt.optim.RiemannianAdam(net.parameters(), lr=learning_rate)

    return loss, optimizer
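A quick sketch of how this pairs with a single training step; net, inputs, and targets are placeholder names, not from the source:

criterion, optimizer = get_loss_optimizer(net, learning_rate=0.01)

optimizer.zero_grad()
loss = criterion(net(inputs), targets)  # CrossEntropyLoss expects raw logits
loss.backward()
optimizer.step()  # manifold-aware step; plain tensors get ordinary Adam updates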
Example 3
import bisect
import csv

import numpy as np
import torch


def test_net(test_loader=None,
             path='model.pt',
             batch_size=128,
             fname=None,
             a=8,
             b=12,
             c=20):
    # Note: a, b, and c are unused; the decision thresholds are swept
    # by the grid-search loops below
    n_batches = len(test_loader)

    # Restore the checkpoint and freeze the network for inference
    model = torch.load(path)
    net = model['model']
    net.load_state_dict(model['state_dict'])
    for par in net.parameters():
        par.requires_grad = False
    net.eval()
    net = net.float()
    net = net.to('cuda')

    # Write results to a CSV file
    if fname is None:
        fname = 'test_pred.csv'
    f_out = open(fname, "w")
    wrt = csv.writer(f_out)

    # Grid-search the three decision thresholds (i/100, j/100, k/100),
    # tracking the best accuracy seen so far
    run_max = 0
    for i in range(20, 30):
        for j in range(40, 60):
            for k in range(70, 80):
                # Reset the metrics for each threshold combination;
                # otherwise the accuracy accumulates across settings
                corr_cnt = 0
                total_iter = 0
                for data in test_loader:
                    inputs, labels, snr = data
                    inputs = inputs.to('cuda')
                    pred = net(inputs.float())

                    snr = snr.numpy()
                    pred = pred.cpu().numpy()
                    labels = np.argmax(labels.numpy(), axis=1)

                    for s, p, l in zip(snr, pred, labels):
                        # Map the continuous prediction to a class index
                        # using the candidate thresholds
                        p = bisect.bisect_left([i / 100, j / 100, k / 100], p)
                        if p == l:
                            corr_cnt += 1
                        total_iter += 1

                acc = corr_cnt / total_iter
                if run_max < acc:
                    run_max = acc

                print("Test done, accuracy = " + str(acc))
                wrt.writerow([i, j, k, acc])
    print(run_max)
    f_out.close()
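The grid search hinges on bisect.bisect_left, which maps a continuous prediction to a class index by counting how many thresholds the prediction has cleared. A quick sanity check:

import bisect

# With thresholds [0.25, 0.50, 0.75], a prediction of 0.60 clears the
# first two, so it is assigned class 2.
assert bisect.bisect_left([0.25, 0.50, 0.75], 0.10) == 0
assert bisect.bisect_left([0.25, 0.50, 0.75], 0.60) == 2
assert bisect.bisect_left([0.25, 0.50, 0.75], 0.90) == 3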
Example 4
import torch
import torch.optim as optim


def get_loss_optimizer(net, learning_rate=0.001):
    # Loss: mean squared error for regression
    loss = torch.nn.MSELoss()
    # Optimizer: Adam
    optimizer = optim.Adam(net.parameters(), lr=learning_rate)
    return loss, optimizer
Example 5

    train_set = torch.tensor(data=np.concatenate(
        (scaled_train_predictors, scaled_train_targets), axis=1),
                             dtype=torch.float).to(device)
    test_set = torch.tensor(data=np.concatenate(
        (scaled_test_predictors, scaled_test_targets), axis=1),
                            dtype=torch.float).to(device)
    return train_set, test_set


#Set hyperparameters
batch_size = 32
epochs = 50
lr = 1e-4
weight_decay = 1e-4

criterion = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)

for test_app in relevent_data['APP_NAME'].unique():
    train_set, test_set = train_test_split(test_app)
    test_losses = []
    #Training loop
    for epoch in range(epochs):

        #Set train loss to zero
        running_loss = 0.0

        #Shuffle training set
        shuffled_train_set = train_set[torch.randperm(train_set.shape[0])]

        for start_index in range(0, shuffled_train_set.shape[0] - batch_size,
                                 batch_size):
            batch = shuffled_train_set[start_index:start_index + batch_size]
            # Predictors and targets were concatenated along axis=1 in
            # train_test_split; assuming a single target column, it is the last
            predictors = batch[:, :-1]
            targets = batch[:, -1:]

            optimizer.zero_grad()
            outputs = net(predictors)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
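        # Hedged continuation (the source snippet is truncated here):
        # record the loss on the held-out app each epoch, under the same
        # single-target-column assumption as the batch split above
        with torch.no_grad():
            test_outputs = net(test_set[:, :-1])
            test_losses.append(criterion(test_outputs, test_set[:, -1:]).item())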
Example 6
import torch
import torch.optim as optim
import torch.nn as nn
import dataload
from net import net

# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# # Assuming that we are on a CUDA machine, this should print a CUDA device:
# print(device)
# net.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(4):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(dataload.train_dataloader):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # inputs, labels = data[0].to(device), data[1].to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # Print running statistics every 2000 mini-batches
        running_loss += loss.item()
        if i % 2000 == 1999:
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
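The loop above only trains the network; a minimal evaluation sketch in the same style follows. The dataload.test_dataloader name is an assumption, since the snippet only shows the training loader:

correct = 0
total = 0
with torch.no_grad():  # gradients are not needed for evaluation
    for data in dataload.test_dataloader:
        inputs, labels = data
        outputs = net(inputs)
        _, predicted = torch.max(outputs, 1)  # index of the highest score
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Test accuracy: %.1f %%' % (100 * correct / total))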