import bisect
import csv
import time

import numpy as np
import torch
from torch.optim.lr_scheduler import StepLR


def test_net(test_loader=None, path='model.pt', batch_size=128, fname=None):
    n_batches = len(test_loader)

    # Restore the saved model and freeze its parameters for inference.
    model = torch.load(path)
    net = model['model']
    net.load_state_dict(model['state_dict'])
    for par in net.parameters():
        par.requires_grad = False
    net.eval()
    net = net.float()
    net = net.to('cuda')

    # Writing results to spreadsheet
    if fname is None:
        fname = 'test_pred.csv'
    f_out = open(fname, "w")
    wrt = csv.writer(f_out)

    # Testing metrics
    corr_cnt = 0
    total_iter = 0
    with torch.no_grad():
        for data in test_loader:
            inputs, labels, snr = data
            inputs = inputs.to('cuda')
            pred = net(inputs.float())
            snr = snr.numpy()
            # Convert one-hot outputs and labels to class indices.
            pred = pred.argmax(dim=1).cpu().numpy()
            labels = np.argmax(labels.numpy(), axis=1)
            for s, p, l in zip(snr, pred, labels):
                #wrt.writerow([s, p, l])
                if p == l:
                    corr_cnt += 1
                total_iter += 1
    print("Test done, accr = " + str(corr_cnt / total_iter))
    f_out.close()
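# A minimal sketch of how test_net might be invoked. The tensor shapes,
# class count, and the 'trained_model.pt' path below are hypothetical
# placeholders, assuming the dataset yields (inputs, one-hot labels, snr)
# triples as the evaluation loop above expects.
#
#   import torch
#   from torch.utils.data import TensorDataset, DataLoader
#
#   inputs = torch.randn(1024, 2, 128)                    # hypothetical shape
#   labels = torch.eye(4)[torch.randint(0, 4, (1024,))]   # one-hot labels
#   snr = torch.randint(-20, 20, (1024,))
#   loader = DataLoader(TensorDataset(inputs, labels, snr), batch_size=128)
#   test_net(test_loader=loader, path='trained_model.pt')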
def train_net(train_loader=None, net=None, batch_size=128, n_epochs=5,
              learning_rate=0.001, saved_model=None, fname=None):
    # Print all of the hyperparameters of the training iteration:
    print("===== HYPERPARAMETERS =====")
    print("batch_size=", batch_size)
    print("epochs=", n_epochs)
    print("learning_rate=", learning_rate)
    print("=" * 30)

    # Get training data
    n_batches = len(train_loader)

    # Create our loss and optimizer functions
    loss, optimizer = get_loss_optimizer(net, learning_rate)

    # Time for printing
    training_start_time = time.time()

    # Log the per-epoch training loss to a spreadsheet.
    f_out = open(fname, "w")
    wrt = csv.writer(f_out)

    total_train_loss = 0
    # Decay the learning rate by 10x every 250 epochs.
    scheduler = StepLR(optimizer, step_size=250, gamma=0.1)
    net = net.float()
    net = net.to('cuda')

    # Loop for n_epochs
    for epoch in range(n_epochs):
        running_loss = 0.0
        print_every = n_batches // 10
        start_time = time.time()
        # Record the loss accumulated over the previous epoch.
        wrt.writerow([epoch, total_train_loss])
        total_train_loss = 0

        # Checkpoint the model every 250 epochs.
        if (epoch + 1) % 250 == 0:
            checkpoint = {
                'model': net,
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict()
            }
            torch.save(checkpoint, 'checkpoint.pt')

        for i, data in enumerate(train_loader):
            inputs, labels, snr = data
            inputs = inputs.to('cuda')
            labels = labels.to('cuda')
            snr = snr.to('cuda')

            # Set the parameter gradients to zero
            optimizer.zero_grad()

            # Forward pass, backward pass, optimize
            outputs = net(inputs.float())
            labels = labels.squeeze_()
            # Labels are one-hot encoded; cross-entropy expects class indices.
            loss_size = loss(outputs, labels.argmax(dim=1))
            loss_size.backward()
            optimizer.step()

            # Accumulate statistics
            running_loss += loss_size.item()
            total_train_loss += loss_size.item()

            # Print the loss every 10% of an epoch (then reset it to 0)
            if (i + 1) % (print_every + 1) == 0:
                print("Epoch {}, {:d}% \t train_loss: {:.4f} took: {:.2f}s".format(
                    epoch + 1, int(100 * (i + 1) / n_batches),
                    running_loss / print_every,
                    time.time() - start_time))
                # Reset running loss and time
                running_loss = 0.0
                start_time = time.time()
        scheduler.step()

    print("Training finished, took {:.2f}s".format(
        time.time() - training_start_time))
    final = {
        'model': net,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict()
    }
    torch.save(final, saved_model)
    f_out.close()
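# train_net relies on get_loss_optimizer, which is not defined in this file.
# A minimal sketch of what it presumably returns: cross-entropy loss (the
# training loop argmax-es one-hot labels into class indices, which is what
# CrossEntropyLoss expects) paired with an Adam optimizer. The choice of
# Adam here is an assumption; the actual helper may differ.
def get_loss_optimizer(net, learning_rate):
    loss = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)
    return loss, optimizer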
def test_net(test_loader=None, path='model.pt', batch_size=128, fname=None,
             a=8, b=12, c=20):
    # NOTE: a, b, and c are unused; the sweep ranges below are hardcoded.
    n_batches = len(test_loader)

    # Restore the saved model and freeze its parameters for inference.
    model = torch.load(path)
    net = model['model']
    net.load_state_dict(model['state_dict'])
    for par in net.parameters():
        par.requires_grad = False
    net.eval()
    net = net.float()
    net = net.to('cuda')

    # Writing per-threshold results to spreadsheet
    if fname is None:
        fname = 'test_pred.csv'
    f_out = open(fname, "w")
    wrt = csv.writer(f_out)

    run_max = 0
    # Grid-search the three bin boundaries (in hundredths) that map the
    # network's scalar output onto one of four classes.
    for i in range(20, 30):
        for j in range(40, 60):
            for k in range(70, 80):
                # Reset the testing metrics for each threshold combination.
                corr_cnt = 0
                total_iter = 0
                for data in test_loader:
                    inputs, labels, snr = data
                    inputs = inputs.to('cuda')
                    with torch.no_grad():
                        pred = net(inputs.float())
                    snr = snr.numpy()
                    pred = pred.cpu().numpy()
                    labels = np.argmax(labels.numpy(), axis=1)
                    for s, p, l in zip(snr, pred, labels):
                        #wrt.writerow([s, p, l])
                        # Bin the scalar prediction into a class index.
                        p = bisect.bisect_left(
                            [i / 100, j / 100, k / 100], float(p))
                        if p == l:
                            corr_cnt += 1
                        total_iter += 1
                acc = corr_cnt / total_iter
                if run_max < acc:
                    run_max = acc
                print("Test done, accr = " + str(acc))
                wrt.writerow([i, j, k, acc])
    print(run_max)
    f_out.close()
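# How the bisect binning above maps a scalar prediction p to a class index:
# bisect_left returns the count of boundaries strictly below p, so three
# boundaries partition the output range into four classes. The boundary and
# prediction values below are illustrative only.
#
#   import bisect
#   bisect.bisect_left([0.25, 0.50, 0.75], 0.10)  # -> 0
#   bisect.bisect_left([0.25, 0.50, 0.75], 0.60)  # -> 2
#   bisect.bisect_left([0.25, 0.50, 0.75], 0.90)  # -> 3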
import pandas as pd
import numpy as np
import torch
from sklearn.preprocessing import MinMaxScaler

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Load the data
data = pd.read_csv('../data/data.csv')
relevent_data = data.drop('code_size', axis=1)
test_app = 'consumer_tiffmedian'

from net import net

losses = []
net = net.to(device)
net.train()


def train_test_split(test_app):
    # Hold out one application for testing; train on all the others.
    train_data = relevent_data[relevent_data['APP_NAME'] != test_app]
    test_data = relevent_data[relevent_data['APP_NAME'] == test_app]

    # Fit the scaler on the training predictors only (columns 1:-5),
    # then apply it to produce the scaled training inputs.
    scaler = MinMaxScaler(feature_range=(0, 1)).fit(train_data.iloc[:, 1:-5])
    scaled_train_predictors = scaler.transform(train_data.iloc[:, 1:-5])

    # The last five columns are the targets.
    train_targets = train_data.iloc[:, -5:]
    scaled_train_targets = []
    for app in range(0, train_targets.shape[0], 128):
        scaled_train_targets.append(