import sys

import torch

import data
import net


def main(mode="test"):
    # The mode argument is unused; the actual mode is read from the command line:
    # "train", "test", or "print".
    if sys.argv[1] == "train":
        module = net.Mynet()
        train_dataset = data.trainSet("traindata")
        train_loader = torch.utils.data.DataLoader(dataset=train_dataset)
        net.train(10, train_loader, module)
    if sys.argv[1] == "test":
        module = torch.load("module/my_model.pkl")
        test_dataset = data.trainSet("traindata")
        test_loader = torch.utils.data.DataLoader(dataset=test_dataset)
        net.test(test_loader, module)
    if sys.argv[1] == "print":
        module = torch.load("module/my_model.pkl")
        print(module.state_dict())
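# --- Hedged sketch (not part of the original script): one plausible shape of net.train ---
# net.Mynet and net.train live in the project's net module and are not shown here.
# This sketch assumes Mynet is a standard nn.Module classifier and that the
# DataLoader yields (inputs, labels) pairs; the real implementation may differ.
import torch
import torch.nn as nn


def train_sketch(epochs, train_loader, model):
    """Minimal cross-entropy training loop matching the net.train(10, train_loader, module) call."""
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    criterion = nn.CrossEntropyLoss()
    model.train()
    for _ in range(epochs):
        for inputs, labels in train_loader:
            optimizer.zero_grad()
            loss = criterion(model(inputs), labels)
            loss.backward()
            optimizer.step()
    # The "test"/"print" branches load from this path, so training presumably saves here.
    torch.save(model, "module/my_model.pkl")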
import timeit
from collections import OrderedDict

import numpy as np
import tensorflow as tf

# train, test, and DatasetIterator are provided elsewhere in this project.

# Load the CIFAR-10 dataset.
cifar10 = tf.keras.datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
batch_size = 64

# Change TRAIN to True if you want to create a new model and save it to the ckpt folder.
TRAIN = False
if TRAIN:
    train(x_train, y_train, batch_size)

cifar10_test = DatasetIterator(x_test, y_test, batch_size)
cifar10_test_images, cifar10_test_labels = x_test, y_test

# Start timer.
start = timeit.default_timer()
np.random.seed(0)

# Get predictions for the test set.
predicted_cifar10_test_labels = test(cifar10_test_images)
np.random.seed()

# End timer.
stop = timeit.default_timer()
run_time = stop - start

# Calculate accuracy.
correct_predict = (cifar10_test_labels.flatten()
                   == predicted_cifar10_test_labels.flatten()).astype(np.int32).sum()
incorrect_predict = len(cifar10_test_labels) - correct_predict
accuracy = float(correct_predict) / len(cifar10_test_labels)
print('Acc: {}. Testing took {}s.'.format(accuracy, run_time))

result = OrderedDict(correct_predict=correct_predict,
                     accuracy=accuracy,
                     run_time=run_time)
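# --- Hedged sketch (not part of the original script): one plausible DatasetIterator ---
# DatasetIterator, train, and test are defined elsewhere in the project.  This
# sketch only mimics the DatasetIterator(x_test, y_test, batch_size) constructor
# used above, assuming NumPy array inputs; the real class may behave differently.
class DatasetIteratorSketch:
    def __init__(self, images, labels, batch_size):
        self.images = images
        self.labels = labels
        self.batch_size = batch_size

    def __iter__(self):
        # Yield consecutive (image_batch, label_batch) slices of size batch_size.
        for start in range(0, len(self.images), self.batch_size):
            end = start + self.batch_size
            yield self.images[start:end], self.labels[start:end]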
from net import test
from eval import evaluation

if __name__ == '__main__':
    # Get predicted classes and bounding boxes for the test data in the data folder.
    # If train=True, a new model is trained;
    # if train=False, the saved model from ckpt is used on the test set.
    pred_class, pred_bboxes = test(train=False)
    # Evaluate the model.
    evaluation(pred_class, pred_bboxes)
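# --- Hedged sketch (not part of the original script): the IoU metric a bbox evaluation typically uses ---
# eval.evaluation(pred_class, pred_bboxes) is defined in the project's eval module
# and is not shown here; this only illustrates intersection-over-union, assuming
# boxes are given as (x_min, y_min, x_max, y_max) tuples.
def iou(box_a, box_b):
    """Intersection over union of two axis-aligned boxes."""
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0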
import numpy as np

# model, device, optimizer, batchTrainingData, and batchValidationData are set up
# earlier in the script; net and stats are project modules.

lTrainAcc = []
lTestAcc = []
lF1 = []

for epoch in range(1, 100):
    trainAcc = net.train(model, device, batchTrainingData, optimizer, epoch)
    lTrainAcc.append(trainAcc)

    totalCorrect = 0
    total = 0
    dist = np.zeros((11, 11))       # confusion matrix: rows = target class, columns = prediction
    countTargets = np.zeros((11,))  # how many validation samples fall in each class

    for batch in batchValidationData:
        data = batch[0].to(device)
        target = batch[1].to(device)
        output = net.test(model, device, data)
        # Get the index of the max log-probability.
        pred = output.max(1, keepdim=True)[1]
        target = target.long().view_as(pred)
        totalCorrect += pred.eq(target).sum().item()
        total += len(data)
        for i in range(len(pred)):
            # Convert to plain ints so the NumPy arrays are updated in place.
            t = target[i].item()
            p = pred[i].item()
            dist[t][p] += 1
            countTargets[t] += 1

    print("Test accuracy: " + str(100 * totalCorrect / total) + "%")
    print(dist)
    lTestAcc.append(100 * totalCorrect / total)
    lF1.append(stats.F1overall(np.array(dist)))

print(lTrainAcc)
print(lTestAcc)
print(lF1)
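# --- Hedged sketch (not part of the original script): one plausible stats.F1overall ---
# stats.F1overall is a project helper not shown here.  This sketch assumes dist is
# a square confusion matrix with rows = true class and columns = predicted class,
# as built in the loop above, and returns a macro-averaged F1; the real helper may
# aggregate differently (e.g. micro averaging).
import numpy as np


def f1_overall_sketch(confusion):
    """Macro-averaged F1 score computed from a confusion matrix."""
    tp = np.diag(confusion).astype(float)
    precision = tp / np.maximum(confusion.sum(axis=0), 1)  # per predicted class
    recall = tp / np.maximum(confusion.sum(axis=1), 1)     # per true class
    f1 = 2 * precision * recall / np.maximum(precision + recall, 1e-12)
    return f1.mean()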
import pickle

# net, optimizer, num_epochs, batch_size, trn_set, tst_set, and num_class are
# defined earlier in the script; n is the project's network module.

print("Training network...")
n.train(net, optimizer, num_epochs, batch_size, trn_set, num_class, vld_set=None)

print('Saving network (pkl)...')
with open("save1.pkl", "wb") as f:
    pickle.dump(net, f)

# print("Saving network...")
# n.save_net(net, num_spec_layers=[1, 3], name_n='cnn(3_5_2)(0_1_2_8)', active=True)

print("Testing network...")
accuracy = n.test(net, tst_set)
print("Test accuracy: %0.2f%%" % (accuracy * 100 / len(tst_set)))

'''
net_load = n.Loaded_nn(name='save(math1)')
print("Testing Loaded Network...")
acc_load = n.test(net_load, tst_set)
print("Test accuracy of loaded nn: %0.2f%%" % (acc_load * 100. / len(tst_set)))

net_tr = pickle.load(open("save.pkl", "rb"))
print("Testing network...")
accuracy = n.test(net_tr, tst_set)
print("Test accuracy: %0.2f%%" % (accuracy * 100 / len(tst_set)))
'''
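# --- Hedged sketch (not part of the original script): one plausible n.test ---
# The division by len(tst_set) in the accuracy print above suggests n.test returns
# the *count* of correct predictions rather than a ratio.  This sketch assumes
# tst_set is an iterable of (input, label) pairs and that net maps an input to a
# vector of class scores; the real n.test may work differently.
import numpy as np


def test_sketch(net, tst_set):
    """Return the number of correctly classified samples in tst_set."""
    correct = 0
    for x, label in tst_set:
        if np.argmax(net(x)) == label:
            correct += 1
    return correct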