def main():
    X = pd.read_csv('./data/Training_data.csv')

    # Observe the data distribution
    print(X[X['target'] == 1].sample(5))
    print(X[X['target'] == 0].sample(5))
    print(X['target'].value_counts())

    # Remove the deviating (outlier) rows in the non-fraud domain
    X = X.drop(X.index[find_anomalies(X)])
    X['col14'] = pd.Series(X['col7'] * X['col6'], index=X.index)

    # X_fraud = X[X['target'] == 1].sample(1000)
    # X_non_fraud = X[X['target'] == 0].sample(1000)
    # X_shuffle = X_fraud.append(X_non_fraud)
    # X_shuffle = X_shuffle.reindex(np.random.permutation(X_shuffle.index))

    model = TrainModel(X, 0.0001)
    model.train()

    # Prepare testing data
    X_pred = pd.read_csv('./data/Testing_data.csv')
    X_pred['col14'] = pd.Series(X_pred['col7'] * X_pred['col6'], index=X_pred.index)
    model.predict(X_pred)
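# `find_anomalies` is not defined in this snippet. Below is a minimal sketch of what
# such a helper could look like, assuming a simple IQR (inter-quartile range) rule and
# that it returns positional indices usable with `X.index[...]` — an illustrative
# assumption, not the project's actual implementation:
import numpy as np
import pandas as pd

def find_anomalies_iqr(df, column='col7', k=1.5):
    """Return positional indices of rows whose `column` value falls outside
    [Q1 - k*IQR, Q3 + k*IQR] (hypothetical helper)."""
    q1, q3 = df[column].quantile([0.25, 0.75])
    iqr = q3 - q1
    mask = (df[column] < q1 - k * iqr) | (df[column] > q3 + k * iqr)
    return np.flatnonzero(mask.to_numpy())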
def main():
    # Device
    SEED = 1
    cuda = torch.cuda.is_available()
    print("CUDA available?", cuda)
    torch.manual_seed(SEED)
    if cuda:
        torch.cuda.manual_seed(SEED)
    device = torch.device("cuda" if cuda else "cpu")

    # Create train and test loaders
    trainloader = Loader.getDataLoader(dataset_name, trainSet_dict, trainLoad_dict)
    testloader = Loader.getDataLoader(dataset_name, testSet_dict, testLoad_dict)
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    # Loss function
    criterion = nn.NLLLoss()

    # Model and optimizer
    model = Net().to(device)
    optimizer = optim.SGD(model.parameters(), lr=LR,
                          momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)

    # Start training
    for epoch in range(EPOCHS):
        train_loss, train_acc = TrainModel.train(model, device, trainloader,
                                                 criterion, optimizer, epoch)
        train_losses.append(train_loss)
        train_accuracy.append(train_acc)

        test_loss, test_acc = TestModel.test(model, device, testloader, criterion)
        test_losses.append(test_loss)
        test_accuracy.append(test_acc)

    # Plot and save graph
    getPlottedGraph(EPOCHS, train_losses, train_accuracy, test_losses, test_accuracy,
                    name="cifar_10_plot", PATH=IMAGE_PATH)

    # Save model
    torch.save(model.state_dict(), MODEL_PATH + "model7.pth")
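# `TrainModel.train` and `TestModel.test` live elsewhere in the repo. A minimal sketch
# of per-epoch train/test helpers matching the call sites above (return values and
# behaviour are inferred from the calls — an assumption, not the project's actual code):
import torch

def train_one_epoch(model, device, loader, criterion, optimizer, epoch):
    model.train()
    running_loss, correct, total = 0.0, 0, 0
    for data, target in loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        correct += output.argmax(dim=1).eq(target).sum().item()
        total += target.size(0)
    return running_loss / len(loader), 100.0 * correct / total

def test_one_epoch(model, device, loader, criterion):
    model.eval()
    running_loss, correct, total = 0.0, 0, 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            running_loss += criterion(output, target).item()
            correct += output.argmax(dim=1).eq(target).sum().item()
            total += target.size(0)
    return running_loss / len(loader), 100.0 * correct / total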
def main():
    # Device
    SEED = 1
    cuda = torch.cuda.is_available()
    print("CUDA available?", cuda)
    torch.manual_seed(SEED)
    if cuda:
        torch.cuda.manual_seed(SEED)
    device = torch.device("cuda" if cuda else "cpu")

    # Create train and test loaders
    trainloader = Loader.getDataLoader(dataset_name, trainSet_dict, trainLoad_dict)
    testloader = Loader.getDataLoader(dataset_name, testSet_dict, testLoad_dict)
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    # Loss function
    criterion = nn.CrossEntropyLoss()

    # Model, optimizer and scheduler
    model = ResNet18().to(device)
    optimizer = optim.SGD(model.parameters(), lr=LR,
                          momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
    scheduler = StepLR(optimizer, step_size=13, gamma=0.1)

    # Start training
    for epoch in range(EPOCHS):
        train_loss, train_acc = TrainModel.train(model, device, trainloader,
                                                 criterion, optimizer, epoch)
        # scheduler.step()
        train_losses.append(train_loss)
        train_accuracy.append(train_acc)

        test_loss, test_acc = TestModel.test(model, device, testloader, criterion)
        test_losses.append(test_loss)
        test_accuracy.append(test_acc)

    # Plot and save graph
    getPlottedGraph(EPOCHS, train_losses, train_accuracy, test_losses, test_accuracy,
                    name="cifar_10_plot_using_resnet18_v3", PATH=IMAGE_PATH)

    # Save model
    torch.save(model.state_dict(), MODEL_PATH + "model8_v3.pth")

    # Misclassified images
    ms.show_save_misclassified_images(model, device, testloader, classes,
                                      list(img_mean), list(img_std),
                                      "fig_cifar10_v1", IMAGE_PATH, 25)
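# `ms.show_save_misclassified_images` receives the dataset mean/std so it can undo the
# normalisation before plotting. A minimal sketch of that denormalisation step for a
# (C, H, W) tensor — illustrative only; the function and parameter names are assumptions:
import torch

def denormalize(img, mean, std):
    """Undo transforms.Normalize on a (C, H, W) tensor for display."""
    mean = torch.tensor(mean).view(-1, 1, 1)
    std = torch.tensor(std).view(-1, 1, 1)
    return (img * std + mean).clamp(0.0, 1.0)

# Usage (assuming `img` is a normalised CIFAR-10 tensor):
# plt.imshow(denormalize(img, img_mean, img_std).permute(1, 2, 0).numpy())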
def main():
    # Create train and test loaders
    trainloader = Loader.getDataLoader(dataset_name, trainSet_dict, trainLoad_dict)
    testloader = Loader.getDataLoader(dataset_name, testSet_dict, testLoad_dict)
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')

    # Start training
    for epoch in range(EPOCHS):
        # Train
        train_loss, train_acc = TrainModel.train(model, device, trainloader,
                                                 criterion, optimizer, epoch)
        scheduler.step()
        train_losses.append(train_loss)
        train_accuracy.append(train_acc)

        # Test
        test_loss, test_acc = TestModel.test(model, device, testloader, criterion)
        test_losses.append(test_loss)
        test_accuracy.append(test_acc)

        # Save checkpoint
        state = {'epoch': epoch + 1,
                 'state_dict': model.state_dict(),
                 'optimizer': optimizer.state_dict()}
        torch.save(state, filename)

    # Plot and save graph
    getPlottedGraph(EPOCHS, train_losses, train_accuracy, test_losses, test_accuracy,
                    name="S9_plot_final", PATH=MODEL_PATH)

    # Show and save correctly classified images
    show_save_correctly_classified_images(model, testloader, device, IMAGE_PATH,
                                          name="correct_classified_imgs",
                                          max_correctly_classified_images_imgs=25,
                                          labels_list=classes)

    # Show and save misclassified images
    show_save_misclassified_images(model, testloader, device, IMAGE_PATH,
                                   name="misclassified_imgs",
                                   max_misclassified_imgs=25,
                                   labels_list=classes)

    # Visualize activation maps
    misclassified_imgs, correctly_classified_images = classify_images(model, testloader, device, 5)
    layers_list = ["layer1", "layer2", "layer3", "layer4"]
    display_gradcam = VisualizeCam(model, classes, layers_list)
    correct_pred_imgs = []
    for i in range(len(correctly_classified_images)):
        correct_pred_imgs.append(torch.as_tensor(correctly_classified_images[i]["img"]))
    display_gradcam(torch.stack(correct_pred_imgs), layers_list,
                    PATH="./" + str("visualization"), metric="correct")
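# The loop above checkpoints {'epoch', 'state_dict', 'optimizer'} every epoch. A minimal
# sketch of resuming training from such a checkpoint (the helper name and variables are
# assumptions mirroring the save call, not code from this repo):
import torch

def load_checkpoint(model, optimizer, filename, device):
    """Restore model/optimizer state and return the epoch to resume from."""
    checkpoint = torch.load(filename, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    return checkpoint['epoch']

# start_epoch = load_checkpoint(model, optimizer, filename, device)
# for epoch in range(start_epoch, EPOCHS): ...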
            model_index = args.dataset + '_{}_{}_{}_{}'.format(
                'test' if args.test_model else args.index,
                args.test_name, this_arg, cnt + 1)
            print('Model index: {}'.format(model_index))
            result_writer = ResultWriter("results/{}.txt".format(model_index))
            # Dynamically override the hyper-parameter under test, i.e. args.<test_name> = this_arg
            exec("%s = %d" % ('args.{}'.format(args.test_name), this_arg))
            if args.remove_old_files:
                remove_oldfiles(model_index)
            result_writer.write(str(args))
            model_trainer = TrainModel(model_index, args)
            print("\nStart training DSAN...\n")
            model_trainer.train()
            args.load_saved_data = True
            K.clear_session()
            if args.test_model:
                remove_oldfiles(model_index)
    else:
        for cnt in range(1 if args.test_model else args.run_time):
            model_index = args.dataset + '_{}_{}'.format(
                'test' if args.test_model else args.index, cnt + 1)
            print('Model index: {}'.format(model_index))
            result_writer = ResultWriter("results/{}.txt".format(model_index))
            if args.remove_old_files:
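# The exec()-based override above builds and evaluates a string such as "args.lr = 5".
# A string-free alternative using setattr (a suggestion, not the project's code; note
# that the %d formatting in the original also coerces the value to int, which setattr
# does not):
import argparse

args_demo = argparse.Namespace(lr=0.001, test_name='lr')  # stand-in for the parsed args
value_under_test = 5                                      # stand-in for this_arg
setattr(args_demo, args_demo.test_name, value_under_test) # equivalent to: args_demo.lr = 5
print(args_demo.lr)                                       # -> 5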
def main():
    # Hyper parameters
    EPOCHS = 2

    # For reproducibility
    SEED = 1

    # Check for CUDA
    cuda = torch.cuda.is_available()
    print("CUDA available?", cuda)
    torch.manual_seed(SEED)
    if cuda:
        torch.cuda.manual_seed(SEED)

    train_dataset, test_dataset, train_loader, test_loader = DataLoaders.dataload()
    device = torch.device("cuda" if cuda else "cpu")

    # Summary
    # summary(model, input_size=(1, 28, 28))

    # Models, optimizers and schedulers
    model1 = bn_model().to(device)
    optimizer1 = optim.SGD(model1.parameters(), lr=0.01, momentum=0.9)
    scheduler1 = StepLR(optimizer1, step_size=7, gamma=0.1)

    model2 = gbn_model().to(device)
    optimizer2 = optim.SGD(model2.parameters(), lr=0.01, momentum=0.9)
    scheduler2 = StepLR(optimizer2, step_size=7, gamma=0.1)

    for epoch in range(EPOCHS):
        # With L1
        l1_train_loss, l1_train_acc = TrainModel.train(model1, device, train_loader,
                                                       optimizer1, epoch,
                                                       L1_regularization=reg, m_type="L1")
        l1_train_losses.append(l1_train_loss)
        l1_train_accuracy.append(l1_train_acc)
        # scheduler1.step_size = 23
        scheduler1.step()
        l1_test_loss, l1_test_acc = TestModel.test(model1, device, test_loader)
        l1_test_losses.append(l1_test_loss)
        l1_test_accuracy.append(l1_test_acc)

        # With L2
        optimizer1.param_groups[0]['weight_decay'] = 0.0001
        l2_train_loss, l2_train_acc = TrainModel.train(model1, device, train_loader,
                                                       optimizer1, epoch, m_type="L2")
        l2_train_losses.append(l2_train_loss)
        l2_train_accuracy.append(l2_train_acc)
        # scheduler1.step_size = 3
        scheduler1.step()
        l2_test_loss, l2_test_acc = TestModel.test(model1, device, test_loader)
        l2_test_losses.append(l2_test_loss)
        l2_test_accuracy.append(l2_test_acc)

        # With L1 and L2
        optimizer1.param_groups[0]['weight_decay'] = 0.0001
        l1_l2_train_loss, l1_l2_train_acc = TrainModel.train(model1, device, train_loader,
                                                             optimizer1, epoch,
                                                             L1_regularization=reg,
                                                             m_type="L1&L2")
        l1_l2_train_losses.append(l1_l2_train_loss)
        l1_l2_train_accuracy.append(l1_l2_train_acc)
        # scheduler1.step_size = 19
        scheduler1.step()
        l1_l2_test_loss, l1_l2_test_acc = TestModel.test(model1, device, test_loader)
        l1_l2_test_losses.append(l1_l2_test_loss)
        l1_l2_test_accuracy.append(l1_l2_test_acc)

        # With GBN
        gbn_train_loss, gbn_train_acc = TrainModel.train(model2, device, train_loader,
                                                         optimizer2, epoch, m_type="GBN")
        gbn_train_losses.append(gbn_train_loss)
        gbn_train_accuracy.append(gbn_train_acc)
        # scheduler2.step_size = 11
        scheduler2.step()
        gbn_test_loss, gbn_test_acc = TestModel.test(model2, device, test_loader)
        gbn_test_losses.append(gbn_test_loss)
        gbn_test_accuracy.append(gbn_test_acc)

        # GBN with L2
        optimizer2.param_groups[0]['weight_decay'] = 0.0001
        gbn_l2_train_loss, gbn_l2_train_acc = TrainModel.train(model2, device, train_loader,
                                                               optimizer2, epoch,
                                                               m_type="GBN&L2")
        gbn_l2_train_losses.append(gbn_l2_train_loss)
        gbn_l2_train_accuracy.append(gbn_l2_train_acc)
        # scheduler2.step_size = 6
        scheduler2.step()
        gbn_l2_test_loss, gbn_l2_test_acc = TestModel.test(model2, device, test_loader)
        gbn_l2_test_losses.append(gbn_l2_test_loss)
        gbn_l2_test_accuracy.append(gbn_l2_test_acc)

        # GBN with L1 and L2
        optimizer2.param_groups[0]['weight_decay'] = 0.0001
        gbn_l1_l2_train_loss, gbn_l1_l2_train_acc = TrainModel.train(model2, device, train_loader,
                                                                     optimizer2, epoch,
                                                                     L1_regularization=reg,
                                                                     m_type="GBN&L1&L2")
        gbn_l1_l2_train_losses.append(gbn_l1_l2_train_loss)
        gbn_l1_l2_train_accuracy.append(gbn_l1_l2_train_acc)
        # scheduler2.step_size = 21
        scheduler2.step()
        gbn_l1_l2_test_loss, gbn_l1_l2_test_acc = TestModel.test(model2, device, test_loader)
        gbn_l1_l2_test_losses.append(gbn_l1_l2_test_loss)
        gbn_l1_l2_test_accuracy.append(gbn_l1_l2_test_acc)

    # Save models
    # NOTE: both calls write to the same MODEL_PATH, so the second save overwrites the first
    # PATH = "/content/drive/My Drive/Lab/Loss_and_accuracy_plot.png"
    torch.save(model1, MODEL_PATH)
    torch.save(model2, MODEL_PATH)

    # Plot and save graph of losses and accuracy
    getPlottedGraph(EPOCHS,
                    l1_train_losses, l1_train_accuracy, l1_test_losses, l1_test_accuracy,
                    l2_train_losses, l2_train_accuracy, l2_test_losses, l2_test_accuracy,
                    l1_l2_train_losses, l1_l2_train_accuracy, l1_l2_test_losses, l1_l2_test_accuracy,
                    gbn_train_losses, gbn_train_accuracy, gbn_test_losses, gbn_test_accuracy,
                    gbn_l2_train_losses, gbn_l2_train_accuracy, gbn_l2_test_losses, gbn_l2_test_accuracy,
                    gbn_l1_l2_train_losses, gbn_l1_l2_train_accuracy, gbn_l1_l2_test_losses, gbn_l1_l2_test_accuracy,
                    name="plot", PATH=IMAGE_PATH)

    # Save misclassified images
    MI.show_save_misclassified_images(model2, test_loader, name="fig1",
                                      PATH=IMAGE_PATH, max_misclassified_imgs=25)
    MI.show_save_misclassified_images(model2, test_loader, name="fig2",
                                      PATH=IMAGE_PATH, max_misclassified_imgs=25)
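# `TrainModel.train` accepts `L1_regularization=reg` and an `m_type` flag. A minimal
# sketch of how an L1 penalty is typically added to the loss inside such a training
# step (the lambda value and structure are assumptions, not this project's
# implementation); L2 in the runs above comes via the optimizer's weight_decay:
import torch
import torch.nn.functional as F

def training_step_with_l1(model, data, target, optimizer, l1_lambda=1e-5):
    optimizer.zero_grad()
    output = model(data)
    loss = F.nll_loss(output, target)
    # Add lambda * sum(|w|) over all trainable parameters
    l1_penalty = sum(p.abs().sum() for p in model.parameters())
    loss = loss + l1_lambda * l1_penalty
    loss.backward()
    optimizer.step()
    return loss.item()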
                   '1: to convert images to an npy file.\n'
                   '2: to run the training.\n'
                   '3: to test the model.\n'
                   'action: ')

    if action == '0':
        print('INFO: Please provide the data path')
        path = input('path to data: ')
        list_categories(path)
    elif action == '1':
        print('INFO: Please provide the path to the images and the filename')
        path = input('path to the images: ')
        filename = input('the npy filename: ')
        image_to_npy(filename=filename, path=path, img_size=(64, 64))
    elif action == '2':
        print('INFO: Please provide the data path')
        data_path = input('data path: ')
        data = np.load(data_path, allow_pickle=True)
        images = np.array([i[0] for i in data])
        labels = np.array([i[1] for i in data])
        run_training = TrainModel(train_x=images, train_y=labels)
        run_training.train()
    elif action == '3':
        print('INFO: Please provide the image to classify and the model path!')
        image_path = input('image path: ')
        model_path = input('model path: ')
        run_classification = Test(image_path=image_path, graph_path=model_path)
        category = run_classification.classify()
        print(category)
    else:
        print('ERROR: Wrong choice of action!')
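# `image_to_npy` is defined elsewhere in the repo; action '2' expects the resulting .npy
# file to hold (image, label) pairs. A minimal sketch under the assumption of one
# sub-directory per category (the layout, grayscale conversion and one-hot labelling are
# assumptions, not the project's actual implementation):
import os
import numpy as np
from PIL import Image

def image_to_npy_sketch(filename, path, img_size=(64, 64)):
    categories = sorted(d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d)))
    data = []
    for label_idx, category in enumerate(categories):
        folder = os.path.join(path, category)
        for fname in os.listdir(folder):
            img = Image.open(os.path.join(folder, fname)).convert('L').resize(img_size)
            label = np.eye(len(categories))[label_idx]   # one-hot label
            data.append([np.array(img), label])
    np.save(filename, np.array(data, dtype=object), allow_pickle=True)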