def post(self) -> BaseResponse:
    """Handle a login POST request.

    Reads the JSON payload, validates it against the login schema,
    charges one token and stores the user's sentence on success.

    Returns:
        BaseResponse with a message and status code for the API client.
    """
    payload = request.get_json()
    helper.set_server_data(payload)
    ok, outcome = helper.validation(schemas.login_schema)
    if not ok:
        # Validation failed -- outcome already holds the error response.
        return jsonify(outcome)
    username, _, sentence = outcome
    # Logging in costs the user one token.
    helper.update_tokens(1, operator.sub)
    # Persist the sentence for this user.
    config.users.update(
        {"Username": username},
        {"$set": {"Sentance": sentence}}
    )
    return jsonify({
        "status": config.OK,
        "msg": "You succesfully login.",
        "sentance": sentence
    })
def post(self) -> BaseResponse:
    """Handle a registration POST request.

    Validates the submitted data against the register schema, rejects
    the reserved bank username, then creates the new account.

    Returns:
        BaseResponse with a message and status code for the API client.
    """
    payload = request.get_json()
    helper.set_server_data(payload)
    valid, reg_data = helper.validation(
        schemas.register_schema, is_register=True)
    if not valid:
        return jsonify(reg_data)
    first, second = helper.arguments_validation(reg_data)
    if isinstance(second, bool):
        # On failure `first` is a dict carrying the error message and code.
        return jsonify(first)
    username, password = first, second
    if username == config.bank_name:
        # The bank's own name is reserved and may not be registered.
        return jsonify({
            "Message": "This username is taken.",
            "Code": config.INVALID_USERNAME
        })
    insert_register_data(username, password)
    insert_bank_configuragion()
    return jsonify({
        "Code": config.OK,
        "Message": "You succesfully signed up."
    })
def post(self):
    """Handle a money-transfer POST request.

    Moves ``amount`` from user1's balance to user2's balance and
    charges user1 one token.

    Returns:
        BaseResponse object with message and code.
    """
    data = request.get_json()
    is_ok, result = helper.validation(schemas.transfer_schema, data)
    if not is_ok:
        return result
    user_1_data = data["user1"]
    user_2_data = data["user2"]
    amount = data["amount"]
    username_1 = user_1_data["username"]
    username_2 = user_2_data["username"]
    try:
        helper.balance_validation(username_1, username_2)
    except ValueError as ex:
        # balance_validation signals (message, code) via exception args.
        return jsonify({"message": ex.args[0], "code": ex.args[1]})
    # BUG FIX: balance_acc1/balance_acc2 (and usr_ex1/usr_ex2) were
    # referenced without ever being defined, so every valid transfer
    # raised NameError.  Look the current balances up explicitly.
    balance_acc1 = helper.find_balance(username_1)
    balance_acc2 = helper.find_balance(username_2)
    helper.update_balance(config.users, username_1, amount,
                          balance_acc1, operator.sub)
    helper.update_balance(config.users, username_2, amount,
                          balance_acc2, operator.add)
    # A transfer costs the sender one token.
    helper.update_tokens(config.users, username_1, 1, operator.sub)
    return jsonify({
        "Message": "Transaction completed successfully.",
        "Code": config.OK
    })
def post(self) -> BaseResponse:
    """Handle a token-refill POST request.

    Validates the body against ``refill_schema`` (skipping token
    validation) and credits ``tokens_add`` tokens to the user.

    Returns:
        BaseResponse object with message and code.
    """
    # NOTE(review): hashing and inserting the admin account on EVERY
    # request looks wrong -- it re-runs a deliberately slow bcrypt hash
    # and inserts a duplicate admin document on each call.  This looks
    # like one-time setup code; confirm before moving it out.
    admin_pwd_crypted = bcrypt.hashpw(config.admin_pwd.encode("utf8"),
                                      bcrypt.gensalt())
    config.users.insert({
        "Username": config.admin_name,
        "Password": admin_pwd_crypted
    })
    helper.set_server_data(request.get_json())
    # token_validation=False: presumably so refilling works even when
    # the user has run out of tokens -- TODO confirm against helper.
    validation, result = helper.validation(schemas.refill_schema,
                                           token_validation=False)
    if not validation:
        return jsonify(result)
    username, _, tokens_add = result
    helper.update_tokens(username, tokens_add, operator.add)
    return jsonify({
        "Message": "Tokens updated successfully.",
        "Code": config.OK
    })
def post(self) -> BaseResponse:
    """Handle a username-change POST request.

    Validates the payload, charges one token, then renames the user in
    the database.

    Returns:
        BaseResponse with a message and status code for the API client.
    """
    payload = request.get_json()
    valid, outcome = helper.validation(config.users,
                                       schemas.usr_change_schema,
                                       payload,
                                       config.username_change_keys)
    if not valid:
        return jsonify(outcome)
    current_name, _, new_username = outcome
    # Changing the username costs one token.
    helper.update_tokens(config.users, current_name, 1, operator.sub)
    config.users.update(
        {"Username": current_name},
        {"$set": {"Username": new_username}}
    )
    return jsonify({
        "Message": "Username changed successfully.",
        "Code": config.OK
    })
def post(self):
    """Handle a deposit/withdraw POST request.

    Expects a body matching ``update_balance_schema`` with a positive
    amount and a code of 'D' (deposit) or 'W' (withdraw).

    Returns:
        BaseResponse object with message and code.
    """
    # taking data from server
    data = request.get_json()
    validation, result = helper.validation(config.users,
                                           schemas.update_balance_schema,
                                           data,
                                           config.update_balance_keys)
    if not validation:
        return jsonify(result)
    username, _, code, amount = result
    # Reject non-positive amounts before touching the database (the
    # balance lookup used to run first, wasting a query on bad input).
    if amount <= 0:
        return jsonify({
            # BUG FIX: message previously misspelled "greather".
            "Message": "Amount must be greater than zero.",
            "Code": config.INVALID_AMOUNT
        })
    money_curr = config.users.find({"Username": username})[0]["Balance"]
    # 'D' deposits, 'W' withdraws (only if covered); anything else is
    # reported back as an invalid code.
    if code == "D":
        helper.update_balance(config.users, username, amount, money_curr,
                              operator.add)
    elif code == "W":
        if amount > money_curr:
            return jsonify({
                "Message": "You don't have enough money",
                "Code": config.NOT_ENOUGH_MONEY
            })
        helper.update_balance(config.users, username, amount, money_curr,
                              operator.sub)
    else:
        return jsonify({
            "Message": "For withdraw enter 'W', for deposit enter 'D'",
            "Code": config.INVALID_CODE
        })
    # Every successful balance update costs one token.
    helper.update_tokens(config.users, username, 1, operator.sub)
    return jsonify({
        "Message": "You successfully updated your balance.",
        "Code": config.OK
    })
def main():
    """Train a model on the CLI-specified data directory, save a
    checkpoint, then run validation on the test split."""
    input_args = get_input_args()
    gpu = torch.cuda.is_available() and input_args.gpu
    dataloaders, class_to_idx = helper.get_dataloders(input_args.data_dir)
    model, optimizer, criterion = helper.model_create(
        input_args.architectures,
        input_args.learning_rate,
        input_args.hidden_units,
        class_to_idx
    )
    if gpu:
        model.cuda()
        criterion.cuda()
        device = 'cuda'
    else:
        torch.set_num_threads(input_args.num_threads)
        device = 'cpu'
    # BUG FIX: helper.train was always called with device='cpu' even
    # when the model and criterion had been moved to the GPU above.
    # BUG FIX: epochs was hard-coded to 3 while the checkpoint recorded
    # input_args.epochs; honour the CLI value so both agree.
    epochs = input_args.epochs
    print_every = 40
    helper.train(model, dataloaders['training'], epochs, print_every,
                 criterion, optimizer, device=device)
    checkpoint_name = input_args.architectures + '_checkpoint.pth'
    if input_args.save_dir:
        # exist_ok avoids the racy exists()-then-makedirs check.
        os.makedirs(input_args.save_dir, exist_ok=True)
        file_path = os.path.join(input_args.save_dir, checkpoint_name)
    else:
        file_path = checkpoint_name
    helper.save_checkpoint(file_path, model, optimizer,
                           input_args.architectures,
                           input_args.learning_rate,
                           epochs
                           )
    # Final evaluation on the held-out test split.
    helper.validation(model, dataloaders['testing'], criterion)
def post(self) -> BaseResponse:
    """Handle a password-change POST request.

    Validates the payload, rejects reuse of the old password, charges
    one token and stores the bcrypt hash of the new password.

    Returns:
        BaseResponse instance with message and status for the API.
    """
    # try to get data from user
    data = request.get_json()
    helper.set_server_data(data)
    is_valid, result = helper.validation(schemas.pass_change_schema)
    if not is_valid:
        return jsonify(result)
    username, _, new_pwd = result
    # PERF/BUG FIX: check old == new BEFORE hashing -- bcrypt is
    # deliberately slow, so don't pay for a hash we may throw away.
    if helper.new_old_passwords_equal():
        return jsonify({
            "Message": "Old password cannot be new password",
            "Code": config.INVALID_PASSWORD
        })
    new_pwd_hashed = bcrypt.hashpw(new_pwd.encode("utf-8"),
                                   bcrypt.gensalt())
    # removing one token
    helper.update_tokens(1, operator.sub)
    # changing password
    config.users.update({"Username": username}, {"$set": {
        "Password": new_pwd_hashed
    }})
    # NOTE(review): success uses "status"/"msg" keys while the error
    # branch uses "Message"/"Code" -- left unchanged so existing
    # clients keep working, but this asymmetry should be unified.
    return jsonify({
        "status": config.OK,
        "msg": "Password changed succesfully."
    })
def post(self):
    """Handle a loan POST request.

    Moves ``amount`` from the user's balance into the bank's balance.

    Returns:
        BaseResponse object with message and code.

    NOTE(review): the direction looks inverted for granting a loan
    (the bank gains, the user loses) -- confirm whether this endpoint
    is actually repaying a loan; behavior preserved as-is.
    """
    payload = request.get_json()
    valid, outcome = helper.validation(config.users, schemas.loan_schema,
                                       payload, config.loan_keys)
    if not valid:
        return jsonify(outcome)
    username = payload["username"]
    user_balance = helper.find_balance(username)
    amount = payload["amount"]
    bank_balance = helper.find_balance(config.bank_name)
    # Credit the bank first, then debit the user.
    helper.update_balance(config.bank, config.bank_name, amount,
                          bank_balance, operator.add)
    helper.update_balance(config.users, username, amount,
                          user_balance, operator.sub)
    return jsonify({
        "Message": "Transaction successfully terminated.",
        "Code": config.OK
    })
def train_network(model):
    """Interactively train `model` (built/loaded if None), with an
    optional validation pass at the end of each epoch.

    Prompts the user for epochs, training-data path, and whether to
    validate; prints per-step loss/accuracy as it goes.
    """
    # BUG FIX: identity comparison with None (was `model == None`).
    if model is None:
        model = helper.build_or_load_model()
    optimizer = model.optimizer
    criterion = model.criterion
    valid_pass = False
    valid_loader = None
    train_loader = None
    epochs = helper.get_int('Please enter the epochs for training')
    train_data_dir = helper.get_dir_path(
        'Please enter path to the training data from current location')
    train_loader = helper.get_dataloader(train_data_dir, Constant.TRAIN)
    valid_pass = helper.get_yn_input('Do you want to do a validation pass?')
    if valid_pass:
        valid_data_dir = helper.get_dir_path(
            'Please enter path to the validating data from current location')
        valid_loader = helper.get_dataloader(valid_data_dir, Constant.VALID)
    # One "report" per epoch: print_every equals the number of batches.
    print_every = len(train_loader)
    steps = 0
    print('\nTraining the network\n')
    device = helper.get_device()
    model = helper.load_device(model)
    stepsArr = []
    accuracyArr = []
    print("\nTrain loader has {} images\n".format(len(train_loader)))
    # run a pre-set amount of times (epochs)
    for e in range(epochs):
        running_loss = 0
        accuracy = 0
        for ii, (images, labels) in enumerate(train_loader):
            steps += 1
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            # Forward and backward passes
            outputs = model.forward(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            ps = torch.exp(outputs)
            running_loss += loss.item()
            # Per-batch accuracy (intentionally overwritten each step).
            equality = (labels.data == ps.max(dim=1)[1])
            accuracy = equality.type(torch.FloatTensor).mean()
            accString = "{:.2f}".format(accuracy * 100)
            print("Step {} Test Loss {:.3f} Running Loss {:.3f} Accuracy {}%".
                  format(steps, loss, running_loss, accString))
            stepsArr.append(steps)
            accuracyArr.append(accString)
            if steps % print_every == 0:
                print("\n\nFinished Epoch: {}/{}.. ".format(e + 1, epochs))
                if valid_pass:
                    print("\nStarting validation pass for epoch: {}/{}.. ".
                          format(e + 1, epochs))
                    # Make sure network is in eval mode for inference
                    model.eval()
                    # Turn off gradients for validation, saves memory
                    # and computations
                    with torch.no_grad():
                        test_loss, accuracy = helper.validation(
                            model, valid_loader, criterion)
                    print(
                        "\nFinished validating for epoch: {}/{}.. ".format(
                            e + 1, epochs),
                        "Training loss: {:.3f}.. ".format(
                            running_loss / print_every),
                        "Validation loss: {:.3f}.. ".format(test_loss /
                                                            len(valid_loader)),
                        "Validation accuracy: {:.3f} %".format(
                            accuracy / len(valid_loader) * 100))
                    running_loss = 0
                    # BUG FIX: the old `if (e == epochs): break` was dead
                    # code -- e never reaches epochs inside range(epochs) --
                    # so training mode is simply restored unconditionally.
                    model.train()
    print("\n\nTraining finished\n")
# Forward and backward passes outputs = model.forward(inputs) # Forward pass through architecture + my classifier network loss = criterion(outputs, labels) # Calculate the NLL loss loss.backward() # Backprop (but we actually only train the classifier layers) optimizer.step() # Take step in the negative gradient direction with momentum (ADAM) running_loss += loss.item() if steps % print_every == 0: # Make sure network is in eval mode for inference (No dropouts...) model.eval() # Turn off gradients for validation, saves memory and computations with torch.no_grad(): test_loss, accuracy = validation(model, validLoader, criterion, device) print("Epoch: {}/{}.. ".format(e+1, epochs), "Training Loss: {:.3f}.. ".format(running_loss/print_every), "Validation Loss: {:.3f}.. ".format(test_loss/len(validLoader)), "Validation Accuracy: {:.3f}".format(accuracy/len(validLoader))) running_loss = 0 # Make sure training is back on model.train() # Save our trained model # TODO: Save the checkpoint - I'm saving some extra data.. # Saving the model: The trained model is saved as a checkpoint along with associated hyperparameters # and the class_to_idx dictionary
def main():
    """Build a pretrained classifier (vgg16 or densenet121), train it,
    evaluate on the test split and save a checkpoint."""
    input_args = get_input_args()
    # use helper function load_data to create the three loaders
    trainloader, validationloader, testloader, class_to_idx, batch_size = \
        helper.load_data(input_args.data_dir)
    # Classifier hyperparameters.
    dropout = 0.5
    output_size = 102
    hidden_sizes = input_args.hidden_units
    arch = input_args.arch
    if arch == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif arch == 'densenet121':
        model = models.densenet121(pretrained=True)
    else:
        # BUG FIX: previously this branch only printed a warning and the
        # function then crashed later with NameError because `model` was
        # never bound; fail fast with a clear error instead.
        raise ValueError('please enter a valid network vgg16 or densenet121')
    input_size = valid_networks[input_args.arch]
    # Freeze the pretrained feature extractor; only the classifier trains.
    for param in model.parameters():
        param.requires_grad = False
    model.classifier = nn.Sequential(
        OrderedDict([('fc1', nn.Linear(input_size, hidden_sizes)),
                     ('relu1', nn.ReLU()),
                     ('dropout1', nn.Dropout(dropout)),
                     ('output', nn.Linear(hidden_sizes, output_size)),
                     ('softmax', nn.LogSoftmax(dim=1))]))
    # NLLLoss pairs with the LogSoftmax output layer above.
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(),
                           input_args.learning_rate)
    if input_args.gpu:
        device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cpu')
    # training and printing the accuracy
    epochs = input_args.epochs
    steps = 0
    running_loss = 0
    print_every = 40
    print('training started')
    with active_session():
        # Moved out of the epoch loop: the model only needs to be placed
        # on the device once, not once per epoch.
        model.to(device)
        for e in range(epochs):
            for images, labels in trainloader:
                steps += 1
                images, labels = images.to(device), labels.to(device)
                optimizer.zero_grad()
                output = model.forward(images)
                loss = criterion(output, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                if steps % print_every == 0:
                    # Eval mode + no_grad for the validation pass.
                    model.eval()
                    with torch.no_grad():
                        validation_loss, accuracy = helper.validation(
                            model, validationloader, criterion, device)
                    print(
                        "Epoch: {}/{}.. ".format(e + 1, epochs),
                        "Training Loss: {:.3f}.. ".format(running_loss /
                                                          print_every),
                        "Valid Loss: {:.3f}.. ".format(
                            validation_loss / len(validationloader)),
                        "Validation Accuracy: {:.3f}".format(
                            accuracy / len(validationloader)))
                    running_loss = 0
                    # Make sure training is back on
                    model.train()
    print('training model finished')
    # Do validation on test set
    with torch.no_grad():
        test_loss, accuracy = helper.validation(model, testloader,
                                                criterion, device)
    print("Test-Loss: {}\n".format(test_loss / len(testloader)),
          "Test-Accuracy: {}".format(accuracy / len(testloader)))
    # Save the checkpoint.
    # NOTE(review): save_dir is concatenated directly -- assumes it ends
    # with a path separator; confirm before switching to os.path.join.
    save_location = input_args.save_dir + 'checkpoint.pth'
    checkpoint = {
        'arch': input_args.arch,
        'input_size': model.classifier[0].in_features,
        'state_dict': model.classifier.state_dict(),
        'class_to_idx': class_to_idx,
        'output_size': output_size,
        'classifier': model.classifier,
        'hidden_layers': hidden_sizes,
        'dropout': dropout,
        # BUG FIX: was `optimizer.state_dict` (the bound method object),
        # which saved the method itself instead of the optimizer state.
        'optimizer state': optimizer.state_dict(),
        'number of epochs': epochs,
        'gpu': input_args.gpu
    }
    torch.save(checkpoint, save_location)
    print('saved checkpoint')
# --- fragment: interior of a training loop; the enclosing function
# (and the definitions of model, inputs, labels, steps, epoch, args,
# dataloaders, device, ...) is not visible in this chunk ---
optimizer.zero_grad()
output = model(inputs)
_, preds = torch.max(output.data, 1)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
# Class probabilities recovered from the log-softmax output.
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
accuracy += equality.type(torch.FloatTensor).mean()
if steps % print_every == 0:
    # Periodic validation pass; train metrics averaged per window.
    test_loss, test_accuracy = validation(model, dataloaders['valid'],
                                          criterion, device)
    print("Epoch: {}/{}".format(epoch + 1, args.epochs),
          "Train Loss: {:.4f}".format(running_loss / print_every),
          "Train Accuracy : {:.4f}".format(accuracy / print_every),
          "Validation Loss : {:.4f}".format(test_loss),
          "Validation Accuracy : {:.4f}".format(test_accuracy))
    model.train()
    accuracy = 0
    running_loss = 0
# Do validation on the test set, print results
test_loss, test_accuracy = validation(model, dataloaders['test'],
                                      criterion, device)
print("Test Loss : {:.4f}".format(test_loss),
      "Test Accuracy : {:.4f}".format(test_accuracy))
# --- fragment: tail of a training loop; the enclosing function (and
# the definitions of e, epochs, steps, model, testloader, args, ...)
# is not visible in this chunk ---
optimizer.step()
running_loss += loss.item()
# Lightweight progress spinner; "\r" keeps it on one terminal line.
print(f'Training epoch {e + 1}/{epochs}{"." * (steps % 4)} ', end = "\r")
if steps % print_every == 0:
    print('Running validation... ', end = '\r')
    # Make sure network is in eval mode for inference
    model.eval()
    # Turn off gradients for validation, saves memory and computations
    with torch.no_grad():
        test_loss, accuracy = helper.validation(model, testloader,
                                                criterion, device)
    print("Epoch: {}/{}.. ".format(e+1, epochs),
          "Training Loss: {:.3f}.. ".format(running_loss/print_every),
          "Test Loss: {:.3f}.. ".format(test_loss/len(testloader)),
          "Test Accuracy: {:.3f}".format(accuracy/len(testloader)),
          "\n", end = "\r")
    running_loss = 0
    # Make sure training is back on
    model.train()
# Record progress and write a checkpoint.
# NOTE(review): the indentation of these two lines relative to the
# epoch loop is not recoverable from this flattened source -- confirm
# whether they run per-epoch or once after training.
model.settings["current_epoch"] = e
Model.save(model, optimizer, args.save_dir + "/checkpoint.pth")
else: images, labels = Variable(images), Variable(labels) output = model.forward(images) loss = criterion(output, labels) loss.backward() optimizer.step() running_loss += loss.item() if steps % print_every == 0: # Make sure network is in eval mode for inference model.eval() # Turn off gradients for validation, saves memory and computations with torch.no_grad(): test_loss, accuracy = helper.validation( model, dataloaders[1], criterion, gpu_usage) print( "Epoch: {}/{}.. ".format(e + 1, epochs), "Training Loss: {:.3f}.. ".format(running_loss / print_every), "Validation Loss: {:.3f}.. ".format(test_loss / len(dataloaders[1])), "Validation Accuracy: {:.3f}".format(accuracy / len(dataloaders[1]))) running_loss = 0 # Make sure training is back on model.train() total_time = time.time() - start print("\nTotal time: {:.0f}m {:.0f}s".format(total_time // 60,