def train_prediction(net: Neural_network.NeuralNet,
                     inputs_train: Tensor,
                     targets_train: Tensor,
                     inputs_test: Tensor,
                     targets_test: Tensor,
                     loss: Loss.Loss = Loss.MeanSquareError(),
                     optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
                     num_epochs: int = 5000,
                     batch_size: int = 32) -> pd.DataFrame:
    """Train `net` while recording train/test metrics for every epoch."""
    Data = pd.DataFrame(columns=('MSE_train', 'MSE_test',
                                 'error_round_train', 'error_round_test'))
    size_training = inputs_train.shape[0]
    for epoch in range(num_epochs):
        Chi2_train = 0.0
        error_round_train = 0.0
        nbr_batch = 0
        for i in range(0, size_training, batch_size):
            nbr_batch += 1
            # 1) Feed forward
            y_actual = net.forward(inputs_train[i:i + batch_size])
            # 2) Compute the loss and the gradients
            Chi2_train += loss.loss(targets_train[i:i + batch_size], y_actual)
            grad_ini = loss.grad(targets_train[i:i + batch_size], y_actual)
            # 3) Feed backwards
            grad_fini = net.backward(grad_ini)
            # 4) Update the net
            optimizer.step(net, n_epoch=epoch)
            error_round_train += Error_round.error_round(
                targets_train[i:i + batch_size], y_actual)
        Chi2_train = Chi2_train / nbr_batch
        error_round_train = error_round_train / nbr_batch
        # Evaluate on the held-out test set once per epoch
        y_actual_test = net.forward(inputs_test)
        Chi2_test = loss.loss(targets_test, y_actual_test)
        error_round_test = Error_round.error_round(targets_test, y_actual_test)
        if epoch % 100 == 0:
            print('epoch : ' + str(epoch) + "/" + str(num_epochs) + "\r", end="")
        datanew = pd.DataFrame({
            'MSE_train': [Chi2_train],
            'MSE_test': [Chi2_test],
            'error_round_train': [error_round_train],
            'error_round_test': [error_round_test]
        })
        # DataFrame.append was removed in pandas 2.0; use pd.concat instead
        Data = pd.concat([Data, datanew], ignore_index=True)
    # path_ini is expected to be defined at module level
    os.chdir(path_ini)
    Data.to_csv('Opt_num_epoch_backup.csv', index=False)
    return Data
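
# Example usage (a minimal sketch, not part of the module): assumes a `net`
# built with this repo's Neural_network module and four pre-split tensors;
# all names below are hypothetical.
#
#     net = Neural_network.NeuralNet(...)  # layer spec elided
#     history = train_prediction(net, inputs_train, targets_train,
#                                inputs_test, targets_test,
#                                num_epochs=1000, batch_size=64)
#     print(history[['MSE_train', 'MSE_test']].tail())
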
def train(net: Neural_network.NeuralNet,
          inputs: Tensor,
          targets: Tensor,
          loss: Loss.Loss = Loss.MeanSquareError(),
          optimizer: OptimizerClass.Optimizer = OptimizerClass.SGD(),
          num_epochs: int = 5000,
          batch_size: int = 32) -> tuple:
    """Train `net` and return the per-epoch loss and rounding-error lists."""
    chi2_list = []
    round_error_list = []
    size_training = inputs.shape[0]
    for epoch in range(num_epochs):
        chi2_loss = 0.0
        round_error_loss = 0.0
        nbr_batch = 0
        for i in range(0, size_training, batch_size):
            nbr_batch += 1
            # 1) Feed forward
            y_actual = net.forward(inputs[i:i + batch_size])
            # 2) Compute the loss and the gradient
            chi2_loss += loss.loss(targets[i:i + batch_size], y_actual)
            round_error_loss += Error_round.error_round(
                targets[i:i + batch_size], y_actual)
            grad_ini = loss.grad(targets[i:i + batch_size], y_actual)
            # 3) Feed backwards
            grad_fini = net.backward(grad_ini)
            # 4) Update the net
            optimizer.step(net, n_epoch=epoch)
        chi2_loss = chi2_loss / nbr_batch
        round_error_loss = round_error_loss / nbr_batch
        chi2_list.append(chi2_loss)
        round_error_list.append(round_error_loss)
        # Print status every 50 epochs
        if epoch % 50 == 0:
            print('\repoch : ' + str(epoch) + "/" + str(num_epochs)
                  + ", training mean squared error : " + str(chi2_loss), end="")
    print('epoch : ' + str(epoch) + "/" + str(num_epochs)
          + ", training final mean squared error : " + str(chi2_loss) + '\n')
    return chi2_list, round_error_list
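
# Example usage (sketch): `train` returns the per-epoch metric lists, which
# makes quick plotting straightforward; `net`, `inputs`, and `targets` are
# hypothetical here.
#
#     chi2_list, round_error_list = train(net, inputs, targets,
#                                         num_epochs=2000, batch_size=32)
#     plt.plot(chi2_list)          # training loss curve
#     plt.show()
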
def prediction(net: Neural_network.NeuralNet, inputs: Tensor) -> Tensor:
    """Convenience wrapper: run a forward pass and return the raw outputs."""
    return net.forward(inputs)
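
# Example usage (sketch): after training, `prediction` gives the network's
# outputs on unseen data; `new_inputs` is a hypothetical Tensor.
#
#     y_pred = prediction(net, new_inputs)
#     y_labels = (y_pred > 0.5) * 1    # threshold for binary targets
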
net1.print_module()
print(" ")
print("learning_rate = {}, regularization = {}".format(
    net1.learning_rate, net1.regularization))
print("iteration = {}, batch_size = {}".format(net1.iteration, net1.batch_size))
print(" ")

loss_arr = net1.train(data_input, data_target)

plt.figure(figsize=(7, 4))
plt.plot(loss_arr)
plt.xlabel('iteration')
plt.ylabel('Loss')
plt.show()

# Accuracy on the training set: threshold the outputs at 0.5
output, _ = net1.forward(data_input, mode='test')
print("accuracy on training set is",
      torch.mean(1.0 * ((output > 0.5) == data_target)).item())
print(" ")

# Fresh uniform test set on [0, 1]^2; points inside the disc of radius
# sqrt(1 / (2*pi)) centred at (0.5, 0.5) (area 1/2) are labelled 1
test_input = torch.empty((nb, 2), dtype=dtype, device=device).uniform_(0, 1)
test_target = ((test_input - 0.5).norm(p=2, dim=1, keepdim=True)
               < math.sqrt(1 / 2 / math.pi)) * 1

output, _ = net1.forward(test_input, mode='test')
print("accuracy on testing set is",
      torch.mean(1.0 * ((output > 0.5) == test_target)).item())
print(" ")

print("-------------------------------------------------------")
print("---------------- End of section -----------------------")
print("-------------------------------------------------------")