break

            data, x_train, y_train, x_test, y_test, norm_constant = naf.one_file_training_data(
                actual_class_dir, file, forecast_length, backcast_length,
                batch_size, device)

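            # train on this file's windows for up to limit passes, evaluating on
            # the train and test splits after each one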
            while i < limit:  #difference > threshold and
                i += 1

                global_step = naf.train_full_grad_steps(
                    data, device, net, optimiser, test_losses,
                    training_models + training_checkpoint, x_train.shape[0])

                train_eval = naf.evaluate_training(backcast_length,
                                                   forecast_length, net,
                                                   norm_constant, test_losses,
                                                   x_train, y_train,
                                                   the_lowest_error, device)
                experiment.log_metric('train_loss', train_eval)

                new_eval = naf.evaluate_training(backcast_length,
                                                 forecast_length,
                                                 net,
                                                 norm_constant,
                                                 test_losses,
                                                 x_test,
                                                 y_test,
                                                 the_lowest_error,
                                                 device,
                                                 plot_eval=False,
                                                 class_dir=name,
Example #2
                while i < 2:  #old was 5  #difference > threshold and
                    i += 1
                    epoch += 1
                    print("Actual epoch: ", epoch,
                          "\nActual inside file loop: ", i)
                    global_step = naf.train_full_grad_steps(
                        data, device, net, optimiser, test_losses,
                        training_models + training_checkpoint,
                        x_train.shape[0])

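                    # evaluate on the training windows; the experiment object is
                    # forwarded so the helper can log its own metrics (inferred
                    # from the experiment kwarg)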
                    train_eval = naf.evaluate_training(backcast_length,
                                                       forecast_length,
                                                       net,
                                                       norm_constant,
                                                       test_losses,
                                                       x_train,
                                                       y_train,
                                                       the_lowest_error,
                                                       device,
                                                       experiment=experiment)
                    experiment.log_metric('train_loss', train_eval)

                    new_eval = naf.evaluate_training(backcast_length,
                                                     forecast_length,
                                                     net,
                                                     norm_constant,
                                                     test_losses,
                                                     x_test,
                                                     y_test,
                                                     the_lowest_error,
                                                     device,
Example #3
            iteration += 1
            if iteration > 30 or difference < threshold:
                break

            data, x_train, y_train, x_test, y_test, norm_constant = naf.one_file_training_data(
                actual_class_dir, file, forecast_length, backcast_length,
                batch_size)

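            # keep training while the test loss still improves by more than
            # threshold, for at most limit iterations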
            while difference > threshold and i < limit:
                i += 1
                global_step = naf.train_full_grad_steps(
                    data, device, net, optimiser, test_losses,
                    training_checkpoint, x_train.shape[0])
                new_eval = naf.evaluate_training(backcast_length,
                                                 forecast_length, net,
                                                 norm_constant, test_losses,
                                                 x_test, y_test,
                                                 the_lowest_error, device)
                print(f"GlobalStep: {global_step}, "
                      f"new evaluation score: {new_eval}")
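                # difference and old_eval are only updated on improvement, so the
                # loop above stops once the gain per iteration drops below threshold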
                if new_eval < old_eval:
                    difference = old_eval - new_eval
                    old_eval = new_eval
                    with torch.no_grad():
                        print("New evaluation value:", new_eval,
                              "  iteration:", i)
                        print("Saving...")
                        new_checkpoint_name = (checkpoint_name[:-3] +
                                               str(len(test_losses)) + ".th")