                nb_blocks_per_stack=3, backcast_length=backcast_length,
                hidden_layer_units=128, share_weights_in_stack=False, device=device)
optimiser = optim.Adam(net.parameters())
test_losses = []
actual_class_dir = data_dir + "/" + name + "/"
for (_, dirs, files) in os.walk(actual_class_dir):
    iteration = 0
    for file in files:
        if 'mat' in file:
            continue  # skip the .mat companion files
        iteration += 1
        print(iteration)
        if iteration > 30:  # cap the number of files used per class
            break
        data, x_test, y_test, norm_constant = naf.one_file_training_data(
            actual_class_dir, file, forecast_length, backcast_length, batch_size)
        # Alternate evaluation on the held-out split with 100-gradient-step
        # training rounds on the current file.
        for i in range(10):
            naf.eval_test(backcast_length, forecast_length, net, norm_constant,
                          test_losses, x_test, y_test)
            naf.train_100_grad_steps(checkpoint_name, data, device, net,
                                     optimiser, test_losses)
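The body of naf.train_100_grad_steps is defined elsewhere. As a rough, non-authoritative sketch of the pattern such a helper follows (the name train_100_grad_steps_sketch, the checkpoint dictionary layout, and the (backcast, forecast) return convention of the reference N-BEATS PyTorch implementation are assumptions here, not the code above): draw batches, take up to 100 MSE gradient steps on the forecast horizon, and checkpoint the weights.

import torch
import torch.nn.functional as F

def train_100_grad_steps_sketch(data, device, net, optimiser, test_losses,
                                checkpoint_name, global_step=0):
    """Take up to 100 gradient steps on (x, y) batches drawn from `data`."""
    net.train()
    for x_batch, y_batch in data:
        global_step += 1
        optimiser.zero_grad()
        # N-BEATS networks return (backcast, forecast); only the forecast
        # horizon is penalised with MSE here (an assumption of this sketch).
        _, forecast = net(torch.tensor(x_batch, dtype=torch.float).to(device))
        loss = F.mse_loss(forecast,
                          torch.tensor(y_batch, dtype=torch.float).to(device))
        loss.backward()
        optimiser.step()
        if global_step % 30 == 0 and test_losses:
            print('grad_step = %d, tr_loss = %.6f, te_loss = %.6f'
                  % (global_step, loss.item(), test_losses[-1]))
        if global_step % 100 == 0:
            # Checkpoint so later runs (and evaluation) can reload the weights.
            torch.save({'grad_step': global_step,
                        'model_state_dict': net.state_dict(),
                        'optimiser_state_dict': optimiser.state_dict()},
                       checkpoint_name)
            break
    return global_step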
print("\t\t FIle loop, epoch: %d\n" % (epoch)) plot_file = True i = 0 if 'mat' in fil: continue print("Reading files from: %s, file loaded: %s" % (actual_class_dir, fil)) if epoch >= epoch_limit: #or difference < threshold: break data, x_train, y_train, x_test, y_test, norm_constant, diagnosis = naf.one_file_training_data( actual_class_dir, fil, forecast_length, backcast_length, batch_size, device, lead=lead) while i < 2: #old was 5 #difference > threshold and i += 1 epoch += 1 print("Actual epoch: ", epoch, "\nActual inside file loop: ", i) global_step = train_full_grad_steps( data, device, net, optimiser, test_losses, training_models + training_checkpoint, x_train.shape[0]) train_eval = naf.evaluate_training(backcast_length,