# Assumed imports for this module. Project-specific names used below
# (Prep, Model, Validation, Test_live, accuracy, logger, util, big_list,
# version, base_directory, SELECTION_LIST, TEST_AMOUNT, VALID_AMOUNT,
# LEARNING_RATE_INIT, L2WEIGHT) are defined elsewhere in the repository.
import csv
import os
import pickle
import shutil

import numpy as np
import tensorflow as tf


# Variant 1: delegates per-epoch logging to the project's logger helper.
def Big_Train():
    print("Is there a GPU available: {}".format(tf.test.is_gpu_available()))
    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(logdir=base_directory)

    print("starting training")
    print("Making model")
    model = Model()
    model.build_model()

    tf.summary.trace_on(graph=True, profiler=True)

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)
        data = data[0]

        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  # forward pass (the big call)
            pred_loss = loss_function(label, predictions)  # cross-entropy loss
            pred_loss = pred_loss + L2WEIGHT * l2_loss  # L2 (ridge) weight penalty

        if epoch == 0:  # export the traced graph once
            with summary_writer.as_default():
                tf.summary.trace_export(name="Graph", step=0,
                                        profiler_outdir=base_directory)

        if epoch % 50 == 0:  # periodic validation accuracy
            valid_accuracy = Validation(model, datafeeder)
            with summary_writer.as_default():
                logger.log_valid(valid_accuracy, epoch)

        with summary_writer.as_default():  # main per-epoch logger and printout
            logger.log_train(epoch, predictions, label, pred_loss, l2_loss, big_list)

        gradients = tape.gradient(pred_loss, big_list)
        optimizer.apply_gradients(zip(gradients, big_list))

    Test_live(model, datafeeder)
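# NOTE (added sketch): the variants below call an accuracy(predictions, label)
# helper whose real implementation lives elsewhere in this repository. As a
# reference point only, a minimal stand-in, assuming one-hot labels and
# per-class scores (e.g. softmax outputs), could look like this; the project's
# actual helper may differ.
def accuracy(predictions, label):
    """Fraction of samples whose argmax prediction matches the one-hot label."""
    predicted_class = np.argmax(np.asarray(predictions), axis=1)
    true_class = np.argmax(np.asarray(label), axis=1)
    return float(np.mean(predicted_class == true_class))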
# Variant 2 ("dual"): adds CSV loggers, TensorBoard scalars/histograms,
# periodic weight pickling, and saves right/wrong test images.
def Big_Train():
    try:
        os.mkdir("Graphs_and_Results/dual/" + version)
    except FileExistsError:
        print("dual/{} has already been created".format(version))

    status = tf.test.is_gpu_available()
    print("Is there a GPU available: {}".format(status))
    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, SELECTION_LIST)
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(
        logdir="Graphs_and_Results/dual/" + version + "/")

    print("starting training")
    print("Making model")
    model = Model()
    model.build_model()

    train_logger = csv.writer(
        open("Graphs_and_Results/dual/" + version + "/xentropyloss.csv", "w"),
        lineterminator="\n")
    acc_logger = csv.writer(
        open("Graphs_and_Results/dual/" + version + "/accuracy.csv", "w"),
        lineterminator="\n")
    l2_logger = csv.writer(
        open("Graphs_and_Results/dual/" + version + "/l2.csv", "w"),
        lineterminator="\n")
    valid_logger = csv.writer(
        open("Graphs_and_Results/dual/" + version + "/valid.csv", "w"),
        lineterminator="\n")

    tf.summary.trace_on(graph=True, profiler=False)  # set profiler=True for compute history

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)

        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  # forward pass (the big call)
            pred_loss_ = loss_function(label, predictions)  # cross-entropy loss
            pred_loss = pred_loss_ + L2WEIGHT * l2_loss  # L2 (ridge) weight penalty

        if epoch == 0:  # export the traced graph once
            with summary_writer.as_default():
                tf.summary.trace_export(
                    name="Graph", step=0,
                    profiler_outdir="Graphs_and_Results/dual/" + version + "/")

        train_logger.writerow([np.asarray(pred_loss)])
        acc_logger.writerow([accuracy(predictions, label)])
        l2_logger.writerow([np.asarray(l2_loss)])

        print("***********************")
        print("Finished epoch", epoch)
        print("Accuracy: {}".format(accuracy(predictions, label)))
        print("Loss: {}".format(np.asarray(pred_loss)))
        print("L2 Loss: {}".format(np.asarray(l2_loss)))
        print("***********************")

        if epoch % 20 == 0:  # TensorBoard scalars and weight histograms
            with summary_writer.as_default():
                tf.summary.scalar(name="XEntropyLoss", data=pred_loss_, step=epoch)
                tf.summary.scalar(name="L2Loss", data=l2_loss, step=epoch)
                tf.summary.scalar(name="Accuracy",
                                  data=accuracy(predictions, label), step=epoch)
                for var in big_list:
                    name = str(var.name)
                    tf.summary.histogram(name=name, data=var, step=epoch)
                tf.summary.flush()

        if epoch % 50 == 0:  # periodic validation accuracy
            valid_accuracy = Validation(model, datafeeder)
            with summary_writer.as_default():
                tf.summary.scalar(name="Validation_accuracy",
                                  data=valid_accuracy, step=epoch)
            valid_logger.writerow([valid_accuracy])

        if epoch % 100 == 0 and epoch > 1:
            print("\n##############SAVING MODE##############\n")
            try:  # because for some reason, the pickle files are incremental
                os.remove("Graphs_and_Results/dual/" + version + "/SAVED_WEIGHTS.pkl")
            except FileNotFoundError:
                print("the saved weights were not removed because they were not there!")
            dbfile = open(
                "Graphs_and_Results/dual/" + version + "/SAVED_WEIGHTS.pkl", "ab")
            pickle.dump(big_list, dbfile)
            dbfile.close()

        gradients = tape.gradient(pred_loss, big_list)
        optimizer.apply_gradients(zip(gradients, big_list))

    right, wrong = Test_live(model, datafeeder)

    try:
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")
    except FileExistsError:  # wipe any stale results and recreate the folders
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/wrong/")
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/right/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")

    for i in range(len(wrong)):
        print("Saving wrong image {}".format(i))
        carrier = np.reshape(wrong[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/wrong/" + str(i) + ".jpg", "L")

    for i in range(len(right)):
        print("Saving right image {}".format(i))
        carrier = np.reshape(right[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/right/" + str(i) + ".jpg", "L")
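# NOTE (added sketch): the checkpointing above pickles big_list to
# SAVED_WEIGHTS.pkl, but no restore step appears in this file. One plausible
# way to load those values back into a freshly built model, assuming the
# pickled entries line up with big_list and are array-like values that
# tf.Variable.assign can accept, would be:
def load_saved_weights(path, variables):
    """Hypothetical helper: copy pickled weight values back into live variables."""
    with open(path, "rb") as dbfile:
        saved = pickle.load(dbfile)
    for var, value in zip(variables, saved):
        var.assign(value)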
# Variant 3 ("basic"): same training loop, logging to Graphs_and_Results/basic/.
def Big_Train():
    try:
        os.mkdir("Graphs_and_Results/basic/" + version + "/")
    except FileExistsError:
        pass

    print("Is there a GPU available: {}".format(tf.test.is_gpu_available()))
    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])
    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(
        logdir="Graphs_and_Results/basic/" + version + "/")

    print("starting training")
    print("Making model")
    model = Model()
    model.build_model()

    tf.summary.trace_on(graph=True, profiler=True)

    train_logger = csv.writer(
        open("Graphs_and_Results/basic/" + version + "/xentropyloss.csv", "w"),
        lineterminator="\n")
    acc_logger = csv.writer(
        open("Graphs_and_Results/basic/" + version + "/accuracy.csv", "w"),
        lineterminator="\n")
    l2_logger = csv.writer(
        open("Graphs_and_Results/basic/" + version + "/l2.csv", "w"),
        lineterminator="\n")
    valid_logger = csv.writer(
        open("Graphs_and_Results/basic/" + version + "/valid.csv", "w"),
        lineterminator="\n")

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)
        data = data[0]

        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  # forward pass (the big call)
            pred_loss = loss_function(label, predictions)  # cross-entropy loss
            pred_loss = pred_loss + L2WEIGHT * l2_loss  # L2 (ridge) weight penalty

        train_logger.writerow([np.asarray(pred_loss)])
        acc_logger.writerow([accuracy(predictions, label)])
        l2_logger.writerow([np.asarray(l2_loss)])

        if epoch == 0:  # export the traced graph once
            with summary_writer.as_default():
                tf.summary.trace_export(
                    name="Graph", step=0,
                    profiler_outdir="Graphs_and_Results/basic/" + version + "/")

        if epoch % 20 == 0 and epoch > 1:  # printout plus TensorBoard summaries
            print("***********************")
            print("Finished epoch", epoch)
            print("Accuracy: {}".format(accuracy(predictions, label)))
            print("Loss: {}".format(np.asarray(pred_loss)))
            print("***********************")
            with summary_writer.as_default():
                tf.summary.scalar(name="Loss", data=pred_loss, step=epoch)
                tf.summary.scalar(name="Accuracy",
                                  data=accuracy(predictions, label), step=epoch)
                for var in big_list:
                    name = str(var.name)
                    tf.summary.histogram(name="Variable_" + name, data=var, step=epoch)
                tf.summary.flush()

        if epoch % 50 == 0:  # periodic validation accuracy
            valid_accuracy = Validation(model, datafeeder)
            with summary_writer.as_default():
                tf.summary.scalar(name="Validation_accuracy",
                                  data=valid_accuracy, step=epoch)
            valid_logger.writerow([valid_accuracy])

        if epoch % 100 == 0 and epoch > 1:
            print("\n##############SAVING MODE##############\n")
            try:
                os.remove("Graphs_and_Results/basic/" + version + "/SAVED_WEIGHTS.pkl")
            except FileNotFoundError:
                print("the saved weights were not removed, because they were not there!")
            dbfile = open(
                "Graphs_and_Results/basic/" + version + "/SAVED_WEIGHTS.pkl", "ab")
            pickle.dump(big_list, dbfile)
            dbfile.close()

        gradients = tape.gradient(pred_loss, big_list)
        optimizer.apply_gradients(zip(gradients, big_list))

    Test_live(model, datafeeder)
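# NOTE (added sketch): none of the Big_Train variants above is invoked in this
# file. A typical entry point, assuming whichever definition is active in this
# module is the one you want to run, might simply be:
if __name__ == "__main__":
    Big_Train()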