def Test():
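    """Rebuild the dual model from SAVED_WEIGHTS.pkl, evaluate it on the test
    split, write a confusion matrix and an accuracy CSV, and save correctly and
    incorrectly classified images to disk."""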
    print("Making model")
    model = Model()
    model.build_model_from_pickle("Graphs_and_Results/dual/" + version +
                                  "/SAVED_WEIGHTS.pkl")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, SELECTION_LIST)
    datafeeder.load_train_to_RAM()
    data, label = datafeeder.GetTest_dom()
    predictions, l2loss = model.call(data)

    assert len(label) == len(
        predictions), "something is wrong with the loaded model or labels"
    conf = np.zeros(shape=[len(label[0]), len(predictions[0])])
    for i in range(len(predictions)):
        predicted = np.argmax(predictions[i])  # predicted class index
        actual = np.argmax(label[i])  # ground-truth class index
        conf[predicted][actual] += 1  # rows: predicted class, columns: actual class
    conf_file = open("Graphs_and_Results/dual/" + version + "/confusion.csv", "w")
    conf_logger = csv.writer(conf_file, lineterminator="\n")

    results_file = open("Graphs_and_Results/dual/" + version + "/results.csv", "w")
    results_logger = csv.writer(results_file, lineterminator="\n")
    results_logger.writerow([accuracy(predictions, label)])

    for row in conf:
        conf_logger.writerow(row)

    conf_file.close()
    results_file.close()

    right, wrong = record_error(data, label, predictions)
    try:
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")
    except FileExistsError:
        # Directories are left over from a previous run: clear and recreate them.
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/wrong/")
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/right/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")

    for i in range(len(wrong)):
        print("Saving wrong image {}".format(i))
        carrier = np.reshape(wrong[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/wrong/" + str(i) + ".jpg",
            "L")

    for i in range(len(right)):
        print("Saving right image {}".format(i))
        carrier = np.reshape(right[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/right/" + str(i) + ".jpg",
            "L")

    print("This is the test set accuracy: {}".format(
        accuracy(predictions, label)))
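
# The accuracy() helper used above is defined elsewhere in the repository and
# is not part of this excerpt. A minimal sketch of the assumed behaviour
# (fraction of samples whose argmax prediction matches the one-hot label):
def accuracy(predictions, label):
    predictions = np.asarray(predictions)
    label = np.asarray(label)
    return float(np.mean(np.argmax(predictions, axis=1) == np.argmax(label, axis=1)))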
Example n. 2
def Big_Train():
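    """Train the model from scratch: log training and validation metrics to
    TensorBoard via the module-level logger, validate every 50 epochs, and run
    Test_live() once training finishes."""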

    print("Is there a GPU available: "),
    print(tf.test.is_gpu_available())
    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])

    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(logdir=base_directory)
    print("starting training")

    print("Making model")
    model = Model()
    model.build_model()
    tf.summary.trace_on(graph=True, profiler=True)

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)
        data = data[0]

        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  # forward pass through the model

            pred_loss = loss_function(label, predictions)  # cross-entropy loss
            pred_loss = pred_loss + L2WEIGHT * l2_loss  # add weighted L2 regularization

            if epoch == 0:  #creates graph
                with summary_writer.as_default():
                    tf.summary.trace_export(name="Graph",
                                            step=0,
                                            profiler_outdir=base_directory)

            if epoch % 50 == 0:  #takes care of validation accuracy
                valid_accuracy = Validation(model, datafeeder)
                with summary_writer.as_default():
                    logger.log_valid(valid_accuracy, epoch)

            with summary_writer.as_default():  # main per-epoch logging and printout
                logger.log_train(epoch, predictions, label, pred_loss, l2_loss,
                                 big_list)

        gradients = tape.gradient(pred_loss, big_list)
        optimizer.apply_gradients(zip(gradients, big_list))

    Test_live(model, datafeeder)
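
# Validation() is defined elsewhere in the repository. A minimal sketch of the
# assumed behaviour; GetValid_dom() is a hypothetical accessor mirroring the
# GetTest_dom() call used in the Test() functions, not confirmed by this excerpt:
def Validation(model, datafeeder):
    valid_data, valid_label = datafeeder.GetValid_dom()  # hypothetical accessor
    valid_predictions, _ = model.call(valid_data)
    return accuracy(valid_predictions, valid_label)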
Example n. 3
def Test():
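    """Rebuild the model from SAVED_WEIGHTS.pkl in base_directory and print the
    test-set accuracy."""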
    print("Making model")
    model = Model()
    model.build_model_from_pickle(base_directory + "SAVED_WEIGHTS.pkl")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])
    datafeeder.load_train_to_RAM()
    data, label = datafeeder.GetTest_dom()
    data = data[0]  # this is because we now have multiple images in the pickle
    predictions, l2loss = model.call(data)

    print("This is the test set accuracy: {}".format(
        accuracy(predictions, label)))
Example n. 4
def Test():
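    """Rebuild the basic model from saved weights, write record_error()'s
    wrong_list to a CSV, and save correctly and incorrectly classified images
    to disk."""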
    print("Making model")
    model = Model()
    model.build_model_from_pickle("Graphs_and_Results/basic/" + version + "/" +
                                  "SAVED_WEIGHTS.pkl")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])
    datafeeder.load_train_to_RAM()
    data, label = datafeeder.GetTest_dom()
    data = data[0]  # this is because we now have multiple images in the pickle
    predictions, l2loss = model.call(data)

    assert len(label) == len(
        predictions), "something is wrong with the loaded model or labels"
    right, wrong, wrong_list = record_error(data, label, predictions)

    print("This is the test set accuracy: {}".format(
        accuracy(predictions, label)))
    try:
        os.mkdir("Graphs_and_Results/basic/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/basic/" + version + "/right/")
        os.mkdir("Graphs_and_Results/wrongs/")
    except FileExistsError:
        # Left over from a previous run: clear and recreate the per-version folders.
        shutil.rmtree("Graphs_and_Results/basic/" + version + "/wrong/")
        shutil.rmtree("Graphs_and_Results/basic/" + version + "/right/")
        os.mkdir("Graphs_and_Results/basic/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/basic/" + version + "/right/")

    with open("Graphs_and_Results/wrongs/" + version + "SimpleCNN.csv",
              "w") as wrong_file:
        wrong_logger = csv.writer(wrong_file, lineterminator="\n")
        for element in wrong_list:
            wrong_logger.writerow([element])
    for i in range(len(wrong)):
        print("Saving wrong image {}".format(i))
        carrier = np.reshape(wrong[i], [100, 100])
        util.save_image(
            255 * carrier, "Graphs_and_Results/basic/" + version + "/wrong/" +
            str(i) + ".jpg", "L")

    for i in range(len(right)):
        print("Saving right image {}".format(i))
        carrier = np.reshape(right[i], [100, 100])
        util.save_image(
            255 * carrier, "Graphs_and_Results/basic/" + version + "/right/" +
            str(i) + ".jpg", "L")
def Big_Train():
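    """Train the dual model: log cross-entropy loss, accuracy, and L2 loss to
    CSV and TensorBoard, validate every 50 epochs, checkpoint the weights every
    100 epochs, and save right/wrong test images after training."""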
    try:
        os.mkdir("Graphs_and_Results/dual/" + version)
    except FileExistsError:
        print("dual/{} has already been created".format(version))

    status = tf.test.is_gpu_available()
    print("Is there a GPU available: {}".format(status))

    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, SELECTION_LIST)

    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(
        logdir="Graphs_and_Results/dual/" + version + "/")
    print("starting training")

    print("Making model")
    model = Model()
    model.build_model()

    train_logger = csv.writer(open(
        "Graphs_and_Results/dual/" + version + "/xentropyloss.csv", "w"),
                              lineterminator="\n")
    acc_logger = csv.writer(open(
        "Graphs_and_Results/dual/" + version + "/accuracy.csv", "w"),
                            lineterminator="\n")
    l2_logger = csv.writer(open(
        "Graphs_and_Results/dual/" + version + "/l2.csv", "w"),
                           lineterminator="\n")
    valid_logger = csv.writer(open(
        "Graphs_and_Results/dual/" + version + "/valid.csv", "w"),
                              lineterminator="\n")

    tf.summary.trace_on(
        graph=True,
        profiler=False)  # set profiler=True to also capture profiling data

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)
        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  # forward pass through the model

            pred_loss_ = loss_function(label, predictions)  # cross-entropy loss
            pred_loss = pred_loss_ + L2WEIGHT * l2_loss  # add weighted L2 regularization
            if epoch == 0:  #creates graph
                with summary_writer.as_default():
                    tf.summary.trace_export(
                        name="Graph",
                        step=0,
                        profiler_outdir="Graphs_and_Results/dual/" + version +
                        "/")

            train_logger.writerow([np.asarray(pred_loss)])
            acc_logger.writerow([accuracy(predictions, label)])
            l2_logger.writerow([np.asarray(l2_loss)])

            print("***********************")
            print("Finished epoch", epoch)
            print("Accuracy: {}".format(accuracy(predictions, label)))
            print("Loss: {}".format(np.asarray(pred_loss)))
            print("L2 Loss: {}".format(np.asarray(l2_loss)))
            print("***********************")

            if epoch % 20 == 0:
                with summary_writer.as_default():
                    tf.summary.scalar(name="XEntropyLoss",
                                      data=pred_loss_,
                                      step=epoch)
                    tf.summary.scalar(name="L2Loss", data=l2_loss, step=epoch)
                    tf.summary.scalar(name="Accuracy",
                                      data=accuracy(predictions, label),
                                      step=epoch)
                    for var in big_list:
                        name = str(var.name)
                        tf.summary.histogram(name=name, data=var, step=epoch)
                    tf.summary.flush()

            if epoch % 50 == 0:
                valid_accuracy = Validation(model, datafeeder)
                with summary_writer.as_default():
                    tf.summary.scalar(name="Validation_accuracy",
                                      data=valid_accuracy,
                                      step=epoch)
                valid_logger.writerow([valid_accuracy])

            if epoch % 100 == 0 and epoch > 1:
                print("\n##############SAVING MODE##############\n")
                try:
                    # Remove any previous checkpoint first; the file is opened in
                    # append mode below, so stale pickles would otherwise accumulate.
                    os.remove("Graphs_and_Results/dual/" + version +
                              "/SAVED_WEIGHTS.pkl")
                except FileNotFoundError:
                    print("the saved weights were not removed"
                          " because they were not there!")
                with open("Graphs_and_Results/dual/" + version +
                          "/SAVED_WEIGHTS.pkl", "ab") as dbfile:
                    pickle.dump(big_list, dbfile)

        gradients = tape.gradient(pred_loss, big_list)

        optimizer.apply_gradients(zip(gradients, big_list))

    right, wrong = Test_live(model, datafeeder)
    try:
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")
    except FileExistsError:
        # Directories are left over from a previous run: clear and recreate them.
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/wrong/")
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/right/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")

    for i in range(len(wrong)):
        print("Saving wrong image {}".format(i))
        carrier = np.reshape(wrong[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/wrong/" + str(i) + ".jpg",
            "L")

    for i in range(len(right)):
        print("Saving right image {}".format(i))
        carrier = np.reshape(right[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/right/" + str(i) + ".jpg",
            "L")
Example n. 6
def Big_Train():
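    """Train the basic single-input model: log metrics to CSV and TensorBoard,
    validate every 50 epochs, checkpoint the weights every 100 epochs, and run
    Test_live() once training finishes."""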
    try:
        os.mkdir("Graphs_and_Results/basic/" + version + "/")
    except FileExistsError:
        pass

    print("Is there a GPU available: "),
    print(tf.test.is_gpu_available())
    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])

    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(
        logdir="Graphs_and_Results/basic/" + version + "/")
    print("starting training")

    print("Making model")
    model = Model()
    model.build_model()
    tf.summary.trace_on(graph=True, profiler=True)

    train_logger = csv.writer(open(
        "Graphs_and_Results/basic/" + version + "/xentropyloss.csv", "w"),
                              lineterminator="\n")
    acc_logger = csv.writer(open(
        "Graphs_and_Results/basic/" + version + "/accuracy.csv", "w"),
                            lineterminator="\n")
    l2_logger = csv.writer(open(
        "Graphs_and_Results/basic/" + version + "/l2.csv", "w"),
                           lineterminator="\n")
    valid_logger = csv.writer(open(
        "Graphs_and_Results/basic/" + version + "/valid.csv", "w"),
                              lineterminator="\n")

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)
        data = data[0]
        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  # forward pass through the model

            pred_loss = loss_function(label, predictions)  # cross-entropy loss
            pred_loss = pred_loss + L2WEIGHT * l2_loss  # add weighted L2 regularization

            train_logger.writerow([np.asarray(pred_loss)])
            acc_logger.writerow([accuracy(predictions, label)])
            l2_logger.writerow([np.asarray(l2_loss)])

            if epoch == 0:  #creates graph
                with summary_writer.as_default():
                    tf.summary.trace_export(
                        name="Graph",
                        step=0,
                        profiler_outdir="Graphs_and_Results/basic/" + version +
                        "/")

            if epoch % 20 == 0 and epoch > 1:
                print("***********************")
                print("Finished epoch", epoch)
                print("Accuracy: {}".format(accuracy(predictions, label)))
                print("Loss: {}".format(np.asarray(pred_loss)))
                print("***********************")
                with summary_writer.as_default():
                    tf.summary.scalar(name="Loss", data=pred_loss, step=epoch)
                    tf.summary.scalar(name="Accuracy",
                                      data=accuracy(predictions, label),
                                      step=epoch)
                    for var in big_list:
                        name = str(var.name)
                        tf.summary.histogram(name="Variable_" + name,
                                             data=var,
                                             step=epoch)
                    tf.summary.flush()

            if epoch % 50 == 0:
                valid_accuracy = Validation(model, datafeeder)
                with summary_writer.as_default():
                    tf.summary.scalar(name="Validation_accuracy",
                                      data=valid_accuracy,
                                      step=epoch)
                valid_logger.writerow([valid_accuracy])

            if epoch % 100 == 0 and epoch > 1:
                print("\n##############SAVING MODE##############\n")
                try:
                    # Remove any previous checkpoint first; the file is opened in
                    # append mode below, so stale pickles would otherwise accumulate.
                    os.remove("Graphs_and_Results/basic/" + version + "/" +
                              "SAVED_WEIGHTS.pkl")
                except FileNotFoundError:
                    print("the saved weights were not removed,"
                          " because they were not there!")
                with open("Graphs_and_Results/basic/" + version + "/" +
                          "SAVED_WEIGHTS.pkl", "ab") as dbfile:
                    pickle.dump(big_list, dbfile)

        gradients = tape.gradient(pred_loss, big_list)

        optimizer.apply_gradients(zip(gradients, big_list))
    Test_live(model, datafeeder)
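
# Test_live() is defined elsewhere in the repository. A minimal sketch of the
# assumed behaviour, based on how it is called above (print the test-set
# accuracy and, in the dual variant, return the right/wrong images); the use
# of the three-value record_error() here is an assumption:
def Test_live(model, datafeeder):
    data, label = datafeeder.GetTest_dom()
    predictions, _ = model.call(data)
    print("This is the test set accuracy: {}".format(accuracy(predictions, label)))
    right, wrong, _ = record_error(data, label, predictions)
    return right, wrong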