Example #1
def confMat(sess):
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(
        os.path.dirname('Graphs_and_Results/'))
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)

    datafeeder = Prep()

    data, label = datafeeder.nextBatchTest_ConfMat()

    matrix = np.zeros([10, 10])

    prediction_ = sess.run(prediction,
                           feed_dict={
                               x: data,
                               truth: label,
                               hold_prob: 1
                           })
    # rows index the predicted class, columns the true class
    for l in range(len(prediction_)):
        k = np.argmax(prediction_[l])
        m = np.argmax(label[l])
        matrix[k][m] += 1
    test = open("Graphs_and_Results/confusion.csv", "w")
    logger = csv.writer(test, lineterminator="\n")

    for iterate in matrix:
        logger.writerow(iterate)
    print([
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ])
    print(matrix)
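
confMat above assumes a TF1-style graph: the placeholders x, truth and hold_prob, the prediction op, and the Saver are created at module level elsewhere in the project. A minimal hedged driver under that assumption (the first line is only needed when running under TensorFlow 2.x):

tf.compat.v1.disable_eager_execution()  # only needed on TF 2.x; plain TF 1.x builds graphs by default

with tf.compat.v1.Session() as sess:
    confMat(sess)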
def Conf_mat():
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    loss_function = tf.keras.losses.CategoricalCrossentropy()  # the model ends in a Softmax layer, so it outputs probabilities, not logits
    inputs = tf.keras.Input(shape=[32, 32, 3])

    x = Convolve([4, 4, 3, 32])(inputs)
    x = Convolve([4, 4, 32, 64])(x)
    x = Convolve([4, 4, 64, 128])(x)
    x = Flatten([-1, 4 * 4 * 128])(x)
    x = FC([4 * 4 * 128, 1024])(x)
    x = FC([1024, 10])(x)
    outputs = Softmax([])(x)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.summary()  # summary() prints the architecture itself
    model.compile(optimizer=optimizer,
                  loss=loss_function,
                  metrics=['accuracy'])
    model.load_weights("Graphs_and_Results/best_weights.h5")
    datafeeder = Prep()

    data, label = datafeeder.nextBatchTest()

    acc = model.evaluate(data, label, batch_size=100)
    print(acc)
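
Conf_mat above only reports loss and accuracy through model.evaluate. If a per-class confusion matrix is also wanted from the Keras model, a short sketch along these lines should work, assuming data and label are the one-hot test batch returned by nextBatchTest():

probs = model.predict(data, batch_size=100)  # softmax probabilities, shape [N, 10]
conf = np.zeros((10, 10), dtype=int)
for p, t in zip(probs, label):
    conf[np.argmax(p)][np.argmax(t)] += 1  # rows = predicted class, columns = true class
print(conf)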
def Test():
    print("Making model")
    model = Model()
    model.build_model_from_pickle("Graphs_and_Results/dual/" + version +
                                  "/SAVED_WEIGHTS.pkl")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, SELECTION_LIST)
    datafeeder.load_train_to_RAM()
    data, label = datafeeder.GetTest_dom()
    predictions, l2loss = model.call(data)

    assert len(label) == len(
        predictions), "something is wrong with the loaded model or labels"
    conf = np.zeros(shape=[len(label[0]), len(predictions[0])])
    for i in range(len(predictions)):
        k = np.argmax(predictions[i])
        l = np.argmax(label[i])
        conf[k][l] += 1
    test = open("Graphs_and_Results/dual/" + version + "/confusion.csv", "w")
    logger = csv.writer(test, lineterminator="\n")

    test_ = open("Graphs_and_Results/dual/" + version + "/results.csv", "w")
    logger_ = csv.writer(test_, lineterminator="\n")
    logger_.writerow([accuracy(predictions, label)])

    for iterate in conf:
        logger.writerow(iterate)

    right, wrong = record_error(data, label, predictions)
    try:
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")
    except:
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/wrong/")
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/right/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")

    for i in range(len(wrong)):
        print("Saving wrong image {}".format(i))
        carrier = np.reshape(wrong[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/wrong/" + str(i) + ".jpg",
            "L")

    for i in range(len(right)):
        print("Saving right image {}".format(i))
        carrier = np.reshape(right[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/right/" + str(i) + ".jpg",
            "L")

    print("This is the test set accuracy: {}".format(
        accuracy(predictions, label)))
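
Several of the examples call an accuracy(predictions, label) helper that is not shown on this page. A minimal sketch consistent with how it is used (softmax predictions against one-hot labels, returning a fraction between 0 and 1):

def accuracy(predictions, labels):
    correct = sum(int(np.argmax(p) == np.argmax(t)) for p, t in zip(predictions, labels))
    return correct / len(labels)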
Example #4
def Big_Train():

    print("Is there a GPU available: "),
    print(tf.test.is_gpu_available())
    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])

    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(logdir=base_directory)
    print("starting training")

    print("Making model")
    model = Model()
    model.build_model()
    tf.summary.trace_on(graph=True, profiler=True)

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)
        data = data[0]

        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  #this is the big call

            pred_loss = loss_function(label,
                                      predictions)  # cross-entropy loss
            pred_loss = pred_loss + L2WEIGHT * l2_loss  # adds L2 (weight-decay) regularization

            if epoch == 0:  #creates graph
                with summary_writer.as_default():
                    tf.summary.trace_export(name="Graph",
                                            step=0,
                                            profiler_outdir=base_directory)

            if epoch % 50 == 0:  #takes care of validation accuracy
                valid_accuracy = Validation(model, datafeeder)
                with summary_writer.as_default():
                    logger.log_valid(valid_accuracy, epoch)

            with summary_writer.as_default():  # main per-step training logger and printout
                logger.log_train(epoch, predictions, label, pred_loss, l2_loss,
                                 big_list)

        gradients = tape.gradient(pred_loss, big_list)
        optimizer.apply_gradients(zip(gradients, big_list))

    Test_live(model, datafeeder)
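
Validation(model, datafeeder) is likewise not defined in this snippet. A plausible sketch given how it is used, returning a scalar accuracy on the held-out split; GetValid_dom() is a hypothetical accessor mirroring GetTest_dom():

def Validation(model, datafeeder):
    data, label = datafeeder.GetValid_dom()  # hypothetical accessor for the validation split
    predictions, _ = model.call(data)
    return accuracy(predictions, label)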
Example #5
def Test():
    print("Making model")
    model = Model()
    model.build_model_from_pickle(base_directory + "SAVED_WEIGHTS.pkl")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])
    datafeeder.load_train_to_RAM()
    data, label = datafeeder.GetTest_dom()
    data = data[0]  # this is because we now have multiple images in the pickle
    predictions, l2loss = model.call(data)

    print("This is the test set accuracy: {}".format(
        accuracy(predictions, label)))
Example #6
def Test():
    print("Making model")
    model = Model()
    model.build_model_from_pickle("Graphs_and_Results/basic/" + version + "/" +
                                  "SAVED_WEIGHTS.pkl")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])
    datafeeder.load_train_to_RAM()
    data, label = datafeeder.GetTest_dom()
    data = data[0]  # this is because we now have multiple images in the pickle
    predictions, l2loss = model.call(data)

    assert len(label) == len(
        predictions), "something is wrong with the loaded model or labels"
    right, wrong, wrong_list = record_error(data, label, predictions)

    print("This is the test set accuracy: {}".format(
        accuracy(predictions, label)))
    try:
        os.mkdir("Graphs_and_Results/basic/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/basic/" + version + "/right/")
        os.mkdir("Graphs_and_Results/wrongs/")
    except:
        shutil.rmtree("Graphs_and_Results/basic/" + version + "/wrong/")
        shutil.rmtree("Graphs_and_Results/basic/" + version + "/right/")
        os.mkdir("Graphs_and_Results/basic/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/basic/" + version + "/right/")

    wrong_logger = csv.writer(open(
        "Graphs_and_Results/wrongs/" + version + "SimpleCNN.csv", "w"),
                              lineterminator="\n")
    for element in wrong_list:
        wrong_logger.writerow([element])
    for i in range(len(wrong)):
        print("Saving wrong image {}".format(i))
        carrier = np.reshape(wrong[i], [100, 100])
        util.save_image(
            255 * carrier, "Graphs_and_Results/basic/" + version + "/wrong/" +
            str(i) + ".jpg", "L")

    for i in range(len(right)):
        print("Saving right image {}".format(i))
        carrier = np.reshape(right[i], [100, 100])
        util.save_image(
            255 * carrier, "Graphs_and_Results/basic/" + version + "/right/" +
            str(i) + ".jpg", "L")
Example #7
def Big_Train(sess):
    sess.run(tf.global_variables_initializer())
    writer = tf.compat.v1.summary.FileWriter(
        "Graphs_and_Results/",
        sess.graph)  # writes the graph and summaries for TensorBoard
    datafeeder = Prep()

    display, _ = datafeeder.nextBatchTrain(10)
    tf.compat.v1.summary.image("10 training data examples",
                               display,
                               max_outputs=10)
    for i in range(501):
        data, label = datafeeder.nextBatchTrain(100)
        prediction_, loss_, summary, _ = sess.run(
            [prediction, loss, summary_op, train],
            feed_dict={
                x: data,
                truth: label,
                hold_prob: 1
            })
        print("Epoch: {}. Loss: {}".format(i, loss_))
        if i % 10 == 0:
            writer.add_summary(summary, global_step=i)
        if i % 100 == 0 and i > 0:
            saver.save(sess, "Graphs_and_Results/CNN_test", global_step=i)
            data, label = datafeeder.nextBatchTest()
            correct = 0
            prediction_ = sess.run(prediction,
                                   feed_dict={
                                       x: data,
                                       truth: label,
                                       hold_prob: 1
                                   })
            for l in range(len(label)):
                if (np.argmax(prediction_[l]) == np.argmax(label[l])):
                    correct += 1
            print("This is the accuracy: {}".format(correct /
                                                    len(prediction_)))
def Big_Train():

    datafeeder = Prep()

    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    loss_function = tf.keras.losses.CategoricalCrossentropy()  # the model ends in a Softmax layer, so it outputs probabilities, not logits
    inputs = tf.keras.Input(shape=[32, 32, 3])

    x = Convolve([4, 4, 3, 32])(inputs)
    x = Convolve([4, 4, 32, 64])(x)
    x = Convolve([4, 4, 64, 128])(x)
    x = Flatten([-1, 4 * 4 * 128])(x)
    x = FC([4 * 4 * 128, 1024])(x)
    x = FC([1024, 10])(x)
    outputs = Softmax([])(x)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    model.summary()  # summary() prints the architecture itself
    model.compile(optimizer=optimizer,
                  loss=loss_function,
                  metrics=['accuracy'])

    data, label = datafeeder.nextBatchTrain_all()
    tensorboard = tf.keras.callbacks.TensorBoard(log_dir='Graphs_and_Results',
                                                 histogram_freq=1,
                                                 write_graph=True,
                                                 write_grads=True,
                                                 update_freq='batch')
    cp = tf.keras.callbacks.ModelCheckpoint("Graphs_and_Results/current.ckpt",
                                            verbose=1,
                                            save_weights_only=True,
                                            period=1)
    model.fit(data,
              label,
              batch_size=100,
              epochs=5,
              callbacks=[tensorboard, cp])
    model.save_weights("Graphs_and_Results/best_weights.h5")
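
The ModelCheckpoint callback above overwrites Graphs_and_Results/current.ckpt after every epoch, while best_weights.h5 is only written once fit() returns. A short hedged sketch of resuming from the last completed epoch if training is interrupted:

model.load_weights("Graphs_and_Results/current.ckpt")  # restores from the TF-format checkpoint written by the callback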
def Big_Train():
    try:
        os.mkdir("Graphs_and_Results/dual/" + version)
    except:
        print("dual/{} has already been created".format(version))

    status = tf.test.is_gpu_available()
    print("Is there a GPU available: {}".format(status))

    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, SELECTION_LIST)

    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(
        logdir="Graphs_and_Results/dual/" + version + "/")
    print("starting training")

    print("Making model")
    model = Model()
    model.build_model()

    train_logger = csv.writer(open(
        "Graphs_and_Results/dual/" + version + "/xentropyloss.csv", "w"),
                              lineterminator="\n")
    acc_logger = csv.writer(open(
        "Graphs_and_Results/dual/" + version + "/accuracy.csv", "w"),
                            lineterminator="\n")
    l2_logger = csv.writer(open(
        "Graphs_and_Results/dual/" + version + "/l2.csv", "w"),
                           lineterminator="\n")
    valid_logger = csv.writer(open(
        "Graphs_and_Results/dual/" + version + "/valid.csv", "w"),
                              lineterminator="\n")

    tf.summary.trace_on(
        graph=True,
        profiler=False)  #set profiler to true if you want compute history

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)
        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  #this is the big call

            pred_loss_ = loss_function(label,
                                       predictions)  #this is the loss function
            pred_loss = pred_loss_ + L2WEIGHT * l2_loss
            if epoch == 0:  #creates graph
                with summary_writer.as_default():
                    tf.summary.trace_export(
                        name="Graph",
                        step=0,
                        profiler_outdir="Graphs_and_Results/dual/" + version +
                        "/")

            train_logger.writerow([np.asarray(pred_loss)])
            acc_logger.writerow([accuracy(predictions, label)])
            l2_logger.writerow([np.asarray(l2_loss)])

            print("***********************")
            print("Finished epoch", epoch)
            print("Accuracy: {}".format(accuracy(predictions, label)))
            print("Loss: {}".format(np.asarray(pred_loss)))
            print("L2 Loss: {}".format(np.asarray(l2_loss)))
            print("***********************")

            if epoch % 20 == 0:
                with summary_writer.as_default():
                    tf.summary.scalar(name="XEntropyLoss",
                                      data=pred_loss_,
                                      step=epoch)
                    tf.summary.scalar(name="L2Loss", data=l2_loss, step=epoch)
                    tf.summary.scalar(name="Accuracy",
                                      data=accuracy(predictions, label),
                                      step=epoch)
                    for var in big_list:
                        name = str(var.name)
                        tf.summary.histogram(name=name, data=var, step=epoch)
                    tf.summary.flush()

            if epoch % 50 == 0:
                valid_accuracy = Validation(model, datafeeder)
                with summary_writer.as_default():
                    tf.summary.scalar(name="Validation_accuracy",
                                      data=valid_accuracy,
                                      step=epoch)
                valid_logger.writerow([valid_accuracy])

            if epoch % 100 == 0 and epoch > 1:
                print("\n##############SAVING MODE##############\n")
                try:  # the pickle is opened in append mode below, so remove any old file first
                    os.remove("Graphs_and_Results/dual/" + version +
                              "/SAVED_WEIGHTS.pkl")
                except:
                    print(
                        "the saved weights were not removed because they were not there!"
                    )
                dbfile = open(
                    "Graphs_and_Results/dual/" + version +
                    "/SAVED_WEIGHTS.pkl", "ab")

                pickle.dump(big_list, dbfile)

        gradients = tape.gradient(pred_loss, big_list)

        optimizer.apply_gradients(zip(gradients, big_list))

    right, wrong = Test_live(model, datafeeder)
    try:
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")
    except:
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/wrong/")
        shutil.rmtree("Graphs_and_Results/dual/" + version + "/right/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/wrong/")
        os.mkdir("Graphs_and_Results/dual/" + version + "/right/")

    for i in range(len(wrong)):
        print("Saving wrong image {}".format(i))
        carrier = np.reshape(wrong[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/wrong/" + str(i) + ".jpg",
            "L")

    for i in range(len(right)):
        print("Saving right image {}".format(i))
        carrier = np.reshape(right[i], [100, 100])
        util.save_image(
            255 * carrier,
            "Graphs_and_Results/dual/" + version + "/right/" + str(i) + ".jpg",
            "L")
Example #10
def Big_Train():
    try:
        os.mkdir("Graphs_and_Results/basic/" + version + "/")
    except:
        pass

    print("Is there a GPU available: "),
    print(tf.test.is_gpu_available())
    print("*****************Training*****************")

    datafeeder = Prep(TEST_AMOUNT, VALID_AMOUNT, [version])

    optimizer = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE_INIT)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    print("loading dataset")
    datafeeder.load_train_to_RAM()  # loads the training data to RAM
    summary_writer = tf.summary.create_file_writer(
        logdir="Graphs_and_Results/basic/" + version + "/")
    print("starting training")

    print("Making model")
    model = Model()
    model.build_model()
    tf.summary.trace_on(graph=True, profiler=True)

    train_logger = csv.writer(open(
        "Graphs_and_Results/basic/" + version + "/xentropyloss.csv", "w"),
                              lineterminator="\n")
    acc_logger = csv.writer(open(
        "Graphs_and_Results/basic/" + version + "/accuracy.csv", "w"),
                            lineterminator="\n")
    l2_logger = csv.writer(open(
        "Graphs_and_Results/basic/" + version + "/l2.csv", "w"),
                           lineterminator="\n")
    valid_logger = csv.writer(open(
        "Graphs_and_Results/basic/" + version + "/valid.csv", "w"),
                              lineterminator="\n")

    for epoch in range(1001):
        data, label = datafeeder.nextBatchTrain_dom(150)
        data = data[0]
        with tf.GradientTape() as tape:
            predictions, l2_loss = model.call(data)  #this is the big call

            pred_loss = loss_function(label,
                                      predictions)  # cross-entropy loss
            pred_loss = pred_loss + L2WEIGHT * l2_loss  # adds L2 (weight-decay) regularization

            train_logger.writerow([np.asarray(pred_loss)])
            acc_logger.writerow([accuracy(predictions, label)])
            l2_logger.writerow([np.asarray(l2_loss)])

            if epoch == 0:  #creates graph
                with summary_writer.as_default():
                    tf.summary.trace_export(
                        name="Graph",
                        step=0,
                        profiler_outdir="Graphs_and_Results/basic/" + version +
                        "/")

            if epoch % 20 == 0 and epoch > 1:
                print("***********************")
                print("Finished epoch", epoch)
                print("Accuracy: {}".format(accuracy(predictions, label)))
                print("Loss: {}".format(np.asarray(pred_loss)))
                print("***********************")
                with summary_writer.as_default():
                    tf.summary.scalar(name="Loss", data=pred_loss, step=epoch)
                    tf.summary.scalar(name="Accuracy",
                                      data=accuracy(predictions, label),
                                      step=epoch)
                    for var in big_list:
                        name = str(var.name)
                        tf.summary.histogram(name="Variable_" + name,
                                             data=var,
                                             step=epoch)
                    tf.summary.flush()

            if epoch % 50 == 0:
                valid_accuracy = Validation(model, datafeeder)
                with summary_writer.as_default():
                    tf.summary.scalar(name="Validation_accuracy",
                                      data=valid_accuracy,
                                      step=epoch)
                valid_logger.writerow([valid_accuracy])

            if epoch % 100 == 0 and epoch > 1:
                print("\n##############SAVING MODE##############\n")
                try:
                    os.remove("Graphs_and_Results/basic/" + version + "/" +
                              "SAVED_WEIGHTS.pkl")
                except:
                    print(
                        "the saved weights were not removed, because they were not there!"
                    )
                dbfile = open(
                    "Graphs_and_Results/basic/" + version + "/" +
                    "SAVED_WEIGHTS.pkl", "ab")
                pickle.dump(big_list, dbfile)

        gradients = tape.gradient(pred_loss, big_list)

        optimizer.apply_gradients(zip(gradients, big_list))
    Test_live(model, datafeeder)
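
The SAVED_WEIGHTS.pkl files written every 100 steps above are what build_model_from_pickle reads back in the Test examples. A minimal, speculative sketch of that restore path, assuming big_list is the flat list of tf.Variable objects the pickle was dumped from:

with open("Graphs_and_Results/basic/" + version + "/SAVED_WEIGHTS.pkl", "rb") as f:
    saved_weights = pickle.load(f)
for var, value in zip(big_list, saved_weights):
    var.assign(np.asarray(value))  # copy the pickled values back into the live variables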