def Conf_mat():
    """Rebuild the CNN, load the best saved weights, and report test accuracy.

    NOTE(review): despite the name, this reports plain accuracy via
    ``model.evaluate`` — no confusion matrix is computed here.

    Side effects: prints the model summary and the evaluation result;
    reads "Graphs_and_Results/best_weights.h5" from disk.
    """
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
    # NOTE(review): the network ends in a Softmax layer below, yet the loss
    # uses from_logits=True — that applies a second softmax inside the loss.
    # Left unchanged to stay consistent with how the saved weights were
    # trained; confirm against the training code.
    loss_function = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

    # 32x32x3 input (CIFAR-sized images — presumably; verify against Prep()).
    inputs = tf.keras.Input(shape=[32, 32, 3])
    x = Convolve([4, 4, 3, 32])(inputs)
    x = Convolve([4, 4, 32, 64])(x)
    x = Convolve([4, 4, 64, 128])(x)
    x = Flatten([-1, 4 * 4 * 128])(x)
    x = FC([4 * 4 * 128, 1024])(x)
    x = FC([1024, 10])(x)
    outputs = Softmax([])(x)

    model = tf.keras.Model(inputs=inputs, outputs=outputs)
    # Fix: Model.summary() prints the table itself and returns None, so the
    # original print(model.summary()) also emitted a stray "None" line.
    model.summary()
    model.compile(optimizer=optimizer, loss=loss_function,
                  metrics=['accuracy'])
    model.load_weights("Graphs_and_Results/best_weights.h5")

    # Evaluate on the held-out test batch supplied by the data feeder.
    datafeeder = Prep()
    data, label = datafeeder.nextBatchTest()
    acc = model.evaluate(data, label, batch_size=100)
    print(acc)
def Big_Train(sess):
    """Run 501 training steps in a TF1 session, then print test accuracy.

    Args:
        sess: an open ``tf.compat.v1.Session`` whose graph already contains
            the module-level ops ``prediction``, ``loss``, ``summary_op``,
            ``train`` and the placeholders ``x``, ``truth``, ``hold_prob``.

    Side effects: writes TensorBoard summaries and periodic checkpoints
    under "Graphs_and_Results/"; prints per-step loss and final accuracy.
    """
    # Consistency fix: the original called the bare-namespace
    # tf.global_variables_initializer() while everything else in this
    # function uses the tf.compat.v1 namespace.
    sess.run(tf.compat.v1.global_variables_initializer())

    # Writes the graph (and later the merged summaries) for TensorBoard.
    writer = tf.compat.v1.summary.FileWriter("Graphs_and_Results/",
                                             sess.graph)

    datafeeder = Prep()
    display, _ = datafeeder.nextBatchTrain(10)
    # NOTE(review): this image summary is created after summary_op was
    # (presumably) merged elsewhere in the module, so it may never actually
    # be evaluated/written — verify in TensorBoard.
    tf.compat.v1.summary.image("10 training data examples", display,
                               max_outputs=10)

    for step in range(501):
        data, label = datafeeder.nextBatchTrain(100)
        prediction_, loss_, summary, _ = sess.run(
            [prediction, loss, summary_op, train],
            feed_dict={x: data, truth: label, hold_prob: 1})
        print("Epoch: {}. Loss: {}".format(step, loss_))
        # Log every 10 steps; checkpoint every 100 (skipping step 0).
        if step % 10 == 0:
            writer.add_summary(summary, global_step=step)
        if step % 100 == 0 and step > 0:
            saver.save(sess, "Graphs_and_Results/CNN_test", global_step=step)

    # Final evaluation on the test batch.
    data, label = datafeeder.nextBatchTest()
    prediction_ = sess.run(prediction,
                           feed_dict={x: data, truth: label, hold_prob: 1})
    # Idiom: vectorized argmax comparison replaces the original manual
    # element-counting loop; same result, one C-level pass.
    correct = int(np.sum(np.argmax(prediction_, axis=1)
                         == np.argmax(label, axis=1)))
    print("This is the accuracy: {}".format(correct / len(prediction_)))