def main(_):
    resized_width = 128
    resized_height = 128

    # Build the CNN and restore the weights saved during training.
    model = build_CNN()
    opt = Adam(lr=FLAGS.learning_rate)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    model.load_weights(os.path.join(FLAGS.path, "save_model", "CNN.h5df"))

    # Evaluate on the full test split (-1 requests the whole split).
    image_lists = data_process.create_image_lists(FLAGS.images_dir)
    test_datas, test_labels = data_process.get_batch_of_data(
        image_lists, -1, FLAGS.images_dir, "test",
        resized_width, resized_height)
    test_loss, test_acc = model.evaluate(test_datas, test_labels)
    print("Test accuracy:{0:.4f}, test loss:{1:.4f}".format(
        test_acc, test_loss))

    # Save the raw predictions and ground-truth labels for later analysis.
    prediction = model.predict(test_datas)
    pre_data_dir = "../../predictData/CNN"
    if not os.path.exists(pre_data_dir):
        os.makedirs(pre_data_dir)
    with h5py.File(os.path.join(pre_data_dir,
                                "prediction_and_labels.h5"), "w") as f:
        f["prediction"] = prediction
        f["truth"] = test_labels
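# A minimal sketch of reading the saved predictions back, assuming the
# file layout written above ("prediction" and "truth" datasets). The
# argmax comparison is illustrative and not part of the original script.
import h5py
import numpy as np

with h5py.File("../../predictData/CNN/prediction_and_labels.h5", "r") as f:
    prediction = f["prediction"][...]   # (num_samples, num_classes) scores
    truth = f["truth"][...]             # one-hot ground-truth labels

# Overall accuracy recomputed from the stored arrays.
pred_idx = np.argmax(prediction, axis=1)
true_idx = np.argmax(truth, axis=1)
print("accuracy: {:.4f}".format(np.mean(pred_idx == true_idx)))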
def main(_):
    path = FLAGS.path
    image_name = "random"

    # Generate a random image and add a batch dimension.
    img = np.random.random((128, 128, 3))
    # cv2.imwrite(os.path.join(path, "visualization/filter_sample",
    #                          "random_image.jpg"), img * 255)
    img = np.array([img])

    model = build_CNN()
    model.load_weights(os.path.join(path, "weights/weights", "model.h5df"))

    # Select the corresponding conv_block.
    layer_names = ["max_pool1", "max_pool2", "max_pool3", "max_pool4"]
    # layer_names = ["max_pool1"]
    for layer_name in layer_names:
        save_path = os.path.join(path, "visualization/filter_sample",
                                 image_name)
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        filters = conv_filter(model, layer_name, img)
        filters_show(filters, FLAGS.filter_num, save_path,
                     layer_name + ".jpg")
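# A minimal sketch of what a conv_filter-style helper could look like,
# assuming it returns the activations of `layer_name` for `img`. The
# project's actual implementation is not shown here, so treat this as an
# illustrative stand-in using the standard Keras intermediate-output idiom.
from keras.models import Model

def conv_filter(model, layer_name, img):
    # Sub-model mapping the network input to the chosen layer's output.
    intermediate = Model(inputs=model.input,
                         outputs=model.get_layer(layer_name).output)
    # Shape: (1, height, width, channels) for a pooling layer.
    return intermediate.predict(img)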
def main(_):
    path = FLAGS.path
    images_dir = FLAGS.images_dir
    category = "test"
    image_lists = data_process.create_image_lists(images_dir)

    model = build_CNN()
    model.load_weights(os.path.join(path, "weights/weights", "model.h5df"))

    # Select a different intermediate layer and change the output figure
    # size in "image_show".
    # layer_names = ["max_pool1", "max_pool2", "max_pool3", "max_pool4"]
    layer_names = ["max_pool4"]
    image_num = 120

    # Load and normalize the first `image_num` test images of every class.
    images = []
    file_names = []
    for label_index, label_name in enumerate(image_lists.keys()):
        image_list = image_lists[label_name][category]
        minlist = image_list[0:image_num]
        for image_index, image_name in enumerate(minlist):
            image_path = os.path.join(images_dir, category, label_name,
                                      image_name)
            image_data = cv2.imread(image_path)
            images.append(
                data_process.image_normalization(image_data, 128, 128))
            file_names.append(image_name)
    images = np.array(images)
    file_names = np.array(file_names)
    print(images.shape)
    print(file_names.shape)

    for layer_name in layer_names:
        for label_index, label_name in enumerate(image_lists.keys()):
            img_inputs = images[image_num * label_index:
                                image_num * (label_index + 1)]
            img_names = file_names[image_num * label_index:
                                   image_num * (label_index + 1)]
            feature_map = conv_output(model, layer_name, img_inputs)
            # If you want to save the channel with maximum activation,
            # please comment the next 2 lines.
            save_path = os.path.join(path, "visualization/Hidden_layer",
                                     layer_name, label_name)
            # If you want to save the channel with maximum activation,
            # please uncomment the next 2 lines.
            # save_path = os.path.join(
            #     path, "visualization/Hidden_layer/activation_max",
            #     layer_name, label_name)
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            for i in range(image_num):
                image = feature_map[i]
                image_name = img_names[i]
                # If you want to save the channel with maximum activation,
                # please comment the next line.
                image_show(image, image.shape[-1], save_path, image_name)
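# A minimal sketch of an image_show-style helper, assuming it tiles the
# channels of one feature map into a square grid and saves the figure;
# the project's actual implementation (and its figure sizing) may differ.
import math
import os
import matplotlib
matplotlib.use("Agg")  # save figures without a display
import matplotlib.pyplot as plt

def image_show(feature_map, channel_num, save_path, image_name):
    # feature_map: (height, width, channels) activations for one image.
    cols = int(math.ceil(math.sqrt(channel_num)))
    rows = int(math.ceil(channel_num / float(cols)))
    fig, axes = plt.subplots(rows, cols, figsize=(cols, rows),
                             squeeze=False)
    for i, ax in enumerate(axes.flat):
        ax.axis("off")
        if i < channel_num:
            ax.imshow(feature_map[:, :, i], cmap="viridis")
    fig.savefig(os.path.join(save_path, image_name))
    plt.close(fig)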
def main(_):
    path = FLAGS.path
    images_dir = FLAGS.images_dir
    category = "test"
    image_lists = data_process.create_image_lists(images_dir)

    model = build_CNN()
    model.load_weights(os.path.join(path, "weights/weights", "model.h5df"))

    layer_names = ["max_pool1", "max_pool2", "max_pool3", "max_pool4"]
    # layer_names = ["max_pool4"]
    # Different conv_blocks have different spatial resolutions.
    for layer_name in layer_names:
        if layer_name == "max_pool1":
            resolution = "63"
        elif layer_name == "max_pool2":
            resolution = "30"
        elif layer_name == "max_pool3":
            resolution = "14"
        elif layer_name == "max_pool4":
            resolution = "6"
        for label_index, label_name in enumerate(image_lists.keys()):
            image_list = image_lists[label_name][category]
            minlist = image_list[0:120]
            for image_index, image_name in enumerate(minlist):
                image_path = os.path.join(images_dir, category, label_name,
                                          image_name)
                # If you want to save "heatmap", please comment the next
                # 2 lines.
                save_path = os.path.join(path, "visualization/CAM",
                                         resolution, label_name)
                # If you want to save "heatmap", please uncomment the
                # following.
                # save_path = os.path.join(
                #     path, "visualization/CAM/heatmap", resolution,
                #     label_name)
                if not os.path.exists(save_path):
                    os.makedirs(save_path)
                Visualizing_heatmaps(model, layer_name, image_path,
                                     save_path, image_name)
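# A minimal Grad-CAM-style sketch of what Visualizing_heatmaps might do,
# written against the Keras 2.x / TF 1.x backend API the rest of the code
# targets. The project's actual helper is not shown, so the class-index
# handling and overlay weights here are assumptions.
import cv2
import numpy as np
from keras import backend as K

def Visualizing_heatmaps(model, layer_name, image_path, save_path,
                         image_name):
    img = cv2.imread(image_path)
    x = np.array([data_process.image_normalization(img, 128, 128)])

    # Gradient of the top predicted class w.r.t. the chosen conv layer.
    class_idx = np.argmax(model.predict(x)[0])
    class_output = model.output[:, class_idx]
    conv_layer = model.get_layer(layer_name)
    grads = K.gradients(class_output, conv_layer.output)[0]
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
    iterate = K.function([model.input],
                         [pooled_grads, conv_layer.output[0]])
    pooled_grads_value, conv_output_value = iterate([x])

    # Weight each channel by its pooled gradient, then average and rectify.
    heatmap = np.mean(conv_output_value * pooled_grads_value, axis=-1)
    heatmap = np.maximum(heatmap, 0)
    heatmap /= (np.max(heatmap) + 1e-8)

    # Overlay the heatmap on the original image and save.
    heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
    heatmap = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    overlay = cv2.addWeighted(img, 0.6, heatmap, 0.4, 0)
    cv2.imwrite(os.path.join(save_path, image_name), overlay)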
def main(_):
    resized_width = 128
    resized_height = 128

    model = build_CNN()
    opt = Adam(lr=FLAGS.learning_rate)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    model.load_weights(os.path.join(FLAGS.path, "weights/weights",
                                    "model.h5df"))

    image_lists = data_process.create_image_lists(FLAGS.images_dir)
    test_datas, test_labels = data_process.get_batch_of_data(
        image_lists, -1, FLAGS.images_dir, "test",
        resized_width, resized_height)
    test_loss, test_acc = model.evaluate(test_datas, test_labels)
    print("Test accuracy:{0:.4f}, test loss:{1:.4f}".format(
        test_acc, test_loss))
def main(_):
    resized_width = 128
    resized_height = 128
    since = time.time()

    model = build_CNN()
    model.summary()
    opt = Adam(lr=FLAGS.learning_rate)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # Callbacks: custom result logger, best-on-validation checkpoint,
    # and TensorBoard logging.
    result = get_result()
    checkpoint_path = os.path.join(FLAGS.path, "save_model", "CNN.h5df")
    checkpoint = ModelCheckpoint(filepath=checkpoint_path,
                                 save_best_only=True,
                                 save_weights_only=True,
                                 monitor="val_acc",
                                 mode="max")
    tb = TensorBoard(log_dir=os.path.join(FLAGS.path,
                                          "results/results/logs"))
    callbacks = [result, checkpoint, tb]

    image_lists = data_process.create_image_lists(FLAGS.images_dir)
    with gfile.FastGFile(os.path.join(FLAGS.path,
                                      "results/output_labels.txt"),
                         "w") as f:
        f.write("\n".join(image_lists.keys()) + "\n")

    val_datas, val_labels = data_process.get_batch_of_data(
        image_lists, -1, FLAGS.images_dir, "val",
        resized_width, resized_height)
    model.fit_generator(generate_train_data(image_lists, FLAGS.images_dir,
                                            FLAGS.batch_size,
                                            resized_width, resized_height),
                        epochs=FLAGS.epochs,
                        steps_per_epoch=100,
                        validation_data=(val_datas, val_labels),
                        callbacks=callbacks)

    # Evaluate the final weights, then the best-on-validation checkpoint.
    test_datas, test_labels = data_process.get_batch_of_data(
        image_lists, -1, FLAGS.images_dir, "test",
        resized_width, resized_height)
    test_loss1, test_acc1 = model.evaluate(test_datas, test_labels)
    print("Test accuracy:{0:.4f}, test loss:{1:.4f}".format(
        test_acc1, test_loss1))
    model.load_weights(os.path.join(FLAGS.path, "save_model", "CNN.h5df"))
    test_loss2, test_acc2 = model.evaluate(test_datas, test_labels)

    time_elapsed = time.time() - since
    print("Test accuracy with best validation = {}".format(test_acc2 * 100))
    print("Final test accuracy = {}".format(test_acc1 * 100))
    print("Total Model Runtime: {}min, {:0.2f}sec".format(
        int(time_elapsed // 60), time_elapsed % 60))
    with open(os.path.join(FLAGS.path, "results/results/results.txt"),
              "w") as f:
        f.write("Test accuracy with best validation: " +
                str(test_acc2) + "\n")
        f.write("Final test accuracy: " + str(test_acc1) + "\n")
        f.write("Total Model Runtime: " + str(int(time_elapsed // 60)) +
                "min, " + str(time_elapsed % 60) + "sec")
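# A minimal sketch of the generate_train_data generator that fit_generator
# consumes above: it must yield (batch_x, batch_y) tuples indefinitely.
# The project's actual generator is not shown, so the sampling below is an
# assumption; it reuses get_batch_of_data from the calls in main, where a
# positive batch_size is assumed to draw a random batch (the -1 used
# elsewhere returns the whole split).
def generate_train_data(image_lists, images_dir, batch_size,
                        resized_width, resized_height):
    while True:
        batch_x, batch_y = data_process.get_batch_of_data(
            image_lists, batch_size, images_dir, "train",
            resized_width, resized_height)
        yield batch_x, batch_y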