def display_tiled_image(image, tile_count, colors, save_image=True):
    tiled_image = processing.get_tiled_image(image, tile_count, colors)
    key_values, color_map_type = get_plot_configs()
    plot_title = "tiled image"
    image_info = [
        {
            key_values[0]: image,
            key_values[1]: "original image",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: tiled_image,
            key_values[1]: "tiled image",
            key_values[2]: color_map_type["color"]
        },
    ]
    plot_size = get_plot_size(len(image_info))
    plot_image(plot_size=plot_size,
               plot_title=plot_title,
               image_info=image_info,
               key_values=key_values)

    if save_image:
        file_name = get_full_output_path("Color_Image_modified.jpg")
        cv2.imwrite(file_name, cv2.cvtColor(tiled_image, cv2.COLOR_BGR2RGB))

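# Usage sketch for the display_* helpers in this module. A minimal example,
# assuming the module-level helpers (get_plot_configs, get_plot_size,
# plot_image, get_full_output_path) and the cv2/processing imports are already
# available; the input path and the tile_count/colors values are hypothetical:
#
#   image = cv2.imread("images/input.jpg")
#   # images appear to be handled in RGB here (the save path converts back
#   # with cv2.cvtColor before cv2.imwrite), so swap channels after loading
#   image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#   display_tiled_image(image, tile_count=4, colors=8, save_image=False)
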
def display_threshld_image(image, threshold_value, max_value, save_image=True):
    thresholded_image = processing.get_threshlded_image(image, threshold_value, max_value)
    key_values, color_map_type = get_plot_configs()
    plot_title = "Image type"
    image_info = [
        {
            key_values[0]: image,
            key_values[1]: "color image",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: thresholded_image,
            key_values[1]: "thresholded image",
            key_values[2]: color_map_type["gray"]
        },
    ]
    plot_size = get_plot_size(len(image_info))
    plot_image(plot_size=plot_size,
               plot_title=plot_title,
               image_info=image_info,
               key_values=key_values)

    if save_image:
        file_name = get_full_output_path("Color_Image_thresholded.jpg")
        cv2.imwrite(file_name, thresholded_image)

def display_gray_scale_image(image, save_image=True):
    grayscale_image = processing.get_grayscale_image(image)
    key_values, color_map_type = get_plot_configs()
    plot_title = "Image type"
    image_info = [
        {
            key_values[0]: image,
            key_values[1]: "color image",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: grayscale_image,
            key_values[1]: "grayscale image",
            key_values[2]: color_map_type["gray"]
        },
    ]
    plot_size = get_plot_size(len(image_info))
    plot_image(plot_size=plot_size,
               plot_title=plot_title,
               image_info=image_info,
               key_values=key_values)

    if save_image:
        file_name = get_full_output_path("gray_Image.jpg")
        cv2.imwrite(file_name, grayscale_image)

def display_resized_image(image, scale_percent, save_image=True):
    resized_image = processing.get_resized_image(image, scale_percent)
    key_values, color_map_type = get_plot_configs()
    plot_title = "image resized to " + str(scale_percent) + "% of its original size"
    image_info = [
        {
            key_values[0]: image,
            key_values[1]: "original image",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: resized_image,
            key_values[1]: "resized image",
            key_values[2]: color_map_type["color"]
        },
    ]
    plot_size = get_plot_size(len(image_info))
    plot_image(plot_size=plot_size,
               plot_title=plot_title,
               image_info=image_info,
               key_values=key_values)

    if save_image:
        file_name = get_full_output_path("Color_Image_resized.jpg")
        cv2.imwrite(file_name, cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB))

def get_proportion(self, d_obs, d_obs_label, m_ref):
    self.proportion = self.classifier.predict(d_obs)

    f = plt.figure(figsize=[8, 4])
    plt.subplot(1, 2, 1)
    util.plot_image(self.proportion, d_obs_label, m_ref, self.class_names)
    plt.subplot(1, 2, 2)
    util.plot_value_array(self.proportion, d_obs_label)
    plt.tight_layout()
    plt.show()
    f.savefig('readme/proportion.png')

def test_data_loader(loader):
    for i_batch, sample_batched in enumerate(loader):
        for i, image in enumerate(sample_batched['image']):
            rect = sample_batched['rectangle'][i].numpy()
            image = de_normalize(image)
            image = image.numpy().transpose((1, 2, 0))
            print("For i_batch {}, image_idx {}: {} {}".format(
                i_batch, i, image.shape, rect.shape))
            plot_image(image, rect)
        print(i_batch, sample_batched['image'].size(),
              sample_batched['rectangle'].size())
        if i_batch >= 1:
            break

def display_color_channels(image, save_image=True):
    # extract the blue, green and red channels
    blue_channel, green_channel, red_channel = get_channels(image)
    key_values, color_map_type = get_plot_configs()
    plot_title = "color image and its channels"
    image_info = [
        {
            key_values[0]: image,
            key_values[1]: "original image",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: blue_channel,
            key_values[1]: "blue channel",
            key_values[2]: color_map_type["gray"]
        },
        {
            key_values[0]: green_channel,
            key_values[1]: "green channel",
            key_values[2]: color_map_type["gray"]
        },
        {
            key_values[0]: red_channel,
            key_values[1]: "red channel",
            key_values[2]: color_map_type["gray"]
        },
    ]
    plot_size = get_plot_size(len(image_info))
    plot_image(plot_size=plot_size,
               plot_title=plot_title,
               image_info=image_info,
               key_values=key_values)

    if save_image:
        file_name = get_full_output_path("blue_channel.jpg")
        cv2.imwrite(file_name, blue_channel)
        file_name = get_full_output_path("green_channel.jpg")
        cv2.imwrite(file_name, green_channel)
        file_name = get_full_output_path("red_channel.jpg")
        cv2.imwrite(file_name, red_channel)

def display_color_image(image, save_image=True):
    blue_image, green_image, red_image = get_image_channels(image)
    key_values, color_map_type = get_plot_configs()
    plot_title = "color image and its channels"
    image_info = [
        {
            key_values[0]: image,
            key_values[1]: "original image",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: blue_image,
            key_values[1]: "blue channel",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: green_image,
            key_values[1]: "green channel",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: red_image,
            key_values[1]: "red channel",
            key_values[2]: color_map_type["color"]
        },
    ]
    plot_size = get_plot_size(len(image_info))
    plot_image(plot_size=plot_size,
               plot_title=plot_title,
               image_info=image_info,
               key_values=key_values)

    if save_image:
        file_name = get_full_output_path("Color_Image_blue.jpg")
        cv2.imwrite(file_name, cv2.cvtColor(blue_image, cv2.COLOR_BGR2RGB))
        file_name = get_full_output_path("Color_Image_green.jpg")
        cv2.imwrite(file_name, cv2.cvtColor(green_image, cv2.COLOR_BGR2RGB))
        file_name = get_full_output_path("Color_Image_red.jpg")
        cv2.imwrite(file_name, cv2.cvtColor(red_image, cv2.COLOR_BGR2RGB))

def test(generator):
    filenames = ["1.jpg", "2.jpg", "3.jpg"]
    for filename in filenames:
        # original image, used for prediction without the mask
        img = process_image('test/' + filename)
        # copy of the image, used for drawing the mask
        temp_img = process_image('test/' + filename)
        print("Testing ...")
        mask = erase_img(temp_img)

        img = np.expand_dims(img, 0)
        mask = np.expand_dims(mask, 0)
        completion_image = generator.predict([img, mask])

        # delete the batch dimension
        completion_image = np.squeeze(completion_image, 0)
        img = np.squeeze(img, 0)

        # cv2 show
        # completion_image = cv2.cvtColor(completion_image, cv2.COLOR_BGR2RGB)
        # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        plt.figure(figsize=(6, 3))
        plot_image(temp_img, 'Input', 1)
        plot_image(completion_image, 'Output', 2)
        plot_image(img, 'Ground Truth', 3)
        plt.savefig("result/" + filename.split('.')[0] + "_test")
        plt.show()
        # cv2.imshow("result", completion_image)
        # cv2.waitKey()

    print("Done.")

args = parser.parse_args()

if not args.session_id:
    # fall back to the most recent checkpoint in ./checkpoints
    ids = [int(c.split("_")[1].split(".")[0]) for c in os.listdir("./checkpoints")]
    ids.sort(reverse=True)
    session_id = ids[0]
else:
    session_id = args.session_id

model = load_model(session_id)
model.eval()
model_to_device(model)

if args.image_path is not None:
    image = Image.open(args.image_path).convert("RGB")
    image_tensor = ToTensor()(image).to(get_device())
    accepted_bboxes = evaluate(model, [image_tensor])[0]
    plot_image(image_tensor, accepted_bboxes)
else:
    data_loader = torch.utils.data.DataLoader(
        dataset=GlobalDataset(transforms=Compose([ToTensor()])),
        batch_size=args.batch_size,
        collate_fn=collate_fn
    )
    for image_tensor, _ in data_loader:
        accepted_bbox_lists = evaluate(model, image_tensor)
        for i, accepted_bboxes in enumerate(accepted_bbox_lists):
            plot_image(image_tensor[i], accepted_bboxes)
        break

def display_convlution(image, save_image=True):
    # construct the Laplacian kernel
    laplacian = np.array(([0, 1, 0],
                          [1, -4, 1],
                          [0, 1, 0]), dtype="int")
    # construct the Sobel x-axis kernel
    sobelX = np.array(([-1, 0, 1],
                       [-2, 0, 2],
                       [-1, 0, 1]), dtype="int")
    # construct the Sobel y-axis kernel
    sobelY = np.array(([-1, -2, -1],
                       [0, 0, 0],
                       [1, 2, 1]), dtype="int")

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    conv_x = processing.convolve(gray, sobelX)
    post_process_conv_image(conv_x)
    conv_y = processing.convolve(gray, sobelY)
    post_process_conv_image(conv_y)
    conv_laplacian = processing.convolve(gray, laplacian)
    post_process_conv_image(conv_laplacian)

    # merge the X and Y gradient images
    conv_x_y = conv_x + conv_y

    key_values, color_map_type = get_plot_configs()
    plot_title = "Edge extraction"
    image_info = [
        {
            key_values[0]: image,
            key_values[1]: "original image",
            key_values[2]: color_map_type["color"]
        },
        {
            key_values[0]: gray,
            key_values[1]: "grayscale image",
            key_values[2]: color_map_type["gray"]
        },
        {
            key_values[0]: conv_x,
            key_values[1]: "sobelX image",
            key_values[2]: color_map_type["gray"]
        },
        {
            key_values[0]: conv_y,
            key_values[1]: "sobelY image",
            key_values[2]: color_map_type["gray"]
        },
        {
            key_values[0]: conv_x_y,
            key_values[1]: "sobelX_Y image",
            key_values[2]: color_map_type["gray"]
        },
        {
            key_values[0]: conv_laplacian,
            key_values[1]: "laplacian image",
            key_values[2]: color_map_type["gray"]
        },
    ]
    plot_size = get_plot_size(len(image_info))
    plot_image(plot_size=plot_size,
               plot_title=plot_title,
               image_info=image_info,
               key_values=key_values)

    if save_image:
        file_name = get_full_output_path("conv_gray_Image.jpg")
        cv2.imwrite(file_name, gray)
        file_name = get_full_output_path("conv_x.jpg")
        cv2.imwrite(file_name, conv_x)
        file_name = get_full_output_path("conv_y.jpg")
        cv2.imwrite(file_name, conv_y)
        file_name = get_full_output_path("conv_x_y.jpg")
        cv2.imwrite(file_name, conv_x_y)
        file_name = get_full_output_path("conv_laplacian.jpg")
        cv2.imwrite(file_name, conv_laplacian)

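# For a rough cross-check of processing.convolve, OpenCV's built-in filtering
# can produce comparable edge maps. A sketch only, assuming `gray`, `sobelX`
# and `sobelY` as defined inside display_convlution above; results may differ
# in sign and border handling, since cv2.filter2D computes correlation rather
# than true convolution:
#
#   check_x = cv2.filter2D(gray, -1, sobelX.astype("float32"))
#   check_y = cv2.filter2D(gray, -1, sobelY.astype("float32"))
#   check_laplacian = cv2.Laplacian(gray, -1, ksize=1)
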
import os

import numpy as np

from util import get_data, plot_image
from variables import saved_weights
from mnist import MnistClassifier

current_dir = os.getcwd()
saved_weights = os.path.join(current_dir, saved_weights)

if __name__ == "__main__":
    Xtrain, Ytrain, Xtest, Ytest = get_data()
    classifier = MnistClassifier()

    if os.path.exists(saved_weights):
        print("Loading existing model!!!")
        classifier.load_model()
    else:
        print("Training the model and saving!!!")
        classifier.mnist_model()
        classifier.train()
        classifier.save_model()

    idx = np.random.randint(len(Xtest))
    plot_image(Xtest, idx)
    classifier.predict(Xtest[idx], Ytest[idx])

                valid_loader=valid_loader,
                test_loader=test_loader,
                pre_trained=PRE_TRAINED)
# print(model.model)

if test_and_plot != "":
    path = Path(test_and_plot)
    device = torch.device('cpu')  # this could be 'cuda' if your machine has a GPU
    model.load_model(path, device=device)
    images, predictions = model.get_prediction(test_loader)
    for i, batch in enumerate(predictions):
        for j, rect in enumerate(batch):
            image = images[i][j]
            image = de_normalize(image, PRE_TRAINED)
            image = image.numpy().transpose((1, 2, 0))
            # print(f"Predicted rectangles {rect}")
            plot_image(image, rect)
else:
    print(
        f"Starting Training, pre-trained: {PRE_TRAINED}, batch_size: {batch_size}, epochs: {epochs}, num_workers:"
        f" {num_workers}, test_split: {test_split}, valid_split: {valid_split}"
    )
    n_train_batches = len(train_loader)
    n_val_batches = len(valid_loader)
    n_test_batches = len(test_loader)
    train_network(epochs, n_train_batches, n_val_batches, n_test_batches)

print("Bye")

def plot_image(self, image_info):
    plot_size = get_plot_size(len(image_info))
    plot_image(plot_size=plot_size,
               plot_title=self.plot_title,
               image_info=image_info,
               key_values=self.key_values)

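# Usage sketch for the plot_image method above. The class it belongs to is not
# shown here; this example only assumes that instances expose plot_title and
# key_values attributes (as the method itself does), and the ImageDisplay name
# is hypothetical:
#
#   display = ImageDisplay(...)
#   key_values, color_map_type = get_plot_configs()
#   image_info = [{key_values[0]: image,
#                  key_values[1]: "original image",
#                  key_values[2]: color_map_type["color"]}]
#   display.plot_image(image_info)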