def resize_if_needed(self, image_name):
    """Load *image_name* and ensure it matches the configured target size.

    If the loaded image is already ``self.image_width`` x
    ``self.image_height`` it is returned as-is; otherwise it is resized
    with the 'half_crop' strategy and converted back to an image object.

    Parameters
    ----------
    image_name : str
        Path (or identifier) understood by ``load_image``.

    Returns
    -------
    A PIL-style image object of the target size.
    """
    image = load_image(image_name)
    width, height = image.size
    if width == self.image_width and height == self.image_height:
        # Already the right size: return the image we just loaded.
        # (The original reloaded it from disk a second time here.)
        return image
    resized_image = resize_image(image, self.image_width,
                                 self.image_height, self.image_channels,
                                 'half_crop')
    # The original passed `cmax=...` (a literal Ellipsis), which is not a
    # valid intensity bound; 255.0 is the standard 8-bit upper bound.
    # NOTE(review): scipy.misc.toimage was removed in SciPy 1.2 — confirm
    # the pinned SciPy version, or migrate to PIL.Image.fromarray.
    return scipy.misc.toimage(resized_image, cmin=0.0, cmax=255.0)
def get_and_seperate_data_X_y(directory, dimension, modes_convolution=None):
    """Build (X, y) training patches from every ``*.jpg`` under *directory*.

    Each image is corrupted with salt-and-pepper and Gaussian noise, run
    through a convolution pipeline, and both the noisy and the clean
    versions are converted to grayscale and cut into non-overlapping
    ``dimension`` x ``dimension`` tiles.

    Parameters
    ----------
    directory : str
        Directory prefix; globbed as ``directory + '*.jpg'``.
    dimension : int
        Side length of each square tile.
    modes_convolution : list[str] | None
        Convolution modes to apply to the noisy image; defaults to
        ``["gaussian_blur_5_5", "sharpen"]`` when ``None``.

    Returns
    -------
    list
        ``[dataset_X, dataset_y]`` — noisy tiles and matching clean tiles.
    """
    dir_list = glob.glob(directory + '*.jpg')
    dataset_X = []
    dataset_y = []
    for iterator, dir_file in enumerate(dir_list, start=1):
        origin_image = image_processor.load_image(dir_file)
        noise_image = noise_generator("s&p", origin_image)
        noise_image = noise_generator("gauss", noise_image)
        # convolution (use `is None`, not `== None`, per PEP 8)
        modes = (["gaussian_blur_5_5", "sharpen"]
                 if modes_convolution is None else modes_convolution)
        noise_image = image_processor.convolutions_image(noise_image,
                                                         modes=modes)
        gray_origin_image = cv.cvtColor(origin_image, cv.COLOR_BGR2GRAY)
        gray_noise_image = cv.cvtColor(noise_image, cv.COLOR_BGR2GRAY)
        # number of whole tiles that fit vertically / horizontally
        num_idx_col = len(gray_origin_image) // dimension
        num_idx_row = len(gray_origin_image[0]) // dimension
        for idx_col in range(num_idx_col):
            for idx_row in range(num_idx_row):
                rows = slice(idx_col * dimension, (idx_col + 1) * dimension)
                cols = slice(idx_row * dimension, (idx_row + 1) * dimension)
                # X is the degraded tile, y the clean ground-truth tile
                dataset_X.append(gray_noise_image[rows, cols])
                dataset_y.append(gray_origin_image[rows, cols])
        print("...image number " + str(iterator) + " has been processing")
    print("***DATASET IMAGE HAS BEEN DONE PROCESSED***")
    return [dataset_X, dataset_y]
# load pretrained model here! model = load_model() for class_index, img_dir in enumerate(img_dirs): path_images = os.path.join(root_path, img_dir) image_files = list(filter(lambda x: ".DS" not in x, os.listdir(path_images))) num_images = len(image_files) # index splitting in train, test and val first_index = int(num_images * 0.8) second_index = first_index + ((num_images - first_index) // 2) for data_split_index, img_file in enumerate(image_files): img_file = os.path.join(path_images, img_file) # rezise image to 299 and pad with black img = load_image(img_file) img = prepare_image(img, 299) # create onehot vector one_hot = np.zeros(num_classes) one_hot[class_index] = 1 feature = feature_extract_image(img, model) if data_split_index < first_index: num_train_sample += 1 train_set["sample_" + str(num_train_sample)] = np.asarray([feature, one_hot]) elif data_split_index < second_index: num_val_sample += 1 val_set["sample_" + str(num_val_sample)] = np.asarray([feature, one_hot]) else:
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 20:45:33 2018

@author: Davit
"""
from image_processor import load_image
import pandas as pd
import numpy
import cv2
#%%
# Load the X-ray image and wrap each BGR channel plane in its own DataFrame.
img_3d = load_image("dataset/Image/All Gambar Rontgen/03 (3).jpg")
df_3d_b, df_3d_g, df_3d_r = (
    pd.DataFrame(img_3d[:, :, channel]) for channel in range(3)
)
#%%
# Dump every channel to CSV for the grayscaling report.
for suffix, frame in (("b", df_3d_b), ("g", df_3d_g), ("r", df_3d_r)):
    frame.to_csv("result_report/Grayscaling/df_3d_" + suffix + ".csv")
#%%
#grayscale counting