def make_srcnn_rgb_dataset_based_on_cifar10():
    """Build an SRCNN RGB dataset from CIFAR-10 and save it as .npz.

    Y_train / Y_test are the original CIFAR-10 images (ground truth).
    X_train / X_test are the same images degraded by downscaling 2x and
    upscaling back 2x (the blurry network input at the original size).

    Saves ((X_train, Y_train), (X_test, Y_test)) as uint8 arrays to
    'datasets/srcnn-rgb-cifar10-dataset.npz'.
    """
    print('making SRCNN-RGB dataset using CIFAR-10...')
    # get_dataset_part presumably keeps a 20% slice of CIFAR-10 — TODO confirm
    (Y_train, _), (Y_test, _) = get_dataset_part(cifar10.load_data(), train_part=0.2)
    from image_handler import get_image, get_image_data, zoom_out_image, zoom_up_image

    def _degrade(items):
        # For each ground-truth image: downscale 2x then upscale 2x,
        # producing the low-detail input image at the original resolution.
        degraded = []
        for item in items:
            image = get_image(item.tolist(), mode='RGB')
            zoomed_out_image = zoom_out_image(image, times=2)
            zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
            degraded.append(get_image_data(zoomed_up_image))
        return degraded

    print('making X_train list...')
    X_train = _degrade(Y_train)

    print('making X_test list...')
    X_test = _degrade(Y_test)

    dtype = 'uint8'
    dataset = (np.array(X_train, dtype=dtype), np.array(Y_train, dtype=dtype)), \
        (np.array(X_test, dtype=dtype), np.array(Y_test, dtype=dtype))
    print('saving dataset to srcnn-rgb-cifar10-dataset.npz...')
    # NOTE(review): the tuple is stored as a single pickled object array
    # ('arr_0'); loading requires allow_pickle=True.
    np.savez('datasets/srcnn-rgb-cifar10-dataset.npz', dataset)
def make_pasadena_dataset():
    """Build an SRCNN dataset from the 10 Pasadena JPEGs and save it as .npz.

    Reads dcp_2412.jpg .. dcp_2421.jpg, splits them 7 train / 3 test.
    Y_* are the originals; X_* are degraded copies (2x down, 2x back up).
    Saves ((X_train, Y_train), (X_test, Y_test)) as uint8 arrays to
    'datasets/pasadena-dataset.npz' (compressed).
    """
    print('making PASADENA dataset...')
    from image_handler import get_image_data, get_image, zoom_out_image, zoom_up_image
    path = 'saved_images/Pasadena Dataset/'
    filename_prefix = path + 'dcp_24'
    size = 10
    result = []
    for i in range(size):
        # Filenames run dcp_2412.jpg .. dcp_2421.jpg.
        filename = filename_prefix + str(12 + i) + '.jpg'
        image = Image.open(filename)
        print('Image', filename.rpartition('/')[2], 'opened')
        result.append(get_image_data(image))

    print('making Y_train and Y_test...')
    train_size = 7
    Y_train, Y_test = result[:train_size], result[train_size:]

    def _degrade(items):
        # Downscale 2x then upscale 2x: the blurry network input.
        degraded = []
        for image_data in items:  # already plain lists, no .tolist() needed
            image = get_image(image_data, mode='RGB')
            zoomed_out_image = zoom_out_image(image, times=2)
            zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
            degraded.append(get_image_data(zoomed_up_image))
        return degraded

    print('making X_train list...')
    X_train = _degrade(Y_train)

    print('making X_test list...')
    X_test = _degrade(Y_test)

    dtype = 'uint8'
    dataset = (np.array(X_train, dtype=dtype), np.array(Y_train, dtype=dtype)), \
        (np.array(X_test, dtype=dtype), np.array(Y_test, dtype=dtype))
    print('saving dataset to pasadena-dataset.npz...')
    np.savez_compressed('datasets/pasadena-dataset.npz', dataset)
    print('pasadena-dataset.npz saved')
def make_hundred_dataset():
    """Build an SRCNN dataset from the 'Hundred Dataset' PNGs and save it as .npz.

    Reads 1.png .. 53.png, splits them 40 train / 13 test.
    Y_* are the originals; X_* are degraded copies (2x down, 2x back up).
    Saves ((X_train, Y_train), (X_test, Y_test)) as uint8 arrays to
    'datasets/hundred-dataset.npz' (uncompressed, unlike the Pasadena dataset).
    """
    print('making HUNDRED dataset...')
    from image_handler import get_image_data, get_image, zoom_out_image, zoom_up_image
    path = 'images/Hundred Dataset/images/'
    size = 53
    result = []
    for i in range(size):
        filename = path + str(1 + i) + '.png'
        image = Image.open(filename)
        print('Image', filename.rpartition('/')[2], 'opened')
        result.append(get_image_data(image))

    print('making Y_train and Y_test...')
    train_size = 40
    Y_train, Y_test = result[:train_size], result[train_size:]

    def _degrade(items):
        # Downscale 2x then upscale 2x: the blurry network input.
        degraded = []
        for image_data in items:  # already plain lists, no .tolist() needed
            image = get_image(image_data, mode='RGB')
            zoomed_out_image = zoom_out_image(image, times=2)
            zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
            degraded.append(get_image_data(zoomed_up_image))
        return degraded

    print('making X_train list...')
    X_train = _degrade(Y_train)

    print('making X_test list...')
    X_test = _degrade(Y_test)

    dtype = 'uint8'
    dataset = (np.array(X_train, dtype=dtype), np.array(Y_train, dtype=dtype)), \
        (np.array(X_test, dtype=dtype), np.array(Y_test, dtype=dtype))
    print('saving dataset to hundred-dataset.npz...')
    # Deliberately np.savez, not savez_compressed (original noted '# _compressed').
    np.savez('datasets/hundred-dataset.npz', dataset)
    print('hundred-dataset.npz saved')
def handle_mnist():
    """Build a (low-res, high-res) pair dataset from MNIST and pickle it.

    dataset_X holds the 2x-downscaled images, dataset_Y the originals.
    Writes the (dataset_X, dataset_Y) tuple to 'datasets/mnist-dataset.pkl'.
    """
    from image_handler import get_image, get_image_data, zoom_out_image
    from keras.datasets import mnist
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    dataset_size = 60000  # full MNIST training split
    dataset_X, dataset_Y = [], []
    for i, X_train_item in enumerate(X_train[:dataset_size]):
        image_data = X_train_item.tolist()
        image = get_image(image_data, mode='L')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_out_image_data = get_image_data(zoomed_out_image)
        dataset_X.append(zoomed_out_image_data)
        dataset_Y.append(image_data)
        # Progress/diagnostic output: dimensions of both versions.
        print('image ' + str(i) + ', height : ' + str(len(image_data)),
              'width : ' + str(len(image_data[0])), sep=', ')
        print('zoomed_out_image ' + str(i) + ', height : ' + str(len(zoomed_out_image_data)),
              'width : ' + str(len(zoomed_out_image_data[0])), sep=', ')
    dataset = (dataset_X, dataset_Y)
    import pickle
    # Fix: context manager guarantees the file is closed even if dump raises
    # (the original left the handle open on error).
    with open('datasets/mnist-dataset.pkl', 'wb') as mnist_dataset_file:
        pickle.dump(dataset, mnist_dataset_file)
def make_srcnn_dataset_based_on_mnist():
    """Build an SRCNN grayscale dataset from MNIST and save it as .npz.

    Y_train / Y_test are the original MNIST images (60000 / 10000).
    X_train / X_test are degraded copies (downscaled 2x, upscaled back 2x).
    Saves ((X_train, Y_train), (X_test, Y_test)) as uint8 arrays to
    'datasets/srcnn-mnist-dataset.npz'.
    """
    print('making SRCNN dataset using MNIST...')
    from image_handler import get_image, get_image_data, zoom_out_image, zoom_up_image
    (Y_train, _), (Y_test, _) = mnist.load_data()  # 60000, 10000

    def _degrade(items):
        # Downscale 2x then upscale 2x: the blurry grayscale network input.
        degraded = []
        for item in items:
            image = get_image(item.tolist(), mode='L')
            zoomed_out_image = zoom_out_image(image, times=2)
            zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
            degraded.append(get_image_data(zoomed_up_image))
        return degraded

    print('making X_train list...')
    X_train = _degrade(Y_train)

    print('making X_test list...')
    X_test = _degrade(Y_test)

    dtype = 'uint8'
    dataset = (np.array(X_train, dtype=dtype), np.array(Y_train, dtype=dtype)), \
        (np.array(X_test, dtype=dtype), np.array(Y_test, dtype=dtype))
    print('saving dataset to srcnn-mnist-dataset.npz...')
    np.savez('datasets/srcnn-mnist-dataset.npz', dataset)