def make_srcnn_rgb_dataset_based_on_cifar10():
    print('making SRCNN-RGB dataset using CIFAR-10...')
    from image_handler import get_image, get_image_data, zoom_out_image, zoom_up_image
    (Y_train, _), (Y_test, _) = get_dataset_part(cifar10.load_data(), train_part=0.2)

    # X_train: blurred copies of Y_train (downscale 2x, then upscale 2x)
    print('making X_train list...')
    X_train = []
    for item in Y_train:
        image_data = item.tolist()
        image = get_image(image_data, mode='RGB')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
        zoomed_up_image_data = get_image_data(zoomed_up_image)
        X_train.append(zoomed_up_image_data)

    # X_test: same degradation applied to Y_test
    print('making X_test list...')
    X_test = []
    for item in Y_test:
        image_data = item.tolist()
        image = get_image(image_data, mode='RGB')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
        zoomed_up_image_data = get_image_data(zoomed_up_image)
        X_test.append(zoomed_up_image_data)

    dtype = 'uint8'
    dataset = (np.array(X_train, dtype=dtype), np.array(Y_train, dtype=dtype)), \
              (np.array(X_test, dtype=dtype), np.array(Y_test, dtype=dtype))

    print('saving dataset to srcnn-rgb-cifar10-dataset.npz...')
    np.savez('datasets/srcnn-rgb-cifar10-dataset.npz', dataset)
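
# Note (sketch, not part of the original module): since the nested tuple above is passed to
# np.savez as a single positional argument, NumPy stores it under the default key 'arr_0',
# pickled as an object array when the train/test shapes differ. Reading it back would look
# roughly like this; the loader name is hypothetical.
def load_srcnn_rgb_cifar10_dataset():
    with np.load('datasets/srcnn-rgb-cifar10-dataset.npz', allow_pickle=True) as data:
        (X_train, Y_train), (X_test, Y_test) = data['arr_0']
    return (X_train, Y_train), (X_test, Y_test)
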
def make_pasadena_dataset():
    print('making PASADENA dataset...')
    from image_handler import get_image_data, get_image, zoom_out_image, zoom_up_image

    # load the source images (dcp_2412.jpg .. dcp_2421.jpg)
    path = 'saved_images/Pasadena Dataset/'
    filename_prefix = path + 'dcp_24'
    size = 10
    result = []
    for i in range(size):
        filename = filename_prefix + str(12 + i) + '.jpg'
        image = Image.open(filename)
        print('Image', filename.rpartition('/')[2], 'opened')
        image_data = get_image_data(image)
        result.append(image_data)

    print('making Y_train and Y_test...')
    train_size = 7
    Y_train, Y_test = result[:train_size], result[train_size:]

    # X_train: blurred copies of Y_train (downscale 2x, then upscale 2x)
    print('making X_train list...')
    X_train = []
    for item in Y_train:
        image_data = item  # already a nested list, no .tolist() needed
        image = get_image(image_data, mode='RGB')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
        zoomed_up_image_data = get_image_data(zoomed_up_image)
        X_train.append(zoomed_up_image_data)

    # X_test: same degradation applied to Y_test
    print('making X_test list...')
    X_test = []
    for item in Y_test:
        image_data = item  # already a nested list, no .tolist() needed
        image = get_image(image_data, mode='RGB')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
        zoomed_up_image_data = get_image_data(zoomed_up_image)
        X_test.append(zoomed_up_image_data)

    dtype = 'uint8'
    dataset = (np.array(X_train, dtype=dtype), np.array(Y_train, dtype=dtype)), \
              (np.array(X_test, dtype=dtype), np.array(Y_test, dtype=dtype))

    print('saving dataset to pasadena-dataset.npz...')
    np.savez_compressed('datasets/pasadena-dataset.npz', dataset)
    print('pasadena-dataset.npz saved')
def make_hundred_dataset():
    print('making HUNDRED dataset...')
    from image_handler import get_image_data, get_image, zoom_out_image, zoom_up_image

    # load the source images (1.png .. 53.png)
    path = 'images/Hundred Dataset/images/'
    size = 53
    result = []
    for i in range(size):
        filename = path + str(1 + i) + '.png'
        image = Image.open(filename)
        print('Image', filename.rpartition('/')[2], 'opened')
        image_data = get_image_data(image)
        result.append(image_data)

    print('making Y_train and Y_test...')
    train_size = 40
    Y_train, Y_test = result[:train_size], result[train_size:]

    # X_train: blurred copies of Y_train (downscale 2x, then upscale 2x)
    print('making X_train list...')
    X_train = []
    for item in Y_train:
        image_data = item  # already a nested list, no .tolist() needed
        image = get_image(image_data, mode='RGB')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
        zoomed_up_image_data = get_image_data(zoomed_up_image)
        X_train.append(zoomed_up_image_data)

    # X_test: same degradation applied to Y_test
    print('making X_test list...')
    X_test = []
    for item in Y_test:
        image_data = item  # already a nested list, no .tolist() needed
        image = get_image(image_data, mode='RGB')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
        zoomed_up_image_data = get_image_data(zoomed_up_image)
        X_test.append(zoomed_up_image_data)

    dtype = 'uint8'
    dataset = (np.array(X_train, dtype=dtype), np.array(Y_train, dtype=dtype)), \
              (np.array(X_test, dtype=dtype), np.array(Y_test, dtype=dtype))

    print('saving dataset to hundred-dataset.npz...')
    np.savez('datasets/hundred-dataset.npz', dataset)  # np.savez_compressed could be used instead
    print('hundred-dataset.npz saved')
def make_srcnn_dataset_based_on_mnist():
    print('making SRCNN dataset using MNIST...')
    from image_handler import get_image, get_image_data, zoom_out_image, zoom_up_image
    (Y_train, _), (Y_test, _) = mnist.load_data()  # 60000 train / 10000 test images

    # making X_train list: blurred copies of Y_train (downscale 2x, then upscale 2x)
    print('making X_train list...')
    X_train = []
    for item in Y_train:
        image_data = item.tolist()
        image = get_image(image_data, mode='L')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
        zoomed_up_image_data = get_image_data(zoomed_up_image)
        X_train.append(zoomed_up_image_data)

    # making X_test list: same degradation applied to Y_test
    print('making X_test list...')
    X_test = []
    for item in Y_test:
        image_data = item.tolist()
        image = get_image(image_data, mode='L')
        zoomed_out_image = zoom_out_image(image, times=2)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
        zoomed_up_image_data = get_image_data(zoomed_up_image)
        X_test.append(zoomed_up_image_data)

    # Merged version of the two loops above (kept for reference):
    # X_train, X_test = [], []
    # for X, Y in [(X_train, Y_train), (X_test, Y_test)]:
    #     for item in Y:
    #         image_data = item.tolist()
    #         image = get_image(image_data, mode='L')
    #         zoomed_out_image = zoom_out_image(image, times=2)
    #         zoomed_up_image = zoom_up_image(zoomed_out_image, times=2)
    #         zoomed_up_image_data = get_image_data(zoomed_up_image)
    #         X.append(zoomed_up_image_data)

    dtype = 'uint8'
    dataset = (np.array(X_train, dtype=dtype), np.array(Y_train, dtype=dtype)), \
              (np.array(X_test, dtype=dtype), np.array(Y_test, dtype=dtype))

    print('saving dataset to srcnn-mnist-dataset.npz...')
    np.savez('datasets/srcnn-mnist-dataset.npz', dataset)
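
# Sketch (an assumption, not in the original module): the four dataset builders above all repeat
# the same downscale-then-upscale loop, and the commented-out block in
# make_srcnn_dataset_based_on_mnist() hints at merging it. A shared helper, using only the
# image_handler functions the code already imports, could look like this; the name
# make_blurred_copies is hypothetical.
def make_blurred_copies(Y, mode='RGB', times=2):
    """Return a list of image arrays degraded by zooming each item of Y out and back up."""
    from image_handler import get_image, get_image_data, zoom_out_image, zoom_up_image
    X = []
    for item in Y:
        # NumPy rows need .tolist(); items that are already nested lists are passed through
        image_data = item.tolist() if hasattr(item, 'tolist') else item
        image = get_image(image_data, mode=mode)
        zoomed_out_image = zoom_out_image(image, times=times)
        zoomed_up_image = zoom_up_image(zoomed_out_image, times=times)
        X.append(get_image_data(zoomed_up_image))
    return X
    # usage, e.g.: X_train = make_blurred_copies(Y_train, mode='L')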