# Assumed module-level imports/names (not shown in this snippet): argparse, the
# project modules `model` and `data_utils`, and the directory constants
# training_input_dir, training_output_dir, test_input_dir, test_output_dir.
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--model')
    args = parser.parse_args()

    if args.train and args.model:
        # Build and train a fresh network, then save the weights.
        nn = model.generate_model()
        training_input = data_utils.load_data(training_input_dir)
        training_output = data_utils.load_data(training_output_dir)
        nn.fit(training_input, training_output, batch_size=128, epochs=50)
        model.saveModel(nn, args.model)

        # Optionally run prediction on the test images right after training.
        test = input("Do you want to test with the test images too? ")
        if test == 'yes':
            test_input = data_utils.load_data(test_input_dir)
            test_output = nn.predict(test_input)
            print(test_output.shape)
            data_utils.save_images(test_output_dir, test_input_dir, test_output)
    elif args.test and args.model:
        # Load previously saved weights and predict on the test images.
        nn = model.loadModel(args.model)
        test_input = data_utils.load_data(test_input_dir)
        test_output = nn.predict(test_input)
        print(test_output.shape)
        data_utils.save_images(test_output_dir, test_input_dir, test_output)
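# Entry-point sketch for main() above; this guard is an assumption, since the
# original module's entry point is not shown. The script name and weights
# filename in the example invocations are illustrative only:
#   python run.py --train --model weights.h5
#   python run.py --test --model weights.h5
if __name__ == '__main__':
    main()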
def visualize_reordered(self, path, number, shape, permutations):
    # Render the original sample grid, then append one permuted copy per task.
    data = self.visualize_sample(path, number, shape)
    data = data.reshape(-1, shape[0] * shape[1] * shape[2])
    concat = deepcopy(data)
    image_frame_dim = int(np.floor(np.sqrt(number)))

    for i in range(1, self.n_tasks):
        # sort() on a permutation returns the indices that undo it.
        _, inverse_permutation = permutations[i].sort()
        reordered_data = deepcopy(data.index_select(1, inverse_permutation))
        print("hope you're in shape")  # debug output
        print(reordered_data.shape)
        print(concat.shape)
        concat = torch.cat((concat, reordered_data), 0)

    if shape[2] == 1:
        concat = concat.numpy().reshape(number * self.n_tasks, shape[0], shape[1], shape[2])
        save_images(concat[:image_frame_dim * image_frame_dim * self.n_tasks, :, :, :],
                    [self.n_tasks * image_frame_dim, image_frame_dim], path)
    else:
        concat = concat.numpy().reshape(number * self.n_tasks, shape[2], shape[1], shape[0])
        make_samples_batche(concat[:self.batch_size], self.batch_size, path)
def test(nn, storePath):
    # Requires `exists`/`makedirs` (from os.path / os) and the module-level
    # paths test_input_dir and output_path.
    print('Loading test input images: ' + test_input_dir)
    test_input = data_utils.load_data(test_input_dir)

    print('Beginning to predict...')
    test_output = nn.predict(test_input)

    # Create the output directory if it does not exist yet.
    if not exists(output_path + storePath):
        makedirs(output_path + storePath)

    print('Saving test output images to: ' + output_path + storePath)
    data_utils.save_images(output_path + storePath, test_input_dir, test_output)
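# Hypothetical call into test() above, mirroring the --test branch of main();
# the weights filename and output subfolder are assumptions, not from the source:
#   nn = model.loadModel('weights.h5')
#   test(nn, 'latest_run/')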
def visualize_sample(self, path, number, shape):
    data, target = self.get_sample(number, shape)

    # get sample in order from 0 to 9
    target, order = target.sort()
    data = data[order]

    # if self.transform is not None:
    #     tf_bacth = None
    #     for i in range(number):
    #         tf_data = TF.to_pil_image(data[i])
    #         tf_data = self.transform(tf_data)
    #         if i == 0:
    #             tf_bacth = torch.FloatTensor(number, tf_data.shape[0], tf_data.shape[1], tf_data.shape[2])
    #         tf_bacth[i] = tf_data
    #     data = tf_bacth

    image_frame_dim = int(np.floor(np.sqrt(number)))

    if shape[2] == 1:
        data_np = data.numpy().reshape(number, shape[0], shape[1], shape[2])
        save_images(data_np[:image_frame_dim * image_frame_dim, :, :, :],
                    [image_frame_dim, image_frame_dim], path)
    elif shape[2] == 3:
        # data = data.numpy().reshape(number, shape[0], shape[1], shape[2])
        # if self.dataset_name == 'cifar10':
        data = data.numpy().reshape(number, shape[2], shape[1], shape[0])
        # data = data.numpy().reshape(number, shape[0], shape[1], shape[2])
        # remap between 0 and 1
        # data = data - data.min()
        # data = data / data.max()
        data = data / 2 + 0.5  # unnormalize
        make_samples_batche(data[:number], number, path)
    else:
        save_images(data[:image_frame_dim * image_frame_dim, :, :, :],
                    [image_frame_dim, image_frame_dim], path)

    return data