def sample(self, iter_time):
    """Every `sample_freq` iterations, generate a sample batch and save the plots."""
    # Guard clause: do nothing except on sampling iterations.
    if np.mod(iter_time, self.flags.sample_freq) != 0:
        return
    x_imgs, y_imgs = self.train_dataset.train_next_batch(
        batch_size=self.flags.sample_batch,
        which_direction=self.flags.which_direction)
    generated = self.model.sample_imgs(x_imgs, y_imgs)
    utils.plots(generated, iter_time, self.train_dataset.image_size,
                self.sample_out_dir)
def plots(self, imgs, iter_time, save_file, names=None):
    """Rescale each image set from [-1., 1.] to [0., 1.] and save one grid plot per set.

    Args:
        imgs: list of image batches; each batch gets its own canvas/plot.
        iter_time: current iteration, used in the output file name.
        save_file: destination directory/prefix for the saved plots.
        names: optional list with one name per canvas; when omitted, plots
            are saved without an explicit name.
    """
    canvas = len(imgs)
    # transform [-1., 1.] to [0., 1.]
    imgs = [utils.inverse_transform(imgs[idx]) for idx in range(len(imgs))]
    # save more bigger image
    for canvas_idx in range(canvas):
        # Fix: the original indexed `names` unconditionally, so calling with
        # the default names=None raised TypeError. Fall back to name=None.
        name = None if names is None else names[canvas_idx]
        utils.plots(imgs[canvas_idx], iter_time, save_file,
                    self.grid_cols, self.grid_rows,
                    self.flags.sample_batch, name=name)
def plot_closest(query, n_results=5):
    """Plot the `n_results` images whose stored vectors are nearest to `query`'s word vector."""
    word2vec = Word2Vec.load(str(Dir.model_embed_words))
    query_vector = torch.from_numpy(word2vec.wv.get_vector(query)).to("cuda")
    # Keep the n_results smallest squared distances using a max-heap of
    # negated distances (heap root = current worst kept candidate).
    best = []
    for vector_file in Dir.image_vectors.rglob("*.jpg"):
        stored = torch.load(vector_file)
        sq_dist = ((stored - query_vector) ** 2).sum(axis=0).item()
        if len(best) < n_results:
            hq.heappush(best, (-sq_dist, vector_file))
        elif -best[0][0] > sq_dist:
            hq.heappushpop(best, (-sq_dist, vector_file))
    # Sort by increasing distance (entries hold negated distances).
    neg_dists, files = zip(*sorted(best, key=lambda entry: -entry[0]))
    image_paths = [Dir.images / f.relative_to(Dir.image_vectors) for f in files]
    return utils.plots(image_paths, lambda i, _: -neg_dists[i])
def plots_idx(idx, titles=None):
    """Load the validation images at the given indices and plot them with optional titles."""
    selected = [image.load_img(path + 'valid/' + filenames[n]) for n in idx]
    plots(selected, titles=titles)
# Dataset location and batch configuration.
path = '../data/dogscats/'
batch_size = 64

vgg = Vgg16()
train_batches = vgg.get_batches(path + 'train', batch_size=batch_size)
valid_batches = vgg.get_batches(path + 'valid', batch_size=batch_size * 2)
# vgg.finetune(train_batches)
# vgg.fit(train_batches, valid_batches, batch_size, nb_epoch=1)

# Pull a tiny batch to eyeball the pipeline and run a prediction.
batches = vgg.get_batches(path + 'train', batch_size=4)
imgs, labels = next(batches)
plots(imgs, titles=labels)
vgg.predict(imgs, True)

# for theano set up .theanorc
# use GPU
# reload in python 3
# from importlib import reload
# import a class
# from vgg16custom import Vgg16Custom
# vgg.model.save_weights('../data/weights/vgg.20170923.113140.hdf5', by_name=True)
# vgg.model.load_weights('../data/weights/vgg.20170923.113140.hdf5', by_name=True)
def eval_test(self):
    """Evaluate the model on every validation person and record MAE/PSNR stats.

    For each person, runs the model over that person's validation images,
    saves the generated images, and scores the fakes against the targets.
    Per-person rows plus overall mean/std are written to
    <test_out_dir>/stat.csv and a summary is printed.
    """
    total_time = 0.
    mae_record = np.zeros(self.val_dataset.num_persons)
    psnr_record = np.zeros(self.val_dataset.num_persons)

    # Fix: the original opened the csv file and never closed it (resource
    # leak, buffered rows could be lost on error). Use a context manager.
    with open(os.path.join(self.test_out_dir, 'stat.csv'), 'w', newline='') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',')
        csvwriter.writerow(['p_id', 'MAE', 'PSNR', 'MAE std', 'PSNR std'])

        global_iter = 0
        for p_id in range(self.val_dataset.num_persons):
            self.evaluator = Eval(self.val_dataset.image_size,
                                  self.val_dataset.num_vals[p_id])
            samples, y_imgs = [], []
            for iter_ in range(self.val_dataset.num_vals[p_id]):
                print('p_id: {}, iter: {}'.format(p_id, iter_))
                x_img, y_img = self.val_dataset.val_next_batch(
                    p_id, iter_, which_direction=self.flags.which_direction)

                # Time only the model forward pass.
                start_time = time.time()
                imgs = self.model.test_step(x_img, y_img)
                total_time += time.time() - start_time

                utils.plots(imgs, global_iter, self.val_dataset.image_size,
                            save_file=self.test_out_dir)
                # utils.save_cycle_consistent_imgs([x_img, y_fake, y_img, recon_x], global_iter,
                #     self.val_dataset.image_size, save_file=self.exp_out_dir)
                samples.append(imgs[1])  # imgs[1] == fake_y
                y_imgs.append(y_img)
                global_iter += 1

            mae_record[p_id], psnr_record[p_id] = self.evaluator.calculate(
                samples, y_imgs)
            # write per-person row to csv file (1-based person id)
            csvwriter.writerow([p_id + 1, mae_record[p_id], psnr_record[p_id]])

        for p_id in range(self.val_dataset.num_persons):
            print('p_id: {}, MAE: {:.2f}, PSNR: {:.2f}'.format(
                p_id, mae_record[p_id], psnr_record[p_id]))
        print('MAE Avg. {:.2f} and SD. {:.2f}'.format(np.mean(mae_record),
                                                      np.std(mae_record)))
        # Fix: original printed the misspelled metric name "PSRN".
        print('PSNR Avg. {:.2f} and SD. {:.2f}'.format(np.mean(psnr_record),
                                                       np.std(psnr_record)))
        print('Average PT: {:.2f} msec.'.format(
            (total_time / np.sum(self.val_dataset.num_vals)) * 1000))

        # write to csv file for mean and std of MAE and PSNR
        csvwriter.writerow([
            'MEAN', np.mean(mae_record), np.mean(psnr_record),
            np.std(mae_record), np.std(psnr_record)
        ])
def plot_results(self):
    """Plot the recorded training history."""
    history = self.model_history
    utils.plots(history)
# Create a 'batch' of a single image
i = ndimage.imread('data/dogscats/test/7.jpg')
i2 = ndimage.imread('data/dogscats/test/7_2.jpg')
img = np.expand_dims(i, axis=0)
img2 = np.expand_dims(i2, axis=0)

# Stack into shape (2, width, height, channels).
imgs = np.concatenate((img, img2))

# Ask the generator to derive batches from these images. Each yielded batch
# has shape (n, width, height, channels) where n is the number of source
# images; the images come back randomly augmented and shuffled.
aug_iter = gen.flow(imgs)

# Grab eight augmented batches.
aug_imgs = [next(aug_iter) for _ in range(8)]

# The original
#plt.imshow(imgs[0])
#plt.show()

# Augmented variants of the first image, then of the second.
plots([batch[0].astype(np.uint8) for batch in aug_imgs], (20, 7), 2)
plt.show()
plots([batch[1].astype(np.uint8) for batch in aug_imgs], (20, 7), 2)
plt.show()
#K.set_image_data_format(ol_format)
test_dataset = dataset.CC359Data() elif db_name == "clarissa": test_dataset = dataset.FloatHippocampusDataset(mode="test", return_volume=True) elif db_name == "adni": #test_dataset = dataset.ADNI() split = (0.5, 0.1, 0.4) if mixed else (0, 0, 1) print("Adni split {}".format(split)) test_dataset = dataset.FloatHippocampusDataset(h5path=default_adni, mode="test", adni=True, data_split=split, return_volume=True) else: raise ValueError("Invalid db_name {} for volume test".format(db_name)) models = get_models(bias, e2d, res, small, bn, dunet, model_folder=model_folder) vol_result, ths_study, cons = per_volume_test(models, metric, data_transforms["test"], test_dataset, device, metric_name="DICE", display=display_volume, e2d=e2d, wait=wait, name=basename, study_ths=study_ths) results[basename] = vol_result plots(ths_study, cons, study_ths, opt=db_name, savepath=model_folder, mean_result=vol_result, name=basename) # Slice training and test else: # Current default Hyperparameters NEPOCHS = 500 LR = 0.005 BATCH_SIZE = 600 hiponly = True print("Hiponly: " + str(hiponly)) print("Finetune: {}".format("yes" if finetune else "no")) shuffle = {x:True for x in ["train", "validation", "test"]} if mixed: print("Concatenating ADNI and Clarissa dataset") db_name = "concat"