def patches():
    # Write nearest-neighbor and cubic upsamplings of a ground-truth image
    # for the MICCAI 2017 poster figures.
    import scipy.ndimage
    import scipy.misc
    expno = 8
    exp_dir = ('/groups/saalfeld/saalfeldlab/posters/miccai-2017/'
               'with_groundtruth/exp{0:}_new'.format(expno))
    # Read the first channel of the ground truth and scale to [0, 1].
    im = scipy.ndimage.imread(exp_dir + '/gt.png')[:, :, 0] / 255.
    print(im.shape)
    print(np.max(im), np.min(im))
    # Downscale along axis 0, then upsample back by nearest-neighbor
    # repetition and by cubic interpolation.
    im_down = utils.downscale_manually(im, 4, 0)
    im_cubic = utils.cubic_up(im_down, 4, 0)
    im_nn = np.repeat(im_down, 4, 0)
    print(im_down.shape)
    print(im_cubic.shape)
    print(im_nn.shape)
    scipy.misc.imsave(exp_dir + '/cubic.png', im_cubic)
    scipy.misc.imsave(exp_dir + '/nn.png', im_nn)
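# utils.downscale_manually and utils.cubic_up are imported from elsewhere and
# not defined in this module. Below is a minimal sketch of their assumed
# semantics (block-averaging down and cubic interpolation up along one axis,
# with an integer factor); the "_sketch" helpers are illustrative assumptions,
# not the actual utils implementation.
def _downscale_along_axis_sketch(arr, factor, axis=0):
    """Average non-overlapping blocks of `factor` samples along `axis`."""
    arr = np.moveaxis(arr, axis, 0)
    n = (arr.shape[0] // factor) * factor  # drop any trailing remainder
    arr = arr[:n].reshape((n // factor, factor) + arr.shape[1:]).mean(axis=1)
    return np.moveaxis(arr, 0, axis)


def _cubic_up_along_axis_sketch(arr, factor, axis=0):
    """Upsample by `factor` along `axis` with cubic spline interpolation."""
    import scipy.ndimage
    zoom = np.ones(arr.ndim)
    zoom[axis] = factor
    return scipy.ndimage.zoom(arr, zoom, order=3)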
def h5_data_generator(self, source, input_shape, num_outputs):
    # Endlessly yield random crops from an HDF5 raw volume as training
    # batches. In cubic mode, each crop is additionally downscaled along z
    # and cubic-upsampled back to serve as the ground truth.
    train_ds = h5py.File(source, 'r')['raw']
    while True:
        if self.cubic:
            sample_downsampled = np.empty(
                (self.bs,)
                + (input_shape[0] // self.model.scaling_factor,)
                + input_shape[1:])
            batch_cubicup = np.empty((self.bs,) + input_shape)
        batch = np.empty((self.bs,) + input_shape)
        # Random corner coordinates for each crop in the batch.
        # (np.random.random_integers is deprecated; np.random.randint with an
        # exclusive upper bound is the modern equivalent.)
        z_start = np.random.random_integers(
            0, train_ds.shape[0] - input_shape[0] - 1, self.bs)
        y_start = np.random.random_integers(
            0, train_ds.shape[1] - input_shape[1] - 1, self.bs)
        x_start = np.random.random_integers(
            0, train_ds.shape[2] - input_shape[2] - 1, self.bs)
        for k in range(self.bs):
            train_ds.read_direct(
                batch,
                np.s_[z_start[k]:z_start[k] + input_shape[0],
                      y_start[k]:y_start[k] + input_shape[1],
                      x_start[k]:x_start[k] + input_shape[2]],
                np.s_[k, :, :, :])
            if self.cubic:
                sample_downsampled[k, :, :, :] = utils.downscale_manually(
                    batch[k, :, :, :], factor=self.model.scaling_factor,
                    axis=0)
                batch_cubicup[k, :, :, :] = utils.cubic_up(
                    sample_downsampled[k, :, :, :],
                    self.model.scaling_factor, axis=0)
        if K.image_data_format() == 'channels_last':
            ch_axis = -1
        else:
            ch_axis = 1
        if self.cubic:
            gt = np.expand_dims(batch_cubicup, ch_axis)
        batch = np.expand_dims(batch, ch_axis)
        if num_outputs == 1:
            if not self.cubic:
                # Self-supervised target: reconstruct the input itself.
                gt = batch
        else:
            # Dummy ground truth for outputs whose losses are computed
            # internally.
            gt = np.zeros((self.bs, 1, 1, 1, 1))
        if self.normalize:
            batch = (batch / 255.).astype('float32')
            gt = (gt / 255.).astype('float32')
        yield ([batch], [gt] * num_outputs)
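# Hypothetical usage of the generator above with the Keras generator API.
# `trainer`, its compiled `keras_model`, the h5 path, and the crop shape are
# illustrative assumptions, not names from this codebase:
#
#   gen = trainer.h5_data_generator('/path/to/train.h5',
#                                   input_shape=(64, 64, 64), num_outputs=1)
#   trainer.keras_model.fit_generator(gen, steps_per_epoch=100, epochs=50)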
def bicubic_main(mode='validation', sc=4.):
    # Evaluate plain bicubic upsampling as a baseline, cropped to the size of
    # a stored FSRCNN prediction so that the scores are comparable.
    filename = utils.get_save_path(
        'FSRCNN_d{0:}_s{1:}_m{2:}'.format(240, 64, 2), exp_no=2, ep_no=49,
        mode=mode)
    prediction = np.array(h5py.File(filename, 'r')['raw'])
    gt = np.array(h5py.File(
        '/nrs/saalfeld/heinrichl/SR-data/FIBSEM/downscaled/bigh5-16iso/'
        + mode + '.h5', 'r')['raw']) / 255.
    gt = np.squeeze(gt)
    downscaled = utils.downscale_manually(gt, sc)
    bicubic = utils.bicubic_up(downscaled, sc, 0)
    prediction, [bicubic] = utils.cut_to_same_size(prediction, [bicubic])
    mse, psnr, bicubic_weighted_mse, bicubic_weighted_psnr = run_eval(
        gt, bicubic)
    return mse, psnr, bicubic_weighted_mse, bicubic_weighted_psnr
def run_eval(groundtruth, prediction, sc=4., axis=0):
    # Compare a prediction to the ground truth, both plainly and weighted by
    # where the bicubic baseline struggles.
    downscaled = utils.downscale_manually(groundtruth, sc, axis)
    bicubic = utils.bicubic_up(downscaled, sc, axis)
    prediction, [groundtruth, bicubic] = utils.cut_to_same_size(
        prediction, [groundtruth, bicubic])
    assert prediction.shape == groundtruth.shape
    assert prediction.shape == bicubic.shape
    mse_error = mse(prediction, groundtruth)
    # PSNR for intensities in [0, 1]: 10 * log10(1 / MSE) = -10 * log10(MSE).
    psnr_error = -10 * np.log10(mse_error)
    print("mse: ", mse_error)
    print("psnr:", psnr_error)
    # Weight each voxel by the squared error of the bicubic baseline, mapped
    # to [0.5, 1.0], so regions that bicubic already handles well count less.
    bicubic_weighting = se_arr(bicubic, groundtruth)
    bicubic_weighting = 0.5 + bicubic_weighting / (np.max(bicubic_weighting) * 2)
    weighted_error_arr = se_arr(prediction, groundtruth) * bicubic_weighting
    weighted_mse_error = np.sum(weighted_error_arr) / groundtruth.size
    weighted_psnr_error = -10 * np.log10(weighted_mse_error)
    print("bicubic_weighted_mse", weighted_mse_error)
    print("bicubic_weighted_psnr", weighted_psnr_error)
    return mse_error, psnr_error, weighted_mse_error, weighted_psnr_error
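# run_eval above relies on mse and se_arr helpers that are not defined in
# this module. A minimal sketch of their assumed semantics (elementwise
# squared error and its mean); the real helpers may differ in detail:
def _se_arr_sketch(a, b):
    """Elementwise squared error between two equally shaped arrays."""
    return (np.asarray(a, dtype=np.float64)
            - np.asarray(b, dtype=np.float64)) ** 2


def _mse_sketch(a, b):
    """Mean squared error, i.e. the mean of the squared-error array."""
    return np.mean(_se_arr_sketch(a, b))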
def run_per_slice_eval(groundtruth, prediction, avg=True, sc=4.):
    # Plot the raw and bicubic-weighted squared error per z-slice, optionally
    # smoothed with a running mean over the scale factor.
    downscaled = utils.downscale_manually(groundtruth, sc)
    bicubic = utils.bicubic_up(downscaled, sc, 0)
    prediction, [groundtruth, bicubic] = utils.cut_to_same_size(
        prediction, [groundtruth, bicubic])
    raw_error_arr = se_arr(prediction, groundtruth)
    bicubic_weighting = se_arr(bicubic, groundtruth)
    print(np.max(bicubic_weighting))
    bicubic_weighting = 0.5 + bicubic_weighting / (np.max(bicubic_weighting) * 2)
    weighted_error_arr = raw_error_arr * bicubic_weighting
    raw_error_per_slice = evaluate_per_slice(raw_error_arr)
    weighted_error_per_slice = evaluate_per_slice(weighted_error_arr)
    if avg:
        raw_error_per_slice, _ = utils.running_mean(raw_error_per_slice, sc)
        weighted_error_per_slice, _ = utils.running_mean(
            weighted_error_per_slice, sc)
    plt.plot(raw_error_per_slice)
    plt.plot(weighted_error_per_slice)
    plt.show()
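# evaluate_per_slice and utils.running_mean are likewise external. A sketch
# of their assumed semantics: mean error per slice along one axis, and a
# moving average used to smooth the per-slice curve. Note that the real
# utils.running_mean returns a pair, of which only the first element is used
# above; these "_sketch" names are illustrative assumptions.
def _evaluate_per_slice_sketch(error_arr, axis=0):
    """Mean of the error array over all axes except `axis`, one value per slice."""
    other_axes = tuple(ax for ax in range(error_arr.ndim) if ax != axis)
    return np.mean(error_arr, axis=other_axes)


def _running_mean_sketch(values, window):
    """Simple moving average with the given window size."""
    window = int(window)
    kernel = np.ones(window) / window
    return np.convolve(values, kernel, mode='valid')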
def make_nn_and_bicubic_for_exp(png_file):
    # Generate downscaled, nearest-neighbor and cubic upsampled versions of a
    # ground-truth PNG, displaying each and saving it next to the input file.
    import scipy.ndimage
    import scipy.misc
    import utils
    import os.path
    exp_dir = os.path.dirname(png_file)
    hr_img = scipy.ndimage.imread(png_file, flatten=True)
    plt.imshow(255 - hr_img, 'Greys')
    plt.show()
    down_img = utils.downscale_manually(hr_img, 4, 0)
    plt.imshow(255 - down_img, 'Greys')
    plt.show()
    print(os.path.join(exp_dir, 'down.png'))
    scipy.misc.imsave(os.path.join(exp_dir, 'down.png'), down_img)
    nn_img = np.repeat(down_img, 4, axis=0)
    plt.imshow(255 - nn_img, 'Greys')
    plt.show()
    scipy.misc.imsave(os.path.join(exp_dir, 'nn.png'), nn_img)
    cubic_img = utils.cubic_up(down_img, 4, axis=0)
    plt.imshow(255 - cubic_img, 'Greys')
    plt.show()
    scipy.misc.imsave(os.path.join(exp_dir, 'cubic.png'), cubic_img)
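# Hypothetical usage of the function above, reusing the experiment path from
# patches(); the concrete path is illustrative:
#
#   make_nn_and_bicubic_for_exp(
#       '/groups/saalfeld/saalfeldlab/posters/miccai-2017/'
#       'with_groundtruth/exp8_new/gt.png')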