def train(self, groundTruth, noisy, learning_rate=None):
    # One optimizer step of the denoiser on a (ground truth, noisy) batch.
    groundTruth = ut.unify_form(groundTruth)
    noisy = ut.unify_form(noisy)
    self.sess.run(self.optimizer,
                  feed_dict={self.true: groundTruth,
                             self.data: noisy,
                             self.learning_rate: learning_rate})
def train(self, groundTruth, adversarial, learning_rate):
    # One optimizer step of the adversarial regularizer on a
    # (ground truth, adversarial example) batch.
    groundTruth = ut.unify_form(groundTruth)
    adversarial = ut.unify_form(adversarial)
    self.sess.run(self.optimizer,
                  feed_dict={self.true: groundTruth,
                             self.gen: adversarial,
                             self.learning_rate: learning_rate})
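# Hedged usage sketch (not part of the original code): a minimal loop driving
# the train/test methods of the adversarial regularizer above. The names
# `regularizer` and `load_batch`, the step count and the learning rate are
# hypothetical placeholders; object construction and the data pipeline are
# assumed to exist elsewhere.
def training_loop_sketch(regularizer, load_batch, steps=1000, lr=1e-4):
    for step in range(steps):
        groundTruth, adversarial = load_batch('train')   # hypothetical helper
        regularizer.train(groundTruth, adversarial, learning_rate=lr)
        if step % 100 == 0:
            # periodically log summaries on a held-out batch
            gt_val, adv_val = load_batch('test')         # hypothetical helper
            regularizer.test(gt_val, adv_val)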
def test(self, groundTruth, adversarial):
    # Write the merged network summaries for a (ground truth, adversarial
    # example) batch at the current global step.
    groundTruth = ut.unify_form(groundTruth)
    adversarial = ut.unify_form(adversarial)
    merged, step = self.sess.run([self.merged_network, self.global_step],
                                 feed_dict={self.true: groundTruth,
                                            self.gen: adversarial})
    self.writer.add_summary(merged, global_step=step)
def test(self, groundTruth, noisy, writer='train'):
    # Write the merged network summaries to the train or test summary writer.
    groundTruth = ut.unify_form(groundTruth)
    noisy = ut.unify_form(noisy)
    merged, step = self.sess.run([self.merged_network, self.global_step],
                                 feed_dict={self.true: groundTruth,
                                            self.data: noisy})
    if writer == 'train':
        self.writer_train.add_summary(merged, global_step=step)
    elif writer == 'test':
        self.writer_test.add_summary(merged, global_step=step)
def evaluate(self, data):
    # Apply the denoiser to a single input and undo the l2 normalization.
    data_uf = ut.unify_form(data)
    norm = 1.0
    if self.normalize == 'l2':
        norm, data_uf = normalize_np(data_uf, return_norm=True)
    elif self.normalize == 'NO':
        pass
    return norm * self.sess.run(self.denoised,
                                feed_dict={self.data: data_uf})[0, ..., 0]
def evaluate(self, fourierData):
    # TODO: add scaling
    # Compute the regularizer gradient for Fourier-space data: map to real
    # space, normalize, evaluate the network gradient and map the result
    # back to Fourier space.
    fourierData = ut.unify_form(fourierData)
    real_data = self.sess.run(self.real_data,
                              feed_dict={self.fourier_data: fourierData})
    norm = 1.0
    if self.normalize == 'l2':
        norm, normalized_data = normalize_np(real_data, return_norm=True)
    else:
        normalized_data = real_data
    grad = self.sess.run(self.gradient,
                         feed_dict={self.gen_normed: normalized_data})
    USE_ADJOINT_IRFFT = False
    if USE_ADJOINT_IRFFT:
        return norm * ut.adjoint_irfft(grad[0, ..., 0])
    else:
        return norm * ut.rfft(grad[0, ..., 0])
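# Hedged usage sketch (assumption, not original code): one way the Fourier-space
# gradient returned by `evaluate` could drive a regularized reconstruction.
# The data-term gradient `kernel * x - complex_data` corresponds to the
# quadratic data term whose Tikhonov-damped version is solved in closed form in
# the cell further below; `regularizer`, `lam`, `step_size`, `n_iter` and the
# assumption that gradient and data shapes agree are all placeholders.
import numpy as np

def gradient_descent_sketch(regularizer, complex_data, kernel,
                            lam=1e-3, step_size=1e-7, n_iter=50):
    x = np.divide(complex_data, kernel + 1e6)      # Tikhonov-style starting point
    for _ in range(n_iter):
        data_grad = kernel * x - complex_data      # gradient of the data term
        reg_grad = regularizer.evaluate(x)         # learned prior, Fourier space
        x = x - step_size * (data_grad + lam * reg_grad)
    return x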
def l2_gt(x):
    # l2 distance to the ground truth after rescaling and registration.
    image = unify_form(np.copy(x))
    r = Rescaler(image)
    r.normalize(image)
    reg = Registrator.register(image=image, reference=ground_truth)
    return l2(reg - ground_truth)
# Load the Fourier-space data, the weighting kernel, the ground truth and the
# naive reconstruction referenced by the RELION external_reconstruct star file.
with mrcfile.open(
        file['external_reconstruct_general']['rlnExtReconsDataReal']) as mrc:
    data_real = mrc.data.copy()
with mrcfile.open(
        file['external_reconstruct_general']['rlnExtReconsDataImag']) as mrc:
    data_im = mrc.data.copy()
with mrcfile.open(
        file['external_reconstruct_general']['rlnExtReconsWeight']) as mrc:
    kernel = mrc.data.copy()
with mrcfile.open(locate_gt(PDB_ID, full_path=False)) as mrc:
    ground_truth = mrc.data.copy()
with mrcfile.open(
        file['external_reconstruct_general']['rlnExtReconsResult']) as mrc:
    naive_recon = mrc.data.copy()

ground_truth = unify_form(ground_truth)
r_gt = Rescaler(ground_truth)
r_gt.normalize(ground_truth)

complex_data = data_real + 1j * data_im


# In[23]:


REG = 0.03

# Tikhonov-regularized reconstruction in Fourier space: damp the kernel and
# divide the data by it elementwise.
tikhonov_kernel = kernel + 1e6
print(tikhonov_kernel.max(), tikhonov_kernel.min())
preconditioner = np.abs(np.divide(1, tikhonov_kernel))
preconditioner /= preconditioner.max()
print(preconditioner.max() / preconditioner.min())
tikhonov = np.divide(complex_data, tikhonov_kernel)
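# Hedged follow-up sketch (assumption, not original code): the elementwise
# division above is the closed-form minimizer of the damped quadratic
# 0.5 * <x, tikhonov_kernel * x> - Re<complex_data, x>, so multiplying back by
# the kernel must recover the data. The inverse FFT to real space is only
# indicative; np.fft.irfftn's axis and half-grid conventions may need adjusting
# to match RELION's layout.
assert np.allclose(tikhonov * tikhonov_kernel, complex_data)
tikhonov_real = np.fft.irfftn(tikhonov)
print('Real-space Tikhonov reconstruction shape:', tikhonov_real.shape)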