def load_sample(self, lq, hq):
    """Load a low-/high-quality OCT pair and cache it on the instance.

    Args:
        lq: path to the low-quality DICOM volume.
        hq: path to the high-quality DICOM volume.

    Side effects:
        Records both paths in ``self.sample_files`` and stores the loaded
        volumes, reshaped to (49, 512, 512, 1), in ``self.samples`` under
        the keys ``'lq'`` and ``'hq'``.
    """
    self.sample_files = [lq, hq]

    # Load both volumes with padding enabled and mean-centred normalization.
    low_volume = io.OCTVolume(lq, load=True, pad={},
                              normalize={'min': 'mean-0.5'})
    high_volume = io.OCTVolume(hq, load=True, pad={},
                               normalize={'min': 'mean-0.5'})

    # Add a trailing channel axis: 49 b-scans of 512x512 single-channel images.
    # NOTE(review): assumes every volume has exactly 49 slices of 512x512 —
    # confirm against the acquisition protocol.
    self.samples = {
        'hq': np.reshape(high_volume.image_data, (49, 512, 512, 1)),
        'lq': np.reshape(low_volume.image_data, (49, 512, 512, 1)),
    }
def load(path):
    """Load an OCT volume from *path* and return its raw image data.

    Args:
        path: filesystem path to a DICOM file.

    Returns:
        The volume's ``image_data`` array, or ``None`` if the file is not
        a valid DICOM.
    """
    print('loading: ' + path)
    try:
        # Renamed from `oct` — the original shadowed the builtin oct().
        volume = oct_io.OCTVolume(path, load=True, pad={}, normalize=True)
    except InvalidDicomError:
        # Best-effort loader: report and signal failure with None rather
        # than propagating the parse error.
        print('invalid file:' + path)
        return None
    return volume.image_data
def setup():
    """Build an inference-mode CyclicGAN and load a sample LQ/HQ OCT pair.

    Returns:
        Tuple of (hq_oct, lq_oct, cgan, checkpoint, inputs) where
        ``checkpoint`` is the path of the weights to restore and
        ``inputs`` maps 'hq'/'lq' to their (1, 512, 512, 1) placeholders.
    """
    checkpoint = '/media/network/DL_PC/ilja/cycoct-skip_processed/gen_norm-in_act-selu_scale-3_res-6x3_f-16/dis_norm-id_act-selu_f-1/10-cyc_1-dis/cGAN.ckpt-333200'
    logdir = '/home/kazuki/testing'

    # Generator/discriminator architectures matching the checkpoint above.
    gen_arch = models.CyclicGAN.generate_gen_arch(
        3, 6, 3, tf.nn.selu, filters=16, skip_conn=True)
    dis_arch = models.CyclicGAN.generate_dis_arch(
        512, tf.nn.selu, norm_func=models.identity)

    # Single-image placeholders for each quality domain.
    inputs = {
        'hq': tf.placeholder(tf.float32, (1, 512, 512, 1), 'HQ-Input'),
        'lq': tf.placeholder(tf.float32, (1, 512, 512, 1), 'LQ-Input'),
    }

    cgan = models.CyclicGAN(gen_arch, dis_arch, inputs, None, logdir,
                            graph=tf.get_default_graph(),
                            inference_mode=True)

    # Sample DICOM volumes from the image repository.
    lq_oct = io.OCTVolume(
        '/media/network/ImageRepository/cimg_db/24573/564875/6/R/pat.dcm')
    hq_oct = io.OCTVolume(
        '/media/network/ImageRepository/cimg_db/24573/564879/6/R/pat.dcm')
    for volume in (lq_oct, hq_oct):
        volume.load()
        volume.pad()

    return hq_oct, lq_oct, cgan, checkpoint, inputs
# Randomize the HQ slice order in place, then keep it as a plain list.
# NOTE(review): `indices_hq`, `export_dir`, `file_frame` and `model` are
# defined outside this fragment — this span is part of a larger routine.
np.random.shuffle(indices_hq)
indices_hq = list(indices_hq)
# set directories for export
export_dir_lq = os.path.join(export_dir, 'LQ-Comparison')
export_dir_hq = os.path.join(export_dir, 'HQ-Comparison')
# initialize record of exported slices
export_record = []
# One group per (patient id, scan position); each group holds both the
# 'low' and 'high' quality rows for that volume.
for (pid, pos), frame in file_frame.groupby(level=[0, 1]):
    # load OCTs (padded, mean-centred normalization) as raw image arrays
    lq_volume = oct_io.OCTVolume(frame.loc[pid, pos, 'low']['Filename'],
                                 load=True,
                                 pad={},
                                 normalize={
                                     'min': 'mean-0.5'
                                 }).image_data
    hq_volume = oct_io.OCTVolume(frame.loc[pid, pos, 'high']['Filename'],
                                 load=True,
                                 pad={},
                                 normalize={
                                     'min': 'mean-0.5'
                                 }).image_data
    # Walk the volumes slice-by-slice in lockstep.
    for lq, hq in zip(lq_volume, hq_volume):
        # enhance image: add batch and channel axes for the model, then
        # strip them from the generated output again.
        enhanced = model.generate(lq[np.newaxis, ..., np.newaxis],
                                  'hq')[0, ..., 0]
def __call__(self, csv):
    """Run the denoising analysis over every OCT volume listed in *csv*.

    For each (patient, position) pair the low-quality volume is registered
    to the high-quality one, every configured denoising method is applied
    per slice, and quality measurements are collected for the original and
    each denoised variant.

    Args:
        csv: path to the test-set CSV (one row per volume/quality).

    Returns:
        A pandas DataFrame of all measurements; it is also written to
        ``SAVEFILE``.
    """
    print('beginning analysis...', '\tcsv:\t{}'.format(csv), sep='\n')

    # Build the input dataframe with resolved filenames, indexed by
    # (PID, Position, Quality).
    # FIX: pd.DataFrame.from_csv was deprecated in pandas 0.21 and removed
    # in 1.0 — pd.read_csv is the supported replacement.
    data = pd.read_csv(csv, sep=',', index_col=None)
    data['Filename'] = data.apply(partial(build_filenames,
                                          root=self.image_repository,
                                          columns=self.path_columns),
                                  axis=1)
    data.set_index(['PID', 'Position', 'Quality'], inplace=True)

    # NOTE(review): the /4 factor presumably accounts for the low/high rows
    # per volume plus a further pairing — confirm against the CSV layout.
    total_slices = data.loc[:, 'ImgCount'].sum() / 4
    analyzed_slices = 0
    print('\tnumber of datapoints:\t{}'.format(len(data.index)))

    # Accumulates one measurement dict per (slice, method).
    results = []

    # Cycle over volumes in the test set.
    for (pid, pos), frame in data.groupby(level=[0, 1]):
        # Shared metadata for every result produced from this volume.
        result_template = {'PID': pid,
                           'SID': frame.loc[pid, pos, 'low']['SID'],
                           'Slice': None,
                           'Position': pos,
                           'ExamType1': frame.loc[pid, pos, 'low']['ExamType1'],
                           'Filename': frame.loc[pid, pos, 'low']['Filename'],
                           'Method': None}

        # Load both quality levels (padded, normalized) as raw arrays.
        lq = io.OCTVolume(frame.loc[pid, pos, 'low']['Filename'],
                          load=True, pad={}, normalize=True).image_data
        hq = io.OCTVolume(frame.loc[pid, pos, 'high']['Filename'],
                          load=True, pad={}, normalize=True).image_data

        # Register the low-quality volume against the high-quality one.
        lq = self.registrator.register_volume(hq, lq, offsets=0.5)

        # Cycle over b-scans in the volume.
        for index, (image, reference) in enumerate(zip(lq, hq)):
            # Failed registrations come back as None: count them toward
            # progress but skip the measurements.
            if image is None:
                analyzed_slices += 1
                continue

            rois = self.get_rois(reference)
            background = self.get_background(reference)

            # The untouched slice is measured alongside the denoised ones.
            datapoints = [Datapoint(template=result_template,
                                    index=index,
                                    reference=reference,
                                    rois=rois,
                                    background=background,
                                    method='original',
                                    image=image)]
            # Apply every configured denoising method to this slice.
            datapoints += [self.apply_denoising(datapoints[0], method)
                           for method in self.methods.keys()]

            # Perform measurements on the original and all denoised images.
            results += [self.apply_measurements(datapoint)
                        for datapoint in datapoints]
            analyzed_slices += 1
            print('\r\tprogress: \t{}%'.format(
                round(100 * analyzed_slices / total_slices, 2)))

    # Convert results to a dataframe, persist, and return.
    results_frame = pd.DataFrame(data=results)
    results_frame.to_csv(SAVEFILE)
    return results_frame