def __init__(self, datasets, opt, group='test'):
    datasets = list(datasets.items())
    self.mode = group
    self.opt = opt
    self.dataset_names = [n for n, p in datasets]
    # resolve string dataset names into actual data sources
    datasets = [(create_data_sources(k, opt) if type(k) is str else k, v)
                for k, v in datasets]
    self.datasets = [d for d, p in datasets]
    self.dataset_ids = list(set([dds.typeID for dds in self.datasets
                                 if dds.typeID != -1]))
    self.data_sources = [(d[group], p) for d, p in datasets]
    self.group = group
    self.lengths = [len(d) for d, p in self.data_sources]
    # per-dataset sampling probabilities and their cumulative sums
    self.probs = [p for d, p in self.data_sources]
    self.probs_acc = []
    pc = 0
    for p in self.probs:
        pc += p
        self.probs_acc.append(pc)
    self.probs_max = self.probs_acc[-1]
    self.data_type = None
    self.tags = None
    self.__fpp = None
    self.__repeat = False
    self.__out = None
    self.__additional_source = None
    self.__channel_mask = None
    self.__callback = None
    if self.opt.add_lr_channel:
        # degrade the extra low-resolution channel with randomized blur and noise
        self.wfBlur = GaussianBlurring(
            sigma=['uniform', opt.lr_sigma - 1.5, opt.lr_sigma + 1.5])
        self.wfNoise = AddGaussianNoise(mean=0, sigma=['uniform', 0.5, 1.5])
    else:
        self.wfBlur = None
        self.wfNoise = None
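# A minimal sketch of how the cumulative probabilities built above can drive
# weighted sampling across the grouped data sources: draw a uniform value in
# [0, probs_max) and take the first source whose accumulated probability
# exceeds it. `pick_source` and the example values below are illustrative
# assumptions, not part of the class.
import random

def pick_source(data_sources, probs_acc, probs_max):
    r = random.uniform(0, probs_max)
    for (source, _), acc in zip(data_sources, probs_acc):
        if r < acc:
            return source
    return data_sources[-1][0]  # guard against floating-point edge cases

# e.g. with probs [0.7, 0.3], probs_acc is [0.7, 1.0] and the first
# source is chosen roughly 70% of the time.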
def __init__(self, opt):
    self.typeID = DatasetTypeIDs['tubulin']
    # geometric augmentations
    self.iRot = RandomRotate()
    self.iMerge = Merge()
    self.iSplit = Split([0, 1], [1, 2])
    # crop with a 100-pixel margin first so rotation/elastic warps do not
    # introduce border artifacts into the final center crop
    self.irCropTrain = RandomCropNumpy(size=(opt.fineSize + 100,
                                             opt.fineSize + 100))
    self.ioCropTrain = CenterCropNumpy(size=[opt.fineSize, opt.fineSize])
    self.iCropTest = CenterCropNumpy(size=(1024, 1024))
    self.iElastic = ElasticTransform(alpha=1000, sigma=40)
    # photometric degradations for simulating low-quality input
    self.iBlur = GaussianBlurring(sigma=1.5)
    self.iPoisson = PoissonSubsampling(peak=['lognormal', -0.5, 0.001])
    self.iBG = AddGaussianPoissonNoise(sigma=25, peak=0.06)
    self.train_count = 0
    self.test_count = 0
    self.dim_ordering = opt.dim_ordering
    self.repeat = 1
    self.opt = opt
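# A minimal sketch of how callable transforms like the ones instantiated
# above are typically chained into a single augmentation step. The `compose`
# helper and the usage line are assumptions for illustration, not the
# project's actual pipeline.
def compose(*transforms):
    def apply(x):
        # apply each transform in order, feeding each output to the next
        for t in transforms:
            x = t(x)
        return x
    return apply

# hypothetical usage mirroring a plausible train-time order:
# degrade = compose(self.irCropTrain, self.iElastic, self.ioCropTrain,
#                   self.iBlur, self.iPoisson, self.iBG)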
def test(self, dropout=0):
    self._current_visuals = {}
    self._current_report = {}
    if dropout <= 1:
        # single forward pass; enable dropout only when dropout == 1
        if dropout <= 0:
            self.switch_dropout(False)
        else:
            self.switch_dropout(True)
        results = self.forward()
    else:
        # Monte-Carlo dropout: run `dropout` stochastic forward passes
        self.switch_dropout(True)
        repeat = dropout
        outputsList = []
        uncertaintyList = []
        lastInputs = None
        for i in range(repeat):
            if i > 0:
                self.set_input()
            results = self.forward()
            assert lastInputs is None or np.all(
                lastInputs == results['inputs']), 'inputs must be the same.'
            lastInputs = results['inputs']
            outputsList.append(results['outputs'])
            if 'aleatoric_uncertainty' in results:
                uncertaintyList.append(results['aleatoric_uncertainty'])
        if repeat > 1:
            outputss = np.stack(outputsList)
            vs = np.var(outputss, axis=0)
            results['outputs'] = outputss.mean(axis=0)
            # epistemic uncertainty: std-dev across blurred MC samples
            blur = GaussianBlurring(sigma=self.uncertainty_blur_sigma)
            for outputs in outputsList:
                for j in range(outputs.shape[0]):
                    outputs[j] = blur(outputs[j])
            outputssb = np.stack(outputsList)
            results['epistemic_uncertainty'] = outputssb.std(axis=0)
            if len(uncertaintyList) > 0:
                # total predictive variance = MC variance across passes
                # plus the mean squared aleatoric term
                auncertainty = np.stack(uncertaintyList)
                uncertainty = auncertainty.mean(axis=0)
                results['aleatoric_uncertainty'] = uncertainty
                results['uncertainty_var'] = vs + np.mean(
                    np.square(auncertainty), axis=0)
    self.retrieve_results(results)
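# A minimal numpy sketch of the uncertainty aggregation performed above:
# stack T stochastic forward passes, take the mean as the prediction, use
# the variance across passes as epistemic uncertainty, and add the mean
# squared aleatoric term for the total predictive variance (law of total
# variance). The array shapes are made-up assumptions.
import numpy as np

T, N = 8, 4                       # passes, output size (illustrative)
outputs = np.random.rand(T, N)    # stand-in for stacked MC-dropout outputs
aleatoric = np.random.rand(T, N)  # stand-in for predicted aleatoric sigmas

prediction = outputs.mean(axis=0)
epistemic_var = outputs.var(axis=0)
total_var = epistemic_var + np.mean(np.square(aleatoric), axis=0)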
def predict(self, data_source, dropout=0, cached=False, label=None,
            step_callback=None, repeat_callback=None, verbose=1):
    repeat = dropout
    if dropout == 0:
        self.switch_dropout(False)
        repeat = 1
    else:
        self.switch_dropout(True)
    # data_loader_test = CreateDataLoader(data_source, self.opt, cached=cached, verbose=verbose)
    # dataset_test = data_loader_test.load_data()
    # dataset_test_size = len(data_loader_test)
    queue_start, queue_stop = networks.setup_data_loader(
        data_source, self.enqueue_data, shuffle=False, repeat=repeat,
        control_nc=self.opt.control_nc, seed=self.opt.seed)
    print("#samples = {}".format(len(data_source)))
    steps_per_epoch = int(math.ceil(len(data_source) / self.opt.batchSize))
    self._current_visuals = {}
    self._current_report = {}
    max_steps = 2**32
    # start the data queue
    queue_start(self.sess, callback=self.stop_coord)
    options = tf.RunOptions(timeout_in_ms=500000)
    max_steps = min(steps_per_epoch, max_steps)
    for step in range(max_steps):
        outputsList = []
        uncertaintyList = []
        lastInputs = None
        for i in range(repeat):
            fetches = {}
            fetches.update(self.loss_fetches)
            fetches.update(self.display_fetches)
            results = self.sess.run(
                fetches,
                feed_dict={'dropout_prob:0': self.get_dropout_prob()},
                options=options)
            assert lastInputs is None or np.all(
                lastInputs == results['inputs']), 'inputs must be the same.'
            print('{}-{}'.format(step, results['paths'][0][0]))
            for k, v in results.items():
                if k in self.loss_fetches:
                    self._current_report[k] = v
                    print('{}={}'.format(k, v), end=', ')
            print('')
            lastInputs = results['inputs']
            outputsList.append(results['outputs'])
            if 'aleatoric_uncertainty' in results:
                uncertaintyList.append(results['aleatoric_uncertainty'])
            if repeat_callback:
                try:
                    details = {'step': step, 'repeat': i}
                    repeat_callback(self, details)
                except Exception as e:
                    print('\nerror in repeat callback: {}'.format(e))
        if repeat > 1:
            # aggregate Monte-Carlo dropout passes into a mean prediction
            # and epistemic/aleatoric uncertainty maps
            outputss = np.stack(outputsList)
            vs = np.var(outputss, axis=0)
            results['outputs'] = outputss.mean(axis=0)
            blur = GaussianBlurring(sigma=self.uncertainty_blur_sigma)
            for outputs in outputsList:
                for j in range(outputs.shape[0]):
                    outputs[j] = blur(outputs[j])
            outputssb = np.stack(outputsList)
            results['epistemic_uncertainty'] = outputssb.std(axis=0)
            if len(uncertaintyList) > 0:
                auncertainty = np.stack(uncertaintyList)
                uncertainty = auncertainty.mean(axis=0)
                results['aleatoric_uncertainty'] = uncertainty
                results['uncertainty_var'] = vs + np.mean(
                    np.square(auncertainty), axis=0)
        self.retrieve_results(results)
        self.save_current_visuals(label)
        if step_callback:
            try:
                details = {'step': step}
                step_callback(self, details)
            except Exception as e:
                print('\nerror in step callback: {}'.format(e))
    queue_stop()
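# A hypothetical usage sketch for `predict`: run 10 Monte-Carlo dropout
# passes per batch and log progress from a step callback. `model` and
# `test_source` are assumptions standing in for an instantiated model and
# a data source built by the dataset class above.
def on_step(model, details):
    print('finished step {}'.format(details['step']))

# model.predict(test_source, dropout=10, label='mc_test',
#               step_callback=on_step)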