def add_patch(self, labels, predicted, weights,
              coord=None, volname=None, patches=None):
  """Evaluates single-object segmentation quality."""

  predicted = mask.crop_and_pad(predicted, (0, 0, 0), self._eval_shape)
  weights = mask.crop_and_pad(weights, (0, 0, 0), self._eval_shape)
  labels = mask.crop_and_pad(labels, (0, 0, 0), self._eval_shape)
  loss, = self.sess.run([self.eval_loss], {
      self.eval_labels: labels,
      self.eval_preds: predicted
  })
  self.loss += loss
  self.total_voxels += labels.size
  self.masked_voxels += np.sum(weights == 0.0)

  pred_mask = predicted >= self.eval_threshold
  true_mask = labels > 0.5
  pred_bg = np.logical_not(pred_mask)
  true_bg = np.logical_not(true_mask)

  self.tp += np.sum(pred_mask & true_mask)
  self.fp += np.sum(pred_mask & true_bg)
  self.fn += np.sum(pred_bg & true_mask)
  self.tn += np.sum(pred_bg & true_bg)
  self.num_patches += 1

  predicted = expit(predicted)
  self.images_xy.append(self.slice_image(labels, predicted, weights, 0))
  self.images_xz.append(self.slice_image(labels, predicted, weights, 1))
  self.images_yz.append(self.slice_image(labels, predicted, weights, 2))
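# The tp/fp/fn/tn counters accumulated above are sufficient to derive
# standard voxel-wise quality metrics at summary time. A minimal sketch of
# such a helper (hypothetical; precision_recall_f1 is not part of the
# original EvalTracker):
def precision_recall_f1(tp, fp, fn):
  """Computes voxel-wise precision, recall and F1 from confusion counts."""
  precision = tp / max(tp + fp, 1)
  recall = tp / max(tp + fn, 1)
  f1 = 2.0 * precision * recall / max(precision + recall, 1e-12)
  return precision, recall, f1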
def add_patch_v2(self, labels, predicted, weights,
                 coord=None, volname=None, patches=None):
  """Evaluates single-object segmentation quality, tracing the eval step.

  Identical to add_patch(), except that the session.run() call is profiled
  and a Chrome trace is written to timeline_01.json. Requires:
    from tensorflow.python.client import timeline
  """
  # Profiling: collect a full execution trace of the eval step.
  options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
  run_metadata = tf.RunMetadata()

  predicted = mask.crop_and_pad(predicted, (0, 0, 0), self._eval_shape)
  weights = mask.crop_and_pad(weights, (0, 0, 0), self._eval_shape)
  labels = mask.crop_and_pad(labels, (0, 0, 0), self._eval_shape)
  loss, = self.sess.run([self.eval_loss],
                        {self.eval_labels: labels,
                         self.eval_preds: predicted},
                        options=options, run_metadata=run_metadata)

  # Dump the collected trace in Chrome trace format. Note that the fixed
  # filename means each call overwrites the trace of the previous one.
  fetched_timeline = timeline.Timeline(run_metadata.step_stats)
  chrome_trace = fetched_timeline.generate_chrome_trace_format()
  with open('timeline_01.json', 'w') as f:
    f.write(chrome_trace)

  self.loss += loss
  self.total_voxels += labels.size
  self.masked_voxels += np.sum(weights == 0.0)

  pred_mask = predicted >= self.eval_threshold
  true_mask = labels > 0.5
  pred_bg = np.logical_not(pred_mask)
  true_bg = np.logical_not(true_mask)

  self.tp += np.sum(pred_mask & true_mask)
  self.fp += np.sum(pred_mask & true_bg)
  self.fn += np.sum(pred_bg & true_mask)
  self.tn += np.sum(pred_bg & true_bg)
  self.num_patches += 1

  predicted = expit(predicted)
  self.images_xy.append(self.slice_image(labels, predicted, weights, 0))
  self.images_xz.append(self.slice_image(labels, predicted, weights, 1))
  self.images_yz.append(self.slice_image(labels, predicted, weights, 2))
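# The trace written by add_patch_v2() can be inspected by loading
# timeline_01.json at chrome://tracing. Since the fixed filename keeps only
# the most recent trace, a hedged alternative (hypothetical helper, not in
# the original code) writes one uniquely named trace per profiled step:
def write_chrome_trace(run_metadata, step):
  """Dumps the Chrome trace for one traced session.run() call."""
  from tensorflow.python.client import timeline
  fetched = timeline.Timeline(run_metadata.step_stats)
  with open('timeline_%06d.json' % step, 'w') as f:
    f.write(fetched.generate_chrome_trace_format())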
def get_example(load_example, eval_tracker, model, get_offsets):
  """Generates individual training examples.

  Args:
    load_example: callable returning a tuple of image and label ndarrays
        as well as the seed coordinate and volume name of the example
    eval_tracker: EvalTracker object
    model: FFNModel object
    get_offsets: iterable of (x, y, z) offsets to investigate within the
        training patch

  Yields:
    tuple of:
      seed array, shape [1, z, y, x, 1]
      image array, shape [1, z, y, x, 1]
      label array, shape [1, z, y, x, 1]
  """
  seed_shape = train_canvas_size(model).tolist()[::-1]

  while True:
    full_patches, full_labels, loss_weights, coord, volname = load_example()

    # Write a random fraction of paired examples to images and make sure they
    # have matching and correct orientations.
    if FLAGS.debug:
      if random.uniform(0, 1) > 0.999:
        write_patch_and_label_to_img(
            patch=full_patches[0, 0, :, :, 0] * FLAGS.image_stddev +
            FLAGS.image_mean,
            label=full_labels[0, 0, :, :, 0] * 255,
            unique_id='_'.join(coord[0].astype(str).tolist()),
            dirname="./debug")

    # Always start with a clean seed.
    seed = logit(mask.make_seed(seed_shape, 1, pad=FLAGS.seed_pad))

    for off in get_offsets(model, seed):
      predicted = mask.crop_and_pad(seed, off, model.input_seed_size[::-1])
      patches = mask.crop_and_pad(full_patches, off,
                                  model.input_image_size[::-1])
      labels = mask.crop_and_pad(full_labels, off, model.pred_mask_size[::-1])
      weights = mask.crop_and_pad(loss_weights, off,
                                  model.pred_mask_size[::-1])

      # Necessary, since the caller is going to update the array and these
      # changes need to be visible in the following iterations.
      assert predicted.base is seed

      yield predicted, patches, labels, weights

    # TODO(jpgard): track volname in eval_tracker. Currently nothing is done
    # with volname, but it should be monitored to ensure coverage of all
    # training volumes. Similarly for coord: it would be good to check the
    # coordinates covered, or at least a sample of them.
    eval_tracker.add_patch(full_labels, seed, loss_weights, coord, volname,
                           full_patches)
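# write_patch_and_label_to_img() is called above but not defined in this
# section. A minimal sketch of what such a debug helper could look like,
# assuming PIL is available and that both inputs are 2d arrays already
# scaled to [0, 255] (name and signature come from the call site; the body
# is an assumption):
import os
import numpy as np
from PIL import Image

def write_patch_and_label_to_img(patch, label, unique_id, dirname):
  """Saves a patch/label pair as grayscale PNGs for visual inspection."""
  os.makedirs(dirname, exist_ok=True)
  for name, arr in (('patch', patch), ('label', label)):
    img = Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))
    img.save(os.path.join(dirname, '%s_%s.png' % (unique_id, name)))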
def get_example(load_example, eval_tracker, model, get_offsets):
  """Generates individual training examples.

  Args:
    load_example: callable returning a tuple of image and label ndarrays
        as well as the seed coordinate and volume name of the example
    eval_tracker: EvalTracker object
    model: FFNModel object
    get_offsets: iterable of (x, y, z) offsets to investigate within the
        training patch

  Yields:
    tuple of:
      seed array, shape [1, z, y, x, 1]
      image array, shape [1, z, y, x, 1]
      label array, shape [1, z, y, x, 1]
  """
  seed_shape = train_canvas_size(model).tolist()[::-1]

  while True:
    full_patches, full_labels, loss_weights, coord, volname = load_example()

    # Always start with a clean seed.
    seed = logit(mask.make_seed(seed_shape, 1, pad=FLAGS.seed_pad))

    for off in get_offsets(model, seed):
      predicted = mask.crop_and_pad(seed, off, model.input_seed_size[::-1])
      patches = mask.crop_and_pad(full_patches, off,
                                  model.input_image_size[::-1])
      labels = mask.crop_and_pad(full_labels, off, model.pred_mask_size[::-1])
      weights = mask.crop_and_pad(loss_weights, off,
                                  model.pred_mask_size[::-1])

      # Necessary, since the caller is going to update the array and these
      # changes need to be visible in the following iterations.
      assert predicted.base is seed

      yield predicted, patches, labels, weights

    # eval_tracker.add_patch(
    #     full_labels, seed, loss_weights, coord, volname, full_patches)
    eval_tracker.add_patch_v2(full_labels, seed, loss_weights, coord, volname,
                              full_patches)
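# get_example() yields views into a per-example seed array, so a caller can
# batch several generators and write network predictions back into each
# seed. A hedged sketch of such a consumer (an illustration of the pattern,
# not necessarily how the surrounding code batches examples; assumes numpy
# is imported as np):
def get_batch(load_example, eval_tracker, model, batch_size, get_offsets):
  """Yields batched (seed, image, label, weight) tuples for training."""
  generators = [get_example(load_example, eval_tracker, model, get_offsets)
                for _ in range(batch_size)]
  while True:
    seeds, patches, labels, weights = zip(*[next(g) for g in generators])
    batched_seeds = np.concatenate(seeds)
    yield (batched_seeds, np.concatenate(patches),
           np.concatenate(labels), np.concatenate(weights))
    # The caller updates batched_seeds in place with new predictions;
    # distribute those updates back to the individual seed views so the
    # generators see them on the next iteration.
    for i in range(batch_size):
      seeds[i][:] = batched_seeds[i, ...]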
def max_pred_offsets(model, seed):
  """Generates offsets with the policy used for inference."""
  # Always start at the center.
  queue = deque([(0, 0, 0)])
  done = set()

  train_image_radius = train_image_size(model) // 2
  input_image_radius = np.array(model.input_image_size) // 2

  while queue:
    offset = queue.popleft()

    # Drop any offsets that would take us beyond the image fragment we
    # loaded for training.
    if np.any(np.abs(np.array(offset)) + input_image_radius >
              train_image_radius):
      continue

    # Ignore locations that were visited previously.
    quantized_offset = (
        offset[0] // max(model.deltas[0], 1),
        offset[1] // max(model.deltas[1], 1),
        offset[2] // max(model.deltas[2], 1))

    if quantized_offset in done:
      continue

    done.add(quantized_offset)

    yield offset

    # Look for new offsets within the updated seed.
    curr_seed = mask.crop_and_pad(seed, offset, model.pred_mask_size[::-1])
    todos = sorted(
        movement.get_scored_move_offsets(
            model.deltas[::-1], curr_seed[0, ..., 0],
            threshold=logit(FLAGS.threshold)), reverse=True)
    queue.extend((x[2] + offset[0], x[1] + offset[1], x[0] + offset[2])
                 for _, x in todos)
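# The quantization in max_pred_offsets() coarsens offsets to one cell per
# FOV step, so positions less than model.deltas apart are visited at most
# once. A self-contained illustration of the dedupe logic (deltas chosen
# arbitrarily for the example):
def _quantize(offset, deltas=(8, 8, 4)):
  return tuple(o // max(d, 1) for o, d in zip(offset, deltas))

assert _quantize((3, 5, 1)) == _quantize((0, 0, 0))  # same FOV cell: skipped
assert _quantize((8, 0, 0)) != _quantize((0, 0, 0))  # new cell: visited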