def setup_losses(
    gitapp: GetInputTargetAndPredictedParameters,
    name: str = None,
) -> Tuple[Dict[str, lt.LabeledTensor], Dict[str, lt.LabeledTensor]]:
  """Creates cross entropy losses.

  Args:
    gitapp: GetInputTargetAndPredictedParameters.
    name: Optional op name.

  Returns:
    A dictionary of tensors with the input reconstruction losses.
    A dictionary of tensors with the target prediction losses.
  """
  logging.info('Setting up losses')
  with tf.name_scope(name, 'setup_losses', []) as scope:
    (_, input_lt, target_lt, predict_input_lt,
     predict_target_lt) = get_input_target_and_predicted(gitapp)

    predicted_size = len(predict_input_lt.axes['row'])
    visualize.summarize_image(
        visualize.error_panel(
            util.crop_center(predicted_size, input_lt),
            visualize.to_softmax(predict_input_lt),
            name=scope + 'input_patch_error_panel'))
    visualize.summarize_image(
        visualize.error_panel(
            util.crop_center(predicted_size, target_lt),
            visualize.to_softmax(predict_target_lt),
            name=scope + 'target_patch_error_panel'))

    def mean(lts: Dict[str, lt.LabeledTensor]) -> tf.Tensor:
      sum_op = tf.add_n([t.tensor for t in lts.values()])
      return sum_op / float(len(lts))

    tag = 'input'
    input_loss_lts = itemize_losses(
        gitapp.loss, input_lt, predict_input_lt, name=scope + tag)
    tf.summary.scalar(name='loss/' + tag, tensor=mean(input_loss_lts))

    tag = 'target'
    target_loss_lts = itemize_losses(
        gitapp.loss, target_lt, predict_target_lt, name=scope + tag)
    tf.summary.scalar(name='loss/' + tag, tensor=mean(target_loss_lts))

    variables = tf.global_variables()
    for v in variables:
      tf.summary.histogram(name='variable/' + v.name, values=v)

    return input_loss_lts, target_loss_lts
def func(data):
    data = np.rot90(data, k=-1, axes=(1, 2))
    data = crop_center(data, 256, 256)
    data = minmax_normalize(data)
    return data
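# Several snippets in this section call crop_center(data, cropx, cropy) on
# NumPy arrays. The helper itself is not shown here; the following is a
# minimal sketch of what such a function could look like, assuming an
# (H, W[, C]) layout whose first two axes are center-cropped. The name and
# behavior are assumptions for illustration, not the repository's actual
# implementation; a batched (N, H, W) array as in the snippet above would
# crop axes 1 and 2 instead.
import numpy as np


def crop_center(data, cropx, cropy):
    """Returns the central cropy-by-cropx window of an (H, W[, C]) array."""
    height, width = data.shape[0], data.shape[1]
    startx = (width - cropx) // 2
    starty = (height - cropy) // 2
    return data[starty:starty + cropy, startx:startx + cropx]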
def build_model(source, target):
    """Build the model graph."""
    with tf.name_scope('model'):
        prediction = model.build_model(
            source, FLAGS.scale, training=True, reuse=False)
        target_cropped = util.crop_center(target, tf.shape(prediction)[1:3])
        tf.summary.histogram('prediction', prediction)
        tf.summary.histogram('groundtruth', target)
    return prediction, target_cropped
def add_loss(
    loss: Callable,
    target_lt: lt.LabeledTensor,
    predicted_lt: lt.LabeledTensor,
    name: str = None,
) -> lt.LabeledTensor:
  """Add a loss.

  Args:
    loss: Loss function to use.
      Arguments should be (target, mask, prediction, name).
    target_lt: The target values in the canonical format.
    predicted_lt: The predicted values in the canonical prediction format.
    name: Optional op name.

  Returns:
    A scalar tensor representing the weighted cross-entropy loss.
  """
  with tf.name_scope(name, 'loss', [target_lt, predicted_lt]) as scope:
    target_lt = lt.transpose(target_lt, util.CANONICAL_AXIS_ORDER)
    predicted_lt = lt.transpose(predicted_lt,
                                util.CANONICAL_PREDICTION_AXIS_ORDER)

    predicted_size = len(predicted_lt.axes['row'])
    assert predicted_size == len(predicted_lt.axes['column'])
    target_lt = util.crop_center(predicted_size, target_lt)

    signal_lt = lt.select(target_lt, {'mask': False})
    mask_lt = lt.select(target_lt, {'mask': True})

    signal_lt = lt.reshape(signal_lt, util.CANONICAL_AXIS_ORDER[:-1], ['batch'])
    mask_lt = lt.reshape(mask_lt, util.CANONICAL_AXIS_ORDER[:-1], ['batch'])
    predicted_lt = lt.reshape(predicted_lt,
                              util.CANONICAL_PREDICTION_AXIS_ORDER[:-1],
                              ['batch'])

    assert list(signal_lt.axes.keys()) == ['batch']
    assert list(mask_lt.axes.keys()) == ['batch']
    assert list(predicted_lt.axes.keys()) == ['batch', 'class']

    signal_lt = tensorcheck.bounds(0.0, 1.0, signal_lt)
    mask_lt = tensorcheck.bounds(0.0, 1.0, mask_lt)

    loss_lt = loss(signal_lt, mask_lt, predicted_lt)

    return lt.identity(loss_lt, name=scope)
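# The LabeledTensor snippets above and below call
# util.crop_center(predicted_size, labeled_tensor), i.e. size first and
# tensor second. A minimal sketch of that signature, assuming the tensor has
# 'row' and 'column' axes that should both be center-cropped to `size`; this
# is an illustrative assumption, not the repository's actual util.crop_center.
from tensorflow.contrib import labeled_tensor as lt


def crop_center(size, input_lt):
  """Center-crops the 'row' and 'column' axes of a LabeledTensor to `size`."""
  num_rows = len(input_lt.axes['row'])
  num_columns = len(input_lt.axes['column'])
  row_offset = (num_rows - size) // 2
  column_offset = (num_columns - size) // 2
  return lt.slice(
      input_lt, {
          'row': slice(row_offset, row_offset + size),
          'column': slice(column_offset, column_offset + size),
      })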
def build_model(source, target):
    """Build the model graph."""
    with tf.name_scope('model'):
        prediction = model.build_model(
            source, FLAGS.scale, training=True, reuse=False)
        # Debugging snippet, left commented out: runs the input pipeline once
        # and prints the shape of a source batch.
        # with tf.Session() as sess:
        #     sess.run(tf.local_variables_initializer())
        #     sess.run(tf.global_variables_initializer())
        #     coord = tf.train.Coordinator()
        #     threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        #     print(sess.run(source).shape)
        #     coord.request_stop()
        #     coord.join(threads)
        target_cropped = util.crop_center(target, tf.shape(prediction)[1:3])
        tf.summary.histogram('prediction', prediction)
        tf.summary.histogram('groundtruth', target)
    return prediction, target_cropped
losses = []
grads = []
for i in range(FLAGS.gpu_num):
    with tf.device('/gpu:' + str(i)):
        stager = data_flow_ops.StagingArea(
            [tf.float32, tf.float32],
            shapes=[[None, None, None, 3], [None, None, None, 3]])
        stage = stager.put([target_batch_staging, source_batch_staging])
        stages.append(stage)
        target_batch, source_batch = stager.get()

        predict_batch = model.build_model(
            source_batch, FLAGS.scale, training=True, reuse=(i > 0))
        target_cropped_batch = util.crop_center(
            target_batch, tf.shape(predict_batch)[1:3])

        loss = tf.losses.mean_squared_error(target_cropped_batch, predict_batch)
        losses.append(loss)
        grad = optimizer.compute_gradients(loss)
        grads.append(grad)

loss = tf.reduce_mean(tf.stack(losses))


def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        grads = []
        for g, _ in grad_and_vars:
            grads.append(g)
        grad = tf.stack(grads)
lr_image = tf.expand_dims(lr_image, 0)
lr_image_shape = tf.shape(lr_image)[1:3]
hr_image_shape = lr_image_shape * FLAGS.scale

if data.resize:
    lr_image = util.resize_func(lr_image, hr_image_shape)
    lr_image = tf.reshape(lr_image, [1, hr_image_shape[0], hr_image_shape[1], 3])
else:
    lr_image = tf.reshape(lr_image, [1, lr_image_shape[0], lr_image_shape[1], 3])

lr_image_padded = util.pad_boundary(lr_image)

hr_image = model.build_model(
    lr_image_padded - 0.5, FLAGS.scale, training=False, reuse=False)
hr_image = util.crop_center(hr_image, hr_image_shape)

if data.residual:
    if data.resize:
        hr_image += lr_image
    else:
        hr_image += util.resize_func(lr_image, hr_image_shape)

hr_image = hr_image * tf.uint8.max + 0.5
hr_image = tf.saturate_cast(hr_image, tf.uint8)
hr_image = tf.reshape(hr_image, [hr_image_shape[0], hr_image_shape[1], 3])
hr_image = tf.image.encode_png(hr_image)

init = tf.global_variables_initializer()
init_local = tf.local_variables_initializer()

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init_local)
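# The super-resolution snippets call util.crop_center(image, shape) with a
# 4-D image batch and a dynamic [height, width] shape tensor. A minimal
# sketch of that variant, assuming an [N, H, W, C] layout and that offsets
# are derived from dynamic shapes so the crop works inside the graph; this
# is an assumed implementation for illustration only.
import tensorflow as tf


def crop_center(image, target_shape):
    """Center-crops an [N, H, W, C] tensor to the dynamic [height, width] shape."""
    input_shape = tf.shape(image)[1:3]
    offset = (input_shape - target_shape) // 2
    return image[:, offset[0]:offset[0] + target_shape[0],
                 offset[1]:offset[1] + target_shape[1], :]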
vts_nps = [util.VTKSPtoNumpyFromFile(v) for v in vts_files]

if any(v.shape[1] < crop_dims or v.shape[2] < crop_dims for v in vts_nps):
    raise RuntimeError(
        "Error: VTK structured points files have dimensions smaller than "
        "{0}x{0}; need at least {0}x{0}".format(crop_dims))

# Crop and normalize the images.
M = len(vts_nps)
V = np.zeros((M, crop_dims, crop_dims, 1))
max_ = np.amax(vts_nps)
min_ = np.amin(vts_nps)
for i in range(M):
    v = vts_nps[i][0]
    print(v.shape)
    V[i] = util.crop_center(v, crop_dims, crop_dims).reshape(
        crop_dims, crop_dims, 1)
vts_nps = V
# Alternative: vts_nps = [util.crop_center_nd(v, crop_dims, crop_dims) for v in vts_nps]
#              vts_nps = np.asarray(vts_nps)[:, 0, :, :, np.newaxis].astype(float)

print(vts_nps.shape)

if modality == 'ct':
    print("CT")
    vts_nps = 1.0 * vts_nps / 3000
if modality == 'mr':
    vts_nps = 2.0 * (1.0 * vts_nps - min_) / (max_ - min_) - 1
    print("MR")

# There needs to be a multiple of Nbatch images.
print("Padding images to be multiple of Nbatch")
def setup_stitch(
    gitapp: GetInputTargetAndPredictedParameters,
    name=None,
) -> Dict[str, lt.LabeledTensor]:
  """Creates diagnostic images.

  All diagnostic images are registered as summaries.

  Args:
    gitapp: GetInputTargetAndPredictedParameters.
    name: Optional op name.

  Returns:
    A mapping where the keys are names of summary images and the values are
    image tensors.
  """
  logging.info('Setting up stitch')
  with tf.name_scope(name, 'setup_stitch', []) as scope:
    (patch_centers, input_lt, target_lt, predict_input_lt,
     predict_target_lt) = get_input_target_and_predicted(gitapp)

    predicted_size = len(predict_input_lt.axes['row'])
    assert predicted_size == len(predict_input_lt.axes['column'])
    input_lt = util.crop_center(predicted_size, input_lt)
    target_lt = util.crop_center(predicted_size, target_lt)

    # For now, we're not handling overlap or missing data.
    assert gitapp.stride == predicted_size

    if gitapp.bp is not None:
      # Rebatch so a single tensor is all the patches in a single image.
      [input_lt, target_lt, predict_input_lt,
       predict_target_lt] = util.entry_point_batch(
           [input_lt, target_lt, predict_input_lt, predict_target_lt],
           bp=util.BatchParameters(
               size=len(patch_centers), num_threads=1, capacity=1),
           enqueue_many=True,
           entry_point_names=[
               'input_stitch', 'target_stitch', 'predict_input_stitch',
               'predict_target_stitch'
           ],
           name='stitch')

    rc = lt.ReshapeCoder(util.CANONICAL_AXIS_ORDER[3:], ['channel'])
    input_lt = rc.decode(
        ops.patches_to_image(patch_centers, rc.encode(input_lt)))

    rc = lt.ReshapeCoder(util.CANONICAL_AXIS_ORDER[3:], ['channel'])
    target_lt = rc.decode(
        ops.patches_to_image(patch_centers, rc.encode(target_lt)))

    rc = lt.ReshapeCoder(util.CANONICAL_PREDICTION_AXIS_ORDER[3:], ['channel'])
    predict_input_lt = rc.decode(
        ops.patches_to_image(patch_centers, rc.encode(predict_input_lt)))

    rc = lt.ReshapeCoder(util.CANONICAL_PREDICTION_AXIS_ORDER[3:], ['channel'])
    predict_target_lt = rc.decode(
        ops.patches_to_image(patch_centers, rc.encode(predict_target_lt)))

    def get_statistics(t: lt.LabeledTensor) -> lt.LabeledTensor:
      t = visualize.to_softmax(t)
      rc = lt.ReshapeCoder(list(t.axes.keys())[:-1], ['batch'])
      return rc.decode(ops.distribution_statistics(rc.encode(t)))

    # C++ entry points.
    with tf.name_scope(''):
      input_lt = lt.identity(input_lt, name='entry_point_stitched_input')
      target_lt = lt.identity(target_lt, name='entry_point_stitched_target')
      # The nodes are used purely to export data to C++.
      lt.identity(
          get_statistics(predict_input_lt),
          name='entry_point_stitched_predicted_input')
      lt.identity(
          get_statistics(predict_target_lt),
          name='entry_point_stitched_predicted_target')

    predict_input_lt = visualize.to_softmax(predict_input_lt)
    predict_target_lt = visualize.to_softmax(predict_target_lt)

    input_summary_lt = visualize.error_panel(input_lt, predict_input_lt)
    target_summary_lt = visualize.error_panel(target_lt, predict_target_lt)

    if gitapp.bp is not None:
      input_summary_lt, target_summary_lt = lt.batch(
          [input_summary_lt, target_summary_lt],
          # We'll see 3 images in the visualizer.
          batch_size=3,
          enqueue_many=True,
          num_threads=1,
          capacity=1,
          name='group')

    input_summary_lt = lt.identity(
        input_summary_lt, name=scope + 'input_error_panel')
    target_summary_lt = lt.identity(
        target_summary_lt, name=scope + 'target_error_panel')

    visualize_op_dict = {}
    visualize_op_dict['input'] = input_lt
    visualize_op_dict['predict_input'] = predict_input_lt
    visualize_op_dict['target'] = target_lt
    visualize_op_dict['predict_target'] = predict_target_lt

    def summarize(tag, labeled_tensor):
      visualize.summarize_image(labeled_tensor, name=scope + 'summarize/' + tag)
      visualize_op_dict[tag] = labeled_tensor

    summarize('input_error_panel', input_summary_lt)
    summarize('target_error_panel', target_summary_lt)

    return visualize_op_dict