Example 1
def model_fn_with_summary(features, labels, mode, params):
  """Attaches scalar/histogram/image summaries to a constant loss and returns a TPUEstimatorSpec."""
  # Assumes the TensorFlow-internal test imports (constant_op, summary,
  # tpu_estimator) and a module-level test constant _EXPECTED_LOSS.
  del features, labels, params
  loss = constant_op.constant(_EXPECTED_LOSS)
  summary.scalar('loss_scalar_summary', loss)
  summary.histogram('loss_histogram_summary', loss)
  summary.image('loss_image_summary', loss)
  return tpu_estimator.TPUEstimatorSpec(mode=mode, loss=loss)
Example 2
def model_fn_with_summary(features, labels, mode, params):
    del features, labels, params
    loss = constant_op.constant(_EXPECTED_LOSS)
    summary.scalar('loss_scalar_summary', loss)
    summary.histogram('loss_histogram_summary', loss)
    summary.image('loss_image_summary', loss)
    return tpu_estimator.TPUEstimatorSpec(mode=mode, loss=loss)
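Both variants above attach summaries to a constant loss and return a tpu_estimator.TPUEstimatorSpec. As a rough, hypothetical illustration of how such a model_fn is consumed (not part of the original snippets; the input_fn, batch size and model_dir are placeholders), it would be handed to a TPUEstimator:

# Hedged sketch, assuming the TF 1.x estimator API under tf.compat.v1.
import tensorflow.compat.v1 as tf

def input_fn(params):
    # TPUEstimator passes the per-shard batch size via params['batch_size'].
    batch_size = params['batch_size']
    features = {'x': tf.zeros([batch_size, 1])}
    labels = tf.zeros([batch_size, 1])
    return tf.data.Dataset.from_tensors((features, labels)).repeat()

tpu_est = tf.estimator.tpu.TPUEstimator(
    model_fn=model_fn_with_summary,                    # one of the functions above
    config=tf.estimator.tpu.RunConfig(model_dir='/tmp/tpu_summary_demo'),
    train_batch_size=8,
    use_tpu=False)                                     # fall back to CPU/GPU
# tpu_est.train(input_fn, max_steps=1)  # TRAIN mode would also need a train_op in the spec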
Example 3
def model_fn_with_summary(features, labels, mode, params):
  """Same summaries as above, but returned in a plain EstimatorSpec with an identity train_op."""
  del features, labels, params
  loss = constant_op.constant(_EXPECTED_LOSS)
  summary.scalar('loss_scalar_summary', loss)
  summary.histogram('loss_histogram_summary', loss)
  summary.image('loss_image_summary', loss)
  return model_fn_lib.EstimatorSpec(
      mode=mode, loss=loss, train_op=array_ops.identity(loss))
Example 4
def model_fn_with_summary(features, labels, mode, params):
    del features, labels, params
    loss = constant_op.constant(_EXPECTED_LOSS)
    summary.scalar('loss_scalar_summary', loss)
    summary.histogram('loss_histogram_summary', loss)
    summary.image('loss_image_summary', loss)
    return model_fn_lib.EstimatorSpec(
        mode=mode, loss=loss, train_op=array_ops.identity(loss))
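Examples 3 and 4 return a plain model_fn_lib.EstimatorSpec, with an identity op standing in for a real train_op, so the same function shape also works with the standard Estimator. A hedged sketch with placeholder values (not from the original tests):

import tensorflow.compat.v1 as tf

def input_fn():
    features = {'x': tf.zeros([1, 1])}
    labels = tf.zeros([1, 1])
    return tf.data.Dataset.from_tensors((features, labels)).repeat()

est = tf.estimator.Estimator(
    model_fn=model_fn_with_summary,           # the EstimatorSpec variant above
    model_dir='/tmp/estimator_summary_demo')  # placeholder directory
# est.train(input_fn, max_steps=1)
# Note: summary.image() expects a 4-D batch of images, so feeding it the scalar
# loss, as these test snippets do, would be rejected when the graph is built.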
Example 5
    def test_report_unsupported_operations_graph_mode(self):
        """Tests that unsupported operations are detected."""
        context = self.create_test_xla_compile_context()
        context.Enter()
        dummy_tensor = constant_op.constant(1.1)
        audio_summary = summary.audio('audio_summary', dummy_tensor, 0.5)
        histogram_summary = summary.histogram('histogram_summary',
                                              dummy_tensor)
        image_summary = summary.image('image_summary', dummy_tensor)
        scalar_summary = summary.scalar('scalar_summary', dummy_tensor)
        tensor_summary = summary.tensor_summary('tensor_summary', dummy_tensor)
        summary.merge(
            [
                audio_summary, histogram_summary, image_summary,
                scalar_summary, tensor_summary
            ],
            name='merge_summary')
        logging_ops.Print(dummy_tensor, [dummy_tensor], name='print_op')
        context.Exit()

        unsupported_ops_names = [op.name for op in context._unsupported_ops]
        self.assertEqual(unsupported_ops_names, [
            u'audio_summary', u'histogram_summary', u'image_summary',
            u'scalar_summary', u'tensor_summary',
            u'merge_summary/merge_summary', u'print_op'
        ])
Example 6
  def test_report_unsupported_operations(self):
    """Tests that unsupported operations are detected."""
    context = self.create_test_xla_compile_context()
    context.Enter()
    dummy_tensor = constant_op.constant(1.1)
    audio_summary = summary.audio('audio_summary', dummy_tensor, 0.5)
    histogram_summary = summary.histogram('histogram_summary', dummy_tensor)
    image_summary = summary.image('image_summary', dummy_tensor)
    scalar_summary = summary.scalar('scalar_summary', dummy_tensor)
    tensor_summary = summary.tensor_summary('tensor_summary', dummy_tensor)
    summary.merge(
        [
            audio_summary, histogram_summary, image_summary, scalar_summary,
            tensor_summary
        ],
        name='merge_summary')
    logging_ops.Print(dummy_tensor, [dummy_tensor], name='print_op')
    context.Exit()

    unsupported_ops_names = [op.name for op in context._unsupported_ops]
    self.assertEqual(unsupported_ops_names, [
        u'audio_summary', u'histogram_summary', u'image_summary',
        u'scalar_summary', u'tensor_summary', u'merge_summary/merge_summary',
        u'print_op'
    ])
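The two tests above go through the test helper create_test_xla_compile_context and the private _unsupported_ops list. As a hedged sketch (not taken from the test file), the same detection is visible through the public, now-deprecated wrapper tf.xla.experimental.compile, which builds an XLACompileContext internally and warns about ops, such as summaries, that cannot be lowered to XLA:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

def computation():
    x = tf.constant(1.1)
    tf.summary.scalar('scalar_summary', x)  # summaries have no XLA lowering
    return x + 1.0

# Building the compiled graph is enough to trigger the detection: the context
# records the summary op and logs a warning naming it, analogous to the
# assertion on context._unsupported_ops in the tests above.
(result,) = tf.xla.experimental.compile(computation)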
Example 7
def unet_model_fn(features, labels, mode, params):
    # Assumed context: `tf` is TensorFlow 1.x (or tf.compat.v1), `summary` is the
    # v1 summary module, `estimator` is tf.estimator, and Unet, Losses and f1 are
    # user-defined helpers from the surrounding project.
    tf.local_variables_initializer()  # NOTE: the returned init op is discarded, so this line is effectively a no-op.
    loss, train_op = None, None
    eval_metric_ops, training_hooks, evaluation_hooks = None, None, None
    predictions_dict = None
    unet = Unet(params=params)
    logits = unet.model(input_tensor=features['image'])
    y_pred = tf.math.softmax(logits, axis=-1)
    output_img = tf.expand_dims(tf.cast(tf.math.argmax(y_pred, axis=-1) * 255, dtype=tf.uint8), axis=-1)

    if mode in (estimator.ModeKeys.TRAIN, estimator.ModeKeys.EVAL):

        with tf.name_scope('Loss_Calculation'):
            loss = Losses(logits=logits, labels=labels['label'])
            loss = loss.custom_loss()

        with tf.name_scope('Dice_Score_Calculation'):
            dice = f1(labels=labels['label'], predictions=y_pred)

        with tf.name_scope('Images_{}'.format(mode)):
            with tf.name_scope('Reformat_Outputs'):
                label = tf.expand_dims(tf.cast(tf.argmax(labels['label'], -1) * 255, dtype=tf.uint8), axis=-1)
                # Min-max normalise the input image to [0, 1] for display.
                image = tf.math.divide(
                    features['image'] - tf.reduce_min(features['image'], [0, 1, 2]),
                    tf.reduce_max(features['image'], [0, 1, 2]) - tf.reduce_min(features['image'], [0, 1, 2]))
            summary.image('1_Medical_Image', image, max_outputs=1)
            summary.image('2_Output', output_img, max_outputs=1)
            summary.image('3_Output_pred', tf.expand_dims(y_pred[:, :, :, 1], -1), max_outputs=1)
            summary.image('4_Output_label', label, max_outputs=1)

    if mode == estimator.ModeKeys.TRAIN:
        with tf.name_scope('Learning_Rate'):
            global_step = tf.compat.v1.train.get_or_create_global_step()
            learning_rate = tf.compat.v1.train.exponential_decay(params['lr'], global_step=global_step,
                                                                 decay_steps=params['decay_steps'],
                                                                 decay_rate=params['decay_rate'], staircase=False)
        with tf.name_scope('Optimizer_conf'):
            # `Adam` is assumed to be a V1-style optimizer (e.g. tf.compat.v1.train.AdamOptimizer),
            # since minimize() is given a global_step.
            train_op = Adam(learning_rate=learning_rate).minimize(loss=loss, global_step=global_step)

        with tf.name_scope('Metrics'):
            summary.scalar('Output_DSC', dice[1])
            summary.scalar('Learning_Rate', learning_rate)

    if mode == estimator.ModeKeys.EVAL:
        eval_metric_ops = {'Metrics/Output_DSC': dice}
        eval_summary_hook = tf.estimator.SummarySaverHook(output_dir=params['eval_path'],
                                                          summary_op=summary.merge_all(),
                                                          save_steps=params['eval_steps'])
        evaluation_hooks = [eval_summary_hook]

    if mode == estimator.ModeKeys.PREDICT:
        predictions_dict = {'image': features['image'],
                            'y_preds': y_pred[:, :, :, 1],
                            'output_img': output_img,
                            'path': features['path']}

    return estimator.EstimatorSpec(mode,
                                   predictions=predictions_dict,
                                   loss=loss,
                                   train_op=train_op,
                                   eval_metric_ops=eval_metric_ops,
                                   training_hooks=training_hooks,
                                   evaluation_hooks=evaluation_hooks)
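For context, a plausible (hypothetical) way to wire unet_model_fn into an Estimator is sketched below. All paths, hyper-parameter values and the input functions are placeholders, and the params keys mirror the ones read inside the model_fn above (plus whatever the Unet and Losses helpers expect):

import tensorflow as tf

params = {
    'lr': 1e-4,
    'decay_steps': 1000,
    'decay_rate': 0.96,
    'eval_path': '/tmp/unet/eval',   # consumed by the EVAL SummarySaverHook
    'eval_steps': 100,
}

unet_estimator = tf.estimator.Estimator(
    model_fn=unet_model_fn,
    model_dir='/tmp/unet',
    params=params)

# unet_estimator.train(input_fn=train_input_fn, steps=10000)
# unet_estimator.evaluate(input_fn=eval_input_fn)
# predictions = unet_estimator.predict(input_fn=predict_input_fn)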