Example #1
import tensorflow as tf

# NOTE: DenseNet is assumed to be defined in the surrounding module.
def model_fn(features, labels, mode, params):
    """Model function for a DenseNet classifier.

    Args:
      features: input tensor.
      labels: one-hot encoded classes.
      mode: one of tf.estimator.ModeKeys.{TRAIN, PREDICT, EVAL}.
      params: a parameter dictionary with the following keys:
        number_classes, growth_rate, dropout_keep_prob,
        encoder_num_units, decoder_num_units,
        bottleneck_number_feature_maps.

    Returns:
      A tf.estimator.EstimatorSpec for the Estimator API.
    """
    number_classes = params.get('number_classes')
    growth_rate = params.get('growth_rate')
    dropout_keep_prob = params.get('dropout_keep_prob')
    encoder_num_units = params.get('encoder_num_units')
    decoder_num_units = params.get('decoder_num_units')
    bottleneck_number_feature_maps = params.get(
        'bottleneck_number_feature_maps')

    densenet = DenseNet(growth_rate=growth_rate,
                        dropout_keep_prob=dropout_keep_prob,
                        number_classes=number_classes,
                        is_training=(mode == tf.estimator.ModeKeys.TRAIN))
    densenet.encode(
        features=features,
        num_units=encoder_num_units,
        bottleneck_number_feature_maps=bottleneck_number_feature_maps)
    logits = densenet.decode(decoder_num_units).output
    predictions = tf.argmax(logits, axis=-1)
    # tf.summary.image expects a 4-D float tensor, so cast the integer
    # class-id map and add a channel dimension before summarizing.
    tf.summary.image('predictions',
                     tf.expand_dims(tf.cast(predictions, tf.float32), -1))
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'class_ids': predictions,
            'probabilities': tf.nn.softmax(logits),
            'logits': logits,
        }
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Compute the loss: softmax cross-entropy plus any regularization
    # losses. tf.losses.softmax_cross_entropy adds the cross-entropy term
    # to the loss collection; get_total_loss then sums the collection,
    # including the regularization losses.
    tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=labels)
    loss = tf.losses.get_total_loss()

    if mode == tf.estimator.ModeKeys.EVAL:
        # tf.metrics.accuracy compares class ids, so convert the one-hot
        # labels back to ids; an EVAL EstimatorSpec also requires the loss.
        accuracy = tf.metrics.accuracy(labels=tf.argmax(labels, axis=-1),
                                       predictions=predictions)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          eval_metric_ops={'accuracy': accuracy})

    assert mode == tf.estimator.ModeKeys.TRAIN
    global_step = tf.train.get_global_step()
    # Batch-norm moving-average updates live in UPDATE_OPS and must run
    # together with the train op.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    optimizer = tf.train.AdagradOptimizer(learning_rate=0.1)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss, global_step=global_step)

    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
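
For context, here is a minimal usage sketch (not part of the original snippet) showing how the model_fn above plugs into tf.estimator.Estimator. The params keys mirror the ones read at the top of the function; train_input_fn, the model_dir path, and all hyperparameter values are hypothetical placeholders.

estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir='/tmp/densenet_model',  # hypothetical checkpoint directory
    params={
        'number_classes': 10,
        'growth_rate': 12,
        'dropout_keep_prob': 0.8,
        'encoder_num_units': [4, 4, 4],
        'decoder_num_units': [4, 4, 4],
        'bottleneck_number_feature_maps': 48,
    })
estimator.train(input_fn=train_input_fn, steps=1000)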
Example #2
import numpy as np
import tensorflow as tf

# NOTE: DenseNet and add_summaries are assumed to be defined in the
# surrounding module.
def model_fn(features, labels, mode, params):
    """Model function for a DenseNet segmentation model.

    Args:
      features: dict with keys 'image', 'image_id' and 'image_shape'.
      labels: one-hot encoded classes.
      mode: one of tf.estimator.ModeKeys.{TRAIN, PREDICT, EVAL}.
      params: a parameter dictionary with the following keys:
        number_classes, growth_rate, dropout_keep_prob,
        encoder_num_units, decoder_num_units,
        bottleneck_number_feature_maps.

    Returns:
      A tf.estimator.EstimatorSpec for the Estimator API.
    """
    number_classes = params.get('number_classes')
    growth_rate = params.get('growth_rate')
    dropout_keep_prob = params.get('dropout_keep_prob')
    encoder_num_units = params.get('encoder_num_units')
    decoder_num_units = params.get('decoder_num_units')
    bottleneck_number_feature_maps = params.get(
        'bottleneck_number_feature_maps')

    tf.logging.info("features tensor {}".format(features))
    features, image_ids, image_shapes = features['image'], features[
        'image_id'], features['image_shape']

    densenet = DenseNet(growth_rate=growth_rate,
                        dropout_keep_prob=dropout_keep_prob,
                        number_classes=number_classes,
                        is_training=(mode == tf.estimator.ModeKeys.TRAIN))

    densenet.encode(
        features=features,
        num_units=encoder_num_units,
        bottleneck_number_feature_maps=bottleneck_number_feature_maps)
    logits = densenet.decode(decoder_num_units).output
    probs = tf.nn.softmax(logits)
    predictions = tf.argmax(probs, axis=-1)
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Resize the probabilities back to the original image size, then
        # recompute the per-pixel class ids at that resolution.
        probs = tf.image.resize_bilinear(probs,
                                         image_shapes,
                                         name='resize_predictions')
        predictions = tf.argmax(probs, axis=-1)
        tf.logging.info("Starting to predict...")
        predictions = {
            'class_ids': predictions,
            'probabilities': probs,
            'logits': logits,
            'image_ids': image_ids
        }
        tf.logging.info("prediction tensor {}".format(predictions))
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    # Compute the loss: softmax cross-entropy plus any regularization
    # losses. tf.losses.softmax_cross_entropy adds the cross-entropy term
    # to the loss collection; get_total_loss then sums the collection,
    # including the regularization losses.
    tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=labels)
    loss = tf.losses.get_total_loss()
    if mode == tf.estimator.ModeKeys.EVAL:
        tf.logging.info("Starting to evaluate...")
        # Average the binary mean IoU over the thresholds
        # 0.50, 0.55, ..., 0.95.
        with tf.variable_scope('mean_iou_calc'):
            prec = []
            up_opts = []
            for t in np.arange(0.5, 1.0, 0.05):
                predicted_mask = tf.to_int32(probs > t)
                score, up_opt = tf.metrics.mean_iou(labels, predicted_mask, 2)
                up_opts.append(up_opt)
                prec.append(score)
            # eval_metric_ops expects a (value, update_op) pair per metric.
            mean_iou = (tf.reduce_mean(tf.stack(prec), axis=0),
                        tf.stack(up_opts))

        eval_metrics = {'mean_iou': mean_iou}
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          eval_metric_ops=eval_metrics)

    assert mode == tf.estimator.ModeKeys.TRAIN
    tf.logging.info("Starting to train..")
    global_step = tf.train.get_global_step()
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    optimizer = tf.train.AdagradOptimizer(learning_rate=1e-4)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss, global_step=global_step)
    add_summaries(predictions, features, loss)
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
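
The EVAL branch above averages a thresholded mean IoU over the thresholds 0.50 through 0.95. For reference, here is a minimal NumPy sketch of the same idea for a single binary mask (an assumption for illustration, not from the original source; it uses plain foreground IoU rather than the confusion-matrix-based tf.metrics.mean_iou):

import numpy as np

def mean_iou_over_thresholds(probs, mask):
    """probs: float array of foreground probabilities; mask: binary array."""
    gt = mask.astype(bool)
    scores = []
    for t in np.arange(0.5, 1.0, 0.05):
        pred = probs > t
        intersection = np.logical_and(pred, gt).sum()
        union = np.logical_or(pred, gt).sum()
        # An empty union means both masks are empty: count as a perfect match.
        scores.append(intersection / union if union else 1.0)
    return float(np.mean(scores))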