Example #1
def model(features, labels, mode, params):
    """CNN classifier model."""
    images = features["image"]
    labels = labels["label"]

    training = mode == tf.estimator.ModeKeys.TRAIN
    drop_rate = params.drop_rate if training else 0.0

    features = ops.conv_layers(images,
                               filters=[64, 128, 256],
                               kernels=[3, 3, 3],
                               pool_sizes=[2, 2, 2])

    features = tf.contrib.layers.flatten(features)

    logits = ops.dense_layers(features, [512, params.num_classes],
                              drop_rates=drop_rate,
                              linear_top_layer=True)

    predictions = tf.argmax(logits, axis=-1)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    tf.summary.image("images", images)

    eval_metrics = {
        "accuracy": tf.metrics.accuracy(labels, predictions),
        "top_1_error": tf.metrics.mean(metrics.top_k_error(labels, logits, 1)),
    }

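    # Expose labels and predictions through a graph collection, presumably so a
    # logging hook in the surrounding framework can fetch and print them per batch.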
    tf.add_to_collection("batch_logging", tf.identity(labels, name="labels"))
    tf.add_to_collection("batch_logging",
                         tf.identity(predictions, name="predictions"))

    return {"predictions": predictions}, loss, eval_metrics
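Each of these model functions returns a (predictions, loss, eval_metrics) tuple instead of a tf.estimator.EstimatorSpec, so a surrounding training framework is expected to do the wiring. All examples assume TensorFlow 1.x (tf.contrib, tf.layers) plus project-local ops and metrics helper modules that are not shown here. As a minimal sketch of what that wiring could look like (the make_model_fn helper, the AdamOptimizer choice, and the learning_rate/hparams names are illustrative assumptions, not part of the original examples):

import tensorflow as tf


def make_model_fn(model, learning_rate=0.001):
    """Adapts a (predictions, loss, eval_metrics) model function to a model_fn."""
    def model_fn(features, labels, mode, params):
        predictions, loss, eval_metrics = model(features, labels, mode, params)
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)
        train_op = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            # Run batch-norm moving-average updates together with the train step.
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                train_op = tf.train.AdamOptimizer(learning_rate).minimize(
                    loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(
            mode, predictions=predictions, loss=loss,
            train_op=train_op, eval_metric_ops=eval_metrics)
    return model_fn


# Usage sketch (hparams is a placeholder for the project's hyperparameter object):
# estimator = tf.estimator.Estimator(model_fn=make_model_fn(model), params=hparams)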
Example #2
def model(features, labels, mode, params):
  """CNN classifier model."""
  images = features["image"]
  labels = labels["label"]

  training = mode == tf.estimator.ModeKeys.TRAIN

  features = ops.conv_layers(
    images, [16], [3], linear_top_layer=True)

  features = ops.resnet_blocks(
    features, [16, 32, 64], [1, 2, 2], 5, training)

  features = ops.batch_normalization(features, training=training)
  features = tf.nn.relu(features)

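  # Global average pooling over the spatial dimensions before the classifier.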
  features = tf.reduce_mean(features, axis=[1, 2])
  logits = ops.dense_layers(
    features, [params.num_classes], linear_top_layer=False)

  predictions = tf.argmax(logits, axis=-1)

  loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

  tf.summary.image("images", images)

  eval_metrics = {
    "accuracy": tf.metrics.accuracy(labels, predictions),
    "top_1_error": tf.metrics.mean(metrics.top_k_error(labels, logits, 1)),
  }

  return {"predictions": predictions}, loss, eval_metrics
Example #3
def model(features, labels, mode, params):
    """CNN classifier model."""
    images = features["image"]
    labels = labels["label"]

    training = mode == tf.estimator.ModeKeys.TRAIN

    slim = tf.contrib.slim
    vgg = nets.vgg
    with slim.arg_scope(vgg.vgg_arg_scope()):
        mean = tf.constant([123.68, 116.78, 103.94])
        images = images - mean
        net, _ = vgg.vgg_16(images,
                            num_classes=params.num_classes,
                            is_training=training,
                            spatial_squeeze=False)

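        # Warm-start the convolutional layers from the VGG-16 checkpoint and
        # restrict training to the fully connected layers (fc6/fc7/fc8).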
        if training:
            variables_to_restore = slim.get_variables_to_restore(
                exclude=['vgg_16/fc6', 'vgg_16/fc7', 'vgg_16/fc8'])
            tf.train.init_from_checkpoint(
                "vgg_16.ckpt",
                {v.name.split(':')[0]: v
                 for v in variables_to_restore})
            variables_in_fc = []
            for scope in ['vgg_16/fc6', 'vgg_16/fc7', 'vgg_16/fc8']:
                variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              scope)
                variables_in_fc.extend(variables)
            variables_to_train = tf.get_collection_ref(
                tf.GraphKeys.TRAINABLE_VARIABLES)
            variables_to_train.clear()
            variables_to_train.extend(variables_in_fc)

    logits = tf.reduce_mean(net, axis=[1, 2])

    predictions = tf.argmax(logits, axis=-1)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    eval_metrics = {
        "accuracy": tf.metrics.accuracy(labels, predictions),
        "top_1_error": tf.metrics.mean(metrics.top_k_error(labels, logits, 1)),
    }

    return {"predictions": predictions}, loss, eval_metrics
Example #4
def model(features, labels, mode, params):
    """CNN classifier model."""
    images = features["image"]
    labels = labels["label"]

    training = mode == tf.estimator.ModeKeys.TRAIN

    x = images / 255.0
    x = tf.layers.conv2d(x,
                         4,
                         3,
                         padding="same",
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(
                             params.weight_decay))
    x = tf.layers.batch_normalization(x, training=training)
    x = tf.nn.relu(x)
    x = tf.layers.average_pooling2d(x, 8, 8, padding="same")
    x = tf.layers.conv2d(x,
                         4,
                         3,
                         padding="same",
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(
                             params.weight_decay))
    x = tf.layers.batch_normalization(x, training=training)
    x = tf.nn.relu(x)
    x = tf.layers.average_pooling2d(x, 8, 8, padding="same")

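    # The dense layer acts on the channel dimension at every spatial position;
    # averaging over height and width below then yields the per-class logits.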
    logits = tf.layers.dense(
        x,
        params.num_classes,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(
            params.weight_decay))

    logits = tf.reduce_mean(logits, axis=[1, 2])

    predictions = tf.argmax(logits, axis=-1)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    eval_metrics = {
        "accuracy": tf.metrics.accuracy(labels, predictions),
        "top_1_error": tf.metrics.mean(metrics.top_k_error(labels, logits, 1)),
    }

    return {"predictions": predictions}, loss, eval_metrics
Example #5
def model(features, labels, mode, params):
    """CNN classifier model."""
    images = features["image"]
    if mode != tf.estimator.ModeKeys.PREDICT:
        labels = labels["label"]

    training = mode == tf.estimator.ModeKeys.TRAIN
    tf.keras.backend.set_learning_phase(training)

    image_shape = (512, 512, 3)
    if training:
        image_shape = (140, 140, 3)
        images = tf.random_crop(images, [params.batch_size, 140, 140, 3])
    images = tf.keras.applications.inception_resnet_v2.preprocess_input(images)
    inception = tf.keras.applications.InceptionResNetV2(
        input_shape=image_shape,
        include_top=False,
        weights='imagenet' if training else None,
        input_tensor=images,
        pooling='avg')
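    # Mark the pre-trained Inception-ResNet-v2 backbone as non-trainable so that
    # only the dense classifier head below is updated.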
    for layer in inception.layers:
        layer.trainable = False

    logits = tf.layers.dense(
        inception(images),
        params.num_classes,
        kernel_regularizer=tf.contrib.layers.l2_regularizer(
            params.weight_decay))

    predictions = tf.argmax(logits, axis=-1)

    if mode == tf.estimator.ModeKeys.PREDICT:
        return {"predictions": predictions}, None, None

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    eval_metrics = {
        "accuracy": tf.metrics.accuracy(labels, predictions),
        "top_1_error": tf.metrics.mean(metrics.top_k_error(labels, logits, 1)),
    }

    return {"predictions": predictions}, loss, eval_metrics
Example #6
def model(features, labels, mode, params):
    """CNN classifier model."""
    images = features["image"]
    labels = labels["label"]

    training = mode == tf.estimator.ModeKeys.TRAIN
    drop_rate = params.drop_rate if training else 0.0

    images = tf.layers.dropout(images, params.input_drop_rate)

    features = ops.conv_layers(
        images,
        filters=[96, 96, 192, 192, 192, 192, params.num_classes],
        kernels=[3, 3, 3, 3, 3, 1, 1],
        pool_sizes=[1, 3, 1, 3, 1, 1, 1],
        pool_strides=[1, 2, 1, 2, 1, 1, 1],
        drop_rates=[0, 0, drop_rate, 0, drop_rate, 0, 0],
        batch_norm=True,
        training=training,
        pool_activation=tf.nn.relu,
        linear_top_layer=True,
        weight_decay=params.weight_decay)

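    # All-convolutional head: the last conv layer already has num_classes
    # filters, so spatially averaging its feature maps gives the logits.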
    logits = tf.reduce_mean(features, [1, 2])

    predictions = tf.argmax(logits, axis=-1)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    tf.summary.image("images", images)

    eval_metrics = {
        "accuracy": tf.metrics.accuracy(labels, predictions),
        "top_1_error": tf.metrics.mean(metrics.top_k_error(labels, logits, 1)),
    }

    return {"predictions": predictions}, loss, eval_metrics