Esempio n. 1
0
 def testModelVariables(self):
     """Building vgg_19 creates exactly the expected model variables."""
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         vgg.vgg_19(inputs, num_classes)
         # Derive the expected names instead of spelling out all 38:
         # five conv blocks with 2, 2, 4, 4, 4 layers, then fc6-fc8,
         # each contributing a weights and a biases variable.
         expected_names = []
         for block, depth in enumerate((2, 2, 4, 4, 4), start=1):
             for layer in range(1, depth + 1):
                 for kind in ('weights', 'biases'):
                     expected_names.append(
                         'vgg_19/conv%d/conv%d_%d/%s' %
                         (block, block, layer, kind))
         for fc in ('fc6', 'fc7', 'fc8'):
             for kind in ('weights', 'biases'):
                 expected_names.append('vgg_19/%s/%s' % (fc, kind))
         model_variables = [
             v.op.name for v in variables_lib.get_model_variables()
         ]
         self.assertSetEqual(set(model_variables), set(expected_names))
Esempio n. 2
0
 def testModelVariables(self):
   """After building vgg_19, exactly the expected model variables exist."""
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     vgg.vgg_19(inputs, num_classes)
     # Five conv blocks (2, 2, 4, 4, 4 layers) plus fc6-fc8; each layer
     # contributes a weights and a biases variable.
     expected_names = [
         'vgg_19/conv1/conv1_1/weights',
         'vgg_19/conv1/conv1_1/biases',
         'vgg_19/conv1/conv1_2/weights',
         'vgg_19/conv1/conv1_2/biases',
         'vgg_19/conv2/conv2_1/weights',
         'vgg_19/conv2/conv2_1/biases',
         'vgg_19/conv2/conv2_2/weights',
         'vgg_19/conv2/conv2_2/biases',
         'vgg_19/conv3/conv3_1/weights',
         'vgg_19/conv3/conv3_1/biases',
         'vgg_19/conv3/conv3_2/weights',
         'vgg_19/conv3/conv3_2/biases',
         'vgg_19/conv3/conv3_3/weights',
         'vgg_19/conv3/conv3_3/biases',
         'vgg_19/conv3/conv3_4/weights',
         'vgg_19/conv3/conv3_4/biases',
         'vgg_19/conv4/conv4_1/weights',
         'vgg_19/conv4/conv4_1/biases',
         'vgg_19/conv4/conv4_2/weights',
         'vgg_19/conv4/conv4_2/biases',
         'vgg_19/conv4/conv4_3/weights',
         'vgg_19/conv4/conv4_3/biases',
         'vgg_19/conv4/conv4_4/weights',
         'vgg_19/conv4/conv4_4/biases',
         'vgg_19/conv5/conv5_1/weights',
         'vgg_19/conv5/conv5_1/biases',
         'vgg_19/conv5/conv5_2/weights',
         'vgg_19/conv5/conv5_2/biases',
         'vgg_19/conv5/conv5_3/weights',
         'vgg_19/conv5/conv5_3/biases',
         'vgg_19/conv5/conv5_4/weights',
         'vgg_19/conv5/conv5_4/biases',
         'vgg_19/fc6/weights',
         'vgg_19/fc6/biases',
         'vgg_19/fc7/weights',
         'vgg_19/fc7/biases',
         'vgg_19/fc8/weights',
         'vgg_19/fc8/biases',
     ]
     model_variables = [v.op.name for v in variables_lib.get_model_variables()]
     # Compare as sets: variable creation order is irrelevant here.
     self.assertSetEqual(set(model_variables), set(expected_names))
Esempio n. 3
0
 def testEndPoints(self):
     """vgg_19 exposes the expected end points in both train and eval mode."""
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     for is_training in [True, False]:
         # Fresh graph per mode so the two builds do not collide.
         with ops.Graph().as_default():
             inputs = random_ops.random_uniform(
                 (batch_size, height, width, 3))
             _, end_points = vgg.vgg_19(inputs,
                                        num_classes,
                                        is_training=is_training)
             # Five conv blocks (2, 2, 4, 4, 4 layers), a pool after each,
             # then the three fully connected layers.
             expected_names = [
                 'vgg_19/conv1/conv1_1', 'vgg_19/conv1/conv1_2',
                 'vgg_19/pool1', 'vgg_19/conv2/conv2_1',
                 'vgg_19/conv2/conv2_2', 'vgg_19/pool2',
                 'vgg_19/conv3/conv3_1', 'vgg_19/conv3/conv3_2',
                 'vgg_19/conv3/conv3_3', 'vgg_19/conv3/conv3_4',
                 'vgg_19/pool3', 'vgg_19/conv4/conv4_1',
                 'vgg_19/conv4/conv4_2', 'vgg_19/conv4/conv4_3',
                 'vgg_19/conv4/conv4_4', 'vgg_19/pool4',
                 'vgg_19/conv5/conv5_1', 'vgg_19/conv5/conv5_2',
                 'vgg_19/conv5/conv5_3', 'vgg_19/conv5/conv5_4',
                 'vgg_19/pool5', 'vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8'
             ]
             self.assertSetEqual(set(end_points.keys()),
                                 set(expected_names))
Esempio n. 4
0
def tower_loss(scope):
    """Builds one GPU tower of the selected network and returns its loss.

    The backbone is chosen by the module-level `net` string. ResNet variants
    return logits shaped [batch, 1, 1, classes], which are reshaped to 2-D.

    Args:
      scope: Unique tower name scope (e.g. 'tower_0'); used to fetch only
        this tower's entries from the 'losses' collection.

    Returns:
      Scalar tensor: the sum of all losses collected for this tower.

    Raises:
      ValueError: If `net` does not name a supported network, or if the
        logits do not have shape [batch_size, num_classes].
    """
    images, labels = read_and_decode()
    if net == 'vgg_16':
        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_16(images, num_classes=FLAGS.num_classes)
    elif net == 'vgg_19':
        with slim.arg_scope(vgg.vgg_arg_scope()):
            logits, end_points = vgg.vgg_19(images, num_classes=FLAGS.num_classes)
    elif net == 'resnet_v1_101':
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            logits, end_points = resnet_v1.resnet_v1_101(images, num_classes=FLAGS.num_classes)
        logits = tf.reshape(logits, [FLAGS.batch_size, FLAGS.num_classes])
    elif net == 'resnet_v1_50':
        with slim.arg_scope(resnet_v1.resnet_arg_scope()):
            logits, end_points = resnet_v1.resnet_v1_50(images, num_classes=FLAGS.num_classes)
        logits = tf.reshape(logits, [FLAGS.batch_size, FLAGS.num_classes])
    elif net == 'resnet_v2_50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope()):
            logits, end_points = resnet_v2.resnet_v2_50(images, num_classes=FLAGS.num_classes)
        logits = tf.reshape(logits, [FLAGS.batch_size, FLAGS.num_classes])
    else:
        # ValueError instead of a bare Exception so callers can catch it
        # specifically.
        raise ValueError('No network matched with net %s.' % net)
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if logits.shape != (FLAGS.batch_size, FLAGS.num_classes):
        raise ValueError('Unexpected logits shape %s for net %s.' %
                         (logits.shape, net))
    # cal_loss presumably registers its losses in the 'losses' collection
    # (its return value is unused here) — verify against its definition.
    _ = cal_loss(logits, labels)
    losses = tf.get_collection('losses', scope)
    total_loss = tf.add_n(losses, name='total_loss')
    for l in losses + [total_loss]:
        # Strip the 'tower_N/' prefix so summaries share names across GPUs.
        loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)
        tf.summary.scalar(loss_name, l)
    return total_loss
Esempio n. 5
0
 def testForward(self):
   """A full forward pass runs and produces a non-all-zero logits array."""
   batch_size = 1
   height, width = 224, 224
   with self.test_session() as sess:
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(inputs)
     sess.run(variables.global_variables_initializer())
     output = sess.run(logits)
     # Sanity check only: at least one logit must be non-zero.
     self.assertTrue(output.any())
Esempio n. 6
0
 def testForward(self):
     """A full forward pass runs and produces a non-all-zero logits array."""
     batch_size = 1
     height, width = 224, 224
     with self.test_session() as sess:
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_19(inputs)
         sess.run(variables.global_variables_initializer())
         output = sess.run(logits)
         # Sanity check only: at least one logit must be non-zero.
         self.assertTrue(output.any())
def model_fn(features, labels, mode):
    """tf.estimator model_fn: VGG-19 backbone with a new 2-class head.

    Args:
      features: Batch of images; cast to float32 before mean subtraction.
        (Assumes HWC layout with 3 channels — TODO confirm against input_fn.)
      labels: One-hot labels of shape [batch, 2] (softmax_cross_entropy
        expects one-hot); unused in PREDICT mode.
      mode: A tf.estimator.ModeKeys value.

    Returns:
      A tf.estimator.EstimatorSpec configured for the given mode.
    """
    # Subtract the ImageNet channel means before feeding VGG.
    img_mean = tf.reshape(tf.constant(IMAGENET_MEAN), (1, 1, 3))
    img = tf.cast(features, tf.float32)
    output = vgg.vgg_19(img - img_mean,
                        is_training=(mode == tf.estimator.ModeKeys.TRAIN))
    # New 2-way classifier on top of the pretrained fc7 activations.
    logits = tf.layers.dense(tf.layers.flatten(output[1]['vgg_19/fc7']),
                             2,
                             activation=None,
                             name="new_logits")

    probabilities = tf.nn.softmax(logits)
    predicted_classes = tf.argmax(probabilities, axis=1)

    loss = None
    train_op = None

    if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
        loss = tf.losses.softmax_cross_entropy(labels, logits)

    if mode == tf.estimator.ModeKeys.TRAIN:
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            # Fine-tune only the new head plus VGG's fc6/fc7; fc8 (the
            # original 1000-way classifier) and all conv layers stay frozen.
            train_vars = [
                tf.get_variable('new_logits/kernel'),
                tf.get_variable('new_logits/bias')
            ]
            train_vars.extend([
                var for var in tf.trainable_variables()
                if var.name.startswith('vgg_19/fc')
                and not var.name.startswith("vgg_19/fc8")
            ])
            # Log instead of the bare debug print() the original left here.
            tf.logging.info('Variables selected for training: %s', train_vars)
            train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(
                loss,
                global_step=tf.train.get_or_create_global_step(),
                var_list=train_vars)

    metrics = None
    if mode == tf.estimator.ModeKeys.EVAL:
        metrics = {
            'accuracy':
            tf.metrics.accuracy(labels=tf.argmax(labels, axis=1),
                                predictions=predicted_classes)
        }

    predictions = None
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'probabilities': probabilities,
            'predictions': predicted_classes
        }

    return tf.estimator.EstimatorSpec(mode=mode,
                                      predictions=predictions,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=metrics)
Esempio n. 8
0
 def testBuild(self):
     """Logits op name and static shape are as expected for vgg_19."""
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_19(inputs, num_classes)
         # assertEqual: assertEquals is a deprecated alias.
         self.assertEqual(logits.op.name, 'vgg_19/fc8/squeezed')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
Esempio n. 9
0
 def testFullyConvolutional(self):
   """With spatial_squeeze=False, 256x256 input yields a 2x2 logits map."""
   batch_size = 1
   height, width = 256, 256
   num_classes = 1000
   with self.test_session():
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
     # assertEqual: assertEquals is a deprecated alias.
     self.assertEqual(logits.op.name, 'vgg_19/fc8/BiasAdd')
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, 2, 2, num_classes])
Esempio n. 10
0
 def testFullyConvolutional(self):
     """With spatial_squeeze=False, 256x256 input yields a 2x2 logits map."""
     batch_size = 1
     height, width = 256, 256
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
         # assertEqual: assertEquals is a deprecated alias.
         self.assertEqual(logits.op.name, 'vgg_19/fc8/BiasAdd')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, 2, 2, num_classes])
Esempio n. 11
0
 def testBuild(self):
   """Logits op name and static shape are as expected for vgg_19."""
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     inputs = random_ops.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(inputs, num_classes)
     # assertEqual: assertEquals is a deprecated alias.
     self.assertEqual(logits.op.name, 'vgg_19/fc8/squeezed')
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
Esempio n. 12
0
 def testEvaluation(self):
   """Eval-mode graph keeps the expected logits and prediction shapes."""
   batch_size = 2
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
     # is_training=False builds the inference version of the network.
     logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     predictions = math_ops.argmax(logits, 1)
     self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
Esempio n. 13
0
 def testEvaluation(self):
     """Eval-mode graph keeps the expected logits and prediction shapes."""
     batch_size = 2
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         eval_inputs = random_ops.random_uniform(
             (batch_size, height, width, 3))
         # is_training=False builds the inference version of the network.
         logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
         predictions = math_ops.argmax(logits, 1)
         self.assertListEqual(predictions.get_shape().as_list(),
                              [batch_size])
Esempio n. 14
0
 def testTrainEvalWithReuse(self):
   """Train and (larger-input) eval graphs share vgg_19 variables."""
   train_batch_size = 2
   eval_batch_size = 1
   train_height, train_width = 224, 224
   eval_height, eval_width = 256, 256
   num_classes = 1000
   with self.test_session():
     train_inputs = random_ops.random_uniform(
         (train_batch_size, train_height, train_width, 3))
     logits, _ = vgg.vgg_19(train_inputs)
     self.assertListEqual(logits.get_shape().as_list(),
                          [train_batch_size, num_classes])
     # Share the already-created variables with the evaluation network.
     variable_scope.get_variable_scope().reuse_variables()
     eval_inputs = random_ops.random_uniform(
         (eval_batch_size, eval_height, eval_width, 3))
     logits, _ = vgg.vgg_19(
         eval_inputs, is_training=False, spatial_squeeze=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [eval_batch_size, 2, 2, num_classes])
     # 256x256 inputs give a 2x2 logits map; average it before argmax.
     logits = math_ops.reduce_mean(logits, [1, 2])
     predictions = math_ops.argmax(logits, 1)
     # assertEqual: assertEquals is a deprecated alias.
     self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
Esempio n. 15
0
 def testTrainEvalWithReuse(self):
     """Train and (larger-input) eval graphs share vgg_19 variables."""
     train_batch_size = 2
     eval_batch_size = 1
     train_height, train_width = 224, 224
     eval_height, eval_width = 256, 256
     num_classes = 1000
     with self.test_session():
         train_inputs = random_ops.random_uniform(
             (train_batch_size, train_height, train_width, 3))
         logits, _ = vgg.vgg_19(train_inputs)
         self.assertListEqual(logits.get_shape().as_list(),
                              [train_batch_size, num_classes])
         # Share the already-created variables with the evaluation network.
         variable_scope.get_variable_scope().reuse_variables()
         eval_inputs = random_ops.random_uniform(
             (eval_batch_size, eval_height, eval_width, 3))
         logits, _ = vgg.vgg_19(eval_inputs,
                                is_training=False,
                                spatial_squeeze=False)
         self.assertListEqual(logits.get_shape().as_list(),
                              [eval_batch_size, 2, 2, num_classes])
         # 256x256 inputs give a 2x2 logits map; average it before argmax.
         logits = math_ops.reduce_mean(logits, [1, 2])
         predictions = math_ops.argmax(logits, 1)
         # assertEqual: assertEquals is a deprecated alias.
         self.assertEqual(predictions.get_shape().as_list(),
                          [eval_batch_size])
Esempio n. 16
0
 def testEndPoints(self):
   """vgg_19 exposes every conv/pool/fc activation as an end point."""
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   for is_training in (True, False):
     with ops.Graph().as_default():
       inputs = random_ops.random_uniform((batch_size, height, width, 3))
       _, end_points = vgg.vgg_19(inputs, num_classes, is_training=is_training)
       # Reconstruct the expected end-point names: five conv blocks of
       # 2, 2, 4, 4, 4 layers, a pool after each block, then fc6-fc8.
       expected_names = []
       for block, depth in enumerate((2, 2, 4, 4, 4), start=1):
         for layer in range(1, depth + 1):
           expected_names.append(
               'vgg_19/conv%d/conv%d_%d' % (block, block, layer))
         expected_names.append('vgg_19/pool%d' % block)
       expected_names += ['vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8']
       self.assertSetEqual(set(end_points.keys()), set(expected_names))
def generate_graph(conv_layer=(1, 1), filter_idx=0, lr=0.01, graph=None):
    """Builds a graph that optimizes a synthetic image to maximize one
    VGG-19 conv filter's mean activation (activation maximization).

    Args:
      conv_layer: Pair of ints (block, layer) identifying the conv layer,
        e.g. (5, 3) -> 'vgg_19/conv5/conv5_3'.
      filter_idx: Index of the filter (channel) whose activation to maximize.
      lr: Adam learning rate for the synthetic-image updates.
      graph: Optional tf.Graph to build into; defaults to the default graph.

    Returns:
      Dict with keys 'graph', 'test_img', 'init_fn', 'train_op', 'loss'.

    Raises:
      ValueError: If `conv_layer` is not a pair of integers.
    """
    # Validate explicitly rather than with `assert` (stripped under -O).
    # The original message also named a non-existent `conv_filter` argument.
    if len(conv_layer) != 2 or not all(isinstance(item, int) for item in conv_layer):
        raise ValueError(
            "`conv_layer` must be a tuple of two integers that identify "
            "the conv layer in the VGG 19 network")
    layer_name = 'vgg_19/conv{0}/conv{0}_{1}'.format(*conv_layer)

    if graph is None:
        graph = tf.get_default_graph()

    with graph.as_default():
        img_mean = tf.reshape(tf.constant(IMAGENET_MEAN), (1, 1, 3))
        # The trainable "image": tanh bounds the values before scaling to
        # pixel range; the final image is clipped to [0, 255].
        params = tf.get_variable("synthetic_img",
                                 initializer=tf.random_normal((1, 224, 224, 3), 0, 0.001, dtype=tf.float32))
        img = (tf.nn.tanh(params) * 255.0) + img_mean
        img = tf.clip_by_value(img, 0, 255)
        test_img = tf.cast(img, tf.uint8)

        output = vgg.vgg_19(img - img_mean, is_training=False)

        # Minimize the negated mean activation of the chosen filter; only
        # the synthetic image is in var_list, so VGG weights stay fixed.
        layer = output[1][layer_name]
        loss = -tf.reduce_mean(layer[:, :, :, filter_idx])
        train_op = tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, var_list=[params])

        init_op = tf.global_variables_initializer()
        # Restore everything except the synthetic image from the pretrained
        # checkpoint; `is not` (identity), not `!=`, to exclude `params`.
        saver = tf.train.Saver(
            [v for v in tf.trainable_variables() if v is not params])

        def init_fn(sess):
            sess.run(init_op)
            saver.restore(sess, get_model_checkpoint("VGG 19"))

        return {
            'graph': graph,
            'test_img': test_img,
            'init_fn': init_fn,
            'train_op': train_op,
            'loss': loss
        }
def vgg_19_extract(images,
                   trainable=True,
                   is_training=True,
                   weight_decay=0.00004,
                   stddev=0.1,
                   dropout_keep_prob=0.8,
                   use_batch_norm=True,
                   batch_norm_params=None,
                   add_summaries=True,
                   scope="vgg_19"):
    """Builds a VGG_19 subgraph for image embeddings.

  Args:
    images: A float32 Tensor of shape [batch, height, width, channels].
    trainable: Whether the VGG_19 submodel should be trainable or not.
    is_training: Boolean indicating training mode or not.
    weight_decay: Coefficient for weight regularization.
    stddev: The standard deviation of the truncated normal weight initializer.
    dropout_keep_prob: Dropout keep probability. NOTE(review): currently
      unused in this function — confirm whether it should be forwarded.
    use_batch_norm: Whether to use batch normalization. NOTE(review): the
      computed batch_norm_params are currently not applied, because the
      normalizer arguments below are hard-coded to None.
    batch_norm_params: Parameters for batch normalization. See
      tf.contrib.layers.batch_norm for details.
    add_summaries: Whether to add activation summaries.
    scope: Optional Variable scope.

  Returns:
    The conv5_3 activation tensor (end point
    'vgg_19/vgg_19/conv5/conv5_3'), not the full end_points dict.
  """
    # Only consider the VGG model to be in training mode if it's trainable.
    is_vgg_model_training = trainable and is_training

    if use_batch_norm:
        # Default parameters for batch normalization.
        if not batch_norm_params:
            batch_norm_params = {
                "is_training": is_vgg_model_training,
                "trainable": trainable,
                # Decay for the moving averages.
                "decay": 0.9997,
                # Epsilon to prevent 0s in variance.
                "epsilon": 0.001,
                # Collection containing the moving mean and moving variance.
                "variables_collections": {
                    "beta": None,
                    "gamma": None,
                    "moving_mean": ["moving_vars"],
                    "moving_variance": ["moving_vars"],
                }
            }
    else:
        batch_norm_params = None

    # L2 weight regularization only when the submodel is trainable.
    if trainable:
        weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    else:
        weights_regularizer = None

    with tf.variable_scope(scope, [images]) as scope:
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_regularizer=weights_regularizer,
                            trainable=trainable):
            # NOTE(review): batch norm is disabled here — the prepared
            # batch_norm_params are never passed because normalizer_fn and
            # normalizer_params are set to None (see commented values).
            with slim.arg_scope(
                [slim.conv2d],
                    weights_initializer=tf.truncated_normal_initializer(
                        stddev=stddev),
                    activation_fn=tf.nn.relu,
                    normalizer_fn=None,  #slim.batch_norm,
                    normalizer_params=None,  #batch_norm_params
            ):

                net, end_points = vgg_19(images, scope=scope)

    # Add summaries.
    if add_summaries:
        for v in end_points.values():
            tf.contrib.layers.summaries.summarize_activation(v)

    # The outer variable_scope nests vgg_19's own scope, hence the doubled
    # 'vgg_19/vgg_19/...' key.
    return end_points['vgg_19/vgg_19/conv5/conv5_3']