Example #1
 def testModelVariables(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         vgg.vgg_19(inputs, num_classes)
         expected_names = [
             'vgg_19/conv1/conv1_1/weights',
             'vgg_19/conv1/conv1_1/biases',
             'vgg_19/conv1/conv1_2/weights',
             'vgg_19/conv1/conv1_2/biases',
             'vgg_19/conv2/conv2_1/weights',
             'vgg_19/conv2/conv2_1/biases',
             'vgg_19/conv2/conv2_2/weights',
             'vgg_19/conv2/conv2_2/biases',
             'vgg_19/conv3/conv3_1/weights',
             'vgg_19/conv3/conv3_1/biases',
             'vgg_19/conv3/conv3_2/weights',
             'vgg_19/conv3/conv3_2/biases',
             'vgg_19/conv3/conv3_3/weights',
             'vgg_19/conv3/conv3_3/biases',
             'vgg_19/conv3/conv3_4/weights',
             'vgg_19/conv3/conv3_4/biases',
             'vgg_19/conv4/conv4_1/weights',
             'vgg_19/conv4/conv4_1/biases',
             'vgg_19/conv4/conv4_2/weights',
             'vgg_19/conv4/conv4_2/biases',
             'vgg_19/conv4/conv4_3/weights',
             'vgg_19/conv4/conv4_3/biases',
             'vgg_19/conv4/conv4_4/weights',
             'vgg_19/conv4/conv4_4/biases',
             'vgg_19/conv5/conv5_1/weights',
             'vgg_19/conv5/conv5_1/biases',
             'vgg_19/conv5/conv5_2/weights',
             'vgg_19/conv5/conv5_2/biases',
             'vgg_19/conv5/conv5_3/weights',
             'vgg_19/conv5/conv5_3/biases',
             'vgg_19/conv5/conv5_4/weights',
             'vgg_19/conv5/conv5_4/biases',
             'vgg_19/fc6/weights',
             'vgg_19/fc6/biases',
             'vgg_19/fc7/weights',
             'vgg_19/fc7/biases',
             'vgg_19/fc8/weights',
             'vgg_19/fc8/biases',
         ]
         model_variables = [v.op.name for v in slim.get_model_variables()]
         self.assertSetEqual(set(model_variables), set(expected_names))
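These are exactly the variable names in the published tf-slim `vgg_19.ckpt` checkpoint, so a `Saver` built over `slim.get_model_variables('vgg_19')` can restore the pretrained weights directly. A minimal sketch, assuming TF 1.x and a local copy of the checkpoint (the path is an assumption):

import tensorflow as tf
from tensorflow.contrib.slim.nets import vgg

slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
logits, _ = vgg.vgg_19(inputs, num_classes=1000, is_training=False)

# Restore only the model variables created by vgg_19.
restorer = tf.train.Saver(slim.get_model_variables('vgg_19'))
with tf.Session() as sess:
    restorer.restore(sess, 'vgg_19.ckpt')  # assumed checkpoint path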
Example #2
 def testEndPoints(self):
   batch_size = 5
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     inputs = tf.random_uniform((batch_size, height, width, 3))
     _, end_points = vgg.vgg_19(inputs, num_classes)
     expected_names = [
         'vgg_19/conv1/conv1_1',
         'vgg_19/conv1/conv1_2',
         'vgg_19/pool1',
         'vgg_19/conv2/conv2_1',
         'vgg_19/conv2/conv2_2',
         'vgg_19/pool2',
         'vgg_19/conv3/conv3_1',
         'vgg_19/conv3/conv3_2',
         'vgg_19/conv3/conv3_3',
         'vgg_19/conv3/conv3_4',
         'vgg_19/pool3',
         'vgg_19/conv4/conv4_1',
         'vgg_19/conv4/conv4_2',
         'vgg_19/conv4/conv4_3',
         'vgg_19/conv4/conv4_4',
         'vgg_19/pool4',
         'vgg_19/conv5/conv5_1',
         'vgg_19/conv5/conv5_2',
         'vgg_19/conv5/conv5_3',
         'vgg_19/conv5/conv5_4',
         'vgg_19/pool5',
         'vgg_19/fc6',
         'vgg_19/fc7',
         'vgg_19/fc8'
     ]
     self.assertSetEqual(set(end_points.keys()), set(expected_names))
Example #3
 def testEndPoints(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     for is_training in [True, False]:
         with tf.Graph().as_default():
             inputs = tf.random_uniform((batch_size, height, width, 3))
             _, end_points = vgg.vgg_19(inputs,
                                        num_classes,
                                        is_training=is_training)
             expected_names = [
                 'vgg_19/conv1/conv1_1', 'vgg_19/conv1/conv1_2',
                 'vgg_19/pool1', 'vgg_19/conv2/conv2_1',
                 'vgg_19/conv2/conv2_2', 'vgg_19/pool2',
                 'vgg_19/conv3/conv3_1', 'vgg_19/conv3/conv3_2',
                 'vgg_19/conv3/conv3_3', 'vgg_19/conv3/conv3_4',
                 'vgg_19/pool3', 'vgg_19/conv4/conv4_1',
                 'vgg_19/conv4/conv4_2', 'vgg_19/conv4/conv4_3',
                 'vgg_19/conv4/conv4_4', 'vgg_19/pool4',
                 'vgg_19/conv5/conv5_1', 'vgg_19/conv5/conv5_2',
                 'vgg_19/conv5/conv5_3', 'vgg_19/conv5/conv5_4',
                 'vgg_19/pool5', 'vgg_19/fc6', 'vgg_19/fc7', 'vgg_19/fc8'
             ]
             self.assertSetEqual(set(end_points.keys()),
                                 set(expected_names))
Example #4
def get_vgg_loss(x, y):
    from tensorflow.contrib.slim.nets import vgg as model_module
    combined_images = tf.concat([x, y], axis=0)
    input_img = (combined_images + 1.0) / 2.0
    VGG_MEANS = np.array([[[[0.485, 0.456, 0.406]]]]).astype('float32')
    VGG_MEANS = tf.constant(VGG_MEANS, shape=[1, 1, 1, 3])
    vgg_input = (input_img - VGG_MEANS) * 255.0
    bgr_input = tf.stack(
        [vgg_input[:, :, :, 2], vgg_input[:, :, :, 1], vgg_input[:, :, :, 0]],
        axis=-1)

    slim = tf.contrib.slim
    with slim.arg_scope(model_module.vgg_arg_scope()):
        _, end_points = model_module.vgg_19(bgr_input,
                                            num_classes=1000,
                                            spatial_squeeze=False,
                                            is_training=False)

    loss = 0
    for layer in ['vgg_19/conv3/conv3_1', 'vgg_19/conv5/conv5_1']:
        layer_shape = tf.shape(end_points[layer])
        x_vals = end_points[layer][:layer_shape[0] // 2]
        y_vals = end_points[layer][layer_shape[0] // 2:]
        loss += tf.reduce_mean(tf.pow(x_vals - y_vals, 2))

    return loss
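A minimal sketch of wiring this loss into a graph, assuming TF 1.x; the placeholder names and checkpoint path are assumptions, and the pretrained VGG-19 weights must be restored separately since vgg_19 is built with is_training=False:

import tensorflow as tf

# Hypothetical image batches scaled to [-1, 1], which the
# (combined_images + 1.0) / 2.0 step above expects.
x_images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
y_images = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
perceptual_loss = get_vgg_loss(x_images, y_images)

slim = tf.contrib.slim
init_fn = slim.assign_from_checkpoint_fn(
    'vgg_19.ckpt',  # assumed checkpoint path
    slim.get_model_variables('vgg_19'))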
Example #5
    def model_fn(features, labels, mode):  # pylint: disable=unused-argument
        """model_fn which uses a single unit Dense layer."""
        # You can also use the Flatten layer if you want to test a model without any
        # weights.
        num_classes = 1000
        print(features.shape)
        print(labels.shape)
        from tensorflow.contrib.slim.nets import vgg

        output, _ = vgg.vgg_19(features, 1000)

        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {"logits": output}
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                       logits=output)
        loss = tf.reduce_sum(loss)

        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(mode, loss=loss)

        assert mode == tf.estimator.ModeKeys.TRAIN

        global_step = tf.train.get_or_create_global_step()
        # `optimizer` is expected to be defined in the enclosing scope.
        train_op = optimizer.minimize(loss=loss, global_step=global_step)
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
Example #6
    def forward(self, input_tensor, is_training):
        dropout_value = 0.5

        input_tensor = tf.image.resize_images(input_tensor, [224, 224])

        print("Is training:", is_training)

        with slim.arg_scope(vgg.vgg_arg_scope()):
            h, end_points = vgg.vgg_19(input_tensor, is_training=is_training)

        print(list(end_points.keys()))

        h = tf.pad(end_points['vgg_19/pool4'], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        print(h)

        h = L.convolution2d_transpose(h, 128, [5, 5], [2, 2], activation_fn=None)
        h = tf.nn.relu(h)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d_transpose(h, 64, [5, 5], [2, 2], activation_fn=None)
        h = tf.nn.relu(h)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d_transpose(h, 32, [5, 5], [2, 2], activation_fn=None)
        h = tf.nn.relu(h)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d_transpose(h, 32, [5, 5], [2, 2], activation_fn=None)
        h = tf.nn.relu(h)
        h = L.dropout(h, keep_prob=dropout_value, is_training=is_training)

        h = L.convolution2d(h, len(self.classes) + 1, [1, 1], [1, 1], activation_fn=None)

        return h
Example #7
def vgg(bsize=None):
    from tensorflow.contrib.slim.nets import vgg
    x = tf.placeholder(tf.float32, shape=(bsize, 224, 224, 3))
    y = tf.placeholder(tf.float32, shape=(bsize, 1000))
    output, _ = vgg.vgg_19(x, 1000)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.2).minimize(tf.reduce_sum(loss))
    return optimizer
Example #8
 def testForward(self):
   batch_size = 1
   height, width = 224, 224
   with self.test_session() as sess:
     inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(inputs)
     sess.run(tf.initialize_all_variables())
     output = sess.run(logits)
     self.assertTrue(output.any())
Example #9
def model_fn():
    from tensorflow.contrib.slim.nets import vgg
    x = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
    y = tf.placeholder(tf.float32, shape=(None, 1000))
    output, _ = vgg.vgg_19(x, 1000)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)
    optimizer = tf.train.GradientDescentOptimizer(0.2).minimize(
        tf.reduce_sum(loss))
    return optimizer
Example #10
    def test_imagenet_vgg19(self):
        tf.get_logger().setLevel('ERROR')
        session = tf.compat.v1.InteractiveSession(graph=tf.Graph())
        input = tf.compat.v1.placeholder(tf.float32, shape=(None, 224, 224, 3))
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=DeprecationWarning)
            logits, _ = vgg.vgg_19(input, is_training=False)
        restorer = tf.compat.v1.train.Saver()
        restorer.restore(
            session,
            utils.python_file_dir(__file__) +
            '/models/tensorflow_vgg_19/vgg_19.ckpt')
        mean = (123.68, 116.78, 103.94)
        std = (1, 1, 1)
        data_preprocess = self.ImageNetValData(224,
                                               224,
                                               'vgg19',
                                               transform=lambda x:
                                               (x - mean) / std,
                                               label_offset=0)
        data_original = self.ImageNetValData(224,
                                             224,
                                             'vgg19',
                                             transform=None,
                                             label_offset=0)
        bounds = (0, 255)

        measure_model = TensorFlowModel(session, logits, input)

        accuracy = Accuracy()
        measure_model.predict(data_preprocess.x, data_preprocess.y,
                              [accuracy.update, accuracy.report])

        neuron_coverage = NeuronCoverage()
        measure_model.intermediate_layer_outputs(
            data_preprocess.x,
            [neuron_coverage.update, neuron_coverage.report])

        robustness = Robustness(bounds)
        measure_model.adversarial_samples(
            data_original.x,
            data_original.y,
            3,
            bounds, [
                robustness.update, robustness.report,
                utils.draw_adversarial_samples
            ],
            batch_size=1,
            preprocessing=(mean, std))

        session.close()

        self.assertAlmostEqual(accuracy.get(1), 0.625000)
        self.assertAlmostEqual(accuracy.get(5), 0.925000)
        self.assertAlmostEqual(neuron_coverage.get(0.3), 0.576892, places=2)
        self.assertAlmostEqual(robustness.success_rate, 1.000000)
Example #11
 def testFullyConvolutional(self):
     batch_size = 1
     height, width = 256, 256
     num_classes = 1000
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
         self.assertEquals(logits.op.name, 'vgg_19/fc8/BiasAdd')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, 2, 2, num_classes])
Example #12
 def testBuild(self):
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_19(inputs, num_classes)
         self.assertEquals(logits.op.name, 'vgg_19/fc8/squeezed')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
Example #13
 def testEvaluation(self):
   batch_size = 2
   height, width = 224, 224
   num_classes = 1000
   with self.test_session():
     eval_inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     predictions = tf.argmax(logits, 1)
     self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
Example #14
def main(image_path, ckpt_path, predict_status=False):
    images = tf.placeholder(tf.float32, (None, 224, 224, 3))
    preprocessed_images = vgg_preprocessing(images)
    logits, _ = vgg.vgg_19(preprocessed_images, is_training=False)
    restorer = tf.train.Saver(tf.trainable_variables())

    image = open_image(image_path)
    p, ext = os.path.splitext(image_path)
    adv_path = p + '-adv' + ext
    pert_path = p + '-pert' + ext

    with tf.Session() as session:
        restorer.restore(session, ckpt_path)
        model = TensorFlowModel(images, logits, (0, 255))
        label = np.argmax(model.predictions(image))
        print('label:', label)
        if predict_status:
            return

        # target_class = 22
        # criterion = TargetClassProbability(target_class, p=0.99)

        # attack = LBFGSAttack(model, criterion)

        # attack = FGSM(model, criterion)
        attack = FGSM(model)

        # attack = MomentumIterativeAttack(model, criterion)
        # attack = MomentumIterativeAttack(model)

        # attack = SinglePixelAttack(model)
        # attack = LocalSearchAttack(model)

        adversarial = attack(image, label=label)
        new_label = np.argmax(model.predictions(adversarial))
        print('new label:', new_label)

        image = image.astype(np.uint8)
        adversarial = adversarial.astype(np.uint8)
        pert = adversarial - image

        save_image(adversarial, adv_path)
        save_image(pert, pert_path)

        # show images
        plt.subplot(1, 3, 1)
        plt.imshow(image)
        plt.subplot(1, 3, 2)
        plt.imshow(adversarial)
        plt.subplot(1, 3, 3)
        plt.imshow(pert)
        plt.show()
Example #15
    def fprop(self, x, **kwargs):
        del kwargs

        with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
            with slim.arg_scope(vgg.vgg_arg_scope()):
                logits, _ = vgg.vgg_19(
                    x, num_classes=self.nb_classes,
                    dropout_keep_prob=self.dropout_keep_prob,
                    is_training=self.is_training, scope=self.scope)

        probs = tf.nn.softmax(logits)

        return {self.O_LOGITS: logits, self.O_PROBS: probs}
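A hedged usage sketch, assuming `model` is an instance of the CleverHans-style wrapper this method belongs to (`O_LOGITS` and `O_PROBS` are the usual output keys of that interface):

x = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
outputs = model.fprop(x)
logits = outputs[model.O_LOGITS]  # pre-softmax scores
probs = outputs[model.O_PROBS]    # softmax probabilities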
Example #16
 def testTrainEvalWithReuse(self):
   train_batch_size = 2
   eval_batch_size = 1
   train_height, train_width = 224, 224
   eval_height, eval_width = 256, 256
   num_classes = 1000
   with self.test_session():
     train_inputs = tf.random_uniform(
         (train_batch_size, train_height, train_width, 3))
     logits, _ = vgg.vgg_19(train_inputs)
     self.assertListEqual(logits.get_shape().as_list(),
                          [train_batch_size, num_classes])
     tf.get_variable_scope().reuse_variables()
     eval_inputs = tf.random_uniform(
         (eval_batch_size, eval_height, eval_width, 3))
     logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
                            spatial_squeeze=False)
     self.assertListEqual(logits.get_shape().as_list(),
                          [eval_batch_size, 2, 2, num_classes])
     logits = tf.reduce_mean(logits, [1, 2])
     predictions = tf.argmax(logits, 1)
     self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
Example #17
def choose_net(inputs, net_name):
    if net_name == 'vgg':
        net_out, end_points = vgg.vgg_19(inputs,
                                         num_classes=2)  # is_training=False
    elif net_name == 'resnet':
        net_out, end_points = resnet_v2.resnet_v2_152(inputs, num_classes=2)
        net_out = tf.reshape(net_out, (-1, 2))
    elif net_name == 'fpn_vgg':
        fpn_net = fpn.FPN(inputs=inputs, net_name='vgg_19')
        net_out = fpn_net.net_scores
    elif net_name == 'fpn_res':
        fpn_net = fpn.FPN(inputs=inputs, net_name='resnet_v1_101')
        net_out = fpn_net.net_scores
    else:
        raise ValueError('unknown net_name: {}'.format(net_name))
    return net_out
Example #18
 def __init__(self, inputs, net_name, share_head=False):
     self.inputs = inputs
     self.net_name = net_name
     self.share_head = share_head
     self.num_classes = NUM_CLASSES
     if self.net_name == 'resnet_v1_101':
         _, self.share_net = self.get_network_by_name(
             self.net_name, self.inputs)
     elif self.net_name == 'vgg_19':
         _, self.share_net = vgg.vgg_19(
             inputs=inputs,
             num_classes=self.num_classes,
             is_training=True,
         )  # not sure why, but vgg_19 can't be built through get_network_by_name
     self.level = LEVEL
     self.feature_maps_dict = self.get_feature_maps()
     self.feature_pyramid = self.build_feature_pyramid()
     self.net_scores = self.fpn_net()
Example #19
    def get_network_by_name(
        self,
        net_name,
        inputs,
        num_classes=None,
        is_training=True,
        global_pool=True,
        output_stride=None,
    ):
        if net_name == 'resnet_v1_50':
            with slim.arg_scope(
                    resnet_v1.resnet_arg_scope(weight_decay=0.0001)):
                logits, end_points = resnet_v1.resnet_v1_50(
                    inputs=inputs,
                    num_classes=num_classes,
                    is_training=is_training,
                    global_pool=global_pool,
                    output_stride=output_stride,
                )
            return logits, end_points

        if net_name == 'resnet_v1_101':
            with slim.arg_scope(
                    resnet_v1.resnet_arg_scope(weight_decay=0.0001)):
                logits, end_points = resnet_v1.resnet_v1_101(
                    inputs=inputs,
                    num_classes=num_classes,
                    is_training=is_training,
                    global_pool=global_pool,
                    output_stride=output_stride,
                )
            return logits, end_points
        if net_name == 'vgg_19':
            with slim.arg_scope(vgg.vgg_arg_scope(weight_decay=0.0001)):
                logits, end_points = vgg.vgg_19(
                    inputs=inputs,
                    num_classes=num_classes,
                    is_training=is_training,
                )
            return logits, end_points
Example #20
def model_fn(model_name, batch_size):
    if model_name == "vgg19":
        from tensorflow.contrib.slim.nets import vgg
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        output, _ = vgg.vgg_19(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == "resnet200":
        from tensorflow.contrib.slim.nets import resnet_v2
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1, 1, 1000))
        output, _ = resnet_v2.resnet_v2_200(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == "resnet101":
        from tensorflow.contrib.slim.nets import resnet_v2
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1, 1, 1000))
        output, _ = resnet_v2.resnet_v2_101(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == "resnet152":
        from tensorflow.contrib.slim.nets import resnet_v2
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1, 1, 1000))
        output, _ = resnet_v2.resnet_v2_152(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == "nasnet_cifar":
        from tensorflow.contrib.slim.nets import nasnet
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        output, _ = nasnet.build_nasnet_cifar(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)
    elif model_name == "mobile_net":
        from tensorflow.contrib.slim.nets import mobilenet_v2
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        output, _ = mobilenet_v2.mobilenet(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == "inceptionv3":
        from tensorflow.contrib.slim.nets import inception_v3
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        output, _ = inception_v3.inception_v3(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == "transformer":
        import modeltransformer.transformer as transf
        from modeltransformer.data import DatasetManager
        dm = DatasetManager("wmt14")
        dm.maybe_download_data_files()
        dm.load_vocab()
        transformer = transf.Transformer(
            num_heads=8,
            d_model=512,
            d_ff=2048,
            model_name=model_name,
            tf_sess_config=dict(allow_soft_placement=True))
        train_params = dict(
            learning_rate=1e-4,
            batch_size=batch_size,
            seq_len=10,
            max_steps=300000,
        )
        transformer.build_model("wmt14", dm.source_id2word, dm.target_id2word,
                                0, **train_params)
        loss = transformer._loss

    elif model_name == "bert":
        from bert.runsquad import new_model_fn_builder
        import modeling
        bert_config = modeling.BertConfig.from_json_file(
            "bert/bert_large/bert_config.json")
        model = new_model_fn_builder(bert_config)
        features = {}
        features["input_ids"] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size, 128)),
            tf.int32)
        features["input_mask"] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size, 128)),
            tf.int32)
        features["segment_ids"] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size, 128)),
            tf.int32)
        features["start_positions"] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size, )), tf.int32)
        features["end_positions"] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size, )), tf.int32)
        loss = model(features)
    elif model_name == "small":
        slim = tf.contrib.slim
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        v = tf.get_variable(name="large_variable",
                            shape=(3000, 224, 224, 3),
                            trainable=True)
        x = tf.slice(v, [0, 0, 0, 0], tf.shape(x), name="large_slice")
        net = slim.max_pool2d(x, [2, 2], 2)
        net = slim.conv2d(net, 128, [5, 5], trainable=False)
        net = slim.max_pool2d(net, [2, 2], 2)
        net = slim.conv2d(net, 128, [5, 5], trainable=False)
        net = slim.max_pool2d(net, [2, 2], 2)
        net = slim.conv2d(net, 128, [5, 5], trainable=False)
        net = slim.max_pool2d(net, [2, 2], 2)
        net = slim.flatten(net)
        net = slim.fully_connected(net,
                                   1024,
                                   activation_fn=tf.nn.sigmoid,
                                   trainable=False)
        net = slim.fully_connected(net,
                                   1000,
                                   activation_fn=None,
                                   trainable=False)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=net)
    optimizer = tf.train.AdamOptimizer(learning_rate=0.2,
                                       beta1=0.9,
                                       beta2=0.98,
                                       epsilon=1e-9).minimize(
                                           tf.reduce_sum(loss))
    return optimizer
Example #21
def train():
    with tf.Graph().as_default(), tf.device('/cpu:0'):

        img_names_q, img_names_uq, img_num = vgg_train.load_all_img_names()
        aa, bb = [1, 0], [0, 1]
        len_qual, len_unqual = len(img_names_q), len(img_names_uq)
        qual_y = np.array(len_qual * aa).reshape(len_qual, 2)
        unqual_y = np.array(len_unqual * bb).reshape(len_unqual, 2)
        all_img_names = np.array(img_names_q + img_names_uq)
        ys = np.concatenate((qual_y, unqual_y), axis=0)
        print('Net build')
        global_step = tf.train.get_or_create_global_step()
        tower_grads = []
        tfx = tf.placeholder(tf.float32, [None, 224, 224, 1])
        tfy = tf.placeholder(tf.float32, [None, 2])
        opt = tf.train.MomentumOptimizer(0.0005, 0.9)
        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(num_gpus):
                with tf.device(
                        assign_to_device('/gpu:{}'.format(i),
                                         ps_device='/cpu:0')):
                    _tfx = tfx[i * batch_size:(i + 1) * batch_size]
                    _tfy = tfy[i * batch_size:(i + 1) * batch_size]
                    out, end_points = vgg.vgg_19(_tfx, num_classes=2)
                    tf.get_variable_scope().reuse_variables()
                    loss = tf.losses.softmax_cross_entropy(_tfy, out)
                    grads = opt.compute_gradients(loss)
                    tower_grads.append(grads)
                    correct_prediction = tf.equal(tf.argmax(out, 1),
                                                  tf.argmax(_tfy, 1))
                    accuracy = tf.reduce_mean(
                        tf.cast(correct_prediction, tf.float32))
        grads = average_gradients(tower_grads)
        train_op = opt.apply_gradients(grads)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            saver.restore(sess, './model_vgg19_1225/transfer_learn_1')
            for step in range(1, num_steps):
                b_idx = np.random.randint(0, img_num, num_gpus * batch_size)
                batch_x, batch_y = vgg_train.load_img_by_name(
                    all_img_names[b_idx]), ys[b_idx]
                ts = time.time()
                sess.run(train_op, feed_dict={tfx: batch_x, tfy: batch_y})
                te = time.time() - ts
                loss_value, acc = sess.run([loss, accuracy],
                                           feed_dict={
                                               tfx: batch_x,
                                               tfy: batch_y
                                           })
                if step % 500 == 0 or loss_value > 0.069:
                    print(step, 'train loss:', loss_value, 'accuracy', acc,
                          '****', 'ys:', ys[b_idx], 'pred:',
                          sess.run(out, feed_dict={tfx: batch_x}))
                if step % 20 == 0:
                    saver.save(sess,
                               './model_vgg19_1225/transfer_learn_%d' % step)

        qual_x, unqual_x, qual_y, unqual_y = vgg_train.load_data()
        xs = np.concatenate(qual_x + unqual_x, axis=0)
        ys = np.concatenate((qual_y, unqual_y), axis=0)
        b_idx = np.random.randint(0, len(xs), 2)
        img_bch, label_bch = [xs[b_idx], ys[b_idx]]
        out, end_points = vgg.vgg_19(img_bch,
                                     num_classes=2)  # try upgrading from VGG16 to VGG19
        # net_flatten = tf.reshape(fc8, [-1, 1 * 6 * 2])
        # out = tf.layers.dense(net_flatten, 2, name='vgg_out')
        # print(aac_x, aae_x, aat_x, aac_y, aae_y, aat_y)
        print('Net build')
        opt = tf.train.MomentumOptimizer(0.0005, 0.9)

        sess = tf.Session()
        # sess.run(tf.global_variables_initializer())

        for i in range(20000000):
            tower_grads = []
            tower_loss = []
            for d in range(num_gpus):
                gpu_device_name = '/GPU:0' if d == 0 else '/device:XLA_GPU:0'
                # with tf.device('/gpu:%d' % d):
                with tf.device(gpu_device_name):
                    print('calculated by device /gpu: %d' % d)
                    with tf.name_scope('%s_%s' % ('tower', d)):
                        # tf.train.Saver().restore(sess, './model_vgg_1119/transfer_learn_2000')
                        # loss = tf.losses.softmax_cross_entropy(tfy, out)
                        aa, bb = tf.nn.softmax(tf.cast(
                            label_bch, tf.float64)), tf.nn.softmax(out)
                        ce_2 = -tf.reduce_mean(
                            aa[0][0] * tf.math.log(bb[0][0]) + aa[0][1] *
                            tf.math.log(bb[0][1]) * loss_unbalance_w)
                        # opt = tf.train.MomentumOptimizer(0.0005, 0.9)
                        # train_op = tf.train.MomentumOptimizer(0.0005, 0.9).minimize(loss)
                        with tf.device('/cpu:0'):
                            correct_prediction = tf.equal(
                                tf.argmax(out, 1), tf.argmax(label_bch, 1))
                            accuracy = tf.reduce_mean(
                                tf.cast(correct_prediction, tf.float32))
                        with tf.variable_scope('loss'):
                            grads = opt.compute_gradients(ce_2)
                            tower_grads.append(grads)
                            tower_loss.append(ce_2)
                            tf.get_variable_scope().reuse_variables()
            mean_loss = tf.stack(axis=0, values=tower_loss)
            mean_loss = tf.reduce_mean(mean_loss, axis=0)
            mean_grads = average_gradients(tower_grads)
            apply_gradient_op = opt.apply_gradients(mean_grads)

            # example, lbl, name = sess.run([image, label, img_name])  # fetch the image and label in the session; example has shape [800, 1200] and holds pixel values

            # img = Image.fromarray(example)   # Image here is the class imported earlier
            # img.save(cwd+str(i)+'_''Label_'+str(lbl)+'.jpg')  # save the image to disk
            sess.run(tf.global_variables_initializer())
            _, losses, accuracy1 = sess.run(
                (apply_gradient_op, ce_2, accuracy))
            tf.summary.scalar('loss', losses)
            merged_summaries = tf.summary.merge_all()
            fileWriter = tf.summary.FileWriter('./logs/vgg', graph=sess.graph)

            # losses, _, accuracy1 = sess.run((ce_2, train_op, accuracy), feed_dict={tfx: xs[b_idx], tfy: ys[b_idx]})
            if i % 500 == 0 or losses > 0.069:
                print(i, 'train loss:', losses, 'accuracy', accuracy1)
            if i % 10000 == 0:
                tf.train.Saver().save(sess,
                                      './model_vgg_1119/transfer_learn_%d' % i)
            # summary = sess.run(merged_summaries, feed_dict={loss: losses})
            # fileWriter.add_summary(summary=summary, global_step=i)
            # add the summary directly -- a better method:
            summary = tf.Summary(value=[
                tf.Summary.Value(tag='training_loss',
                                 simple_value=float(losses))
            ])
            fileWriter.add_summary(summary, i)
        # print(i, 'train loss:', losses)
    tf.train.Saver().save(sess, './model_vgg_1119/transfer_learn')
Example #22
def model_fn(model_name, batch_size):
    if model_name == 'vgg19':
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        output, _ = vgg.vgg_19(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == 'resnet200':
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1, 1, 1000))
        output, _ = resnet_v2.resnet_v2_200(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == 'resnet101':
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1, 1, 1000))
        output, _ = resnet_v2.resnet_v2_101(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == 'resnet152':
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1, 1, 1000))
        output, _ = resnet_v2.resnet_v2_152(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == 'nasnet_cifar':
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        output, _ = nasnet.build_nasnet_cifar(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == 'mobile_net':
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        output, _ = mobilenet_v2.mobilenet(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == 'inceptionv3':
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        output, _ = inception.inception_v3(x, 1000)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)

    elif model_name == 'transformer':
        dm = DatasetManager('wmt14')
        dm.maybe_download_data_files()
        dm.load_vocab()
        transformer = transf.Transformer(
            num_heads=8,
            d_model=512,
            d_ff=2048,
            model_name=model_name,
            tf_sess_config=dict(allow_soft_placement=True))
        train_params = dict(
            learning_rate=1e-4,
            batch_size=batch_size,
            seq_len=10,
            max_steps=300000,
        )
        transformer.build_model('wmt14', dm.source_id2word, dm.target_id2word,
                                0, **train_params)
        loss = transformer._loss

    elif model_name == 'bert':
        # bert_config = modeling.BertConfig.from_json_file('bert/bert_large/bert_config.json')
        bert_large_config_path = 'bert/pre-trained/large/cased_L-24_H-1024_A-16/bert_config.json'
        bert_config = modeling.BertConfig.from_json_file(bert_large_config_path)
        model = new_model_fn_builder(bert_config)
        features = {}
        features['input_ids'] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size, 128)), tf.int32)
        features['input_mask'] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size, 128)), tf.int32)
        features['segment_ids'] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size, 128)), tf.int32)
        features['start_positions'] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size,)), tf.int32)
        features['end_positions'] = tf.cast(
            100 * tf.placeholder(tf.float32, shape=(batch_size,)), tf.int32)
        loss = model(features)

    elif model_name == 'small':
        slim = tf.contrib.slim
        x = tf.placeholder(tf.float32, shape=(batch_size, 224, 224, 3))
        y = tf.placeholder(tf.float32, shape=(batch_size, 1000))
        v = tf.get_variable(name='large_variable', shape=(3000, 224, 224, 3),
                            trainable=True)
        x = tf.slice(v, [0, 0, 0, 0], tf.shape(x), name='large_slice')
        net = slim.max_pool2d(x, [2, 2], 2)
        net = slim.conv2d(net, 128, [5, 5], trainable=False)
        net = slim.max_pool2d(net, [2, 2], 2)
        net = slim.conv2d(net, 128, [5, 5], trainable=False)
        net = slim.max_pool2d(net, [2, 2], 2)
        net = slim.conv2d(net, 128, [5, 5], trainable=False)
        net = slim.max_pool2d(net, [2, 2], 2)
        net = slim.flatten(net)
        net = slim.fully_connected(net, 1024, activation_fn=tf.nn.sigmoid,
                                   trainable=False)
        net = slim.fully_connected(net, 1000, activation_fn=None,
                                   trainable=False)
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=net)

    optimizer = tf.train.AdamOptimizer(learning_rate=0.2,
                                       beta1=0.9, beta2=0.98,
                                       epsilon=1e-9).minimize(tf.reduce_sum(loss))
    # TODO: make lr, beta1/beta2, and epsilon configurable parameters
    """
    if opt == 'Adam':
        optimizer = tf.train.AdamOptimizer(learning_rate=0.2,
                            beta1=0.9, beta2=0.98, epsilon=1e-9).minimize(
                                                        tf.reduce_sum(loss))
    elif opt == 'GradientDescent':
        optimizer = tf.train.GradientDescentOptimizer(
                                learning_rate=0.2).minimize(tf.reduce_sum(loss))
    """
    return optimizer
Example #23
    # Convert image to float32 before subtracting the
    # mean pixel value
    image_float = tf.to_float(image, name='ToFloat')

    # Subtract the mean pixel value from each pixel
    processed_image = _mean_image_subtraction(image_float,
                                              [_R_MEAN, _G_MEAN, _B_MEAN])

    input_image = tf.expand_dims(processed_image, 0)

    with slim.arg_scope(vgg.vgg_arg_scope()):
        # spatial_squeeze option enables to use network in a fully
        # convolutional manner
        logits, _ = vgg.vgg_19(input_image,
                               num_classes=1000,
                               is_training=False,
                               spatial_squeeze=False)

    # For each pixel we get predictions for each class
    # out of 1000. We need to pick the one with the highest
    # probability. To be more precise, these are not probabilities,
    # because we didn't apply softmax. But if we pick a class
    # with the highest value it will be equivalent to picking
    # the highest value after applying softmax
    pred = tf.argmax(logits, dimension=3)

    checkpoints_dir = 'slim_pretrained'
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'vgg_19.ckpt'),
        slim.get_model_variables('vgg_19'))
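A hedged sketch of how this fragment is typically finished, following the standard tf-slim pattern (`pred` and `init_fn` come from the snippet above; the session wiring is an assumption):

with tf.Session() as sess:
    init_fn(sess)  # restores the pretrained VGG-19 weights
    segmentation = sess.run(pred)  # per-pixel class predictions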
Example #24
def save_img(out_path, img):
    img = np.clip(img * 255.0, 0, 255).astype(np.uint8)
    scipy.misc.imsave(out_path, img)


def get_img(src, img_size=False):
    img = scipy.misc.imread(src, mode='RGB')  # misc.imresize(, (256, 256, 3))
    if not (len(img.shape) == 3 and img.shape[2] == 3):
        img = np.dstack((img, img, img))
    if img_size != False:
        img = scipy.misc.imresize(img, img_size)

    return np.array(img, dtype=np.float32) / 255.0


inputs = tf.placeholder(tf.float32, shape=[4, 256, 256, 3])

with slim.arg_scope(vgg.vgg_arg_scope()):
    _, end_points = vgg.vgg_19(inputs, spatial_squeeze=False)
    print("pen")
    print(end_points)

print("atu")
fnet_variable_list = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES,
                                       scope="vgg_19")
saver = Saver(fnet_variable_list, save_path="vgg19")
print("asffas")
with tf.Session() as sess:
    saver.load(sess)
    print(end_points['vgg_19/conv4/conv4_2'])