def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      inception.inception_v3(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
  def testRaiseValueErrorWithInvalidDepthMultiplier(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    with self.assertRaises(ValueError):
      _ = inception.inception_v3(inputs, num_classes, depth_multiplier=-0.1)
    with self.assertRaises(ValueError):
      _ = inception.inception_v3(inputs, num_classes, depth_multiplier=0.0)
  def testBatchNormScale(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(
        inception.inception_v3_arg_scope(batch_norm_scale=True)):
      inception.inception_v3(inputs, num_classes, is_training=False)

    gamma_names = set(
        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
  def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000

    train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
    inception.inception_v3(train_inputs, num_classes)
    eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                       is_training=False, reuse=True)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
  def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_v3(inputs, num_classes)

    endpoint_keys = [key for key in end_points.keys()
                     if key.startswith('Mixed') or key.startswith('Conv')]

    _, end_points_with_multiplier = inception.inception_v3(
        inputs, num_classes, scope='depth_multiplied_net',
        depth_multiplier=0.5)

    for key in endpoint_keys:
      original_depth = end_points[key].get_shape().as_list()[3]
      new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
      self.assertEqual(0.5 * original_depth, new_depth)
    def testTrainEvalWithReuse(self):
        train_batch_size = 5
        eval_batch_size = 2
        height, width = 150, 150
        num_classes = 1000

        train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
        inception.inception_v3(train_inputs, num_classes)
        eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
        logits, _ = inception.inception_v3(eval_inputs,
                                           num_classes,
                                           is_training=False,
                                           reuse=True)
        predictions = tf.argmax(logits, 1)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEquals(output.shape, (eval_batch_size, ))
  def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_v3(inputs, num_classes)

    endpoint_keys = [key for key in end_points.keys()
                     if key.startswith('Mixed') or key.startswith('Conv')]

    _, end_points_with_multiplier = inception.inception_v3(
        inputs, num_classes, scope='depth_multiplied_net',
        depth_multiplier=2.0)

    for key in endpoint_keys:
      original_depth = end_points[key].get_shape().as_list()[3]
      new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
      self.assertEqual(2.0 * original_depth, new_depth)
  def testLogitsNotSqueezed(self):
    num_classes = 25
    images = tf.random_uniform([1, 299, 299, 3])
    logits, _ = inception.inception_v3(images,
                                       num_classes=num_classes,
                                       spatial_squeeze=False)

    with self.test_session() as sess:
      tf.global_variables_initializer().run()
      logits_out = sess.run(logits)
      self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
  def testBuildPreLogitsNetwork(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = None

    inputs = tf.random_uniform((batch_size, height, width, 3))
    net, end_points = inception.inception_v3(inputs, num_classes)
    self.assertTrue(net.op.name.startswith('InceptionV3/Logits/AvgPool'))
    self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 2048])
    self.assertFalse('Logits' in end_points)
    self.assertFalse('Predictions' in end_points)
Example #10
def predict(image, version='V3'):
    tf.reset_default_graph()
    # Process the image
    raw_image, processed_image = process_image(image)
    class_names = imagenet.create_readable_names_for_imagenet_labels()
    # Create a placeholder for the images
    X = tf.placeholder(tf.float32, [None, 299, 299, 3], name="X")
    # inception_v3 returns logits and an end_points dictionary;
    # the logits are the network outputs before the softmax activation.

    if version.upper() == 'V3':
        model_ckpt_path = INCEPTION_V3_CKPT_PATH
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            # Set the number of classes and is_training parameter
            logits, end_points = inception.inception_v3(
                X, num_classes=1001, is_training=False)

    elif version.upper() == 'V4':
        model_ckpt_path = INCEPTION_V4_CKPT_PATH
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            # Set the number of classes and is_training parameter
            # Logits
            logits, end_points = inception.inception_v4(
                X, num_classes=1001, is_training=False)

    predictions = end_points.get('Predictions', 'No key named predictions')
    saver = tf.train.Saver()

    with tf.Session() as sess:
        saver.restore(sess, model_ckpt_path)
        prediction_values = predictions.eval({X: processed_image})

    try:
        # Add an index to predictions and then sort by probability
        prediction_values = [
            (i, prediction)
            for i, prediction in enumerate(prediction_values[0, :])
        ]
        prediction_values = sorted(
            prediction_values, key=lambda x: x[1], reverse=True)
        # Plot the image
        # plot_color_image(raw_image)
        plot_color_image(image)
        print("Using Inception_{} CNN\nPrediction: Probability\n".format(
            version))
        # Display the image and predictions
        for i in range(5):
            predicted_class = class_names[prediction_values[i][0]]
            probability = prediction_values[i][1]
            print("{}: {:.2f}%".format(predicted_class, probability * 100))

    # If the predictions do not come out right
    except Exception:
        print(predictions)
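
The predict helper above relies on a process_image function that is not shown; a minimal sketch of such preprocessing, assuming Inception-style inputs (299x299 RGB scaled to [-1, 1]), might look like the following. The real helper in the source may differ.

import numpy as np
from PIL import Image

def process_image(image_path):
    # Hypothetical stand-in for the process_image helper used by predict():
    # load the image, resize to 299x299 and scale pixels to [-1, 1].
    raw_image = np.asarray(Image.open(image_path).convert('RGB'))
    resized = np.asarray(
        Image.fromarray(raw_image).resize((299, 299)), dtype=np.float32)
    processed = resized / 255.0 * 2.0 - 1.0
    return raw_image, processed[np.newaxis, ...]  # add a batch dimension
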
  def testLogitsNotSqueezed(self):
    num_classes = 25
    images = tf.random.uniform([1, 299, 299, 3])
    logits, _ = inception.inception_v3(images,
                                       num_classes=num_classes,
                                       spatial_squeeze=False)

    with self.test_session() as sess:
      tf.compat.v1.global_variables_initializer().run()
      logits_out = sess.run(logits)
      self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
  def testBuildPreLogitsNetwork(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = None

    inputs = tf.random.uniform((batch_size, height, width, 3))
    net, end_points = inception.inception_v3(inputs, num_classes)
    self.assertTrue(net.op.name.startswith('InceptionV3/Logits/AvgPool'))
    self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 2048])
    self.assertFalse('Logits' in end_points)
    self.assertFalse('Predictions' in end_points)
Example #13
 def __call__(self, x_input):
   """Constructs model and return probabilities for given input."""
   reuse = True if self.built else None
   with slim.arg_scope(inception.inception_v3_arg_scope()):
     _, end_points = inception.inception_v3(
         x_input, num_classes=self.num_classes, is_training=False,
         reuse=reuse)
   self.built = True
   output = end_points['Predictions']
   # Strip off the extra reshape op at the output
   probs = output.op.inputs[0]
   return probs
Example #14
    def _buildInception():
        g = tf.Graph()
        with g.as_default():
            train_batch_size = 5
            eval_batch_size = 2
            height, width = 150, 150
            num_classes = 1000
            train_inputs = tf.random_uniform(
                (train_batch_size, height, width, 3))
            inception.inception_v3(train_inputs, num_classes)
            eval_inputs = tf.random_uniform(
                (eval_batch_size, height, width, 3))
            logits, _ = inception.inception_v3(eval_inputs,
                                               num_classes,
                                               is_training=False,
                                               reuse=True)
            predictions = tf.argmax(logits, 1)

        train_op = g.get_collection_ref(tf_ops.GraphKeys.TRAIN_OP)
        train_op.append(predictions)
        return g
  def testHalfSizeImages(self):
    batch_size = 5
    height, width = 150, 150
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, end_points = inception.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    pre_pool = end_points['Mixed_7c']
    self.assertListEqual(pre_pool.get_shape().as_list(),
                         [batch_size, 3, 3, 2048])
  def testHalfSizeImages(self):
    batch_size = 5
    height, width = 150, 150
    num_classes = 1000

    inputs = tf.random.uniform((batch_size, height, width, 3))
    logits, end_points = inception.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    pre_pool = end_points['Mixed_7c']
    self.assertListEqual(pre_pool.get_shape().as_list(),
                         [batch_size, 3, 3, 2048])
Example #17
  def testBuildClassificationNetwork(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, end_points = inception.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('Predictions' in end_points)
    self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                         [batch_size, num_classes])
Example #18
def graph_incv3(x, y, i, x_max, x_min, grad, grad_D):
    eps = 2.0 * FLAGS.max_epsilon / 255.0
    kd = FLAGS.derivative
    momentum = FLAGS.momentum
    num_iter = FLAGS.num_iter
    alpha = eps / FLAGS.num_iter
    tf.get_variable_scope().reuse_variables()

    x_nes = x + alpha * momentum * grad
    x_b = x - alpha * grad_D

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits_1, end_points_1 = inception.inception_v3(
            x_b, num_classes=FLAGS.num_classes, is_training=False)
    cross_entropy_1 = tf.losses.softmax_cross_entropy(y,
                                                      logits_1,
                                                      label_smoothing=0.0,
                                                      weights=1.0)
    noise_1 = tf.gradients(cross_entropy_1, x_b)[0]
    noise_1 = noise_1 / tf.reduce_mean(tf.abs(noise_1), [1, 2, 3],
                                       keep_dims=True)

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, end_points = inception.inception_v3(
            x_nes, num_classes=FLAGS.num_classes, is_training=False)
    #logits = (end_points['Logits'])
    cross_entropy = tf.losses.softmax_cross_entropy(y,
                                                    logits,
                                                    label_smoothing=0.0,
                                                    weights=1.0)
    noise = tf.gradients(cross_entropy, x)[0]
    noise = noise / tf.reduce_mean(tf.abs(noise), [1, 2, 3], keep_dims=True)
    grad_D = grad_D + kd * (noise - noise_1)
    noise_all = momentum * grad + noise - grad_D

    x = x + alpha * tf.sign(noise_all)
    x = tf.clip_by_value(x, x_min, x_max)
    i = tf.add(i, 1)
    return x, y, i, x_max, x_min, noise_all, grad_D
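
graph_incv3 has the signature of a tf.while_loop body. A hedged sketch of the surrounding driver is shown below; the placeholder setup and clipping bounds are assumptions, and the InceptionV3 variables must already exist in the graph before the body's reuse_variables() call can succeed.

def stop(x, y, i, x_max, x_min, grad, grad_D):
    # Loop condition matching the body signature above.
    return tf.less(i, FLAGS.num_iter)

eps = 2.0 * FLAGS.max_epsilon / 255.0
x_input = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
y_label = tf.placeholder(tf.float32, shape=[None, FLAGS.num_classes])
x_max = tf.clip_by_value(x_input + eps, -1.0, 1.0)
x_min = tf.clip_by_value(x_input - eps, -1.0, 1.0)

# Build the network once so the variables exist before reuse_variables().
with slim.arg_scope(inception.inception_v3_arg_scope()):
    inception.inception_v3(x_input, num_classes=FLAGS.num_classes,
                           is_training=False)

zeros = tf.zeros_like(x_input)
x_adv, _, _, _, _, _, _ = tf.while_loop(
    stop, graph_incv3,
    [x_input, y_label, tf.constant(0), x_max, x_min, zeros, zeros])
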
Example #20
def test_image_data():
    slim = tf.contrib.slim
    tf.reset_default_graph()
    session = tf.Session()

    names = imagenet.create_readable_names_for_imagenet_labels()

    processed_images = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, _ = inception.inception_v3(processed_images,
                                           num_classes=1001,
                                           is_training=False)
    probabilities = tf.nn.softmax(logits)

    # Please correctly set the model path.
    # Download the model at https://github.com/tensorflow/models/tree/master/research/slim
    checkpoints_dir = 'model'
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_v3.ckpt'),
        slim.get_model_variables('InceptionV3'))
    init_fn(session)

    def predict_fn(images):
        return session.run(probabilities, feed_dict={processed_images: images})

    def f(x):
        return x / 2 + 0.5

    class_names = []
    for item in names:
        class_names.append(names[item])

    images = transform_img_fn(['data/violin.JPEG'])
    image = images[0]

    explainer = xdeep_image.ImageExplainer(predict_fn, class_names)

    explainer.explain('lime', image, top_labels=3)
    explainer.show_explanation('lime', deprocess=f, positive_only=False)

    explainer.explain('cle', image, top_labels=3)
    explainer.show_explanation('cle', deprocess=f, positive_only=False)

    explainer.explain('anchor', image)
    explainer.show_explanation('anchor')

    segments_slic = slic(image, n_segments=50, compactness=30, sigma=3)
    explainer.initialize_shap(n_segment=50, segment=segments_slic)
    explainer.explain('shap', image, nsamples=400)
    explainer.show_explanation('shap', deprocess=f)
  def testEvaluation(self):
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000

    eval_inputs = tf.random_uniform((batch_size, height, width, 3))
    logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                       is_training=False)
    predictions = tf.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (batch_size,))
  def testEvaluation(self):
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000

    eval_inputs = tf.random.uniform((batch_size, height, width, 3))
    logits, _ = inception.inception_v3(eval_inputs, num_classes,
                                       is_training=False)
    predictions = tf.argmax(input=logits, axis=1)

    with self.test_session() as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (batch_size,))
def build_network(batch_size, is_training):
    # input
    tf_raw_image_data = tf.placeholder(tf.string, shape=(batch_size, ))
    tf_body_bbox = tf.placeholder(tf.int32, shape=(batch_size, 4))
    tf_labels = tf.placeholder(tf.int32, shape=(batch_size, ))

    # pre-processing pipeline
    crops = []
    for i in range(batch_size):
        image = tf.image.decode_jpeg(tf_raw_image_data[i], channels=3)
        body_crop = tf.image.crop_to_bounding_box(image, tf_body_bbox[i, 1],
                                                  tf_body_bbox[i, 0],
                                                  tf_body_bbox[i, 3],
                                                  tf_body_bbox[i, 2])
        processed_crop = inception_preprocessing.preprocess_image(
            body_crop, image_size, image_size, is_training=is_training)
        crops.append(processed_crop)
    processed_images = tf.stack(crops)

    # training pipeline
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        _, endpoints = inception.inception_v3(processed_images,
                                              num_classes=num_identity,
                                              is_training=is_training)

    # load model parameters
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, checkpoint_name),
        slim.get_model_variables(original_variable_namescope))

    net_before_pool = tf.reshape(endpoints['Mixed_7c'], shape=(batch_size, -1))
    net_before_pool_frozen = tf.stop_gradient(net_before_pool)
    tf_features = slim.fully_connected(net_before_pool_frozen,
                                       feature_length,
                                       activation_fn=None)
    tf_features_normalized = tf.nn.l2_normalize(tf_features, dim=1)
    tf_loss = coco_loss_layer(tf_features_normalized, tf_labels, batch_size)

    # optimizer
    tf_lr = tf.placeholder(dtype=tf.float32, shape=(), name='learning_rate')
    optimizer = tf.train.AdamOptimizer(learning_rate=tf_lr)
    train = optimizer.minimize(tf_loss)

    # summary
    tf.summary.scalar('coco_loss', tf_loss)
    summary_op = tf.summary.merge_all()

    return (tf_raw_image_data, tf_body_bbox,
            tf_labels), (init_fn, tf_loss, tf_lr, train,
                         summary_op), tf_features
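
A hedged sketch of how build_network might be driven during training; the data provider next_batch, the batch size, the step count, and the learning-rate value are illustrative assumptions rather than the project's actual training loop.

inputs, train_ops, tf_features = build_network(batch_size=16, is_training=True)
tf_raw_image_data, tf_body_bbox, tf_labels = inputs
init_fn, tf_loss, tf_lr, train, summary_op = train_ops

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_fn(sess)  # restore the pretrained InceptionV3 weights
    for step in range(1000):
        raw_jpegs, bboxes, labels = next_batch()  # assumed data provider
        _, loss_value = sess.run(
            [train, tf_loss],
            feed_dict={tf_raw_image_data: raw_jpegs, tf_body_bbox: bboxes,
                       tf_labels: labels, tf_lr: 1e-3})
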
Example #24
    def testUnknowBatchSize(self):
        batch_size = 1
        height, width = 299, 299
        num_classes = 1000

        inputs = tf.placeholder(tf.float32, (None, height, width, 3))
        logits, _ = inception.inception_v3(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
        images = tf.random_uniform((batch_size, height, width, 3))

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEquals(output.shape, (batch_size, num_classes))
 def __init__(self, sess):
     # placeholder
     self.input_placeholder = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
     # inception features extractor
     device = tf.device('device:CPU:0')
     if tf.test.is_gpu_available():
         device = tf.device('device:GPU:0')
     with device:
         with tf.contrib.slim.arg_scope(inception_model.inception_v3_arg_scope()):
             self.features_extractor, _ = inception_model.inception_v3(self.input_placeholder,
                                                                       num_classes=0, is_training=False)
     # init
     init_fn = tf.contrib.slim.assign_from_checkpoint_fn(INCEPTION_MODEL_PATH,
                                                         tf.contrib.slim.get_model_variables("InceptionV3"))
     init_fn(sess)
Example #26
    def _buildInception():
        g = tf.Graph()
        train_batch_size = 5
        eval_batch_size = 2
        height, width = 299, 299
        num_classes = 1000
        with g.as_default():
            train_inputs = tf.random_uniform(
                (train_batch_size, height, width, 3))
            logits, _ = inception.inception_v3(train_inputs, num_classes)
            predictions = tf.argmax(logits, 1)

        train_op = g.get_collection_ref(tf_ops.GraphKeys.TRAIN_OP)
        train_op.append(predictions)
        return g
Example #27
def load_pretrained_slim_model():
    global session, names, probabilities, processed_images, slim_home_dir

    names = imagenet.create_readable_names_for_imagenet_labels()
    processed_images = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, _ = inception.inception_v3(processed_images,
                                           num_classes=1001,
                                           is_training=False)
    probabilities = tf.nn.softmax(logits)
    checkpoints_dir = os.path.join(slim_home_dir, 'pretrained')
    session = tf.Session()
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'inception_v3.ckpt'),
        slim.get_model_variables('InceptionV3'))
    init_fn(session)
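
A hedged usage sketch for the loader above; the dummy batch stands in for images that have already been preprocessed to 299x299 and scaled the way the Inception checkpoint expects.

import numpy as np

load_pretrained_slim_model()
dummy_batch = np.zeros((1, 299, 299, 3), dtype=np.float32)  # stand-in input
probs = session.run(probabilities, feed_dict={processed_images: dummy_batch})
top5 = probs[0].argsort()[::-1][:5]
for idx in top5:
    print('%s: %.4f' % (names[idx], probs[0][idx]))
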
Example #28
    def __call__(self, x_input, batch_size=None, is_training=False):
        """Constructs model and return probabilities for given input."""
        reuse = True if self.built else None
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            with tf.variable_scope(self.name):
                logits, end_points = inception.inception_v3(
                    x_input,
                    num_classes=self.num_classes,
                    is_training=is_training,
                    reuse=reuse)

            preds = tf.argmax(logits, axis=1)
        self.built = True
        self.logits = logits
        self.preds = preds
        return logits
 def testUnknownImageShape(self):
   tf.compat.v1.reset_default_graph()
   batch_size = 2
   height, width = 299, 299
   num_classes = 1000
   input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
   with self.test_session() as sess:
     inputs = tf.compat.v1.placeholder(tf.float32, shape=(batch_size, None, None, 3))
     logits, end_points = inception.inception_v3(inputs, num_classes)
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     pre_pool = end_points['Mixed_7c']
     feed_dict = {inputs: input_np}
     tf.compat.v1.global_variables_initializer().run()
     pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
     self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
 def testUnknownImageShape(self):
   tf.reset_default_graph()
   batch_size = 2
   height, width = 299, 299
   num_classes = 1000
   input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
   with self.test_session() as sess:
     inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
     logits, end_points = inception.inception_v3(inputs, num_classes)
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     pre_pool = end_points['Mixed_7c']
     feed_dict = {inputs: input_np}
     tf.global_variables_initializer().run()
     pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
     self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
  def testUnknowBatchSize(self):
    batch_size = 1
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.placeholder(tf.float32, (None, height, width, 3))
    logits, _ = inception.inception_v3(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV3/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [None, num_classes])
    images = tf.random_uniform((batch_size, height, width, 3))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEquals(output.shape, (batch_size, num_classes))
Example #32
    def testEvaluation(self):
        print('enter testEvaluation(): %s' % datetime.now())
        batch_size = 2
        height, width = 299, 299
        num_classes = 1000

        eval_inputs = tf.random_uniform((batch_size, height, width, 3))
        logits, _ = inception.inception_v3(eval_inputs,
                                           num_classes,
                                           is_training=False)
        predictions = tf.argmax(logits, 1)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEquals(output.shape, (batch_size, ))
        print('leave testEvaluation(): %s' % datetime.now())
Example #33
  def __call__(self, x_input, batch_size=None, is_training=False):
    """Constructs model and return probabilities for given input."""
    reuse = True if self.built else None
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      with tf.variable_scope(self.ckpt):
        logits, end_points = inception.inception_v3(
            x_input, num_classes=self.num_classes, is_training=is_training,
            reuse=reuse)

      preds = tf.argmax(logits, axis=1)
    self.built = True
    self.logits = logits
    self.preds = preds
    #output = end_points['logits']
    # Strip off the extra reshape op at the output
    #probs = output.op.inputs[0]
    return logits
Example #34
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python alexnet.py
    """
    tf.set_random_seed(1)
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)), name='input')
    inputs = tf.identity(inputs, "input_node")
    net, end_points = inception.inception_v3(inputs, num_classes, is_training=False)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x), argv[2].split(','))
    run_model(net_outputs, argv[1], 'InceptionV3', argv[3] == 'True')
Example #35
def inception_v3(inputs, is_training, opts):
    with slim.arg_scope(inception.inception_v3_arg_scope(
            weight_decay=opts.weight_decay,
            use_batch_norm=opts.use_batch_norm,
            batch_norm_decay=opts.batch_norm_decay,
            batch_norm_epsilon=opts.batch_norm_epsilon,
            activation_fn=tf.nn.relu)):
        return inception.inception_v3(
            inputs,
            num_classes=opts.num_classes,
            is_training=is_training,
            dropout_keep_prob=opts.dropout_keep_prob,
            min_depth=opts.min_depth,
            depth_multiplier=opts.depth_multiplier,
            prediction_fn=slim.softmax,
            spatial_squeeze=opts.spatial_squeeze,
            reuse=None,
            create_aux_logits=opts.create_aux_logits,
            global_pool=opts.global_pool)
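
The wrapper above expects an opts object carrying the fields it reads. A minimal sketch using a SimpleNamespace is shown below; the values are the usual slim defaults and stand in for whatever configuration the project actually uses.

from types import SimpleNamespace

opts = SimpleNamespace(
    weight_decay=0.00004,
    use_batch_norm=True,
    batch_norm_decay=0.9997,
    batch_norm_epsilon=0.001,
    num_classes=1001,
    dropout_keep_prob=0.8,
    min_depth=16,
    depth_multiplier=1.0,
    spatial_squeeze=True,
    create_aux_logits=True,
    global_pool=False)

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
logits, end_points = inception_v3(images, is_training=False, opts=opts)
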
def batch_results():

    batch_size = 3  # number of samples to show

    with tf.Graph().as_default():
        tf.logging.set_verbosity(tf.logging.INFO)
        images, images_raw, labels = load_batch(dataset,
                                                height=image_size,
                                                width=image_size)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits, _ = inception.inception_v3(images,
                                               num_classes=dataset.num_classes,
                                               is_training=False)

        probabilities = tf.nn.softmax(logits)

        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path, slim.get_variables_to_restore())

        with tf.Session() as sess:
            with slim.queues.QueueRunners(sess):
                sess.run(tf.initialize_local_variables())
                init_fn(sess)
                np_probabilities, np_images_raw, np_labels = sess.run(
                    [probabilities, images_raw, labels])

                for i in xrange(batch_size):
                    image = np_images_raw[i, :, :, :]
                    true_label = np_labels[i]
                    predicted_label = np.argmax(np_probabilities[i, :])
                    predicted_name = dataset.labels_to_names[predicted_label]
                    true_name = dataset.labels_to_names[true_label]

                    plt.figure()
                    plt.imshow(image.astype(np.uint8))
                    plt.title('Ground Truth: [%s], Prediction [%s]' %
                              (true_name, predicted_name))
                    plt.axis('off')
                    plt.show()
Example #37
def main():
    """
    You can also run these commands manually to generate the pb file
    1. git clone https://github.com/tensorflow/models.git
    2. export PYTHONPATH=Path_to_your_model_folder
    3. python alexnet.py
    """
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.Variable(tf.random_uniform((1, height, width, 3)),
                         name='input')
    net, end_points = inception.inception_v3(inputs,
                                             num_classes,
                                             is_training=False)
    print("nodes in the graph")
    for n in end_points:
        print(n + " => " + str(end_points[n]))
    net_outputs = map(lambda x: tf.get_default_graph().get_tensor_by_name(x),
                      argv[2].split(','))
    run_model(net_outputs, argv[1], 'InceptionV3', argv[3] == 'True')
def url_results():

    with tf.Graph().as_default():
        url = 'http://www.vetprofessionals.com/catprofessional/images/home-cat.jpg'
        image_string = urllib2.urlopen(url).read()
        image = tf.image.decode_jpeg(image_string, channels=3)
        processed_image = inception_preprocessing.preprocess_image(
            image, image_size, image_size, is_training=False)
        processed_images = tf.expand_dims(processed_image, 0)

        # Create the model, use the default arg scope to configure the batch norm parameters.
        with slim.arg_scope(inception.inception_v3_arg_scope()):
            logits, _ = inception.inception_v3(processed_images,
                                               num_classes=dataset.num_classes,
                                               is_training=False)

        probabilities = tf.nn.softmax(logits)

        checkpoint_path = tf.train.latest_checkpoint(train_dir)
        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path, slim.get_variables_to_restore())

        with tf.Session() as sess:
            init_fn(sess)
            np_image, probabilities = sess.run([image, probabilities])
            probabilities = probabilities[0, 0:]
            sorted_inds = [
                i[0]
                for i in sorted(enumerate(-probabilities), key=lambda x: x[1])
            ]

        plt.figure()
        plt.imshow(np_image.astype(np.uint8))
        plt.axis('off')
        plt.show()

        names = dataset.labels_to_names
        for i in range(dataset.num_classes):
            index = sorted_inds[i]
            print('Probability %0.2f%% => [%s]' %
                  (probabilities[index] * 100, names[index + 1]))
def build_graph():
    images_ph = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
    labels_ph = tf.placeholder(tf.int32, shape=[
        None,
    ])
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        l, _ = inception.inception_v3(images_ph,
                                      num_classes=2,
                                      is_training=False,
                                      reuse=False,
                                      scope="crack/InceptionV3")

    # Construct the scalar neuron tensor.
    logits = tf.get_default_graph().get_tensor_by_name(
        'crack/InceptionV3/Logits/SpatialSqueeze:0')
    neuron_selector = tf.placeholder(tf.int32)
    y = logits[0][neuron_selector]

    # Construct tensor for predictions.
    prediction = tf.argmax(logits, 1)
    probs = tf.nn.softmax(l)
    return images_ph, prediction, probs, y, neuron_selector
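
A hedged usage sketch for build_graph; the checkpoint path and the zero-valued input are placeholders for whatever the surrounding saliency code actually restores and feeds. The y tensor and the neuron_selector placeholder are what a gradient-based saliency method would differentiate against.

import numpy as np

images_ph, prediction, probs, y, neuron_selector = build_graph()

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, 'crack_inception_v3.ckpt')  # assumed checkpoint path
    image = np.zeros((299, 299, 3), dtype=np.float32)  # dummy input image
    pred_class, class_probs = sess.run([prediction, probs],
                                       feed_dict={images_ph: [image]})
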
  def testBuildEndPoints(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.random_uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_v3(inputs, num_classes)
    self.assertTrue('Logits' in end_points)
    logits = end_points['Logits']
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('AuxLogits' in end_points)
    aux_logits = end_points['AuxLogits']
    self.assertListEqual(aux_logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('Mixed_7c' in end_points)
    pre_pool = end_points['Mixed_7c']
    self.assertListEqual(pre_pool.get_shape().as_list(),
                         [batch_size, 8, 8, 2048])
    self.assertTrue('PreLogits' in end_points)
    pre_logits = end_points['PreLogits']
    self.assertListEqual(pre_logits.get_shape().as_list(),
                         [batch_size, 1, 1, 2048])
  def testBuildEndPoints(self):
    batch_size = 5
    height, width = 299, 299
    num_classes = 1000

    inputs = tf.random.uniform((batch_size, height, width, 3))
    _, end_points = inception.inception_v3(inputs, num_classes)
    self.assertTrue('Logits' in end_points)
    logits = end_points['Logits']
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('AuxLogits' in end_points)
    aux_logits = end_points['AuxLogits']
    self.assertListEqual(aux_logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('Mixed_7c' in end_points)
    pre_pool = end_points['Mixed_7c']
    self.assertListEqual(pre_pool.get_shape().as_list(),
                         [batch_size, 8, 8, 2048])
    self.assertTrue('PreLogits' in end_points)
    pre_logits = end_points['PreLogits']
    self.assertListEqual(pre_logits.get_shape().as_list(),
                         [batch_size, 1, 1, 2048])
Example #42
def main(_):
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    ensemble_type = FLAGS.ensemble_type

    tf.logging.set_verbosity(tf.logging.INFO)

    checkpoint_path_list = [
        FLAGS.checkpoint_path_inception_v1, FLAGS.checkpoint_path_inception_v2,
        FLAGS.checkpoint_path_inception_v3, FLAGS.checkpoint_path_inception_v4,
        FLAGS.checkpoint_path_inception_resnet_v2,
        FLAGS.checkpoint_path_resnet_v1_101,
        FLAGS.checkpoint_path_resnet_v1_152,
        FLAGS.checkpoint_path_resnet_v2_101,
        FLAGS.checkpoint_path_resnet_v2_152, FLAGS.checkpoint_path_vgg_16,
        FLAGS.checkpoint_path_vgg_19
    ]
    normalization_method = [
        'default', 'default', 'default', 'default', 'global', 'caffe_rgb',
        'caffe_rgb', 'default', 'default', 'caffe_rgb', 'caffe_rgb'
    ]
    pred_list = []
    for idx, checkpoint_path in enumerate(checkpoint_path_list, 1):
        with tf.Graph().as_default():
            if int(FLAGS.test_idx) == 20 and idx in [3]:
                continue
            if int(FLAGS.test_idx) in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
                                       ] and int(FLAGS.test_idx) != idx:
                continue
            # Prepare graph
            if idx in [1, 2, 6, 7, 10, 11]:
                _x_input = tf.placeholder(tf.float32, shape=batch_shape)
                x_input = tf.image.resize_images(_x_input, [224, 224])
            else:
                _x_input = tf.placeholder(tf.float32, shape=batch_shape)
                x_input = _x_input

            x_input = image_normalize(x_input, normalization_method[idx - 1])

            if idx == 1:
                with slim.arg_scope(inception.inception_v1_arg_scope()):
                    _, end_points = inception.inception_v1(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 2:
                with slim.arg_scope(inception.inception_v2_arg_scope()):
                    _, end_points = inception.inception_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 3:
                with slim.arg_scope(inception.inception_v3_arg_scope()):
                    _, end_points = inception.inception_v3(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 4:
                with slim.arg_scope(inception.inception_v4_arg_scope()):
                    _, end_points = inception.inception_v4(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 5:
                with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
                    _, end_points = inception.inception_resnet_v2(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 6:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_101(x_input,
                                                            num_classes=1000,
                                                            is_training=False)
            elif idx == 7:
                with slim.arg_scope(resnet_v1.resnet_arg_scope()):
                    _, end_points = resnet_v1.resnet_v1_152(x_input,
                                                            num_classes=1000,
                                                            is_training=False)
            elif idx == 8:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_101(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 9:
                with slim.arg_scope(resnet_v2.resnet_arg_scope()):
                    _, end_points = resnet_v2.resnet_v2_152(
                        x_input, num_classes=num_classes, is_training=False)
            elif idx == 10:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_16(x_input,
                                               num_classes=1000,
                                               is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_16/fc8'])
            elif idx == 11:
                with slim.arg_scope(vgg.vgg_arg_scope()):
                    _, end_points = vgg.vgg_19(x_input,
                                               num_classes=1000,
                                               is_training=False)
                    end_points['predictions'] = tf.nn.softmax(
                        end_points['vgg_19/fc8'])

            #end_points = tf.reduce_mean([end_points1['Predictions'], end_points2['Predictions'], end_points3['Predictions'], end_points4['Predictions']], axis=0)

            #predicted_labels = tf.argmax(end_points, 1)

            # Run computation
            saver = tf.train.Saver(slim.get_model_variables())
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=tf.train.Scaffold(saver=saver),
                checkpoint_filename_with_path=checkpoint_path,
                master=FLAGS.master)

            pred_in = []
            filenames_list = []
            with tf.train.MonitoredSession(
                    session_creator=session_creator) as sess:
                for filenames, images in load_images(FLAGS.input_dir,
                                                     batch_shape):
                    #if idx in [1,2,6,7,10,11]:
                    #  # 16x299x299x3
                    #  images = zoom(images, (1, 0.7491638795986622, 0.7491638795986622, 1), order=2)
                    filenames_list.extend(filenames)
                    end_points_dict = sess.run(end_points,
                                               feed_dict={_x_input: images})
                    if idx in [6, 7, 10, 11]:
                        end_points_dict['predictions'] = \
                                      np.concatenate([np.zeros([FLAGS.batch_size, 1]),
                                                      np.array(end_points_dict['predictions'].reshape(-1, 1000))],
                                                      axis=1)
                    try:
                        pred_in.extend(end_points_dict['Predictions'].reshape(
                            -1, num_classes))
                    except KeyError:
                        pred_in.extend(end_points_dict['predictions'].reshape(
                            -1, num_classes))
            pred_list.append(pred_in)

    if ensemble_type == 'mean':
        pred = np.mean(pred_list, axis=0)
        labels = np.argmax(
            pred, axis=1
        )  # model_num X batch X class_num ==(np.mean)==> batch X class_num ==(np.argmax)==> batch
    elif ensemble_type == 'vote':
        pred = np.argmax(
            pred_list, axis=2
        )  # model_num X batch X class_num ==(np.mean)==> batch X class_num ==(np.argmax)==> batch
        labels = np.median(pred, axis=0)
    with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filename, label in zip(filenames_list, labels):
            out_file.write('{0},{1}\n'.format(filename, label))
Example #43
 def create(self, images, num_classes, is_training):
   """See baseclass."""
   with slim.arg_scope(inception.inception_v3_arg_scope()):
     _, endpoints = inception.inception_v3(
         images, num_classes, create_aux_logits=False, is_training=is_training)
     return endpoints
Example #44
    print('Start to read images!')
    image_list = get_test_images(test_path)
    processed_images = tf.placeholder(tf.float32,
                                      shape=(None, image_size, image_size, 3))

    if deep_lerning_architecture == "v1" or deep_lerning_architecture == "V1":
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            logits, _ = inception.inception_v1(processed_images,
                                               num_classes=nb_classes,
                                               is_training=False)

    else:
        if deep_lerning_architecture == "v3" or deep_lerning_architecture == "V3":
            with slim.arg_scope(inception.inception_v3_arg_scope()):
                logits, _ = inception.inception_v3(processed_images,
                                                   num_classes=nb_classes,
                                                   is_training=False)
        else:
            if deep_lerning_architecture == "resv2" or deep_lerning_architecture == "inception_resnet2":
                with slim.arg_scope(inception.inception_resnet_v2_arg_scope()):
                    logits, _ = inception.inception_resnet_v2(
                        processed_images,
                        num_classes=nb_classes,
                        is_training=False)

    def predict_fn(images):
        return session.run(probabilities, feed_dict={processed_images: images})

    probabilities = tf.nn.softmax(logits)
    checkpoint_path = tf.train.latest_checkpoint(train_dir)
    init_fn = slim.assign_from_checkpoint_fn(checkpoint_path,
Example #45
def nestedClassification(img_paths):

    checkpoints_path = '/home/mehdi/Desktop/1.Projects/1.Image_Processing/1_.classification/inception_v3/all'
    fileName = "../Project_Data/labels.txt"
    names = []

    crimefile = open(fileName, 'r')
    for line in crimefile.readlines():
        if line.strip():
            names.append(line.strip().split(":")[-1])

    BATCH_SIZE = FLAGS.Batch_Size

    ############################################################################

    number_of_inputImage = len(img_paths)
    if BATCH_SIZE >= number_of_inputImage:
        batch_size = number_of_inputImage
    else:
        batch_size = BATCH_SIZE

    NUM_CLASSES = len(names)
    image_size = inception.inception_v3.default_image_size

    repeat_count=2

    checkpoints_path = tf.train.latest_checkpoint(checkpoints_path)
    X = tf.placeholder(tf.float32, [batch_size, image_size, image_size, 3])
    with slim.arg_scope(inception.inception_v3_arg_scope()):
        model, _ = inception.inception_v3(X, num_classes=NUM_CLASSES, is_training=False)

    probabilities = tf.nn.softmax(model)
    init = slim.assign_from_checkpoint_fn(checkpoints_path,
                                          slim.get_model_variables('InceptionV3'))


    def input_parser(img_path):
        # Read the image from file, decode it and preprocess it for InceptionV3.
        img_file = tf.read_file(img_path)
        img_decoded = tf.image.decode_png(img_file, channels=3)
        img_decoded = tf.image.convert_image_dtype(img_decoded, dtype=tf.float32)
        processed_image = inception_preprocessing.preprocess_image(
            img_decoded, image_size, image_size, is_training=False)
        return processed_image

    data = tf.data.Dataset.from_tensor_slices(img_paths)
    print("===imaPath", img_paths)
    data = data.map(input_parser, num_parallel_calls=1)
    data = data.repeat(repeat_count)  # repeat the dataset this many times
    data = data.batch(batch_size)

    iterator = data.make_initializable_iterator()
    next_batch = iterator.get_next()
    test_init_op = iterator.make_initializer(data)


    with tf.Session() as sess:

        init(sess)
        sess.run(test_init_op)

        j = 0
        epoch = int(number_of_inputImage / batch_size)
        for i in range(epoch + 1):

            img_batch = sess.run(next_batch)
            probabilities1 = sess.run(probabilities, feed_dict={X: img_batch})

            for prediction in range(probabilities1.shape[0]):  # batch_size
                if j < number_of_inputImage:
                    print(img_paths[j])
                    j += 1
                    batchPredResult = probabilities1[prediction, 0:]

                    sorted_inds = [ind[0] for ind in
                                   sorted(enumerate(-batchPredResult), key=lambda x: x[1])]
                    for k in range(1):
                        index = sorted_inds[k]
                        print('Probability %0.4f => [%s]' % (batchPredResult[index], names[index]))