def network_inception_v1():
    input_shape = [1, 224, 224, 3]
    input_ = tf.placeholder(dtype=tf.float32, name='input', shape=input_shape)
    net, _end_points = inception_v1(input_,
                                    num_classes=1000,
                                    is_training=False)
    return net
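
The snippets on this page omit their import preamble. A minimal TF 1.x sketch, matching the module path shown in Example #19 (some repos instead vendor these nets from tensorflow/models/research/slim):

import numpy as np
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import inception_v1
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1_arg_scope
slim = tf.contrib.slim

Example #1 above calls inception_v1 directly, so it instead imports the function itself (from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1), as Example #19 does.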
Example #2
def test_network(img_path, label_path):
    x = tf.placeholder("float", shape=[None, 224, 224, 3], name='input')
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        logits, end_points = inception_v1.inception_v1(x,
                                                       num_classes=1001,
                                                       dropout_keep_prob=1.0,
                                                       is_training=False)
    predictions = end_points["Predictions"]
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        saver.restore(sess, "inception_v1.ckpt")

        imgfloat = tf.cast(tf.image.decode_jpeg(tf.read_file(img_path),
                                                channels=3),
                           dtype=tf.float32)
        # Scale pixel values from [0, 255] to [-1, 1].
        resized = tf.image.resize_images(tf.expand_dims(imgfloat, 0),
                                         (224, 224), method=0)
        img = resized / 255.0 * 2.0 - 1.0
        predictions_val = predictions.eval(feed_dict={x: img.eval()})
        predicted_classes = np.argmax(predictions_val, axis=1)

        # Use a context manager so the label file gets closed.
        with open(label_path) as f:
            labels = f.readlines()
        print(predicted_classes, labels[predicted_classes[0]])
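
The in-graph scaling above maps uint8 pixel values from [0, 255] to the [-1, 1] range the slim Inception checkpoints expect. A minimal NumPy equivalent for pre-decoded images (the helper name is illustrative):

def inception_preprocess(img):
    # (x / 255) * 2 - 1 == x / 127.5 - 1: maps [0, 255] to [-1, 1]
    return np.asarray(img, np.float32) / 127.5 - 1.0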
Example #3
def test_network(img_path, label_path):
    x = tf.placeholder("float", shape=[None, 224, 224, 3], name='input')
    xscale = x / 255.0 * 2.0 - 1.0  # scale [0, 255] -> [-1, 1]
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        logits, end_points = inception_v1.inception_v1(xscale,
                                                       num_classes=1001,
                                                       dropout_keep_prob=1.0,
                                                       is_training=False)
    predictions = tf.nn.softmax(logits, name="output")
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, "inception_v1.ckpt")
        #ckpt_info("inception_v1.ckpt")

        #var_list = tf.global_variables()
        #print(var_list)
        constant_graph = tf.get_default_graph().as_graph_def()
        output_graph_def = graph_util.convert_variables_to_constants(
            sess, constant_graph, ['output'])
        with tf.gfile.GFile("inception_v1.pb", "wb") as f:
            f.write(output_graph_def.SerializeToString())

        imgfloat = tf.cast(tf.image.decode_jpeg(tf.read_file(img_path),
                                                channels=3),
                           dtype=tf.float32)
        img = tf.image.resize_images(tf.expand_dims(imgfloat, 0), (224, 224),
                                     method=0)
        predictions_val = predictions.eval(feed_dict={x: img.eval()})
        predicted_classes = np.argmax(predictions_val, axis=1)

        # Use a context manager so the label file gets closed.
        with open(label_path) as f:
            labels = f.readlines()
        print(predicted_classes, labels[predicted_classes[0]])
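
A minimal sketch of reloading the inception_v1.pb frozen above, relying on the 'input' and 'output' tensor names this example assigns:

graph_def = tf.GraphDef()
with tf.gfile.GFile("inception_v1.pb", "rb") as f:
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    x = graph.get_tensor_by_name("input:0")
    y = graph.get_tensor_by_name("output:0")
    with tf.Session(graph=graph) as sess:
        batch = np.zeros((1, 224, 224, 3), np.float32)  # stand-in batch
        probs = sess.run(y, feed_dict={x: batch})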
Example #4
  def testTrainEvalWithReuse(self):
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 224, 224
    num_classes = 1000

    train_inputs = random_ops.random_uniform(
        (train_batch_size, height, width, 3))
    inception_v1.inception_v1(train_inputs, num_classes)
    eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception_v1.inception_v1(eval_inputs, num_classes, reuse=True)
    predictions = math_ops.argmax(logits, 1)

    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (eval_batch_size,))
Example #5
def create_inception(image_input,
                     is_training,
                     scope="",
                     inception_out="Mixed_5c",
                     resnet_version=50,
                     cbn=None):
    """
    Create a resnet by overidding the classic batchnorm with conditional batchnorm
    :param image_input: placeholder with image
    :param is_training: are you using the resnet at training_time or test_time
    :param scope: tensorflow scope
    :param resnet_version: 50/101/152
    :param cbn: the cbn factory
    :return: the resnet output
    """

    # NOTE: classic batchnorm with slim networks has a known issue
    # (https://github.com/tensorflow/tensorflow/issues/4887); if it bites,
    # use the config 'cbn': {'use_cbn': true, 'excluded_scope_names': ['*']}
    arg_sc = inception_v1.inception_v1_arg_scope()

    inception_scope = inception_out

    with slim.arg_scope(arg_sc):
        # 1001 classes = 1000 ImageNet classes + 1 background class
        net, end_points = inception_v1.inception_v1(
            image_input, 1001, is_training=is_training)

    print("Net = ", net)
    # print("--- 4")

    if len(scope) > 0 and not scope.endswith("/"):
        scope += "/"
    # print("--- 5")
    # print(end_points)
    print(" Batch ", inception_scope)

    out = end_points[scope + inception_scope]
    print("-- out Use: {},output = {}".format(inception_scope, out))

    return out, end_points
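
A hypothetical call, assuming the default empty scope so end_points is keyed by bare block names such as 'Mixed_5c' (the keys Examples #16/#17 use):

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
features, end_points = create_inception(images, is_training=False)
print(features.get_shape())  # (?, 7, 7, 1024) for Mixed_5c at 224x224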
Example #6
  def testLogitsNotSqueezed(self):
    num_classes = 25
    images = random_ops.random_uniform([1, 224, 224, 3])
    logits, _ = inception_v1.inception_v1(
        images, num_classes=num_classes, spatial_squeeze=False)

    with self.cached_session() as sess:
      variables.global_variables_initializer().run()
      logits_out = sess.run(logits)
      self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
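
If downstream code expects [batch, num_classes] logits, squeezing the kept 1x1 spatial dimensions restores that shape; a one-line sketch using the array_ops module imported elsewhere in these tests:

squeezed = array_ops.squeeze(logits, [1, 2])  # [1, 1, 1, 25] -> [1, 25]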
Example #7
    def testTrainEvalWithReuse(self):
        train_batch_size = 5
        eval_batch_size = 2
        height, width = 224, 224
        num_classes = 1000

        train_inputs = random_ops.random_uniform(
            (train_batch_size, height, width, 3))
        inception_v1.inception_v1(train_inputs, num_classes)
        eval_inputs = random_ops.random_uniform(
            (eval_batch_size, height, width, 3))
        logits, _ = inception_v1.inception_v1(eval_inputs,
                                              num_classes,
                                              reuse=True)
        predictions = math_ops.argmax(logits, 1)

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEquals(output.shape, (eval_batch_size, ))
Example #8
    def testLogitsNotSqueezed(self):
        num_classes = 25
        images = random_ops.random_uniform([1, 224, 224, 3])
        logits, _ = inception_v1.inception_v1(images,
                                              num_classes=num_classes,
                                              spatial_squeeze=False)

        with self.test_session() as sess:
            variables.global_variables_initializer().run()
            logits_out = sess.run(logits)
            self.assertListEqual(list(logits_out.shape),
                                 [1, 1, 1, num_classes])
Example #9
    def testBuildClassificationNetwork(self):
        batch_size = 5
        height, width = 224, 224
        num_classes = 1000

        inputs = random_ops.random_uniform((batch_size, height, width, 3))
        logits, end_points = inception_v1.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(),
                             [batch_size, num_classes])
        self.assertTrue('Predictions' in end_points)
        self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                             [batch_size, num_classes])
Example #10
  def testBuildClassificationNetwork(self):
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000

    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    logits, end_points = inception_v1.inception_v1(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    self.assertTrue('Predictions' in end_points)
    self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                         [batch_size, num_classes])
Example #11
  def testEvaluation(self):
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000

    eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
    logits, _ = inception_v1.inception_v1(
        eval_inputs, num_classes, is_training=False)
    predictions = math_ops.argmax(logits, 1)

    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(predictions)
      self.assertEquals(output.shape, (batch_size,))
Example #12
def trainmodel(train_batch, train_label_batch, train_label, num_epochs):
    with slim.arg_scope(inception_v1_arg_scope()):
        train_logits, end_points = inception_v1.inception_v1(train_batch,
                                                             num_classes=2,
                                                             is_training=True)

    tf.losses.sparse_softmax_cross_entropy(labels=train_label,
                                           logits=train_logits)
    total_loss = tf.losses.get_total_loss()
    global_step = tf.Variable(0, name='global_step', trainable=False)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(total_loss, global_step=global_step)

    prediction_labels = tf.argmax(end_points['Predictions'], 1)
    read_labels = tf.argmax(train_label_batch, 1)
    correct_prediction = tf.equal(prediction_labels, read_labels)
    train_accuracy_batch = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    saver = tf.train.Saver(tf.trainable_variables() +
                           tf.get_collection_ref("moving_vars"))

    with tf.Session() as sess:
        sess.run(
            tf.group(tf.global_variables_initializer(),
                     tf.local_variables_initializer()))
        print("Initialized")

        step = 0
        start_time = time.time()
        for epoch_index in range(num_epochs):
            _, l, end_points2, logits2, train_acc2_batch = sess.run([
                train_op, total_loss, end_points, train_logits,
                train_accuracy_batch
            ])

            duration = time.time() - start_time

            print("Minibatch loss at step %d: %.6f (%.3f sec)" %
                  (step, l, duration))
            print(end_points2['Predictions'])
            print("Minibatch accuracy: %.6f" % train_acc2_batch)
            #print("lr: %.6f" % optimizer._lr)

            step += 1

        saver.save(sess, './train.ckpt')
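
A minimal smoke test for trainmodel(), with random tensors standing in for a real input pipeline (the batch size and the 2-class labels are assumptions):

images = tf.random_uniform([8, 224, 224, 3])
sparse_labels = tf.constant([0, 1] * 4, dtype=tf.int64)  # per-example class ids
onehot_labels = tf.one_hot(sparse_labels, depth=2)
trainmodel(images, onehot_labels, sparse_labels, num_epochs=2)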
Example #13
  def testUnknownBatchSize(self):
    batch_size = 1
    height, width = 224, 224
    num_classes = 1000

    inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
    logits, _ = inception_v1.inception_v1(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
    images = random_ops.random_uniform((batch_size, height, width, 3))

    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      self.assertEquals(output.shape, (batch_size, num_classes))
Example #14
    def testEvaluation(self):
        batch_size = 2
        height, width = 224, 224
        num_classes = 1000

        eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
        logits, _ = inception_v1.inception_v1(eval_inputs,
                                              num_classes,
                                              is_training=False)
        predictions = math_ops.argmax(logits, 1)

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEquals(output.shape, (batch_size, ))
Example #15
    def testUnknownBatchSize(self):
        batch_size = 1
        height, width = 224, 224
        num_classes = 1000

        inputs = array_ops.placeholder(dtypes.float32,
                                       (None, height, width, 3))
        logits, _ = inception_v1.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
        images = random_ops.random_uniform((batch_size, height, width, 3))

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEquals(output.shape, (batch_size, num_classes))
Example #16
  def testUnknownImageShape(self):
    ops.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.cached_session() as sess:
      inputs = array_ops.placeholder(
          dtypes.float32, shape=(batch_size, None, None, 3))
      logits, end_points = inception_v1.inception_v1(inputs, num_classes)
      self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
      self.assertListEqual(logits.get_shape().as_list(),
                           [batch_size, num_classes])
      pre_pool = end_points['Mixed_5c']
      feed_dict = {inputs: input_np}
      variables.global_variables_initializer().run()
      pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
      self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
Example #17
    def testUnknownImageShape(self):
        ops.reset_default_graph()
        batch_size = 2
        height, width = 224, 224
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
            inputs = array_ops.placeholder(dtypes.float32,
                                           shape=(batch_size, None, None, 3))
            logits, end_points = inception_v1.inception_v1(inputs, num_classes)
            self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
            self.assertListEqual(logits.get_shape().as_list(),
                                 [batch_size, num_classes])
            pre_pool = end_points['Mixed_5c']
            feed_dict = {inputs: input_np}
            variables.global_variables_initializer().run()
            pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
            self.assertListEqual(list(pre_pool_out.shape),
                                 [batch_size, 7, 7, 1024])
Example #18
def test_network():
    x = tf.placeholder("float", shape=[None, 224, 224, 3], name='input')
    with slim.arg_scope(inception_v1.inception_v1_arg_scope()):
        logits, end_points = inception_v1.inception_v1(x,
                                                       num_classes=2,
                                                       dropout_keep_prob=1.0,
                                                       is_training=False)
    predictions = end_points["Predictions"]

    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, "train.ckpt")

        path = './picture/'
        w = 224
        h = 224
        c = 3
        # Use a distinct loop variable so the placeholder 'x' above is not
        # shadowed under Python 2 scoping rules.
        cate = [path + d for d in os.listdir(path) if os.path.isdir(path + d)]
        imgs = []
        labels = []
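        # The two break statements below keep only the first image of the
        # first class folder, so this predicts on a single sample.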
        for idx, folder in enumerate(cate):
            for im in glob.glob(folder + '/*.jpg'):
                print('reading the image: %s' % (im))
                img = io.imread(im)
                img = transform.resize(img, (w, h, c))
                imgs.append(img)
                labels.append([1, 0] if idx == 0 else [0, 1])
                break
            break

        data = np.asarray(imgs, np.float32)
        label = np.asarray(labels, np.int32)

        predictions_val = predictions.eval(feed_dict={x: data})

        print(predictions_val)
Example #19
# If the slim nets are not importable, append your clone of the tensorflow
# models repository to sys.path first, e.g.:
#   slim_path = "hdfs:///slim/models/research/slim"
#   sys.path.append(slim_path)

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1_arg_scope
from tensorflow.contrib.slim.python.slim.nets.inception_v1 import inception_v1_base

slim = tf.contrib.slim
tf.reset_default_graph()
images = tf.placeholder(dtype=tf.float32, shape=(None, 224, 224, 3))

with slim.arg_scope(inception_v1_arg_scope()):
    logits, end_points = inception_v1(images, num_classes=1001)

sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, "file:///home/hduser/slim/checkpoint/inception_v1.ckpt")
#saver.restore(sess, "hdfs:///slim/checkpoint/inception_v1.ckpt")
# You need to edit this path to the checkpoint you downloaded

from zoo.util.tf import export_tf
avg_pool = end_points['Mixed_3c']  # an intermediate mixed block, despite the name
export_tf(sess,
          "file:///home/hduser/slim/tfnet/",
          inputs=[images],
          outputs=[avg_pool])
from zoo.pipeline.api.net import TFNet
amodel = TFNet.from_export_folder("file:///home/hduser/slim/tfnet/")
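
A hedged usage sketch; it assumes analytics-zoo's TFNet exposes the generic Layer.predict on NumPy batches (verify against your zoo version):

import numpy as np
dummy = np.random.rand(4, 224, 224, 3).astype(np.float32)
features = amodel.predict(dummy)  # Mixed_3c activations for the batch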
Example #20
def trainmodel(train_batch, train_label_batch, train_label, num_epochs):
    ps_hosts = FLAGS.ps_hosts.split(",")
    worker_hosts = FLAGS.worker_hosts.split(",")

    # Create a cluster from the parameter server and worker hosts.
    cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})

    # Create and start a server for the local task.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.4

    server = tf.train.Server(cluster,
                             job_name=FLAGS.job_name,
                             task_index=FLAGS.task_index,
                             config=config)
    print("Cluster job: %s, task_index: %d, target: %s" %
          (FLAGS.job_name, FLAGS.task_index, server.target))
    if FLAGS.job_name == "ps":
        server.join()
    elif FLAGS.job_name == "worker":
        # Assigns ops to the local worker by default.
        with tf.device(
                tf.train.replica_device_setter(
                    worker_device="/job:worker/task:%d" % FLAGS.task_index,
                    cluster=cluster)):
            with slim.arg_scope(inception_v1_arg_scope()):
                train_logits, end_points = inception_v1.inception_v1(
                    train_batch, num_classes=2, is_training=True)

            tf.losses.sparse_softmax_cross_entropy(labels=train_label,
                                                   logits=train_logits)
            total_loss = tf.losses.get_total_loss()
            global_step = tf.Variable(0, name='global_step', trainable=False)

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
                train_op = optimizer.minimize(total_loss,
                                              global_step=global_step)

            prediction_labels = tf.argmax(end_points['Predictions'], 1)
            read_labels = tf.argmax(train_label_batch, 1)
            correct_prediction = tf.equal(prediction_labels, read_labels)
            train_accuracy_batch = tf.reduce_mean(
                tf.cast(correct_prediction, "float"))

            saver = tf.train.Saver(tf.trainable_variables() +
                                   tf.get_collection_ref("moving_vars"))
            #init_op = tf.global_variables_initializer()
            local_init_op = tf.global_variables_initializer()

        # Create a "Supervisor", which oversees the training process.
        sv = tf.train.Supervisor(
            is_chief=(FLAGS.task_index == 0),
            logdir="./tflog",
            #init_op=init_op,
            local_init_op=local_init_op,
            saver=saver,
            global_step=global_step,
            save_model_secs=600)

        # The supervisor takes care of session initialization and restoring from
        # a checkpoint.
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.4
        #session = tf.Session(config=config, ...)
        sess = sv.prepare_or_wait_for_session(server.target, config=config)

        # Start queue runners for the input pipelines (if any).
        sv.start_queue_runners(sess)

        print("Initialized")

        step = 0
        start_time = time.time()
        for epoch_index in range(num_epochs):
            _, l, end_points2, logits2, train_acc2_batch = sess.run([
                train_op, total_loss, end_points, train_logits,
                train_accuracy_batch
            ])

            duration = time.time() - start_time

            print("Minibatch loss at step %d: %.6f (%.3f sec)" %
                  (step, l, duration))
            print(end_points2['Predictions'])
            print("Minibatch accuracy: %.6f" % train_acc2_batch)
            #print("lr: %.6f" % optimizer._lr)

            step += 1

        sv.stop()
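
The snippet reads its cluster description from FLAGS but the flag definitions are not shown. A hypothetical set matching the names used above:

flags = tf.app.flags
flags.DEFINE_string("ps_hosts", "localhost:2222",
                    "Comma-separated list of parameter-server host:port pairs")
flags.DEFINE_string("worker_hosts", "localhost:2223,localhost:2224",
                    "Comma-separated list of worker host:port pairs")
flags.DEFINE_string("job_name", "worker", "Either 'ps' or 'worker'")
flags.DEFINE_integer("task_index", 0, "Index of the task within its job")
FLAGS = flags.FLAGS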