def testMultipleHashTables(self):
        with self.test_session() as sess:
            default_val = -1
            keys = tf.constant(["brain", "salad", "surgery"])
            values = tf.constant([0, 1, 2], tf.int64)

            table1 = tf.contrib.lookup.HashTable(
                tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
                default_val)
            table2 = tf.contrib.lookup.HashTable(
                tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
                default_val)
            table3 = tf.contrib.lookup.HashTable(
                tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
                default_val)

            tf.initialize_all_tables().run()
            self.assertAllEqual(3, table1.size().eval())
            self.assertAllEqual(3, table2.size().eval())
            self.assertAllEqual(3, table3.size().eval())

            input_string = tf.constant(["brain", "salad", "tank"])
            output1 = table1.lookup(input_string)
            output2 = table2.lookup(input_string)
            output3 = table3.lookup(input_string)

            out1, out2, out3 = sess.run([output1, output2, output3])
            self.assertAllEqual([0, 1, -1], out1)
            self.assertAllEqual([0, 1, -1], out2)
            self.assertAllEqual([0, 1, -1], out3)
Example #2
  def testMultipleHashTables(self):
    with self.test_session() as sess:
      default_val = -1
      keys = tf.constant(["brain", "salad", "surgery"])
      values = tf.constant([0, 1, 2], tf.int64)

      table1 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
          default_val)
      table2 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
          default_val)
      table3 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.KeyValueTensorInitializer(keys, values),
          default_val)

      tf.initialize_all_tables().run()
      self.assertAllEqual(3, table1.size().eval())
      self.assertAllEqual(3, table2.size().eval())
      self.assertAllEqual(3, table3.size().eval())

      input_string = tf.constant(["brain", "salad", "tank"])
      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)
  def testMultipleHashTables(self):
    with self.test_session() as sess:
      shared_name = ''
      default_val = -1
      table1 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
      table2 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)
      table3 = tf.HashTable(tf.string, tf.int64, default_val, shared_name)

      keys = tf.constant(['brain', 'salad', 'surgery'])
      values = tf.constant([0, 1, 2], tf.int64)
      table1.initialize_from(keys, values)
      table2.initialize_from(keys, values)
      table3.initialize_from(keys, values)

      tf.initialize_all_tables().run()
      self.assertAllEqual(3, table1.size().eval())
      self.assertAllEqual(3, table2.size().eval())
      self.assertAllEqual(3, table3.size().eval())

      input_string = tf.constant(['brain', 'salad', 'tank'])
      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)
    def testMultipleHashTables(self):
        with self.test_session() as sess:
            shared_name = ''
            default_val = -1
            table1 = tf.HashTable(tf.string, tf.int64, default_val,
                                  shared_name)
            table2 = tf.HashTable(tf.string, tf.int64, default_val,
                                  shared_name)
            table3 = tf.HashTable(tf.string, tf.int64, default_val,
                                  shared_name)

            keys = tf.constant(['brain', 'salad', 'surgery'])
            values = tf.constant([0, 1, 2], tf.int64)
            table1.initialize_from(keys, values)
            table2.initialize_from(keys, values)
            table3.initialize_from(keys, values)

            tf.initialize_all_tables().run()
            self.assertAllEqual(3, table1.size().eval())
            self.assertAllEqual(3, table2.size().eval())
            self.assertAllEqual(3, table3.size().eval())

            input_string = tf.constant(['brain', 'salad', 'tank'])
            output1 = table1.lookup(input_string)
            output2 = table2.lookup(input_string)
            output3 = table3.lookup(input_string)

            out1, out2, out3 = sess.run([output1, output2, output3])
            self.assertAllEqual([0, 1, -1], out1)
            self.assertAllEqual([0, 1, -1], out2)
            self.assertAllEqual([0, 1, -1], out3)
    def test_duplicate_entries(self):
        with self.test_session():
            mapping_strings = tf.constant(["hello", "hello"])
            indices = tf.constant([0, 1, 4], tf.int64)
            feats = tf.contrib.lookup.index_to_string(indices, mapping=mapping_strings)
            tf.initialize_all_tables().run()
            self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())

            self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
    def test_duplicate_entries(self):
        with self.test_session():
            mapping_strings = tf.constant(["hello", "hello"])
            indices = tf.constant([0, 1, 4], tf.int64)
            feats = tf.contrib.lookup.index_to_string(indices,
                                                      mapping=mapping_strings)
            tf.initialize_all_tables().run()
            self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())

            self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
    def test_index_to_string_with_default_value(self):
        default_value = b"NONE"
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            indices = tf.constant([1, 2, 4], tf.int64)
            feats = tf.contrib.lookup.index_to_string(indices, mapping=mapping_strings, default_value=default_value)
            self.assertRaises(tf.OpError, feats.eval)

            tf.initialize_all_tables().run()
            self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
    def test_string_to_index_with_default_value(self):
        default_value = -42
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            feats = tf.constant(["salad", "surgery", "tarkus"])
            indices = tf.contrib.lookup.string_to_index(feats, mapping=mapping_strings, default_value=default_value)
            self.assertRaises(tf.OpError, indices.eval)

            tf.initialize_all_tables().run()
            self.assertAllEqual((1, 2, default_value), indices.eval())
    def test_index_to_string(self):
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            indices = tf.constant([0, 1, 2, 3], tf.int64)
            feats = tf.contrib.lookup.index_to_string(indices, mapping=mapping_strings)

            self.assertRaises(tf.OpError, feats.eval)
            tf.initialize_all_tables().run()

            self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"), feats.eval())
    def test_string_to_index_with_default_value(self):
        default_value = -42
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            feats = tf.constant(["salad", "surgery", "tarkus"])
            indices = tf.contrib.lookup.string_to_index(
                feats, mapping=mapping_strings, default_value=default_value)
            self.assertRaises(tf.OpError, indices.eval)

            tf.initialize_all_tables().run()
            self.assertAllEqual((1, 2, default_value), indices.eval())
    def test_index_to_string(self):
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            indices = tf.constant([0, 1, 2, 3], tf.int64)
            feats = tf.contrib.lookup.index_to_string(indices,
                                                      mapping=mapping_strings)

            self.assertRaises(tf.OpError, feats.eval)
            tf.initialize_all_tables().run()

            self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                                feats.eval())
    def test_index_to_string_with_default_value(self):
        default_value = b"NONE"
        with self.test_session():
            mapping_strings = tf.constant(["brain", "salad", "surgery"])
            indices = tf.constant([1, 2, 4], tf.int64)
            feats = tf.contrib.lookup.index_to_string(
                indices, mapping=mapping_strings, default_value=default_value)
            self.assertRaises(tf.OpError, feats.eval)

            tf.initialize_all_tables().run()
            self.assertAllEqual((b"salad", b"surgery", default_value),
                                feats.eval())
Example #13
def test():
    sess = tf.Session()
    x = tf_input_training()

    y = x

    tf.initialize_all_variables().run(session=sess)
    tf.initialize_all_tables().run(session=sess)

    out = sess.run(y)

    f = 1
Example #14
def export():
    with tf.Graph().as_default():
        #TODO(xuesen) for serving
        serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)

        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        # Run inference.
        feature = vgg.inference(images)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, 'model/inshop.sgd.adam')
            # Export inference model.
            init_op = tf.group(tf.initialize_all_tables(), name='init_op')

            #TODO() Export inference model using regression_signature ?
            feat_signature = exporter.regression_signature(
                input_tensor=serialized_tf_example, output_tensor=feature)
            named_graph_signature = {
                'inputs': exporter.generic_signature({'images': jpegs}),
                'outputs': exporter.generic_signature({'feats': feature})
            }
            model_exporter = exporter.Exporter(saver)
            model_exporter.init(default_graph_signature=feat_signature,
                                init_op=init_op,
                                named_graph_signatures=named_graph_signature)
            model_exporter.export('model/vgg_serving', tf.constant(150000),
                                  sess)
            print('Successfully exported model to model/.')
Example #15
def export():
    with tf.Graph().as_default():
        # Build inference model.
        # Please refer to Tensorflow inception model for details.

        # Input transformation.
        jpegs = tf.placeholder(tf.string)
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        print(images)
        # Run inference.
        feature = vgg.inference(images)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, 'model/inshop.sgd.adam')
            # Export inference model.
            init_op = tf.group(tf.initialize_all_tables(), name='init_op')

            model_exporter = exporter.Exporter(saver)
            signature = exporter.classification_signature(
                input_tensor=jpegs, classes_tensor=None, scores_tensor=feature)
            model_exporter.init(default_graph_signature=signature,
                                init_op=init_op)
            model_exporter.export('model', tf.constant(150000), sess)
            print('Successfully exported model to model/.')
Example #16
def gen_poetry():
    def to_word(weights):
        t = np.cumsum(weights)
        s = np.sum(weights)
        sample = int(np.searchsorted(t, np.random.rand(1) * s))
        return words[sample]

    _, last_state, probs, cell, initial_state = neural_network()

    with tf.Session() as sess:
        sess.run(tf.initialize_all_tables())
        saver = tf.train.Saver(tf.all_variables())
        saver.restore(sess, 'poetry.module-49')

        state_ = sess.run(cell.zero_state(1, tf.float32))

        x = np.array([list(map(word_num_map.get, '['))])
        [probs_, state_] = sess.run([probs, last_state],
                                    feed_dict={
                                        input_data: x,
                                        initial_state: state_
                                    })
        word = to_word(probs_)
        poem = ''
        while word != ']':
            poem += word
            x = np.zeros((1, 1))
            x[0, 0] = word_num_map[word]
            [probs_, state_] = sess.run([probs, last_state],
                                        feed_dict={
                                            input_data: x,
                                            initial_state: state_
                                        })
            word = to_word(probs_)
        return poem
Example #17
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10

    with tf.Session() as sess:
        sess.run(tf.initialize_all_tables())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train._num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost],
                                feed_dict={
                                    x: epoch_x,
                                    y: epoch_y
                                })
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss',
                  epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:',
              accuracy.eval({
                  x: mnist.test.images,
                  y: mnist.test.labels
              }))
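Note: the snippet above only runs the table initializer. For a model like this one, with trainable weights and the slot variables created by AdamOptimizer().minimize(), the variables themselves also have to be initialized before training. A minimal sketch of the usual session setup at the same pre-1.0 API level, not part of the original example:

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())  # weights, biases, Adam slot variables
    sess.run(tf.initialize_all_tables())     # lookup tables, if the graph has any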
    def test_duplicate_entries(self):
        with self.test_session():
            mapping_strings = tf.constant(["hello", "hello"])
            feats = tf.constant(["hello", "hola"])
            indices = tf.contrib.lookup.string_to_index(feats, mapping=mapping_strings)

            self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
def main(args):
    # Reading data
    df = pd.read_csv(args.csv_path)
    img_list = sorted(df.filename.unique())[args.start:]
    # Preparing
    img_dir = args.img_dir
    batch_size = args.batch_size
    batch = []

    print(">>> Checking {} images in {}".format(len(img_list), img_dir))
    # Start checking images
    with tf.Graph().as_default():
        init_op = tf.initialize_all_tables()
        with tf.Session() as sess:
            sess.run(init_op)
            for i, img_name in enumerate(tqdm(img_list)):
                img_contents = tf.read_file(os.path.join(img_dir, img_name))
                img = tf.image.decode_jpeg(img_contents, channels=3)
                batch.append(img)
                if (i + 1) % batch_size == 0:
                    try:
                        sess.run(batch)
                    except Exception:
                        if batch_size == 1:
                            print(">>> Found corrupted image: "
                                  "{}".format(img_name))
                        else:
                            print(">>> Found corrupted image(s) at batch "
                                  "{}-{}".format(i - batch_size, i))
                    batch = []
Example #20
    def _initialize_graph(self, saved_model_dir):
        self._saved_model_dir = saved_model_dir
        if self._session is not None:
            self._session.close()
        self._graph = tf.Graph()
        self._session = tf.Session(graph=self._graph)
        with self._graph.as_default():
            inputs, outputs = impl_helper.load_transform_fn_def(
                self._saved_model_dir)
            # Run the op that initializes all tables in the graph.
            if hasattr(tf, 'tables_initializer'):
                self._session.run(tf.tables_initializer())
            else:
                self._session.run(tf.initialize_all_tables())

        input_schema_keys = self._input_schema.column_schemas.keys()
        output_schema_keys = self._output_schema.column_schemas.keys()
        extra_input_keys = set(input_schema_keys).difference(inputs.keys())
        if extra_input_keys:
            raise ValueError('Input schema contained keys not in graph: %s' %
                             extra_input_keys)
        extra_output_keys = set(output_schema_keys).difference(outputs.keys())
        if extra_output_keys:
            raise ValueError('Output schema contained keys not in graph: %s' %
                             extra_output_keys)
        self._inputs = {key: inputs[key] for key in input_schema_keys}
        self._outputs = {key: outputs[key] for key in output_schema_keys}
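The hasattr guard above reflects the renaming of tf.initialize_all_tables() to tf.tables_initializer() in the TensorFlow 1.x line (in TensorFlow 2.x the op lives under tf.compat.v1.tables_initializer()). A minimal sketch of that guard factored into a reusable helper, assuming the same graph/session style as the rest of these examples:

import tensorflow as tf

def tables_init_op():
    # Prefer the non-deprecated name when the installed TensorFlow provides it.
    if hasattr(tf, 'tables_initializer'):
        return tf.tables_initializer()
    return tf.initialize_all_tables()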
Example #21
    def apply_model(self, x):
        x = x.data

        tmp = np.zeros((1,1))
        with tf.Session(graph=self._graph) as sess:
            tf.initialize_all_tables().run()

            feed_dict = {self._x: x,
                         self._W: self._result_W,
                         self._b: self._result_b}

            tmp = sess.run(self._y, feed_dict=feed_dict)

        ret = BrewPipeDataFrame('y')
        ret.data = tmp
        return ret
Example #22
def main(_):
    """Trains the unstable model."""

    dataset = mnist.train(FLAGS.data_dir)
    dataset = dataset.cache().shuffle(buffer_size=50000).batch(
        100).repeat()  # keep a 50000-element shuffle buffer and draw 100 examples per batch
    iterator = dataset.make_one_shot_iterator()  # read through an iterator; each element is discarded once emitted
    images, integer_labels = iterator.get_next()  # fetch the images and labels
    images = tf.reshape(images, [-1, 28, 28, 1])  # reshape; -1 lets that dimension be inferred

    # x_, integer_labels = iterator.get_next()
    # x_ = tf.reshape(x_, [-1, 28, 28, 1])
    label_input_tensor = tf.identity(integer_labels)  # label input tensor
    labels = tf.one_hot(label_input_tensor, 10)  # one-hot encode the labels

    # input_layer, hidden_layer_1, hidden_layer_2, logits, image_input_tensor = classifier(images, init_func)  # classify; yields the logits and the image input tensor
    logits = output_layer
    equality = tf.equal(tf.argmax(logits, 1),
                        tf.argmax(labels, 1))  # element-wise equality; argmax returns the index of the maximum
    accuracy = tf.reduce_mean(tf.to_float(equality))  # compute the accuracy

    # This will NaN if abs of any logit >= 88.
    bad_softmax = unsafe_softmax(logits)
    # This will NaN if max_logit - min_logit >= 88.
    bad_cross_entropies = unsafe_cross_entropy(bad_softmax, labels)
    loss = tf.reduce_mean(bad_cross_entropies)  # loss function
    # loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    optimizer = tf.train.GradientDescentOptimizer(0.01)  # optimizer

    tf.add_to_collection("input_tensors", image_input_tensor)  # 添加输入图片张量
    tf.add_to_collection("input_tensors", label_input_tensor)  # 添加标签张量
    tf.add_to_collection("coverage_tensors", logits)  # 输出层的输出
    tf.add_to_collection("metadata_tensors",
                         bad_softmax)  # 输出层经过unsafe_softmax后的输出
    tf.add_to_collection("metadata_tensors",
                         bad_cross_entropies)  # bad_softmax和labels的交叉熵
    tf.add_to_collection("metadata_tensors", logits)  # 输出层的输出

    train_op = optimizer.minimize(loss)  # 训练

    saver = tf.train.Saver(keep_checkpoint_every_n_hours=1)
    sess = tf.Session()
    sess.run(tf.initialize_all_tables())
    sess.run(tf.global_variables_initializer())

    # train classifier on these images and labels
    for idx in range(FLAGS.training_steps):
        sess.run(train_op, feed_dict={x_: images.eval()})
        if idx % 1000 == 0:
            loss_val, accuracy_val = sess.run([loss, accuracy])
            print(idx,
                  ":loss: {}, accuracy: {}".format(loss_val, accuracy_val))
            # print(len(input_values[0]))
            # print(os.path)
            saver.save(
                sess,
                os.path.join(FLAGS.checkpoint_dir, "fuzz_checkpoint"),
                global_step=idx,
            )  # save the model
Example #23
def exporter(saver, sess):
    model_exporter = exp.Exporter(saver)
    signature = exp.classification_signature(input_tensor=img,
                                             pred_tensor=pred_val)
    model_exporter.init(default_graph_signature=signature,
                        init_op=tf.initialize_all_tables())
    model_exporter.export(FLAGS.log_dir + "/export", tf.constant(time.time()),
                          sess)
    def test_duplicate_entries(self):
        with self.test_session():
            mapping_strings = tf.constant(["hello", "hello"])
            feats = tf.constant(["hello", "hola"])
            indices = tf.contrib.lookup.string_to_index(
                feats, mapping=mapping_strings)

            self.assertRaises(tf.OpError, tf.initialize_all_tables().run)
Example #25
def main():
    image_lists = create_image_lists(test_percentage, validation_percentage)
    n_classes = len(image_lists.keys())

    with gfile.FastGFile(os.path.join(model_dir, model_file), 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

        bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(graph_def, return_elements=[bottleneck_tensor_name,
                                                                                              jpeg_data_tensor_name])
        bottleneck_input = tf.placeholder(tf.float32, [None, bottleneck_tensor_size], name='BottleneckInputPlaceholder')
        ground_truth_input = tf.placeholder(tf.float32, [None, n_classes], name='GroundTruthInput')

        with tf.name_scope('final_training_ops'):
            weights = tf.Variable(tf.truncated_normal([bottleneck_tensor_size, n_classes], stddev=0.001))
            biases = tf.Variable(tf.zeros([n_classes]))
            logits = tf.matmul(bottleneck_input, weights) + biases
            final_tensor = tf.nn.softmax(logits)

        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=ground_truth_input)
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy_mean)

        with tf.name_scope('evalution'):
            correct_prediction = tf.equal(tf.argmax(final_tensor, 1), tf.argmax(ground_truth_input, 1))
            evalution_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        with tf.Session() as sess:
            init = tf.initialize_all_tables()
            sess.run(init)

            for i in range(steps):
                train_bottlenecks, train_ground_truth = get_random_cached_bottleneks(sess, n_classes, image_lists,
                                                                                     batch, 'training',
                                                                                     jpeg_data_tensor,
                                                                                     bottleneck_tensor)
                sess.run(train_step,
                         feed_dict={bottleneck_input: train_bottlenecks, ground_truth_input: train_ground_truth})

                if i % 10 == 0 or i + 1 == steps:
                    validation_bottlenecks, validation_ground_truth = get_random_cached_bottleneks(sess, n_classes,
                                                                                                   image_lists, batch,
                                                                                                   'validation',
                                                                                                   jpeg_data_tensor,
                                                                                                   bottleneck_tensor)
                    validation_accuracy = sess.run(evalution_step, feed_dict={bottleneck_input: validation_bottlenecks,
                                                                              ground_truth_input: validation_ground_truth})
                    print('Step %d: Validation accuracy on random sampled %d examples = %.1f%%' % (
                        i, batch, validation_accuracy * 100))

                test_bottlenecks, test_ground_truth = get_test_bottlenecks(sess, image_lists, n_classes,
                                                                           jpeg_data_tensor, bottleneck_tensor)
                test_accuracy = sess.run(evalution_step, feed_dict={bottleneck_input: test_bottlenecks,
                                                                    ground_truth_input: test_ground_truth})
                print('Step %d: Final test accuracy on random sampled %d examples = %.1f%%' % (
                    i, batch, test_accuracy * 100))
Example #26
def new_model(session):
    """ Initializes model from scratch and returns global step variable
    Args:
        session: Tensorflow session
    Returns:
        step: Global step variable
    """
    logger.info('Initializing model from scratch ...')
    session.run(tf.global_variables_initializer())
    session.run(tf.local_variables_initializer())
    session.run(tf.initialize_all_tables())
    return get_global_step()
Example #27
  def testInitializeSameTableWithMultipleNodes(self):
    vocabulary_file = self._createVocabFile("one_column_5.txt")

    with self.test_session() as sess:
      shared_name = "shared-one-columm"
      default_value = -1
      table1 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.TextFileInitializer(
              vocabulary_file, tf.string,
              tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
              tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      table2 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.TextFileInitializer(
              vocabulary_file, tf.string,
              tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
              tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      table3 = tf.contrib.lookup.HashTable(
          tf.contrib.lookup.TextFileInitializer(
              vocabulary_file, tf.string,
              tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
              tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)

      tf.initialize_all_tables().run()

      input_string = tf.constant(["brain", "salad", "tank"])

      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)
Example #28
    def testInitializeSameTableWithMultipleNodes(self):
        vocabulary_file = self._createVocabFile("one_column_5.txt")

        with self.test_session() as sess:
            shared_name = "shared-one-columm"
            default_value = -1
            table1 = tf.contrib.lookup.HashTable(
                tf.contrib.lookup.TextFileInitializer(
                    vocabulary_file, tf.string,
                    tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
                    tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
                default_value,
                shared_name=shared_name)
            table2 = tf.contrib.lookup.HashTable(
                tf.contrib.lookup.TextFileInitializer(
                    vocabulary_file, tf.string,
                    tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
                    tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
                default_value,
                shared_name=shared_name)
            table3 = tf.contrib.lookup.HashTable(
                tf.contrib.lookup.TextFileInitializer(
                    vocabulary_file, tf.string,
                    tf.contrib.lookup.TextFileIndex.WHOLE_LINE, tf.int64,
                    tf.contrib.lookup.TextFileIndex.LINE_NUMBER),
                default_value,
                shared_name=shared_name)

            tf.initialize_all_tables().run()

            input_string = tf.constant(["brain", "salad", "tank"])

            output1 = table1.lookup(input_string)
            output2 = table2.lookup(input_string)
            output3 = table3.lookup(input_string)

            out1, out2, out3 = sess.run([output1, output2, output3])
            self.assertAllEqual([0, 1, -1], out1)
            self.assertAllEqual([0, 1, -1], out2)
            self.assertAllEqual([0, 1, -1], out3)
    def loadckpt(self):
        tf.reset_default_graph()
        MODEL_DIR = os.path.abspath(os.path.dirname(__file__)) + "/output"
        checkpoint = tf.train.get_checkpoint_state(MODEL_DIR)
        last_model = checkpoint.model_checkpoint_path
        print("load {}".format(last_model))
        saver = tf.train.import_meta_graph(last_model + '.meta', clear_devices=CLEAR_DEVICES)
        graph = tf.get_default_graph()
        graph_def = graph.as_graph_def()


        with tf.Session() as sess:
            saver.restore(sess, last_model)
            export_path = 'tf_serving_export_{}_{}'.format('mobilenet', 'pascal')
            print('Exporting trained model to', export_path)
            if os.path.exists(export_path):
                shutil.rmtree(export_path)
            builder = saved_model_builder.SavedModelBuilder(export_path)

            image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
            boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
            scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
            classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')

            image_tensor_info = utils.build_tensor_info(image_tensor)
            boxes_tensor_info = utils.build_tensor_info(boxes)
            scores_tensor_info = utils.build_tensor_info(scores)
            classes_tensor_info = utils.build_tensor_info(classes)
            num_detections_tensor_info = utils.build_tensor_info(num_detections)

            prediction_signature = signature_def_utils.build_signature_def(
                inputs={'image_tensor': image_tensor_info},
                outputs={'boxes': boxes_tensor_info,
                         'scores': scores_tensor_info,
                         'classes': classes_tensor_info,
                         'num_detections': num_detections_tensor_info,
                         },
                method_name=signature_constants.PREDICT_METHOD_NAME)
            legacy_init_op = tf.group(tf.initialize_all_tables(),
                                      name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tag_constants.SERVING],
                signature_def_map={
                    'predict_bbox':
                        prediction_signature,
                },
                legacy_init_op=legacy_init_op)

            builder.save()

            print('Done exporting!')
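The legacy_init_op above is the usual place to hang tf.initialize_all_tables() (or tf.tables_initializer() on TF 1.x) so that lookup tables are initialized when the SavedModel is loaded. Later TF 1.x releases also accept, and eventually prefer, a main_op argument on add_meta_graph_and_variables; the following is a hedged sketch of the same call under that assumption, not verified against the exact TensorFlow version used in this example:

main_op = tf.group(tf.tables_initializer(), name='main_op')
builder.add_meta_graph_and_variables(
    sess, [tag_constants.SERVING],
    signature_def_map={'predict_bbox': prediction_signature},
    main_op=main_op)
builder.save()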
Example #30
def checkJPG(fn):
    with tf.Graph().as_default():
        try:
            image_contents = tf.read_file(fn)
            image = tf.image.decode_jpeg(image_contents, channels=3)
            init_op = tf.initialize_all_tables()
            with tf.Session() as sess:
                sess.run(init_op)
                tmp = sess.run(image)
        except Exception:
            print("Corrupted file: ", fn)
            return False
    return True
Example #31
def verify(url,fpath):
    with tf.Graph().as_default():
        image_contents = tf.read_file(fpath)
        image = tf.image.decode_jpeg(image_contents, channels=3)
        init_op = tf.initialize_all_tables()
        try:
            with tf.Session() as sess:
                sess.run(init_op)
                tmp = sess.run(image)
        except Exception as e:
            print(e)
            curs.append((fpath,url))
            return False
    return True
Example #32
 def testGetModelInput(self):
     initial_state, sequence_input = self._rnn_estimator._get_model_input(
         self._columns_to_tensors)
     self.assertIsNone(initial_state)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         sess.run(tf.initialize_all_tables())
         sequence_input_val = sess.run(sequence_input)
     expected_shape = np.array([
         3,  # expected batch size
         2,  # padded sequence length
         3 + 8 + 2  # location keys + embedding dim + measurement dimension
     ])
     self.assertAllEqual(expected_shape, sequence_input_val.shape)
 def testBuildSequenceInputInput(self):
     sequence_input = dynamic_rnn_estimator.build_sequence_input(
         self.GetColumnsToTensors(), self.sequence_feature_columns,
         self.context_feature_columns)
     with self.test_session() as sess:
         sess.run(tf.global_variables_initializer())
         sess.run(tf.initialize_all_tables())
         sequence_input_val = sess.run(sequence_input)
     expected_shape = np.array([
         3,  # expected batch size
         2,  # padded sequence length
         3 + 8 + 2  # location keys + embedding dim + measurement dimension
     ])
     self.assertAllEqual(expected_shape, sequence_input_val.shape)
 def testGetModelInput(self):
   initial_state, sequence_input = self._rnn_estimator._get_model_input(
       self._columns_to_tensors)
   self.assertIsNone(initial_state)
   with self.test_session() as sess:
     sess.run(tf.initialize_all_variables())
     sess.run(tf.initialize_all_tables())
     sequence_input_val = sess.run(sequence_input)
   expected_shape = np.array([
       3,         # expected batch size
       2,         # padded sequence length
       3 + 8 + 2  # location keys + embedding dim + measurement dimension
   ])
   self.assertAllEqual(expected_shape, sequence_input_val.shape)
 def testBuildSequenceInputInput(self):
   sequence_input = dynamic_rnn_estimator.build_sequence_input(
       self.columns_to_tensors,
       self.sequence_feature_columns,
       self.context_feature_columns)
   with self.test_session() as sess:
     sess.run(tf.global_variables_initializer())
     sess.run(tf.initialize_all_tables())
     sequence_input_val = sess.run(sequence_input)
   expected_shape = np.array([
       3,         # expected batch size
       2,         # padded sequence length
       3 + 8 + 2  # location keys + embedding dim + measurement dimension
   ])
   self.assertAllEqual(expected_shape, sequence_input_val.shape)
Example #36
def restore_model(session, saver, path):
    """ Initializes a model that has been previously trained and
    returns global step
    Args:
        session: Tensorflow session
        saver: Tensorflow saver
        path: Path where model to be loaded is
    Returns:
        Global step variable
    """
    logger.info('Starting model from %s' % path)
    session.run(tf.local_variables_initializer())
    session.run(tf.initialize_all_tables())
    saver.restore(session, path)
    return get_global_step()
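For context, a possible call site that chooses between this helper and new_model from Example #26; the checkpoint directory, the saver, and the session setup are assumptions for illustration, not taken from either example:

with tf.Session() as session:
    saver = tf.train.Saver()
    latest = tf.train.latest_checkpoint('checkpoints/')  # hypothetical directory
    if latest:
        step = restore_model(session, saver, latest)
    else:
        step = new_model(session)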
Example #37
def _export_model():
    test_src_file = tf.placeholder(tf.string, [None])
    data_dir = join(dirname(dirname(abspath(__file__))), 'train', 'data')
    # vocab files
    src_vocab_file = join(data_dir, hparams.vocab_prefix + '.' + hparams.src)
    tgt_vocab_file = join(data_dir, hparams.vocab_prefix + '.' + hparams.tgt)
    src_vocab2idx_table, tgt_vocab2idx_table = create_vocab2idx_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)
    src_idx2vocab_table, tgt_idx2vocab_table = create_idx2vocab_tables(
        src_vocab_file, tgt_vocab_file, hparams.share_vocab)

    with tf.name_scope('TestInput'):
        test_inputs = get_infer_input(hparams, test_src_file,
                                      src_vocab2idx_table)

    with tf.name_scope('Test'):
        with tf.variable_scope('NMTModel', reuse=None):
            test_model = NMTModel(hparams, tf.contrib.learn.ModeKeys.INFER,
                                  test_inputs, src_vocab2idx_table,
                                  tgt_vocab2idx_table, src_idx2vocab_table,
                                  tgt_idx2vocab_table)

    # restore checkpoint
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint(hparams.save_path))

    model_signature = signature_def_utils.build_signature_def(
        inputs={'inputs': utils.build_tensor_info(test_src_file)},
        outputs={
            'bpe_sentences':
            utils.build_tensor_info(test_model.infer_bpe_sentences)
        },
        method_name=signature_constants.PREDICT_METHOD_NAME)

    builder = saved_model_builder.SavedModelBuilder(hparams.export_path)
    builder.add_meta_graph_and_variables(
        sess, [tag_constants.SERVING],
        clear_devices=True,
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            model_signature,
        },
        legacy_init_op=tf.group(tf.initialize_all_tables(),
                                name='legacy_init_op'))

    builder.save()
Example #38
    def train(self, session, X, Y):
        session.run(tf.initialize_all_tables())

        for epoch in xrange(self.num_epochs):
            avg_cost = 0
            for i in xrange(self.steps_per_epoch):
                batch_X, batch_Y = self.get_batch(X, Y)
                _, c = session.run([self.updates, self.cost],
                                   feed_dict={
                                       self.X: batch_X,
                                       self.Y: batch_Y
                                   })
                avg_cost += c
            avg_cost /= self.steps_per_epoch
            print("Epoch:", '%04d' % (epoch + 1), "loss=",
                  "{:.9f}".format(avg_cost))
Example #39
    def __init__(self,
                 hub_model="https://tfhub.dev/google/elmo/2",
                 trainable=False,
                 signature="default"):
        self.trainable = trainable
        self.signature = signature
        self._sess = tf.Session()
        self._model = hub.Module(hub_model, trainable=trainable)
        self._input = tf.placeholder(tf.string, [None])
        self.res = self._model(self._input,
                               signature=self.signature,
                               as_dict=True)['default']

        self._sess.run(
            [tf.initialize_all_tables(),
             tf.initialize_all_variables()])
  def testConstructRNN(self):
    """Test `DynamicRNNEstimator._construct_rnn`."""
    initial_state, sequence_input = self._rnn_estimator._get_model_input(
        self._columns_to_tensors)
    activations_t, final_state_t = self._rnn_estimator._construct_rnn(
        initial_state, sequence_input)

    # Obtain values of activations and final state.
    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      sess.run(tf.initialize_all_tables())
      activations, final_state = sess.run([activations_t, final_state_t])

    expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
    self.assertAllEqual(expected_activations_shape, activations.shape)
    expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
    self.assertAllEqual(expected_state_shape, final_state.shape)
Example #41
    def testConstructRNN(self):
        """Test `DynamicRNNEstimator._construct_rnn`."""
        initial_state, sequence_input = self._rnn_estimator._get_model_input(
            self._columns_to_tensors)
        activations_t, final_state_t = self._rnn_estimator._construct_rnn(
            initial_state, sequence_input)

        # Obtain values of activations and final state.
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.initialize_all_tables())
            activations, final_state = sess.run([activations_t, final_state_t])

        expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
        self.assertAllEqual(expected_activations_shape, activations.shape)
        expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
        self.assertAllEqual(expected_state_shape, final_state.shape)
  def testConstructRNN(self):
    initial_state = None
    sequence_input = dynamic_rnn_estimator.build_sequence_input(
        self.columns_to_tensors,
        self.sequence_feature_columns,
        self.context_feature_columns)
    activations_t, final_state_t = dynamic_rnn_estimator.construct_rnn(
        initial_state,
        sequence_input,
        self.rnn_cell,
        self.mock_target_column.num_label_columns)

    # Obtain values of activations and final state.
    with tf.Session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.initialize_all_tables())
      activations, final_state = sess.run([activations_t, final_state_t])

    expected_activations_shape = np.array([3, 2, self.NUM_LABEL_COLUMNS])
    self.assertAllEqual(expected_activations_shape, activations.shape)
    expected_state_shape = np.array([3, self.NUM_RNN_CELL_UNITS])
    self.assertAllEqual(expected_state_shape, final_state.shape)
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
    feature_configs = {
        'image/encoded': tf.FixedLenFeature(
            shape=[], dtype=tf.string),
    }
    tf_example = tf.parse_example(serialized_tf_example, feature_configs)
    jpegs = tf_example['image/encoded']
    images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(
        tf.to_int64(indices), mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' % (
            ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      output_path = os.path.join(
          compat.as_bytes(FLAGS.output_dir),
          compat.as_bytes(str(FLAGS.model_version)))
      print('Exporting trained model to', output_path)
      builder = saved_model_builder.SavedModelBuilder(output_path)

      # Build the signature_def_map.
      classify_inputs_tensor_info = utils.build_tensor_info(
          serialized_tf_example)
      classes_output_tensor_info = utils.build_tensor_info(classes)
      scores_output_tensor_info = utils.build_tensor_info(values)

      classification_signature = signature_def_utils.build_signature_def(
          inputs={
              signature_constants.CLASSIFY_INPUTS: classify_inputs_tensor_info
          },
          outputs={
              signature_constants.CLASSIFY_OUTPUT_CLASSES:
                  classes_output_tensor_info,
              signature_constants.CLASSIFY_OUTPUT_SCORES:
                  scores_output_tensor_info
          },
          method_name=signature_constants.CLASSIFY_METHOD_NAME)

      predict_inputs_tensor_info = utils.build_tensor_info(jpegs)
      prediction_signature = signature_def_utils.build_signature_def(
          inputs={'images': predict_inputs_tensor_info},
          outputs={
              'classes': classes_output_tensor_info,
              'scores': scores_output_tensor_info
          },
          method_name=signature_constants.PREDICT_METHOD_NAME)

      legacy_init_op = tf.group(
          tf.initialize_all_tables(), name='legacy_init_op')
      builder.add_meta_graph_and_variables(
          sess, [tag_constants.SERVING],
          signature_def_map={
              'predict_images':
                  prediction_signature,
              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                  classification_signature,
          },
          legacy_init_op=legacy_init_op)

      builder.save()
      print('Successfully exported model to %s' % FLAGS.output_dir)
def main():
  # Get hyperparameters
  if FLAGS.enable_colored_log:
    import coloredlogs
    coloredlogs.install()
  logging.basicConfig(level=logging.INFO)
  INPUT_FILE_FORMAT = FLAGS.input_file_format
  if INPUT_FILE_FORMAT not in ["tfrecord", "csv"]:
    logging.error("Unknow input file format: {}".format(INPUT_FILE_FORMAT))
    exit(1)
  FEATURE_SIZE = FLAGS.feature_size
  LABEL_SIZE = FLAGS.label_size
  EPOCH_NUMBER = FLAGS.epoch_number
  if EPOCH_NUMBER <= 0:
    EPOCH_NUMBER = None
  BATCH_THREAD_NUMBER = FLAGS.batch_thread_number
  MIN_AFTER_DEQUEUE = FLAGS.min_after_dequeue
  BATCH_CAPACITY = BATCH_THREAD_NUMBER * FLAGS.batch_size + MIN_AFTER_DEQUEUE
  MODE = FLAGS.mode
  MODEL = FLAGS.model
  CHECKPOINT_PATH = FLAGS.checkpoint_path
  if not CHECKPOINT_PATH.startswith("fds://") and not os.path.exists(
      CHECKPOINT_PATH):
    os.makedirs(CHECKPOINT_PATH)
  CHECKPOINT_FILE = CHECKPOINT_PATH + "/checkpoint.ckpt"
  LATEST_CHECKPOINT = tf.train.latest_checkpoint(CHECKPOINT_PATH)
  OUTPUT_PATH = FLAGS.output_path
  if not OUTPUT_PATH.startswith("fds://") and not os.path.exists(OUTPUT_PATH):
    os.makedirs(OUTPUT_PATH)
  pprint.PrettyPrinter().pprint(FLAGS.__flags)

  # Process TFRecords files
  def read_and_decode_tfrecord(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            "label": tf.FixedLenFeature([], tf.float32),
            "features": tf.FixedLenFeature([FEATURE_SIZE], tf.float32),
        })
    label = features["label"]
    features = features["features"]
    return label, features

  def read_and_decode_csv(filename_queue):
    # TODO: Not generic for all datasets
    reader = tf.TextLineReader()
    key, value = reader.read(filename_queue)

    # Default values, in case of empty columns. Also specifies the type of the
    # decoded result.
    #record_defaults = [[1], [1], [1], [1], [1]]
    record_defaults = [[1], [1.0], [1.0], [1.0], [1.0]]
    col1, col2, col3, col4, col5 = tf.decode_csv(
        value, record_defaults=record_defaults)
    label = col1
    features = tf.stack([col2, col3, col4, col5])
    return label, features

  # Read TFRecords files for training
  filename_queue = tf.train.string_input_producer(
      tf.train.match_filenames_once(FLAGS.train_file),
      num_epochs=EPOCH_NUMBER)
  if INPUT_FILE_FORMAT == "tfrecord":
    label, features = read_and_decode_tfrecord(filename_queue)
  elif INPUT_FILE_FORMAT == "csv":
    label, features = read_and_decode_csv(filename_queue)
  batch_labels, batch_features = tf.train.shuffle_batch(
      [label, features],
      batch_size=FLAGS.batch_size,
      num_threads=BATCH_THREAD_NUMBER,
      capacity=BATCH_CAPACITY,
      min_after_dequeue=MIN_AFTER_DEQUEUE)

  # Read TFRecords file for validation
  validate_filename_queue = tf.train.string_input_producer(
      tf.train.match_filenames_once(FLAGS.validate_file),
      num_epochs=EPOCH_NUMBER)
  if INPUT_FILE_FORMAT == "tfrecord":
    validate_label, validate_features = read_and_decode_tfrecord(
        validate_filename_queue)
  elif INPUT_FILE_FORMAT == "csv":
    validate_label, validate_features = read_and_decode_csv(
        validate_filename_queue)
  validate_batch_labels, validate_batch_features = tf.train.shuffle_batch(
      [validate_label, validate_features],
      batch_size=FLAGS.validate_batch_size,
      num_threads=BATCH_THREAD_NUMBER,
      capacity=BATCH_CAPACITY,
      min_after_dequeue=MIN_AFTER_DEQUEUE)

  # Define the model
  input_units = FEATURE_SIZE
  output_units = LABEL_SIZE
  model_network_hidden_units = [int(i) for i in FLAGS.model_network.split()]

  def full_connect(inputs, weights_shape, biases_shape, is_train=True):
    weights = tf.get_variable("weights",
                              weights_shape,
                              initializer=tf.random_normal_initializer())
    biases = tf.get_variable("biases",
                             biases_shape,
                             initializer=tf.random_normal_initializer())
    layer = tf.matmul(inputs, weights) + biases

    if FLAGS.enable_bn and is_train:
      mean, var = tf.nn.moments(layer, axes=[0])
      scale = tf.get_variable("scale",
                              biases_shape,
                              initializer=tf.random_normal_initializer())
      shift = tf.get_variable("shift",
                              biases_shape,
                              initializer=tf.random_normal_initializer())
      layer = tf.nn.batch_normalization(layer, mean, var, shift, scale,
                                        FLAGS.bn_epsilon)
    return layer

  def full_connect_relu(inputs, weights_shape, biases_shape, is_train=True):
    layer = full_connect(inputs, weights_shape, biases_shape, is_train)
    layer = tf.nn.relu(layer)
    return layer

  def customized_inference(inputs, is_train=True):
    hidden1_units = 128
    hidden2_units = 32
    hidden3_units = 8

    with tf.variable_scope("input"):
      layer = full_connect_relu(inputs, [input_units, hidden1_units],
                                [hidden1_units], is_train)
    with tf.variable_scope("layer0"):
      layer = full_connect_relu(layer, [hidden1_units, hidden2_units],
                                [hidden2_units], is_train)
    with tf.variable_scope("layer1"):
      layer = full_connect_relu(layer, [hidden2_units, hidden3_units],
                                [hidden3_units], is_train)
    if FLAGS.enable_dropout and is_train:
      layer = tf.nn.dropout(layer, FLAGS.dropout_keep_prob)
    with tf.variable_scope("output"):
      layer = full_connect(layer, [hidden3_units, output_units],
                           [output_units], is_train)
    return layer

  def dnn_inference(inputs, is_train=True):
    with tf.variable_scope("input"):
      layer = full_connect_relu(inputs,
                                [input_units, model_network_hidden_units[0]],
                                [model_network_hidden_units[0]], is_train)

    for i in range(len(model_network_hidden_units) - 1):
      with tf.variable_scope("layer{}".format(i)):
        layer = full_connect_relu(
            layer,
            [model_network_hidden_units[i], model_network_hidden_units[i + 1]],
            [model_network_hidden_units[i + 1]], is_train)

    with tf.variable_scope("output"):
      layer = full_connect(layer,
                           [model_network_hidden_units[-1], output_units],
                           [output_units], is_train)
    return layer

  def lr_inference(inputs, is_train=True):
    with tf.variable_scope("lr"):
      layer = full_connect(inputs, [input_units, output_units], [output_units])
    return layer

  def wide_and_deep_inference(inputs, is_train=True):
    return lr_inference(inputs, is_train) + dnn_inference(inputs, is_train)

  def cnn_inference(inputs, is_train=True):
    # TODO: Change if validate_batch_size is different
    # [BATCH_SIZE, 512 * 512 * 1] -> [BATCH_SIZE, 512, 512, 1]
    inputs = tf.reshape(inputs, [FLAGS.batch_size, 512, 512, 1])

    # [BATCH_SIZE, 512, 512, 1] -> [BATCH_SIZE, 128, 128, 8]
    with tf.variable_scope("conv0"):
      weights = tf.get_variable("weights", [3, 3, 1, 8],
                                initializer=tf.random_normal_initializer())
      bias = tf.get_variable("bias", [8],
                             initializer=tf.random_normal_initializer())

      layer = tf.nn.conv2d(inputs,
                           weights,
                           strides=[1, 1, 1, 1],
                           padding="SAME")
      layer = tf.nn.bias_add(layer, bias)
      layer = tf.nn.relu(layer)
      layer = tf.nn.max_pool(layer,
                             ksize=[1, 4, 4, 1],
                             strides=[1, 4, 4, 1],
                             padding="SAME")

    # [BATCH_SIZE, 128, 128, 8] -> [BATCH_SIZE, 32, 32, 8]
    with tf.variable_scope("conv1"):
      weights = tf.get_variable("weights", [3, 3, 8, 8],
                                initializer=tf.random_normal_initializer())
      bias = tf.get_variable("bias", [8],
                             initializer=tf.random_normal_initializer())

      layer = tf.nn.conv2d(layer,
                           weights,
                           strides=[1, 1, 1, 1],
                           padding="SAME")
      layer = tf.nn.bias_add(layer, bias)
      layer = tf.nn.relu(layer)
      layer = tf.nn.max_pool(layer,
                             ksize=[1, 4, 4, 1],
                             strides=[1, 4, 4, 1],
                             padding="SAME")

    # [BATCH_SIZE, 32, 32, 8] -> [BATCH_SIZE, 8, 8, 8]
    with tf.variable_scope("conv2"):
      weights = tf.get_variable("weights", [3, 3, 8, 8],
                                initializer=tf.random_normal_initializer())
      bias = tf.get_variable("bias", [8],
                             initializer=tf.random_normal_initializer())

      layer = tf.nn.conv2d(layer,
                           weights,
                           strides=[1, 1, 1, 1],
                           padding="SAME")
      layer = tf.nn.bias_add(layer, bias)
      layer = tf.nn.relu(layer)
      layer = tf.nn.max_pool(layer,
                             ksize=[1, 4, 4, 1],
                             strides=[1, 4, 4, 1],
                             padding="SAME")

    # [BATCH_SIZE, 8, 8, 8] -> [BATCH_SIZE, 8 * 8 * 8]
    layer = tf.reshape(layer, [-1, 8 * 8 * 8])

    # [BATCH_SIZE, 8 * 8 * 8] -> [BATCH_SIZE, LABEL_SIZE]
    with tf.variable_scope("output"):
      weights = tf.get_variable("weights", [8 * 8 * 8, LABEL_SIZE],
                                initializer=tf.random_normal_initializer())
      bias = tf.get_variable("bias", [LABEL_SIZE],
                             initializer=tf.random_normal_initializer())
      layer = tf.add(tf.matmul(layer, weights), bias)

    return layer

  def inference(inputs, is_train=True):
    if MODEL == "dnn":
      return dnn_inference(inputs, is_train)
    elif MODEL == "lr":
      return lr_inference(inputs, is_train)
    elif MODEL == "wide_and_deep":
      return wide_and_deep_inference(inputs, is_train)
    elif MODEL == "customized":
      return customized_inference(inputs, is_train)
    elif MODEL == "cnn":
      return cnn_inference(inputs, is_train)
    else:
      logging.error("Unknown model, exit now")
      exit(1)

  logging.info("Use the model: {}, model network: {}".format(
      MODEL, FLAGS.model_network))
  logits = inference(batch_features, True)
  batch_labels = tf.to_int64(batch_labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=batch_labels)
  loss = tf.reduce_mean(cross_entropy, name="loss")
  global_step = tf.Variable(0, name="global_step", trainable=False)
  if FLAGS.enable_lr_decay:
    logging.info("Enable learning rate decay rate: {}".format(
        FLAGS.lr_decay_rate))
    starter_learning_rate = FLAGS.learning_rate
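    # With staircase=True the rate is multiplied by FLAGS.lr_decay_rate
    # once every 100000 global steps.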
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step,
                                               100000,
                                               FLAGS.lr_decay_rate,
                                               staircase=True)
  else:
    learning_rate = FLAGS.learning_rate
  optimizer = get_optimizer(FLAGS.optimizer, learning_rate)
  train_op = optimizer.minimize(loss, global_step=global_step)
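  # Reuse the variables defined above so that the evaluation and inference
  # graphs built below share the trained weights instead of creating new ones.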
  tf.get_variable_scope().reuse_variables()

  # Define accuracy op for train data
  train_accuracy_logits = inference(batch_features, False)
  train_softmax = tf.nn.softmax(train_accuracy_logits)
  train_correct_prediction = tf.equal(
      tf.argmax(train_softmax, 1), batch_labels)
  train_accuracy = tf.reduce_mean(tf.cast(train_correct_prediction,
                                          tf.float32))

  # Define auc op for train data
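  # streaming_auc expects dense labels, so scatter each integer label into a
  # one-hot row of a [batch_size, LABEL_SIZE] matrix with sparse_to_dense.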
  batch_labels = tf.cast(batch_labels, tf.int32)
  sparse_labels = tf.reshape(batch_labels, [-1, 1])
  derived_size = tf.shape(batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, LABEL_SIZE])
  new_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, train_auc = tf.contrib.metrics.streaming_auc(train_softmax,
                                                  new_batch_labels)

  # Define accuracy op for validate data
  validate_accuracy_logits = inference(validate_batch_features, False)
  validate_softmax = tf.nn.softmax(validate_accuracy_logits)
  validate_batch_labels = tf.to_int64(validate_batch_labels)
  validate_correct_prediction = tf.equal(
      tf.argmax(validate_softmax, 1), validate_batch_labels)
  validate_accuracy = tf.reduce_mean(tf.cast(validate_correct_prediction,
                                             tf.float32))

  # Define auc op for validate data
  validate_batch_labels = tf.cast(validate_batch_labels, tf.int32)
  sparse_labels = tf.reshape(validate_batch_labels, [-1, 1])
  derived_size = tf.shape(validate_batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, LABEL_SIZE])
  new_validate_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, validate_auc = tf.contrib.metrics.streaming_auc(validate_softmax,
                                                     new_validate_batch_labels)

  # Define inference op
  inference_features = tf.placeholder("float", [None, FEATURE_SIZE])
  inference_logits = inference(inference_features, False)
  inference_softmax = tf.nn.softmax(inference_logits)
  inference_op = tf.argmax(inference_softmax, 1)
  keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1])
  keys = tf.identity(keys_placeholder)
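  # The keys are passed through unchanged so that each prediction in the
  # response can be matched back to the key sent with the request.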
  model_signature = {
      "inputs": exporter.generic_signature({"keys": keys_placeholder,
                                            "features": inference_features}),
      "outputs": exporter.generic_signature({"keys": keys,
                                             "softmax": inference_softmax,
                                             "prediction": inference_op})
  }

  # Initialize saver and summary
  saver = tf.train.Saver()
  tf.summary.scalar("loss", loss)
  tf.summary.scalar("train_accuracy", train_accuracy)
  tf.summary.scalar("train_auc", train_auc)
  tf.summary.scalar("validate_accuracy", validate_accuracy)
  tf.summary.scalar("validate_auc", validate_auc)
  summary_op = tf.summary.merge_all()
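  # local_variables_initializer is required because the streaming_auc ops
  # above keep their running totals in local variables.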
  init_op = [tf.global_variables_initializer(),
             tf.local_variables_initializer()]

  # Create session to run
  with tf.Session() as sess:
    logging.info("Start to run with mode: {}".format(MODE))
    writer = tf.summary.FileWriter(OUTPUT_PATH, sess.graph)
    sess.run(init_op)

    if MODE == "train":
      # Restore session and start queue runner
      restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT)
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord, sess=sess)
      start_time = datetime.datetime.now()

      try:
        while not coord.should_stop():
          _, loss_value, step = sess.run([train_op, loss, global_step])

          # Print state while training
          if step % FLAGS.steps_to_validate == 0:
            train_accuracy_value, train_auc_value, validate_accuracy_value, validate_auc_value, summary_value = sess.run(
                [train_accuracy, train_auc, validate_accuracy, validate_auc,
                 summary_op])
            end_time = datetime.datetime.now()
            logging.info(
                "[{}] Step: {}, loss: {}, train_acc: {}, train_auc: {}, valid_acc: {}, valid_auc: {}".format(
                    end_time - start_time, step, loss_value,
                    train_accuracy_value, train_auc_value,
                    validate_accuracy_value, validate_auc_value))
            writer.add_summary(summary_value, step)
            saver.save(sess, CHECKPOINT_FILE, global_step=step)
            start_time = end_time
      except tf.errors.OutOfRangeError:
        # Export the model after training
        export_model(sess, saver, model_signature, FLAGS.model_path,
                     FLAGS.model_version)
      finally:
        coord.request_stop()
      coord.join(threads)

    elif MODE == "export":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Export the model
      export_model(sess, saver, model_signature, FLAGS.model_path,
                   FLAGS.model_version)

    elif MODE == "savedmodel":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)

      logging.info("Export the saved model to {}".format(
          FLAGS.saved_model_path))
      export_path_base = FLAGS.saved_model_path
      export_path = os.path.join(
          compat.as_bytes(export_path_base),
          compat.as_bytes(str(FLAGS.model_version)))

      model_signature = signature_def_utils.build_signature_def(
          inputs={
              "keys": utils.build_tensor_info(keys_placeholder),
              "features": utils.build_tensor_info(inference_features)
          },
          outputs={
              "keys": utils.build_tensor_info(keys),
              "softmax": utils.build_tensor_info(inference_softmax),
              "prediction": utils.build_tensor_info(inference_op)
          },
          method_name=signature_constants.PREDICT_METHOD_NAME)

      try:
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            sess,
            [tag_constants.SERVING],
            clear_devices=True,
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                model_signature,
            },
            #legacy_init_op=legacy_init_op)
            legacy_init_op=tf.group(tf.initialize_all_tables(),
                                    name="legacy_init_op"))

        builder.save()
      except Exception as e:
        logging.error("Fail to export saved model, exception: {}".format(e))

    elif MODE == "inference":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Load inference test data
      inference_result_file_name = FLAGS.inference_result_file
      inference_test_file_name = FLAGS.inference_test_file
      inference_data = np.genfromtxt(inference_test_file_name, delimiter=",")
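      # Columns 0-8 of the CSV hold the features and column 9 holds the label.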
      inference_data_features = inference_data[:, 0:9]
      inference_data_labels = inference_data[:, 9]

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_op, inference_softmax],
          feed_dict={inference_features: inference_data_features})
      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(inference_data_labels)
      correct_label_number = 0
      for i in range(label_number):
        if inference_data_labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      y_true = np.array(inference_data_labels)
      y_score = prediction_softmax[:, 1]
      fpr, tpr, thresholds = metrics.roc_curve(y_true,
                                               y_score,
                                               pos_label=1)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax, delimiter=",")
      logging.info("Save result to file: {}".format(
          inference_result_file_name))
def save_model():
    with tf.Graph().as_default():
        # Define the input placeholder
        _images = tf.placeholder(tf.float32, shape=[None, FLAGS.image_height, FLAGS.image_width, 3])

        # Inference.
        logits = reconobook_modelo.inference(_images)

        # clase = tf.argmax(logits, 1)

        values, indices = tf.nn.top_k(logits, 10)
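        # index_to_string builds a lookup table that maps each predicted class
        # index to its string label ("0".."9").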
        prediction_classes = tf.contrib.lookup.index_to_string(
            tf.to_int64(indices), mapping=tf.constant([str(i) for i in range(10)]))

        with tf.Session() as sess:
            # Load the trained model from the latest checkpoint
            ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
            variable_averages = tf.train.ExponentialMovingAverage(FLAGS.moving_average_decay)
            variables_to_restore = variable_averages.variables_to_restore()
            saver = tf.train.Saver(variables_to_restore)
            saver.restore(sess, ckpt.model_checkpoint_path)

            # Define the path where the model will be exported
            export_path = os.path.join(
                compat.as_bytes(FLAGS.export_model_dir),
                compat.as_bytes(str(FLAGS.model_version)))

            # Remove any previous export at this path; SavedModelBuilder needs
            # the export directory not to exist yet
            if os.path.exists(export_path):
                shutil.rmtree(export_path)

            print('Exporting model to %s' % export_path)

            # Create the SavedModel builder
            builder = saved_model_builder.SavedModelBuilder(export_path)

            # Build the signature_def_map.
            classification_inputs = utils.build_tensor_info(_images)
            classification_outputs_classes = utils.build_tensor_info(prediction_classes)
            classification_outputs_scores = utils.build_tensor_info(values)

            classification_signature = signature_def_utils.build_signature_def(
                inputs={signature_constants.CLASSIFY_INPUTS: classification_inputs},
                outputs={
                    signature_constants.CLASSIFY_OUTPUT_CLASSES:
                        classification_outputs_classes,
                    signature_constants.CLASSIFY_OUTPUT_SCORES:
                        classification_outputs_scores
                },
                method_name=signature_constants.CLASSIFY_METHOD_NAME)

            tensor_info_x = utils.build_tensor_info(_images)
            tensor_info_y = utils.build_tensor_info(logits)
            
            prediction_signature = signature_def_utils.build_signature_def(
                inputs={'images': tensor_info_x},
                outputs={'scores': tensor_info_y},
                method_name=signature_constants.PREDICT_METHOD_NAME)

            legacy_init_op = tf.group(tf.initialize_all_tables(), name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tag_constants.SERVING],
                signature_def_map={
                    'predict_images':
                        prediction_signature,
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                        classification_signature,
                },
                legacy_init_op=legacy_init_op)

            builder.save()

            print('Model exported')
#!/usr/bin/env python
import tensorflow as tf

# Create a Constant op
# The op is added as a node to the default graph.
#
# The value returned by the constructor represents the output
# of the Constant op.
hello = tf.constant('Hello, TensorFlow!')
x = tf.placeholder("float", 3)
a = tf.placeholder("float", shape=[None, 3])

y = x*2
b = a*2

# Start tf session
sess = tf.Session()
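# This graph only contains constants and placeholders, so there are no tables
# (or variables) for the initializer below to initialize.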
sess.run(tf.initialize_all_tables())

print sess.run(hello)
print sess.run(y, feed_dict={x:[1,2,3]})
print sess.run(b, feed_dict={a:[[1,2,3], [4,5,6]]})

sess.close()
Example #47
def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_export.py [--training_iteration=x] '
          '[--export_version=y] export_dir')
    sys.exit(-1)
  if FLAGS.training_iteration <= 0:
    print('Please specify a positive value for training iteration.')
    sys.exit(-1)
  if FLAGS.export_version <= 0:
    print('Please specify a positive value for version number.')
    sys.exit(-1)

  # Train model
  print('Training model...')
  mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
  sess = tf.InteractiveSession()
  serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
  feature_configs = {
      'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),
  }
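  # The serving input is a batch of serialized tf.Example protos;
  # parse_example extracts the 784-float "x" feature from each one.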
  tf_example = tf.parse_example(serialized_tf_example, feature_configs)
  x = tf.identity(tf_example['x'], name='x')  # use tf.identity() to assign name
  y_ = tf.placeholder('float', shape=[None, 10])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  sess.run(tf.initialize_all_variables())
  y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
  cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  values, indices = tf.nn.top_k(y, 10)
  prediction_classes = tf.contrib.lookup.index_to_string(
      tf.to_int64(indices),
      mapping=tf.constant([str(i) for i in range(10)]))
  for _ in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
  print('training accuracy %g' % sess.run(accuracy,
                                          feed_dict={x: mnist.test.images,
                                                     y_: mnist.test.labels}))
  print('Done training!')

  # Export model
  # WARNING(break-tutorial-inline-code): The following code snippet is
  # in-lined in tutorials, please update tutorial documents accordingly
  # whenever code changes.
  export_path = sys.argv[-1]
  print('Exporting trained model to %s' % export_path)
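  # Grouping the table initializer into init_op ensures the lookup table
  # behind index_to_string is initialized when the exported model is loaded.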
  init_op = tf.group(tf.initialize_all_tables(), name='init_op')
  saver = tf.train.Saver(sharded=True)
  model_exporter = exporter.Exporter(saver)
  model_exporter.init(
      sess.graph.as_graph_def(),
      init_op=init_op,
      default_graph_signature=exporter.classification_signature(
          input_tensor=serialized_tf_example,
          classes_tensor=prediction_classes,
          scores_tensor=values),
      named_graph_signatures={
          'inputs': exporter.generic_signature({'images': x}),
          'outputs': exporter.generic_signature({'scores': y})})
  model_exporter.export(export_path, tf.constant(FLAGS.export_version), sess)
  print('Done exporting!')
Example #48


external_x = tf.placeholder(tf.string)
x = convert_external_inputs(external_x)
y = inference(x)

saver = tf.train.Saver()

with tf.Session() as sess:
    # Restore variables from training checkpoints.
    ckpt = tf.train.get_checkpoint_state(sys.argv[1])
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, sys.argv[1] + "/" + ckpt.model_checkpoint_path)
    else:
        print("Checkpoint file not found")
        raise SystemExit

    scores, class_ids = tf.nn.top_k(y, NUM_CLASSES_TO_RETURN)

    # for simplification we will just return the class ids, we should return the names instead
    classes = tf.contrib.lookup.index_to_string(tf.to_int64(class_ids),
        mapping=tf.constant([str(i) for i in range(1001)]))

    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=external_x, classes_tensor=classes, scores_tensor=scores)
    model_exporter.init(default_graph_signature=signature, init_op=tf.initialize_all_tables())
    model_exporter.export(sys.argv[1] + "/export", tf.constant(time.time()), sess)

'''
    Args:
      logits: A `Tensor`. Must be one of the following types: `float32`, `float64`.
        2-D with shape `[batch_size, num_classes]`.
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
'''

# To implement cross-entropy
# placeholder to input the correct answers
y_ = tf.placeholder(tf.float32, [None, mnist.train.labels.shape[1]])

cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))

# training setup
# learning rate = 0.5
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# initialize the variables we created
init = tf.initialize_all_variables()

# launch the model in a Session
sess = tf.Session()
sess.run(init)

for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
Example #50
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    # TODO(b/27776734): Add batching support.
    jpegs = tf.placeholder(tf.string, shape=(1))
    image_buffer = tf.squeeze(jpegs, [0])
    # Decode the string as an RGB JPEG.
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    # After this point, all image pixels reside in [0,1)
    # until the very end, when they're rescaled to (-1, 1).  The various
    # adjust_* ops all require this range for dtype float.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)
    # Resize the image to the original height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image,
                                     [FLAGS.image_size, FLAGS.image_size],
                                     align_corners=False)
    image = tf.squeeze(image, [0])
    # Finally, rescale to [-1,1] instead of [0, 1)
    image = tf.sub(image, 0.5)
    image = tf.mul(image, 2.0)
    images = tf.expand_dims(image, 0)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    class_tensor = tf.constant([texts[s] for s in synsets])

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      model_exporter = exporter.Exporter(saver)
      signature = exporter.classification_signature(
          input_tensor=jpegs, classes_tensor=classes, scores_tensor=values)
      model_exporter.init(default_graph_signature=signature, init_op=init_op)
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)
Example #51
def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_export.py [--training_iteration=x] '
          '[--model_version=y] export_dir')
    sys.exit(-1)
  if FLAGS.training_iteration <= 0:
    print 'Please specify a positive value for training iteration.'
    sys.exit(-1)
  if FLAGS.model_version <= 0:
    print 'Please specify a positive value for version number.'
    sys.exit(-1)

  # Train model
  print 'Training model...'
  mnist = mnist_input_data.read_data_sets(FLAGS.work_dir, one_hot=True)
  sess = tf.InteractiveSession()
  serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
  feature_configs = {'x': tf.FixedLenFeature(shape=[784], dtype=tf.float32),}
  tf_example = tf.parse_example(serialized_tf_example, feature_configs)
  x = tf.identity(tf_example['x'], name='x')  # use tf.identity() to assign name
  y_ = tf.placeholder('float', shape=[None, 10])
  w = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  sess.run(tf.initialize_all_variables())
  y = tf.nn.softmax(tf.matmul(x, w) + b, name='y')
  cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
  train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
  values, indices = tf.nn.top_k(y, 10)
  prediction_classes = tf.contrib.lookup.index_to_string(
      tf.to_int64(indices), mapping=tf.constant([str(i) for i in xrange(10)]))
  for _ in range(FLAGS.training_iteration):
    batch = mnist.train.next_batch(50)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
  print 'training accuracy %g' % sess.run(
      accuracy, feed_dict={x: mnist.test.images,
                           y_: mnist.test.labels})
  print 'Done training!'

  # Export model
  # WARNING(break-tutorial-inline-code): The following code snippet is
  # in-lined in tutorials, please update tutorial documents accordingly
  # whenever code changes.
  export_path_base = sys.argv[-1]
  export_path = os.path.join(
      compat.as_bytes(export_path_base),
      compat.as_bytes(str(FLAGS.model_version)))
  print 'Exporting trained model to', export_path
  builder = saved_model_builder.SavedModelBuilder(export_path)

  # Build the signature_def_map.
  classification_inputs = utils.build_tensor_info(serialized_tf_example)
  classification_outputs_classes = utils.build_tensor_info(prediction_classes)
  classification_outputs_scores = utils.build_tensor_info(values)

  classification_signature = signature_def_utils.build_signature_def(
      inputs={signature_constants.CLASSIFY_INPUTS: classification_inputs},
      outputs={
          signature_constants.CLASSIFY_OUTPUT_CLASSES:
              classification_outputs_classes,
          signature_constants.CLASSIFY_OUTPUT_SCORES:
              classification_outputs_scores
      },
      method_name=signature_constants.CLASSIFY_METHOD_NAME)

  tensor_info_x = utils.build_tensor_info(x)
  tensor_info_y = utils.build_tensor_info(y)

  prediction_signature = signature_def_utils.build_signature_def(
      inputs={'images': tensor_info_x},
      outputs={'scores': tensor_info_y},
      method_name=signature_constants.PREDICT_METHOD_NAME)

  legacy_init_op = tf.group(tf.initialize_all_tables(), name='legacy_init_op')
  builder.add_meta_graph_and_variables(
      sess, [tag_constants.SERVING],
      signature_def_map={
          'predict_images':
              prediction_signature,
          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
              classification_signature,
      },
      legacy_init_op=legacy_init_op)

  builder.save()

  print 'Done exporting!'
def main(_):
  if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
    print('Usage: mnist_dist_export.py '
          '[--model_version=y] [--checkpoint_path=checkpoint_store_path] export_dir')
    sys.exit(-1)
  if FLAGS.model_version <= 0:
    print 'Please specify a positive value for exported serveable version number.'
    sys.exit(-1)
  if not FLAGS.checkpoint_path:
    print 'Please specify the correct path where checkpoints stored locally or in OSS.'
    sys.exit(-1)

  checkpoint_basename = "model.ckpt"
  default_meta_graph_suffix = '.meta'
  ckpt_path = os.path.join(FLAGS.checkpoint_path, checkpoint_basename + '-0')
  meta_graph_file = ckpt_path + default_meta_graph_suffix
  with tf.Session() as new_sess:
#   with new_sess.graph.as_default():
  #  tf.reset_default_graph()
  #  new_sess.run(tf.initialize_all_variables())
    new_saver = tf.train.import_meta_graph(meta_graph_file, clear_devices=True) #'/test/mnistoutput/ckpt.meta')
    new_saver.restore(new_sess, ckpt_path) #'/test/mnistoutput/ckpt')
    new_graph = tf.get_default_graph()
    new_x = new_graph.get_tensor_by_name('input/x-input:0')
    print(new_x)
    new_y = new_graph.get_tensor_by_name('cross_entropy/logits:0')
    print(new_y)

    # Export model
    # WARNING(break-tutorial-inline-code): The following code snippet is
    # in-lined in tutorials, please update tutorial documents accordingly
    # whenever code changes.
    export_path_base = sys.argv[-1]
    export_path = os.path.join(
      compat.as_bytes(export_path_base),
      compat.as_bytes(str(FLAGS.model_version)))
    print 'Exporting trained model to', export_path
    builder = saved_model_builder.SavedModelBuilder(export_path)

    # Build the signature_def_map.
    tensor_info_x = utils.build_tensor_info(new_x)
    tensor_info_y = utils.build_tensor_info(new_y)

    prediction_signature = signature_def_utils.build_signature_def(
      inputs={'images': tensor_info_x},
      outputs={'scores': tensor_info_y},
      method_name=signature_constants.PREDICT_METHOD_NAME)

    legacy_init_op = tf.group(tf.initialize_all_tables(), name='legacy_init_op')

    builder.add_meta_graph_and_variables(
      new_sess, [tag_constants.SERVING],
      signature_def_map={
          'predict_images':
              prediction_signature,
      },
      legacy_init_op=legacy_init_op,
      clear_devices=True)
    builder.save()

  print 'Done exporting!'
def main():
  # Get hyperparameters
  if FLAGS.enable_colored_log:
    import coloredlogs
    coloredlogs.install()
  logging.basicConfig(level=logging.INFO)
  FEATURE_SIZE = FLAGS.feature_size
  LABEL_SIZE = FLAGS.label_size
  EPOCH_NUMBER = FLAGS.epoch_number
  if EPOCH_NUMBER <= 0:
    EPOCH_NUMBER = None
  BATCH_THREAD_NUMBER = FLAGS.batch_thread_number
  MIN_AFTER_DEQUEUE = FLAGS.min_after_dequeue
  BATCH_CAPACITY = BATCH_THREAD_NUMBER * FLAGS.batch_size + MIN_AFTER_DEQUEUE
  MODE = FLAGS.mode
  MODEL = FLAGS.model
  OPTIMIZER = FLAGS.optimizer
  CHECKPOINT_PATH = FLAGS.checkpoint_path
  if not CHECKPOINT_PATH.startswith("fds://") and not os.path.exists(
      CHECKPOINT_PATH):
    os.makedirs(CHECKPOINT_PATH)
  CHECKPOINT_FILE = CHECKPOINT_PATH + "/checkpoint.ckpt"
  LATEST_CHECKPOINT = tf.train.latest_checkpoint(CHECKPOINT_PATH)
  OUTPUT_PATH = FLAGS.output_path
  if not OUTPUT_PATH.startswith("fds://") and not os.path.exists(OUTPUT_PATH):
    os.makedirs(OUTPUT_PATH)
  pprint.PrettyPrinter().pprint(FLAGS.__flags)

  # Read TFRecords files for training
  def read_and_decode(filename_queue):
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    return serialized_example

  # Read TFRecords files for training
  filename_queue = tf.train.string_input_producer(
      tf.train.match_filenames_once(FLAGS.train_tfrecords_file),
      num_epochs=EPOCH_NUMBER)
  serialized_example = read_and_decode(filename_queue)
  batch_serialized_example = tf.train.shuffle_batch(
      [serialized_example],
      batch_size=FLAGS.batch_size,
      num_threads=BATCH_THREAD_NUMBER,
      capacity=BATCH_CAPACITY,
      min_after_dequeue=MIN_AFTER_DEQUEUE)
  features = tf.parse_example(batch_serialized_example,
                              features={
                                  "label": tf.FixedLenFeature([], tf.float32),
                                  "ids": tf.VarLenFeature(tf.int64),
                                  "values": tf.VarLenFeature(tf.float32),
                              })
  batch_labels = features["label"]
  batch_ids = features["ids"]
  batch_values = features["values"]

  # Read TFRecords file for validation
  validate_filename_queue = tf.train.string_input_producer(
      tf.train.match_filenames_once(FLAGS.validate_tfrecords_file),
      num_epochs=EPOCH_NUMBER)
  validate_serialized_example = read_and_decode(validate_filename_queue)
  validate_batch_serialized_example = tf.train.shuffle_batch(
      [validate_serialized_example],
      batch_size=FLAGS.validate_batch_size,
      num_threads=BATCH_THREAD_NUMBER,
      capacity=BATCH_CAPACITY,
      min_after_dequeue=MIN_AFTER_DEQUEUE)
  validate_features = tf.parse_example(
      validate_batch_serialized_example,
      features={
          "label": tf.FixedLenFeature([], tf.float32),
          "ids": tf.VarLenFeature(tf.int64),
          "values": tf.VarLenFeature(tf.float32),
      })
  validate_batch_labels = validate_features["label"]
  validate_batch_ids = validate_features["ids"]
  validate_batch_values = validate_features["values"]

  # Define the model
  input_units = FEATURE_SIZE
  output_units = LABEL_SIZE
  model_network_hidden_units = [int(i) for i in FLAGS.model_network.split()]

  def full_connect(inputs, weights_shape, biases_shape, is_train=True):
    with tf.device("/cpu:0"):
      weights = tf.get_variable("weights",
                                weights_shape,
                                initializer=tf.random_normal_initializer())
      biases = tf.get_variable("biases",
                               biases_shape,
                               initializer=tf.random_normal_initializer())
      layer = tf.matmul(inputs, weights) + biases

      if FLAGS.enable_bn and is_train:
        mean, var = tf.nn.moments(layer, axes=[0])
        scale = tf.get_variable("scale",
                                biases_shape,
                                initializer=tf.random_normal_initializer())
        shift = tf.get_variable("shift",
                                biases_shape,
                                initializer=tf.random_normal_initializer())
        layer = tf.nn.batch_normalization(layer, mean, var, shift, scale,
                                          FLAGS.bn_epsilon)
    return layer

  def sparse_full_connect(sparse_ids,
                          sparse_values,
                          weights_shape,
                          biases_shape,
                          is_train=True):
    weights = tf.get_variable("weights",
                              weights_shape,
                              initializer=tf.random_normal_initializer())
    biases = tf.get_variable("biases",
                             biases_shape,
                             initializer=tf.random_normal_initializer())
    return tf.nn.embedding_lookup_sparse(
        weights, sparse_ids, sparse_values,
        combiner="sum") + biases

  def full_connect_relu(inputs, weights_shape, biases_shape, is_train=True):
    return tf.nn.relu(full_connect(inputs, weights_shape, biases_shape,
                                   is_train))

  def customized_inference(sparse_ids, sparse_values, is_train=True):
    hidden1_units = 128
    hidden2_units = 32
    hidden3_units = 8

    with tf.variable_scope("input"):
      sparse_layer = sparse_full_connect(sparse_ids, sparse_values,
                                         [input_units, hidden1_units],
                                         [hidden1_units], is_train)
      layer = tf.nn.relu(sparse_layer)
    with tf.variable_scope("layer0"):
      layer = full_connect_relu(layer, [hidden1_units, hidden2_units],
                                [hidden2_units], is_train)
    with tf.variable_scope("layer1"):
      layer = full_connect_relu(layer, [hidden2_units, hidden3_units],
                                [hidden3_units], is_train)
    if FLAGS.enable_dropout and is_train:
      layer = tf.nn.dropout(layer, FLAGS.dropout_keep_prob)
    with tf.variable_scope("output"):
      layer = full_connect(layer, [hidden3_units, output_units],
                           [output_units], is_train)
    return layer

  def dnn_inference(sparse_ids, sparse_values, is_train=True):
    with tf.variable_scope("input"):
      sparse_layer = sparse_full_connect(
          sparse_ids, sparse_values,
          [input_units, model_network_hidden_units[0]],
          [model_network_hidden_units[0]], is_train)
      layer = tf.nn.relu(sparse_layer)

    for i in range(len(model_network_hidden_units) - 1):
      with tf.variable_scope("layer{}".format(i)):
        layer = full_connect_relu(layer, [
            model_network_hidden_units[i], model_network_hidden_units[i + 1]
        ], [model_network_hidden_units[i + 1]], is_train)

    with tf.variable_scope("output"):
      layer = full_connect(layer,
                           [model_network_hidden_units[-1], output_units],
                           [output_units], is_train)
    return layer

  def lr_inference(sparse_ids, sparse_values, is_train=True):
    with tf.variable_scope("logistic_regression"):
      layer = sparse_full_connect(sparse_ids, sparse_values,
                                  [input_units, output_units], [output_units])
    return layer

  def wide_and_deep_inference(sparse_ids, sparse_values, is_train=True):
    return lr_inference(sparse_ids, sparse_values, is_train) + dnn_inference(
        sparse_ids, sparse_values, is_train)

  def inference(sparse_ids, sparse_values, is_train=True):
    if MODEL == "dnn":
      return dnn_inference(sparse_ids, sparse_values, is_train)
    elif MODEL == "lr":
      return lr_inference(sparse_ids, sparse_values, is_train)
    elif MODEL == "wide_and_deep":
      return wide_and_deep_inference(sparse_ids, sparse_values, is_train)
    elif MODEL == "customized":
      return customized_inference(sparse_ids, sparse_values, is_train)
    else:
      logging.error("Unknown model, exit now")
      exit(1)

  logging.info("Use the model: {}, model network: {}".format(
      MODEL, FLAGS.model_network))
  logits = inference(batch_ids, batch_values, True)
  batch_labels = tf.to_int64(batch_labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=batch_labels)
  loss = tf.reduce_mean(cross_entropy, name="loss")
  global_step = tf.Variable(0, name="global_step", trainable=False)
  if FLAGS.enable_lr_decay:
    logging.info("Enable learning rate decay rate: {}".format(
        FLAGS.lr_decay_rate))
    starter_learning_rate = FLAGS.learning_rate
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step,
                                               100000,
                                               FLAGS.lr_decay_rate,
                                               staircase=True)
  else:
    learning_rate = FLAGS.learning_rate
  optimizer = get_optimizer(FLAGS.optimizer, learning_rate)
  train_op = optimizer.minimize(loss, global_step=global_step)
  tf.get_variable_scope().reuse_variables()

  # Define accuracy op for train data
  train_accuracy_logits = inference(batch_ids, batch_values, False)
  train_softmax = tf.nn.softmax(train_accuracy_logits)
  train_correct_prediction = tf.equal(
      tf.argmax(train_softmax, 1), batch_labels)
  train_accuracy = tf.reduce_mean(tf.cast(train_correct_prediction,
                                          tf.float32))

  # Define auc op for train data
  batch_labels = tf.cast(batch_labels, tf.int32)
  sparse_labels = tf.reshape(batch_labels, [-1, 1])
  derived_size = tf.shape(batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, LABEL_SIZE])
  new_train_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, train_auc = tf.contrib.metrics.streaming_auc(train_softmax,
                                                  new_train_batch_labels)

  # Define accuracy op for validate data
  validate_accuracy_logits = inference(validate_batch_ids,
                                       validate_batch_values, False)
  validate_softmax = tf.nn.softmax(validate_accuracy_logits)
  validate_batch_labels = tf.to_int64(validate_batch_labels)
  validate_correct_prediction = tf.equal(
      tf.argmax(validate_softmax, 1), validate_batch_labels)
  validate_accuracy = tf.reduce_mean(tf.cast(validate_correct_prediction,
                                             tf.float32))

  # Define auc op for validate data
  validate_batch_labels = tf.cast(validate_batch_labels, tf.int32)
  sparse_labels = tf.reshape(validate_batch_labels, [-1, 1])
  derived_size = tf.shape(validate_batch_labels)[0]
  indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  outshape = tf.stack([derived_size, LABEL_SIZE])
  new_validate_batch_labels = tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
  _, validate_auc = tf.contrib.metrics.streaming_auc(validate_softmax,
                                                     new_validate_batch_labels)

  # Define inference op
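  # The sparse input is fed as its raw components (indices, ids, values and
  # dense shape) and reassembled into SparseTensors for the embedding lookup.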
  sparse_index = tf.placeholder(tf.int64, [None, 2])
  sparse_ids = tf.placeholder(tf.int64, [None])
  sparse_values = tf.placeholder(tf.float32, [None])
  sparse_shape = tf.placeholder(tf.int64, [2])
  inference_ids = tf.SparseTensor(sparse_index, sparse_ids, sparse_shape)
  inference_values = tf.SparseTensor(sparse_index, sparse_values, sparse_shape)
  inference_logits = inference(inference_ids, inference_values, False)
  inference_softmax = tf.nn.softmax(inference_logits)
  inference_op = tf.argmax(inference_softmax, 1)
  keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1])
  keys = tf.identity(keys_placeholder)
  model_signature = {
      "inputs": exporter.generic_signature({"keys": keys_placeholder,
                                            "indexs": sparse_index,
                                            "ids": sparse_ids,
                                            "values": sparse_values,
                                            "shape": sparse_shape}),
      "outputs": exporter.generic_signature({"keys": keys,
                                             "softmax": inference_softmax,
                                             "prediction": inference_op})
  }

  # Initialize saver and summary
  saver = tf.train.Saver()
  tf.summary.scalar("loss", loss)
  tf.summary.scalar("train_accuracy", train_accuracy)
  tf.summary.scalar("train_auc", train_auc)
  tf.summary.scalar("validate_accuracy", validate_accuracy)
  tf.summary.scalar("validate_auc", validate_auc)
  summary_op = tf.summary.merge_all()
  init_op = [tf.global_variables_initializer(),
             tf.local_variables_initializer()]

  # Create session to run
  with tf.Session() as sess:
    logging.info("Start to run with mode: {}".format(MODE))
    writer = tf.summary.FileWriter(OUTPUT_PATH, sess.graph)
    sess.run(init_op)

    if MODE == "train":
      # Restore session and start queue runner
      restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT)
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(coord=coord, sess=sess)
      start_time = datetime.datetime.now()

      try:
        while not coord.should_stop():
          _, loss_value, step = sess.run([train_op, loss, global_step])

          # Print state while training
          if step % FLAGS.steps_to_validate == 0:
            train_accuracy_value, train_auc_value, validate_accuracy_value, validate_auc_value, summary_value = sess.run(
                [train_accuracy, train_auc, validate_accuracy, validate_auc,
                 summary_op])
            end_time = datetime.datetime.now()
            logging.info(
                "[{}] Step: {}, loss: {}, train_acc: {}, train_auc: {}, valid_acc: {}, valid_auc: {}".format(
                    end_time - start_time, step, loss_value,
                    train_accuracy_value, train_auc_value,
                    validate_accuracy_value, validate_auc_value))
            writer.add_summary(summary_value, step)
            saver.save(sess, CHECKPOINT_FILE, global_step=step)
            start_time = end_time
      except tf.errors.OutOfRangeError:
        # Export the model after training
        export_model(sess, saver, model_signature, FLAGS.model_path,
                     FLAGS.model_version)
      finally:
        coord.request_stop()
      coord.join(threads)

    elif MODE == "export":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Export the model
      export_model(sess, saver, model_signature, FLAGS.model_path,
                   FLAGS.model_version)

    elif MODE == "savedmodel":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)

      logging.info("Export the saved model to {}".format(
          FLAGS.saved_model_path))
      export_path_base = FLAGS.saved_model_path
      export_path = os.path.join(
          compat.as_bytes(export_path_base),
          compat.as_bytes(str(FLAGS.model_version)))

      model_signature = signature_def_utils.build_signature_def(
          inputs={
              "keys": utils.build_tensor_info(keys_placeholder),
              "indexs": utils.build_tensor_info(sparse_index),
              "ids": utils.build_tensor_info(sparse_ids),
              "values": utils.build_tensor_info(sparse_values),
              "shape": utils.build_tensor_info(sparse_shape)
          },
          outputs={
              "keys": utils.build_tensor_info(keys),
              "softmax": utils.build_tensor_info(inference_softmax),
              "prediction": utils.build_tensor_info(inference_op)
          },
          method_name=signature_constants.PREDICT_METHOD_NAME)

      try:
        builder = saved_model_builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            sess,
            [tag_constants.SERVING],
            clear_devices=True,
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                model_signature,
            },
            #legacy_init_op=legacy_init_op)
            legacy_init_op=tf.group(tf.initialize_all_tables(),
                                    name="legacy_init_op"))

        builder.save()
      except Exception as e:
        logging.error("Fail to export saved model, exception: {}".format(e))

    elif MODE == "inference":
      if not restore_session_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint found, exit now")
        exit(1)

      # Load inference test data
      inference_result_file_name = "./inference_result.txt"
      inference_test_file_name = "./data/a8a_test.libsvm"
      labels = []
      feature_ids = []
      feature_values = []
      feature_index = []
      ins_num = 0
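      # Each libsvm line has the form "label id:value id:value ..."; collect
      # the ids and values together with [row, position] indices so the batch
      # can be fed as a sparse tensor.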
      for line in open(inference_test_file_name, "r"):
        tokens = line.split(" ")
        labels.append(int(tokens[0]))
        feature_num = 0
        for feature in tokens[1:]:
          feature_id, feature_value = feature.split(":")
          feature_ids.append(int(feature_id))
          feature_values.append(float(feature_value))
          feature_index.append([ins_num, feature_num])
          feature_num += 1
        ins_num += 1

      # Run inference
      start_time = datetime.datetime.now()
      prediction, prediction_softmax = sess.run(
          [inference_op, inference_softmax],
          feed_dict={sparse_index: feature_index,
                     sparse_ids: feature_ids,
                     sparse_values: feature_values,
                     sparse_shape: [ins_num, FEATURE_SIZE]})

      end_time = datetime.datetime.now()

      # Compute accuracy
      label_number = len(labels)
      correct_label_number = 0
      for i in range(label_number):
        if labels[i] == prediction[i]:
          correct_label_number += 1
      accuracy = float(correct_label_number) / label_number

      # Compute auc
      expected_labels = np.array(labels)
      predict_labels = prediction_softmax[:, 0]
      fpr, tpr, thresholds = metrics.roc_curve(expected_labels,
                                               predict_labels,
                                               pos_label=0)
      auc = metrics.auc(fpr, tpr)
      logging.info("[{}] Inference accuracy: {}, auc: {}".format(
          end_time - start_time, accuracy, auc))

      # Save result into the file
      np.savetxt(inference_result_file_name, prediction_softmax, delimiter=",")
      logging.info("Save result to file: {}".format(
          inference_result_file_name))
Example #54
def export():
  # Create index->synset mapping
  synsets = []
  with open(SYNSET_FILE) as f:
    synsets = f.read().splitlines()
  # Create synset->metadata mapping
  texts = {}
  with open(METADATA_FILE) as f:
    for line in f.read().splitlines():
      parts = line.split('\t')
      assert len(parts) == 2
      texts[parts[0]] = parts[1]

  with tf.Graph().as_default():
    # Build inference model.
    # Please refer to Tensorflow inception model for details.

    # Input transformation.
    jpegs = tf.placeholder(tf.string)
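    # map_fn applies preprocess_image to every JPEG string in the batch,
    # producing one float32 image tensor per element.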
    images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

    # Run inference.
    logits, _ = inception_model.inference(images, NUM_CLASSES + 1)

    # Transform output to topK result.
    values, indices = tf.nn.top_k(logits, NUM_TOP_CLASSES)

    # Create a constant string Tensor where the i'th element is
    # the human readable class description for the i'th index.
    # Note that the 0th index is an unused background class
    # (see inception model definition code).
    class_descriptions = ['unused background']
    for s in synsets:
      class_descriptions.append(texts[s])
    class_tensor = tf.constant(class_descriptions)

    classes = tf.contrib.lookup.index_to_string(tf.to_int64(indices),
                                                mapping=class_tensor)

    # Restore variables from training checkpoint.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception_model.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)
    with tf.Session() as sess:
      # Restore variables from training checkpoints.
      ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        #   /my-favorite-path/imagenet_train/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' %
              (ckpt.model_checkpoint_path, global_step))
      else:
        print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
        return

      # Export inference model.
      init_op = tf.group(tf.initialize_all_tables(), name='init_op')
      model_exporter = exporter.Exporter(saver)
      model_exporter.init(init_op=init_op, named_graph_signatures={
          'inputs': exporter.generic_signature({'images': jpegs}),
          'outputs': exporter.generic_signature({'classes': classes,
                                                 'scores': values})})
      model_exporter.export(FLAGS.export_dir, tf.constant(global_step), sess)
      print('Successfully exported model to %s' % FLAGS.export_dir)