def tensorflow_hello_world(dataset_url='file:///tmp/carbon_pycarbon_dataset/'):
    # Example: tf_tensors will return tensors with dataset data
    with make_carbon_reader(dataset_url) as reader:
        tensor = tf_tensors(reader)
        with tf.Session() as sess:
            sample = sess.run(tensor)
            print(sample.id)

    with make_reader(dataset_url, is_batch=False) as reader:
        tensor = make_tensor(reader)
        with tf.Session() as sess:
            sample = sess.run(tensor)
            print(sample.id)

    # Example: use tf.data.Dataset API
    with make_carbon_reader(dataset_url) as reader:
        dataset = make_pycarbon_dataset(reader)
        iterator = dataset.make_one_shot_iterator()
        tensor = iterator.get_next()
        with tf.Session() as sess:
            sample = sess.run(tensor)
            print(sample.id)

    with make_reader(dataset_url, is_batch=False) as reader:
        dataset = make_dataset(reader)
        iterator = dataset.make_one_shot_iterator()
        tensor = iterator.get_next()
        with tf.Session() as sess:
            sample = sess.run(tensor)
            print(sample.id)
def tensorflow_hello_world(dataset_url='file:///tmp/carbon_external_dataset'):
    # Example: tf_tensors will return tensors with dataset data
    with make_batch_carbon_reader(dataset_url) as reader:
        tensor = tf_tensors(reader)
        with tf.Session() as sess:
            # Because we are using make_batch_carbon_reader(), each read returns a batch of rows instead of a single row
            batched_sample = sess.run(tensor)
            print("id batch: {0}".format(batched_sample.id))

    with make_reader(dataset_url) as reader:
        tensor = make_tensor(reader)
        with tf.Session() as sess:
            # Because make_reader() returns batches by default, each read returns a batch of rows instead of a single row
            batched_sample = sess.run(tensor)
            print("id batch: {0}".format(batched_sample.id))

    # Example: use tf.data.Dataset API
    with make_batch_carbon_reader(dataset_url) as reader:
        dataset = make_pycarbon_dataset(reader)
        iterator = dataset.make_one_shot_iterator()
        tensor = iterator.get_next()
        with tf.Session() as sess:
            batched_sample = sess.run(tensor)
            print("id batch: {0}".format(batched_sample.id))

    with make_reader(dataset_url) as reader:
        dataset = make_dataset(reader)
        iterator = dataset.make_one_shot_iterator()
        tensor = iterator.get_next()
        with tf.Session() as sess:
            batched_sample = sess.run(tensor)
            print("id batch: {0}".format(batched_sample.id))
Example #3
def train_and_test(dataset_url, num_epochs, batch_size, evaluation_interval):
    """
  Train a model for training iterations with a batch size batch_size, printing accuracy every log_interval.
  :param dataset_url: The MNIST dataset url.
  :param num_epochs: The number of epochs to train for.
  :param batch_size: The batch size for training.
  :param evaluation_interval: The interval used to print the accuracy.
  :return:
  """

    with make_reader(os.path.join(dataset_url, 'train'),
                     num_epochs=num_epochs) as train_reader:
        with make_reader(os.path.join(dataset_url, 'test'),
                         num_epochs=num_epochs) as test_reader:
            # Create the model
            x = tf.placeholder(tf.float32, [None, 784])
            w = tf.Variable(tf.zeros([784, 10]))
            b = tf.Variable(tf.zeros([10]))
            y = tf.matmul(x, w) + b

            # Define loss and optimizer
            y_ = tf.placeholder(tf.int64, [None])

            # Define the loss function
            cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y_,
                                                                   logits=y)

            train_step = tf.train.GradientDescentOptimizer(0.5).minimize(
                cross_entropy)

            correct_prediction = tf.equal(tf.argmax(y, 1), y_)

            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

            train_dataset = make_dataset(train_reader) \
              .apply(tf.data.experimental.unbatch()) \
              .batch(batch_size) \
              .map(decode)

            train_iterator = train_dataset.make_one_shot_iterator()
            label, image = train_iterator.get_next()

            test_dataset = make_dataset(test_reader) \
              .apply(tf.data.experimental.unbatch()) \
              .batch(batch_size) \
              .map(decode)

            test_iterator = test_dataset.make_one_shot_iterator()
            test_label, test_image = test_iterator.get_next()

            # Train
            print(
                'Training model for {0} epochs with batch size {1} and evaluation interval {2}'
                .format(num_epochs, batch_size, evaluation_interval))

            i = 0
            with tf.Session() as sess:
                sess.run([
                    tf.local_variables_initializer(),
                    tf.global_variables_initializer(),
                ])

                try:
                    while True:
                        cur_label, cur_image = sess.run([label, image])

                        sess.run([train_step],
                                 feed_dict={
                                     x: cur_image,
                                     y_: cur_label
                                 })

                        if i % evaluation_interval == 0:
                            test_cur_label, test_cur_image = sess.run(
                                [test_label, test_image])
                            print(
                                'After {0} training iterations, the accuracy of the model is: {1:.2f}'
                                .format(
                                    i,
                                    sess.run(accuracy,
                                             feed_dict={
                                                 x: test_cur_image,
                                                 y_: test_cur_label
                                             })))
                        i += 1

                except tf.errors.OutOfRangeError:
                    print("Finish! the number is " + str(i))