Code Example #1
def train_input_fn():
  """Prepare data for training."""

  # When choosing shuffle buffer sizes, larger sizes result in better
  # randomness, while smaller sizes use less memory. MNIST is a small
  # enough dataset that we can easily shuffle the full epoch.
  ds = dataset.train(flags_obj.data_dir)

  def invert(image, label):
    return (image * -1.0) + 1.0, label

  def brightness(image, label):
    return tf.image.random_brightness(image, max_delta=0.2), label

  if INVERT:
    inverted = ds.map(invert)
    ds = ds.concatenate(inverted)

  if BRIGHTNESS:
    ds = ds.concatenate(ds.map(brightness))

  ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)

  # Iterate through the dataset a set number (`epochs_between_evals`) of times
  # during each training session.
  ds = ds.repeat(flags_obj.epochs_between_evals)
  return ds
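
The two `concatenate` branches each double the dataset: with both `INVERT` and `BRIGHTNESS` enabled, every image ends up in four variants (original, inverted, and a brightness-jittered copy of each). A minimal, self-contained sketch of the same pattern on toy tensors (the dummy data and the TF 2 eager API are assumptions, not part of the original snippet):

import tensorflow as tf

images = tf.zeros([8, 28, 28, 1])  # toy stand-in for MNIST images
labels = tf.zeros([8], dtype=tf.int64)
ds = tf.data.Dataset.from_tensor_slices((images, labels))

# Invert, then concatenate: 8 -> 16 elements.
ds = ds.concatenate(ds.map(lambda i, l: ((i * -1.0) + 1.0, l)))
# Brightness-jitter the doubled set and concatenate again: 16 -> 32 elements.
ds = ds.concatenate(
    ds.map(lambda i, l: (tf.image.random_brightness(i, max_delta=0.2), l)))

print(tf.data.experimental.cardinality(ds).numpy())  # -> 32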
Code Example #2
def train_input_fn():
    ds = mnist_dataset.train('/tmp/mnist')
    ds = ds.cache()
    ds = ds.shuffle(buffer_size=50000)
    ds = ds.repeat(5)
    # Do not call `ds.batch(batch_size)` here if `batch_size` is a
    # hyperparameter; batching must be handled in `BaseParamSearch`.
    return ds
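
Since batching is deferred to the caller, here is a hypothetical sketch of how a search loop such as `BaseParamSearch` (not shown in the source) might batch per trial when `batch_size` is one of the hyperparameters being searched; the candidate values are illustrative:

for batch_size in (32, 64, 128):  # illustrative candidate values
    trial_ds = train_input_fn().batch(batch_size)
    # ... build and train a model on `trial_ds` for this trial ...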
Code Example #3
def train_input_fn():
    """Prepare data for training."""

    # When choosing shuffle buffer sizes, larger sizes result in better
    # randomness, while smaller sizes use less memory. MNIST is a small
    # enough dataset that we can easily shuffle the full epoch.
    ds = dataset.train(FLAGS.data_dir)
    # ds_batched = ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size)
    ds_batched = ds.cache().batch(FLAGS.batch_size)
    # Iterate through the dataset a set number (`epochs_between_evals`) of times
    # during each training session.
    ds = ds_batched.repeat(FLAGS.epochs_between_evals)
    return ds
Code Example #4
def train_input_fn(data_dir, params):
    """Train input function for the MNIST dataset.

    Args:
        data_dir: (string) path to the data directory
        params: (dict) contains hyperparameters of the model
    """
    dataset = mnist_dataset.train(data_dir)
    dataset = dataset.shuffle(
        params['train_size'])  # whole dataset into the buffer
    dataset = dataset.repeat(
        params['num_epochs'])  # repeat for multiple epochs
    dataset = dataset.batch(params['batch_size'])
    dataset = dataset.prefetch(
        1)  # make sure you always have one batch ready to serve
    return dataset
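
A possible invocation of this input function; the dictionary values below are illustrative defaults, not taken from the original source:

params = {
    'train_size': 60000,  # full MNIST training set fits in the shuffle buffer
    'num_epochs': 10,
    'batch_size': 32,
}
train_ds = train_input_fn('/tmp/mnist', params)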
Code Example #5
def make_datasets_unbatched():
    # Scale MNIST pixel values from [0, 255] to [0., 1.].
    if tf.__version__.startswith('2'):
        import tensorflow_datasets as tfds

        def scale(image, label):
            image = tf.cast(image, tf.float32)
            image /= 255
            return image, label

        tfds.disable_progress_bar()
        datasets, info = tfds.load(name='mnist',
                                   with_info=True,
                                   as_supervised=True)

        return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE)
    else:
        import mnist_dataset as dataset
        ds = dataset.train('/bg/mnist')
        ds = ds.cache().shuffle(buffer_size=50000)
        return ds
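
As the function name suggests, callers are expected to batch the returned dataset themselves. A usage sketch under that assumption (`BUFFER_SIZE` must be defined before the TF 2 branch runs, and the batch size here is a placeholder):

BUFFER_SIZE = 10000  # assumed shuffle-buffer constant used above
train_ds = make_datasets_unbatched().batch(64)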
Code Example #6
def train_data():
    data = dataset.train(FLAGS.data_dir)
    data = data.cache()
    data = data.batch(FLAGS.batch_size)
    return data
Code Example #7
    def forward(self, x, labels, train):
        with tf.GradientTape() as tape:
            y = self.input(x)
            for layer in self.layers:
                y = layer(y, train=train)
            loss = tf.reduce_mean(
                tf.losses.sparse_softmax_cross_entropy(labels=labels,
                                                       logits=y.outputs))
        grads = tape.gradient(loss, self.layers[-1].weights)
        return y, grads, loss

    def update(self, grads):
        self.optimiser.apply_gradients(zip(grads, self.layers[-1].weights))


train_ds = mnist_dataset.train("../data/")
test_ds = mnist_dataset.test("../data/").batch(32)

network = Net()

log_interval = 1000

for epoch in range(3):

    start_time = time.time()

    # Training
    train_ds_shuffle = train_ds.shuffle(60000).batch(32)
    for (batch, (images, labels)) in enumerate(train_ds_shuffle):
        logits, grads, loss = network.forward(images, labels, train=True)
        # Apply the computed gradients, then log periodically.
        network.update(grads)
        if batch % log_interval == 0:
            print('Epoch {} batch {}: loss {:.4f}'.format(
                epoch, batch, loss.numpy()))

    print('Epoch {} finished in {:.2f}s'.format(epoch, time.time() - start_time))