  def test_normal_output_without_signals(self):
    num_samples = 4
    batch_size = 2

    params = {'batch_size': batch_size}
    input_fn, (a, b) = make_input_fn(num_samples=num_samples)

    with ops.Graph().as_default():
      dataset = input_fn(params)
      features = dataset_lib.make_one_shot_iterator(dataset).get_next()

      # With tf.data.Dataset.batch, the batch dimension is None, i.e., a dynamic shape.
      self.assertIsNone(features['a'].shape.as_list()[0])

      with session.Session() as sess:
        result = sess.run(features)
        self.assertAllEqual(a[:batch_size], result['a'])
        self.assertAllEqual(b[:batch_size], result['b'])

        # This run should work as num_samples / batch_size = 2.
        result = sess.run(features)
        self.assertAllEqual(a[batch_size:num_samples], result['a'])
        self.assertAllEqual(b[batch_size:num_samples], result['b'])

        with self.assertRaises(errors.OutOfRangeError):
          # Both available batches have been consumed, so this run should raise OutOfRangeError.
          sess.run(features)
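
The make_input_fn helper used above is defined elsewhere in the test module and is not part of this listing. A minimal sketch of what it might look like, assuming it builds a two-feature dataset from NumPy arrays and batches it by params['batch_size'] (the array contents and dtypes here are assumptions, not the original helper):

import numpy as np
import tensorflow as tf


def make_input_fn(num_samples):
  # Toy feature arrays (assumed); the test only checks they come back in order.
  a = np.arange(num_samples, dtype=np.int64)
  b = np.arange(num_samples, dtype=np.int64) * 2

  def input_fn(params):
    batch_size = params['batch_size']
    dataset = tf.data.Dataset.from_tensor_slices({'a': a, 'b': b})
    # No drop_remainder, so the static batch dimension stays None.
    return dataset.batch(batch_size)

  return input_fn, (a, b)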
Example No. 4
  def _EvalInputFn():
    mnist_x, mnist_y = test_data
    dataset = data.Dataset.from_tensor_slices((mnist_x, mnist_y))
    dataset = dataset.apply(
        data.experimental.map_and_batch(
            map_func=_PreprocessFn,
            batch_size=batch_size,
            num_parallel_calls=8))
    dataset = dataset.repeat(count=1)
    iterator = data.make_one_shot_iterator(dataset)
    features, labels = iterator.get_next()
    return features, labels
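
_PreprocessFn, test_data, and batch_size come from the enclosing test and are not shown in this listing. A rough sketch of a preprocessing function in the same spirit, assuming MNIST images are scaled to [0, 1] floats and labels cast to int32 (an assumption, not the original function):

import tensorflow as tf


def _PreprocessFn(x, y):
  # Hypothetical preprocessing: scale pixel values and cast the label.
  x = tf.cast(x, tf.float32) / 255.0
  y = tf.cast(y, tf.int32)
  return x, y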
Example No. 6
  def test_nested_model_with_tensor_input(self):
    gpus = 2
    input_dim = 10
    shape = (input_dim,)
    num_samples = 16
    num_classes = 10

    if not check_if_compatible_devices(gpus=gpus):
      return

    with self.cached_session():
      input_shape = (num_samples,) + shape
      x_train = np.random.randint(0, 255, input_shape)
      y_train = np.random.randint(0, num_classes, (input_shape[0],))
      keras.backend.set_learning_phase(True)

      y_train = keras.utils.to_categorical(y_train, num_classes)

      x_train = x_train.astype('float32')
      y_train = y_train.astype('float32')

      dataset = data.Dataset.from_tensor_slices((x_train, y_train))
      dataset = dataset.repeat()
      dataset = dataset.batch(4)
      iterator = data.make_one_shot_iterator(dataset)

      inputs, targets = iterator.get_next()

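      # Wire the iterator's output tensor directly into the Keras graph as a
      # symbolic input; data then flows from the dataset without feed_dict.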
      input_tensor = keras.layers.Input(tensor=inputs)

      model = keras.models.Sequential()
      model.add(keras.layers.Dense(3,
                                   input_shape=(input_dim,)))
      model.add(keras.layers.Dense(num_classes))

      output = model(input_tensor)
      outer_model = keras.Model(input_tensor, output)
      parallel_model = keras.utils.multi_gpu_model(outer_model, gpus=gpus)

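      # target_tensors routes the iterator's label tensor into the loss, so
      # fit() below only needs epochs and steps_per_epoch, not x/y arrays.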
      parallel_model.compile(
          loss='categorical_crossentropy',
          optimizer=keras.optimizers.RMSprop(lr=0.0001, decay=1e-6),
          metrics=['accuracy'],
          target_tensors=[targets])
      parallel_model.fit(epochs=1, steps_per_epoch=3)
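
check_if_compatible_devices is a helper from the surrounding test module and is not shown here. A minimal sketch, assuming it only verifies that enough GPUs are present for multi_gpu_model and assuming a TensorFlow version that provides tf.config.list_physical_devices (the real helper may check more):

import tensorflow as tf


def check_if_compatible_devices(gpus=2):
  # multi_gpu_model replicates the model onto `gpus` devices, so the test is
  # skipped unless at least that many physical GPUs are available.
  return len(tf.config.list_physical_devices('GPU')) >= gpus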