def test_evaluate_generator_method(self, model_type):
    """Smoke-test `evaluate_generator` across worker/multiprocessing modes."""
    builder = (testing_utils.get_small_sequential_mlp
               if model_type == 'sequential'
               else testing_utils.get_small_functional_mlp)
    model = builder(num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer='sgd',
        metrics=['mae', metrics_module.CategoricalAccuracy()])
    model.summary()

    # Multiprocessing with two workers.
    model.evaluate_generator(
        custom_generator(), steps=5, max_queue_size=10, workers=2,
        verbose=1, use_multiprocessing=True)
    # Threaded execution (default worker count).
    model.evaluate_generator(
        custom_generator(), steps=5, max_queue_size=10,
        use_multiprocessing=False)
    # In-line execution on the main thread (workers=0).
    model.evaluate_generator(
        custom_generator(), steps=5, max_queue_size=10,
        use_multiprocessing=False, workers=0)
  def test_dataset_input_shape_validation(self):
    """Training on a dataset with mismatched element shapes raises ValueError."""
    with self.cached_session():
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
      model.compile(optimizer='rmsprop', loss='mse')

      # User forgets to batch the dataset: per-sample elements are fed
      # where batches are expected.
      unbatched = dataset_ops.Dataset.from_tensor_slices(
          (np.zeros((10, 3)), np.zeros((10, 4)))).repeat(100)
      with self.assertRaisesRegexp(
          ValueError,
          r'expected (.*?) to have shape \(3,\) but got array with shape \(1,\)'
      ):
        model.train_on_batch(unbatched)

      # Feature dimension 5 does not match the model's input_dim of 3.
      bad_shape = dataset_ops.Dataset.from_tensor_slices(
          (np.zeros((10, 5)), np.zeros((10, 4)))).repeat(100).batch(10)
      with self.assertRaisesRegexp(ValueError,
                                   r'expected (.*?) to have shape \(3,\)'):
        model.train_on_batch(bad_shape)
  def test_training_and_eval_methods_on_iterators_single_io(self, model):
    """fit/evaluate/predict work on a one-shot iterator; unsupported
    arguments raise informative ValueErrors."""
    if model == 'functional':
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    elif model == 'subclass':
      model = testing_utils.get_small_sequential_mlp(1, 4)
    model.compile(
        RMSPropOptimizer(learning_rate=0.001),
        'mse',
        metrics=['mae', metrics_module.CategoricalAccuracy()])

    dataset = dataset_ops.Dataset.from_tensor_slices(
        (np.zeros((10, 3)), np.zeros((10, 4)))).repeat(100).batch(10)
    iterator = dataset.make_one_shot_iterator()

    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(iterator, steps=2, verbose=1)
    model.predict(iterator, steps=2)

    # An iterator may also serve as validation data.
    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=iterator, validation_steps=2)

    # `validation_split` is rejected for iterator input.
    with self.assertRaisesRegexp(
        ValueError, '`validation_split` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=0,
                validation_split=0.5, validation_steps=2)

    # `sample_weight` is likewise rejected for iterator input.
    sample_weight = np.random.random((10,))
    with self.assertRaisesRegexp(
        ValueError, '`sample_weight` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=0,
                sample_weight=sample_weight)

    # Targets must not be passed separately alongside an iterator.
    with self.assertRaisesRegexp(ValueError,
                                 'you should not specify a target'):
      model.fit(iterator, iterator, epochs=1, steps_per_epoch=2, verbose=0)

    # Step counts are mandatory for iterator input.
    with self.assertRaisesRegexp(
        ValueError, 'you should specify the `steps_per_epoch` argument'):
      model.fit(iterator, epochs=1, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'you should specify the `steps` argument'):
      model.evaluate(iterator, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'you should specify the `steps` argument'):
      model.predict(iterator, verbose=0)
# Beispiel #4 (0)
    def test_dataset_with_class_weight(self):
        """`fit` accepts a per-class weight dict with dataset input."""
        model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
        model.compile('rmsprop', 'mse')

        dataset = dataset_ops.Dataset.from_tensor_slices(
            (np.zeros((10, 3), np.float32),
             np.zeros((10, 4), np.float32))).repeat(100).batch(10)
        # Uniform weight across the four classes.
        class_weight = dict(enumerate(np.array([0.25, 0.25, 0.25, 0.25])))

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1,
                  class_weight=class_weight)
  def test_dataset_with_class_weight(self):
    """`class_weight` dicts are honored when fitting from a dataset."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    model.compile('rmsprop', 'mse')

    dataset = dataset_ops.Dataset.from_tensor_slices(
        (np.zeros((10, 3), np.float32),
         np.zeros((10, 4), np.float32))).repeat(100).batch(10)
    # Equal weight for every one of the four classes.
    weights = np.array([0.25, 0.25, 0.25, 0.25])
    class_weight = dict(enumerate(weights))

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1,
              class_weight=class_weight)
  def test_dataset_with_sparse_labels(self, model):
    """Integer (sparse) targets work with string and callable sparse losses."""
    if model == 'functional':
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    elif model == 'subclass':
      model = testing_utils.get_small_sequential_mlp(1, 4)

    sparse_losses = ('sparse_categorical_crossentropy',
                     losses_impl.sparse_softmax_cross_entropy)
    for loss in sparse_losses:
      model.compile(RMSPropOptimizer(learning_rate=0.001), loss)

      features = np.zeros((10, 3), dtype=np.float32)
      labels = np.random.randint(0, 4, size=10, dtype=np.int32)
      dataset = dataset_ops.Dataset.from_tensor_slices(
          (features, labels)).repeat(100).batch(10)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    def test_dataset_with_sample_weights(self):
        """fit/evaluate/predict accept a 3-tuple dataset (x, y, weights)."""
        model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
        model.compile(
            RMSPropOptimizer(learning_rate=0.001),
            'mse',
            metrics=['mae', metrics_module.CategoricalAccuracy()])

        dataset = dataset_ops.Dataset.from_tensor_slices(
            (np.zeros((10, 3), np.float32),
             np.zeros((10, 4), np.float32),
             np.ones((10), np.float32))).repeat(100).batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)
  def test_dataset_with_sparse_labels(self, model):
    """Both sparse loss flavors accept integer class-index targets."""
    if model == 'functional':
      model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    elif model == 'subclass':
      model = testing_utils.get_small_sequential_mlp(1, 4)

    for loss in ('sparse_categorical_crossentropy',
                 losses_impl.sparse_softmax_cross_entropy):
      model.compile(RMSPropOptimizer(learning_rate=0.001), loss)

      x = np.zeros((10, 3), dtype=np.float32)
      y = np.random.randint(0, 4, size=10, dtype=np.int32)
      dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
      dataset = dataset.repeat(100).batch(10)

      model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
  def test_dataset_with_sample_weights(self):
    """Datasets yielding (x, y, sample_weight) triples are accepted."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    model.compile(
        RMSPropOptimizer(learning_rate=0.001),
        'mse',
        metrics=['mae', metrics_module.CategoricalAccuracy()])

    x = np.zeros((10, 3), np.float32)
    y = np.zeros((10, 4), np.float32)
    w = np.ones((10), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices(
        (x, y, w)).repeat(100).batch(10)

    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(dataset, steps=2, verbose=1)
    model.predict(dataset, steps=2)
    def test_iterators_running_out_of_data(self):
        """A warning is logged when the iterator is exhausted mid-fit."""
        model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
        model.compile(RMSPropOptimizer(learning_rate=0.001), 'mse',
                      metrics=['mae'])

        # Only two batches are available, but fit asks for three steps.
        dataset = dataset_ops.Dataset.from_tensor_slices(
            (np.zeros((10, 3), np.float32),
             np.zeros((10, 4), np.float32))).repeat(2).batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        with test.mock.patch.object(logging, 'warning') as mock_log:
            model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
            self.assertRegexpMatches(str(mock_log.call_args),
                                     'dataset iterator ran out of data')
    def test_get_next_op_created_once(self):
        """A second fit on the same iterator reuses the existing get_next op."""
        model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
        model.compile(RMSPropOptimizer(learning_rate=0.001), 'mse',
                      metrics=['mae'])

        dataset = dataset_ops.Dataset.from_tensor_slices(
            (np.zeros((10, 3), np.float32),
             np.zeros((10, 4), np.float32))).repeat(100).batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
        # Freeze the graph: any attempt to append another iterator
        # get_next op afterwards would raise.
        ops.get_default_graph().finalize()
        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
  def test_iterators_running_out_of_data(self):
    """fit logs a warning when the iterator runs dry before steps complete."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    model.compile(RMSPropOptimizer(learning_rate=0.001), 'mse',
                  metrics=['mae'])

    # Two batches available, three steps requested.
    dataset = dataset_ops.Dataset.from_tensor_slices(
        (np.zeros((10, 3)), np.zeros((10, 4)))).repeat(2).batch(10)
    iterator = dataset.make_one_shot_iterator()

    with test.mock.patch.object(logging, 'warning') as mock_log:
      model.fit(iterator, epochs=1, steps_per_epoch=3, verbose=0)
      self.assertRegexpMatches(
          str(mock_log.call_args),
          'dataset iterator ran out of data')
  def test_get_next_op_created_once(self):
    """Refitting on one iterator must not create another get_next op."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    model.compile(RMSPropOptimizer(learning_rate=0.001), 'mse',
                  metrics=['mae'])

    dataset = dataset_ops.Dataset.from_tensor_slices(
        (np.zeros((10, 3)), np.zeros((10, 4)))).repeat(100).batch(10)
    iterator = dataset.make_one_shot_iterator()

    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
    # A finalized graph rejects new ops, so the second fit must reuse the
    # iterator get_next op created by the first.
    ops.get_default_graph().finalize()
    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
# Beispiel #14 (0)
    def test_predict_generator_method(self, model_type):
        """Smoke-test `predict_generator` in all worker modes, with and
        without targets in the generator output."""
        if model_type == 'sequential':
            model = testing_utils.get_small_sequential_mlp(
                num_hidden=3, num_classes=4, input_dim=2)
        else:
            model = testing_utils.get_small_functional_mlp(
                num_hidden=3, num_classes=4, input_dim=2)
        model.compile(loss='mse',
                      optimizer='sgd',
                      metrics=['mae', metrics_module.CategoricalAccuracy()])

        # Generator yielding (x, y): multiprocessing, threaded, in-line.
        model.predict_generator(custom_generator(), steps=5,
                                max_queue_size=10, workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(), steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(), steps=5,
                                max_queue_size=10, workers=0)
        # Test generator with just inputs (no targets): same three modes.
        model.predict_generator(custom_generator(mode=1), steps=5,
                                max_queue_size=10, workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(mode=1), steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(mode=1), steps=5,
                                max_queue_size=10, workers=0)
  def test_calling_model_on_same_dataset(self):
    """Refitting on the same dataset must not add new ops to the graph."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    model.compile(RMSPropOptimizer(learning_rate=0.001), 'mse',
                  metrics=['mae'])

    dataset = dataset_ops.Dataset.from_tensor_slices(
        (np.zeros((10, 3), np.float32),
         np.zeros((10, 4), np.float32))).repeat(100).batch(10)

    # First fit (with validation) builds all required ops.
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
    # Freeze the graph; the second identical fit must reuse existing ops.
    ops.get_default_graph().finalize()
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
  def test_calling_model_on_same_dataset(self):
    """Fitting twice on one dataset adds no new graph ops the second time."""
    model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
    model.compile(RMSPropOptimizer(learning_rate=0.001), 'mse',
                  metrics=['mae'])

    features = np.zeros((10, 3))
    labels = np.zeros((10, 4))
    dataset = dataset_ops.Dataset.from_tensor_slices(
        (features, labels)).repeat(100).batch(10)

    # Build all ops with an initial fit that includes validation.
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
    # Finalizing the graph makes any subsequent op creation raise.
    ops.get_default_graph().finalize()
    model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=dataset, validation_steps=2)
# Beispiel #17 (0)
    def test_fit_generator_method(self, model_type):
        """Smoke-test `fit_generator` across worker modes and with
        generator-based validation data."""
        if model_type == 'sequential':
            model = testing_utils.get_small_sequential_mlp(
                num_hidden=3, num_classes=4, input_dim=2)
        else:
            model = testing_utils.get_small_functional_mlp(
                num_hidden=3, num_classes=4, input_dim=2)
        model.compile(loss='mse',
                      optimizer='sgd',
                      metrics=['mae', metrics_module.CategoricalAccuracy()])

        # Multiprocessing with four workers.
        model.fit_generator(custom_generator(), steps_per_epoch=5, epochs=1,
                            verbose=1, max_queue_size=10, workers=4,
                            use_multiprocessing=True)
        # Threaded execution.
        model.fit_generator(custom_generator(), steps_per_epoch=5, epochs=1,
                            verbose=1, max_queue_size=10,
                            use_multiprocessing=False)
        # Threaded execution with a generator supplying validation data.
        model.fit_generator(custom_generator(), steps_per_epoch=5, epochs=1,
                            verbose=1, max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(),
                            validation_steps=10)
        # In-line execution (workers=0) with generator validation.
        model.fit_generator(custom_generator(), steps_per_epoch=5,
                            validation_data=custom_generator(),
                            validation_steps=1, workers=0)
Beispiel #18
0
    def test_save_load_with_compile_functional(self):
        """Save/load round-trip with compile restores optimizer weights and
        yields identical predictions."""
        functional_model = add_optimizer(
            testing_utils.get_small_functional_mlp(
                num_hidden=1, num_classes=2, input_dim=3))
        tiledb_uri = os.path.join(self.get_temp_dir(), "model_array")
        tiledb_model_obj = TensorflowTileDB(uri=tiledb_uri)
        tiledb_model_obj.save(model=functional_model, include_optimizer=True)
        loaded_model = tiledb_model_obj.load(compile_model=True)
        data = np.random.rand(100, 3)

        model_opt_weights = batch_get_value(functional_model.optimizer.weights)
        loaded_opt_weights = batch_get_value(loaded_model.optimizer.weights)

        # Optimizer slot variables must round-trip exactly.
        for saved, restored in zip(model_opt_weights, loaded_opt_weights):
            np.testing.assert_array_equal(saved, restored)

        # Both models must produce identical predictions on the same data.
        np.testing.assert_array_equal(loaded_model.predict(data),
                                      functional_model.predict(data))
    def test_training_and_eval_methods_on_iterators_single_io(self, model):
        """fit/evaluate/predict run on a one-shot iterator; unsupported
        arguments raise informative ValueErrors."""
        if model == 'functional':
            model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
        elif model == 'subclass':
            model = testing_utils.get_small_sequential_mlp(1, 4)
        model.compile(RMSPropOptimizer(learning_rate=0.001),
                      'mse',
                      metrics=['mae', metrics_module.CategoricalAccuracy()])

        dataset = dataset_ops.Dataset.from_tensor_slices(
            (np.zeros((10, 3), np.float32),
             np.zeros((10, 4), np.float32))).repeat(100).batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(iterator, steps=2, verbose=1)
        model.predict(iterator, steps=2)

        # The iterator may also be used as validation data.
        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=0,
                  validation_data=iterator, validation_steps=2)

        # `validation_split` is rejected for iterator input.
        with self.assertRaisesRegexp(
                ValueError, '`validation_split` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=0,
                      validation_split=0.5, validation_steps=2)

        # `sample_weight` is rejected for iterator input.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegexp(
                ValueError, '`sample_weight` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=0,
                      sample_weight=sample_weight)

        # A separate target must not accompany an iterator.
        with self.assertRaisesRegexp(ValueError,
                                     'you should not specify a target'):
            model.fit(iterator, iterator, epochs=1, steps_per_epoch=2,
                      verbose=0)

        # Step counts are mandatory for iterator input.
        with self.assertRaisesRegexp(
                ValueError,
                'you should specify the `steps_per_epoch` argument'):
            model.fit(iterator, epochs=1, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'you should specify the `steps` argument'):
            model.evaluate(iterator, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'you should specify the `steps` argument'):
            model.predict(iterator, verbose=0)