Example #1
    def test_dataset_with_sample_weights(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=(
                          testing_utils.should_run_tf_function()))

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        sample_weights = np.ones((10), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices(
            (inputs, targets, sample_weights))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)
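
Example #1 above feeds per-sample weights to fit()/evaluate() by packing them as the third element of the tf.data tuple. With in-memory NumPy arrays the same weighting can be passed through the sample_weight argument instead; the sketch below uses the public tf.keras API, with a tiny Sequential model standing in for the testing_utils helper (an assumption, not part of the original test).

import numpy as np
import tensorflow as tf

# Stand-in for testing_utils.get_small_mlp(1, 4, input_dim=3).
model = tf.keras.Sequential([
    tf.keras.layers.Dense(1, activation='relu', input_shape=(3,)),
    tf.keras.layers.Dense(4),
])
model.compile('rmsprop', 'mse', metrics=['mae'])

inputs = np.zeros((10, 3), np.float32)
targets = np.zeros((10, 4), np.float32)
sample_weights = np.ones((10,), np.float32)

# With array inputs, weights go through the sample_weight argument rather
# than being bundled into the dataset.
model.fit(inputs, targets, sample_weight=sample_weights, epochs=1, verbose=0)
model.evaluate(inputs, targets, sample_weight=sample_weights, verbose=0)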
Example #2
    def DISABLED_test_function_model_feature_layer_input(self):
        col_a = fc.numeric_column('a')
        col_b = fc.numeric_column('b')

        feature_layer = fc.DenseFeatures([col_a, col_b], name='fc')
        dense = keras.layers.Dense(4)

        # This seems problematic.... We probably need something for DenseFeatures
        # the way Input is for InputLayer.
        output = dense(feature_layer)

        model = keras.models.Model([feature_layer], [output])

        optimizer = 'rmsprop'
        loss = 'mse'
        loss_weights = [1., 0.5]
        model.compile(optimizer,
                      loss,
                      metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
                      loss_weights=loss_weights)

        data = ({'a': np.arange(10), 'b': np.arange(10)}, np.arange(10, 20))
        print(model.fit(*data, epochs=1))
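
The test above is disabled; as its inline comment notes, it passes the DenseFeatures layer object itself into Model, which the functional API does not accept. For comparison, a working pattern is to create one keras.Input per raw feature and call the feature layer on that dict. The sketch below uses the public tf.keras API; the (10, 4) target shape is an assumption chosen to match the Dense(4) output and is not part of the original test.

import numpy as np
import tensorflow as tf

col_a = tf.feature_column.numeric_column('a')
col_b = tf.feature_column.numeric_column('b')

# One symbolic Input per raw feature, keyed by feature name.
inputs = {
    'a': tf.keras.Input(shape=(1,), name='a'),
    'b': tf.keras.Input(shape=(1,), name='b'),
}
features = tf.keras.layers.DenseFeatures([col_a, col_b], name='fc')(inputs)
output = tf.keras.layers.Dense(4)(features)

model = tf.keras.Model(inputs=inputs, outputs=output)
model.compile('rmsprop', 'mse', metrics=['mae'])

data = (
    {'a': np.arange(10, dtype='float32').reshape(10, 1),
     'b': np.arange(10, dtype='float32').reshape(10, 1)},
    np.zeros((10, 4), dtype='float32'),
)
model.fit(*data, epochs=1, verbose=0)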
Example #3
    def test_model_methods_with_eager_tensors_single_io(self):
        if not context.executing_eagerly():
            # Only test V2 Function and V2 Eager modes, as V1 Graph mode with
            # symbolic tensors has different requirements.
            return

        model = testing_utils.get_small_mlp(10, 4, 3)

        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      run_distributed=testing_utils.should_run_distributed())

        inputs = array_ops.zeros(shape=(10, 3))
        targets = array_ops.zeros(shape=(10, 4))

        model.fit(inputs, targets, epochs=1, batch_size=2, verbose=0)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=3,
                  verbose=0,
                  shuffle=False)
        model.fit(inputs,
                  targets,
                  epochs=1,
                  batch_size=4,
                  verbose=0,
                  validation_data=(inputs, targets))
        model.evaluate(inputs, targets, batch_size=2, verbose=0)
        model.predict(inputs, batch_size=2)
        model.train_on_batch(inputs, targets)
        model.test_on_batch(inputs, targets)
Example #4
    def test_predict_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer='sgd',
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())

        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
        # Test generator with just inputs (no targets)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=2,
                                use_multiprocessing=True)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                use_multiprocessing=False)
        model.predict_generator(custom_generator(mode=1),
                                steps=5,
                                max_queue_size=10,
                                workers=0)
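
Examples #4, #5 and #6 call a module-level custom_generator() helper that is not included in this listing (Example #14 defines a similar generator inline, without the mode argument). A minimal sketch of what such a helper might look like is shown below; the mode=1 branch yielding inputs only matches the "just inputs (no targets)" comment above, and the batch shapes assume the input_dim=2, num_classes=4 model used in these tests. The real helper in the test suite may differ.

import numpy as np

def custom_generator(mode=2):
    # Hypothetical stand-in for the test helper.
    # mode=1: yield inputs only (for predict); otherwise yield (inputs, targets).
    batch_size = 10
    num_samples = 50
    arr_data = np.random.random((num_samples, 2))
    arr_labels = np.random.random((num_samples, 4))
    while True:
        start = np.random.randint(0, num_samples - batch_size)
        x = arr_data[start:start + batch_size]
        y = arr_labels[start:start + batch_size]
        if mode == 1:
            yield x
        else:
            yield x, y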
Example #5
  def test_evaluate_generator_method(self):
    model = testing_utils.get_small_mlp(
        num_hidden=3, num_classes=4, input_dim=2)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSprop(1e-3),
        metrics=['mae', metrics_module.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())

    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             workers=2,
                             verbose=1,
                             use_multiprocessing=True)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False,
                             workers=0)
Example #6
    def test_fit_generator_method(self):
        model = testing_utils.get_small_mlp(num_hidden=3,
                                            num_classes=4,
                                            input_dim=2)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSprop(1e-3),
                      metrics=['mae',
                               metrics_module.CategoricalAccuracy()])

        self._sleep_at_end = True
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            workers=4,
                            use_multiprocessing=True)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            epochs=1,
                            verbose=1,
                            max_queue_size=10,
                            use_multiprocessing=False,
                            validation_data=custom_generator(),
                            validation_steps=10)
        model.fit_generator(custom_generator(),
                            steps_per_epoch=5,
                            validation_data=custom_generator(),
                            validation_steps=1,
                            workers=0)
Example #7
    def test_model_methods_with_eager_tensors_multi_io(self):
        if not context.executing_eagerly():
            # Only test V2 Function and V2 Eager modes, as V1 Graph mode with
            # symbolic tensors has different requirements.
            return

        input_a = keras.layers.Input(shape=(3, ), name='input_a')
        input_b = keras.layers.Input(shape=(3, ), name='input_b')

        dense = keras.layers.Dense(4, name='dense')
        dropout = keras.layers.Dropout(0.5, name='dropout')

        model = testing_utils.get_multi_io_model([input_a, dense],
                                                 [input_b, dense, dropout])

        optimizer = rmsprop.RMSprop(learning_rate=0.001)
        loss = 'mse'
        loss_weights = [1., 0.5]
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      loss_weights=loss_weights,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=(
                          testing_utils.should_run_tf_function()),
                      sample_weight_mode=None)

        input_a = array_ops.zeros(shape=(10, 3))
        input_b = array_ops.zeros(shape=(10, 3))
        target_a = array_ops.zeros(shape=(10, 4))
        target_b = array_ops.zeros(shape=(10, 4))

        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=5,
                  verbose=0)
        # Test: no shuffle.
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=5,
                  verbose=0,
                  shuffle=False)
        # Test: validation data.
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=2,
                  verbose=0,
                  validation_data=([input_a, input_b], [target_a, target_b]))
        model.train_on_batch([input_a, input_b], [target_a, target_b])
        model.predict([input_a, input_b], batch_size=5)
        model.evaluate([input_a, input_b], [target_a, target_b],
                       batch_size=2,
                       verbose=0)
        model.test_on_batch([input_a, input_b], [target_a, target_b])

        # Test: mix np and tensors.
        input_b = np.zeros(shape=(10, 3)).astype('float32')
        target_b = np.zeros(shape=(10, 4)).astype('float32')
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=5,
                  verbose=0)
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=2,
                  verbose=0,
                  validation_data=([input_a, input_b], [target_a, target_b]))
        model.fit([input_a, input_b], [target_a, target_b],
                  epochs=1,
                  batch_size=5,
                  verbose=0,
                  shuffle=False)
        model.train_on_batch([input_a, input_b], [target_a, target_b])
        model.predict([input_a, input_b], batch_size=5)
        model.evaluate([input_a, input_b], [target_a, target_b],
                       batch_size=2,
                       verbose=0)
        model.test_on_batch([input_a, input_b], [target_a, target_b])
Example #8
    def test_training_and_eval_methods_on_iterators_single_io(self, model):
        if model == 'functional':
            model = testing_utils.get_small_functional_mlp(1, 4, input_dim=3)
        elif model == 'subclass':
            model = testing_utils.get_small_sequential_mlp(1, 4)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer, loss, metrics=metrics)

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(iterator, steps=2, verbose=1)
        model.predict(iterator, steps=2)

        # Test with validation data
        model.fit(iterator,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=iterator,
                  validation_steps=2)
        # Test with validation split
        with self.assertRaisesRegexp(
                ValueError, '`validation_split` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_split=0.5,
                      validation_steps=2)

        # Test with sample weight.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegexp(
                ValueError, '`sample_weight` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      sample_weight=sample_weight)

        # Test invalid usage
        with self.assertRaisesRegexp(ValueError,
                                     'you should not specify a target'):
            model.fit(iterator,
                      iterator,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0)

        with self.assertRaisesRegexp(
                ValueError,
                'you should specify the `steps_per_epoch` argument'):
            model.fit(iterator, epochs=1, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'you should specify the `steps` argument'):
            model.evaluate(iterator, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'you should specify the `steps` argument'):
            model.predict(iterator, verbose=0)
Example #9
    def DISABLED_test_function_model_multiple_feature_layer_inputs(self):
        col_a = fc.numeric_column('a')
        col_b = fc.numeric_column('b')
        col_c = fc.numeric_column('c')

        fc1 = df.DenseFeatures([col_a, col_b], name='fc1')
        fc2 = df.DenseFeatures([col_b, col_c], name='fc2')
        dense = keras.layers.Dense(4)

        # This seems problematic.... We probably need something for DenseFeatures
        # the way Input is for InputLayer.
        output = dense(fc1) + dense(fc2)

        model = keras.models.Model([fc1, fc2], [output])

        optimizer = 'rmsprop'
        loss = 'mse'
        loss_weights = [1., 0.5]
        model.compile(optimizer,
                      loss,
                      metrics=[metrics_module.CategoricalAccuracy(), 'mae'],
                      loss_weights=loss_weights)

        data_list = ([{
            'a': np.arange(10),
            'b': np.arange(10)
        }, {
            'b': np.arange(10),
            'c': np.arange(10)
        }], np.arange(10, 100))
        model.fit(*data_list, epochs=1)

        data_bloated_list = ([{
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }, {
            'a': np.arange(10),
            'b': np.arange(10),
            'c': np.arange(10)
        }], np.arange(10, 100))
        model.fit(*data_bloated_list, epochs=1)

        data_dict = ({
            'fc1': {
                'a': np.arange(10),
                'b': np.arange(10)
            },
            'fc2': {
                'b': np.arange(10),
                'c': np.arange(10)
            }
        }, np.arange(10, 100))
        model.fit(*data_dict, epochs=1)

        data_bloated_dict = ({
            'fc1': {
                'a': np.arange(10),
                'b': np.arange(10),
                'c': np.arange(10)
            },
            'fc2': {
                'a': np.arange(10),
                'b': np.arange(10),
                'c': np.arange(10)
            }
        }, np.arange(10, 100))
        model.fit(*data_bloated_dict, epochs=1)
Example #10
    def test_training_and_eval_methods_on_dataset(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = 'rmsprop'
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly(),
                      experimental_run_tf_function=(
                          testing_utils.should_run_tf_function()))

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat()  # Infinite dataset.
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)

        # Test with validation data
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)

        # Test with validation split
        with self.assertRaisesRegexp(
                ValueError,
                '`validation_split` argument is not supported when '):
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_split=0.5,
                      validation_steps=2)

        # Test with sample weight.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegexp(
                ValueError,
                r'`sample_weight` argument is not supported .+dataset'):
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      sample_weight=sample_weight)

        # Test invalid usage
        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument must not be specified'):
            model.fit(dataset,
                      batch_size=10,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0)

        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument must not be specified'):
            model.predict(dataset, batch_size=10, steps=2, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument must not be specified'):
            model.evaluate(dataset, batch_size=10, steps=2, verbose=0)

        with self.assertRaisesRegexp(
                ValueError, '(you should not specify a target)|'
                '(`y` argument is not supported when using dataset as input.)'
        ):
            model.fit(dataset, dataset, epochs=1, steps_per_epoch=2, verbose=0)

        # With an infinite dataset, `steps_per_epoch`/`steps` argument is required.
        with self.assertRaisesRegexp(ValueError,
                                     'the `steps_per_epoch` argument'):
            model.fit(dataset, epochs=1, verbose=0)
        with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
            model.evaluate(dataset, verbose=0)
        with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
            model.predict(dataset, verbose=0)
Example #11
    def test_training_and_eval_methods_on_dataset(self):
        model = testing_utils.get_small_mlp(1, 4, input_dim=3)
        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      run_eagerly=testing_utils.should_run_eagerly())

        inputs = np.zeros((10, 3), np.float32)
        targets = np.zeros((10, 4), np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)

        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=1)
        model.evaluate(dataset, steps=2, verbose=1)
        model.predict(dataset, steps=2)

        # Test with validation data
        model.fit(dataset,
                  epochs=1,
                  steps_per_epoch=2,
                  verbose=0,
                  validation_data=dataset,
                  validation_steps=2)

        # Test with validation split
        with self.assertRaisesRegexp(
                ValueError, '`validation_split` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      validation_split=0.5,
                      validation_steps=2)

        # Test with sample weight.
        sample_weight = np.random.random((10, ))
        with self.assertRaisesRegexp(
                ValueError, '`sample_weight` argument is not supported '
                'when input `x` is a dataset or a dataset iterator'):
            model.fit(dataset,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0,
                      sample_weight=sample_weight)

        # Test invalid usage
        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument'
                ' must not be specified when using dataset'
                ' as an input.'):
            model.fit(dataset,
                      batch_size=10,
                      epochs=1,
                      steps_per_epoch=2,
                      verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument'
                ' must not be specified when using dataset'
                ' as an input.'):
            model.predict(dataset, batch_size=10, steps=2, verbose=0)
        with self.assertRaisesRegexp(
                ValueError, 'The `batch_size` argument'
                ' must not be specified when using dataset'
                ' as an input.'):
            model.evaluate(dataset, batch_size=10, steps=2, verbose=0)

        with self.assertRaisesRegexp(ValueError,
                                     'you should not specify a target'):
            model.fit(dataset, dataset, epochs=1, steps_per_epoch=2, verbose=0)

        with self.assertRaisesRegexp(ValueError,
                                     'the `steps_per_epoch` argument'):
            model.fit(dataset, epochs=1, verbose=0)
        with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
            model.evaluate(dataset, verbose=0)
        with self.assertRaisesRegexp(ValueError, 'the `steps` argument'):
            model.predict(dataset, verbose=0)
Example #12
    def test_model_methods_with_eager_tensors_multi_io(self):
        a = keras.layers.Input(shape=(3, ), name='input_a')
        b = keras.layers.Input(shape=(3, ), name='input_b')

        dense = keras.layers.Dense(4, name='dense')
        c = dense(a)
        d = dense(b)
        e = keras.layers.Dropout(0.5, name='dropout')(c)

        model = keras.models.Model([a, b], [d, e])

        optimizer = RMSPropOptimizer(learning_rate=0.001)
        loss = 'mse'
        loss_weights = [1., 0.5]
        metrics = ['mae', metrics_module.CategoricalAccuracy()]
        model.compile(optimizer,
                      loss,
                      metrics=metrics,
                      loss_weights=loss_weights,
                      sample_weight_mode=None)

        input_a = keras.backend.zeros(shape=(10, 3))
        input_b = keras.backend.zeros(shape=(10, 3))
        target_d = keras.backend.zeros(shape=(10, 4))
        target_e = keras.backend.zeros(shape=(10, 4))

        model.fit([input_a, input_b], [target_d, target_e],
                  epochs=1,
                  batch_size=5,
                  verbose=0)
        # Test: no shuffle.
        model.fit([input_a, input_b], [target_d, target_e],
                  epochs=1,
                  batch_size=5,
                  verbose=0,
                  shuffle=False)
        # Test: validation data.
        model.fit([input_a, input_b], [target_d, target_e],
                  epochs=1,
                  batch_size=2,
                  verbose=0,
                  validation_data=([input_a, input_b], [target_d, target_e]))
        model.train_on_batch([input_a, input_b], [target_d, target_e])
        model.predict([input_a, input_b], batch_size=5)
        model.evaluate([input_a, input_b], [target_d, target_e],
                       batch_size=2,
                       verbose=0)
        model.test_on_batch([input_a, input_b], [target_d, target_e])

        # Test: mix np and tensors.
        input_b = np.zeros(shape=(10, 3)).astype('float32')
        target_e = np.zeros(shape=(10, 4)).astype('float32')
        model.fit([input_a, input_b], [target_d, target_e],
                  epochs=1,
                  batch_size=5,
                  verbose=0)
        model.fit([input_a, input_b], [target_d, target_e],
                  epochs=1,
                  batch_size=2,
                  verbose=0,
                  validation_data=([input_a, input_b], [target_d, target_e]))
        model.fit([input_a, input_b], [target_d, target_e],
                  epochs=1,
                  batch_size=5,
                  verbose=0,
                  shuffle=False)
        model.train_on_batch([input_a, input_b], [target_d, target_e])
        model.predict([input_a, input_b], batch_size=5)
        model.evaluate([input_a, input_b], [target_d, target_e],
                       batch_size=2,
                       verbose=0)
        model.test_on_batch([input_a, input_b], [target_d, target_e])
Example #13
        encoded_text = converttoInt(text, vocab)
        encoded_texts.append(encoded_text)
    print("encoded_text example: ", encoded_texts[0])
    print("decoded_text example: ", [int_to_vocab[char] for char in encoded_texts[0]])

    # decoded_text = [int_to_vocab[char] for char in encoded_texts[0]]
    # loss_object = tf.keras.losses.categorical_crossentropy()

    optimizer = tf.keras.optimizers.Adam(lr=0.001)
    # Track the evolution of the training.
    # Loss
    train_loss = metrics.Mean(name='train_loss')
    valid_loss = metrics.Mean(name='valid_loss')
    # Accuracy
    train_accuracy = metrics.CategoricalAccuracy(name='train_accuracy')
    valid_accuracy = metrics.CategoricalAccuracy(name='valid_accuracy')

    model = createModel()
    model.summary()

    epochs = 15
    batch_size = 32
    actual_batch = 0
    model.reset_states()

    for epoch in range(epochs):
        print("\n epoch :", epoch)
        model.reset_states()
        actual_batch = 0
        for songs, targets in get_batches(mfccs, encoded_texts, batch_size):
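
The fragment above is excerpted mid-function and depends on helpers (converttoInt, get_batches, createModel) that are not part of this listing. A minimal sketch of the encoding helper is shown below, assuming vocab maps each character to an integer id, i.e. the inverse of the int_to_vocab mapping used in the print statements; the other helpers are left unspecified.

def converttoInt(text, vocab):
    # Hypothetical helper: encode a string as a list of integer ids,
    # assuming `vocab` is a dict mapping each character to its id.
    return [vocab[char] for char in text]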
Example #14
    def test_generator_methods(self):
        arr_data = np.random.random((50, 2))
        arr_labels = np.random.random((50, ))

        def custom_generator():
            batch_size = 10
            num_samples = 50
            while True:
                batch_index = np.random.randint(0, num_samples - batch_size)
                start = batch_index
                end = start + batch_size
                x = arr_data[start:end]
                y = arr_labels[start:end]
                yield x, y

        with self.cached_session():
            x = keras.Input((2, ))
            y = keras.layers.Dense(1)(x)
            fn_model = keras.models.Model(x, y)
            fn_model.compile(
                loss='mse',
                optimizer='sgd',
                metrics=['mae', metrics_module.CategoricalAccuracy()])

            seq_model = keras.models.Sequential()
            seq_model.add(keras.layers.Dense(1, input_shape=(2, )))
            seq_model.compile(loss='mse', optimizer='sgd')

            for model in [fn_model, seq_model]:
                model.fit_generator(custom_generator(),
                                    steps_per_epoch=5,
                                    epochs=1,
                                    verbose=1,
                                    max_queue_size=10,
                                    workers=4,
                                    use_multiprocessing=True)
                model.fit_generator(custom_generator(),
                                    steps_per_epoch=5,
                                    epochs=1,
                                    verbose=1,
                                    max_queue_size=10,
                                    use_multiprocessing=False)
                model.fit_generator(custom_generator(),
                                    steps_per_epoch=5,
                                    epochs=1,
                                    verbose=1,
                                    max_queue_size=10,
                                    use_multiprocessing=False,
                                    validation_data=custom_generator(),
                                    validation_steps=10)
                model.fit_generator(custom_generator(),
                                    steps_per_epoch=5,
                                    validation_data=custom_generator(),
                                    validation_steps=1,
                                    workers=0)
                model.predict_generator(custom_generator(),
                                        steps=5,
                                        max_queue_size=10,
                                        workers=2,
                                        use_multiprocessing=True)
                model.predict_generator(custom_generator(),
                                        steps=5,
                                        max_queue_size=10,
                                        use_multiprocessing=False)
                model.predict_generator(custom_generator(),
                                        steps=5,
                                        max_queue_size=10,
                                        workers=0)
                model.evaluate_generator(custom_generator(),
                                         steps=5,
                                         max_queue_size=10,
                                         workers=2,
                                         verbose=1,
                                         use_multiprocessing=True)
                model.evaluate_generator(custom_generator(),
                                         steps=5,
                                         max_queue_size=10,
                                         use_multiprocessing=False)
                model.evaluate_generator(custom_generator(),
                                         steps=5,
                                         max_queue_size=10,
                                         use_multiprocessing=False,
                                         workers=0)
Example #15
  def test_training_and_eval_methods_on_iterators_single_io(self):
    if testing_utils.should_run_distributed():
      self.skipTest('b/137397816')
    model = testing_utils.get_small_mlp(1, 4, input_dim=3)
    optimizer = 'rmsprop'
    loss = 'mse'
    metrics = ['mae', metrics_module.CategoricalAccuracy()]
    model.compile(
        optimizer,
        loss,
        metrics=metrics,
        run_eagerly=testing_utils.should_run_eagerly(),
        run_distributed=testing_utils.should_run_distributed())

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)

    model.fit(iterator, epochs=1, steps_per_epoch=2, verbose=1)
    model.evaluate(iterator, steps=2, verbose=1)
    model.predict(iterator, steps=2)

    # Test with validation data
    model.fit(iterator,
              epochs=1, steps_per_epoch=2, verbose=0,
              validation_data=iterator, validation_steps=2)
    # Test with validation split
    with self.assertRaisesRegexp(
        ValueError, '`validation_split` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(iterator,
                epochs=1, steps_per_epoch=2, verbose=0,
                validation_split=0.5, validation_steps=2)

    # Test with sample weight.
    sample_weight = np.random.random((10,))
    with self.assertRaisesRegexp(
        ValueError, '`sample_weight` argument is not supported '
        'when input `x` is a dataset or a dataset iterator'):
      model.fit(
          iterator,
          epochs=1,
          steps_per_epoch=2,
          verbose=0,
          sample_weight=sample_weight)

    # Test invalid usage
    with self.assertRaisesRegexp(ValueError,
                                 'you should not specify a target'):
      model.fit(iterator, iterator,
                epochs=1, steps_per_epoch=2, verbose=0)

    with self.assertRaisesRegexp(
        ValueError, 'the `steps_per_epoch` argument'):
      model.fit(iterator, epochs=1, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'the `steps` argument'):
      model.evaluate(iterator, verbose=0)
    with self.assertRaisesRegexp(ValueError,
                                 'the `steps` argument'):
      model.predict(iterator, verbose=0)