Example #1
  def test_masking_rnn_with_output_and_states(self):

    class Cell(keras.layers.Layer):

      def __init__(self):
        self.state_size = None
        self.output_size = None
        super(Cell, self).__init__()

      def build(self, input_shape):
        self.state_size = input_shape[-1]
        self.output_size = input_shape[-1]

      def call(self, inputs, states):
        return inputs, [s + 1 for s in states]

    x = keras.Input((3, 1), name='x')
    x_masked = keras.layers.Masking()(x)
    s_0 = keras.Input((1,), name='s_0')
    y, s = keras.layers.RNN(
        Cell(), return_state=True)(x_masked, initial_state=s_0)
    model = keras.models.Model([x, s_0], [y, s])
    model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                  loss='mse')

    # The last time step is all zeros, so the Masking layer masks it.
    x_np = np.array([[[1.], [2.], [0.]]])
    s_0_np = np.array([[10.]])
    y_np, s_np = model.predict([x_np, s_0_np])

    # Only the two unmasked steps update the state, so 1 is added twice.
    self.assertAllClose(s_np, s_0_np + 2)
    # The final output should equal the input at the last unmasked step.
    self.assertAllClose(y_np, x_np[:, 1, :])
Example #2
  def test_zero_output_for_masking(self):

    for unroll in [True, False]:
      cell = keras.layers.SimpleRNNCell(5)
      x = keras.Input((5, 5))
      mask = keras.layers.Masking()
      layer = keras.layers.RNN(
          cell, return_sequences=True, zero_output_for_mask=True, unroll=unroll)
      masked_input = mask(x)
      y = layer(masked_input)
      model = keras.models.Model(x, y)
      model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                    loss='mse')

      np_x = np.ones((6, 5, 5))
      result_1 = model.predict(np_x)

      # Mask time steps 3 and 4 of the last record by setting them to zero.
      np_x[5, 3:] = 0
      result_2 = model.predict(np_x)

      # result_2 should match result_1 except at the masked steps of the
      # last record, where the output is zeroed.
      result_1[5, 3:] = 0
      self.assertAllClose(result_1, result_2)
Example #3
 def prepare_simple_model(input_tensor, loss_name, target):
     axis = 1 if K.image_data_format() == 'channels_first' else -1
     loss = None
     num_channels = None
     activation = None
     if loss_name == 'sparse_categorical_crossentropy':
         loss = lambda y_true, y_pred: K.sparse_categorical_crossentropy(  # pylint: disable=g-long-lambda
             y_true,
             y_pred,
             axis=axis)
         num_channels = np.amax(target) + 1
         activation = 'softmax'
     elif loss_name == 'categorical_crossentropy':
         loss = lambda y_true, y_pred: K.categorical_crossentropy(  # pylint: disable=g-long-lambda
             y_true,
             y_pred,
             axis=axis)
         num_channels = target.shape[axis]
         activation = 'softmax'
     elif loss_name == 'binary_crossentropy':
         loss = lambda y_true, y_pred: K.binary_crossentropy(
             y_true, y_pred)  # pylint: disable=unnecessary-lambda
         num_channels = target.shape[axis]
         activation = 'sigmoid'
     predictions = Conv2D(num_channels,
                          1,
                          activation=activation,
                          kernel_initializer='ones',
                          bias_initializer='ones')(input_tensor)
     simple_model = keras.models.Model(inputs=input_tensor,
                                       outputs=predictions)
     simple_model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3),
                          loss=loss)
     return simple_model
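
A minimal usage sketch for the helper above (not from the original test; the input shape, label values, and fit call are illustrative assumptions):

input_tensor = keras.layers.Input(shape=(8, 8, 3))
# Sparse integer labels in [0, 4); the helper infers
# num_channels = np.amax(target) + 1 = 4 from them.
target = np.random.randint(0, 4, size=(2, 8, 8, 1))
model = prepare_simple_model(input_tensor, 'sparse_categorical_crossentropy',
                             target)
model.fit(np.random.random((2, 8, 8, 3)), target, epochs=1, verbose=0)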
Example #4
    def test_warm_start_from_keras_ckpt(self):
        keras_model, (x_train, y_train), (
            _,
            _), train_input_fn, eval_input_fn = get_resource_for_simple_model(
                model_type='functional', is_evaluate=True)
        keras_model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3),
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])
        keras_model.fit(x_train, y_train, epochs=1)

        warm_start_path = os.path.join(self._config.model_dir, 'keras',
                                       'warm_start.ckpt')
        keras_model.save_weights(warm_start_path)

        est_keras = keras_lib.model_to_estimator(keras_model=keras_model,
                                                 config=self._config,
                                                 checkpoint_format='saver')

        self.assertEqual(
            warm_start_path,
            est_keras._warm_start_settings.ckpt_to_initialize_from)
        before_eval_results = est_keras.evaluate(input_fn=eval_input_fn,
                                                 steps=1)
        est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE // 16)
        after_eval_results = est_keras.evaluate(input_fn=eval_input_fn,
                                                steps=1)
        self.assertLess(after_eval_results['loss'],
                        before_eval_results['loss'])
Example #5
  def test_sequential_deferred_build_serialization(self):
    num_hidden = 5
    input_dim = 3
    batch_size = 5
    num_classes = 2

    model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
    model.compile(
        loss='mse',
        optimizer=rmsprop.RMSPropOptimizer(1e-3),
        metrics=[keras.metrics.CategoricalAccuracy()],
        run_eagerly=testing_utils.should_run_eagerly())
    self.assertFalse(model.built)

    x = np.random.random((batch_size, input_dim))
    y = np.random.random((batch_size, num_classes))
    model.train_on_batch(x, y)
    self.assertTrue(model.built)

    config = model.get_config()
    self.assertIn('build_input_shape', config)

    new_model = keras.models.Sequential.from_config(config)
    self.assertEqual(len(new_model.layers), 2)
    self.assertEqual(len(new_model.weights), 4)
Example #6
    def test_sequential_deferred_build_with_dataset_iterators(self):
        num_hidden = 5
        input_dim = 3
        num_classes = 2
        num_samples = 50
        steps_per_epoch = 10

        model = testing_utils.get_small_sequential_mlp(num_hidden, num_classes)
        model.compile(loss='mse',
                      optimizer=rmsprop.RMSPropOptimizer(1e-3),
                      metrics=[keras.metrics.CategoricalAccuracy()],
                      run_eagerly=testing_utils.should_run_eagerly())
        self.assertEqual(len(model.layers), 2)
        self.assertEqual(len(model.weights), 0)
        self.assertFalse(model.built)

        x = array_ops.ones((num_samples, input_dim))
        y = array_ops.zeros((num_samples, num_classes))
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)

        model.fit(iterator, epochs=1, steps_per_epoch=steps_per_epoch)
        self.assertTrue(model.built)
        self.assertEqual(len(model.weights), 2 * 2)
        self.assertFalse(model._is_graph_network)
Example #7
  def test_high_dimension_RNN_with_init_state(self):
    unit_a = 10
    unit_b = 20
    input_a = 5
    input_b = 10
    batch = 32
    time_step = 4

    # Basic test case.
    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    s = keras.Input((unit_a, unit_b))
    layer = keras.layers.RNN(cell)
    y = layer(x, initial_state=s)

    model = keras.models.Model([x, s], y)
    model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                  loss='mse')
    model.train_on_batch([
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a, unit_b))
    ], np.zeros((batch, unit_a, unit_b)))
    self.assertEqual(model.output_shape, (None, unit_a, unit_b))

    # An initial state with the wrong shape should raise a ValueError.
    bad_shape_a = unit_a * 2
    bad_shape_b = unit_b * 2
    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    s = keras.Input((bad_shape_a, bad_shape_b))
    layer = keras.layers.RNN(cell)
    with self.assertRaisesWithPredicateMatch(ValueError,
                                             'however `cell.state_size` is'):
      layer(x, initial_state=s)
Example #8
  def test_train_with_subclassed_model_with_existing_state(self):
    keras_model, (_, _), (
        _, _), train_input_fn, eval_input_fn = get_resource_for_simple_model(
            model_type='subclass', is_evaluate=True)
    keras_model.compile(
        loss='categorical_crossentropy',
        optimizer=rmsprop.RMSPropOptimizer(1e-3),
        metrics=['mse', keras.metrics.categorical_accuracy])

    with self.test_session():
      # Create state
      keras_model.train_on_batch(np.random.random((10,) + _INPUT_SIZE),
                                 np.random.random((10, _NUM_CLASS)))
      original_preds = keras_model.predict(np.ones((10,) + _INPUT_SIZE))

      est_keras = keras_lib.model_to_estimator(
          keras_model=keras_model, config=self._config)
      est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE // 16)
      before_eval_results = est_keras.evaluate(
          input_fn=eval_input_fn, steps=1)
      est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE // 16)
      after_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
      self.assertLess(after_eval_results['loss'], before_eval_results['loss'])

      # Check that original model state was not altered
      preds = keras_model.predict(np.ones((10,) + _INPUT_SIZE))
      self.assertAllClose(original_preds, preds, atol=1e-5)
      # Check that the original model compilation did not break
      keras_model.train_on_batch(np.random.random((10,) + _INPUT_SIZE),
                                 np.random.random((10, _NUM_CLASS)))
Example #9
  def test_dataset_input_shape_validation(self):
    with self.cached_session():
      x = keras.layers.Input(shape=(3,), name='input')
      y = keras.layers.Dense(4, name='dense')(x)
      model = keras.Model(x, y)

      optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
      loss = 'mse'
      strategy = mirrored_strategy.MirroredStrategy(['/device:GPU:1',
                                                     '/device:GPU:0'])

      model.compile(optimizer, loss, distribute=strategy)

      # User forgets to batch the dataset
      inputs = np.zeros((10, 3), dtype=np.float32)
      targets = np.zeros((10, 4), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)

      with self.assertRaisesRegexp(ValueError, 'expected input to have shape'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)

      # Wrong input shape
      inputs = np.zeros((10, 5), dtype=np.float32)
      targets = np.zeros((10, 4), dtype=np.float32)
      dataset = dataset_ops.Dataset.from_tensor_slices((inputs, targets))
      dataset = dataset.repeat(100)
      dataset = dataset.batch(10)

      with self.assertRaisesRegexp(ValueError,
                                   'expected input to have shape'):
        model.fit(dataset, epochs=1, steps_per_epoch=2, verbose=0)
Example #10
    def test_extract_model_metrics(self):
        a = keras.layers.Input(shape=(3, ), name='input_a')
        b = keras.layers.Input(shape=(3, ), name='input_b')

        dense = keras.layers.Dense(4, name='dense')
        c = dense(a)
        d = dense(b)
        e = keras.layers.Dropout(0.5, name='dropout')(c)

        model = keras.models.Model([a, b], [d, e])
        extract_metrics = saving_utils.extract_model_metrics(model)
        # Before compile there are no metrics to extract.
        self.assertIsNone(extract_metrics)

        extract_metric_names = [
            'dense_binary_accuracy', 'dropout_binary_accuracy',
            'dense_mean_squared_error', 'dropout_mean_squared_error'
        ]
        if tf2.enabled():
            extract_metric_names.extend(['dense_mae', 'dropout_mae'])
        else:
            extract_metric_names.extend(
                ['dense_mean_absolute_error', 'dropout_mean_absolute_error'])

        model_metric_names = ['loss', 'dense_loss', 'dropout_loss'
                              ] + extract_metric_names
        model.compile(loss='mae',
                      metrics=[
                          keras.metrics.BinaryAccuracy(), 'mae',
                          keras.metrics.mean_squared_error
                      ],
                      optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
        extract_metrics = saving_utils.extract_model_metrics(model)
        self.assertEqual(set(model_metric_names), set(model.metrics_names))
        self.assertEqual(set(extract_metric_names),
                         set(extract_metrics.keys()))
Example #11
    def test_train_sequential_with_distribution_strategy(self):
        dist = mirrored_strategy.MirroredStrategy(
            devices=['/device:GPU:0', '/device:GPU:1'])
        keras_model = simple_sequential_model()
        keras_model.compile(
            loss='categorical_crossentropy',
            metrics=[keras.metrics.CategoricalAccuracy()],
            optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.01))
        config = run_config_lib.RunConfig(tf_random_seed=_RANDOM_SEED,
                                          model_dir=self._base_dir,
                                          train_distribute=dist)
        with self.cached_session():
            est_keras = keras_lib.model_to_estimator(keras_model=keras_model,
                                                     config=config)
            before_eval_results = est_keras.evaluate(
                input_fn=get_ds_test_input_fn, steps=1)
            est_keras.train(input_fn=get_ds_train_input_fn,
                            steps=_TRAIN_SIZE // 16)
            after_eval_results = est_keras.evaluate(
                input_fn=get_ds_test_input_fn, steps=1)
            self.assertLess(after_eval_results['loss'],
                            before_eval_results['loss'])

        writer_cache.FileWriterCache.clear()
        gfile.DeleteRecursively(self._config.model_dir)
Example #12
    def test_sequential_deferred_build_with_dataset_iterators(self):
        if not context.executing_eagerly():
            # TODO(psv/fchollet): Add support for this use case in graph mode.
            return
        num_hidden = 5
        input_dim = 3
        num_classes = 2
        num_samples = 50
        steps_per_epoch = 10

        model = keras.models.Sequential()
        # We don't specify the input shape.
        model.add(keras.layers.Dense(num_hidden))
        model.add(keras.layers.Dense(num_classes))
        model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))
        self.assertEqual(len(model.layers), 2)
        self.assertEqual(len(model.weights), 0)
        self.assertFalse(model.built)

        x = array_ops.ones((num_samples, input_dim))
        y = array_ops.zeros((num_samples, num_classes))
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset.make_one_shot_iterator()

        model.fit(iterator, epochs=1, steps_per_epoch=steps_per_epoch)
        self.assertTrue(model.built)
        self.assertEqual(model.inputs[0].get_shape().as_list(),
                         [None, input_dim])
        self.assertEqual(model.outputs[0].get_shape().as_list(),
                         [None, num_classes])
        self.assertEqual(len(model.weights), 2 * 2)
Example #13
    def test_eager_dnc_optimization(self):
        batch_size = 7
        input_size = 15
        memory_config = {
            'memory_size': 27,
            'word_size': 9,
            'num_read_heads': 10,
        }
        output_size = 36

        x = tf.keras.Input(shape=(None, input_size))
        dnc_cell = DNC(output_size, controller_units=30, **memory_config)
        dnc_initial_state = dnc_cell.get_initial_state(batch_size=batch_size)
        layer = tf.keras.layers.RNN(dnc_cell)
        y = layer(x, initial_state=dnc_initial_state)

        model = tf.keras.models.Model(x, y)
        model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                      loss='mse',
                      run_eagerly=True)
        model.train_on_batch(np.zeros((batch_size, 5, input_size)),
                             np.zeros((batch_size, output_size)))
        self.assertEqual(model.output_shape[1], output_size)
Example #14
    def test_train_with_tf_optimizer(self):
        for model_type in ['sequential', 'functional']:
            keras_model, (_, _), (
                _, _
            ), train_input_fn, eval_input_fn = get_resource_for_simple_model(
                model_type=model_type, is_evaluate=True)
            keras_model.compile(
                loss='categorical_crossentropy',
                optimizer=rmsprop.RMSPropOptimizer(1e-3),
                metrics=['mse', keras.metrics.categorical_accuracy])

            with self.test_session():
                est_keras = keras_lib.model_to_estimator(
                    keras_model=keras_model, config=self._config)
                before_eval_results = est_keras.evaluate(
                    input_fn=eval_input_fn, steps=1)
                est_keras.train(input_fn=train_input_fn,
                                steps=_TRAIN_SIZE // 16)
                after_eval_results = est_keras.evaluate(input_fn=eval_input_fn,
                                                        steps=1)
                self.assertLess(after_eval_results['loss'],
                                before_eval_results['loss'])

            writer_cache.FileWriterCache.clear()
            gfile.DeleteRecursively(self._config.model_dir)
Example #15
    def test_builtin_rnn_cell_serialization(self):
        for cell_class in [
                keras.layers.SimpleRNNCell, keras.layers.GRUCell,
                keras.layers.LSTMCell
        ]:
            # Test basic case.
            x = keras.Input((None, 5))
            cell = cell_class(32)
            layer = keras.layers.RNN(cell)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.compile(
                optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                loss='mse')

            # Test basic case serialization.
            x_np = np.random.random((6, 5, 5))
            y_np = model.predict(x_np)
            weights = model.get_weights()
            config = layer.get_config()
            layer = keras.layers.RNN.from_config(config)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.set_weights(weights)
            y_np_2 = model.predict(x_np)
            self.assertAllClose(y_np, y_np_2, atol=1e-4)

            # Test stacking.
            cells = [cell_class(8), cell_class(12), cell_class(32)]
            layer = keras.layers.RNN(cells)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.compile(
                optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                loss='mse')

            # Test stacked RNN serialization.
            x_np = np.random.random((6, 5, 5))
            y_np = model.predict(x_np)
            weights = model.get_weights()
            config = layer.get_config()
            layer = keras.layers.RNN.from_config(config)
            y = layer(x)
            model = keras.models.Model(x, y)
            model.set_weights(weights)
            y_np_2 = model.predict(x_np)
            self.assertAllClose(y_np, y_np_2, atol=1e-4)
Example #16
  def test_nested_input_output(self):
    batch = 10
    t = 5
    i1, i2, i3 = 3, 4, 5
    o1, o2, o3 = 2, 3, 4

    cell = NestedCell(o1, o2, o3)
    rnn = keras.layers.RNN(cell)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))

    outputs = rnn((input_1, input_2))

    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [None, o1])
    self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])

    model = keras.models.Model((input_1, input_2), outputs)
    model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                  loss='mse')
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
    self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])

    cell = NestedCell(o1, o2, o3, use_tuple=True)

    rnn = keras.layers.RNN(cell)

    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))

    outputs = rnn(NestedInput(t1=input_1, t2=input_2))

    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [None, o1])
    self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2], outputs)
    model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                  loss='mse')
    model.train_on_batch(
        [np.zeros((batch, t, i1)),
         np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
    self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])
Example #17
 def test_ops_with_var_and_rmsprop(self):
     var_list = [
         deo.get_variable('sp_var', initializer=0.0, dim=2),
     ]
     opt_list = [
         rmsprop.RMSPropOptimizer(0.1),
     ]
     self.common_run_context(var_list, opt_list, name='rmsprop_test')
Example #18
def get_multiple_optimizers():
    return [
        adagrad.AdagradOptimizer(0.1),
        adam.AdamOptimizer(0.1),
        ftrl.FtrlOptimizer(0.1),
        momentum.MomentumOptimizer(0.1, 0.1),
        rmsprop.RMSPropOptimizer(0.1)
    ]
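
A usage sketch for the list above (assumptions: TF1 graph mode via tf.compat.v1; the variable and loss are illustrative). Each optimizer gets its own update op, and slot variables do not collide because each optimizer type uses a distinct default name:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

var = tf.Variable(3.0)
loss = tf.square(var - 1.0)  # minimum at var == 1.0
train_ops = [opt.minimize(loss) for opt in get_multiple_optimizers()]
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_ops)  # one update step from every optimizer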
Example #19
  def test_high_dimension_RNN(self):
    # Basic test case.
    unit_a = 10
    unit_b = 20
    input_a = 5
    input_b = 10
    batch = 32
    time_step = 4

    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    layer = keras.layers.RNN(cell)
    y = layer(x)

    self.assertEqual(cell.state_size.as_list(), [unit_a, unit_b])

    if not context.executing_eagerly():
      init_state = layer.get_initial_state(x)
      self.assertEqual(len(init_state), 1)
      self.assertEqual(init_state[0].get_shape().as_list(),
                       [None, unit_a, unit_b])

    model = keras.models.Model(x, y)
    model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                  loss='mse')
    model.train_on_batch(
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a, unit_b)))
    self.assertEqual(model.output_shape, (None, unit_a, unit_b))

    # Test stacking.
    cells = [
        Minimal2DRNNCell(unit_a, unit_b),
        Minimal2DRNNCell(unit_a * 2, unit_b * 2),
        Minimal2DRNNCell(unit_a * 4, unit_b * 4)
    ]
    layer = keras.layers.RNN(cells)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                  loss='mse')
    model.train_on_batch(
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a * 4, unit_b * 4)))
    self.assertEqual(model.output_shape, (None, unit_a * 4, unit_b * 4))
Example #20
    def test_sequential_nesting(self):
        model = _get_small_mlp(4, 3)
        inner_model = _get_small_mlp(4, 5)
        model.add(inner_model)

        model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))
        x = np.random.random((2, 6))
        y = np.random.random((2, 5))
        model.fit(x, y, epochs=1)
Example #21
    def test_pretrained_weights(self):
        keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()
        keras_model.compile(
            loss='categorical_crossentropy',
            optimizer=rmsprop.RMSPropOptimizer(1e-3),
            metrics=['mse', keras.metrics.categorical_accuracy])

        keras_model.train_on_batch(np.random.random((10, ) + _INPUT_SIZE),
                                   np.random.random((10, _NUM_CLASS)))
        weights = keras_model.get_weights()
        keras_model, (_, _), (_, _), _, _ = get_resource_for_simple_model()
        keras_model.set_weights(weights)
        keras_model.compile(
            loss='categorical_crossentropy',
            optimizer=rmsprop.RMSPropOptimizer(1e-3),
            metrics=['mse', keras.metrics.categorical_accuracy])
        keras.estimator.model_to_estimator(keras_model=keras_model,
                                           config=self._config)
Example #22
 def get_model():
     if deferred:
         model = _get_small_mlp(10, 4)
     else:
         model = _get_small_mlp(10, 4, input_dim=3)
     model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
     return model
Example #23
            def create_model():
                model = keras.Sequential()
                model.add(keras.layers.Dense(10, activation='relu'))
                model.add(keras.layers.Dense(4, activation='softmax'))

                model.compile(optimizer=rmsprop.RMSPropOptimizer(1e-3),
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])
                return model
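
A usage sketch (the feature count and labels are assumed for illustration): because the Sequential model above is built lazily, the first fit call fixes its input shape.

model = create_model()
x = np.random.random((16, 7))   # 7 input features, chosen arbitrarily
y = keras.utils.to_categorical(
    np.random.randint(0, 4, size=(16,)), num_classes=4)
model.fit(x, y, epochs=1, verbose=0)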
Example #24
    def test_build_before_fit(self):
        # Fix for b/112433577
        model = testing_utils.get_small_sequential_mlp(4, 5)
        model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3))

        model.build((None, 6))

        x = np.random.random((2, 6))
        y = np.random.random((2, 5))
        model.fit(x, y, epochs=1)
Example #25
    def test_globalpooling_1d_masking_support(self):
        model = keras.Sequential()
        model.add(keras.layers.Masking(mask_value=0., input_shape=(3, 4)))
        model.add(keras.layers.GlobalAveragePooling1D())
        model.compile(loss='mae', optimizer=rmsprop.RMSPropOptimizer(0.001))

        model_input = np.random.random((2, 3, 4))
        model_input[0, 1:, :] = 0
        output = model.predict(model_input)
        self.assertAllClose(output[0], model_input[0, 0, :])
Example #26
  def test_sequential_nesting(self):
    model = testing_utils.get_small_sequential_mlp(4, 3)
    inner_model = testing_utils.get_small_sequential_mlp(4, 5)
    model.add(inner_model)

    model.compile(loss='mse', optimizer=rmsprop.RMSPropOptimizer(1e-3),
                  run_eagerly=testing_utils.should_run_eagerly())
    x = np.random.random((2, 6))
    y = np.random.random((2, 5))
    model.fit(x, y, epochs=1)
Example #27
  def test_numpy_with_sample_weights(self, distribution):
    model = get_model()
    optimizer = rmsprop.RMSPropOptimizer(learning_rate=0.001)
    loss = 'mse'
    model.compile(optimizer, loss, distribute=distribution)

    inputs = np.zeros((10, 3), np.float32)
    targets = np.zeros((10, 4), np.float32)
    sample_weights = np.ones((10), np.float32)

    model.fit(inputs, targets, sample_weight=sample_weights, epochs=1,
              steps_per_epoch=2, verbose=1)
Example #28
  def _get_optimizer(self):

    lr = self.lr = self.setup_lr()
    # tf.summary.scalar('lr', self.lr)
    optimizer_type = self.optimizer_type
    if optimizer_type == "adam":
      opt = adam.AdamOptimizer(lr)
    elif optimizer_type == "sgd":
      opt = gradient_descent.GradientDescentOptimizer(lr)
    elif optimizer_type == "rmsprop":
      opt = rmsprop.RMSPropOptimizer(lr)
    else:
      raise ValueError("Unsupported optimizer: %s" % optimizer_type)
    return opt
Example #29
    def test_minimal_rnn_cell_non_layer_multiple_states(self):
        class MinimalRNNCell(object):
            def __init__(self, units, input_dim):
                self.units = units
                self.state_size = (units, units)
                self.kernel = keras.backend.variable(
                    np.random.random((input_dim, units)))

            def call(self, inputs, states):
                prev_output_1 = states[0]
                prev_output_2 = states[1]
                output = keras.backend.dot(inputs, self.kernel)
                output += prev_output_1
                output -= prev_output_2
                return output, [output * 2, output * 3]

        # Basic test case.
        cell = MinimalRNNCell(32, 5)
        x = keras.Input((None, 5))
        layer = keras.layers.RNN(cell)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                      loss='mse')
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

        # Test stacking.
        cells = [
            MinimalRNNCell(8, 5),
            MinimalRNNCell(16, 8),
            MinimalRNNCell(32, 16)
        ]
        layer = keras.layers.RNN(cells)
        self.assertEqual(layer.cell.state_size, (8, 8, 16, 16, 32, 32))
        self.assertEqual(layer.cell.output_size, 32)
        y = layer(x)
        model = keras.models.Model(x, y)
        model.compile(optimizer=rmsprop.RMSPropOptimizer(learning_rate=0.001),
                      loss='mse')
        model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
Example #30
 def _GetOptimizer(self, opt):
   if opt == "adagrad":
     return adagrad.AdagradOptimizer(learning_rate=1e-2)
   elif opt == "adam":
     return adam.AdamOptimizer(learning_rate=1e-2)
   elif opt == "rmsprop":
     return rmsprop.RMSPropOptimizer(learning_rate=1e-2)
   elif opt == "momentum":
     return momentum.MomentumOptimizer(learning_rate=1e-2, momentum=0.9)
   elif opt == "sgd":
     return gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)
   else:
     raise ValueError("Unsupported optimizer: %s" % opt)
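
A standalone sketch of a single RMSProp update (assumptions: TF1 graph mode via tf.compat.v1; rmsprop is tensorflow.python.training.rmsprop, as in the snippets above):

import tensorflow.compat.v1 as tf
from tensorflow.python.training import rmsprop
tf.disable_v2_behavior()

var = tf.Variable(2.0)
loss = tf.square(var)  # gradient is 2 * var
train_op = rmsprop.RMSPropOptimizer(learning_rate=1e-2).minimize(loss)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  # The step size is scaled by the running mean of squared gradients,
  # so var moves slightly toward the minimum at 0.
  print(sess.run(var))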