def test_dataset_creator_model_fit_without_strategy(
            self, use_input_options):
        model = sequential.Sequential([core_layers.Dense(10)])
        model.compile(gradient_descent.SGD(), loss="mse")

        input_options = distribute_lib.InputOptions(
        ) if use_input_options else None
        history = model.fit(dataset_creator.DatasetCreator(
            self._get_dataset_fn(), input_options),
                            epochs=10,
                            steps_per_epoch=10,
                            verbose=0)
        self.assertLen(history.history["loss"], 10)
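The `self._get_dataset_fn()` helper used above (and again in Example #13) is not shown. A minimal sketch of what it could look like, assuming it simply returns a callable that builds a small repeated `tf.data.Dataset`; the data shapes and names here are hypothetical.

import numpy as np
import tensorflow as tf

def _get_dataset_fn(self):
  """Hypothetical helper: returns a dataset_fn usable with DatasetCreator."""
  def dataset_fn(input_context):
    # DatasetCreator passes a tf.distribute.InputContext; this sketch
    # ignores it and builds the same small dataset everywhere.
    x = np.random.random((64, 10)).astype(np.float32)
    y = np.random.random((64, 10)).astype(np.float32)
    return tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(8)
  return dataset_fn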
Example #2
 def test_wide_deep_model_with_single_input(self):
   linear_model = linear.LinearModel(units=1)
   dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
   wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
   inputs = np.random.uniform(low=-5, high=5, size=(64, 3))
   output = .3 * inputs[:, 0]
   wide_deep_model.compile(
       optimizer=['sgd', 'adam'],
       loss='mse',
       metrics=[],
       run_eagerly=testing_utils.should_run_eagerly(),
       run_distributed=testing_utils.should_run_distributed())
   wide_deep_model.fit(inputs, output, epochs=5)
Example #3
    def test_config_with_custom_objects(self):
        def my_activation(x):
            return x

        linear_model = linear.LinearModel(units=1)
        dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
        wide_deep_model = wide_deep.WideDeepModel(linear_model,
                                                  dnn_model,
                                                  activation=my_activation)
        config = wide_deep_model.get_config()
        cloned_wide_deep_model = wide_deep.WideDeepModel.from_config(
            config, custom_objects={'my_activation': my_activation})
        self.assertEqual(cloned_wide_deep_model.activation, my_activation)
Example #4
 def test_wide_deep_model_as_layer(self):
   linear_model = linear.LinearModel(units=1)
   dnn_model = sequential.Sequential([core.Dense(units=1)])
   linear_input = input_layer.Input(shape=(3,), name='linear')
   dnn_input = input_layer.Input(shape=(5,), name='dnn')
   wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
   wide_deep_output = wide_deep_model((linear_input, dnn_input))
   input_b = input_layer.Input(shape=(1,), name='b')
   output_b = core.Dense(units=1)(input_b)
   model = training.Model(
       inputs=[linear_input, dnn_input, input_b],
       outputs=[wide_deep_output + output_b])
   linear_input_np = np.random.uniform(low=-5, high=5, size=(64, 3))
   dnn_input_np = np.random.uniform(low=-5, high=5, size=(64, 5))
   input_b_np = np.random.uniform(low=-5, high=5, size=(64,))
   output_np = linear_input_np[:, 0] + .2 * dnn_input_np[:, 1] + input_b_np
   model.compile(
       optimizer='sgd',
       loss='mse',
       metrics=[],
       run_eagerly=testing_utils.should_run_eagerly())
   model.fit([linear_input_np, dnn_input_np, input_b_np], output_np, epochs=5)
Example #5
  def test_converted_call_whitelisted_method_via_owner(self):

    opts = converter.ConversionOptions()

    model = sequential.Sequential([
        core.Dense(2)
    ])

    x = api.converted_call('call', model, opts,
                           (constant_op.constant([[0.0]]),), {'training': True})

    self.evaluate(variables.global_variables_initializer())
    self.assertAllEqual([[0.0, 0.0]], self.evaluate(x))
Example #6
        def build_layer_fn(x, w_initializer, b_initializer):
            x = keras_core.Flatten()(x)
            layer = keras_core.Dense(units=3,
                                     kernel_initializer=w_initializer,
                                     bias_initializer=b_initializer)
            net = layer.apply(x)
            expected_normalized_vars = {
                'keras.layers.Dense.kernel': layer.kernel
            }
            expected_not_normalized_vars = {
                'keras.layers.Dense.bias': layer.bias
            }

            return net, expected_normalized_vars, expected_not_normalized_vars
Example #7
 def test_wide_deep_model(self):
     linear_model = linear.LinearModel(units=1)
     dnn_model = sequential.Sequential([core.Dense(units=1, input_dim=3)])
     wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
     linear_inp = np.random.uniform(low=-5, high=5, size=(64, 2))
     dnn_inp = np.random.uniform(low=-5, high=5, size=(64, 3))
     inputs = [linear_inp, dnn_inp]
     output = .3 * linear_inp[:, 0] + .2 * dnn_inp[:, 1]
     wide_deep_model.compile(optimizer=['sgd', 'adam'],
                             loss='mse',
                             metrics=[],
                             run_eagerly=testing_utils.should_run_eagerly())
     wide_deep_model.fit(inputs, output, epochs=5)
     self.assertTrue(wide_deep_model.built)
Example #8
def mnist_model(input_shape):
    """Creates a MNIST model."""
    model = sequential_model_lib.Sequential()

    # Adding custom pass-through layer to visualize input images.
    model.add(LayerForImageSummary())

    model.add(
        conv_layer_lib.Conv2D(32,
                              kernel_size=(3, 3),
                              activation='relu',
                              input_shape=input_shape))
    model.add(conv_layer_lib.Conv2D(64, (3, 3), activation='relu'))
    model.add(pool_layer_lib.MaxPooling2D(pool_size=(2, 2)))
    model.add(layer_lib.Dropout(0.25))
    model.add(layer_lib.Flatten())
    model.add(layer_lib.Dense(128, activation='relu'))
    model.add(layer_lib.Dropout(0.5))
    model.add(layer_lib.Dense(NUM_CLASSES, activation='softmax'))

    # Adding custom pass-through layer for summary recording.
    model.add(LayerForHistogramSummary())
    return model
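`LayerForImageSummary` and `LayerForHistogramSummary` are custom layers defined elsewhere in the original file. A rough sketch, assuming they are identity layers that only emit TensorBoard summaries as a side effect (the summary tags and the fixed step are made up):

import tensorflow as tf

class LayerForImageSummary(tf.keras.layers.Layer):
  """Hypothetical pass-through layer that records the input images."""

  def call(self, inputs):
    tf.summary.image('input_images', inputs, step=0)  # fixed step for the sketch
    return inputs  # identity: the model's computation is unchanged


class LayerForHistogramSummary(tf.keras.layers.Layer):
  """Hypothetical pass-through layer that records an activation histogram."""

  def call(self, inputs):
    tf.summary.histogram('output_histogram', inputs, step=0)
    return inputs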
Example #9
  def test_converted_call_whitelisted_method_via_owner(self):

    opts = converter.ConversionOptions()

    model = sequential.Sequential([
        core.Dense(2)
    ])

    x = api.converted_call('call', model, opts,
                           constant_op.constant([[0.0]]), training=True)

    with self.cached_session() as sess:
      sess.run(variables.global_variables_initializer())
      self.assertAllEqual([[0.0, 0.0]], sess.run(x))
Example #10
 def test_wide_deep_model(self, distribution, data_fn):
     with distribution.scope():
         linear_model = linear.LinearModel(units=1)
         dnn_model = sequential.Sequential([core.Dense(units=1)])
         wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
         linear_opt = gradient_descent.SGD(learning_rate=0.05)
         dnn_opt = adagrad.Adagrad(learning_rate=0.1)
         wide_deep_model.compile(optimizer=[linear_opt, dnn_opt],
                                 loss='mse')
         if data_fn == 'numpy':
             inputs, output = get_numpy()
             hist = wide_deep_model.fit(inputs, output, epochs=5)
         else:
             hist = wide_deep_model.fit(get_dataset(), epochs=5)
         self.assertLess(hist.history['loss'][4], 0.2)
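`get_numpy()` and `get_dataset()` are module-level helpers that are not shown. A plausible sketch, assuming they build a tiny two-input regression problem for the WideDeepModel (the shapes and the target relationship are assumptions):

import numpy as np
import tensorflow as tf

def get_numpy():
  # One input for the linear part, one for the DNN part of the WideDeepModel.
  linear_inputs = np.random.uniform(low=-5., high=5., size=(64, 2))
  dnn_inputs = np.random.uniform(low=-5., high=5., size=(64, 3))
  output = .3 * linear_inputs[:, 0] + .2 * dnn_inputs[:, 1]
  return [linear_inputs, dnn_inputs], output

def get_dataset():
  inputs, output = get_numpy()
  return tf.data.Dataset.from_tensor_slices(
      ((inputs[0], inputs[1]), output)).batch(16)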
Example #11
    def testObjectMetadata(self):
        with context.eager_mode():
            checkpoint_directory = self.get_temp_dir()
            checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
            dense = core.Dense(1)
            checkpoint = trackable_utils.Checkpoint(dense=dense)
            dense(constant_op.constant([[1.]]))
            save_path = checkpoint.save(checkpoint_prefix)

        objects = trackable_utils.object_metadata(save_path)
        all_variable_names = []
        for obj in objects.nodes:
            for attribute in obj.attributes:
                all_variable_names.append(attribute.full_name)
        self.assertIn("dense/kernel", all_variable_names)
Example #12
  def __init__(self,
               num_features,
               input_window_size,
               output_window_size,
               num_units=128):
    """Construct the LSTM prediction model.

    Args:
      num_features: Number of input features per time step.
      input_window_size: Number of past time steps of data to look at when doing
        the regression.
      output_window_size: Number of future time steps to predict. Note that
        setting it to > 1 empirically seems to give a better fit.
      num_units: The number of units in the encoder and decoder LSTM cells.
    """
    super(LSTMPredictionModel, self).__init__()
    self._encoder = lstm_ops.LSTMBlockFusedCell(
        num_units=num_units, name="encoder")
    self._decoder = lstm_ops.LSTMBlockFusedCell(
        num_units=num_units, name="decoder")
    self._mean_transform = core.Dense(num_features,
                                      name="mean_transform")
    self._covariance_transform = core.Dense(num_features,
                                            name="covariance_transform")
Example #13
    def test_dataset_creator_usage_in_parameter_server_model_fit(self):
        cluster_def = multi_worker_test_base.create_in_process_cluster(
            num_workers=2, num_ps=1, rpc_layer="grpc")
        strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
            SimpleClusterResolver(ClusterSpec(cluster_def), rpc_layer="grpc"))
        with strategy.scope():
            model = sequential.Sequential([core_layers.Dense(10)])
        model.compile(gradient_descent.SGD(), loss="mse")

        history = model.fit(dataset_creator.DatasetCreator(
            self._get_dataset_fn()),
                            epochs=10,
                            steps_per_epoch=10,
                            verbose=0)
        self.assertLen(history.history["loss"], 10)
Example #14
    def testOptimizerWithCallbacks(self):
        np.random.seed(1331)
        input_np = np.random.random((10, 3))
        output_np = np.random.random((10, 4))
        a = input_layer.Input(shape=(3, ), name='input_a')
        model = sequential.Sequential()
        model.add(core.Dense(4, name='dense'))
        model.add(core.Dropout(0.5, name='dropout'))
        model(a)
        optimizer = gradient_descent.SGD(learning_rate=0.1)
        model.compile(optimizer, loss='mse', metrics=['mae'])
        # This does not reduce the LR after the first epoch (due to low delta).
        cbks = [
            callbacks.ReduceLROnPlateau(monitor='val_loss',
                                        factor=0.1,
                                        min_delta=0,
                                        patience=1,
                                        cooldown=5)
        ]
        model.fit(input_np,
                  output_np,
                  batch_size=10,
                  validation_data=(input_np, output_np),
                  callbacks=cbks,
                  epochs=2,
                  verbose=0)
        self.assertAllClose(float(backend.get_value(model.optimizer.lr)),
                            0.1,
                            atol=1e-4)

        # This should reduce the LR after the first epoch (due to high delta).
        cbks = [
            callbacks.ReduceLROnPlateau(monitor='val_loss',
                                        factor=0.1,
                                        min_delta=10,
                                        patience=1,
                                        cooldown=5)
        ]
        model.fit(input_np,
                  output_np,
                  batch_size=10,
                  validation_data=(input_np, output_np),
                  callbacks=cbks,
                  epochs=2,
                  verbose=2)
        self.assertAllClose(float(backend.get_value(model.optimizer.lr)),
                            0.01,
                            atol=1e-4)
Example #15
    def testSubSequentialTracking(self):
        class _Subclassed(training.Model):
            def __init__(self, wrapped):
                super(_Subclassed, self).__init__()
                self._wrapped = wrapped

            def call(self, x):
                return self._wrapped(x)

        model = sequential.Sequential()
        layer = core.Dense(1)
        model.add(layer)
        model2 = _Subclassed(model)
        model2(array_ops.ones([1, 2]))
        model2.m = [model]
        self.assertIn(layer.kernel, model2.trainable_weights)
Example #16
  def _test_minimize_loss_eager(self, d):
    with d.scope():
      l = core.Dense(1, use_bias=False)

      def loss(x):
        y = array_ops.reshape(l(x), []) - array_ops.identity(1.)
        return y * y
      # TODO(isaprykin): Extract implicit_grad+get_filtered_grad_fn into a
      # common `implicit_grad` function and put it in DistributionStrategy.
      grad_fn = backprop.implicit_grad(loss)
      grad_fn = optimizer.get_filtered_grad_fn(grad_fn)

      def update(v, g):
        return v.assign_sub(0.2 * g)

      one = array_ops.identity([[1.]])

      def step():
        """Perform one optimization step."""
        # Run forward & backward to get gradients, variables list.
        g_v = d.extended.call_for_each_replica(grad_fn, args=(one,))

        # Update the variables using the gradients and the update() function.
        before_list = []
        after_list = []
        for g, v in g_v:
          fetched = d.extended.read_var(v)
          before_list.append(fetched)
          # control_dependencies irrelevant but harmless in eager execution
          with ops.control_dependencies([fetched]):
            g = d.extended.reduce_to(
                reduce_util.ReduceOp.SUM, g, destinations=v)
            with ops.control_dependencies(
                d.extended.update(v, update, args=(g,), group=False)):
              after_list.append(d.extended.read_var(v))
        return before_list, after_list

      for i in range(10):
        b, a = step()
        if i == 0:
          before, = b  # pylint: disable=unbalanced-tuple-unpacking
        after, = a  # pylint: disable=unbalanced-tuple-unpacking

      error_before = abs(before.numpy() - 1)
      error_after = abs(after.numpy() - 1)
      # Error should go down
      self.assertLess(error_after, error_before)
Example #17
    def test_end_to_end_bagged_modeling(self, output_mode, num_tokens):
        input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

        input_data = keras.Input(shape=(None, ), dtype=dtypes.int32)
        layer = category_encoding.CategoryEncoding(num_tokens=num_tokens,
                                                   output_mode=output_mode)

        weights = []
        if num_tokens is None:
            layer.set_num_elements(5)
        layer.set_weights(weights)

        int_data = layer(input_data)
        float_data = backend.cast(int_data, dtype="float32")
        output_data = core.Dense(64)(float_data)
        model = keras.Model(inputs=input_data, outputs=output_data)
        _ = model.predict(input_array)
Example #18
 def test_serialization_with_layers(self):
     activation = advanced_activations.LeakyReLU(alpha=0.1)
     layer = core.Dense(3, activation=activation)
     config = serialization.serialize(layer)
     # with custom objects
     deserialized_layer = serialization.deserialize(
         config, custom_objects={'LeakyReLU': activation})
     self.assertEqual(deserialized_layer.__class__.__name__,
                      layer.__class__.__name__)
     self.assertEqual(deserialized_layer.activation.__class__.__name__,
                      activation.__class__.__name__)
     # without custom objects
     deserialized_layer = serialization.deserialize(config)
     self.assertEqual(deserialized_layer.__class__.__name__,
                      layer.__class__.__name__)
     self.assertEqual(deserialized_layer.activation.__class__.__name__,
                      activation.__class__.__name__)
Example #19
  def test_dataset_creator_input_options(self):
    dataset_fn = lambda _: dataset_ops.DatasetV2.from_tensor_slices([1, 1])
    input_options = distribute_lib.InputOptions(
        experimental_fetch_to_device=True,
        experimental_per_replica_buffer_size=2)
    x = dataset_creator.DatasetCreator(dataset_fn, input_options=input_options)
    with collective_all_reduce_strategy.CollectiveAllReduceStrategy().scope():
      data_handler = data_adapter.get_data_handler(
          x,
          steps_per_epoch=2,
          model=sequential.Sequential([core_layers.Dense(10)]))

    # Ensuring the resulting `DistributedDatasetsFromFunction` has the right
    # options.
    self.assertTrue(data_handler._dataset._options.experimental_fetch_to_device)
    self.assertEqual(
        data_handler._dataset._options.experimental_per_replica_buffer_size, 2)
Example #20
    def test_restore_old_loss_scale_checkpoint(self):
        # Ensure a checkpoint from TF 2.2 can be loaded. The checkpoint format
        # of LossScaleOptimizer changed, but old checkpoints can still be loaded.
        opt = gradient_descent.SGD(0.1, momentum=0.1)
        opt = loss_scale_optimizer.LossScaleOptimizer(opt)
        model = sequential.Sequential([core.Dense(2, )])

        # The checkpoint and expected values were obtained from the program in
        # testdata/BUILD.
        ckpt_dir = os.path.join(flags.FLAGS['test_srcdir'].value,
                                'org_tensorflow/tensorflow/python/keras',
                                'mixed_precision/testdata/lso_ckpt_tf2.2')
        # ckpt_dir = test.test_src_dir_path(
        #     'python/keras/mixed_precision/testdata/lso_ckpt_tf2.2')
        model.load_weights(os.path.join(ckpt_dir, 'ckpt'))
        model.compile(opt,
                      'mse',
                      run_eagerly=testing_utils.should_run_eagerly())
        model(np.zeros((2, 2)))  # Create model weights
        opt._create_all_weights(model.weights)
        expected_kernel = np.array([[9.229685, 10.901115],
                                    [10.370763, 9.757362]])
        expected_slot = np.array([[10.049943, 9.917691], [10.049943,
                                                          9.917691]])
        self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)
        self.assertAllClose(
            self.evaluate(opt.get_slot(model.weights[0], 'momentum')),
            expected_slot)
        self.assertEqual(self.evaluate(opt.loss_scale), 32768)
        self.assertEqual(self.evaluate(opt.dynamic_counter), 1)

        # Check restoring works even after the model is compiled and the weights
        # have been created.
        model.fit(np.random.normal(size=(2, 2)), np.random.normal(size=(2, 2)))
        self.assertNotAllClose(self.evaluate(model.weights[0]),
                               expected_kernel)
        self.assertNotAllClose(
            self.evaluate(opt.get_slot(model.weights[0], 'momentum')),
            expected_slot)
        model.load_weights(os.path.join(ckpt_dir, 'ckpt'))
        self.assertAllClose(self.evaluate(model.weights[0]), expected_kernel)
        self.assertAllClose(
            self.evaluate(opt.get_slot(model.weights[0], 'momentum')),
            expected_slot)
        self.assertEqual(self.evaluate(opt.loss_scale), 32768)
        self.assertEqual(self.evaluate(opt.dynamic_counter), 1)
Example #21
  def test_validation_dataset_with_no_step_arg(self):
    # Create a model that learns y=Mx.
    layers = [core.Dense(1)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(1,))
    model.compile(loss="mse", optimizer="adam", metrics=["mean_absolute_error"])

    train_dataset = self.create_dataset(num_samples=200, batch_size=10)
    eval_dataset = self.create_dataset(num_samples=50, batch_size=25)

    history = model.fit(x=train_dataset, validation_data=eval_dataset, epochs=2)
    evaluation = model.evaluate(x=eval_dataset)

    # If the fit call used the entire dataset, then the final val MAE error
    # from the fit history should be equal to the final element in the output
    # of evaluating the model on the same eval dataset.
    self.assertAlmostEqual(history.history["val_mean_absolute_error"][-1],
                           evaluation[-1])
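`self.create_dataset(...)` is a helper on the test class and is not shown. A minimal sketch, assuming it builds a batched dataset for the y=Mx problem described in the comment (the slope value is made up):

import numpy as np
import tensorflow as tf

def create_dataset(self, num_samples, batch_size):
  # y = Mx with a fixed slope; the single Dense(1) model above can fit this.
  x = np.random.rand(num_samples, 1).astype(np.float32)
  y = 3. * x  # hypothetical slope M
  return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch_size)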
Example #22
  def test_training_internal_ragged_tensors(self):
    # Create a model that implements y=Mx. This is easy to learn and will
    # demonstrate appropriate gradient passing. (We have to use RaggedTensors
    # for this test, as ToSparse() doesn't support gradient propagation through
    # the layer.) TODO(b/124796939): Investigate this.
    layers = [core.Dense(2), ToRagged(padding=0), ToDense(default_value=-1)]
    model = testing_utils.get_model_from_layers(layers, input_shape=(1,))

    input_data = np.random.rand(1024, 1)
    expected_data = np.concatenate((input_data * 3, input_data * .5), axis=-1)

    model.compile(loss="mse", optimizer="adam", **get_test_mode_kwargs())
    history = model.fit(input_data, expected_data, epochs=10, verbose=0)

    # If the model trained, the loss stored at history[0] should be different
    # than the one stored at history[-1].
    self.assertNotEqual(history.history["loss"][-1], history.history["loss"][0])
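`ToRagged` and `ToDense` are small custom layers defined elsewhere in the original test file. A rough sketch of what they could look like, assuming `ToRagged` strips a padding value to produce a `tf.RaggedTensor` and `ToDense` converts back with a default value:

import tensorflow as tf

class ToRagged(tf.keras.layers.Layer):
  """Hypothetical layer: drops `padding` values to build a RaggedTensor."""

  def __init__(self, padding, **kwargs):
    super().__init__(**kwargs)
    self._padding = padding

  def call(self, inputs):
    mask = tf.not_equal(inputs, self._padding)
    return tf.ragged.boolean_mask(inputs, mask)


class ToDense(tf.keras.layers.Layer):
  """Hypothetical layer: converts a RaggedTensor back to a dense tensor."""

  def __init__(self, default_value, **kwargs):
    super().__init__(**kwargs)
    self._default_value = default_value

  def call(self, inputs):
    return inputs.to_tensor(default_value=self._default_value)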
Example #23
  def test_saving_model_with_custom_object(self):
    with generic_utils.custom_object_scope(), self.cached_session():

      @generic_utils.register_keras_serializable()
      class CustomLoss(losses.MeanSquaredError):
        pass

      model = sequential.Sequential(
          [core.Dense(units=1, input_shape=(1,))])
      model.compile(optimizer='sgd', loss=CustomLoss())
      model.fit(np.zeros([10, 1]), np.zeros([10, 1]))

      temp_dir = self.get_temp_dir()
      filepath = os.path.join(temp_dir, 'saving')
      model.save(filepath)

      # Make sure the model can be correctly loaded back.
      _ = save.load_model(filepath, compile=True)
Example #24
  def testHessianOfVariables(self, use_pfor):
    model = core.Dense(1)
    model.build([None, 2])

    def _loss(*unused_args):
      input_value = constant_op.constant([[-0.5, 1.], [0.5, -1.]])
      target = constant_op.constant([[-1.], [2.]])
      return math_ops.reduce_sum((model(input_value) - target) ** 2.)

    kernel_hess, bias_hess = _forward_over_back_hessian(
        _loss, [model.kernel, model.bias], use_pfor=use_pfor,
        dtype=[dtypes.float32, dtypes.float32])
    # 3 total parameters, the whole hessian is the 3x3 concatenation
    self.assertEqual([3, 2, 1], kernel_hess.shape)
    self.assertEqual([3, 1], bias_hess.shape)
    full_hessian = array_ops.concat(
        [array_ops.reshape(kernel_hess, [3, 2]), bias_hess], axis=1)
    # The full Hessian should be symmetric.
    self.assertAllClose(full_hessian, array_ops.transpose(full_hessian))
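`_forward_over_back_hessian` is a helper from the original test file. A simplified sketch of the forward-over-reverse idea it relies on, assuming an eager Python loop over basis-vector tangents instead of the pfor machinery (the `use_pfor` and `dtype` arguments are ignored here):

import tensorflow as tf

def _forward_over_back_hessian(loss_fn, params, use_pfor=False, dtype=None):
  """Hypothetical sketch: Hessian rows via forward-over-reverse autodiff."""
  del use_pfor, dtype  # the sketch loops in Python instead of using pfor
  total = sum(int(tf.size(p)) for p in params)
  rows = [[] for _ in params]  # one list of Hessian rows per parameter
  for i in range(total):
    # The i-th basis vector, split and reshaped to match each parameter.
    flat = tf.one_hot(i, total)
    tangents, offset = [], 0
    for p in params:
      n = int(tf.size(p))
      tangents.append(tf.reshape(flat[offset:offset + n], p.shape))
      offset += n
    with tf.autodiff.ForwardAccumulator(params, tangents) as acc:
      with tf.GradientTape() as tape:
        loss = loss_fn()
      grads = tape.gradient(loss, params)
    for j, grad in enumerate(grads):
      rows[j].append(acc.jvp(grad))  # one Hessian row, shaped like the param
  return [tf.stack(r) for r in rows]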
Example #25
  def test_end_to_end_bagged_modeling(self, output_mode, max_tokens):
    tfidf_data = np.array([.03, .5, .25, .2, .125])
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(max_tokens=max_tokens, output_mode=output_mode)

    weights = []
    if max_tokens is None:
      layer.set_num_elements(5)
    if output_mode == category_encoding.TFIDF:
      weights.append(tfidf_data)

    layer.set_weights(weights)

    int_data = layer(input_data)
    float_data = backend.cast(int_data, dtype="float32")
    output_data = core.Dense(64)(float_data)
    model = keras.Model(inputs=input_data, outputs=output_data)
    _ = model.predict(input_array)
Example #26
  def test_vectorized_map_example_2(self):
    batch_size = 10
    num_features = 32
    layer = keras_core.Dense(1)

    def model_fn(arg):
      with backprop.GradientTape() as g:
        inp, label = arg
        inp = array_ops.expand_dims(inp, 0)
        label = array_ops.expand_dims(label, 0)
        prediction = layer(inp)
        loss = nn.l2_loss(label - prediction)
      return g.gradient(loss, (layer.kernel, layer.bias))

    inputs = random_ops.random_uniform([batch_size, num_features])
    labels = random_ops.random_uniform([batch_size, 1])
    per_example_gradients = pfor_control_flow_ops.vectorized_map(
        model_fn, (inputs, labels))
    self.assertAllEqual(per_example_gradients[0].shape,
                        (batch_size, num_features, 1))
    self.assertAllEqual(per_example_gradients[1].shape, (batch_size, 1))
Example #27
  def test_dataset_creator_input_options_with_cluster_coordinator(self):
    dataset_fn = lambda _: dataset_ops.DatasetV2.from_tensor_slices([1, 1])
    input_options = distribute_lib.InputOptions(
        experimental_fetch_to_device=True,
        experimental_per_replica_buffer_size=2)
    x = dataset_creator.DatasetCreator(dataset_fn, input_options=input_options)
    strategy = self._get_parameter_server_strategy()
    with strategy.scope():
      model = sequential.Sequential([core_layers.Dense(10)])
      model._cluster_coordinator = cluster_coordinator.ClusterCoordinator(
          strategy)
      data_handler = data_adapter.get_data_handler(
          x, steps_per_epoch=2, model=model)

    iter_rv = iter(data_handler._dataset)._values[0]
    iter_rv._rebuild_on(model._cluster_coordinator._cluster.workers[0])
    distributed_iterator = iter_rv._get_values()

    # Ensuring the resulting `DistributedIterator` has the right options.
    self.assertTrue(distributed_iterator._options.experimental_fetch_to_device)
    self.assertEqual(
        distributed_iterator._options.experimental_per_replica_buffer_size, 2)
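`self._get_parameter_server_strategy()` is not shown; a plausible sketch, assuming it mirrors the in-process cluster setup used in Example #13 above:

def _get_parameter_server_strategy(self):
  # Hypothetical helper, assumed to reuse the same construction as Example #13.
  cluster_def = multi_worker_test_base.create_in_process_cluster(
      num_workers=2, num_ps=1, rpc_layer="grpc")
  return parameter_server_strategy_v2.ParameterServerStrategyV2(
      SimpleClusterResolver(ClusterSpec(cluster_def), rpc_layer="grpc"))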
Example #28
 def testIgnoreSaveCounter(self):
   checkpoint_directory = self.get_temp_dir()
   checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
   with self.cached_session() as session:
     # Create and save a model using Saver() before using a Checkpoint. This
     # generates a snapshot without the Checkpoint's `save_counter`.
     model = sequential.Sequential()
     model.add(core.Flatten(input_shape=(1,)))
     model.add(core.Dense(1))
     name_saver = saver_lib.Saver(model.trainable_variables)
     save_path = name_saver.save(
         sess=session, save_path=checkpoint_prefix, global_step=1)
     # Checkpoint.restore must successfully load that checkpoint.
     ckpt = trackable_utils.Checkpoint(model=model)
     status = ckpt.restore(save_path)
     status.assert_existing_objects_matched()
     # It should, however, refuse to load a checkpoint where an unrelated
     # `save_counter` variable is missing.
     model.layers[1].var = variables_lib.Variable(0., name="save_counter")
     status = ckpt.restore(save_path)
     with self.assertRaises(AssertionError):
       status.assert_existing_objects_matched()
Example #29
  def test_wide_deep_model_with_multi_outputs(self):
    inp = input_layer.Input(shape=(1,), name='linear')
    l = linear.LinearModel(units=2, use_bias=False)(inp)
    l1, l2 = array_ops.split(l, num_or_size_splits=2, axis=1)
    linear_model = training.Model(inp, [l1, l2])
    linear_model.set_weights([np.asarray([[0.5, 0.3]])])
    h = core.Dense(units=2, use_bias=False)(inp)
    h1, h2 = array_ops.split(h, num_or_size_splits=2, axis=1)
    dnn_model = training.Model(inp, [h1, h2])
    dnn_model.set_weights([np.asarray([[0.1, -0.5]])])
    wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
    inp_np = np.asarray([[1.]])
    out1, out2 = wide_deep_model(inp_np)
    # output should be (0.5 + 0.1), and (0.3 - 0.5)
    self.assertAllClose([[0.6]], out1)
    self.assertAllClose([[-0.2]], out2)

    wide_deep_model = wide_deep.WideDeepModel(
        linear_model, dnn_model, activation='relu')
    out1, out2 = wide_deep_model(inp_np)
    # output should be relu((0.5 + 0.1)), and relu((0.3 - 0.5))
    self.assertAllClose([[0.6]], out1)
    self.assertAllClose([[0.]], out2)
Example #30
    def testVariableInitializersCanBeLifted(self):
        # The initializer is a stateful op, but using it inside a function should
        # *not* create additional dependencies.  That's what we're testing.
        layer = keras_core.Dense(1, kernel_initializer="glorot_uniform")

        @def_function.function
        def fn(x):
            # Stateful operation
            control_flow_ops.Assert(x, ["Error"])
            # Variable initialization should be lifted.  Prior to the change that
            # added this test, the lifting would crash because of an auto control dep
            # added on `x`.  Note, the error did not happen if we
            # manually created a tf.Variable outside of function and used it
            # here.  Alternatively, creating a tf.Variable inside fn() causes
            # a different sort of error that is out of scope for this test.
            return layer(ops.convert_to_tensor([[1.0, 1.0]]))

        true = ops.convert_to_tensor(True)

        concrete = fn.get_concrete_function(
            tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool))
        self.evaluate(concrete(true))
        self.evaluate(fn(True))