Example #1
    def test_LSTM_runtime_with_cond(self):
        # This test demonstrates the Grappler plugin's graph rewrite when the
        # branch functions return different numbers of internal states.
        layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)

        inputs = keras.layers.Input(shape=[self.timestep, self.input_shape],
                                    dtype=dtypes.float32)

        zeros = array_ops.zeros([self.batch, self.output_shape])
        dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
        a = constant_op.constant(0)
        b = constant_op.constant(1)
        # a < b is always true, so the LSTM branch is always taken; the dummy
        # branch only supplies a matching output structure for the cond.
        outputs, runtime = control_flow_ops.cond(
            gen_math_ops.less(a, b),
            lambda: layer(inputs),
            lambda: (zeros, dummy_runtime))

        # Expand the runtime so that it is a 1D tensor instead of a scalar;
        # a Keras model does not work with a scalar model output, especially
        # during aggregation.
        runtime = keras.layers.Lambda(
            lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
        model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
        self._test_runtime_with_model(model)
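These snippets come from TensorFlow's internal Keras LSTM tests, so they depend on private modules rather than the public `tf.*` API; `_config` and `_test_runtime_with_model` are defined elsewhere in the same test file. A plausible set of imports, assuming a TF 2.x source tree where `rnn` is the `recurrent_v2` layer module, would be:

from tensorflow.python import keras
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.layers import recurrent_v2 as rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent

`rnn._runtime(...)` presumably returns a scalar float32 constant tensor tagging which kernel ran, which is why the snippet above has to expand its dimensions before using it as a model output.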
Example #2
    def test_unifiedLSTM_with_cond(self):
        # This test demonstrates the Grappler plugin's graph rewrite when the
        # branch functions return different numbers of internal states.
        input_shape = 10
        rnn_state_size = 8
        output_shape = 8
        timestep = 4
        batch = 100
        epoch = 1

        with self.cached_session(config=_config, use_gpu=True) as sess:
            (x_train, y_train), _ = testing_utils.get_test_data(
                train_samples=batch,
                test_samples=0,
                input_shape=(timestep, input_shape),
                num_classes=output_shape)
            y_train = keras.utils.to_categorical(y_train, output_shape)

            layer = rnn.LSTM(rnn_state_size, return_runtime=True)

            inputs = array_ops.placeholder(dtypes.float32,
                                           shape=(None, timestep, input_shape),
                                           name='inputs')
            predict = array_ops.placeholder(dtypes.float32,
                                            shape=(None, output_shape),
                                            name='predict')

            zeros = array_ops.zeros([batch, output_shape])
            dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
            a = constant_op.constant(0)
            b = constant_op.constant(1)
            # a < b is always true, so the LSTM branch is always taken; the
            # dummy branch only supplies a matching output structure.
            outputs, runtime = control_flow_ops.cond(
                gen_math_ops.less(a, b),
                lambda: layer(inputs),
                lambda: (zeros, dummy_runtime))
            loss = losses.softmax_cross_entropy(predict, outputs)
            optimizer = gradient_descent.GradientDescentOptimizer(0.001)
            train_op = optimizer.minimize(loss)

            sess.run([variables.global_variables_initializer()])
            existing_loss = 0

            for _ in range(epoch):
                loss_value, _, runtime_value = sess.run(
                    [loss, train_op, runtime], {
                        inputs: x_train,
                        predict: y_train
                    })
                if test.is_gpu_available():
                    self.assertEqual(runtime_value, rnn._RUNTIME_GPU)
                else:
                    self.assertEqual(runtime_value, rnn._RUNTIME_CPU)
                # Make sure the loss changes every epoch, i.e. the layer
                # weights are actually being updated.
                self.assertNotEqual(existing_loss, loss_value)
                existing_loss = loss_value
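The core trick in both tests is that `cond` requires its branches to return matching structures: the dummy branch exists only to give the graph rewriter a second branch to reconcile, while `0 < 1` guarantees the LSTM branch is the one that executes. A minimal sketch of the same pattern using only the public API (the names here are illustrative, not from the original tests):

import tensorflow as tf

x = tf.ones([2, 3])
layer = tf.keras.layers.Dense(4)

a = tf.constant(0)
b = tf.constant(1)

# Both branches must return the same structure and dtypes; the zeros branch
# is a structural stand-in and is never taken because 0 < 1 is always true.
out = tf.cond(tf.less(a, b),
              lambda: layer(x),
              lambda: tf.zeros([2, 4]))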