Example 1
    def test_LSTM_runtime_with_cond(self):
        # This test demonstrates the grappler plugin's graph rewrite when the
        # function returns a different number of internal states.
        layer = rnn.LSTM(self.rnn_state_size, return_runtime=True)

        inputs = keras.layers.Input(shape=[self.timestep, self.input_shape],
                                    dtype=dtypes.float32)

        zeros = array_ops.zeros([self.batch, self.output_shape])
        dummy_runtime = rnn._runtime(rnn._RUNTIME_UNKNOWN)
        a = constant_op.constant(0)
        b = constant_op.constant(1)
        # Will always run the lstm layer.
        outputs, runtime = control_flow_ops.cond(
            gen_math_ops.less(a, b),
            lambda: layer(inputs),
            lambda: (zeros, dummy_runtime))

        # Expand the runtime so that it is a 1D tensor instead of a scalar.
        # The TF model does not work with scalar model outputs, especially
        # during aggregation.
        runtime = keras.layers.Lambda(
            lambda x: array_ops.expand_dims(x, axis=-1))(runtime)
        model = keras.models.Model(inputs=inputs, outputs=[outputs, runtime])
        self._test_runtime_with_model(model)
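Both branches passed to control_flow_ops.cond must return structures with matching arity and dtypes; the (zeros, dummy_runtime) branch above exists only to satisfy that contract. A minimal standalone sketch of the same idiom with the public tf.cond API (tensor names are illustrative, not taken from the test):

import tensorflow as tf

x = tf.random.normal([2, 3])

# Both branches return a (tensor, string) pair so tf.cond can merge them.
outputs, runtime = tf.cond(
    tf.less(tf.constant(0), tf.constant(1)),   # the predicate is always true
    lambda: (x * 2.0, tf.constant('fast_path')),
    lambda: (tf.zeros_like(x), tf.constant('unknown')))

print(runtime.numpy())  # b'fast_path'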
Example 2
    def test_unifiedLSTM_with_cond(self):
        # This test demonstrates the grappler plugin's graph rewrite when the
        # function returns a different number of internal states.
        input_shape = 10
        rnn_state_size = 8
        output_shape = 8
        timestep = 4
        batch = 100
        epoch = 1

        with self.cached_session(config=_config, use_gpu=True) as sess:
            (x_train, y_train), _ = testing_utils.get_test_data(
                train_samples=batch,
                test_samples=0,
                input_shape=(timestep, input_shape),
                num_classes=output_shape)
            y_train = keras.utils.to_categorical(y_train, output_shape)

            layer = keras.layers.UnifiedLSTM(rnn_state_size,
                                             return_runtime=True)

            inputs = array_ops.placeholder(dtypes.float32,
                                           shape=(None, timestep, input_shape),
                                           name='inputs')
            predict = array_ops.placeholder(dtypes.float32,
                                            shape=(None, output_shape),
                                            name='predict')

            zeros = array_ops.zeros([batch, output_shape])
            dummy_runtime = constant_op.constant('unknown',
                                                 dtype=dtypes.string,
                                                 name='runtime')
            a = constant_op.constant(0)
            b = constant_op.constant(1)
            # Will always run the lstm layer.
            outputs, runtime = control_flow_ops.cond(
                gen_math_ops.less(a, b),
                lambda: layer(inputs),
                lambda: (zeros, dummy_runtime))
            loss = losses.softmax_cross_entropy(predict, outputs)
            optimizer = gradient_descent.GradientDescentOptimizer(0.001)
            train_op = optimizer.minimize(loss)

            sess.run([variables.global_variables_initializer()])
            existing_loss = 0

            for _ in range(epoch):
                loss_value, _, runtime_value = sess.run(
                    [loss, train_op, runtime], {
                        inputs: x_train,
                        predict: y_train
                    })
                if test.is_gpu_available():
                    self.assertEqual(runtime_value, b'cudnn')
                else:
                    self.assertEqual(runtime_value, b'cpu')
                # Make sure the loss is updated for every epoch
                # (layer weights properly updated).
                self.assertNotEqual(existing_loss, loss_value)
                existing_loss = loss_value
Example 3
  def incr_loss_scale():
    float_max = (3.4e+38) / self._incr_ratio
    new_loss_scale = control_flow_ops.cond(
        gen_math_ops.less(self._loss_scale, float_max),
        lambda: self._loss_scale * self._incr_ratio,
        lambda: self._loss_scale)
    update_op = state_ops.assign(self._loss_scale, new_loss_scale)
    # When loss_scale is updated, both good and bad steps are reset.
    return control_flow_ops.group(update_op, self._reset_stats())
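The helper above is the "increase" half of dynamic loss scaling: the scale is only multiplied by _incr_ratio while the grown value still fits in float32. A rough eager-mode sketch of the same guard, with made-up variable and ratio names:

import tensorflow as tf

loss_scale = tf.Variable(2.0 ** 15, dtype=tf.float32)
incr_ratio = 2.0
float_max = 3.4e+38 / incr_ratio

# Only grow the scale while the grown value would still be representable.
new_scale = tf.cond(tf.less(loss_scale, float_max),
                    lambda: loss_scale * incr_ratio,
                    lambda: loss_scale.read_value())
loss_scale.assign(new_scale)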
Example 4
  def test_unifiedRNN_with_cond(self):
    # This test demonstrates the grappler plugin's graph rewrite when the
    # function returns a different number of internal states.
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 1

    with self.cached_session(config=self.config, use_gpu=True) as sess:
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train, output_shape)

      layer = UnifiedLSTM(rnn_state_size)

      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape), name='inputs')
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape), name='predict')

      zeros = array_ops.zeros([batch, output_shape])
      dummy_runtime = constant_op.constant(
          'unknown', dtype=dtypes.string, name='runtime')
      a = constant_op.constant(0)
      b = constant_op.constant(1)
      # Will always run the lstm layer.
      outputs, runtime = control_flow_ops.cond(
          gen_math_ops.less(a, b),
          lambda: layer(inputs),
          lambda: (zeros, dummy_runtime))
      loss = losses.softmax_cross_entropy(predict, outputs)
      optimizer = gradient_descent.GradientDescentOptimizer(0.001)
      train_op = optimizer.minimize(loss)

      sess.run([variables.global_variables_initializer()])
      existing_loss = 0

      for _ in range(epoch):
        loss_value, _, runtime_value = sess.run([loss, train_op, runtime], {
            inputs: x_train,
            predict: y_train
        })
        if test.is_gpu_available():
          self.assertEqual(runtime_value, b'cudnn')
        else:
          self.assertEqual(runtime_value, b'cpu')
        # Make sure the loss is updated for every epoch
        # (layer weights properly updated).
        self.assertNotEqual(existing_loss, loss_value)
        existing_loss = loss_value
Example 5
  def increase_loss_scale():
    incr_result_finite = gen_math_ops.less(
        self._loss_scale, (3.4e+38) / self._incr_ratio)
    new_loss_scale_value = control_flow_ops.cond(
        incr_result_finite,
        lambda: self._loss_scale * self._incr_ratio,
        lambda: self._loss_scale)
    update_loss_scale = state_ops.assign(self._loss_scale,
                                         new_loss_scale_value)
    return control_flow_ops.group(update_loss_scale,
                                  self._reset_stats())
Example 6
 def testConvertCase(self):
   """Tests that a v1 case() construction converts properly."""
   with ops.Graph().as_default():
     with variable_scope.variable_scope("", use_resource=False):
       control_flow_v2_toggles.disable_control_flow_v2()
       x = variable_scope.get_variable("x", initializer=1.0)
       y = variable_scope.get_variable("y", initializer=2.0)
       _ = control_flow_ops.case([(gen_math_ops.less(x, y), lambda: x)],
                                 default=lambda: y)
     with session_lib.Session() as sess:
       sess.run(variables.global_variables_initializer())
       variable_graph_def = sess.graph.as_graph_def()
       constant_graph_def = (
           convert_to_constants
           .convert_variables_to_constants_from_session_graph(
               sess, variable_graph_def, ["case/cond/Merge"]))
       self._assertGraphContains(
           constant_graph_def, """
           node {
             name: "x" op: "Const"
             attr { key: "dtype" value { type: DT_FLOAT } }
             attr {
               key: "value"
               value { tensor { dtype: DT_FLOAT tensor_shape{} float_val: 1 }}}
           }
           node {
             name: "y" op: "Const"
             attr { key: "dtype" value { type: DT_FLOAT } }
             attr {
               key: "value"
               value { tensor { dtype: DT_FLOAT tensor_shape{} float_val: 2 }}}
           }
           node {name: "x/read" op: "Identity" input: "x"}
           node {name: "y/read" op: "Identity" input: "y"}
           node {name: "Less" op: "Less" input: "x/read" input: "y/read"}
           node {name: "case/cond/pred_id" op: "Identity" input: "Less"}
           node {
             name: "case/cond/Switch_1" op: "Switch"
             input: "case/cond/pred_id" input: "x/read"
           }
           node {
             name: "case/cond/Switch_2" op: "Switch"
             input: "case/cond/pred_id" input: "y/read"
           }
           node {
             name: "case/cond/Merge" op: "Merge"
             input: "case/cond/Switch_2" input: "case/cond/Switch_1:1"
             attr {key: "T" value {type: DT_FLOAT}}
           }""")
Example 7
 def testConvertV2ResourceCase(self):
   """Tests that a v2 case() with resource variables converts properly."""
   with ops.Graph().as_default():
     with variable_scope.variable_scope("", use_resource=True):
       control_flow_v2_toggles.enable_control_flow_v2()
       x = variable_scope.get_variable("x", initializer=1.0)
       y = variable_scope.get_variable("y", initializer=2.0)
       _ = control_flow_ops.case([(gen_math_ops.less(x, y), lambda: x)],
                                 default=lambda: y)
       control_flow_v2_toggles.disable_control_flow_v2()
     with session_lib.Session() as sess:
       sess.run(variables.global_variables_initializer())
       variable_graph_def = sess.graph.as_graph_def()
       constant_graph_def = (
           convert_to_constants
           .convert_variables_to_constants_from_session_graph(
               sess, variable_graph_def, ["case/cond"]))
       self._assertGraphContains(
           constant_graph_def, """
           node {name: "x" op: "Const"}
           node {name: "y" op: "Const"}
           node {
             name: "case/cond" op: "If" input: "Less" input: "x" input: "y"
             attr {key: "Tcond" value {type: DT_BOOL}}
             attr {key: "Tin" value {list {type: DT_FLOAT type: DT_FLOAT}}}
             attr {key: "Tout" value {list {type: DT_FLOAT}}}
           }
           library {
             function {
               signature {
                 name: "case_cond_false_frozen_0"
                 input_arg {name: "placeholder" type: DT_FLOAT}
                 input_arg {name: "readvariableop_y" type: DT_FLOAT}
                 output_arg {name: "readvariableop" type: DT_FLOAT}
               }
             }
             function {
               signature {
                 name: "case_cond_true_frozen_0"
                 input_arg {name: "placeholder" type: DT_FLOAT}
                 input_arg {name: "readvariableop_x" type: DT_FLOAT}
                 output_arg {name: "readvariableop" type: DT_FLOAT}
               }
             }
           }""")
Example 8
    def __call__(self, query, previous_alignments):
        """Score the query based on the keys and values.

        Args:
          query: Tensor of dtype matching `self.values` and shape
            `[batch_size, query_depth]`.
          previous_alignments: Tensor of dtype matching `self.values` and shape
            `[batch_size, alignments_size]`
            (`alignments_size` is memory's `max_time`).

        Returns:
          alignments: Tensor of dtype matching `self.values` and shape
            `[batch_size, alignments_size]` (`alignments_size` is memory's
            `max_time`).
        """
        with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
            processed_query = self.query_layer(query) if self.query_layer else query
            score = _bahdanau_score(processed_query, self._keys, self._normalize)

        # Mask the scores with memory_sequence_length.
        mask_score = _maybe_mask_score(score, self._memory_sequence_length,
                                       self._score_mask_value)

        # Keep the top_k alignments along dimension 1; replace the rest with -inf.
        top_k = control_flow_ops.cond(
            gen_math_ops.less(self.alignments_size, self._top_alignment_number),
            lambda: self.alignments_size,
            lambda: self._top_alignment_number)
        _, score_mask_index = nn_ops.top_k(mask_score, top_k)
        score_mask_index_final = array_ops.concat(
            [array_ops.reshape(
                [i * array_ops.ones([top_k], dtypes.int32) for i in range(self.batch_size)],
                [-1, 1]),
                array_ops.reshape(score_mask_index, [-1, 1])],
            axis=-1)
        score_mask_ = sparse_ops.sparse_to_dense(
            sparse_indices=score_mask_index_final,
            output_shape=[self.batch_size, self.alignments_size],
            sparse_values=True, default_value=False, validate_indices=False)
        score_mask_values_ = self._score_mask_value * array_ops.ones_like(mask_score)
        keywords_score = array_ops.where(score_mask_, mask_score, score_mask_values_)

        alignments = nn_ops.softmax(keywords_score)

        return alignments
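The sparse_to_dense construction above builds a boolean mask that keeps the k largest scores per row before the softmax. With current public ops the same top-k masking can be written more directly; a sketch with illustrative tensor names (it keeps at least k entries per row if there are ties, whereas the original keeps exactly the top-k indices):

import tensorflow as tf

scores = tf.random.normal([4, 10])          # [batch_size, alignments_size]
k = tf.minimum(3, tf.shape(scores)[-1])     # mirrors the cond on alignments_size

# Keep everything >= the k-th largest score per row, push the rest to -inf.
kth = tf.math.top_k(scores, k).values[:, -1:]
neg_inf = tf.fill(tf.shape(scores), float('-inf'))
masked = tf.where(scores >= kth, scores, neg_inf)
alignments = tf.nn.softmax(masked, axis=-1)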
Example 9
  def testGraphWithSwitch(self):
    """Freezes a graph which contains a Switch with type RESOURCE_DT."""
    with ops.Graph().as_default():
      with variable_scope.variable_scope("", use_resource=True):
        x = variable_scope.get_variable("var_x", initializer=1.0)
        y = variable_scope.get_variable("var_y", initializer=2.0)
        f1 = lambda: variable_scope.get_variable("var_f1", initializer=17.0)
        f2 = lambda: variable_scope.get_variable("var_f2", initializer=23.0)
        cond_node = control_flow_ops.case([(gen_math_ops.less(x, y), f1)],
                                          default=f2)
        _ = math_ops_lib.multiply(cond_node, 2.0, name="output_node")

        with session.Session() as sess:
          sess.run(variables.global_variables_initializer())
          variable_graph_def = sess.graph.as_graph_def()

          constant_graph_def = graph_util.convert_variables_to_constants(
              sess, variable_graph_def, ["output_node"])

    self._ensure_no_variables_in_graph(constant_graph_def)
Example 10
        def __body(w_, e_, mask, b):
            e = math_ops.cast(
                distributions.Beta(
                    (self.__mf - 1.0) / 2.0,
                    (self.__mf - 1.0) / 2.0).sample(shape, seed=seed),
                dtype=self.dtype)

            u = random_ops.random_uniform(shape, dtype=self.dtype, seed=seed)
            w = (1.0 - (1.0 + b) * e) / (1.0 - (1.0 - b) * e)
            x = (1.0 - b) / (1.0 + b)
            c = self.scale * x + (self.__mf - 1) * math_ops.log1p(-x**2)

            tmp = tf.clip_by_value(x * w, 0, 1 - 1e-16)
            reject = gen_math_ops.less(((self.__mf - 1.0) * math_ops.log(1.0 - tmp) +
                                        self.scale * w - c),
                                       math_ops.log(u))
            accept = gen_math_ops.logical_not(reject)

            w_ = array_ops.where(gen_math_ops.logical_and(mask, accept), w, w_)
            e_ = array_ops.where(gen_math_ops.logical_and(mask, accept), e, e_)
            mask = array_ops.where(gen_math_ops.logical_and(mask, accept),
                                   reject, mask)

            return w_, e_, mask, b
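The three array_ops.where(logical_and(mask, accept), ...) lines implement the usual masked rejection-sampling update: only entries that are still pending and were accepted in this round get overwritten. A tiny illustration of that idiom with made-up values:

import tensorflow as tf

pending   = tf.constant([True, True, False])   # still waiting for a sample
accept    = tf.constant([True, False, True])   # accepted in this round
old       = tf.constant([0.0, 0.0, 5.0])
candidate = tf.constant([1.0, 2.0, 3.0])

# Only positions that were pending AND accepted take the new candidate value.
updated = tf.where(tf.logical_and(pending, accept), candidate, old)
# updated == [1.0, 0.0, 5.0]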
Example 11
def _eunn_loop(state, capacity, diag_vec_list, off_vec_list, diag, fft):
    """
    EUNN main loop, applying unitary matrix on input tensor
    """
    i = 0

    def layer_tunable(x, i):

        diag_vec = diag_vec_list.read(i)
        off_vec = off_vec_list.read(i)

        diag = math_ops.multiply(x, diag_vec)
        off = math_ops.multiply(x, off_vec)

        def even_input(off, size):
            def even_s(off, size):
                off = array_ops.reshape(off, [-1, size // 2, 2])
                off = array_ops.reshape(array_ops.reverse(off, [2]),
                                        [-1, size])
                return off

            def odd_s(off, size):
                off, helper = array_ops.split(off, [size - 1, 1], 1)
                size -= 1
                off = even_s(off, size)
                off = array_ops.concat([off, helper], 1)
                return off

            off = control_flow_ops.cond(
                gen_math_ops.equal(gen_math_ops.mod(size, 2), 0),
                lambda: even_s(off, size), lambda: odd_s(off, size))
            return off

        def odd_input(off, size):
            helper, off = array_ops.split(off, [1, size - 1], 1)
            size -= 1
            off = even_input(off, size)
            off = array_ops.concat([helper, off], 1)
            return off

        size = int(off.get_shape()[1])
        off = control_flow_ops.cond(
            gen_math_ops.equal(gen_math_ops.mod(i, 2), 0),
            lambda: even_input(off, size), lambda: odd_input(off, size))

        layer_output = diag + off
        i += 1

        return layer_output, i

    def layer_fft(state, i):

        diag_vec = diag_vec_list.read(i)
        off_vec = off_vec_list.read(i)
        diag = math_ops.multiply(state, diag_vec)
        off = math_ops.multiply(state, off_vec)

        hidden_size = int(off.get_shape()[1])
        # size = 2**i
        dist = capacity - i
        normal_size = (hidden_size // (2**dist)) * (2**(dist - 1))
        normal_size *= 2
        extra_size = tf.maximum(0, (hidden_size % (2**dist)) - (2**(dist - 1)))
        hidden_size -= normal_size

        def modify(off_normal, dist, normal_size):
            off_normal = array_ops.reshape(
                array_ops.reverse(
                    array_ops.reshape(
                        off_normal,
                        [-1, normal_size // (2**dist), 2, (2**(dist - 1))]),
                    [2]), [-1, normal_size])
            return off_normal

        def do_nothing(off_normal):
            return off_normal

        off_normal, off_extra = array_ops.split(off,
                                                [normal_size, hidden_size], 1)
        off_normal = control_flow_ops.cond(
            gen_math_ops.equal(normal_size, 0), lambda: do_nothing(off_normal),
            lambda: modify(off_normal, dist, normal_size))
        helper1, helper2 = array_ops.split(
            off_extra, [hidden_size - extra_size, extra_size], 1)
        off_extra = array_ops.concat([helper2, helper1], 1)
        off = array_ops.concat([off_normal, off_extra], 1)

        layer_output = diag + off
        i += 1

        return layer_output, i

    if fft:
        layer_function = layer_fft
    else:
        layer_function = layer_tunable
    output, _ = control_flow_ops.while_loop(
        lambda state, i: gen_math_ops.less(i, capacity), layer_function,
        [state, i])

    if diag is not None:
        output = math_ops.multiply(output, diag)

    return output
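gen_math_ops.less(i, capacity) is only the loop predicate here; with the public API the same structure is a tf.while_loop whose condition compares the layer counter against the capacity. A stripped-down sketch (the body is a placeholder, not the EUNN layer):

import tensorflow as tf

capacity = tf.constant(5)

def body(state, i):
    # Stand-in for layer_tunable / layer_fft: any shape-preserving update.
    return state * 2.0, i + 1

state0 = tf.ones([3])
final_state, _ = tf.while_loop(
    lambda state, i: tf.less(i, capacity), body, [state0, tf.constant(0)])
# final_state == tf.ones([3]) * 2.0 ** 5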
Example 12
    def _get_queue_ops_stale(self, var_update_op: ops.Operation,
                             source_op: ops.Operation, is_chief: bool,
                             is_trainable: bool) -> List[ops.Operation]:
        """
        Get queue operations for staleness synchronous parameter update.

        Maintain a list of queues of size equal to <staleness>. At the beginning of each call of this function
        (either by the chief worker or other workers), it checks whether each queue is not full. If yes, it pushes
        a token to each queue. If not, it does nothing (a no_op).
        Then, for the current worker that calls this function, it dequeues a token from its corresponding queue
        (indexed by its worker id).
        The potential enqueue operations and definite dequeue operation are grouped together, and have to be
        finished before the model moves on to the next step.
        As at each invocation of this function, a row of empty space in the list of queues will be filled. Thus
        <staleness> number of consecutive dequeue operations can be done by a worker without blocking, achieving
        stale synchronous parameter update with maximum <staleness> steps difference.

        Args:
            var_update_op: The op

        Returns:
            A list of queue operations.
        """
        var_op = var_update_op.inputs[UPDATE_OP_VAR_POS].op

        var_update_sync_queues = \
            [data_flow_ops.FIFOQueue(self._staleness, [dtypes.bool], shapes=None,
                                     name='%s_update_sync_queue_%d' % (var_op.name, i),
                                     shared_name='%s_update_sync_queue_%d' % (var_op.name, i))
             for i in range(self.num_workers)]

        # Enqueue one token to every queue if all queues are not full.
        def _enqueue_row_op():
            enqueue_ops = []
            for q in var_update_sync_queues:
                enqueue_ops.append(q.enqueue(False))
            enqueue_a_row_ops = control_flow_ops.group(*enqueue_ops)
            return enqueue_a_row_ops

        def _no_op():
            return gen_control_flow_ops.no_op()

        switch_cond = gen_array_ops.identity(True)
        for q in var_update_sync_queues:
            switch_cond = gen_math_ops.logical_and(
                switch_cond,
                gen_math_ops.less(q.size(),
                                  gen_array_ops.identity(self._staleness)))

        enqueue_a_row_ops = control_flow_ops.cond(switch_cond, _enqueue_row_op,
                                                  _no_op)

        queue_ops = [enqueue_a_row_ops]

        if is_chief:
            if is_trainable:
                var_update_deps = [
                    self._var_op_to_accum_apply_op[var_op], source_op
                ]
            else:
                var_update_deps = [var_update_op]
            with ops.control_dependencies(var_update_deps):
                dequeue = var_update_sync_queues[self.worker_id].dequeue()
        else:
            # wait for execution of var_update_op
            if is_trainable:
                with ops.control_dependencies(
                    [self._var_op_to_accum_apply_op[var_op]]):
                    dequeue = var_update_sync_queues[self.worker_id].dequeue()
            else:
                dequeue = var_update_sync_queues[self.worker_id].dequeue()
        queue_ops.append(dequeue)

        return queue_ops
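The queues bound the staleness: a worker can run at most <staleness> steps ahead before its dequeue blocks. An eager-mode toy version of the gating logic (sizes and worker ids are made up; the real code builds this with control_flow_ops.cond in graph mode and uses shared_name so the queues are visible across workers):

import tensorflow as tf

staleness, num_workers, worker_id = 3, 2, 0
queues = [tf.queue.FIFOQueue(staleness, [tf.bool], name='sync_queue_%d' % i)
          for i in range(num_workers)]

# Push one token into every queue only while every queue still has room,
# mirroring cond(switch_cond, _enqueue_row_op, _no_op) above.
all_have_room = tf.reduce_all([q.size() < staleness for q in queues])
if all_have_room:
    for q in queues:
        q.enqueue(False)

# Each worker blocks on its own queue; with <staleness> tokens buffered it can
# perform that many dequeues before it has to wait for the other workers.
_ = queues[worker_id].dequeue()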
Example 13
  def testConvertV2UnconvertedResourceNestedCase(self):
    """Tests unconverted variable propagation through nested functions."""
    with ops.Graph().as_default():
      with variable_scope.variable_scope("", use_resource=True):
        control_flow_v2_toggles.enable_control_flow_v2()
        x = variable_scope.get_variable("x", initializer=1.0)
        y = variable_scope.get_variable("y", initializer=2.0)
        z = variable_scope.get_variable("z", initializer=3.0)
        # pylint: disable=g-long-lambda
        _ = control_flow_ops.case(
            [(gen_math_ops.less(x, y), lambda: x)],
            default=lambda: control_flow_ops.case(
                [(gen_math_ops.less(z, y), lambda: z)], default=lambda: y))
        # pylint: enable=g-long-lambda
        control_flow_v2_toggles.disable_control_flow_v2()
      with session_lib.Session() as sess:
        sess.run(variables.global_variables_initializer())
        variable_graph_def = sess.graph.as_graph_def()
        constant_graph_def = (
            convert_to_constants
            .convert_variables_to_constants_from_session_graph(
                sess,
                variable_graph_def, ["case/cond"],
                variable_names_denylist=["y"]))
        self._assertGraphContains(
            constant_graph_def, """
            node {name: "x" op: "Const"}
            node {name: "y" op: "VarHandleOp"}
            node {name: "z" op: "Const"}

            node {name: "Less/ReadVariableOp" op: "Identity" input: "x"}
            node {name: "Less/ReadVariableOp_1" op: "ReadVariableOp" input: "y"}

            node {
              name: "case/cond" op: "If"
              input: "x" input: "z" input: "y"
              attr {
                key: "Tin"
                value {list
                  {type: DT_FLOAT type: DT_FLOAT type: DT_RESOURCE}}}
              attr {
                key: "_read_only_resource_inputs"
                value {list {i: 1 i: 2 i: 3}}}
              attr {key: "then_branch"
                    value {func {name: "case_cond_true_frozen_0"}}}
              attr {key: "else_branch"
                    value {func {name: "case_cond_false_frozen_0"}}}
              attr {key: "output_shapes" value {list {shape {}}}}
            }
            library {
              function {
                signature {
                  name: "case_cond_true_frozen_0"
                  input_arg {name: "placeholder" type: DT_FLOAT}
                  input_arg {name: "placeholder_1" type: DT_RESOURCE}
                  input_arg {name: "readvariableop_x" type: DT_FLOAT}
                  output_arg {name: "readvariableop" type: DT_FLOAT}
                  is_stateful: true
                }

                node_def {name: "ReadVariableOp" op: "Identity"
                  input: "readvariableop_x"}}

              function {
                signature {
                  name: "case_cond_false_frozen_0"
                  input_arg {name: "placeholder" type: DT_FLOAT}
                  input_arg {name: "less_readvariableop_1_y" type: DT_RESOURCE}
                  input_arg {name: "less_readvariableop_z" type: DT_FLOAT}
                  output_arg {name: "case_cond_identity" type: DT_FLOAT}
                  is_stateful: true
                }

                node_def {name: "Less/ReadVariableOp_1" op: "ReadVariableOp"
                  input: "less_readvariableop_1_y"}

                node_def {name: "Less/ReadVariableOp" op: "Identity"
                  input: "less_readvariableop_z"}

                node_def {name: "case/cond" op: "If"
                  input: "less_readvariableop_z"
                  input: "less_readvariableop_1_y"
                  attr {
                    key: "Tin"
                    value {list {type: DT_FLOAT type: DT_RESOURCE}}}
                  attr {key: "then_branch"
                        value {func {name: "case_cond_true_frozen_1"}}}
                  attr {key: "else_branch"
                        value {func {name: "case_cond_false_frozen_1"}}}
                  attr {
                    key: "_read_only_resource_inputs"
                    value {list {i: 1 i: 2}}}}}

              function {
                signature {
                  name: "case_cond_false_frozen_1"
                  input_arg {name: "placeholder" type: DT_FLOAT}
                  input_arg {name: "readvariableop_y" type: DT_RESOURCE}
                  output_arg {name: "readvariableop" type: DT_FLOAT}
                  is_stateful: true
                }

                node_def {name: "ReadVariableOp" op: "ReadVariableOp"
                  input: "readvariableop_y"}}

              function {
                signature {
                  name: "case_cond_true_frozen_1"
                  input_arg {name: "placeholder" type: DT_RESOURCE}
                  input_arg {name: "readvariableop_z" type: DT_FLOAT}
                  output_arg {name: "readvariableop" type: DT_FLOAT}
                  is_stateful: true
                }

                node_def {name: "ReadVariableOp" op: "Identity"
                  input: "readvariableop_z"}}}""")