def export_saved_model(self, export_dir='.'):
        """export a saved model

        Args:
            export_dir: directory to save the saved model
        """

        sess = tf.get_default_session()
        builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
        tf.identity_n(self.outputs, name='output/hr')
        builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
        builder.save()
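The method above only writes the SavedModel; a hedged loading sketch (TF 1.x API, using the default `export_dir='.'`; the tensor name follows from the `tf.identity_n(..., name='output/hr')` call):

import tensorflow as tf

# Hedged loading sketch; feed tensors and session setup are assumptions.
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], export_dir='.')
    # tf.identity_n named the output op 'output/hr', so its first output
    # tensor can be fetched as 'output/hr:0'.
    hr = sess.graph.get_tensor_by_name('output/hr:0')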
Example #2
    def decorated(*args, **kwargs):
        """Decorated function with custom gradient."""

        if kwargs:
            raise ValueError(
                "The custom_gradient decorator currently supports keyword "
                "arguments only when eager execution is enabled.")
        name = "CustomGradient-%s" % ops.uid()
        args = [ops.convert_to_tensor(x) for x in args]
        result, grad_fn = f(*args)
        flat_result = nest.flatten(result)
        all_tensors = flat_result + args

        @ops.RegisterGradient(name)
        def internal_grad_fn(unused_op, *result_grads):
            gradients = nest.flatten(grad_fn(*result_grads[:len(flat_result)]))
            # Need to return one value per input to the IdentityN, so pad the
            # gradients of the inputs of the custom_gradient function with the
            # gradients of the outputs as well.
            return ([None] * len(flat_result)) + gradients

        with ops.get_default_graph().gradient_override_map(
                {"IdentityN": name}):
            all_tensors = tf.identity_n(all_tensors)
        return nest.pack_sequence_as(
            structure=result, flat_sequence=all_tensors[:len(flat_result)])
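This inner function closely resembles the graph-mode path of TensorFlow's public `tf.custom_gradient` decorator; a short usage sketch of that public API (the log1pexp example is illustrative and mirrors the documented pattern):

import tensorflow as tf

@tf.custom_gradient
def log1pexp(x):
    e = tf.exp(x)

    def grad(dy):
        # One gradient per input of log1pexp.
        return dy * (1 - 1 / (1 + e))

    return tf.log(1 + e), grad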
Example #3
 def to_graph(self, graph):
     with graph.as_default():
         placeholder = tf.keras.backend.placeholder(
             shape=(self.__num_inputs, ),
             name='MainInput',
             dtype=tf.float32)
         return tf.identity_n(tf.unstack(placeholder), name="Inp")
Example #4
  def build_graph(parameters):
    """Make a set of tests to do identity."""

    input_tensors = []
    input_count = (2 if parameters["op_to_use"] == "identity_n_with_2_inputs"
                   else 1)
    input_tensors = [
        tf.placeholder(
            dtype=tf.float32, name="input", shape=parameters["input_shape"])
        for _ in range(input_count)
    ]

    # We add the Multiply before Identity as a workaround to make the test
    # pass when input_shape is scalar.
    # During graph transformation, TOCO will replace the Identity op with
    # Reshape when the input has a shape. However, TOCO currently can't
    # distinguish between a missing shape and a scalar shape, so the
    # conversion still fails when the input has a scalar shape.
    # TODO(b/129197312): remove the workaround once the bug is fixed.
    inputs_doubled = [input_tensor * 2.0 for input_tensor in input_tensors]
    if parameters["op_to_use"] == "identity":
      identity_outputs = [tf.identity(inputs_doubled[0])]
    elif parameters["op_to_use"] == "snapshot":
      identity_outputs = [array_ops.snapshot(inputs_doubled[0])]
    elif parameters["op_to_use"] in ("identity_n", "identity_n_with_2_inputs"):
      identity_outputs = tf.identity_n(inputs_doubled)
    return input_tensors, identity_outputs
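A hedged sketch of the `parameters` dictionaries this builder expects; the exact values used by the surrounding test harness are not shown in the snippet, so these combinations are illustrative:

# Illustrative parameter combinations (keys follow build_graph above).
test_parameters = [{
    "op_to_use": ["identity", "identity_n", "identity_n_with_2_inputs",
                  "snapshot"],
    "input_shape": [[], [1], [3, 3]],
}]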
Example #5
    def __init__(self,
                 adj,
                 x,
                 labels,
                 idx_train,
                 idx_unlabeled=None,
                 surrogate=None,
                 surrogate_args={},
                 surrogate_lr=5e-3,
                 seed=None,
                 name=None,
                 device='CPU:0',
                 **kwargs):

        super().__init__(adj,
                         x,
                         labels,
                         idx_train=idx_train,
                         idx_unlabeled=idx_unlabeled,
                         surrogate=surrogate,
                         surrogate_args=surrogate_args,
                         seed=seed,
                         device=device,
                         **kwargs)

        with tf.device(self.device):
            self.stored_weights = tf.identity_n(self.surrogate.weights)
            self.optimizer = Adam(surrogate_lr)
Example #6
 def _get_init_state_tuple(self, cell, name):
     _init_zero_states = cell.zero_state(self.hparams.batch_size, tf.float32)
     if self.stateful:
         _init_states = [tf.identity_n(_state, name) for _state in _init_zero_states]
         return tuple(
             [tf.nn.rnn_cell.LSTMStateTuple(*_state) for _state in _init_states]
         )
     return _init_zero_states
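Because `tf.identity_n` is used here mainly to attach a graph name to every state tensor, the states can later be fetched by that name; a hedged sketch, assuming `name='init_state'` was passed in:

import tensorflow as tf

# Hypothetical retrieval sketch: the first identity_n op named 'init_state'
# exposes the LSTM cell's (c, h) tensors as 'init_state:0' and 'init_state:1';
# later cells get uniquified op names such as 'init_state_1'.
graph = tf.get_default_graph()
c0 = graph.get_tensor_by_name('init_state:0')
h0 = graph.get_tensor_by_name('init_state:1')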
Example #7
 def to_graph(self, graph, max_len):
     with graph.as_default():
         placeholder = tf.keras.backend.placeholder(shape=(),
                                                    dtype=tf.float32,
                                                    name='sw')
         weights = []
         for i in range(int(max_len) + 1):
             weights.append(tf.fill((i, ), placeholder))
         return tf.identity_n(weights, "shared_weight")
Example #8
 def process(self,
             surrogate,
             train_nodes,
             unlabeled_nodes=None,
             lr=5e-3,
             reset=True):
     super().process(surrogate, train_nodes, unlabeled_nodes, reset=False)
     with tf.device(self.device):
         self.stored_weights = tf.identity_n(self.surrogate.weights)
         self.optimizer = Adam(lr)
     if reset:
         self.reset()
     return self
Example #9
    def export_model_pb(self, export_dir='.', export_name='model.pb', **kwargs):
        """ Export model as a constant protobuf. Unlike saved model, this one is not trainable

        Args:
            export_dir: directory to save the exported model
            export_name: model name
        """

        self.output = tf.identity_n(self.output, name='output/hr')
        sess = tf.get_default_session()
        graph = sess.graph.as_graph_def()
        graph = tf.graph_util.remove_training_nodes(graph)
        graph = tf.graph_util.convert_variables_to_constants(
            sess, graph, [outp.name.split(':')[0] for outp in self.output])
        tf.train.write_graph(graph, export_dir, export_name, as_text=False)
        tf.logging.info("Model exported to [%s/%s]." % (Path(export_dir).resolve(), export_name))
Example #10
def init_normalization(x, *, name, init_scale=1., init, ema):
    with tf.variable_scope(name):
        g = get_var('g', shape=x.shape[1:], initializer=tf.constant_initializer(1.), ema=ema)
        b = get_var('b', shape=x.shape[1:], initializer=tf.constant_initializer(0.), ema=ema)
        if init:
            # data-dependent initialization of the normalization parameters
            m_init, v_init = tf.nn.moments(x, [0])
            scale_init = init_scale * tf.rsqrt(v_init + 1e-8)
            assert m_init.shape == v_init.shape == scale_init.shape == g.shape == b.shape
            with tf.control_dependencies([
                g.assign(scale_init),
                b.assign(-m_init * scale_init)
            ]):
                g, b = tf.identity_n([g, b])
        return g, b
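The `tf.identity_n([g, b])` call under `tf.control_dependencies` is what ties the returned tensors to the data-based assignments; a minimal standalone sketch of that pattern (independent of the `get_var`/`ema` helpers above):

import tensorflow as tf

# Sketch of the control-dependency pattern used above: the IdentityN read
# is forced to run after the assignments.
v = tf.Variable(1.0)
w = tf.Variable(2.0)
with tf.control_dependencies([v.assign(10.0), w.assign(20.0)]):
    v_read, w_read = tf.identity_n([v, w])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([v_read, w_read]))  # expected to reflect the assigned values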
Example #11
  def test_reversible_step(self, distribution):
    # Reversible layers satisfy: (a) strides = 1 (b) in_filter = out_filter
    bsz, h, w, c = 8, 32, 32, 32
    filters = c
    strides = 1

    input_tensor = tf.random.uniform(shape=[bsz, h, w, c])
    with distribution.scope():
      f = nn_blocks.ResidualInner(
          filters=filters // 2,
          strides=strides,
          batch_norm_first=False)
      g = nn_blocks.ResidualInner(
          filters=filters // 2,
          strides=1,
          batch_norm_first=False)
      test_layer = nn_blocks.ReversibleLayer(f, g)
      test_layer(input_tensor, training=False)  # init weights
      optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

    @tf.function
    def step_fn():
      with tf.GradientTape() as tape:
        output = test_layer(input_tensor, training=True)
      grads = tape.gradient(output, test_layer.trainable_variables)
      # Test that applying gradients with the optimizer works.
      optimizer.apply_gradients(zip(grads, test_layer.trainable_variables))

      return output

    @tf.function
    def fwd():
      test_layer(input_tensor)

    distribution.run(fwd)  # Initialize variables
    prev_variables = tf.identity_n(test_layer.trainable_variables)
    replica_output = distribution.run(step_fn)
    outputs = distribution.experimental_local_results(replica_output)

    # Assert that the variable values have changed
    for v0, v1 in zip(prev_variables, test_layer.trainable_variables):
      self.assertNotAllEqual(v0, v1)

    # Assert forward pass shape
    expected_output_shape = [bsz, h // strides, w // strides, filters]
    for output in outputs:
      self.assertEqual(expected_output_shape, output.shape.as_list())
Example #12
    def _dky(x, y):
        with tf.GradientTape() as t:
            # Get the number of inputs.
            nx = B.shape(x)[0]
            ny = B.shape(y)[0]

            # Copy the input `nx` times to efficiently compute many derivatives.
            yis = tf.identity_n([y[:, i:i + 1]] * nx)
            t.watch(yis)

            # Tile inputs for batched computation.
            x = B.reshape(B.tile(x, 1, ny), nx * ny, -1)
            y = B.tile(y, nx, 1)

            # Insert tracked dimension, which is different for every tile.
            yi = B.concat(*yis, axis=0)
            y = B.concat(y[:, :i], yi, y[:, i + 1:], axis=1)

            # Perform the derivative computation.
            out = dense(k_elwise(x, y))
            grads = t.gradient(out, yis, unconnected_gradients='zero')
            return B.transpose(B.concat(*grads, axis=1))
Example #13
    def export_freeze_model(self, export_dir='.', version=1):
        """export model as a constant protobuf.

        Unlike saved model, this one is not trainable

        Args:
            export_dir: directory to save the exported model
            version: version of the exported model
        """

        self.outputs = tf.identity_n(self.outputs, name='output/hr')
        sess = tf.get_default_session()
        export_path = Path(export_dir) / str(version)
        while export_path.exists():
            version += 1  # advance to the next unused version
            export_path = Path(export_dir) / str(version)
        export_path = str(export_path)
        graph = sess.graph.as_graph_def()
        graph = tf.graph_util.remove_training_nodes(graph)
        graph = tf.graph_util.convert_variables_to_constants(
            sess, graph, [outp.name.split(':')[0] for outp in self.outputs])
        tf.train.write_graph(graph, export_path, self.name, as_text=False)
        tf.logging.info(f"Model exported to {export_path}/{self.name}.")
Example #14
    def _dkx(x, y):
        import tensorflow as tf

        with tf.GradientTape() as t:
            # Get the number of inputs.
            nx = B.shape(x)[0]
            ny = B.shape(y)[0]

            # Copy the input `ny` times to efficiently compute many derivatives.
            xis = tf.identity_n([x[:, i:i + 1]] * ny)
            t.watch(xis)

            # Tile inputs for batched computation.
            x = B.tile(x, ny, 1)
            y = B.reshape(B.tile(y, 1, nx), ny * nx, -1)

            # Insert tracked dimension, which is different for every tile.
            xi = B.concat(*xis, axis=0)
            x = B.concat(x[:, :i], xi, x[:, i + 1:], axis=1)

            # Perform the derivative computation.
            out = dense(k_elwise(x, y))
            grads = t.gradient(out, xis, unconnected_gradients='zero')
            return B.concat(*grads, axis=1)
Example #15
        xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=y, logits=logits)
        loss = tf.reduce_mean(xentropy, name="loss")

    learning_rate = tf.placeholder_with_default(0.01,
                                                shape=[],
                                                name="learning_rate")

    with tf.name_scope("train_simple"):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        training_op = optimizer.minimize(loss, name='optimize')

    with tf.name_scope("train"):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        gradients = optimizer.compute_gradients(loss)
        gradients_named = tf.identity_n(gradients, name='compute_gradients')
        gradients_named_length = tf.Variable(
            len(gradients_named),
            name='compute_gradients_output_length',
            trainable=False)
        training_op = optimizer.apply_gradients(gradients,
                                                name='apply_gradients')

    with tf.name_scope("eval"):
        correct = tf.nn.in_top_k(logits, y, 1)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32),
                                  name="accuracy")

    init = tf.global_variables_initializer()

    with open('graph.pb', 'wb') as f:
        # The original snippet is truncated here; presumably the serialized
        # GraphDef is written out, e.g.:
        f.write(tf.get_default_graph().as_graph_def().SerializeToString())
Example #16
 def _get_identity_states(states, name):
     # Use `tf.identity_n` to make the state tensors accessible by name.
     _states = [tf.identity_n(_state, name) for _state in states]
     return tuple([tf.nn.rnn_cell.LSTMStateTuple(*_state) for _state in _states])
Example #17
def sample_gamma_RSVI(z, alpha, fz):
	"""Given a (frozen) Gamma sample z, and shape parameter alpha and log-prob fz, get RSVI gradient of sample.
	The key tensorflow dynamic is to overwrite the gradient of the identity_N op to work as the RSVI gradient."""
	sample, _, _ = tf.identity_n([z, alpha, fz])
	return sample
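The snippet above only wraps the tensors in an IdentityN op; a hedged sketch of the companion piece, overriding that op's gradient, is shown below. The registered gradient body is a placeholder to illustrate the mechanism, not the actual RSVI estimator.

import tensorflow as tf
from tensorflow.python.framework import ops

@ops.RegisterGradient("RSVIGammaGrad")
def _rsvi_gamma_grad(op, *output_grads):
    # IdentityN has one output (and one incoming gradient) per input.
    z, alpha, fz = op.inputs
    dz = output_grads[0]
    # Placeholder gradients for (z, alpha, fz); the real RSVI estimator goes here.
    return [None, dz * tf.ones_like(alpha), None]

def sample_gamma_RSVI_with_override(z, alpha, fz):
    g = tf.get_default_graph()
    with g.gradient_override_map({"IdentityN": "RSVIGammaGrad"}):
        sample, _, _ = tf.identity_n([z, alpha, fz])
    return sample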
Example #18
 def model(self, m):
     # Back up
     if isinstance(m, tf.keras.Model) and m.weights:
         self.backup = tf.identity_n(m.weights)
     # TODO: assert that m is None, a tf.keras.Model, or a torch.nn.Module
     self._model = m
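Since `tf.identity_n` snapshots the current weight values into plain tensors, the backup can later be copied back; a hedged, illustrative restore helper (the function name is an assumption):

import tensorflow as tf

def restore_weights(model: tf.keras.Model, backup):
    # Copy the snapshot tensors taken above back into the model's variables.
    for variable, saved_value in zip(model.weights, backup):
        variable.assign(saved_value)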
Example #19
 def _common(cls, node, **kwargs):
     x = kwargs["tensor_dict"][node.inputs[0]]
     if isinstance(x, (list, tuple)):
         return [tf.identity_n(x)]
     else:
         return [tf.identity(x)]