Example #1
 def _singleMetaGraphSavedModel(self):
     export_graph = ops.Graph()
     with export_graph.as_default():
         start = array_ops.placeholder(shape=[1, 1],
                                       dtype=dtypes.float32,
                                       name="start")
         distractor = variables.RefVariable(-1., name="distractor")
         v = variables.RefVariable(3., name="v")
         local_variable = variables.VariableV1(
             1.,
             collections=[ops.GraphKeys.LOCAL_VARIABLES],
             trainable=False,
             use_resource=True)
         output = array_ops.identity(start * v * local_variable,
                                     name="output")
         with session_lib.Session() as session:
             session.run([
                 v.initializer, distractor.initializer,
                 local_variable.initializer
             ])
             path = os.path.join(self.get_temp_dir(), "saved_model",
                                 str(ops.uid()))
             simple_save.simple_save(
                 session,
                 path,
                 inputs={"start": start},
                 outputs={"output": output},
                 legacy_init_op=local_variable.initializer)
     return path
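
The helper above only writes the SavedModel and hands back its path. For context, a minimal sketch of reading it back with the TF1 loader API; the loading code is not part of the original test, and it assumes the standard "serve" tag and the "start"/"output" tensor names that simple_save and the helper use.

# Sketch only (not from the original test): load the SavedModel produced by
# the helper above and run it once. loader.load also runs the legacy_init_op,
# so the local variable is initialized along with the restored variables.
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import loader

def _load_and_run(path):
    with session_lib.Session(graph=ops.Graph()) as session:
        loader.load(session, ["serve"], path)
        start = session.graph.get_tensor_by_name("start:0")
        output = session.graph.get_tensor_by_name("output:0")
        # start * v * local_variable == 2.0 * 3.0 * 1.0 == 6.0
        return session.run(output, feed_dict={start: [[2.0]]})
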
Example #2
 def _v1_single_metagraph_saved_model(self, use_resource):
   export_graph = ops.Graph()
   with export_graph.as_default():
     start = array_ops.placeholder(
         shape=None, dtype=dtypes.float32, name="start")
     if use_resource:
       distractor = variables.RefVariable(-1., name="distractor")
       v = resource_variable_ops.ResourceVariable(3., name="v")
     else:
       # "distractor" gets saved in the checkpoint and so used in the restore
       # function, but not in the pruned function for the signature. This tests
       # node naming: it needs to be consistent (and ideally always the same as
       # the node in the original GraphDef) for the resource manager to find
       # the right variable.
       distractor = variables.RefVariable(-1., name="distractor")
       v = variables.RefVariable(3., name="v")
     local_variable = variables.VariableV1(
         1.,
         collections=[ops.GraphKeys.LOCAL_VARIABLES],
         trainable=False,
         use_resource=True)
     output = array_ops.identity(start * v * local_variable, name="output")
     with session_lib.Session() as session:
       session.run([v.initializer, distractor.initializer,
                    local_variable.initializer])
       path = os.path.join(self.get_temp_dir(), "saved_model", str(ops.uid()))
       simple_save.simple_save(
           session,
           path,
           inputs={"start": start},
           outputs={"output": output},
           legacy_init_op=local_variable.initializer)
   return path
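
Example #2 is the upstream TensorFlow variant of the same helper, used when testing how v1 SavedModels load under v2. A sketch of consuming its output from TF2; this is not part of the original snippet, and it assumes the default "serving_default" signature key that simple_save writes.

# Sketch only: run the v1 SavedModel from TF2. `path` stands for the value
# returned by _v1_single_metagraph_saved_model above.
import tensorflow as tf

def run_signature(path):
    imported = tf.saved_model.load(path)
    serve = imported.signatures["serving_default"]
    return serve(start=tf.constant(2.0))["output"]   # 2.0 * 3.0 * 1.0
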
Example #3
    def doTestSparse(self, use_resource=False):
        for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
            with self.cached_session():
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                if use_resource:
                    var0 = resource_variable_ops.ResourceVariable(var0_np)
                    var1 = resource_variable_ops.ResourceVariable(var1_np)
                else:
                    var0 = variables.RefVariable(var0_np)
                    var1 = variables.RefVariable(var1_np)
                grads0_np_indices = np.array([0, 1], dtype=np.int32)
                grads0 = ops.IndexedSlices(
                    constant_op.constant(grads0_np),
                    constant_op.constant(grads0_np_indices),
                    constant_op.constant([2]))
                grads1_np_indices = np.array([0, 1], dtype=np.int32)
                grads1 = ops.IndexedSlices(
                    constant_op.constant(grads1_np),
                    constant_op.constant(grads1_np_indices),
                    constant_op.constant([2]))
                opt = adam.AdamOptimizer()
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                variables.global_variables_initializer().run()

                # Fetch params to validate initial values
                self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                beta1_power, beta2_power = opt._get_beta_accumulators()

                # Run 3 steps of Adam
                for t in range(1, 4):
                    self.assertAllCloseAccordingToType(
                        0.9**t, self.evaluate(beta1_power))
                    self.assertAllCloseAccordingToType(
                        0.999**t, self.evaluate(beta2_power))
                    update.run()

                    var0_np, m0, v0 = adam_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adam_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0))
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1))
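
Both this test and Example #7 compare against adam_update_numpy, which is defined elsewhere in the test file and not shown here. A sketch consistent with the standard Adam update rule and the default hyperparameters these tests use:

# Sketch of the NumPy reference the assertions above rely on; the real helper
# lives in the same test file.
import numpy as np

def adam_update_numpy(param, g_t, t, m, v,
                      alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    alpha_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)  # bias-corrected step size
    m_t = beta1 * m + (1 - beta1) * g_t                       # first-moment estimate
    v_t = beta2 * v + (1 - beta2) * g_t * g_t                 # second-moment estimate
    param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
    return param_t, m_t, v_t
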
Example #4
def Rtt_default_variable_creator(next_creator=None, **kwargs):
    """Default variable creator."""
    assert next_creator is None
    initial_value = kwargs.get("initial_value", None)
    trainable = kwargs.get("trainable", None)
    collections = kwargs.get("collections", None)
    validate_shape = kwargs.get("validate_shape", True)
    caching_device = kwargs.get("caching_device", None)
    name = kwargs.get("name", None)
    variable_def = kwargs.get("variable_def", None)
    dtype = kwargs.get("dtype", None)
    expected_shape = kwargs.get("expected_shape", None)
    import_scope = kwargs.get("import_scope", None)
    constraint = kwargs.get("constraint", None)
    use_resource = kwargs.get("use_resource", None)
    synchronization = kwargs.get("synchronization", None)
    aggregation = kwargs.get("aggregation", None)
    shape = kwargs.get("shape", None)

    initial_value = convert_init_value_to_string(initial_value, dtype)

    if use_resource is None:
        use_resource = variable_scope.get_variable_scope().use_resource
    if use_resource is None:
        use_resource = variable_scope._DEFAULT_USE_RESOURCE
    use_resource = use_resource or context.executing_eagerly()
    if use_resource:
        distribute_strategy = kwargs.get("distribute_strategy", None)
        return rtt_ts.convert_to_rtttensor(
            resource_variable_ops.ResourceVariable(
                initial_value=initial_value,
                trainable=trainable,
                collections=collections,
                validate_shape=validate_shape,
                caching_device=caching_device,
                name=name,
                dtype=dtype,
                constraint=constraint,
                variable_def=variable_def,
                import_scope=import_scope,
                distribute_strategy=distribute_strategy,
                synchronization=synchronization,
                aggregation=aggregation,
                shape=shape))
    else:
        return rtt_ts.convert_to_rtttensor(
            variables.RefVariable(initial_value=initial_value,
                                  trainable=trainable,
                                  collections=collections,
                                  validate_shape=validate_shape,
                                  caching_device=caching_device,
                                  name=name,
                                  dtype=dtype,
                                  constraint=constraint,
                                  variable_def=variable_def,
                                  expected_shape=expected_shape,
                                  import_scope=import_scope,
                                  synchronization=synchronization,
                                  aggregation=aggregation,
                                  shape=shape))
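
Rtt_default_variable_creator asserts that next_creator is None, so it is meant to act as TensorFlow's terminal (default) variable creator rather than to be chained through tf.variable_creator_scope. A hedged sketch of how such a creator could be installed; the patch target is an assumption, not taken from the original project.

# Assumption: override the module-level default creator in variable_scope so
# that variables created afterwards go through the Rtt creator. The real
# project may wire this up differently.
from tensorflow.python.ops import variable_scope

variable_scope.default_variable_creator = Rtt_default_variable_creator
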
Example #5
def create_host_embedding(name,
                          shape,
                          dtype,
                          partition_strategy="TOKEN",
                          optimizer_spec=None,
                          initializer=None):
    """ Create a HostEmbedding.

      Args:
        name: The name which uniquely identifies the embedding.
        shape: The shape for the tensor which will hold the embedding.
        dtype: The dtype for the tensor which will hold the embedding.
        partition_strategy: When
          `enable_experimental_remote_buffer_embedding` is `True` and using
          replication, the embedding must be distributed across the replicas.
          This option decides on which axis the embedding will be split. Options
          are "TOKEN" or "ENCODING".
        optimizer_spec: A description of how the embedding will be optimized.
          When `None`, the embedding is assumed to not be trainable.
        initializer: The initializer to use when creating the embedding tensor.

      Returns:
        A `HostEmbedding` object that wraps the created embedding tensor.

  """
    if initializer is None:
        initializer = array_ops.zeros(shape, dtype)
    with ops.device('cpu'):
        embedding_tensor = variables.RefVariable(initial_value=initializer,
                                                 name=name)
    return HostEmbedding(name,
                         embedding_tensor,
                         partition_strategy=partition_strategy,
                         optimizer_spec=optimizer_spec)
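
A usage sketch for the factory above; the vocabulary size, embedding width, and name are illustrative values, not taken from any original example.

# Illustrative call only: a 10000 x 128 non-trainable embedding table kept in
# host memory, zero-initialized because no initializer is passed.
embedding = create_host_embedding(
    "word_embedding",
    shape=[10000, 128],
    dtype=dtypes.float32,
    partition_strategy="TOKEN",
    optimizer_spec=None)
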
Example #6
 def testGatherNdRefVariable(self):
   with self.cached_session():
     v = variables.RefVariable(constant_op.constant([[1, 2], [3, 4], [5, 6]]))
     self.evaluate(variables.global_variables_initializer())
     gather = array_ops.gather_nd(v, [[0, 1], [2, 0]])
     if not context.executing_eagerly():  # .op doesn't make sense in Eager
       self.assertEqual("GatherNd", gather.op.name)
     self.assertAllEqual([2, 5], gather)
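
The expected [2, 5] follows directly from gather_nd indexing: [0, 1] selects row 0, column 1 and [2, 0] selects row 2, column 0. The same check in plain NumPy, for reference (not part of the original test):

# NumPy equivalent of the gather_nd lookup above.
import numpy as np

x = np.array([[1, 2], [3, 4], [5, 6]])
assert [x[0, 1], x[2, 0]] == [2, 5]
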
Example #7
    def doTestBasic(self, use_resource=False, use_callable_params=False):
        if context.executing_eagerly() and not use_resource:
            self.skipTest(
                "Skipping test with use_resource=False and executing eagerly.")
        for i, dtype in enumerate(
            [dtypes.half, dtypes.float32, dtypes.float64]):
            with self.session(graph=ops.Graph()):
                # Initialize variables for numpy implementation.
                m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)

                if use_resource:
                    var0 = resource_variable_ops.ResourceVariable(
                        var0_np, name="var0_%d" % i)
                    var1 = resource_variable_ops.ResourceVariable(
                        var1_np, name="var1_%d" % i)
                else:
                    var0 = variables.RefVariable(var0_np)
                    var1 = variables.RefVariable(var1_np)
                grads0 = constant_op.constant(grads0_np)
                grads1 = constant_op.constant(grads1_np)

                learning_rate = lambda: 0.001
                beta1 = lambda: 0.9
                beta2 = lambda: 0.999
                epsilon = lambda: 1e-8
                if not use_callable_params:
                    learning_rate = learning_rate()
                    beta1 = beta1()
                    beta2 = beta2()
                    epsilon = epsilon()

                opt = adam.AdamOptimizer(learning_rate=learning_rate)
                update = opt.apply_gradients(
                    zip([grads0, grads1], [var0, var1]))
                opt_variables = opt.variables()
                beta1_power, beta2_power = opt._get_beta_accumulators()
                self.assertIsNotNone(beta1_power)
                self.assertIsNotNone(beta2_power)
                self.assertIn(beta1_power, opt_variables)
                self.assertIn(beta2_power, opt_variables)
                # Ensure that non-slot variables are the same type as the requested
                # variables.
                self.assertEqual(
                    use_resource,
                    resource_variable_ops.is_resource_variable(beta1_power))
                self.assertEqual(
                    use_resource,
                    resource_variable_ops.is_resource_variable(beta2_power))

                if not context.executing_eagerly():
                    with ops.Graph().as_default():
                        # Shouldn't return non-slot variables from other graphs.
                        self.assertEqual(0, len(opt.variables()))
                    self.evaluate(variables.global_variables_initializer())
                    # Fetch params to validate initial values
                    self.assertAllClose([1.0, 2.0], self.evaluate(var0))
                    self.assertAllClose([3.0, 4.0], self.evaluate(var1))

                beta1_power, beta2_power = opt._get_beta_accumulators()

                # Run 3 steps of Adam
                for t in range(1, 4):
                    if not context.executing_eagerly():
                        self.evaluate(update)
                    elif t > 1:
                        opt.apply_gradients(zip([grads0, grads1],
                                                [var0, var1]))

                    self.assertAllCloseAccordingToType(
                        0.9**(t + 1), self.evaluate(beta1_power))
                    self.assertAllCloseAccordingToType(
                        0.999**(t + 1), self.evaluate(beta2_power))

                    var0_np, m0, v0 = adam_update_numpy(
                        var0_np, grads0_np, t, m0, v0)
                    var1_np, m1, v1 = adam_update_numpy(
                        var1_np, grads1_np, t, m1, v1)

                    # Validate updated params
                    self.assertAllCloseAccordingToType(var0_np,
                                                       self.evaluate(var0))
                    self.assertAllCloseAccordingToType(var1_np,
                                                       self.evaluate(var1))
                    if use_resource:
                        self.assertEqual("var0_%d/Adam:0" % (i, ),
                                         opt.get_slot(var=var0, name="m").name)
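
Helpers like doTestBasic and doTestSparse are normally driven by concrete test methods that choose the variable type. A sketch of such wrappers; the method names are illustrative, not copied from the original test file.

# Illustrative wrappers only; the real test class defines its own entry points.
def testBasic(self):
    with self.cached_session():
        self.doTestBasic(use_resource=False)

def testResourceBasic(self):
    self.doTestBasic(use_resource=True)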