Code example #1: testObjectsCombined
 def testObjectsCombined(self):
   # Currently fine to load two checkpoint objects into one Python object
   checkpoint_directory = self.get_temp_dir()
   save_root = checkpointable.Checkpointable()
   save_root.dep_one = checkpointable.Checkpointable()
   save_root.dep_two = checkpointable.Checkpointable()
   checkpointable_utils.add_variable(
       save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64)
   checkpointable_utils.add_variable(
       save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64)
   self.evaluate(checkpointable_utils.gather_initializers(save_root))
   save_path = checkpointable_utils.CheckpointableSaver(save_root).save(
       os.path.join(checkpoint_directory, "ckpt"))
   load_root = checkpointable.Checkpointable()
   load_root.dep_one = checkpointable.Checkpointable()
   load_root.dep_two = load_root.dep_one
   v1 = checkpointable_utils.add_variable(
       load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64)
   v2 = checkpointable_utils.add_variable(
       load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64)
   status = checkpointable_utils.CheckpointableSaver(load_root).restore(
       save_path).assert_consumed()
   status.run_restore_ops()
   self.assertEqual(32., self.evaluate(v1))
   self.assertEqual(64., self.evaluate(v2))
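
A minimal sketch of the same combine-on-load pattern with the public tf.train.Checkpoint API (an assumption on my part: TF 2.x with eager execution; the tf.Module layout and /tmp path are illustrative, not taken from the test above):

import os
import tensorflow as tf

save_root = tf.Module()
save_root.dep_one = tf.Module()
save_root.dep_two = tf.Module()
save_root.dep_one.var1 = tf.Variable(32., dtype=tf.float64)
save_root.dep_two.var2 = tf.Variable(64., dtype=tf.float64)
save_path = tf.train.Checkpoint(root=save_root).save(
    os.path.join("/tmp/combined", "ckpt"))

# One Python object stands in for both saved dependencies.
load_root = tf.Module()
load_root.dep_one = tf.Module()
load_root.dep_two = load_root.dep_one
load_root.dep_one.var1 = tf.Variable(0., dtype=tf.float64)
load_root.dep_one.var2 = tf.Variable(0., dtype=tf.float64)
tf.train.Checkpoint(root=load_root).restore(save_path).assert_consumed()
assert load_root.dep_one.var1.numpy() == 32.
assert load_root.dep_one.var2.numpy() == 64.
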
Code example #2: testAddVariable
  def testAddVariable(self):
    obj = NonLayerCheckpointable()
    with self.assertRaisesRegexp(ValueError, "do not specify shape"):
      checkpointable_utils.add_variable(
          obj, name="shape_specified_twice", shape=[], initializer=1)
    constant_initializer = checkpointable_utils.add_variable(
        obj, name="constant_initializer", initializer=1)
    with variable_scope.variable_scope("some_variable_scope"):
      ones_initializer = checkpointable_utils.add_variable(
          obj,
          name="ones_initializer",
          shape=[2],
          initializer=init_ops.ones_initializer(dtype=dtypes.float32))
    bare_initializer = checkpointable_utils.add_variable(
        obj,
        name="bare_initializer",
        shape=[2, 2],
        dtype=dtypes.float64,
        initializer=init_ops.zeros_initializer)

    # Even in graph mode, there are no naming conflicts between objects, only
    # naming conflicts within an object.
    other_duplicate = resource_variable_ops.ResourceVariable(
        name="duplicate", initial_value=1.)
    duplicate = checkpointable_utils.add_variable(
        obj, name="duplicate", shape=[])
    with self.assertRaisesRegexp(ValueError, "'duplicate' already exists"):
      checkpointable_utils.add_variable(obj, name="duplicate", shape=[])

    self.evaluate(checkpointable_utils.gather_initializers(obj))
    self.assertEqual("constant_initializer:0", constant_initializer.name)
    self.assertEqual(1, self.evaluate(constant_initializer))
    self.assertEqual("some_variable_scope/ones_initializer:0",
                     ones_initializer.name)
    self.assertAllEqual([1, 1], self.evaluate(ones_initializer))
    self.assertAllEqual([[0., 0.],
                         [0., 0.]], self.evaluate(bare_initializer))
    self.assertEqual("a_variable:0", obj.a_variable.name)
    self.assertEqual("duplicate:0", other_duplicate.name)
    if context.in_graph_mode():
      # The .name attribute may be globally influenced, but the checkpoint name
      # won't be (tested below).
      self.assertEqual("duplicate_1:0", duplicate.name)
    else:
      # When executing eagerly, there's no uniquification of variable names. The
      # checkpoint name will be the same.
      self.assertEqual("duplicate:0", duplicate.name)
    named_variables, _ = checkpointable_utils._serialize_object_graph(obj)
    expected_checkpoint_names = (
        "a_variable/.ATTRIBUTES/VARIABLE_VALUE",
        "bare_initializer/.ATTRIBUTES/VARIABLE_VALUE",
        "constant_initializer/.ATTRIBUTES/VARIABLE_VALUE",
        "duplicate/.ATTRIBUTES/VARIABLE_VALUE",
        "ones_initializer/.ATTRIBUTES/VARIABLE_VALUE",
    )
    six.assertCountEqual(
        self, expected_checkpoint_names, named_variables.keys())
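
As the assertions above stress, the checkpoint key comes from the object graph, not from the variable's global .name attribute. A quick way to see the keys an object-based checkpoint generates, using the public API (assuming TF 2.x; the layout here is made up for illustration):

import tensorflow as tf

obj = tf.Module()
obj.duplicate = tf.Variable(1., name="duplicate")
other = tf.Variable(2., name="duplicate")  # same .name, no key conflict
path = tf.train.Checkpoint(obj=obj).save("/tmp/keys/ckpt")
for key, shape in tf.train.list_variables(path):
  print(key, shape)
# Prints keys such as "obj/duplicate/.ATTRIBUTES/VARIABLE_VALUE"; the
# global .name ("duplicate:0") never appears in them.
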
Code example #3: testMultipleGraphsNonSlotVariables
  def testMultipleGraphsNonSlotVariables(self):
    with context.graph_mode():
      checkpoint_directory = self.get_temp_dir()
      checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
      optimizer = adam.AdamOptimizer(0.001)
      # Construct a model in one graph
      first_graph = ops.Graph()
      first_session = session_lib.Session(graph=first_graph)
      with first_graph.as_default(), first_session.as_default():
        first_variable = resource_variable_ops.ResourceVariable([1.])
        first_root_checkpointable = checkpointable_utils.Checkpoint(
            optimizer=optimizer, variable=first_variable)
        train_op = optimizer.minimize(first_variable.read_value)
        self.evaluate(checkpointable_utils.gather_initializers(
            first_root_checkpointable))
        self.evaluate(train_op)
        self.evaluate(first_variable.assign([1.]))
        self.evaluate(optimizer.get_slot(
            var=first_variable, name="m").assign([2.]))
        beta1_power, _ = optimizer._get_beta_accumulators()
        self.evaluate(beta1_power.assign(3.))

      # Save and load in a second graph
      second_graph = ops.Graph()
      with second_graph.as_default(), session_lib.Session(graph=second_graph):
        second_variable = resource_variable_ops.ResourceVariable([1.])
        second_root_checkpointable = checkpointable_utils.Checkpoint(
            optimizer=optimizer, variable=second_variable)
        train_op = optimizer.minimize(second_variable.read_value)
        second_root_checkpointable.restore(None).initialize_or_restore()
        self.evaluate(train_op)
        self.evaluate(second_variable.assign([4.]))
        self.evaluate(optimizer.get_slot(
            var=second_variable, name="m").assign([5.]))
        beta1_power, _ = optimizer._get_beta_accumulators()
        self.evaluate(beta1_power.assign(6.))
        save_path = second_root_checkpointable.save(checkpoint_prefix)
        self.evaluate(second_variable.assign([7.]))
        self.evaluate(optimizer.get_slot(
            var=second_variable, name="m").assign([8.]))
        beta1_power, _ = optimizer._get_beta_accumulators()
        self.assertAllEqual(6., self.evaluate(beta1_power))
        status = second_root_checkpointable.restore(save_path)
        status.assert_consumed().run_restore_ops()
        self.assertAllEqual([4.], self.evaluate(second_variable))
        self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
            var=second_variable, name="m")))
        beta1_power, _ = optimizer._get_beta_accumulators()
        self.assertAllEqual(6., self.evaluate(beta1_power))

      # Check that the first graph is unmolested
      with first_graph.as_default(), first_session.as_default():
        self.assertAllEqual([1.], self.evaluate(first_variable))
        self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
            var=first_variable, name="m")))
        beta1_power, _ = optimizer._get_beta_accumulators()
        self.assertAllEqual(3., self.evaluate(beta1_power))
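
The same one-session-per-graph discipline works with the v1-compatible public Checkpoint. A reduced sketch of save-in-one-graph, restore-in-another (assuming TF 2.x with tf.compat.v1 and eager execution disabled; values are arbitrary):

import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

first_graph, second_graph = tf1.Graph(), tf1.Graph()
with first_graph.as_default(), tf1.Session() as sess:
  v = tf1.get_variable("v", initializer=1.)
  ckpt = tf1.train.Checkpoint(v=v)
  sess.run(v.initializer)
  sess.run(v.assign(3.))
  path = ckpt.save("/tmp/two_graphs/ckpt", session=sess)
with second_graph.as_default(), tf1.Session() as sess:
  v = tf1.get_variable("v", initializer=0.)
  sess.run(v.initializer)
  status = tf1.train.Checkpoint(v=v).restore(path)
  status.run_restore_ops(session=sess)
  assert sess.run(v) == 3.  # the value saved from the first graph
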
Code example #4: testSaveRestore (MyNetwork)
 def testSaveRestore(self):
   network = MyNetwork()
   optimizer = adam.AdamOptimizer(0.001)
   root_checkpointable = checkpointable_utils.Checkpoint(
       optimizer=optimizer, network=network)
   input_value = constant_op.constant([[3.]])
   if context.in_eager_mode():
     optimizer.minimize(
         lambda: network(input_value))
   else:
     train_op = optimizer.minimize(network(input_value))
     # TODO(allenl): Make initialization more pleasant when graph building.
     root_checkpointable.save_counter  # pylint: disable=pointless-statement
     self.evaluate(checkpointable_utils.gather_initializers(
         root_checkpointable))
     self.evaluate(train_op)
   prefix = os.path.join(self.get_temp_dir(), "ckpt")
   self.evaluate(state_ops.assign(network._named_dense.variables[1], [42.]))
   m_bias_slot = optimizer.get_slot(network._named_dense.variables[1], "m")
   self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
   save_path = root_checkpointable.save(file_prefix=prefix)
   self.evaluate(state_ops.assign(network._named_dense.variables[1], [43.]))
   self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
   optimizer_variables = self.evaluate(optimizer.variables())
   self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
   # Immediate restoration
   status = root_checkpointable.restore(save_path=save_path).assert_consumed()
   status.run_restore_ops()
   self.assertAllEqual([42.], self.evaluate(network._named_dense.variables[1]))
   self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
   self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
   if context.in_graph_mode():
     return  # Restore-on-create is only supported when executing eagerly
   on_create_network = MyNetwork()
   on_create_optimizer = adam.AdamOptimizer(0.001)
   on_create_root = checkpointable_utils.Checkpoint(
       optimizer=on_create_optimizer, network=on_create_network)
   # Deferred restoration
   status = on_create_root.restore(save_path=save_path)
   on_create_network(constant_op.constant([[3.]]))  # create variables
   self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
   self.assertAllEqual([42.],
                       self.evaluate(
                           on_create_network._named_dense.variables[1]))
   on_create_m_bias_slot = on_create_optimizer.get_slot(
       on_create_network._named_dense.variables[1], "m")
   # Optimizer slot variables are created when the original variable is
   # restored.
   self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
   self.assertAllEqual(optimizer_variables[2:],
                       self.evaluate(on_create_optimizer.variables()))
   on_create_optimizer._create_slots(
       [resource_variable_ops.ResourceVariable([1.])])
   status.assert_consumed()
   beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
   self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
   self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
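
Restore-on-create, exercised at the end of this test, survives as the public API's deferred loading. A compact sketch (assuming TF 2.x eager execution; a Keras Dense layer stands in for MyNetwork):

import tensorflow as tf

net = tf.keras.layers.Dense(1)
net(tf.constant([[3.]]))  # build the variables
net.kernel.assign([[42.]])
path = tf.train.Checkpoint(net=net).save("/tmp/on_create/ckpt")

fresh = tf.keras.layers.Dense(1)
status = tf.train.Checkpoint(net=fresh).restore(path)  # nothing built yet
fresh(tf.constant([[3.]]))  # creating the variables triggers the restore
status.assert_consumed()
assert float(fresh.kernel.numpy()) == 42.
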
Code example #5: testManySavesGraph
 def testManySavesGraph(self):
   """Saves after the first should not modify the graph."""
   with context.graph_mode():
     graph = ops.Graph()
     with graph.as_default(), self.test_session(graph):
       checkpoint_directory = self.get_temp_dir()
       checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
       obj = checkpointable.Checkpointable()
       obj.var = variable_scope.get_variable(name="v", initializer=0.)
       obj.opt = adam.AdamOptimizer(0.1)
       obj.opt.minimize(obj.var.read_value())
       self.evaluate(checkpointable_utils.gather_initializers(obj))
       saver = checkpointable_utils.CheckpointableSaver(obj)
       saver.save(checkpoint_prefix)
       before_ops = graph.get_operations()
       saver.save(checkpoint_prefix)
       self.assertEqual(before_ops, graph.get_operations())
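
The v1-compatible public Checkpoint keeps this property: save and restore ops are built once and cached, so repeated saves leave the graph untouched. A sketch under the same compat assumptions as the graph-mode example above:

import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

graph = tf1.Graph()
with graph.as_default(), tf1.Session() as sess:
  v = tf1.get_variable("v", initializer=0.)
  sess.run(v.initializer)
  ckpt = tf1.train.Checkpoint(v=v)
  ckpt.save("/tmp/many_saves/ckpt", session=sess)
  ops_after_first_save = len(graph.get_operations())
  ckpt.save("/tmp/many_saves/ckpt", session=sess)
  assert ops_after_first_save == len(graph.get_operations())
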
Code example #6: testDependencyLoop
  def testDependencyLoop(self):
    # Note: this test creates garbage during eager execution because it
    # purposefully creates a reference cycle.
    first = checkpointable.Checkpointable()
    second = checkpointable.Checkpointable()
    first.second = second
    second.first = first
    first.v = checkpointable_utils.add_variable(
        first, "v1", initializer=[3., 1., 4.])
    second.v = checkpointable_utils.add_variable(
        second, "v2", initializer=[1., 1., 2., 3.])
    self.evaluate(checkpointable_utils.gather_initializers(first))
    checkpoint_directory = self.get_temp_dir()
    save_path = checkpointable_utils.CheckpointableSaver(first).save(
        os.path.join(checkpoint_directory, "ckpt"))

    # Test deferred loading
    first_load = checkpointable.Checkpointable()
    status = checkpointable_utils.CheckpointableSaver(
        first_load).restore(save_path)
    second_load = checkpointable.Checkpointable()
    first_load.second = second_load
    second_load.first = first_load
    with self.assertRaises(AssertionError):
      status.assert_consumed()
    first_load.v = checkpointable_utils.add_variable(
        first_load, "v1", shape=[3])
    second_load.v = checkpointable_utils.add_variable(
        second_load, "v2", shape=[4])
    status.assert_consumed()
    status.run_restore_ops()
    self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
    self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))

    # Test loading when variables have already been created
    self.evaluate(first_load.v.assign([2., 7., 1.]))
    self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v))
    self.evaluate(second_load.v.assign([2., 7., 1., 8.]))
    self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v))
    status = checkpointable_utils.CheckpointableSaver(first_load).restore(
        save_path).assert_consumed()
    status.run_restore_ops()
    self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v))
    self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v))
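
Reference cycles like first <-> second are also legal in the public API; the dependency traversal visits each object once. A sketch with tf.Module (assuming TF 2.x eager execution; the values mirror the test for readability):

import tensorflow as tf

first, second = tf.Module(), tf.Module()
first.second, second.first = second, first  # reference cycle
first.v = tf.Variable([3., 1., 4.])
second.v = tf.Variable([1., 1., 2., 3.])
path = tf.train.Checkpoint(root=first).save("/tmp/dep_loop/ckpt")

first_load, second_load = tf.Module(), tf.Module()
first_load.second, second_load.first = second_load, first_load
first_load.v = tf.Variable(tf.zeros([3]))
second_load.v = tf.Variable(tf.zeros([4]))
tf.train.Checkpoint(root=first_load).restore(path).assert_consumed()
assert first_load.v.numpy().tolist() == [3., 1., 4.]
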
Code example #7: _initialized_model
 def _initialized_model(self):
   input_value = constant_op.constant([[3.]])
   network = MyNetwork()
   optimizer = adam.AdamOptimizer(0.001)
   optimizer_step = training_util.get_or_create_global_step()
   root_checkpointable = checkpointable_utils.Checkpoint(
       optimizer=optimizer, network=network, optimizer_step=optimizer_step)
   train_op = optimizer.minimize(
       functools.partial(network, input_value),
       global_step=optimizer_step)
   self.evaluate(checkpointable_utils.gather_initializers(
       root_checkpointable))
   self.evaluate(train_op)
   # A regular variable, a slot variable, and a non-slot Optimizer variable
   # with known values to check when loading.
   self.evaluate(network._named_dense.bias.assign([1.]))
   self.evaluate(optimizer.get_slot(
       var=network._named_dense.bias, name="m").assign([2.]))
   beta1_power, _ = optimizer._get_beta_accumulators()
   self.evaluate(beta1_power.assign(3.))
   return root_checkpointable
Code example #8: testAmbiguousLoad
 def testAmbiguousLoad(self):
   # Not OK to split one checkpoint object into two
   checkpoint_directory = self.get_temp_dir()
   save_root = checkpointable.Checkpointable()
   save_root.dep_one = checkpointable.Checkpointable()
   save_root.dep_two = checkpointable.Checkpointable()
   dep_three = checkpointable.Checkpointable()
   save_root.dep_one.dep_three = dep_three
   save_root.dep_two.dep_three = dep_three
   checkpointable_utils.add_variable(dep_three, name="var", initializer=0.)
   self.evaluate(checkpointable_utils.gather_initializers(save_root))
   save_path = checkpointable_utils.CheckpointableSaver(save_root).save(
       os.path.join(checkpoint_directory, "ckpt"))
   load_root = checkpointable.Checkpointable()
   checkpointable_utils.CheckpointableSaver(load_root).restore(save_path)
   load_root.dep_one = checkpointable.Checkpointable()
   load_root.dep_two = checkpointable.Checkpointable()
   load_root.dep_one.dep_three = checkpointable.Checkpointable()
   with self.assertRaisesRegexp(AssertionError,
                                "resolved to different objects"):
     load_root.dep_two.dep_three = checkpointable.Checkpointable()
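
This is the inverse of example #1: one saved object may not fan out to two distinct Python objects. A hedged sketch of the same failure with the public API (assuming TF 2.x; whether the AssertionError surfaces at attribute assignment, as in the test, or at a later consistency check may vary by version):

import tensorflow as tf

shared = tf.Module()
shared.v = tf.Variable(0.)
save_root = tf.Module()
save_root.dep_one, save_root.dep_two = tf.Module(), tf.Module()
save_root.dep_one.shared = shared
save_root.dep_two.shared = shared
path = tf.train.Checkpoint(root=save_root).save("/tmp/ambiguous/ckpt")

load_root = tf.Module()
status = tf.train.Checkpoint(root=load_root).restore(path)
load_root.dep_one, load_root.dep_two = tf.Module(), tf.Module()
load_root.dep_one.shared = tf.Module()
try:
  # A *different* object for the same checkpoint node is ambiguous.
  load_root.dep_two.shared = tf.Module()
  status.assert_consumed()
except AssertionError as e:
  print(e)  # e.g. "... resolved to different objects ..."
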
Code example #9: testDeferredSlotRestoration
  def testDeferredSlotRestoration(self):
    checkpoint_directory = self.get_temp_dir()

    root = checkpointable.Checkpointable()
    root.var = checkpointable_utils.add_variable(
        root, name="var", initializer=0.)
    optimizer = adam.AdamOptimizer(0.1)
    if context.in_graph_mode():
      train_op = optimizer.minimize(root.var)
      # Note that `optimizer` has not been added as a dependency of
      # `root`. Create a one-off grouping so that slot variables for `root.var`
      # get initialized too.
      self.evaluate(checkpointable_utils.gather_initializers(
          checkpointable_utils.Checkpoint(root=root, optimizer=optimizer)))
      self.evaluate(train_op)
    else:
      optimizer.minimize(root.var.read_value)
    self.evaluate(state_ops.assign(root.var, 12.))
    no_slots_path = checkpointable_utils.CheckpointableSaver(root).save(
        os.path.join(checkpoint_directory, "no_slots"))
    root.optimizer = optimizer
    self.evaluate(state_ops.assign(root.var, 13.))
    self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
                                   14.))
    slots_path = checkpointable_utils.CheckpointableSaver(root).save(
        os.path.join(checkpoint_directory, "with_slots"))
    new_root = checkpointable.Checkpointable()
    # Load the slot-containing checkpoint (deferred), then immediately overwrite
    # the non-slot variable (also deferred).
    slot_status = checkpointable_utils.CheckpointableSaver(
        new_root).restore(slots_path)
    no_slot_status = checkpointable_utils.CheckpointableSaver(
        new_root).restore(no_slots_path)
    with self.assertRaises(AssertionError):
      no_slot_status.assert_consumed()
    new_root.var = checkpointable_utils.add_variable(
        new_root, name="var", shape=[])
    no_slot_status.assert_consumed()
    no_slot_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(new_root.var))
    new_root.optimizer = adam.AdamOptimizer(0.1)
    with self.assertRaisesRegexp(AssertionError, "beta1_power"):
      slot_status.assert_consumed()
    self.assertEqual(12., self.evaluate(new_root.var))
    if context.in_eager_mode():
      # Slot variables are only created with restoring initializers when
      # executing eagerly.
      self.assertEqual(14., self.evaluate(
          new_root.optimizer.get_slot(name="m", var=new_root.var)))
    else:
      self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
                    None)
    if context.in_graph_mode():
      train_op = new_root.optimizer.minimize(new_root.var)
      # The slot variable now exists; restore() didn't create it, but we should
      # now have a restore op for it.
      slot_status.run_restore_ops()
      self.assertEqual(14., self.evaluate(
          new_root.optimizer.get_slot(name="m", var=new_root.var)))
      self.evaluate(train_op)
    else:
      new_root.optimizer.minimize(new_root.var.read_value)
    slot_status.assert_consumed()
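
Slot-variable restoration stays deferred in the public API as well: Adam's m/v slots are filled in only once they are created. A sketch (assuming TF 2.x eager execution, with tf.keras.optimizers.Adam standing in for the v1 AdamOptimizer):

import tensorflow as tf

var = tf.Variable(1.)
opt = tf.keras.optimizers.Adam(0.1)
opt.minimize(lambda: var * var, [var])  # first step creates the slots
path = tf.train.Checkpoint(var=var, opt=opt).save("/tmp/slots/ckpt")

new_var = tf.Variable(0.)
new_opt = tf.keras.optimizers.Adam(0.1)
status = tf.train.Checkpoint(var=new_var, opt=new_opt).restore(path)
# new_var is restored immediately; the slots do not exist yet, so their
# restore stays queued until the first minimize/apply_gradients call.
new_opt.minimize(lambda: new_var * new_var, [new_var])
status.assert_consumed()
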
Code example #10: testNamingWithOptimizer
 def testNamingWithOptimizer(self):
   input_value = constant_op.constant([[3.]])
   network = MyNetwork()
   # A nuisance Network using the same optimizer. Its slot variables should not
   # go in the checkpoint, since it is never depended on.
   other_network = MyNetwork()
   optimizer = adam.AdamOptimizer(0.001)
   optimizer_step = training_util.get_or_create_global_step()
   root_checkpointable = checkpointable_utils.Checkpoint(
       optimizer=optimizer, network=network, optimizer_step=optimizer_step)
   if context.in_eager_mode():
     optimizer.minimize(
         lambda: network(input_value),
         global_step=optimizer_step)
     optimizer.minimize(
         lambda: other_network(input_value),
         global_step=optimizer_step)
   else:
     train_op = optimizer.minimize(
         network(input_value), global_step=optimizer_step)
     optimizer.minimize(
         other_network(input_value),
         global_step=optimizer_step)
     self.evaluate(checkpointable_utils.gather_initializers(
         root_checkpointable))
     self.evaluate(train_op)
   named_variables, serialized_graph = (
       checkpointable_utils._serialize_object_graph(root_checkpointable))
   expected_checkpoint_names = (
       # Created in the root node, so no prefix.
       "optimizer_step",
       # No name provided to track_checkpointable(), so the position is used
       # instead (one-based).
       "network/via_track_layer/kernel",
       # track_checkpointable() with a name provided, so that's used
       "network/_named_dense/kernel",
       "network/_named_dense/bias",
       # non-Layer dependency of the network
       "network/_non_layer/a_variable",
       # The optimizer creates two non-slot variables
       "optimizer/beta1_power",
       "optimizer/beta2_power",
       # Slot variables
       "network/via_track_layer/kernel/.OPTIMIZER_SLOT/optimizer/m",
       "network/via_track_layer/kernel/.OPTIMIZER_SLOT/optimizer/v",
       "network/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
       "network/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
       "network/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
       "network/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
   )
   suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
   expected_checkpoint_names = [
       name + suffix for name in expected_checkpoint_names]
   six.assertCountEqual(self, expected_checkpoint_names,
                        named_variables.keys())
   # Check that we've mapped to the right variable objects (not exhaustive)
   self.assertEqual(
       "global_step:0",
       named_variables["optimizer_step" + suffix].name)
   self.assertEqual(
       "my_network/dense_1/kernel:0",
       named_variables["network/via_track_layer/kernel" + suffix].name)
   self.assertEqual(
       "my_network/dense/kernel:0",
       named_variables["network/_named_dense/kernel" + suffix].name)
   self.assertEqual(
       "beta1_power:0",
       named_variables["optimizer/beta1_power" + suffix].name)
   self.assertEqual(
       "beta2_power:0",
       named_variables["optimizer/beta2_power" + suffix].name)
   # Spot check the generated protocol buffers.
   self.assertEqual("optimizer",
                    serialized_graph.nodes[0].children[1].local_name)
   optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
       1].node_id]
   self.assertEqual("beta1_power",
                    optimizer_node.children[0].local_name)
   self.assertEqual("beta1_power",
                    serialized_graph.nodes[optimizer_node.children[0].node_id]
                    .attributes[0].full_name)
   self.assertEqual(
       "my_network/dense/kernel",
       serialized_graph.nodes[optimizer_node.slot_variables[0]
                              .original_variable_node_id]
       .attributes[0].full_name)
   # We strip off the :0 suffix, as variable.name-based saving does.
   self.assertEqual(
       "my_network/dense/kernel/Adam",
       serialized_graph.nodes[optimizer_node.slot_variables[0]
                              .slot_variable_node_id]
       .attributes[0].full_name)
   self.assertEqual(
       "my_network/dense/kernel/Adam:0",
       optimizer.get_slot(
           var=named_variables["network/_named_dense/kernel" + suffix],
           name="m").name)
   self.assertEqual(
       "network/_named_dense/kernel" + suffix,
       serialized_graph.nodes[
           optimizer_node.slot_variables[0]
           .original_variable_node_id].attributes[0].checkpoint_key)
   self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
   self.assertEqual(
       "network/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
       serialized_graph.nodes[
           optimizer_node.slot_variables[0]
           .slot_variable_node_id].attributes[0].checkpoint_key)
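
The .ATTRIBUTES/VARIABLE_VALUE and .OPTIMIZER_SLOT key shapes checked above can be inspected in any object-based checkpoint. A sketch that dumps the flat keys with the public API (assuming TF 2.x; the exact slot layout varies across optimizer versions):

import tensorflow as tf

net = tf.keras.layers.Dense(1)
net(tf.constant([[3.]]))  # build kernel and bias
opt = tf.keras.optimizers.Adam(0.001)
opt.minimize(lambda: net(tf.constant([[3.]])), net.trainable_variables)
path = tf.train.Checkpoint(network=net, optimizer=opt).save("/tmp/names/ckpt")
for key, shape in tf.train.list_variables(path):
  print(key, shape)
# Expect keys along the lines of
#   network/kernel/.ATTRIBUTES/VARIABLE_VALUE
#   network/kernel/.OPTIMIZER_SLOT/optimizer/m
# plus save_counter and the serialized object graph entry.
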
Code example #11: testSaveRestore (MyModel)
 def testSaveRestore(self):
   model = MyModel()
   optimizer = adam.AdamOptimizer(0.001)
   root_checkpointable = checkpointable_utils.Checkpoint(
       optimizer=optimizer, model=model)
   input_value = constant_op.constant([[3.]])
   if context.executing_eagerly():
     optimizer.minimize(
         lambda: model(input_value))
   else:
     train_op = optimizer.minimize(model(input_value))
     # TODO(allenl): Make initialization more pleasant when graph building.
     root_checkpointable.save_counter  # pylint: disable=pointless-statement
     self.evaluate(checkpointable_utils.gather_initializers(
         root_checkpointable))
     self.evaluate(train_op)
   prefix = os.path.join(self.get_temp_dir(), "ckpt")
   self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
   m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
   self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
   save_path = root_checkpointable.save(file_prefix=prefix)
   self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
   self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
   optimizer_variables = self.evaluate(optimizer.variables())
   self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
   # Immediate restoration
   status = root_checkpointable.restore(save_path=save_path).assert_consumed()
   status.run_restore_ops()
   self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
   self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
   self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
   if not context.executing_eagerly():
     return  # Restore-on-create is only supported when executing eagerly
   on_create_model = MyModel()
   on_create_optimizer = adam.AdamOptimizer(
       0.001,
       # Preserve beta1_power and beta2_power when applying gradients so we can
       # test that they've been restored correctly.
       beta1=1.0, beta2=1.0)
   on_create_root = checkpointable_utils.Checkpoint(
       optimizer=on_create_optimizer, model=on_create_model)
   # Deferred restoration
   status = on_create_root.restore(save_path=save_path)
   on_create_model(constant_op.constant([[3.]]))  # create variables
   self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
   self.assertAllEqual([42.],
                       self.evaluate(
                           on_create_model._named_dense.variables[1]))
   on_create_m_bias_slot = on_create_optimizer.get_slot(
       on_create_model._named_dense.variables[1], "m")
   # Optimizer slot variables are created when the original variable is
   # restored.
   self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
   self.assertAllEqual(optimizer_variables[2:],
                       self.evaluate(on_create_optimizer.variables()))
   dummy_var = resource_variable_ops.ResourceVariable([1.])
   on_create_optimizer.minimize(loss=dummy_var.read_value)
   status.assert_consumed()
   beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
   self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power))
   self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))