Example #1
  def testVariableCreator(self):
    variable_names = []

    def creator_a(next_creator, **kwargs):
      variable_names.append(kwargs.get("name", ""))
      return next_creator(**kwargs)

    def creator_b(next_creator, **kwargs):
      kwargs["name"] = "forced_name"
      return next_creator(**kwargs)

    with tf.variable_creator_scope(creator_a):
      with tf.variable_creator_scope(creator_b):
        tf.compat.v1.Variable(1.0, name="one_name")

    self.assertEqual(variable_names[0], "forced_name")

    called = [False]

    def creator_c(next_creator, **kwargs):
      called[0] = True
      self.assertEqual(kwargs["synchronization"],
                       tf.VariableSynchronization.ON_WRITE)
      self.assertEqual(kwargs["aggregation"],
                       tf.compat.v1.VariableAggregation.MEAN)
      return next_creator(**kwargs)

    with tf.variable_creator_scope(creator_c):
      tf.compat.v1.get_variable(
          "v", [],
          synchronization=tf.VariableSynchronization.ON_WRITE,
          aggregation=tf.compat.v1.VariableAggregation.MEAN)
    self.assertTrue(called[0])
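
The test above runs inside a test class; as a minimal, self-contained sketch of the same creator signature (the names here are illustrative, not from the original test):

import tensorflow as tf

def logging_creator(next_creator, **kwargs):
  # Every creator receives the next creator in the chain plus the keyword
  # arguments tf.Variable was called with (name, initial_value, trainable, ...).
  print("creating variable:", kwargs.get("name"))
  return next_creator(**kwargs)

with tf.variable_creator_scope(logging_creator):
  v = tf.Variable(1.0, name="v")  # logging_creator runs before the default creator
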
Example #2
    def testVariableCreatorNestingError(self):
        def creator(next_creator, **kwargs):
            return next_creator(**kwargs)

        # Save the state so we can clean up at the end.
        graph = tf.compat.v1.get_default_graph()
        old_creator_stack = graph._variable_creator_stack

        try:
            scope = tf.variable_creator_scope(creator)
            scope.__enter__()
            with tf.variable_creator_scope(creator):
                with self.assertRaises(RuntimeError):
                    scope.__exit__(None, None, None)
        finally:
            graph._variable_creator_stack = old_creator_stack
Example #3
def MnistTeacher(input, keep_prob_conv, keep_prob_hidden, scope='Mnist', reuse=False):
  with tf2.variable_creator_scope(scope, reuse=reuse) as sc:
    with slim.arg_scope([slim.conv2d], kernel_size=[3, 3], stride=[1, 1],
                        biases_initializer=tf2.constant_initializer(0.0),
                        activation_fn=tf2.nn.relu):

      net = slim.conv2d(input, 32, scope='conv1')
      net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
      net = tf2.nn.dropout(net, keep_prob_conv)

      net = slim.conv2d(net, 64, scope='conv2')
      net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
      net = tf2.nn.dropout(net, keep_prob_conv)

      net = slim.conv2d(net, 128, scope='conv3')
      net = slim.max_pool2d(net, [2, 2], 2, scope='pool3')
      net = tf2.nn.dropout(net, keep_prob_conv)

      net = slim.flatten(net)
    with slim.arg_scope([slim.fully_connected],
                        biases_initializer=tf2.constant_initializer(0.0),
                        activation_fn=tf2.nn.relu):
      net = slim.fully_connected(net, 625, scope='fc1')
      net = tf2.nn.dropout(net, keep_prob_hidden)
      net = slim.fully_connected(net, 10, activation_fn=None, scope='fc2')

      net = tf2.nn.softmax(net / temperature)  # `temperature` is assumed to be the distillation temperature defined in the enclosing scope
      return net
Example #4
  def testOptimizerInsideModelFn(self, distribution, optimizer_fn):
    if (not tf.executing_eagerly() and
        tf.compat.v1.control_flow_v2_enabled()):
      self.skipTest("b/138751864")
    created_variables = []
    trainable_variables = []

    def appending_creator(next_creator, **kwargs):
      v = next_creator(**kwargs)
      created_variables.append(v.name)
      if "trainable" in kwargs and kwargs["trainable"]:
        trainable_variables.append(v.name)
      return v

    # Creator scope needs to be set before it's used inside
    # `distribution.scope`.
    with tf.variable_creator_scope(
        appending_creator), distribution.scope():
      optimizer = optimizer_fn()
      model_fn, dataset_fn, _ = minimize_loss_example(
          optimizer, use_bias=True, use_callable_loss=True)

      def step_fn(ctx, inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.extended.call_for_each_replica(
                model_fn, args=(inputs,)))

      iterator = self._get_iterator(distribution, dataset_fn)

      def run_step():
        return distribution.extended.experimental_run_steps_on_iterator(
            step_fn, iterator, iterations=1).run_op

      if not tf.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(tf.compat.v1.global_variables_initializer())
      run_step()

      def get_expected_variables(num_parameter_devices):
        name = optimizer._name

        if isinstance(optimizer, optimizer_v2.OptimizerV2):
          variables = VAR_MAP_V2[name]
        else:
          variables = VAR_MAP_V1[name]

        extended_variables = [
            v + "/replica_{}".format(replica)
            for v in variables
            for replica in range(1, num_parameter_devices)
        ]
        variables = list(variables) + extended_variables
        return set(v + ":0" for v in variables)

      self.assertEqual(
          get_expected_variables(len(distribution.extended.parameter_devices)),
          set(created_variables))
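
A standalone, hedged sketch of the same tracking idea, using a single-device MirroredStrategy and a Keras layer instead of the minimize_loss_example helpers (both are assumptions made for illustration):

import tensorflow as tf

created_variables = []

def appending_creator(next_creator, **kwargs):
  v = next_creator(**kwargs)
  created_variables.append(v.name)
  return v

strategy = tf.distribute.MirroredStrategy(["/cpu:0"])
# As in the test above, the creator scope is entered before the strategy scope.
with tf.variable_creator_scope(appending_creator), strategy.scope():
  layer = tf.keras.layers.Dense(1)
  layer.build((None, 3))
print(created_variables)  # e.g. ['dense/kernel:0', 'dense/bias:0']
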
Example #5
def MnistStudent(input, scope="Mnist", reuse=False):
  with tf2.variable_creator_scope(scope, reuse=reuse) as sc:
    with slim.arg_scope([slim.fully_connected],
                        biases_initializer=tf2.constant_initializer(0.0),
                        activation_fn=tf2.nn.sigmoid):

      net = slim.fully_connected(input, 1000, scope='fc1')
      net = slim.fully_connected(net, 10, activation_fn=None, scope='fc2')
      return net
Example #6
 def wrapper(*args, **kwargs):
     with contextlib.ExitStack() as stack:
         # The two hacks below enable a large speedup when initializing large
         # models on TPU pods.
         # TODO(b/141243467) Remove these workarounds.
         stack.enter_context(_eager_initial_values())
         stack.enter_context(
             tf.variable_creator_scope(_eager_variable_creator))
         return f(*args, **kwargs)
Example #7
    def _from_components(self, weights):
        counter = [0]

        def fetch_variable(next_creator, **kwargs):
            del next_creator, kwargs
            # TODO(yuefengz): verify the var creation order matches the weights
            # property
            var = weights[counter[0]]
            counter[0] += 1
            return var

        with tf.variable_creator_scope(fetch_variable):
            ret = MeanMetricAsCompositeTensor.from_config(self._config)
        assert len(weights) == len(ret.weights)
        return ret
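
The same "reuse instead of create" pattern in isolation, as a hedged standalone sketch (the variable list and names are illustrative):

import tensorflow as tf

existing = [tf.Variable(0.0, name="w0"), tf.Variable(1.0, name="w1")]
counter = [0]

def fetch_existing(next_creator, **kwargs):
  del next_creator, kwargs  # hand back pre-built variables instead of creating new ones
  var = existing[counter[0]]
  counter[0] += 1
  return var

with tf.variable_creator_scope(fetch_existing):
  a = tf.Variable(123.0)  # returns existing[0]; nothing new is created
  b = tf.Variable(456.0)  # returns existing[1]
assert a is existing[0] and b is existing[1]
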
Example #8
  def _call_wrapper(self, *args, **kwargs):
    created_variables = []

    def _variable_creator(next_creator, **creator_kwargs):
      var = next_creator(**creator_kwargs)
      created_variables.append(var)
      return var

    with tf.GradientTape(watch_accessed_variables=True) as tape, \
        tf.variable_creator_scope(_variable_creator):
      # We explicitly drop `name` arguments here,
      # to guard against the case where an op explicitly has a
      # `name` passed (which is susceptible to producing
      # multiple ops w/ the same name when the layer is reused)
      kwargs.pop('name', None)
      result = self.function(*args, **kwargs)
    self._check_variables(created_variables, tape.watched_variables())
    return result
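
A self-contained, hedged sketch of the check this wrapper performs; `check_fn_variables` and `f` are illustrative names, not part of the original layer:

import tensorflow as tf

def check_fn_variables(fn, *args):
  created = []

  def _creator(next_creator, **kwargs):
    var = next_creator(**kwargs)
    created.append(var)
    return var

  with tf.GradientTape(watch_accessed_variables=True) as tape, \
      tf.variable_creator_scope(_creator):
    out = fn(*args)
  # Variables that were created but never read under the tape are "untracked".
  used = {id(v) for v in tape.watched_variables()}
  untracked = [v for v in created if id(v) not in used]
  return out, created, untracked

def f(x):
  w = tf.Variable(3.0)
  return w * x

out, created, untracked = check_fn_variables(f, tf.constant(2.0))
print(out.numpy(), len(created), len(untracked))  # 6.0 1 0
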
Example #9
  def test_keras_layer_add_weight(self):

    class Layer(base_layer.Layer):

      def __init__(self):
        super().__init__()
        self.w = self.add_weight(
            shape=(2,), initializer=lambda shape, dtype: [0, 1], trainable=True)
        self.b = self.add_weight(
            shape=(2,),
            initializer=lambda shape, dtype: [2, 3],
            trainable=False)

    def sharded_variable_creator(next_creator, **kwargs):
      v1_value = kwargs['initial_value']()[0:1]
      v2_value = kwargs['initial_value']()[1:]

      kwargs['initial_value'] = v1_value
      kwargs['shape'] = (1,)
      v1 = next_creator(**kwargs)

      kwargs['initial_value'] = v2_value
      kwargs['shape'] = (1,)
      v2 = next_creator(**kwargs)

      return sharded_variable.ShardedVariable([v1, v2])

    with tf.variable_creator_scope(sharded_variable_creator):
      layer = Layer()

    self.assertLen(layer.trainable_weights, 2)
    self.assertEqual(layer.trainable_weights[0], [0])
    self.assertEqual(layer.trainable_weights[1], [1])
    self.assertLen(layer.non_trainable_weights, 2)
    self.assertEqual(layer.non_trainable_weights[0], [2])
    self.assertEqual(layer.non_trainable_weights[1], [3])
    self.assertAllEqual(layer.weights,
                        layer.trainable_weights + layer.non_trainable_weights)
    self.assertAllEqual(layer.trainable_weights, layer.trainable_variables)
    self.assertAllEqual(layer.weights, layer.variables)

    checkpoint_deps = set(dep.ref for dep in layer._checkpoint_dependencies)
    self.assertEqual(checkpoint_deps, set([layer.w, layer.b]))
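
A hedged sketch of the splitting idea without Keras internals; returning a plain Python list instead of a ShardedVariable is a simplification for illustration:

import tensorflow as tf

def splitting_creator(next_creator, **kwargs):
  value = kwargs.pop("initial_value")
  value = tf.convert_to_tensor(value() if callable(value) else value)
  # Build one variable per shard by calling the next creator twice.
  first = next_creator(initial_value=value[:1], **kwargs)
  second = next_creator(initial_value=value[1:], **kwargs)
  return [first, second]

with tf.variable_creator_scope(splitting_creator):
  shards = tf.Variable([0.0, 1.0, 2.0, 3.0])
print([s.numpy() for s in shards])  # e.g. [array([0.]), array([1., 2., 3.])]
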
Example #10
    def __call__(self, *args, **kwargs):
        new_kwargs = dict(self._fn_kwargs).copy()
        new_kwargs.update(kwargs)
        kwargs = new_kwargs

        def _Creator(next_creator, **kwargs):
            if self._built:
                ret = self._var_cache[_Creator.counter]
            else:
                ret = next_creator(**kwargs)
                self._var_cache.append(ret)
            _Creator.counter += 1
            return ret

        _Creator.counter = 0

        with tf.variable_creator_scope(_Creator):
            ret = self._fn(*args, **kwargs)
        self._built = True
        return ret
Example #11
    def call(self, inputs, mask=None, training=None):
        # We must copy for thread safety, but it only needs to be a shallow copy.
        kwargs = {k: v for k, v in self.arguments.items()}
        if self._fn_expects_mask_arg:
            kwargs['mask'] = mask
        if self._fn_expects_training_arg:
            kwargs['training'] = training

        created_variables = []

        def _variable_creator(next_creator, **kwargs):
            var = next_creator(**kwargs)
            created_variables.append(var)
            return var

        with tf.GradientTape(watch_accessed_variables=True) as tape,\
            tf.variable_creator_scope(_variable_creator):
            result = self.function(inputs, **kwargs)
        self._check_variables(created_variables, tape.watched_variables())
        return result
Example #12
 def scope(self):
     with contextlib.ExitStack() as stack:
         stack.enter_context(super(Replicator, self).scope())
         stack.enter_context(
             tf.variable_creator_scope(replica_local_creator))
         yield
Example #13
 def scope(self):
   with tf.variable_creator_scope(
       self._variable_creator), vs.with_variable_store(self._var_store):
     yield
Example #14
 def create_variables():
     # When this method is called in a graph context, any usage of
     # `tf.init_scope` will bypass this variable creator scope, resulting
     # in different behavior.
     with tf.variable_creator_scope(capture_variable_creation):
         return metrics.Sum().variables
Example #15
 def build(self, input_shape=None):
     del input_shape  # Unused.
     with tf.variable_creator_scope(self._tensor_creator):
         self.module = self.constructor()
     self.built = True