Example #1
  def __init__(self, batch_env, step, is_training, should_log, config):
    self._batch_env = batch_env
    self._step = step  # Trainer step, not environment step.
    self._is_training = is_training
    self._should_log = should_log
    self._config = config
    self._num_models = config.num_models
    self._cell = config.cell  # One candidate cell per model.
    # Mirror each cell's zero state in local variables so the state persists
    # across session runs without being written to checkpoints.
    var_like = lambda x: tf.get_local_variable(
        x.name.split(':')[0].replace('/', '_') + '_var',
        shape=x.shape,
        initializer=lambda *_, **__: tf.zeros_like(x), use_resource=True)
    self._state = []
    for mdl in range(self._num_models):
        self._state.append(nested.map(
            var_like, self._cell[mdl].zero_state(len(batch_env), tf.float32)))

    self._prev_action = tf.get_local_variable(
        'prev_action_var', shape=self._batch_env.action.shape,
        initializer=lambda *_, **__: tf.zeros_like(self._batch_env.action),
        use_resource=True)
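The var_like pattern above mirrors a nested RNN state in local variables that survive between session.run calls but never reach a checkpoint. A self-contained sketch of the same idea with a plain LSTM cell (the cell size, batch size, and use of tf.contrib.framework.nest in place of the project's nested.map are illustrative assumptions):

    import tensorflow as tf

    cell = tf.nn.rnn_cell.LSTMCell(32)
    zero_state = cell.zero_state(batch_size=4, dtype=tf.float32)
    var_like = lambda x: tf.get_local_variable(
        x.name.split(':')[0].replace('/', '_') + '_var',
        shape=x.shape,
        initializer=lambda *_, **__: tf.zeros_like(x), use_resource=True)
    # LSTMStateTuple(c, h) -> LSTMStateTuple of local variables.
    state_vars = tf.contrib.framework.nest.map_structure(var_like, zero_state)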
Example #2
def lr_plateau_decay(lr=0.01,decay=0.9999, min_lr=1e-6, loss=None, scope='lr_plateau_decay'):
    with tf.name_scope(scope):
        his_len = 10
        local_lr = tf.get_local_variable(name='local_lr', dtype=tf.float32,
                                         initializer=tf.constant(lr, dtype=tf.float32))
        loss_idx = tf.get_local_variable(name='loss_idx', dtype=tf.int32,
                                         initializer=tf.constant(1,dtype=tf.int32))
        his_loss = tf.get_local_variable(name='history_loss',dtype=tf.float32,
                                             initializer=tf.zeros([his_len])-1.0)
        if loss is None:
            loss = tf.losses.get_total_loss()
        def true_fn():
            update_history = tf.assign(his_loss[loss_idx], loss)
            with tf.control_dependencies([update_history]):
                update_idx = tf.assign(loss_idx, tf.mod(loss_idx + 1, his_len))
            with tf.control_dependencies([update_idx]):
                updated_lr = tf.cond(pred=loss>tf.reduce_mean(his_loss),
                                     true_fn=lambda: tf.assign(local_lr, local_lr*decay),
                                     false_fn=lambda: local_lr)
            lr = tf.maximum(updated_lr, min_lr)
            return lr
        lr = tf.cond(pred=tf.equal(tf.mod(tf.train.get_global_step(), 100), 0),
                     true_fn=true_fn,
                     false_fn=lambda: tf.identity(local_lr))
        tf.summary.scalar('lr_plateau_decay', lr)
    return lr
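A minimal sketch of how the returned rate could be wired into a training loop, assuming a standard TF1 setup (the loss and optimizer choice below are illustrative; the function requires an existing global step):

    global_step = tf.train.get_or_create_global_step()
    loss = tf.reduce_mean(tf.square(tf.get_variable('w', shape=[10])))
    lr = lr_plateau_decay(lr=0.01, decay=0.9999, min_lr=1e-6, loss=loss)
    train_op = tf.train.GradientDescentOptimizer(lr).minimize(
        loss, global_step=global_step)
    # local_lr, loss_idx and history_loss are local variables, so run
    # tf.local_variables_initializer() alongside the global initializer.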
Example #3
    def create_global_summary_ops(self):
        assert (self._scope == "global")
        # get summary ops
        global_summaries = []
        with tf.variable_scope('global', reuse=True):
            fc1_weight = tf.get_local_variable(
                name='shared_layers/fc1/fc_weights')
            fc1_bias = tf.get_variable(name='shared_layers/fc1/fc_bias')
            fc2_weight = tf.get_local_variable(
                name='shared_layers/fc2/fc_weights')
            fc2_bias = tf.get_variable(name='shared_layers/fc2/fc_bias')
            fc3_weight = tf.get_local_variable(name='env_t1/fc3/fc_weights')
            fc3_bias = tf.get_variable(name='env_t1/fc3/fc_bias')

        with tf.name_scope("global_summary"):
            global_summaries.append(
                tf.summary.scalar('reward', tf.reduce_mean(self._tf_reward)))
            global_summaries.append(
                tf.summary.scalar('num_step',
                                  tf.reduce_mean(self._tf_num_steps)))
            global_summaries.append(
                tf.summary.histogram('fc1_weight', fc1_weight))
            global_summaries.append(tf.summary.histogram('fc1_bias', fc1_bias))
            global_summaries.append(
                tf.summary.histogram('fc2_weight', fc2_weight))
            global_summaries.append(tf.summary.histogram('fc2_bias', fc2_bias))
            global_summaries.append(
                tf.summary.histogram('fc3_weight', fc3_weight))
            global_summaries.append(tf.summary.histogram('fc3_bias', fc3_bias))
        A3CAgent.tf_global_summary_op = tf.summary.merge(global_summaries)
        A3CAgent.tf_summary_writer = tf.summary.FileWriter(
            A3CConfig.summary_folder, self._tf_sess.graph)
Example #4
 def testLogVariables(self):
   tf.get_default_graph().add_to_collection("config", {"version": 1})
   with tf.variable_scope("m1"):
     tf.get_variable("v1", shape=[3, 4])
   with tf.device("/gpu"):
     with tf.variable_scope("m2"):
       tf.get_local_variable("v2", shape=[5, 6])
   snt.log_variables()
Example #5
 def testLogVariables(self):
     tf.get_default_graph().add_to_collection("config", {"version": 1})
     with tf.variable_scope("m1"):
         tf.get_variable("v1", shape=[3, 4])
     with tf.device("/gpu"):
         with tf.variable_scope("m2"):
             tf.get_local_variable("v2", shape=[5, 6])
     snt.log_variables()
Example #6
def get_variable(name):
    if name.rfind(':') == -1:
        name += ':0'
    # tf.get_local_variable() requires a name, so enumerate the collection instead.
    for var in tf.local_variables():
        if name == var.name:
            return var
    return None
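A quick sanity check of the lookup helper (the variable name 'scratch' is illustrative):

    _ = tf.get_local_variable('scratch', shape=[3], initializer=tf.zeros_initializer())
    print(get_variable('scratch'))   # <tf.Variable 'scratch:0' shape=(3,) ...>
    print(get_variable('missing'))   # None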
Example #7
    def make_internal(self, name, shape, minibatched=True):
        """
        Creates a variable to represent an internal simulation signal.

        This is to handle the case where we want to add a signal that is
        not represented as a `nengo.builder.Signal` in the Nengo op graph.

        Parameters
        ----------
        name : str
            Name for the signal/variable.
        shape : tuple of int
            Shape of the signal/variable.
        minibatched : bool
            Whether or not this signal contains a minibatch dimension.

        Returns
        -------
        sig : `.TensorSignal`
            A TensorSignal representing the newly created variable.
        """
        sig = self.get_tensor_signal(
            np.arange(shape[0]), object(), self.dtype, shape,
            minibatched, label=name)

        with tf.variable_scope(tf.get_default_graph().get_name_scope(),
                               reuse=False):
            var = tf.get_local_variable(
                name, shape=sig.full_shape, dtype=sig.dtype, trainable=False,
                initializer=tf.zeros_initializer())

        self.internal_vars[sig.key] = var

        return sig
Example #8
    def get_initialized_params(self,
                               trainable=False,
                               scope="embedding",
                               reuse=False):
        """Returns a variable with the embeddings.

    Unlike `get_params`, this does not require running a Scaffold to initialize
    the variable. However, this method is not compatible with `tf.SavedModel`,
    since it uses a `tf.py_func` to initialize the embeddings variable.

    Args:
      trainable: Boolean indicating whether the params should be trainable.
      scope: The name of the inner-most scope for the params.
      reuse: Boolean indicating whether to reuse params in the same scope.

    Returns:
      embedding_weights: The embedding weights.
    """

        # Hide `self._idx2emb` behind tf.py_func so it does not get serialized
        # as part of the graph and blow up our log sizes.
        init_value = tf.py_func(lambda: self._idx2emb, [], tf.float32, False)
        init_value.set_shape([len(self._idx2emb), self._dims])

        with tf.variable_scope(scope, reuse=reuse):
            if trainable:
                embedding_weights = tf.get_variable("embedding_weights",
                                                    initializer=init_value)
            else:
                # Local variable so the embeddings won't get dumped into the checkpoints
                embedding_weights = tf.get_local_variable(
                    "embedding_weights", initializer=init_value)
        return embedding_weights
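The trainable/local split matters because local variables never land in checkpoints: tf.train.Saver saves global variables by default, and tf.get_local_variable registers the variable only in the LOCAL_VARIABLES collection. A tiny self-contained illustration (variable names are arbitrary):

    g = tf.get_variable('global_w', shape=[2], initializer=tf.zeros_initializer())
    l = tf.get_local_variable('local_w', shape=[2], initializer=tf.zeros_initializer())
    print([v.op.name for v in tf.global_variables()])  # ['global_w']
    print([v.op.name for v in tf.local_variables()])   # ['local_w']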
Example #9
    def build_loss(self, do_summary=True):
        """
        The loss consists of two parts: pixel_cls_loss + link_cls_loss,
            and link_cls_loss is calculated only on positive pixels
        """
        import config

        count_warning = tf.get_local_variable(
            name='count_warning', initializer=tf.constant(0.0))
        batch_size = config.batch_size_per_gpu
        background_label = config.background_label
        text_label = config.text_label
        pixel_link_neg_loss_weight_lambda = config.pixel_link_neg_loss_weight_lambda
        pixel_cls_loss_weight_lambda = config.pixel_cls_loss_weight_lambda


        # build the cls loss
        mask_input = tf.split(self.mask_input, num_or_size_splits=3, axis=-1)
        pos_mask = tf.squeeze(tf.equal(mask_input[1], text_label))
        pos_mask_flatten = tf.reshape(pos_mask, [batch_size, -1])
        neg_mask = tf.squeeze(tf.equal(mask_input[1], background_label))
        neg_mask_flatten = tf.reshape(neg_mask, [batch_size, -1])
        print('the pos_mask=', pos_mask)
        print('the neg_mask=', neg_mask)
        n_pos = tf.reduce_sum(tf.cast(pos_mask, dtype=tf.float32))
        pixel_cls_loss, pixel_cls_dice, pixel_cls_dice_loss = self.build_cls_loss(batch_size, pos_mask, neg_mask,
                                                                                  pos_mask_flatten, neg_mask_flatten,
                                                                                  n_pos, do_summary,
                                                                                  pixel_cls_loss_weight_lambda)

        if do_summary:
            tf.summary.scalar('pixel_cls_loss', pixel_cls_loss)
            tf.summary.scalar('pixel_cls_dice', pixel_cls_dice)
            tf.summary.scalar('pixel_cls_dice_loss', pixel_cls_dice_loss)
Example #10
def _update(name, update, init_shape, dtype):
    """Updates variable 'name' by concatenating 'update'.

  Args:
    name: Variable name.
    update: tensor to be concatenated to the existing value of the variable.
    init_shape: The initial shape of the variable.
    dtype: the type of the variable.

  Returns:
    v: the variable ref.
    v_assign: tensor that holds the new value of the variable after the update.
  """
    with tf.variable_scope('detection_metrics', reuse=tf.AUTO_REUSE):
        v = tf.get_local_variable(
            name,
            dtype=dtype,
            # init_shape is required to pass the shape inference check.
            initializer=tf.constant([], shape=init_shape, dtype=dtype))
        shape = tf.concat([[-1], tf.shape(update)[1:]], axis=0)
        v_reshape = tf.reshape(v.value(), shape)
        v_assign = tf.assign(v,
                             tf.concat([v_reshape, update], axis=0),
                             validate_shape=False)
    return v, v_assign
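A hedged usage sketch showing how the accumulator grows across update calls (placeholder shape and values are illustrative):

    update = tf.placeholder(tf.float32, [None, 2])
    boxes_var, boxes_assign = _update('boxes', update, init_shape=[0, 2],
                                      dtype=tf.float32)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(boxes_assign, feed_dict={update: [[1., 2.], [3., 4.]]})
        sess.run(boxes_assign, feed_dict={update: [[5., 6.]]})
        print(sess.run(boxes_var).shape)  # (3, 2) -- rows accumulate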
Example #11
 def testFormatVariables(self, use_resource, expected):
   with tf.variable_scope("m1"):
     v1 = tf.get_variable("v1", shape=[3, 4], use_resource=use_resource)
   with tf.device("/gpu"):
     with tf.variable_scope("m2"):
       v2 = tf.get_local_variable(
           "v2", shape=[5, 6], use_resource=use_resource)
   self.assertEqual(snt.format_variables([v2, v1]), expected)
Example #12
def bleu_score(predictions, labels, max_n):

    numerators_cum = tf.get_local_variable('numerators', [max_n], dtype=tf.float32, initializer=tf.zeros_initializer())
    denominators_cum = tf.get_local_variable('denominators', [max_n], dtype=tf.float32, initializer=tf.zeros_initializer())
    cand_lengths_cum = tf.get_local_variable('cand_lengths', [], dtype=tf.float32, initializer=tf.zeros_initializer())
    ref_lengths_cum = tf.get_local_variable('ref_lengths', [], dtype=tf.float32, initializer=tf.zeros_initializer())
    num, den, can, ref = tf.py_func(_get_bleu_score_params, (predictions, labels, max_n), (tf.float32, tf.float32, tf.float32, tf.float32))

    update_num_op = tf.assign_add(numerators_cum, num)
    update_denom_op = tf.assign_add(denominators_cum, den)
    update_cand_op = tf.assign_add(cand_lengths_cum, can)
    update_ref_op = tf.assign_add(ref_lengths_cum, ref)

    bleu = _calc_bleu_score(numerators_cum, denominators_cum, cand_lengths_cum, ref_lengths_cum, max_n)
    update_op = _calc_bleu_score(update_num_op, update_denom_op, update_cand_op, update_ref_op, max_n)

    return bleu, update_op
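bleu_score follows the usual TF1 streaming-metric contract of returning a (value, update_op) pair backed by local accumulator variables. Since the BLEU helpers (_get_bleu_score_params, _calc_bleu_score) are defined elsewhere, here is a self-contained illustration of the same contract using the built-in tf.metrics.mean:

    values = tf.placeholder(tf.float32, [None])
    mean, update_op = tf.metrics.mean(values)
    with tf.Session() as sess:
        # Metric accumulators are local variables, hence the local initializer.
        sess.run(tf.local_variables_initializer())
        sess.run(update_op, feed_dict={values: [1.0, 2.0]})
        sess.run(update_op, feed_dict={values: [3.0]})
        print(sess.run(mean))  # 2.0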
Example #13
 def testFormatVariables(self, use_resource, expected):
   with tf.variable_scope("m1"):
     v1 = tf.get_variable("v1", shape=[3, 4], use_resource=use_resource)
   with tf.device("/gpu"):
     with tf.variable_scope("m2"):
       v2 = tf.get_local_variable(
           "v2", shape=[5, 6], use_resource=use_resource)
   self.assertEqual(snt.format_variables([v2, v1]), expected)
Example #14
def bias_ops(ds: tf.data.Dataset, V):
    features, labels = ds.make_one_shot_iterator().get_next()
    tokens = features[TEXT]  # (N, L)
    token_lengths = features[SENTENCE_LENGTH]  # (N,)
    vocab_tally = tf.get_local_variable(
        name='vocab_tally',
        dtype=tf.int64,
        initializer=tf.initializers.zeros,
        shape=(V,)
    )  # (V,)
    word_count = tf.get_local_variable(
        name='word_count',
        dtype=token_lengths.dtype,
        initializer=tf.initializers.zeros,
        shape=[]
    )
    max_length = tf.get_local_variable(
        name='max_length',
        dtype=token_lengths.dtype,
        initializer=tf.initializers.zeros,
        shape=[]
    )
    sentence_count = tf.get_local_variable(
        name='sentence_count',
        dtype=tf.int32,
        initializer=tf.initializers.zeros,
        shape=[]
    )
    mask = tf.sequence_mask(
        maxlen=tf.shape(tokens)[1],
        lengths=token_lengths
    )  # (N, L)
    valid_tokens = tf.boolean_mask(tensor=tokens, mask=mask)  # (Z,)
    update_tally = tf.scatter_nd_add(
        ref=vocab_tally,
        indices=tf.expand_dims(valid_tokens, 1),
        updates=tf.ones(shape=tf.shape(valid_tokens), dtype=vocab_tally.dtype)
    )
    update_sentence_count = tf.assign_add(ref=sentence_count, value=tf.shape(tokens)[0])
    update_word_count = tf.assign_add(ref=word_count, value=tf.reduce_sum(token_lengths))
    update_max_length = tf.assign(ref=max_length, value=tf.maximum(
        max_length,
        tf.reduce_max(token_lengths)
    ))
    update = tf.group(update_tally, update_sentence_count, update_word_count, update_max_length)
    return vocab_tally, sentence_count, word_count, max_length, update
Example #15
 def testFormatVariables(self):
   with tf.variable_scope("m1"):
     v1 = tf.get_variable("v1", shape=[3, 4])
   with tf.device("/gpu"):
     with tf.variable_scope("m2"):
       v2 = tf.get_local_variable("v2", shape=[5, 6])
   self.assertEqual(snt.format_variables([v2, v1]),
                    _EXPECTED_FORMATTED_VARIABLE_LIST)
Example #16
 def testFormatVariables(self):
     with tf.variable_scope("m1"):
         v1 = tf.get_variable("v1", shape=[3, 4])
     with tf.device("/gpu"):
         with tf.variable_scope("m2"):
             v2 = tf.get_local_variable("v2", shape=[5, 6])
     self.assertEqual(snt.format_variables([v2, v1]),
                      _EXPECTED_FORMATTED_VARIABLE_LIST)
Example #17
def local_state_variables(init_values, return_init_values):
    """Create local variables initialized from init_values.

  This will create local variables from a list of init_values. Each variable
  will be named based on the value's shape and dtype.

  As a convenience, a boolean tensor allows you to return values from
  the created local variables or from the original init values.

  Args:
    init_values: iterable of tensors
    return_init_values: boolean tensor

  Returns:
    local_vars: list of the created local variables.
    vals: if return_init_values is true, then this returns the values of
      init_values. Otherwise it returns the values of the local_vars.
  """
    if not init_values:
        return [], []

    # This generates a harmless warning when saving the metagraph.
    variable_use_count = tf.get_collection_ref(
        _LOCAL_STATE_VARIABLE_COLLECTION)
    if not variable_use_count:
        variable_use_count.append(collections.defaultdict(int))
    variable_use_count = variable_use_count[0]

    local_vars = []
    with tf.variable_scope(OPTIMIZER_SCOPE):
        # We can't use the init_value as an initializer as init_value may
        # itself depend on some problem variables. This would produce
        # inter-variable initialization order dependence which TensorFlow
        # sucks at making easy.
        for init_value in init_values:
            name = create_local_state_variable_name(init_value)
            unique_name = name + "_" + str(variable_use_count[name])
            variable_use_count[name] += 1
            # The overarching idea here is to be able to reuse variables between
            # different sessions on the same TensorFlow master without errors. By
            # uniquifying based on the type and name we mirror the checks made inside
            # TensorFlow, while still allowing some memory reuse. Ultimately this is a
            # hack due to the broken Session.reset().
            local_vars.append(
                tf.get_local_variable(unique_name,
                                      initializer=tf.zeros(
                                          init_value.get_shape(),
                                          dtype=init_value.dtype)))

    # It makes things a lot simpler if we use the init_value the first
    # iteration, instead of the variable itself. It allows us to propagate
    # gradients through it as well as simplifying initialization. The variable
    # ends up assigned to after the first iteration.
    vals = tf.cond(return_init_values, lambda: init_values, lambda: local_vars)
    if len(init_values) == 1:
        # tf.cond extracts elements from singleton lists.
        vals = [vals]
    return local_vars, vals
Example #18
def local_state_variables(init_values, return_init_values):
  """Create local variables initialized from init_values.

  This will create local variables from a list of init_values. Each variable
  will be named based on the value's shape and dtype.

  As a convenience, a boolean tensor allows you to return values from
  the created local variables or from the original init values.

  Args:
    init_values: iterable of tensors
    return_init_values: boolean tensor

  Returns:
    local_vars: list of the created local variables.
    vals: if return_init_values is true, then this returns the values of
      init_values. Otherwise it returns the values of the local_vars.
  """
  if not init_values:
    return [], []

  # This generates a harmless warning when saving the metagraph.
  variable_use_count = tf.get_collection_ref(_LOCAL_STATE_VARIABLE_COLLECTION)
  if not variable_use_count:
    variable_use_count.append(collections.defaultdict(int))
  variable_use_count = variable_use_count[0]

  local_vars = []
  with tf.variable_scope(OPTIMIZER_SCOPE):
    # We can't use the init_value as an initializer as init_value may
    # itself depend on some problem variables. This would produce
    # inter-variable initialization order dependence which TensorFlow
    # sucks at making easy.
    for init_value in init_values:
      name = create_local_state_variable_name(init_value)
      unique_name = name + "_" + str(variable_use_count[name])
      variable_use_count[name] += 1
      # The overarching idea here is to be able to reuse variables between
      # different sessions on the same TensorFlow master without errors. By
      # uniquifying based on the type and name we mirror the checks made inside
      # TensorFlow, while still allowing some memory reuse. Ultimately this is a
      # hack due to the broken Session.reset().
      local_vars.append(
          tf.get_local_variable(
              unique_name,
              initializer=tf.zeros(
                  init_value.get_shape(), dtype=init_value.dtype)))

  # It makes things a lot simpler if we use the init_value the first
  # iteration, instead of the variable itself. It allows us to propagate
  # gradients through it as well as simplifying initialization. The variable
  # ends up assigned to after the first iteration.
  vals = tf.cond(return_init_values, lambda: init_values, lambda: local_vars)
  if len(init_values) == 1:
    # tf.cond extracts elements from singleton lists.
    vals = [vals]
  return local_vars, vals
Example #19
 def testFormatVariableMap(self, use_resource, expected):
   with tf.variable_scope("m1"):
     v1 = tf.get_variable("v1", shape=[3, 4], use_resource=use_resource)
   with tf.device("/gpu"):
     with tf.variable_scope("m2"):
       v2 = tf.get_local_variable(
           "v2", shape=[5, 6], use_resource=use_resource)
   var_map = {"vv1": v1, "vv2": v2}
   self.assertEqual(snt.format_variable_map(var_map), expected)
Example #20
 def testFormatVariableMap(self, use_resource, expected):
   with tf.variable_scope("m1"):
     v1 = tf.get_variable("v1", shape=[3, 4], use_resource=use_resource)
   with tf.device("/gpu"):
     with tf.variable_scope("m2"):
       v2 = tf.get_local_variable(
           "v2", shape=[5, 6], use_resource=use_resource)
   var_map = {"vv1": v1, "vv2": v2}
   self.assertEqual(snt.format_variable_map(var_map), expected)
Example #21
 def testFormatVariableMap(self):
     with tf.variable_scope("m1"):
         v1 = tf.get_variable("v1", shape=[3, 4])
     with tf.device("/gpu"):
         with tf.variable_scope("m2"):
             v2 = tf.get_local_variable("v2", shape=[5, 6])
     var_map = {"vv1": v1, "vv2": v2}
     self.assertEqual(snt.format_variable_map(var_map),
                      _EXPECTED_FORMATTED_VARIABLE_MAP)
Example #22
 def make_value_op():
     # It's important to use a local variable rather than a global Variable.
     # Global variables get restored from checkpoints. This would be bad here,
     # since we want to recompute the library with respect to the predictions
     # of the current model.
     return tf.get_local_variable(name=name,
                                  shape=library_shape,
                                  dtype=tf.float32,
                                  initializer=tf.zeros_initializer)
Example #23
 def testFormatVariableMap(self):
   with tf.variable_scope("m1"):
     v1 = tf.get_variable("v1", shape=[3, 4])
   with tf.device("/gpu"):
     with tf.variable_scope("m2"):
       v2 = tf.get_local_variable("v2", shape=[5, 6])
   var_map = {"vv1": v1, "vv2": v2}
   self.assertEqual(snt.format_variable_map(var_map),
                    _EXPECTED_FORMATTED_VARIABLE_MAP)
Example #24
def _get_local_variable_like(x):
    '''
    Shape of `x` must be known!
    '''
    if any(d is None for d in x.shape.as_list()):
        raise RuntimeError('shape not static: variable {} has shape {}'.format(
            x, x.shape))
    return tf.get_local_variable(_escape(x.name),
                                 initializer=tf.zeros(shape=x.shape,
                                                      dtype=x.dtype))
Example #25
 def make_tensor(shape, dtype, name):
     tf_type = tf.as_dtype(dtype)
     if tf_type == tf.string:
         empty = ''
     elif tf_type == tf.bool:
         empty = False
     else:
         empty = 0
     init = tf.constant(empty, shape=shape, dtype=tf_type)
     return tf.get_local_variable(name=name, initializer=init, dtype=tf_type)
Example #26
 def __init__(self, batch_env, step, is_training, should_log, config):
     self._batch_env = batch_env
     self._step = step  # Trainer step, not environment step.
     self._is_training = is_training
     self._should_log = should_log
     self._config = config
     self._cell = config.cell
     state = self._cell.zero_state(len(batch_env), tf.float32)
     var_like = lambda x: tf.get_local_variable(
         x.name.split(':')[0].replace('/', '_') + '_var',
         shape=x.shape,
         initializer=lambda *_, **__: tf.zeros_like(x),
         use_resource=True)
     self._state = nested.map(var_like, state)
     self._prev_action = tf.get_local_variable(
         'prev_action_var',
         shape=self._batch_env.action.shape,
         initializer=lambda *_, **__: tf.zeros_like(self._batch_env.action),
         use_resource=True)
Example #27
 def __init__(self, batch_env, step, is_training, should_log, config, adaptation, phase, global_step):
   self._step = step  # Trainer step, not environment step.
   self._globalstep = global_step
   self._is_training = is_training
   self._phase = phase
   self._adaptation = adaptation
   self._should_log = should_log
   self._config = config
   self._cell = config.cell
   self._num_envs = len(batch_env)
   state = self._cell.zero_state(self._num_envs, tf.float32)
   var_like = lambda x: tf.get_local_variable(
       x.name.split(':')[0].replace('/', '_') + '_var',
       shape=x.shape,
       initializer=lambda *_, **__: tf.zeros_like(x), use_resource=True)
   self._state = nested.map(var_like, state)
   batch_action_shape = (self._num_envs,) + batch_env.action_space.shape
   self._prev_action = tf.get_local_variable(
       'prev_action_var', shape=batch_action_shape,
       initializer=lambda *_, **__: tf.zeros(batch_action_shape),
       use_resource=True)
Example #28
 def make_tensor(shape, dtype, name):
      # TODO: convert inputs such as pd.DataFrame, pd.Series or np.ndarray into a tensor; `name` is the key in tensor_vars.
     tf_type = tf.as_dtype(dtype)
     if tf_type == tf.string:
         empty = ''
     elif tf_type == tf.bool:
         empty = False
     else:
         empty = 0
     init = tf.constant(empty, shape=shape, dtype=tf_type)
     return tf.get_local_variable(name=name,
                                  initializer=init,
                                  dtype=tf_type)
Example #29
def get_accuracy(labels, predictions, mask=None, name='accuracy'):
    with tf.variable_scope(name):
        correct = tf.cast(tf.equal(tf.to_int32(predictions), labels), dtype=tf.float32)
        if mask is not None:
            mask = tf.cast(mask, tf.float32)
            correct *= mask
            num = tf.reduce_sum(mask)
        else:
            num = tf.cast(tf.size(labels), tf.float32)

        correct = tf.reduce_sum(correct)

        total = tf.get_local_variable('total', [], dtype=tf.float32, initializer=tf.zeros_initializer())
        count = tf.get_local_variable('count', [], dtype=tf.float32, initializer=tf.zeros_initializer())

        update_total_op = tf.assign_add(total, correct)
        update_count_op = tf.assign_add(count, num)

        mean_t = _safe_div(total, count, 'value')
        update_op = _safe_div(update_total_op, update_count_op, 'update_op')

        return mean_t, update_op
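A runnable sketch of get_accuracy, with a stand-in for the unshown _safe_div helper (the stand-in is hypothetical and simply returns 0 when the denominator is 0, which may differ from the project's implementation):

    def _safe_div(numerator, denominator, name):
        # Stand-in: 0 when the denominator is 0, plain division otherwise.
        return tf.where(tf.equal(denominator, 0.0),
                        tf.zeros_like(numerator),
                        numerator / denominator, name=name)

    labels = tf.placeholder(tf.int32, [None])
    predictions = tf.placeholder(tf.int32, [None])
    acc, acc_update = get_accuracy(labels, predictions)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(acc_update, feed_dict={labels: [1, 0, 1], predictions: [1, 1, 1]})
        print(sess.run(acc))  # 2 correct out of 3 -> ~0.667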
Example #30
File: ops.py, Project: kiminh/debias
def as_initialized_variable(x, var_name, local=True):
    """Build a variable the is initialized to `x`, but without adding
  `x` to the tensorflow graph.

  The main reason to do this is to avoid the tensorflow
  graph becoming bloating with huge constants, which can make some operation very slow.
  This is accomplished by `hiding` the variable behind a py_fun intitializer
  """
    init_fn = tf.py_func(lambda: x, [], tf.float32, False)
    init_fn.set_shape(x.shape)
    if local:
        return tf.get_local_variable(var_name, initializer=init_fn)
    else:
        return tf.get_variable(var_name, initializer=init_fn)
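A minimal usage sketch (the numpy array and its size are illustrative):

    import numpy as np

    big_matrix = np.zeros((1000, 300), dtype=np.float32)
    emb = as_initialized_variable(big_matrix, 'emb_weights', local=True)
    with tf.Session() as sess:
        # Running the initializer invokes the hidden py_func once, so the
        # large constant never becomes part of the serialized graph.
        sess.run(tf.local_variables_initializer())
        print(sess.run(emb).shape)  # (1000, 300)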
Example #31
    def __init__(self, batch_env, step, is_training, should_log, config):
        print('mpc agent dual2')
        self._batch_env = batch_env
        self._step = step  # Trainer step, not environment step.
        self._is_training = is_training
        self._should_log = should_log
        self._config = config
        self._cell = config.cell
        self._length = 0
        self.logdir = config.logdir
        self.rival_dir = os.path.join('benchmark', config.rival, 'rollout')
        state = self._cell.zero_state(len(batch_env), tf.float32)
        var_like = lambda x: tf.get_local_variable(
            x.name.split(':')[0].replace('/', '_') + '_var',
            shape=x.shape,
            initializer=lambda *_, **__: tf.zeros_like(x),
            use_resource=True)
        self._state = nested.map(var_like, state)
        self._prev_action = tf.get_local_variable(
            'prev_action_var',
            shape=self._batch_env.action.shape,
            initializer=lambda *_, **__: tf.zeros_like(self._batch_env.action),
            use_resource=True)
        import functools
        dtypes, shapes = numpy_episodes._read_spec2(
            numpy_episodes.episode_reader, self.rival_dir)
        rival = tf.data.Dataset.from_generator(
            functools.partial(gener, self.rival_dir), dtypes, shapes)

        def wh(sq):
            print('sq', sq['observ'])
            return sq['observ'], sq['action'], sq['all_action'], sq[
                'collect'], sq['all_reward']

        rival = rival.map(wh)
        rival = rival.batch(1)
        self.rival = rival.make_one_shot_iterator()
Example #32
def get_or_create_counter(name):
    global _COUNTERS
    try:
        return _COUNTERS[name]
    except KeyError:
        var = tf.get_local_variable(f'counters/{name}',
                                    shape=[],
                                    dtype=tf.int64,
                                    initializer=tf.zeros_initializer())

        counter = attrdict.AttrDict(name=name,
                                    var=var,
                                    reset_op=tf.assign(var, 0),
                                    increment_op=tf.assign_add(var, 1))
        _COUNTERS[name] = counter
        return counter
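A usage sketch, assuming _COUNTERS is a module-level dict and attrdict is available as in the snippet; the counter name is illustrative:

    steps = get_or_create_counter('env_steps')
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(steps.increment_op)
        sess.run(steps.increment_op)
        print(sess.run(steps.var))  # 2
        sess.run(steps.reset_op)    # back to 0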
Example #33
 def create_id_queue(queue_name, id_var):
     queue = tf.get_local_variable(
         queue_name,
         trainable=False,  # not trainable parameters
         collections=[tf.GraphKeys.LOCAL_VARIABLES
                      ],  # place it on the local worker
         initializer=tf.zeros_initializer(dtype=tf.int64),
         dtype=tf.int64,
         shape=[FLAGS.queue_size])
     # update dictionary: dequeue the earliest batch, and enqueue the current batch
     # the indexing operation, i.e., queue[...], will not work if the queue is partitioned
     updated_queue = tf.concat(
         [queue[get_shape(id_var)[0]:], id_var],
         axis=0)  # [Q-B],[B]->[Q]
     self.queue_ops.append(queue.assign(updated_queue))
     return updated_queue
Example #34
 def __init__(self, dataset, split_by=1):
     self._input_data = {}
     # dataset.take(1) doesn't have GPU kernel.
     with tf.device("device:CPU:0"):
         tensor = tf.data.experimental.get_single_element(dataset.take(1))
     flat_tensor = tf.nest.flatten(tensor)
     variable_data = []
     self._initializers = []
     for t in flat_tensor:
         rebatched_t = tf.split(t, num_or_size_splits=split_by, axis=0)[0]
         assert rebatched_t.shape.is_fully_defined(), rebatched_t.shape
         v = tf.get_local_variable(self.random_name(),
                                   initializer=rebatched_t)  # pylint: disable=cell-var-from-loop
         variable_data.append(v)
         self._initializers.append(v.initializer)
     self._input_data = tf.nest.pack_sequence_as(tensor, variable_data)
Example #35
def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix):
    """This is the core model logic.

    Unrolls a Bayesian RNN over the given sequence.

    Args:
      data_ops: A `sequence_data.SequenceDataOps` namedtuple.
      embed_layer: A `snt.Embed` instance.
      rnn_core: A `snt.RNNCore` instance.
      output_linear: A `snt.Linear` instance.
      name_prefix: A string to use to prefix local variable names.

    Returns:
      A 3D time-major tensor representing the model's logits for a sequence of
      predictions. Shape `[time_steps, batch_size, vocab_size]`.
    """
    # Embed the input index sequence.
    embedded_input_seq = snt.BatchApply(embed_layer, name="input_embed_seq")(
        data_ops.sparse_obs)

    # Construct variables for holding the RNN state.
    initial_rnn_state = nest.map_structure(
        lambda t: tf.get_local_variable(  # pylint: disable=g-long-lambda
            "{}/rnn_state/{}".format(name_prefix, t.op.name),
            initializer=t),
        rnn_core.initial_state(FLAGS.batch_size),
    )
    assign_zero_rnn_state = nest.map_structure(
        lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state)
    assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state))

    # Unroll the RNN core over the sequence.
    rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn(
        cell=rnn_core,
        inputs=embedded_input_seq,
        initial_state=initial_rnn_state,
        time_major=True,
    )

    # Persist the RNN state for the next unroll.
    update_rnn_state = nest.map_structure(tf.assign, initial_rnn_state,
                                          rnn_final_state)
    with tf.control_dependencies(nest.flatten(update_rnn_state)):
        rnn_output_seq = tf.identity(rnn_output_seq, name="rnn_output_seq")
    output_logits = snt.BatchApply(output_linear,
                                   name="output_embed_seq")(rnn_output_seq)
    return output_logits, assign_zero_rnn_state
Example #36
File: ops.py, Project: kiminh/debias
def bucket_by_quantiles(len_fn, batch_size, n_buckets, hist_bounds):
    n_hist_binds = len(hist_bounds)

    if n_hist_binds < n_buckets:
        raise ValueError(
            "Requested %d buckets, but only have %d histogram bins" %
            (n_buckets, n_hist_binds))
    if any(hist_bounds[i] >= hist_bounds[i + 1]
           for i in range(n_hist_binds - 1)):
        raise ValueError("Bins must be descending")

    # The histogram: a count of the number of elements whose length was
    # greater than a fixed set of values (so `hist_counts[i]` is the number of
    # elements with size > hist_bounds[i]).
    # Need to use `use_resource = True` to make this work correctly
    # within tf.data.Dataset
    hist_counts = tf.get_local_variable("hist-counts",
                                        n_hist_binds + 1,
                                        tf.int64,
                                        tf.zeros_initializer(),
                                        use_resource=True)
    hist_bounds = tf.constant(hist_bounds, tf.int64)

    def bucket_fn(x):
        """Compute the element bucket and update the histogram."""
        ix = len_fn(x)
        if ix.dtype == tf.int32:
            ix = tf.to_int64(ix)
        elif ix.dtype != tf.int64:
            raise ValueError("Len function returned a non-int")

        adds_to_bins = tf.to_int64(tf.greater(hist_bounds, ix))
        # pad with a 1 for the "larger than all" bin
        adds_to_bins = tf.pad(adds_to_bins, [[0, 1]], constant_values=1)
        new_counts = tf.assign_add(hist_counts, adds_to_bins)
        bin_ix = n_hist_binds - tf.reduce_sum(adds_to_bins)

        # Computes the quantile based on the counts of the example's bucket
        bucket_ix = tf.floordiv(((n_buckets - 1) * new_counts[bin_ix]),
                                new_counts[-1])
        return bucket_ix

    def reduce_fn(_, x):
        return x.padded_batch(batch_size, x.output_shapes)

    return tf.contrib.data.group_by_window(bucket_fn, reduce_fn, batch_size)
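A hedged usage sketch: the transformation is applied with Dataset.apply, so the histogram variable lives inside the input pipeline (dataset contents, bounds, and batch size below are illustrative):

    ds = tf.data.Dataset.from_generator(
        lambda: ([0] * n for n in range(1, 200)),
        output_types=tf.int64,
        output_shapes=tf.TensorShape([None]))
    ds = ds.apply(bucket_by_quantiles(
        len_fn=lambda x: tf.shape(x)[0],
        batch_size=8,
        n_buckets=4,
        hist_bounds=[16, 32, 64, 128]))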
Example #37
def build_logits(data_ops, embed_layer, rnn_core, output_linear, name_prefix):
  """This is the core model logic.

  Unrolls a Bayesian RNN over the given sequence.

  Args:
    data_ops: A `sequence_data.SequenceDataOps` namedtuple.
    embed_layer: A `snt.Embed` instance.
    rnn_core: A `snt.RNNCore` instance.
    output_linear: A `snt.Linear` instance.
    name_prefix: A string to use to prefix local variable names.

  Returns:
    A 3D time-major tensor representing the model's logits for a sequence of
    predictions. Shape `[time_steps, batch_size, vocab_size]`.
  """
  # Embed the input index sequence.
  embedded_input_seq = snt.BatchApply(
      embed_layer, name="input_embed_seq")(data_ops.sparse_obs)

  # Construct variables for holding the RNN state.
  initial_rnn_state = nest.map_structure(
      lambda t: tf.get_local_variable(  # pylint: disable=g-long-lambda
          "{}/rnn_state/{}".format(name_prefix, t.op.name), initializer=t),
      rnn_core.initial_state(FLAGS.batch_size))
  assign_zero_rnn_state = nest.map_structure(
      lambda x: x.assign(tf.zeros_like(x)), initial_rnn_state)
  assign_zero_rnn_state = tf.group(*nest.flatten(assign_zero_rnn_state))

  # Unroll the RNN core over the sequence.
  rnn_output_seq, rnn_final_state = tf.nn.dynamic_rnn(
      cell=rnn_core,
      inputs=embedded_input_seq,
      initial_state=initial_rnn_state,
      time_major=True)

  # Persist the RNN state for the next unroll.
  update_rnn_state = nest.map_structure(
      tf.assign, initial_rnn_state, rnn_final_state)
  with tf.control_dependencies(nest.flatten(update_rnn_state)):
    rnn_output_seq = tf.identity(rnn_output_seq, name="rnn_output_seq")
  output_logits = snt.BatchApply(
      output_linear, name="output_embed_seq")(rnn_output_seq)
  return output_logits, assign_zero_rnn_state
Example #38
def streaming_confusion_matrix(labels,
                               predictions,
                               num_classes,
                               weights=None,
                               prefix=""):
    """Calculates a confusion matrix.

    This creates local variables to track the confusion matrix statistics across
    a stream of data.
    Args:
        labels: the ground truth labels, a Tensor of the same shape as predictions
        predictions: the prediction values, a Tensor of shape (?,)
        num_classes: the number of classes for this confusion matrix
        weights: the weight of each prediction (default None)
        prefix: optional string prepended to the metric's variable and summary names
    Returns:
        confusion: A k x k Tensor representing the confusion matrix, where
            the columns represent the predicted label and the rows represent the
            true label
        update_op: An operation that updates the values in confusion_matrix
            appropriately.
    """

    _confusion = tf.confusion_matrix(labels=labels,
                                     predictions=predictions,
                                     num_classes=num_classes,
                                     weights=weights,
                                     name=prefix + "cm")

    # accumulator for the confusion matrix
    confusion = tf.get_local_variable(name=prefix + "confusion",
                                      shape=[num_classes, num_classes],
                                      dtype=tf.int32,
                                      initializer=tf.zeros_initializer)

    # update op
    update_op = confusion.assign(confusion + _confusion)

    confusion_image = tf.reshape(tf.cast(confusion, tf.float32),
                                 [1, num_classes, num_classes, 1])

    summary = tf.summary.image(prefix + "confusion_matrix", confusion_image)

    return confusion, update_op
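A usage sketch for the streaming confusion matrix (class count and feeds are illustrative):

    labels = tf.placeholder(tf.int32, [None])
    predictions = tf.placeholder(tf.int32, [None])
    confusion, cm_update = streaming_confusion_matrix(labels, predictions,
                                                      num_classes=3)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(cm_update, feed_dict={labels: [0, 1, 2], predictions: [0, 2, 2]})
        print(sess.run(confusion))  # accumulated 3x3 count matrix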
Example #39
 def create_state(t):
   # Creates a unique variable scope to ensure the variable name is unique.
   with tf.variable_scope(None, default_name='state'):
     return tf.get_local_variable(t.op.name, initializer=t, use_resource=True)
Example #40
    def build(self, progress):
        """
        Constructs a new graph to simulate the model.

        Parameters
        ----------
        progress : `.utils.ProgressBar`
            Progress bar for construction stage
        """

        self.target_phs = {}
        self.outputs = {}
        self.optimizers = {}

        # create these constants once here for reuse in different operators
        self.signals.dt = tf.constant(self.dt, self.dtype)
        self.signals.dt_val = self.dt  # store the actual value as well
        self.signals.zero = tf.constant(0, self.dtype)
        self.signals.one = tf.constant(1, self.dtype)

        if not self.inference_only:
            # this variable controls behaviour in the simulation that is
            # conditional on whether we are doing training or inference
            self.signals.training = tf.placeholder(tf.bool, shape=(),
                                                   name="training")

            # variable to track training step
            self.training_step = tf.train.get_or_create_global_step()
        else:
            self.training_step = None

        # create base arrays
        sub = progress.sub("creating base arrays")
        self.base_vars = OrderedDict()
        unique_ids = defaultdict(int)
        for k, (v, trainable) in sub(self.base_arrays_init.items()):
            name = "%s_%s_%s_%d" % (
                v.dtype, "_".join(str(x) for x in v.shape), trainable,
                unique_ids[(v.dtype, v.shape, trainable)])
            unique_ids[(v.dtype, v.shape, trainable)] += 1

            # we initialize all the variables from placeholders, and then
            # feed in the initial values when the init op is called. this
            # prevents TensorFlow from storing large constants in the graph
            # def, which can cause problems for large models
            ph = tf.placeholder(v.dtype, v.shape, name="%s_init" % name)

            if trainable:
                with tf.variable_scope("trainable_vars", reuse=False):
                    var = tf.get_variable(name, initializer=ph, trainable=True)
            else:
                with tf.variable_scope("local_vars", reuse=False):
                    var = tf.get_local_variable(name, initializer=ph,
                                                trainable=False)

            self.base_vars[k] = (var, ph, v)

        logger.debug("created base arrays")
        logger.debug([str(x[0]) for x in self.base_vars.values()])

        # set up invariant inputs
        sub = progress.sub("building inputs")
        self.build_inputs(sub)

        # pre-build stage
        with progress.sub("pre-build stage", max_value=len(self.plan)) as sub:
            self.op_builder.pre_build(sub)

        # build stage
        with progress.sub(
                "build stage", max_value=len(self.plan) * self.unroll) as sub:
            self.build_loop(sub)

        # ops for initializing variables (will be called by simulator)
        trainable_vars = tf.trainable_variables()
        if not self.inference_only:
            trainable_vars.append(self.training_step)
        self.trainable_init_op = tf.variables_initializer(trainable_vars)
        self.local_init_op = tf.local_variables_initializer()
        self.global_init_op = tf.variables_initializer(
            [v for v in tf.global_variables() if v not in trainable_vars])
        self.constant_init_op = tf.variables_initializer(
            tf.get_collection("constants"))

        # logging
        logger.info("Number of reads: %d", sum(
            x for x in self.signals.read_types.values()))
        for x in self.signals.read_types.items():
            logger.info("    %s: %d", *x)
        logger.info("Number of writes: %d", sum(
            x for x in self.signals.write_types.values()))
        for x in self.signals.write_types.items():
            logger.info("    %s: %d", *x)