Example #1
def tabular_learning(Qs_t, states_t, actions_t, targets):
    reusing_scope = tf.get_variable_scope().reuse

    state_action_pairs = tf.stack([states_t, actions_t], 1)
    estimates = tf.gather_nd(Qs_t, state_action_pairs)
    err_estimates = targets - estimates
    loss = tf.reduce_mean(err_estimates)

    Nsa = tf.get_variable("Nsa",
                          shape=Qs_t.get_shape(),
                          dtype=tf.float32,
                          trainable=False,
                          initializer=tf.zeros_initializer())
    if reusing_scope is False:
        tf.summary.histogram('Nsa', Nsa)

    update_Nsa = tf.scatter_nd_add(Nsa, state_action_pairs,
                                   tf.ones_like(states_t, dtype=tf.float32))
    global_step = tf.Variable(
        0,
        trainable=False,
        name="global_step",
        collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
    inc_global_step = global_step.assign_add(1)
    with tf.control_dependencies([update_Nsa, inc_global_step]):
        epsilon = 1e-7
        lr = (1 / (epsilon + tf.gather_nd(Nsa, state_action_pairs)))
        updates = lr * err_estimates
        train_op = tf.scatter_nd_add(Qs_t, state_action_pairs, updates)

    return loss, train_op
Example #2
 def body(i):
     nearest_pattern = tf.cast(
         tf.argmin(distances[i], axis=0), tf.int32)
     tf.scatter_nd_add(self.pattern_weights,
                       [[label, nearest_pattern]], [1])
     update = tf.cast(
         (1 / self.pattern_weights[label][nearest_pattern]),
         tf.float32) * (
             sen_list[i] -
             self.patterns[label][nearest_pattern])
     tf.scatter_nd_add(self.patterns,
                       [[label, nearest_pattern]], [update])
     return tf.add(i, 1)
Example #3
def smooth_loss(ver_attrs, ver_neighbors, thres, attr_name):
    batch_size, n_ver, n_channels = ver_attrs.get_shape().as_list()
    n_ver_neighbor_pair = ver_neighbors.get_shape().as_list()[0]

    with tf.variable_scope(attr_name):

        var_sum_of_neighbor_attrs = tf.get_variable(
                'ver_sum_of_neighbor_attrs',
                [batch_size, n_ver, n_channels],
                tf.float32,
                tf.zeros_initializer(),
                trainable=False
                )

        var_sum_of_neighbor_counts = tf.get_variable(
                'ver_sum_of_counts',
                [batch_size, n_ver, 1],
                tf.float32,
                tf.zeros_initializer(),
                trainable=False
                )

    init_sum_of_neighbor_attrs = tf.zeros_like(var_sum_of_neighbor_attrs)
    init_sum_of_neighbor_counts = tf.zeros_like(var_sum_of_neighbor_counts)
    assign_op = tf.group([
        tf.assign(var_sum_of_neighbor_attrs, init_sum_of_neighbor_attrs),
        tf.assign(var_sum_of_neighbor_counts, init_sum_of_neighbor_counts)
        ], name='assign_op')

    with tf.control_dependencies([assign_op]):
        to_ver_ids, from_ver_ids = tf.split(ver_neighbors, 2, axis=1)
        tmp_ver_neighbor_attrs = tf.gather(ver_attrs, tf.squeeze(from_ver_ids), axis=1)
        tmp_ver_neighbor_counts = tf.ones([batch_size, n_ver_neighbor_pair, 1], tf.float32)

        batch_indices = tf.reshape(
                        tf.tile(tf.expand_dims(tf.range(batch_size),axis=1),[1,n_ver_neighbor_pair]),
                        [batch_size, n_ver_neighbor_pair, 1], name='batch_indices')
        to_ver_ids = tf.tile(tf.expand_dims(to_ver_ids, axis=0), [batch_size,1,1])
        batch_to_ver_ids = tf.concat([batch_indices, to_ver_ids], axis=2)
        var_sum_of_neighbor_attrs = tf.scatter_nd_add(var_sum_of_neighbor_attrs,
                batch_to_ver_ids, tmp_ver_neighbor_attrs)
        var_sum_of_neighbor_counts = tf.scatter_nd_add(var_sum_of_neighbor_counts,
                batch_to_ver_ids, tmp_ver_neighbor_counts)
        mean_neighbor_attrs = tf.div(var_sum_of_neighbor_attrs, var_sum_of_neighbor_counts + 1e-8)

        error = tf.maximum(tf.square(mean_neighbor_attrs - ver_attrs), thres)
        loss = tf.reduce_mean(error , name=attr_name + '_smooth_loss')
        return loss
Example #4
def tabular_UCB(Qs_t, inputs_t):
    reusing_scope = tf.get_variable_scope().reuse

    timestep = tf.get_variable("timestep", shape=[], dtype=tf.int32, trainable=False, initializer=tf.zeros_initializer())
    inc_t = tf.assign_add(timestep, 1)

    # State Action count
    Nsa_t = tf.get_variable("Nsa", shape=Qs_t.get_shape(), dtype=tf.float32, trainable=False, initializer=tf.ones_initializer())
    if reusing_scope is False:
        tf.summary.histogram('Nsa', Nsa_t)

    with tf.control_dependencies([inc_t]):
        qs_t = tf.gather(Qs_t, inputs_t)
        nsa_t = tf.gather(Nsa_t, inputs_t)

    values_t = qs_t + ( (2 * tf.log(tf.cast(timestep, tf.float32))) / nsa_t )**(1/2)
    actions_t = tf.cast(tf.argmax(values_t, 1), dtype=tf.int32)
    probs_t = tf.one_hot(actions_t, depth=tf.shape(Qs_t)[1])

    state_action_pairs = tf.stack([inputs_t, actions_t], 1)
    update_Nsa = tf.scatter_nd_add(Nsa_t, state_action_pairs, tf.ones_like(inputs_t, dtype=tf.float32))

    with tf.control_dependencies([update_Nsa]): # Force the update call
        actions_t = tf.identity(actions_t)

    return actions_t, probs_t
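As the code reads, the scores used for action selection are the standard UCB1 bound: values(s, a) = Q(s, a) + sqrt(2 · ln t / N(s, a)), where t is the shared timestep incremented on every call and N(s, a) is the per-state-action visit count that tf.scatter_nd_add increments for the chosen pair.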
Example #5
    def make_update_op(self, predicted_labels, labels):
        with tf.name_scope(self.name), tf.name_scope('update_op'):
            flat_labels = tf.reshape(labels, (-1, 1))
            flat_predictions = tf.reshape(predicted_labels, (-1, self.depth))
            n = tf.shape(flat_labels)[0]

            # We are going to update n X depth elements
            indices = tf.stack((tf.tile(
                tf.reshape(tf.range(self.depth, dtype=self.dtype),
                           (1, self.depth)),
                (n, 1)), tf.tile(flat_labels,
                                 (1, self.depth)), flat_predictions),
                               axis=-1)

            _update_n = tf.assign_add(self.n, tf.cast(n, dtype=self.dtype))

            with tf.control_dependencies([_update_n]):
                # Use tf.group to avoid any return value
                return tf.group(
                    tf.scatter_nd_add(self.matrix,
                                      indices,
                                      tf.ones((n, self.depth),
                                              dtype=self.dtype),
                                      use_locking=True,
                                      name='update_op'))
Example #6
def tabular_learning_with_lr(init_lr, decay_steps, Qs_t, states_t, actions_t,
                             targets):
    reusing_scope = tf.get_variable_scope().reuse

    state_action_pairs = tf.stack([states_t, actions_t], 1)
    estimates = tf.gather_nd(Qs_t, state_action_pairs)
    err_estimates = targets - estimates
    loss = tf.reduce_mean(err_estimates)

    global_step = tf.Variable(
        0,
        trainable=False,
        name="global_step",
        collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
    lr = tf.train.exponential_decay(tf.constant(init_lr, dtype=tf.float32),
                                    global_step,
                                    decay_steps,
                                    0.5,
                                    staircase=True)
    if reusing_scope is False:
        tf.summary.scalar('lr', lr)
    inc_global_step = global_step.assign_add(1)
    with tf.control_dependencies([inc_global_step]):
        updates = lr * err_estimates
        train_op = tf.scatter_nd_add(Qs_t, state_action_pairs, updates)

    return loss, train_op
Example #7
 def update_centroids(self, centroid):
     """compute updated values for centroids as mean of assigned samples
         :param centroid - centroid to be updated
         :returns updated centroids Tensor"""
     sample = self.data_queue_2.dequeue()
     # update per centroid count
     per_centroid_count = tf.scatter_nd_add(self.samples_per_centroid,
                                            indices=[[centroid]],
                                            updates=[1],
                                            name="incrementPerCenterCount")
     # update per center learning rate
     with tf.control_dependencies([per_centroid_count]):
         learning_rate = tf.squeeze(
             tf.cast(1 / tf.slice(per_centroid_count, [centroid], [1]),
                     tf.float64))
         # learning_rate = tf.Print(learning_rate, [learning_rate], message="learning rate: ")
     tf.scatter_nd_update(self.learning_rate, [[centroid]], [learning_rate],
                          name="updateLearningRate")
     # compute new centroids
     updated_centroids = tf.scatter_nd_update(
         self.centroids,
         indices=[centroid],
         updates=tf.add(
             tf.scalar_mul(scalar=(1 - learning_rate),
                           x=tf.slice(input_=self.centroids,
                                      begin=[centroid, 0],
                                      size=[1, self.n_features])),
             tf.scalar_mul(scalar=learning_rate, x=sample)))
     with tf.control_dependencies([updated_centroids]):
         return centroid
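With the per-centroid learning rate set to 1/N(c) right after the count is incremented, the update c ← (1 − 1/N(c))·c + (1/N(c))·x is the incremental form of a running average, which is what the docstring's "mean of assigned samples" refers to.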
Example #8
def calculate_via_todense(nz_values, col_indices, ncol_full):

    nslice = nz_values.get_shape()[0]
    nrow = nz_values.get_shape()[1]
    ncol = nz_values.get_shape()[2]


    values = tf.reshape(nz_values, [-1])
    for_fetch = tf.concat([tf.zeros([1], dtype=tf.float64), values], axis=0) 

    slice_indices = tf.tile(tf.reshape(tf.range(nslice), [nslice, 1, 1]), [1, nrow, ncol])
    row_indices = tf.tile(tf.reshape(tf.range(nrow), [1, nrow, 1]), [nslice, 1, ncol])
    
    indices = tf.stack([slice_indices, row_indices, col_indices], axis=3)
    indices = tf.reshape(indices, [-1, 3])

    fetch_ind = tf.Variable(tf.zeros([nslice, nrow, ncol_full], dtype=tf.int32))
    
    assign_zero = tf.assign(fetch_ind, tf.zeros(tf.shape(fetch_ind), dtype=tf.int32))
    with tf.control_dependencies([assign_zero]):
        fetch_ind = tf.scatter_nd_add(fetch_ind, indices, tf.range(nslice * nrow * ncol, dtype=tf.int32) + 1)

    dense = tf.gather(for_fetch, fetch_ind) 
    
    matmul = tf.matmul(dense, dense, transpose_b=True)

    return matmul 
Example #9
    def joint_probability(self, y_nn, labels):
        # TODO I don't know if it works properly because I only use it for logging
        """
        Create joint probability P(s_k, m_j)

        :param y_nn:    output of the neural net
        :param labels:  phonemes or states of the data (coming from the alignments of kaldi)
        :return:        return P(s_k, m_j)
        """
        with tf.variable_scope('MiscNNHelper/joint_probability'):
            # determine batch size
            batch_size = tf.cast(tf.shape(y_nn)[0], dtype=tf.float32)

            # create variable in order to use scatter_nd_add
            joint_prob = tf.Variable(tf.zeros([self.num_labels, self.cb_size]),
                                     trainable=False,
                                     dtype=tf.float32)
            joint_prob = joint_prob.assign(
                tf.fill([self.num_labels, self.cb_size],
                        0.0))  # reset Variable/floor

            # cast labels to int32
            labels = tf.cast(labels, dtype=tf.int32)

            # create P(s_k, m_j), (check dissertation of Neukirchen, p.61 (5.46))
            joint_prob = tf.scatter_nd_add(joint_prob, labels, y_nn)
            joint_prob = tf.div(joint_prob, batch_size)

            return joint_prob
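Reading the scatter: each row of y_nn is added into the row of joint_prob selected by its label (so labels must have shape [N, 1] for tf.scatter_nd_add to address rows of the [num_labels, cb_size] variable), and dividing by the batch size gives P(s_k, m_j) ≈ (1/N) · Σ_n [label_n = s_k] · y_nn[n, j].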
Example #10
    def test_builder_to_backend_programmatic(self, use_cpu_only, backend,
                                             rankData_rankIndices,
                                             accumulate_mode):
        data_rank, indices_rank = rankData_rankIndices
        data_shape = np.random.randint(low=2, high=5, size=data_rank)
        indices_shape = np.random.randint(low=2, high=5, size=indices_rank)
        indices_shape[-1] = np.random.randint(low=1, high=data_rank + 1)
        updates_shape = list(indices_shape[:-1]) + list(
            data_shape[indices_shape[-1]:])

        data = np.random.rand(*data_shape).astype(np.float32)
        updates = np.random.rand(*updates_shape).astype(np.float32)
        indices_list = []
        for i in range(indices_shape[-1]):
            indices_list.append(
                np.random.randint(0, data_shape[i], size=indices_shape[:-1]))

        indices = np.stack(indices_list, axis=-1).astype(np.int32)

        def build(data, indices, updates):
            return mb.scatter_nd(data=data,
                                 indices=indices,
                                 updates=updates,
                                 mode=accumulate_mode)

        with tf.Graph().as_default(), tf.Session() as sess:
            tf_output = tf.Variable(data)
            sess.run(tf.global_variables_initializer())
            if accumulate_mode == "update":
                sess.run(tf.scatter_nd_update(tf_output, indices, updates))
            if accumulate_mode == "add":
                sess.run(tf.scatter_nd_add(tf_output, indices, updates))
            if accumulate_mode == "sub":
                sess.run(tf.scatter_nd_sub(tf_output, indices, updates))
            expected_output = sess.run(tf_output)

        input_placeholders = {
            "data": mb.placeholder(shape=data.shape),
            "indices": mb.placeholder(shape=indices.shape, dtype=types.int32),
            "updates": mb.placeholder(shape=updates.shape),
        }

        input_values = {"data": data, "indices": indices, "updates": updates}

        expected_output_types = tuple(data_shape[:]) + (types.fp32, )
        run_compare_builder(
            build,
            input_placeholders,
            input_values,
            expected_output_types,
            expected_output,
            use_cpu_only=use_cpu_only,
            frontend_only=False,
            backend=backend,
        )
Example #11
 def test_scatter_nd_add(self):
     indices = tf.constant([[4], [3], [1], [7], [1]])
     updates = tf.constant([9, 10, 11, 12, 13])
     shape = tf.constant([8])
     ref = tf.Variable(tf.zeros(shape, dtype=tf.int32), trainable=False)
     scatter = tf.scatter_nd_add(ref, indices, updates)
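     # index [1] appears twice, so its updates accumulate: 11 + 13 = 24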
     expected = np.array([0, 24, 0, 10, 9, 0, 0, 12])
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         actual = sess.run(scatter)
         self.assertTrue(np.allclose(expected, actual))
Example #12
File: crf.py  Project: amh28/NIF
def permutohedral_compute(data_vectors, barycentric, blurNeighbours1,
                          blurNeighbours2, indices, name, reverse):
    batch_size = tf.shape(data_vectors)[0]
    numSimplexCorners = int(barycentric.get_shape()[-1])
    nCh = numSimplexCorners - 1
    nChData = tf.shape(data_vectors)[-1]
    data_vectors = tf.reshape(data_vectors, [-1, nChData])
    data_vectors = tf.concat(
        [data_vectors, tf.ones_like(data_vectors[:, 0:1])],
        1)  # Convert to homogenous coordinates
    ## Splatting
    initialSplat = tf.zeros(
        [tf.shape(blurNeighbours1[0])[0] + 1, batch_size, nChData + 1])
    with tf.variable_scope(name):
        # WARNING: we use local variables so the graph must initialize local variables with tf.local_variables_initializer()
        splat = tf.contrib.framework.local_variable(tf.ones([0, 0]),
                                                    validate_shape=False,
                                                    name='splatbuffer')

    with tf.control_dependencies([splat.initialized_value()]):
        resetSplat = tf.assign(splat,
                               initialSplat,
                               validate_shape=False,
                               name='assign')
    # This is needed to force tensorflow to update the cache
    with tf.control_dependencies([resetSplat]):
        uncachedSplat = splat.read_value()
    for scit in range(numSimplexCorners):
        data = data_vectors * barycentric[:, scit:scit + 1]
        with tf.control_dependencies([uncachedSplat]):
            splat = tf.scatter_nd_add(splat, indices[scit], data)

    ## Blur
    with tf.control_dependencies([splat]):
        blurred = [splat]
        order = range(nCh, -1, -1) if reverse else range(nCh + 1)
        for dit in order:
            with tf.control_dependencies([blurred[-1]]):
                b1 = 0.5 * tf.gather(blurred[-1], blurNeighbours1[dit])
                b2 = blurred[-1][1:, :, :]
                b3 = 0.5 * tf.gather(blurred[-1], blurNeighbours2[dit])
                blurred.append(
                    tf.concat([blurred[-1][0:1, :, :], b2 + b1 + b3], 0))

    # Alpha is a magic scaling constant from CRFAsRNN code
    alpha = 1. / (1. + numpy.power(2., -nCh))
    normalized = blurred[-1][:, :, :-1] / blurred[-1][:, :, -1:]
    ## Slice
    sliced = tf.gather_nd(normalized, indices[0]) * barycentric[:, 0:1] * alpha
    for scit in range(1, numSimplexCorners):
        sliced = sliced + tf.gather_nd(
            normalized, indices[scit]) * barycentric[:, scit:scit + 1] * alpha

    return sliced
Example #13
    def get_q_tensor_update(self, ind_heads, ind_best_heads, q_tensor,
                            discount, lr, current_states, actions, rewards,
                            next_states, apply_actions):

        num_models = self.config['num_models']
        num_heads = self.config['num_heads']
        heads_per_sample = self.config['heads_per_sample']
        model_range = tf.range(0, num_models, dtype=tf.int64)

        # we have to modify the states and actions a little bit
        ind_models = self.duplicate_each_element(model_range, heads_per_sample)
        ind_states = self.duplicate_each_element(current_states,
                                                 heads_per_sample)
        ind_best_heads_duplic = tf.cast(
            self.duplicate_each_element(ind_best_heads, heads_per_sample),
            tf.int64)
        ind_actions = self.duplicate_each_element(actions, heads_per_sample)
        ind_next_states = self.duplicate_each_element(next_states,
                                                      heads_per_sample)

        if ind_heads is None:
            ind_heads = tf.tile(tf.range(0, num_heads, dtype=tf.int64),
                                [num_models])

        # obtain current q values
        ind_current_q_values = tf.stack(
            [ind_models, ind_heads, ind_states, ind_actions], axis=1)
        current_q_values = tf.gather_nd(q_tensor, ind_current_q_values)

        # retrieve the best model
        ind_best_q_values = tf.stack(
            [ind_models, ind_best_heads_duplic, ind_states], axis=1)
        best_q_values = tf.gather_nd(q_tensor, ind_best_q_values)
        actions = tf.argmax(best_q_values, axis=1)

        # obtain the best q function available for the next state
        ind_next_q_vectors = tf.stack(
            [ind_models, ind_heads, ind_next_states,
             tf.squeeze(actions)],
            axis=1)
        next_q_values = tf.gather_nd(q_tensor, ind_next_q_vectors)

        # duplicate the rewards as well
        mod_shaped_rewards = self.duplicate_each_element(
            rewards, heads_per_sample)
        td_errors = mod_shaped_rewards + discount * next_q_values - current_q_values

        # add dependencies
        with tf.control_dependencies([apply_actions]):

            # define the q tensor update for the q values
            return tf.scatter_nd_add(q_tensor, ind_current_q_values,
                                     lr * td_errors)
Example #14
File: graph.py  Project: hyzcn/tf_G
  def remove(self, src: int, dst: int) -> None:
    """ Remove an edge to the graph.

    This method process an input edge deleting it to the graph updating all the
    variables necessaries to maintain the graph in correct state.

    Args:
      src (int): The id of the source vertex of the edge.
      dst (int): The id of the destination vertex of the edge.

    Returns:
      This method returns nothing.

    """
    if src is None or dst is None:
      raise ValueError(
        "src and dst must not be None")
    self.run_tf([tf.scatter_nd_add(self.A_tf, [[src, dst]], [-1.0]),
                 tf.scatter_nd_add(self.out_degrees_tf, [[src, 0]], [-1.0]),
                 tf.scatter_nd_add(self.in_degrees_tf, [[0, dst]], [-1.0])])
    self.m -= 1
    self._notify(np.array([src, dst]), -1)
Example #15
 def move(self, desire):  # desire: softmax over slots (columns) indicating where to move
     slot = tf.cast(tf.argmax(tf.multiply(desire, tf.cast(tf.less(self.height, tf.constant(6, dtype=tf.int8)), tf.float32))), tf.int32)
     height = tf.cast(tf.gather(self.height, slot), tf.int32)
     # print(tf.shape(height))
     # print(tf.shape(slot))
     # print(type(tf.stack([slot, height], 0)));exit()
     self.grid = tf.scatter_nd_add(self.grid, tf.expand_dims(tf.stack([height, slot], 0), 0), tf.expand_dims(self.player, -1))
     tf.scatter_add(self.height, slot, tf.constant(1, dtype=tf.int8))
     tf.initialize_all_variables()
     sess = tf.Session()
     init = tf.global_variables_initializer()
     sess.run(init)
     print(sess.run(self.grid))
Example #16
    def update(self, indices, values):
        """Returns op to do sparse update.

    To elaborate on this one, lets consider two examples of 5x5x5 patches
    with target volume 100x100x100:

    1. We have 10 patches batched together into [10, 5, 5, 5] then our indices
    need to be [10, 5, 5, 5, 3].
    2. We have a single patch of shape [5, 5, 5] and indices [5, 5, 5, 3].

    As a third example consider a flattened patch:

    3. We have 4 5x5x5 patches flattened into [500] then we need indices of
    shape [500, 3].

    In other words for the innermost dimension we need to provide an index
    into the target shape for every value but the outer dimensions are
    arbitrary as long as they are the same for indices and values.

    Args:
      indices
      values
    Returns:
      op
    """ ""
        if not self._built:
            self._build()

        with tf.name_scope(f'{self._name}/patch_aggregator'):
            weights = tf.to_float(tf.greater(values, 0))

            if self._average is None:
                with tf.name_scope('average'):
                    cond = tf.not_equal(self._weight, 0)
                    ones = tf.ones_like(self._weight)
                    weight = tf.where(cond, self._weight, ones)
                    self._average = self._value / weight
            return tf.group(tf.scatter_nd_add(self._value, indices, values),
                            tf.scatter_nd_add(self._weight, indices, weights))
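To make the index layout described in the docstring concrete, here is a minimal sketch (not from the source; it assumes TensorFlow 1.x, and the volume shape, offset, and variable names are hypothetical) that builds [5, 5, 5, 3] indices for a single 5x5x5 patch at an arbitrary offset and accumulates it into a 100x100x100 volume with tf.scatter_nd_add:

import tensorflow as tf

# Hypothetical target volume and one 5x5x5 patch of ones.
volume = tf.Variable(tf.zeros([100, 100, 100]), trainable=False)
patch = tf.ones([5, 5, 5])

offset = [40, 20, 10]  # arbitrary corner where the patch is placed
zs, ys, xs = tf.meshgrid(tf.range(offset[0], offset[0] + 5),
                         tf.range(offset[1], offset[1] + 5),
                         tf.range(offset[2], offset[2] + 5),
                         indexing='ij')
# For every value in the patch, one [z, y, x] index into the volume.
indices = tf.stack([zs, ys, xs], axis=-1)  # shape [5, 5, 5, 3]

add_patch = tf.scatter_nd_add(volume, indices, patch)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    result = sess.run(add_patch)
    print(result[40, 20, 10], result[0, 0, 0])  # 1.0 inside the patch region, 0.0 outside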
Example #17
def permutohedral_compute(data_vectors, barycentric, blur_neighbours1,
                          blur_neighbours2, indices, name, reverse):
    """
    Splat, Gaussian blur, and slice

    :param data_vectors: value map to be filtered
    :param barycentric: embedding coordinates
    :param blur_neighbours1: first neighbours' coordinates relative to indices
    :param blur_neighbours2: second neighbours' coordinates relative to indices
    :param indices: corresponding locations of data_vectors
    :param name: layer name
    :param reverse: transpose the Gaussian kernel if True
    :return: filtered data_vectors (sliced to the original space)
    """

    num_simplex_corners = barycentric.shape.as_list()[-1]
    n_ch = num_simplex_corners - 1
    batch_size, n_voxels, n_ch_data = data_vectors.shape.as_list()
    data_vectors = tf.reshape(data_vectors, [-1, n_ch_data])

    # Splatting
    with tf.variable_scope(name):
        splat = tf.contrib.framework.local_variable(tf.constant(0.0),
                                                    validate_shape=False,
                                                    name='splatbuffer')

    # with tf.control_dependencies([splat.initialized_value()]):
    initial_splat = tf.zeros(
        [tf.shape(blur_neighbours1[0])[0] + 1, batch_size, n_ch_data])
    reset_splat = tf.assign(splat, initial_splat, validate_shape=False)
    with tf.control_dependencies([reset_splat]):
        for scit in range(num_simplex_corners):
            data = data_vectors * barycentric[:, scit:scit + 1]
            splat = tf.scatter_nd_add(splat, indices[scit], data)

    # Blur with 1D kernels
    for dit in range(n_ch, -1, -1) if reverse else range(n_ch + 1):
        b1 = tf.gather(splat, blur_neighbours1[dit])
        b3 = tf.gather(splat, blur_neighbours2[dit])
        splat = tf.concat([splat[:1, ...], splat[1:, ...] + 0.5 * (b1 + b3)],
                          0)

    # Slice
    sliced = 0.0
    # Alpha is a magic scaling constant from CRFAsRNN code
    alpha = 1. / (1. + np.power(2., -n_ch))
    for scit in range(0, num_simplex_corners):
        sliced += tf.gather_nd(splat, indices[scit]) * \
                  barycentric[:, scit:scit + 1] * alpha
    sliced = tf.reshape(sliced, [batch_size, n_voxels, n_ch_data])
    return sliced
Example #18
 def create_mask(var, output_shape, pos_idxs, pos_val, neg_val, zero):
     # output_shape is a tensor giving the shape of the output
     # zero is a zero constant with the same shape as var
     var = tf.assign(var, zero)
     if isinstance(pos_val, int) or isinstance(pos_val, float):
         pos_val = np.array(pos_val, dtype=np.float32)
     if isinstance(neg_val, int) or isinstance(neg_val, float):
         neg_val = np.array(neg_val, dtype=np.float32)
     var_subset = tf.gather_nd(var, pos_idxs) + \
         pos_val - neg_val
     var = tf.scatter_nd_add(var, pos_idxs, var_subset)
     var += neg_val
     var = tf.slice(var, [0, 0], output_shape)
     return var
Example #19
def scatter_nd_add_diff(x0, x1, x2):
    """
    Custom gradient version of scatter_nd_add
    """
    dummy = tf.Variable(x0, name='dummy', use_resource=True)
    reset_dummy = dummy.assign(0.0 * x0)

    with tf.control_dependencies([reset_dummy]):
        f = tf.scatter_nd_add(dummy, x1, x2)

    def grad(dy, variables=[dummy]):
        g = tf.gather_nd(dy, x1)
        return [None, None, g], [None]

    return f, grad
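In the grad closure, the incoming gradient dy has the shape of the scattered result, so the gradient with respect to the updates x2 is dy gathered back at the scatter indices x1 via tf.gather_nd, while the base tensor x0 and the indices receive no gradient; the (grads, variable_grads) return signature matches TensorFlow's tf.custom_gradient contract, under which a function like this would typically be wrapped.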
Example #20
def bias_ops(ds: tf.data.Dataset, V):
    features, labels = ds.make_one_shot_iterator().get_next()
    tokens = features[TEXT]  # (N, L)
    token_lengths = features[SENTENCE_LENGTH]  # (N,)
    vocab_tally = tf.get_local_variable(
        name='vocab_tally',
        dtype=tf.int64,
        initializer=tf.initializers.zeros,
        shape=(V,)
    )  # (V,)
    word_count = tf.get_local_variable(
        name='word_count',
        dtype=token_lengths.dtype,
        initializer=tf.initializers.zeros,
        shape=[]
    )
    max_length = tf.get_local_variable(
        name='max_length',
        dtype=token_lengths.dtype,
        initializer=tf.initializers.zeros,
        shape=[]
    )
    sentence_count = tf.get_local_variable(
        name='sentence_count',
        dtype=tf.int32,
        initializer=tf.initializers.zeros,
        shape=[]
    )
    mask = tf.sequence_mask(
        maxlen=tf.shape(tokens)[1],
        lengths=token_lengths
    )  # (N, L)
    valid_tokens = tf.boolean_mask(tensor=tokens, mask=mask)  # (Z,)
    update_tally = tf.scatter_nd_add(
        ref=vocab_tally,
        indices=tf.expand_dims(valid_tokens, 1),
        updates=tf.ones(shape=tf.shape(valid_tokens), dtype=vocab_tally.dtype)
    )
    update_sentence_count = tf.assign_add(ref=sentence_count, value=tf.shape(tokens)[0])
    update_word_count = tf.assign_add(ref=word_count, value=tf.reduce_sum(token_lengths))
    update_max_length = tf.assign(ref=max_length, value=tf.maximum(
        max_length,
        tf.reduce_max(token_lengths)
    ))
    update = tf.group(update_tally, update_sentence_count, update_word_count, update_max_length)
    return vocab_tally, sentence_count, word_count, max_length, update
Example #21
  def testConcurrentUpdates(self):
    num_updates = 10000
    update_values = np.random.rand(num_updates)
    ref = tf.Variable(np.zeros([2, 2]), dtype=tf.float64)
    indices = tf.constant([[0, 1]] * num_updates, dtype=tf.int32)
    updates = tf.constant(update_values, dtype=tf.float64)

    exepected_result = np.zeros([2, 2], dtype=np.float64)
    exepected_result[0, 1] = np.sum(update_values)

    scatter = tf.scatter_nd_add(ref, indices, updates)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
      sess.run(init)
      result = sess.run(scatter)
      assert np.allclose(result, exepected_result)
Example #22
    def testConcurrentUpdates(self):
        num_updates = 10000
        update_values = np.random.rand(num_updates)
        ref = tf.Variable(np.zeros([2, 2]), dtype=tf.float64)
        indices = tf.constant([[0, 1]] * num_updates, dtype=tf.int32)
        updates = tf.constant(update_values, dtype=tf.float64)

        exepected_result = np.zeros([2, 2], dtype=np.float64)
        exepected_result[0, 1] = np.sum(update_values)

        scatter = tf.scatter_nd_add(ref, indices, updates)
        init = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init)
            result = sess.run(scatter)
            assert np.allclose(result, exepected_result)
Example #23
File: crf.py  Project: nhu2000/NiftyNet
def permutohedral_compute(data_vectors,barycentric,blurNeighbours1,blurNeighbours2,indices,name,reverse):
    batch_size=tf.shape(data_vectors)[0]
    numSimplexCorners=int(barycentric.shape[-1])
    nCh=numSimplexCorners-1
    nChData=tf.shape(data_vectors)[-1]
    data_vectors = tf.reshape(data_vectors,[-1,nChData])
    data_vectors = tf.concat([data_vectors,tf.ones_like(data_vectors[:,0:1])],1) # Convert to homogenous coordinates
    ## Splatting
    initialSplat=tf.zeros([tf.shape(blurNeighbours1[0])[0]+1,batch_size,nChData+1])
    with tf.variable_scope(name):
        # WARNING: we use local variables so the graph must initialize local variables with tf.local_variables_initializer()
        splat=tf.contrib.framework.local_variable(tf.ones([0,0]),validate_shape=False,name='splatbuffer')

    with tf.control_dependencies([splat.initialized_value()]):
        resetSplat=tf.assign(splat,initialSplat,validate_shape=False,name='assign')
    # This is needed to force tensorflow to update the cache
    with tf.control_dependencies([resetSplat]):
        uncachedSplat=splat.read_value()
    for scit in range(numSimplexCorners):
        data = data_vectors*barycentric[:,scit:scit+1]
        with tf.control_dependencies([uncachedSplat]):
            splat=tf.scatter_nd_add(splat,indices[scit],data)

    ## Blur
    with tf.control_dependencies([splat]):
        blurred=[splat]
        order = range(nCh,-1,-1) if reverse else range(nCh+1)
        for dit in order:
            with tf.control_dependencies([blurred[-1]]):
                b1=0.5*tf.gather(blurred[-1],blurNeighbours1[dit])
                b2=blurred[-1][1:,:,:]
                b3=0.5*tf.gather(blurred[-1],blurNeighbours2[dit])
                blurred.append(tf.concat([blurred[-1][0:1,:,:], b2+b1+b3],0))

    # Alpha is a magic scaling constant from CRFAsRNN code
    alpha = 1. / (1.+numpy.power(2., -nCh))
    normalized=blurred[-1][:,:,:-1]/blurred[-1][:,:,-1:]
    ## Slice
    sliced = tf.gather_nd(normalized,indices[0])*barycentric[:,0:1]*alpha
    for scit in range(1,numSimplexCorners):
        sliced = sliced+tf.gather_nd(normalized,indices[scit])*barycentric[:,scit:scit+1]*alpha

    return sliced
Example #24
def eligibility_dutch_traces(Qs_t, states_t, actions_t, lr, discount, lambda_value):
    # Beware this trace has to be used with a different learning rule
    et = tf.get_variable(
        "eligibilitytraces"
        , shape=Qs_t.get_shape()
        , dtype=tf.float32
        , trainable=False
        , initializer=tf.zeros_initializer()
    )
    tf.summary.histogram('eligibilitytraces', et)
    state_action_pairs = tf.stack([states_t, actions_t], 1)
    current_trace = tf.gather_nd(et, state_action_pairs)
    updates = 1 - lr * discount * lambda_value * current_trace
    with tf.control_dependencies([updates]):
        dec_et_op = tf.assign(et, discount * lambda_value * et)
        with tf.control_dependencies([dec_et_op]):
            update_et_op = tf.scatter_nd_add(et, indices=state_action_pairs, updates=updates)

    reset_et_op = et.assign(tf.zeros_like(et, dtype=tf.float32))

    return (et, update_et_op, reset_et_op)
Example #25
    def create_stats_val(self, y_labels, labels):

        with tf.variable_scope('MiscNNHelper/create_stats_val'):
            labels = tf.cast(labels, dtype=tf.int32)
            # define tf variables to use scatter_nd and scatter_nd_add
            # pwtmp = tf.Variable(tf.zeros(self.num_labels), trainable=False, dtype=tf.float32)
            # pytmp = tf.Variable(tf.zeros(self.cb_size), trainable=False, dtype=tf.float32)
            # pw_y_tmp = tf.Variable(tf.zeros([self.num_labels, self.cb_size]), trainable=False, dtype=tf.float32)
            pwtmp = tf.get_default_graph().get_tensor_by_name('p_w:0')
            pytmp = tf.get_default_graph().get_tensor_by_name('p_y:0')
            pw_y_tmp = tf.get_default_graph().get_tensor_by_name('p_w_y:0')

            # self.reset_p_w

            # create P(w)
            pwtmp = tf.assign(pwtmp, tf.zeros([self.num_labels
                                               ]))  # reset Variable/floor
            # pwtmp = self.reset_variable(pwtmp)
            pwtmp = tf.scatter_add(pwtmp, labels, tf.ones(tf.shape(labels)))

            # create P(y)
            pytmp = tf.assign(pytmp,
                              tf.zeros([self.cb_size]))  # reset Variable/floor
            pytmp = tf.scatter_add(pytmp, y_labels,
                                   tf.ones(tf.shape(y_labels)))

            # create P(w|y)
            pw_y_tmp = tf.assign(pw_y_tmp,
                                 tf.zeros([self.num_labels, self.cb_size
                                           ]))  # reset Variable/floor
            pw_y_tmp = tf.scatter_nd_add(
                pw_y_tmp,
                tf.concat([
                    tf.cast(labels, dtype=tf.int64),
                    tf.expand_dims(y_labels, 1)
                ],
                          axis=1), tf.ones(tf.shape(y_labels)))

            return pwtmp, pytmp, pw_y_tmp
Example #26
def main(unused_argv):
    ref = tf.Variable([[[1, 2, 3, 4], [5, 6, 7, 8],
                        [1, 2, 3, 4], [5, 6, 7, 8]],
                       [[10, 11, 12, 13], [14, 15, 16, 17],
                        [10, 11, 12, 13], [14, 15, 16, 17]]
                       ])
    indices = tf.constant([[1, 1]])
    updates = tf.constant([[-14, -15, -16, -17]])
    out = tf.scatter_nd_add(ref, indices, updates)
    init = tf.global_variables_initializer()

    with tf.Session(config=config('cpu')) as session:
        session.run(init)
        result_cpu = session.run(out)
    with tf.Session(config=config('ai_core')) as session:
        session.run(init)
        result_ai_core = session.run(out)

    print('====================================')
    cmp_result = np.allclose(result_ai_core, result_cpu, atol=atol, rtol=rtol)
    print(cmp_result)
    print('====================================')
Example #27
def build_mask2(input):
    # `ref2` below is assumed to be a pre-existing tf.Variable holding the target mask
    row = input.eval()  # requires a default session; `input` holds [y1, x1, y2, x2]
    print(row)
    y_extent = tf.range(row[0], row[2])
    x_extent = tf.range(row[1], row[3])
    print('y_extent', y_extent.eval())
    Y,X   = tf.meshgrid(y_extent, x_extent)
    print(Y.shape, X.shape)
    bbox_mask    = tf.stack([Y,X],axis=2)
    print(' bbox_mask shapoe: ',bbox_mask.shape)

    mask_indices = tf.reshape(bbox_mask,[-1,2])
    print('  size of mask_indices: ', mask_indices.shape)

    mask_size = mask_indices.get_shape()[0]
    mask_updates = tf.ones([mask_size], dtype = tf.int32)
    print('  size of bbox_mask: ', mask_size)
    res = tf.scatter_nd_add(ref2, mask_indices, mask_updates)
    print( ' ref shape: ', res.shape)
    print( ' indices shape: ', mask_indices.shape)
    print( ' updates shape: ', mask_updates.shape)
    return res
Example #28
def scatter_var(
        blocks,
        bin_counts,  # pylint: disable=unused-argument
        active_block_indices,
        outputs,
        *,
        bsize,
        boffset,
        bstride,
        add=False):

    raise NotImplementedError("no gradient for sparse_lib.scatter_var")

    indices = _upsample_block_indices(active_block_indices, bsize, boffset,
                                      bstride)  # [M, bsize[0], bsize[1], 3]

    if add:
        outputs = tf.scatter_nd_add(outputs, indices, blocks)
    else:
        outputs = tf.scatter_nd_update(outputs, indices, blocks)

    return outputs
Example #29
    def _helper_mi_tf(self, labels, alignments, cb_len):
        """
        Helper function in TensorFlow to get P(w), P(y) and P(w|y)

        :param labels:      labels coming e.g. out of a neural network
        :param alignments:  phonemes from e.g. alignments
        :param cb_len:      codebook size of the neural network (output dim)
        :return:            P(w), P(y) and P(w|y)
        """
        p = 41

        pwtmp = tf.Variable(tf.zeros(p), trainable=False, dtype=tf.float32)
        pytmp = tf.Variable(tf.zeros(cb_len),
                            trainable=False,
                            dtype=tf.float32)
        pw_y_tmp = tf.Variable(tf.zeros([p, cb_len]),
                               trainable=False,
                               dtype=tf.float32)

        # use input array as indexing array
        pwtmp = pwtmp.assign(tf.fill([p], 0.0))  # reset Variable/floor
        pwtmp = tf.scatter_add(pwtmp, alignments,
                               tf.ones(tf.shape(alignments)))

        pytmp = pytmp.assign(tf.fill([cb_len], 0.0))  # reset Variable/floor
        pytmp = tf.scatter_add(pytmp, labels, tf.ones(tf.shape(labels)))

        pw_y_tmp = pw_y_tmp.assign(tf.fill([p, cb_len],
                                           0.0))  # reset Variable/floor
        pw_y_tmp = tf.scatter_nd_add(
            pw_y_tmp,
            tf.concat([
                tf.cast(alignments, dtype=tf.int64),
                tf.expand_dims(labels, 1)
            ],
                      axis=1), tf.ones(tf.shape(labels)))
        return pwtmp, pytmp, pw_y_tmp
Example #30
    def vq_data(self, y_nn, labels, nominator, denominator, discrete=True):
        """
        Create the numerator and denominator for P(s_k|m_j).
        This function is used when all the training data is used to create P(s_k|m_j).

        :param y_nn:        output of the neural net
        :param labels:      phonemes or states of the data (coming from the alignments of kaldi)
        :param nominator:   numerator for P(s_k|m_j)
        :param denominator: denominator for P(s_k|m_j)
        :param discrete:    flag for creating P(s_k|m_j) in a discrete way
        :return:            numerator and denominator for creating P(s_k|m_j)
        """
        with tf.variable_scope('MiscNNHelper/vq_data'):
            # cast labels to int32
            labels = tf.cast(labels, dtype=tf.int64)

            y_labels = tf.argmax(y_nn, axis=1)
            # labels_softmax = output_soft

            if discrete:
                # create nominator
                nominator = tf.scatter_nd_add(
                    nominator,
                    tf.concat([
                        tf.cast(labels, dtype=tf.int64),
                        tf.expand_dims(y_labels, 1)
                    ],
                              axis=1), tf.ones(tf.shape(y_labels)))

                # create denominator
                denominator = tf.scatter_add(denominator, y_labels,
                                             tf.ones(tf.shape(y_labels)[0]))
            else:
                raise NotImplementedError("Not implemented!")

            return nominator, denominator
Example #31
    def project_uv_render(
        ori_img,
        norm_image,
        clip_xyzw,
        tri,
        tri_vt,
        vt_list,
        imageH,
        imageW,
        uv_rgb,
        uv_mask,
        para_illum,
        var_scope_name,
    ):
        batch_size, _, _ = clip_xyzw.get_shape().as_list()
        # get uv coordinates
        V, U = tf.split(vt_list, 2, axis=1)
        uv_size = uv_rgb.get_shape().as_list()[1]
        U = (1.0 - U) * uv_size
        V = V * uv_size
        UV = tf.concat([U, V], axis=1)
        batch_UV = tf.tile(UV, [batch_size, 1])

        # get clip_xyzw for ver_uv (according to the correspondence between tri and tri_vt)
        # gather and scatter
        EPS = 1e-12
        batch_tri_indices = tf.reshape(
            tf.tile(tf.expand_dims(tf.range(batch_size), axis=1), [1, len(tri_vt) * 3]),
            [-1],
            name="batch_tri_indices",
        )
        tri_inds = tf.stack(
            [
                batch_tri_indices,
                tf.concat([tf.reshape(tri, [len(tri) * 3])] * batch_size, axis=0),
            ],
            axis=1,
        )
        tri_vt_inds = tf.stack(
            [
                batch_tri_indices,
                tf.concat([tf.reshape(tri_vt, [len(tri_vt) * 3])] * batch_size, axis=0),
            ],
            axis=1,
        )
        tri_clip_xyzw = tf.gather_nd(clip_xyzw, tri_inds, name="tri_clip_xyzw")
        ver_uv_clip_xyzw_sum = tf.get_variable(
            shape=[batch_size, len(vt_list), 4],
            dtype=tf.float32,
            initializer=tf.zeros_initializer(),
            name=var_scope_name + "ver_uv_clip_xyzw_sum",
            trainable=False,
        )
        ver_uv_clip_xyzw_cnt = tf.get_variable(
            shape=[batch_size, len(vt_list), 4],
            dtype=tf.float32,
            initializer=tf.zeros_initializer(),
            name=var_scope_name + "ver_uv_clip_xyzw_cnt",
            trainable=False,
        )
        init_ver_uv = tf.zeros(shape=[batch_size, len(vt_list), 4], dtype=tf.float32)
        assign_op1 = tf.assign(ver_uv_clip_xyzw_sum, init_ver_uv)
        assign_op2 = tf.assign(ver_uv_clip_xyzw_cnt, init_ver_uv)
        with tf.control_dependencies([assign_op1, assign_op2]):
            ver_uv_clip_xyzw_sum = tf.scatter_nd_add(
                ver_uv_clip_xyzw_sum, tri_vt_inds, tri_clip_xyzw
            )
            ver_uv_clip_xyzw_cnt = tf.scatter_nd_add(
                ver_uv_clip_xyzw_cnt, tri_vt_inds, tf.ones_like(tri_clip_xyzw)
            )
            ver_uv_clip_xyzw = tf.div(ver_uv_clip_xyzw_sum, ver_uv_clip_xyzw_cnt + EPS)

            uv_image, uv_alphas = rasterize_clip_space(
                ver_uv_clip_xyzw, batch_UV, tri_vt, imageW, imageH, -1.0
            )

            uv_image = tf.clip_by_value(
                tf.cast(uv_image, tf.int32), 0, 511
            )  # should be integer

            batch_vt_indices = tf.reshape(
                tf.tile(
                    tf.expand_dims(tf.range(batch_size), axis=1), [1, imageW * imageH]
                ),
                [-1, 1],
                name="batch_indices",
            )

            batch_vt_indices = tf.concat(
                [batch_vt_indices, tf.reshape(uv_image, [-1, 2])], axis=1
            )

            # careful
            diffuse_image = tf.reshape(
                tf.gather_nd(uv_rgb, batch_vt_indices), [batch_size, imageH, imageW, 3]
            )
            uv_alphas = (
                tf.reshape(
                    tf.gather_nd(uv_mask[:, :, :, 0], batch_vt_indices),
                    [batch_size, imageH, imageW, 1],
                )
                * uv_alphas
            )

        # Have shading
        para_light = para_illum
        background = ori_img
        rgb_images, shading_image = Shader.sh_shader(
            norm_image, uv_alphas, background, para_light, diffuse_image
        )
        ori_img_remove_shading = ori_img / shading_image

        diffuse_image = tf.clip_by_value(diffuse_image, 0, 1)
        rgb_images = tf.clip_by_value(rgb_images, 0, 1)
        uv_attrs_image = tf.clip_by_value(uv_alphas, 0, 1)
        ori_img_remove_shading = tf.clip_by_value(ori_img_remove_shading, 0, 1)

        render_image = rgb_images
        render_image = render_image * uv_attrs_image + ori_img * (1 - uv_attrs_image)

        return render_image, uv_attrs_image, ori_img_remove_shading
Example #32
    def get_ver_norm(ver_xyz, tri, scope_name="normal"):
        """
        Compute vertex normals.

        :param:
            ver_xyz: [batch, N, 3], vertex geometry
            tri: [M, 3], mesh triangles definition

        :return:
            ver_normals: [batch, N, 3], vertex normals
        """

        with tf.variable_scope(scope_name):

            v1_idx, v2_idx, v3_idx = tf.unstack(tri, 3, axis=-1)
            v1 = tf.gather(ver_xyz, v1_idx, axis=1, name="v1_tri")
            v2 = tf.gather(ver_xyz, v2_idx, axis=1, name="v2_tri")
            v3 = tf.gather(ver_xyz, v3_idx, axis=1, name="v3_tri")

            EPS = 1e-8
            tri_normals = tf.cross(v2 - v1, v3 - v1)
            tri_normals = tf.div(
                tri_normals,
                (tf.norm(tri_normals, axis=-1, keep_dims=True) + EPS),
                name="norm_tri",
            )
            tri_normals = tf.tile(tf.expand_dims(tri_normals, 2), [1, 1, 3, 1])
            tri_normals = tf.reshape(tri_normals, [-1, 3])
            tri_votes = tf.cast(tf.greater(tri_normals[:, 2:], float(0.1)), tf.float32)
            tri_cnts = tf.ones_like(tri_votes)

            B = v1.get_shape().as_list()[0]  # batch size
            batch_indices = tf.reshape(
                tf.tile(tf.expand_dims(tf.range(B), axis=1), [1, len(tri) * 3]),
                [-1],
                name="batch_indices",
            )
            tri_inds = tf.stack(
                [
                    batch_indices,
                    tf.concat([tf.reshape(tri, [len(tri) * 3])] * B, axis=0),
                ],
                axis=1,
            )

            ver_shape = ver_xyz.get_shape().as_list()

            ver_normals = tf.get_variable(
                shape=ver_shape,
                dtype=tf.float32,
                initializer=tf.zeros_initializer(),
                name="ver_norm",
                trainable=False,
            )
            init_normals = tf.zeros(shape=ver_shape, dtype=tf.float32)
            assign_op = tf.assign(ver_normals, init_normals)
            with tf.control_dependencies([assign_op]):
                ver_normals = tf.scatter_nd_add(ver_normals, tri_inds, tri_normals)
                ver_normals = ver_normals / (
                    tf.norm(ver_normals, axis=2, keep_dims=True) + EPS
                )

            votes = tf.reshape(
                tf.concat([tri_votes, tri_votes, tri_votes], axis=-1), [-1, 1]
            )
            cnts = tf.reshape(
                tf.concat([tri_cnts, tri_cnts, tri_cnts], axis=-1), [-1, 1]
            )
            ver_votes = tf.get_variable(
                shape=ver_shape[:-1] + [1],
                dtype=tf.float32,
                initializer=tf.zeros_initializer(),
                name="ver_vote",
                trainable=False,
            )
            ver_cnts = tf.get_variable(
                shape=ver_shape[:-1] + [1],
                dtype=tf.float32,
                initializer=tf.zeros_initializer(),
                name="ver_cnt",
                trainable=False,
            )
            init_votes = tf.zeros(shape=ver_shape[:-1] + [1], dtype=tf.float32)
            assign_op2 = tf.assign(ver_votes, init_votes)
            assign_op3 = tf.assign(ver_cnts, init_votes)
            with tf.control_dependencies([assign_op2, assign_op3]):
                ver_votes = tf.scatter_nd_add(ver_votes, tri_inds, tri_votes)
                ver_cnts = tf.scatter_nd_add(ver_cnts, tri_inds, tri_cnts)
                ver_votes = ver_votes / (ver_cnts + EPS)

                ver_votes1 = tf.less(ver_votes, float(1.0))
                ver_votes2 = tf.greater(ver_votes, float(0.0))
                ver_votes = tf.cast(tf.logical_and(ver_votes1, ver_votes2), tf.float32)

            return ver_normals, ver_votes
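The same accumulate-then-normalize pattern, reduced to a toy mesh; a minimal sketch (not from the source, assuming TensorFlow 1.x, with a hypothetical four-vertex mesh) that sums each triangle's normal into its three vertices with tf.scatter_nd_add and then renormalizes:

import tensorflow as tf

# Hypothetical mesh: 4 vertices, 2 triangles forming a flat square in the z=0 plane.
ver_xyz = tf.constant([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
tri = tf.constant([[0, 1, 2], [0, 2, 3]])

v1 = tf.gather(ver_xyz, tri[:, 0])
v2 = tf.gather(ver_xyz, tri[:, 1])
v3 = tf.gather(ver_xyz, tri[:, 2])
tri_normals = tf.cross(v2 - v1, v3 - v1)  # one (unnormalized) normal per face

# Each triangle contributes its normal to all three of its vertices.
flat_tri = tf.reshape(tri, [-1, 1])  # [6, 1] vertex indices
flat_normals = tf.reshape(tf.tile(tf.expand_dims(tri_normals, 1), [1, 3, 1]), [-1, 3])

ver_normals = tf.Variable(tf.zeros([4, 3]), trainable=False)
accum = tf.scatter_nd_add(ver_normals, flat_tri, flat_normals)
normalized = accum / (tf.norm(accum, axis=1, keepdims=True) + 1e-8)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(normalized))  # every vertex normal ends up pointing along +z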