Example #1
  def accumulate_client_votes(vote_accumulator, example):
    """Accumulates client votes on prefix extensions."""

    example = tf.strings.lower(example)
    # Append the default terminator to the example.
    default_terminator = tf.constant(DEFAULT_TERMINATOR, dtype=tf.string)
    example = tf.strings.join([example, default_terminator])

    # Compute effective round number.
    effective_round_num = tf.math.floordiv(round_num, num_sub_rounds)

    if tf.strings.length(example) < effective_round_num:
      return vote_accumulator
    else:
      discovered_prefixes_index = discovered_prefixes_table.lookup(
          tf.strings.substr(example, 0, effective_round_num))
      possible_prefix_extensions_index = possible_prefix_extensions_table.lookup(
          tf.strings.substr(example, effective_round_num, 1))

      if tf.math.equal(possible_prefix_extensions_index,
                       tf.constant(DEFAULT_VALUE)):
        return vote_accumulator

      elif tf.math.equal(discovered_prefixes_index, tf.constant(DEFAULT_VALUE)):
        indices = [[0, possible_prefix_extensions_index]]
        updates = tf.constant([1])
        return tf.tensor_scatter_nd_add(vote_accumulator, indices, updates)

      else:
        indices = [[
            discovered_prefixes_index, possible_prefix_extensions_index
        ]]
        updates = tf.constant([1])
        return tf.tensor_scatter_nd_add(vote_accumulator, indices, updates)
Example #2
def jet_reco(px, py, jet_idx, max_jets):

    tf.debugging.assert_shapes(
        [
            (px, ("N",)),
            (py, ("N",)),
            (jet_idx, ("N",)),
        ]
    )

    # Valid bins are 0..max_jets-1; route out-of-range jet indices to bin 0.
    jet_idx_capped = tf.where(jet_idx < max_jets, jet_idx, 0)

    jet_px = tf.zeros([max_jets], dtype=px.dtype)
    jet_py = tf.zeros([max_jets], dtype=py.dtype)

    jet_px_new = tf.tensor_scatter_nd_add(jet_px, indices=tf.expand_dims(jet_idx_capped, axis=-1), updates=px)
    jet_py_new = tf.tensor_scatter_nd_add(jet_py, indices=tf.expand_dims(jet_idx_capped, axis=-1), updates=py)

    jet_pt = tf.math.sqrt(jet_px_new**2 + jet_py_new**2)

    return jet_pt
Example #3
def salt_and_pepper_noise_tf(img_tensor, proportion=0.05):
    img_shape = tf.shape(img_tensor, out_type=tf.int64)
    float_shape = tf.cast(img_shape, tf.float32)
    h, w = img_shape[0], img_shape[1]
    fh, fw = float_shape[0], float_shape[1]
    num = tf.cast(fh * fw * proportion, tf.int32) // 2  # number of salt/pepper pixels to add (half each)

    # pepper noise: set random pixels to 0. Adding zeros with
    # tensor_scatter_nd_add would be a no-op, so scatter an update instead.
    hs = tf.random.uniform([num], 0, h, dtype=tf.int64)
    ws = tf.random.uniform([num], 0, w, dtype=tf.int64)
    hs_ws = tf.stack([hs, ws], axis=1)
    noise = tf.zeros(shape=[num, 3], dtype=img_tensor.dtype)
    img_tensor = tf.tensor_scatter_nd_update(img_tensor,
                                             indices=hs_ws,
                                             updates=noise)

    # salt noise: set random pixels to 255 (updating rather than adding
    # avoids overflowing the existing pixel values)
    hs = tf.random.uniform([num], 0, h, dtype=tf.int64)
    ws = tf.random.uniform([num], 0, w, dtype=tf.int64)
    hs_ws = tf.stack([hs, ws], axis=1)
    noise = tf.zeros(shape=[num, 3], dtype=img_tensor.dtype) + 255
    img_tensor = tf.tensor_scatter_nd_update(img_tensor,
                                             indices=hs_ws,
                                             updates=noise)

    return img_tensor
Example #4
    def _backtrack_one_step(flip_matrix, active, curr_job_idx):
        """Take one step in backtracking."""
        # Discover the worker that the job originated from, note that this worker
        # must exist by construction.
        curr_worker_idx = tf.gather(
            state["jobs_from_worker"], curr_job_idx, batch_dims=1) - 1
        curr_worker_idx = tf.maximum(curr_worker_idx, 0)
        update_indices = tf.stack([batch_range, curr_worker_idx, curr_job_idx],
                                  axis=1)
        update_indices = tf.maximum(update_indices, 0)
        flip_matrix = tf.tensor_scatter_nd_add(flip_matrix, update_indices,
                                               tf.cast(active, tf.int32))

        # Discover the (potential) job that the worker originated from.
        curr_job_idx = tf.gather(
            state["workers_from_job"], curr_worker_idx, batch_dims=1) - 1
        # Note that jobs may not be active, and we track that here (before
        # adjusting indices so that they are all >= 0 for gather).
        active &= curr_job_idx >= 0
        curr_job_idx = tf.maximum(curr_job_idx, 0)
        update_indices = tf.stack([batch_range, curr_worker_idx, curr_job_idx],
                                  axis=1)
        update_indices = tf.maximum(update_indices, 0)
        flip_matrix = tf.tensor_scatter_nd_add(flip_matrix, update_indices,
                                               tf.cast(active, tf.int32))

        return flip_matrix, active, curr_job_idx
Example #5
    def call(self, obj_vecs, pred_vecs, edges, training=True):
        O = obj_vecs.shape[0]
        T = pred_vecs.shape[0]

        Din, H, Dout = self.input_dim, self.hidden_dim, self.output_dim

        # (T, )
        s_idx = edges[:, 0]
        o_idx = edges[:, 1]

        # (T, D): gather subject/object vectors for each edge; tf.gather
        # replaces the eager-only .numpy() loops and also works in graph mode
        cur_s_vecs = tf.gather(obj_vecs, s_idx)
        cur_o_vecs = tf.gather(obj_vecs, o_idx)

        # (T, 3 * D)
        cur_t_vecs = tf.concat([cur_s_vecs, pred_vecs, cur_o_vecs], axis=1)
        new_t_vecs = self.net1(cur_t_vecs, training=training)

        # (T, x)
        new_s_vecs = new_t_vecs[:, :H]
        new_p_vecs = new_t_vecs[:, H: (H + Dout)]
        new_o_vecs = new_t_vecs[:, (H + Dout): (2 * H + Dout)]

        # (O, H); dtype follows obj_vecs
        pooled_obj_vecs = tf.zeros(shape=(O, H), dtype=obj_vecs.dtype)
        pooled_obj_vecs = tf.tensor_scatter_nd_add(
            pooled_obj_vecs, tf.reshape(s_idx, (-1, 1)), new_s_vecs)
        pooled_obj_vecs = tf.tensor_scatter_nd_add(
            pooled_obj_vecs, tf.reshape(o_idx, (-1, 1)), new_o_vecs)

        if self.pooling == 'avg':
            # (O, )
            obj_counts = tf.zeros(O, dtype=tf.float32)
            # (T, )
            ones = tf.ones(T, dtype=tf.float32)
            obj_counts = tf.tensor_scatter_nd_add(
                obj_counts, tf.reshape(s_idx, (-1, 1)), ones)
            obj_counts = tf.tensor_scatter_nd_add(
                obj_counts, tf.reshape(o_idx, (-1, 1)), ones)

            obj_counts = tf.clip_by_value(obj_counts, 1, O)
            pooled_obj_vecs = pooled_obj_vecs / tf.reshape(obj_counts, (-1, 1))

        new_obj_vecs = self.net2(pooled_obj_vecs, training=training)

        return new_obj_vecs, new_p_vecs
Example #6
 def reduce_fn(state, point):
     cluster_sums, cluster_weights, num_examples = state
     closest_centroid = _find_closest_centroid(centroids, point)
     scatter_index = [[closest_centroid]]
     cluster_sums = tf.tensor_scatter_nd_add(cluster_sums, scatter_index,
                                             tf.expand_dims(point, axis=0))
     cluster_weights = tf.tensor_scatter_nd_add(cluster_weights,
                                                scatter_index, [1])
     num_examples += 1
     return cluster_sums, cluster_weights, num_examples
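A hedged usage sketch: reduce_fn is shaped for tf.data.Dataset.reduce. This assumes two centroids and the _find_closest_centroid helper from the surrounding code are in scope:

import tensorflow as tf

points = tf.data.Dataset.from_tensor_slices(
    tf.constant([[0.0, 1.0], [1.0, 0.0], [9.0, 10.0]]))
initial_state = (tf.zeros([2, 2]),          # cluster_sums, one row per centroid
                 tf.zeros([2], tf.int32),   # cluster_weights
                 tf.constant(0))            # num_examples
cluster_sums, cluster_weights, num_examples = points.reduce(
    initial_state, reduce_fn)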
Example #7
    def ll_dist(self, gamma, states_t, next_states_t, actions_t, rewards_t, done_mask, weights):
        if self.use_double:
            states_t = tf.concat([states_t, next_states_t], 0)
        # Calculate current state probabilities
        net_output = self.net(states_t)
        state_action_dist = tf.nn.log_softmax(net_output[:self.batch_size], axis=-1)
        state_action_dist = tf.squeeze(tf.gather(state_action_dist, tf.reshape(actions_t, [-1, 1, 1]), batch_dims=1))

        # Calculate next state probabilities
        delta_z = (self.v_max - self.v_min) / (self.n_atoms - 1)
        support = tf.linspace(self.v_min, self.v_max, self.n_atoms)
        target_net_output = tf.nn.softmax(self.tgt_net(next_states_t), axis=-1)
        if self.use_double:
            next_state_dist_online = tf.nn.softmax(net_output[self.batch_size:], axis=-1)
            # Select the greedy action by expected value, sum_z p(z) * z;
            # summing the bare probabilities would yield 1 for every action.
            next_best_actions = tf.argmax(
                tf.reduce_sum(next_state_dist_online * support, -1), -1)
        else:
            next_best_actions = tf.argmax(
                tf.reduce_sum(target_net_output * support, -1), -1)
        next_state_dist = tf.squeeze(tf.gather(target_net_output, tf.reshape(next_best_actions, [-1, 1, 1]),
                                               batch_dims=1))

        # Calculate the Bellman operator T to produce Tz
        Tz = tf.expand_dims(rewards_t, -1) + tf.expand_dims(tf.cast(tf.logical_not(done_mask), tf.float32), -1) * (
                gamma ** self.n_steps) * tf.expand_dims(support, 0)
        Tz = tf.clip_by_value(Tz, self.v_min, self.v_max)
        b = (Tz - self.v_min) / delta_z
        l = tf.math.floor(b)
        u = tf.math.ceil(b)

        # Fix disappearing probability mass
        eq_mask = tf.equal(l, u)
        u_greater = tf.greater(u, 0)
        l_less = tf.less(l, self.n_atoms - 1.0)
        l = tf.where(tf.logical_and(eq_mask, u_greater), x=l - 1, y=l)
        u = tf.where(tf.logical_and(eq_mask, l_less), x=u + 1, y=u)

        m = tf.zeros(self.batch_size * self.n_atoms)
        offset = tf.linspace(0.0, ((self.batch_size - 1.0) * self.n_atoms), self.batch_size)
        offset = tf.reshape(tf.tile(tf.expand_dims(offset, -1), [1, self.n_atoms]), [-1, 1])
        m = tf.tensor_scatter_nd_add(
            m,
            tf.cast(tf.reshape(l, [-1, 1]) + offset, tf.int32),
            tf.reshape(next_state_dist * (u - b), [-1])
        )
        m = tf.tensor_scatter_nd_add(
            m,
            tf.cast(tf.reshape(u, [-1, 1]) + offset, tf.int32),
            tf.reshape(next_state_dist * (b - l), [-1])
        )
        m = tf.reshape(m, [self.batch_size, self.n_atoms])
        m = tf.stop_gradient(m)

        # Calculate loss
        losses = -tf.reduce_sum(m * state_action_dist, -1)
        losses = tf.multiply(weights, losses)
        return tf.reduce_mean(losses, -1), losses
Example #8
def histogram_2d(eta, phi, weights_px, weights_py, eta_range, phi_range, nbins, bin_dtype=tf.float32):
    eta_bins = tf.histogram_fixed_width_bins(eta, eta_range, nbins=nbins, dtype=bin_dtype)
    phi_bins = tf.histogram_fixed_width_bins(phi, phi_range, nbins=nbins, dtype=bin_dtype)

    hist_px = tf.zeros((nbins, nbins), dtype=weights_px.dtype)
    hist_py = tf.zeros((nbins, nbins), dtype=weights_py.dtype)
    indices = tf.transpose(tf.stack([phi_bins, eta_bins]))

    hist_px = tf.tensor_scatter_nd_add(hist_px, indices, weights_px)
    hist_py = tf.tensor_scatter_nd_add(hist_py, indices, weights_py)
    hist_pt = tf.sqrt(hist_px**2 + hist_py**2)
    return hist_pt
Example #9
    def evaluate(self, reward, done, next_observation):
        future_reward = reward
        not_done = tf.logical_not(done)

        if tf.reduce_any(not_done):
            not_done_indices = tf.where(not_done)
            next_value, _ = self.value_policy(next_observation)
            # tensor_scatter_nd_add is not in place: the result has to be
            # reassigned. The updates must match the selected indices, hence
            # the gather_nd on next_value.
            future_reward = tf.tensor_scatter_nd_add(
                future_reward, not_done_indices,
                self.discount_factor * tf.gather_nd(next_value, not_done_indices))

        return future_reward
Example #10
def parallel_kernel(
    prob: Tensor,
    gdata: Any,
    nnp: Tensor,
    kernel_func: Callable[[Any, Tensor, Sequence[int]], Tuple[Tensor, Tensor]],
) -> Tuple[Tensor, Tensor, Tensor]:
    """
    kernel for multiprocess to run parallel in DQAS function

    :param prob: probabilities from which the structural preset is sampled
    :param gdata: data forwarded to ``kernel_func``
    :param nnp: network parameters
    :param kernel_func: computes ``(loss, gnnp)`` given ``(gdata, nnp, preset)``
    :return: ``(loss, gnnp, gs)``, where ``gs`` is the score-function gradient of ``ln p``
    """
    sp.random.seed()  # make each subprocess run with different random state
    # see https://stackoverflow.com/a/6914470/9062180
    # it is still not the best way to coordinate numpy random and multiprocessing
    # see more in https://github.com/numpy/numpy/issues/9650
    dtype = tf.float32
    p = prob.shape[0]
    preset = preset_byprob(prob)
    loss, gnnp = kernel_func(gdata, nnp, preset)

    gs = tf.tensor_scatter_nd_add(
        tf.cast(-prob, dtype=dtype),
        tf.constant(list(zip(range(p), preset))),
        tf.ones([p], dtype=dtype),
    )  # \nabla lnp
    return loss, gnnp, gs
Example #11
def kgcnn_ops_tensor_scatter_nd_by_name(segment_name,
                                        tensor,
                                        indices,
                                        updates,
                                        name=None):
    """Scatter operation chosen by name that can replace segment-operations.

    Args:
        segment_name (str): Operation to update scattered updates. Either 'sum' or 'min' etc.
        tensor (tf.tensor): Tensor to scatter updates into.
        indices (tf.tensor): Indices to for updates.
        updates (tf.tensor): Updates of new entries for tensor.
        name (str): Name of the tensor.

    Returns:
        tf.tensor: Updates scattered into tensor with different update rules.
    """
    pool = None
    if segment_name in ["segment_mean", "mean", "reduce_mean"]:
        pool = tensor_scatter_nd_mean(tensor, indices, updates, name=name)
    elif segment_name in ["segment_sum", "sum", "reduce_sum"]:
        pool = tf.tensor_scatter_nd_add(tensor, indices, updates, name=name)
    elif segment_name in ["segment_max", "max", "reduce_max"]:
        pool = tf.tensor_scatter_nd_max(tensor, indices, updates, name=name)
    elif segment_name in ["segment_min", "sum", "reduce_min"]:
        pool = tf.tensor_scatter_nd_min(tensor, indices, updates, name=name)
    else:
        raise TypeError("Unknown pooling, choose: 'mean', 'sum', ...")
    return pool
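A hedged usage sketch for the dispatcher above, pooling three edge updates into three nodes; the 'sum' branch maps directly onto tf.tensor_scatter_nd_add (the 'mean' branch needs kgcnn's own tensor_scatter_nd_mean helper):

import tensorflow as tf

nodes = tf.zeros([3, 2])                     # 3 nodes, 2 features each
target_nodes = tf.constant([[0], [0], [2]])  # edges 0,1 -> node 0; edge 2 -> node 2
edge_updates = tf.constant([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
pooled = kgcnn_ops_tensor_scatter_nd_by_name("sum", nodes, target_nodes, edge_updates)
# pooled == [[3., 3.], [0., 0.], [3., 3.]]  (duplicate target 0 accumulates)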
Example #12
def scatter_add_tensor(ref, indices, updates, name=None):
    """
    Adds sparse updates to a variable reference.

    This operation outputs ref after the update is done. This makes it
    easier to chain operations that need to use the updated value.

    Duplicate indices: if multiple indices reference the same location,
    their contributions add.

    Requires updates.shape = indices.shape[:-1] + ref.shape[indices.shape[-1]:].

    :param ref: A Tensor. Must be one of the following types: float32,
    float64, int64, int32, uint8, uint16, int16, int8, complex64, complex128,
    qint8, quint8, qint32, half.

    :param indices: A Tensor. Must be one of the following types: int32,
    int64. A tensor of indices into the first dimension of ref.

    :param updates: A Tensor. Must have the same dtype as ref. A tensor of
    updated values to add to ref

    :param name: A name for the operation (optional).

    :return: Same as ref. Returned as a convenience for operations that want
    to use the updated values after the update is done.
    """
    return tensorflow.tensor_scatter_nd_add(ref, indices, updates, name)
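A small sketch of the duplicate-index rule documented above (assuming scatter_add_tensor and its tensorflow import are in scope): both updates target row 1, so their contributions add.

import tensorflow as tf

ref = tf.zeros([3, 2])
indices = tf.constant([[1], [1]])
updates = tf.constant([[1.0, 2.0], [10.0, 20.0]])
out = scatter_add_tensor(ref, indices, updates)
# out == [[0., 0.], [11., 22.], [0., 0.]]
# updates.shape == indices.shape[:-1] + ref.shape[1:] == (2, 2)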
Example #13
    def propagate(self,
                  in_states,
                  layer_no,
                  edge_type_ids,
                  message_sources,
                  message_targets,
                  training,
                  residuals=None):
        # Collect messages across all edge types.
        messages = tf.zeros_like(in_states)
        for type_index in range(self.num_edge_types):
            type_ids = edge_type_ids[type_index]
            if tf.shape(type_ids)[0] == 0:
                continue
            # Retrieve source states and compute type-transformation.
            edge_source_states = gather_dense_grad(in_states,
                                                   message_sources[type_index])
            type_messages = tf.matmul(edge_source_states,
                                      self.type_weights[layer_no][type_index])
            if self.add_type_bias:
                type_messages += self.type_biases[layer_no][type_index]
            messages = tf.tensor_scatter_nd_add(
                messages, message_targets[type_index][..., tf.newaxis],
                type_messages)

        # Concatenate residual messages, if applicable.
        if residuals is not None:
            messages = tf.concat(residuals + [messages], axis=-1)

        # Run GRU for each node.
        new_states, _ = self.rnns[layer_no](messages,
                                            in_states,
                                            training=training)
        return new_states
Example #14
    def insert_record(histogram, record):
        """Inserts a record into the histogram.

        If the record is outside the valid range, it will be dropped.

        Args:
          histogram: A `tf.Tensor` representing the histogram.
          record: A `float` representing the incoming record.

        Returns:
          A `tf.Tensor` representing the updated histogram with the input
          record inserted.
        """

        if histogram.shape != (num_bins, ):
            raise ValueError(f'Expected shape ({num_bins}, ). '
                             f'Get {histogram.shape}.')

        if record < lower_bound or record >= upper_bound:
            return histogram
        else:
            bin_index = tf.cast(
                tf.math.floor((record - lower_bound) / precision), tf.int32)
        return tf.tensor_scatter_nd_add(tensor=histogram,
                                        indices=[[bin_index]],
                                        updates=[1])
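A hedged usage sketch, assuming the enclosing scope defines num_bins, lower_bound, upper_bound and precision = (upper_bound - lower_bound) / num_bins, e.g. 4 bins on [0, 1) with precision 0.25:

import tensorflow as tf

histogram = tf.zeros([num_bins], tf.int32)
for record in [0.1, 0.15, 0.8, 2.0]:   # 2.0 falls outside the range and is dropped
    histogram = insert_record(histogram, record)
# with 4 bins on [0, 1): histogram == [2, 0, 0, 1]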
Example #15
def linear_small_translation_1d(X,axis,t,name=None):
    '''
    Translates X along axis by an amount 0 <= t <= 1.  Intuitively, something like

        result[..., i, ...] approx X[..., i + t, ...]

    where t is floating point.  The shape of result will be the same as the
    shape of X, except along axis, where it will be reduced by 1.
    '''

    if name is None:
        name='linear_small_translation_1d'

    with tf.name_scope(name):

        shp = tf.shape(X)

        # get two neighbors
        stv, szv = construct_1dslice(shp, 0, shp[axis] - 1, axis)
        W = tf.slice(X, stv, szv)

        stv = tf.tensor_scatter_nd_add(stv, [[axis]], [1])
        E = tf.slice(X, stv, szv)

        # linear interpolation does the rest
        return linear_interpolation(W, E, t)
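The scatter call above nudges a single coordinate of the slice-start vector. A standalone illustration of that trick:

import tensorflow as tf

stv = tf.constant([0, 5, 0])                       # a start vector
stv = tf.tensor_scatter_nd_add(stv, [[1]], [1])    # bump the entry for axis 1
# stv == [0, 6, 0]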
Example #16
    def _loss_semantic_segmentation(self, pred_seg, mask_gt, classes, num_obj):

        shape_mask = tf.shape(mask_gt)
        num_batch = shape_mask[0]
        seg_shape = tf.shape(pred_seg)[1]
        loss_seg = 0.

        for idx in tf.range(num_batch):
            seg = pred_seg[idx]
            masks = mask_gt[idx]
            cls = classes[idx]
            objects = num_obj[idx]

            # seg shape (69, 69, num_cls)
            # resize masks from (100, 138, 138) to (100, 69, 69)
            masks = tf.expand_dims(masks, axis=-1)
            masks = tf.image.resize(masks, [seg_shape, seg_shape], method=tf.image.ResizeMethod.BILINEAR)
            masks = tf.cast((masks > 0.5), seg.dtype)
            masks = tf.squeeze(masks)

            # obj_mask shape (objects, 69, 69) after resizing
            obj_mask = masks[:objects]
            obj_cls = tf.expand_dims(cls[:objects], axis=-1)

            # create empty ground truth (69, 69, num_cls)
            seg_gt = tf.zeros_like(seg)
            seg_gt = tf.transpose(seg_gt, perm=(2, 0, 1))
            seg_gt = tf.tensor_scatter_nd_add(seg_gt, indices=obj_cls, updates=obj_mask)
            seg_gt = tf.transpose(seg_gt, perm=(1, 2, 0))
            loss_seg += tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(seg_gt, seg))
        loss_seg = loss_seg / tf.cast(seg_shape, pred_seg.dtype) ** 2 / tf.cast(num_batch, pred_seg.dtype)

        return loss_seg
Example #17
def _compare_dynamic_gather_nd_with_tf(
    test_case, params_shape, static_params_shape, indices_shape
):
    params, indices = _random_inputs(params_shape, indices_shape)

    i = tf.constant(indices)
    with tf.GradientTape() as t:
        x = tf.Variable(params)
        y = tf.gather_nd(x, i)

    dy = t.gradient(y, x)
    if isinstance(dy, tf.IndexedSlices):
        test_case.assertTrue(
            np.array_equal(indices.ravel(), dy.indices.numpy().ravel())
        )
        zero_params = tf.constant(np.full(params.shape, 0.0, dtype=np.float32))
        dy = tf.tensor_scatter_nd_add(zero_params, i, dy.values)

    def compare_dy(params_grad):
        test_case.assertTrue(np.array_equal(dy.numpy(), params_grad.numpy_list()[0]))

    of_y = _of_dynamic_params_gather_nd(
        params, indices, static_params_shape, compare_dy
    )
    test_case.assertTrue(np.array_equal(y.numpy(), of_y))
Example #18
    def body(i, s, S, I, A, R, D, Ru):
        U = A + R + D
        alpha_t = param_vector[0] + (
            param_vector[1] / (tf.constant(1.0) + tf.pow(U, param_vector[2])))
        h_1 = (S * I / P) * alpha_t
        h_2 = I * param_vector[4]
        h_3 = A * param_vector[3]
        h_4 = A * param_vector[5]
        h_5 = I * param_vector[6] * param_vector[3]
        h = tf.stack([h_1, h_2, h_3, h_4, h_5])
        Y_store = tf.clip_by_value(
            tf.math.floor(tfd.Normal(loc=h, scale=tf.sqrt(h)).sample()), 0.0,
            P)

        m = tf.matmul(tf.transpose(nu), Y_store)

        S = tf.clip_by_value(S + m[0, :], 0.0, P)
        I = tf.clip_by_value(I + m[1, :], 0.0, P)
        A = tf.clip_by_value(A + m[2, :], 0.0, P)
        R = tf.clip_by_value(R + m[3, :], 0.0, P)
        D = tf.clip_by_value(D + m[4, :], 0.0, P)
        Ru = tf.clip_by_value(Ru + m[5, :], 0.0, P)

        s = tf.tensor_scatter_nd_add(s, [[i, 0], [i, 1], [i, 2]],
                                     tf.stack([A, R, D]))

        return i + 1, s, S, I, A, R, D, Ru
Example #19
 def layerSave(self, save_mode=True):
     '''
     For saving initialization values (save_mode=True) and then reinstating them
     without overwriting the frozen parameters (save_mode=False).
     '''
     if save_mode:
         # Snapshot copies of the current weight values; storing the raw
         # variables would only keep references that later mutate in place.
         self.saved_weights = [
             [tf.identity(w) for w in layer.trainable_weights]
             for layer in self.net.dense_layers
         ]
     else:
         for i, layer_weights in enumerate(self.saved_weights):
             for j, saved in enumerate(layer_weights):

                 # Check for the case of a single-valued layer.
                 if len(self.frozen_inds[i][j]) > 0:
                     restored = tf.tensor_scatter_nd_add(
                             tensor=saved,
                             indices=self.frozen_inds[i][j],
                             updates=self.frozen_vals[i][j])
                 else:
                     restored = saved
                 # assign() writes into the live variable; rebinding the
                 # trainable_weights list entry would leave it unchanged.
                 self.net.dense_layers[i].trainable_weights[j].assign(restored)
Example #20
    def graph_sampler(self, batch_size, seed, beta):
        #Same as sample method above but specialised for graph compilation
        sample = tf.zeros([batch_size, self.L, self.L, 1], tf.float32)
        tf_binomial = tf.random.stateless_binomial
        full_ones = tf.ones([batch_size], tf.int32)
        full_zeros = tf.zeros_like(full_ones)
        r = self.learn_range
        for i in range(self.L):
            for j in range(self.L):
                seed.assign((seed * 1664525 + 1013904223) % 2**31)
                sub_sample = sample[:,
                                    np.maximum(i - 1, 0):i + 1,
                                    np.maximum(j - r, 0):
                                    np.minimum(j + r + 1, self.L)]
                x_hat = self.call(sub_sample, beta)
                i_h = tfm.minimum(i, 1)
                j_h = tfm.minimum(j, r)
                probs = 0.5 if i == 0 and j == 0 else x_hat[:, i_h, j_h, 0]
                indices = tf.stack([
                    tf.range(batch_size), i * full_ones, j * full_ones,
                    full_zeros
                ], 1)
                updates = tf_binomial([batch_size], seed, 1., probs,
                                      tf.float32) * 2 - 1
                sample = tf.tensor_scatter_nd_add(sample,
                                                  tf.cast(indices, tf.int32),
                                                  updates)

        #x_hat = self.call(sample)
        if self.z2:
            seed.assign((seed * 1664525 + 1013904223) % 2**31)
            flip = tf_binomial([batch_size, 1, 1, 1], seed, 1., 0.5,
                               tf.float32) * 2 - 1
            sample = sample * flip
        return sample
Example #21
    def _get_labels_embed(
        label_ids: tf.Tensor, all_labels_embed: tf.Tensor
    ) -> tf.Tensor:
        # instead of processing labels again, gather embeddings from
        # all_labels_embed using label ids

        indices = tf.cast(label_ids[:, :, 0], tf.int32)

        # Find padding indices. They should have a value equal to `LABEL_PAD_ID`
        padding_indices = tf.where(tf.equal(indices, LABEL_PAD_ID))

        # Create a tensor of values with sign opposite to `LABEL_PAD_ID` which
        # will serve as updates to original `indices`
        updates_to_indices = (
            tf.ones((tf.shape(padding_indices)[0]), dtype=tf.int32) * -1 * LABEL_PAD_ID
        )

        # Add the updates tensor to indices with padding.
        # So, effectively all indices with `LABEL_PAD_ID=-1`
        # become 0 because updates contain 1s.
        # This is fine because we don't change the original non-padding label
        # indices but only make the padding indices 'compatible'
        # for the `tf.gather` op below.
        indices_to_gather = tf.cast(
            tf.tensor_scatter_nd_add(indices, padding_indices, updates_to_indices),
            tf.int32,
        )

        labels_embed = tf.gather(all_labels_embed, indices_to_gather)

        return labels_embed
Example #22
    def _tf_scatter_add(tensor, indices, updates):
        # manual implementation of torch's scatter_add_ along the last
        # dimension: tensor.scatter_add_(dim=-1, index=indices, src=updates)

        original_tensor = tensor
        # offset each row's indices so they address the flattened tensor
        indices = tf.reshape(indices, shape=[-1, tf.shape(indices)[-1]])
        indices_add = tf.expand_dims(tf.range(0,
                                              tf.shape(indices)[0], 1) *
                                     (tf.shape(tensor)[-1]),
                                     axis=-1)
        indices += indices_add

        # flatten so a single 1-D scatter covers all rows
        tensor = tf.reshape(tensor, shape=[-1])
        indices = tf.reshape(indices, shape=[-1, 1])
        updates = tf.reshape(updates, shape=[-1])

        # same as torch scatter_add_
        scatter = tf.tensor_scatter_nd_add(tensor, indices, updates)
        scatter = tf.reshape(scatter,
                             shape=[
                                 tf.shape(original_tensor)[0],
                                 tf.shape(original_tensor)[1], -1
                             ])
        return scatter
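A hedged demo on a (batch, time, vocab)-shaped tensor: each [b, t] row scatter-adds its K updates into the vocabulary positions given by indices, mirroring torch's scatter_add_ over the last dimension.

import tensorflow as tf

tensor = tf.zeros([1, 2, 4])                 # (B=1, T=2, V=4)
indices = tf.constant([[[0, 0], [3, 1]]])    # (B, T, K) vocabulary positions
updates = tf.constant([[[1.0, 2.0], [5.0, 7.0]]])
out = _tf_scatter_add(tensor, indices, updates)
# out == [[[3., 0., 0., 0.],                 # 1 + 2 both land on position 0
#          [0., 7., 0., 5.]]]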
Example #23
    def body(i, s, S, I, A, R, D, Ru):
        """Single update for one day."""
        U = A / (tf.pow(10.0, param_vector[8]))
        alpha_t = param_vector[0] + (
            param_vector[1] / (tf.constant(1.0) + tf.pow(U, param_vector[6])))
        h_1 = (S * I / P) * alpha_t
        h_2 = I * param_vector[3]
        h_3 = A * param_vector[2]
        h_4 = A * param_vector[4]
        h_5 = I * param_vector[2] * param_vector[5]
        h = tf.stack([h_1, h_2, h_3, h_4, h_5])
        normal_sample = tfd.Normal(loc=h, scale=tf.sqrt(h)).sample()
        Y_store = tf.clip_by_value(tf.math.floor(normal_sample), 0.0, P)

        m = tf.matmul(tf.transpose(MIXING_MATRIX), Y_store)

        # Note: Simple vectorisation suppresses parameter update in loop.
        S = tf.clip_by_value(S + m[0, :], 0.0, P)
        I = tf.clip_by_value(I + m[1, :], 0.0, P)
        A = tf.clip_by_value(A + m[2, :], 0.0, P)
        R = tf.clip_by_value(R + m[3, :], 0.0, P)
        D = tf.clip_by_value(D + m[4, :], 0.0, P)
        Ru = tf.clip_by_value(Ru + m[5, :], 0.0, P)

        s = tf.tensor_scatter_nd_add(tensor=s,
                                     indices=[[i, 0], [i, 1], [i, 2]],
                                     updates=tf.stack([A, R, D]))

        return i+1, s, S, I, A, R, D, Ru
Example #24
    def accumulate_client_votes(vote_accumulator, example):
        """Accumulates client votes on prefix extensions."""

        example = tf.strings.lower(example)
        # Append the default terminator to the example.
        example = tf.strings.join([example, default_terminator])

        # Compute effective round number.
        effective_round_num = tf.math.floordiv(round_num, num_sub_rounds)

        if tf.strings.length(example) < effective_round_num:
            return vote_accumulator
        else:
            discovered_prefixes_index = discovered_prefixes_table.lookup(
                tf.strings.substr(example, 0, effective_round_num))
            possible_prefix_extensions_index = possible_prefix_extensions_table.lookup(
                tf.strings.substr(example, effective_round_num, 1))

            # If the character extension is not in the alphabet, or the prefix has
            # not already been discovered, do not add the client's vote.
            if tf.math.logical_or(
                    tf.math.equal(possible_prefix_extensions_index,
                                  tf.constant(DEFAULT_VALUE)),
                    tf.math.equal(discovered_prefixes_index,
                                  tf.constant(DEFAULT_VALUE))):
                return vote_accumulator

            else:
                indices = [[
                    discovered_prefixes_index, possible_prefix_extensions_index
                ]]
                updates = tf.constant([1])
                return tf.tensor_scatter_nd_add(vote_accumulator, indices,
                                                updates)
Example #25
def apply_scatter_nd_add(tensor, updates, indices, tf_int, tf_float):
    """ applies the tensor_scatter_nd_add over the batch dimension
    """
    out = Lambda(lambda entry: K.map_fn(
        lambda entry: tf.tensor_scatter_nd_add(entry[0], entry[1], entry[2]),
        entry,
        dtype=tf_float))([tensor, indices, updates])
    return out
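Outside a Keras model, the same batched scatter can be sketched with tf.map_fn directly (a hedged equivalent of the Lambda layer above):

import tensorflow as tf

tensor = tf.zeros([2, 3])                    # a batch of two length-3 rows
indices = tf.constant([[[0]], [[2]]])        # one target index per batch element
updates = tf.constant([[1.0], [5.0]])
out = tf.map_fn(
    lambda entry: tf.tensor_scatter_nd_add(entry[0], entry[1], entry[2]),
    (tensor, indices, updates),
    fn_output_signature=tf.float32)
# out == [[1., 0., 0.], [0., 0., 5.]]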
Example #26
def _compare_tensor_scatter_nd_add_with_tf(test_case, params_shape,
                                           indices_shape, updates_shape,
                                           device_type, mirrored):
    params, updates, indices = _random_inputs(params_shape, indices_shape,
                                              updates_shape, True)

    params_const = tf.constant(params)
    indices_const = tf.constant(indices)
    updates_const = tf.constant(updates)
    with tf.GradientTape() as t1:
        params_var = tf.Variable(params)
        tf_out1 = tf.tensor_scatter_nd_add(params_var, indices_const,
                                           updates_const)
    tf_params_grad = t1.gradient(tf_out1, params_var)

    with tf.GradientTape() as t2:
        updates_var = tf.Variable(updates)
        tf_out2 = tf.tensor_scatter_nd_add(params_const, indices_const,
                                           updates_var)
    tf_updates_grad = t2.gradient(tf_out2, updates_var)

    test_case.assertTrue(np.allclose(tf_out1.numpy(), tf_out2.numpy()))

    def compare_params_grad(of_params_grad):
        tf_params_grad_np = tf_params_grad.numpy()
        of_params_grad_np = (of_params_grad.numpy_list()[0]
                             if mirrored else of_params_grad.numpy())
        test_case.assertTrue(np.allclose(tf_params_grad_np, of_params_grad_np))

    def compare_updates_grad(of_updates_grad):
        tf_updates_grad_np = tf_updates_grad.numpy()
        of_updates_grad_np = (of_updates_grad.numpy_list()[0]
                              if mirrored else of_updates_grad.numpy())
        test_case.assertTrue(
            np.allclose(tf_updates_grad_np, of_updates_grad_np))

    of_out = _of_tensor_scatter_nd_add(
        params,
        indices,
        updates,
        device_type,
        mirrored,
        compare_params_grad,
        compare_updates_grad,
    )
    test_case.assertTrue(np.allclose(tf_out1.numpy(), of_out))
Example #27
def _scatter_element_add_tf(tensor, index, value):
    """In-place addition of a multidimensional value over various
    indices of a tensor."""
    import tensorflow as tf

    indices = tf.expand_dims(index, 0)
    value = tf.cast(tf.expand_dims(value, 0), tensor.dtype)
    return tf.tensor_scatter_nd_add(tensor, indices, value)
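Usage sketch: add 2.5 at position (1, 2) of a 2x3 matrix.

import tensorflow as tf

t = tf.zeros([2, 3])
out = _scatter_element_add_tf(t, index=(1, 2), value=2.5)
# out == [[0., 0., 0.], [0., 0., 2.5]]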
Example #28
def scatter_sum(src, index, dim: int = -1, out=None, dim_size=None):
    index = broadcast(index, src, dim)
    if out is None:
        size = src.get_shape().as_list()
        if dim_size is not None:
            size[dim] = dim_size
        elif tf.size(index) == 0:
            size[dim] = 0
        else:
            # infer the output size from the largest index
            size[dim] = int(tf.reduce_max(index)) + 1
        out = tf.zeros(size, dtype=src.dtype)
    return tf.tensor_scatter_nd_add(out, index, src)
Example #29
    def lazy_marginal(self, instruments, numeraire, time, book):
        batch_size = instruments.shape[0]
        book_size = book.book_size
        timesteps = len(time) - 1

        marginal_prices = tf.zeros((batch_size, book_size, timesteps + 1),
                                   FLOAT_DTYPE)
        marginal_deltas = tf.zeros((batch_size, book_size, timesteps + 1),
                                   FLOAT_DTYPE)
        grid = tf.constant(list(itertools.product(
            range(batch_size), range(book_size), range(timesteps + 1))))

        for book_idx in tf.range(book_size):
            entry = book.derivatives[book_idx]
            instrument = instruments[:, entry["link"], :]
            sign = entry["exposure"]
            value = sign * entry["derivative"].value(
                time, instrument, numeraire)
            delta = sign * entry["derivative"].delta(
                time, instrument, numeraire)

            indices = tf.boolean_mask(grid, grid[:, 1] == book_idx)
            marginal_prices = tf.tensor_scatter_nd_add(
                marginal_prices, indices, tf.reshape(value, [-1]))
            marginal_deltas = tf.tensor_scatter_nd_add(
                marginal_deltas, indices, tf.reshape(delta, [-1]))

        prices = tf.reduce_sum(marginal_prices, axis=1)

        deltas = tf.zeros((batch_size, book.instrument_dim, timesteps + 1),
                          FLOAT_DTYPE)
        links = [entry["link"] for entry in book.derivatives]
        for path_idx in range(batch_size):
            for time_idx in range(timesteps + 1):
                indices = tf.stack([
                    path_idx * tf.ones(book.instrument_dim, INT_DTYPE),
                    tf.range(book.instrument_dim),
                    time_idx * tf.ones(book.instrument_dim, INT_DTYPE)], 1)

                updates = tf.math.bincount(
                    links,
                    marginal_deltas[path_idx, :, time_idx],
                    book.instrument_dim)

                deltas = tf.tensor_scatter_nd_add(deltas, indices, updates)

        return prices, deltas
Example #30
    def call(self, inputs):
        # NOTE: it is for now impossible to do slice assignment in tensorflow
        # there are some on-going GH issues or SO questions but for now
        # tensor_scatter_nd_add seems to be the only way to go.
        # https://stackoverflow.com/questions/62092147/how-to-efficiently-assign-to-a-slice-of-a-tensor-in-tensorflow
        in_shape = tf.shape(inputs)
        batch_size = in_shape[0]
        height = in_shape[1]
        width = in_shape[2]
        # the number of channels can't be unknown for the convolutions
        n_channels = inputs.shape[3] // 4
        outputs = tf.zeros([batch_size, 2 * height, 2 * width, n_channels])
        # for now we only consider greyscale
        x1 = inputs[..., 0:n_channels] / 2
        x2 = inputs[..., n_channels:2 * n_channels] / 2
        x3 = inputs[..., 2 * n_channels:3 * n_channels] / 2
        x4 = inputs[..., 3 * n_channels:4 * n_channels] / 2
        # in the following, E denotes even and O denotes odd
        x_EE = x1 - x2 - x3 + x4
        x_OE = x1 - x2 + x3 - x4
        x_EO = x1 + x2 - x3 - x4
        x_OO = x1 + x2 + x3 + x4

        # now the preparation to tensor_scatter_nd_add
        height_range_E = 2 * tf.range(height)
        height_range_O = height_range_E + 1
        width_range_E = 2 * tf.range(width)
        width_range_O = width_range_E + 1

        # this transpose allows to only index the varying dimensions
        # only the first dimensions can be indexed in tensor_scatter_nd_add
        # we also need to match the indices with the updates reshaping
        scatter_nd_perm = [2, 1, 3, 0]
        outputs_reshaped = tf.transpose(outputs, perm=scatter_nd_perm)

        combos_list = [
            ((height_range_E, width_range_E), x_EE),
            ((height_range_O, width_range_E), x_OE),
            ((height_range_E, width_range_O), x_EO),
            ((height_range_O, width_range_O), x_OO),
        ]
        for (height_range, width_range), x_comb in combos_list:
            h_range, w_range = tf.meshgrid(height_range, width_range)
            h_range = tf.reshape(h_range, (-1, ))
            w_range = tf.reshape(w_range, (-1, ))
            combo_indices = tf.stack([w_range, h_range], axis=-1)
            combo_reshaped = tf.transpose(x_comb, perm=scatter_nd_perm)
            outputs_reshaped = tf.tensor_scatter_nd_add(
                outputs_reshaped,
                indices=combo_indices,
                updates=tf.reshape(combo_reshaped,
                                   (-1, n_channels, batch_size)),
            )

        inverse_scatter_nd_perm = [3, 1, 0, 2]
        outputs = tf.transpose(outputs_reshaped, perm=inverse_scatter_nd_perm)

        return outputs
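A minimal standalone sketch of the slice-assignment workaround described in the NOTE above: scatter into the leading axis, transposing first when the target slice lives on another axis.

import tensorflow as tf

x = tf.zeros([4, 2])
rows = tf.constant([[0], [2]])                    # "x[0] += a; x[2] += b"
vals = tf.constant([[1.0, 1.0], [2.0, 2.0]])
x = tf.tensor_scatter_nd_add(x, rows, vals)

y = tf.zeros([2, 4])                              # to target columns, transpose first
y = tf.transpose(tf.tensor_scatter_nd_add(tf.transpose(y), rows, vals))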