Example 1
    def similarity_matrix(self, embeds):
        # (N, M, C)
        speakers_per_batch, utterances_per_speaker, embed_dim = embeds.shape

        # Inclusive centroids (1 per speaker). Cloning is needed for reverse differentiation
        centroids_incl = paddle.mean(embeds, axis=1)
        centroids_incl_norm = paddle.norm(centroids_incl,
                                          p=2,
                                          axis=1,
                                          keepdim=True)
        normalized_centroids_incl = centroids_incl / centroids_incl_norm

        # Exclusive centroids (1 per utterance)
        centroids_excl = paddle.broadcast_to(
            paddle.sum(embeds, axis=1, keepdim=True), embeds.shape) - embeds
        centroids_excl /= (utterances_per_speaker - 1)
        centroids_excl_norm = paddle.norm(centroids_excl,
                                          p=2,
                                          axis=2,
                                          keepdim=True)
        normalized_centroids_excl = centroids_excl / centroids_excl_norm

        p1 = paddle.matmul(embeds.reshape([-1, embed_dim]),
                           normalized_centroids_incl,
                           transpose_y=True)  # (NM, N)
        p1 = p1.reshape([-1])  # (NM*N,)
        # print("p1: ", p1.shape)
        p2 = paddle.bmm(embeds.reshape([-1, 1, embed_dim]),
                        normalized_centroids_excl.reshape([-1, embed_dim,
                                                           1]))  # (NM, 1, 1)
        p2 = p2.reshape([-1])  # (NM)

        # begin: alternative implementation for scatter
        with paddle.no_grad():
            index = paddle.arange(0,
                                  speakers_per_batch * utterances_per_speaker,
                                  dtype="int64").reshape([
                                      speakers_per_batch,
                                      utterances_per_speaker
                                  ])
            index = index * speakers_per_batch + paddle.arange(
                0, speakers_per_batch, dtype="int64").unsqueeze(-1)
            index = paddle.reshape(index, [-1])
        ones = paddle.ones(
            [speakers_per_batch * utterances_per_speaker * speakers_per_batch])
        zeros = paddle.zeros_like(index, dtype=ones.dtype)
        mask_p1 = paddle.scatter(ones, index, zeros)
        p = p1 * mask_p1 + (1 - mask_p1) * paddle.scatter(ones, index, p2)
        # end: alternative implementation for scatter
        # p = paddle.scatter(p1, index, p2)

        p = p * self.similarity_weight + self.similarity_bias  # neg
        p = p.reshape(
            [speakers_per_batch * utterances_per_speaker, speakers_per_batch])
        return p, p1, p2
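
As a standalone check, here is a minimal sketch (toy tensors, not from the original model) of the mask-by-scatter trick above: a 0/1 mask is built by scattering zeros into a vector of ones, then used to splice the p2 values into p1 without in-place writes.

import paddle

p1 = paddle.to_tensor([10., 20., 30., 40.])
p2 = paddle.to_tensor([1., 2.])                       # values destined for the masked slots
index = paddle.to_tensor([1, 3], dtype="int64")

ones = paddle.ones([4])
zeros = paddle.zeros_like(index, dtype=ones.dtype)
mask_p1 = paddle.scatter(ones, index, zeros)          # [1., 0., 1., 0.]
p = p1 * mask_p1 + (1 - mask_p1) * paddle.scatter(ones, index, p2)
print(p)                                              # [10., 1., 30., 2.]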
Example 2
    def _bipartite_match_for_batch(self, gt_bbox, gt_label, prior_boxes,
                                   bg_index):
        """
        Args:
            gt_bbox (Tensor): [B, N, 4]
            gt_label (Tensor): [B, N, 1]
            prior_boxes (Tensor): [A, 4]
            bg_index (int): Background class index
        """
        batch_size, num_priors = gt_bbox.shape[0], prior_boxes.shape[0]
        ious = iou_similarity(gt_bbox.reshape((-1, 4)), prior_boxes).reshape(
            (batch_size, -1, num_priors))

        # Calculate the number of objects per sample.
        num_object = (ious.sum(axis=-1) > 0).astype('int64').sum(axis=-1)

        # For each prior box, get the max IoU of all GTs.
        prior_max_iou, prior_argmax_iou = ious.max(axis=1), ious.argmax(axis=1)
        # For each GT, get the max IoU of all prior boxes.
        gt_max_iou, gt_argmax_iou = ious.max(axis=2), ious.argmax(axis=2)

        # Gather target bbox and label according to 'prior_argmax_iou' index.
        batch_ind = paddle.arange(
            0, batch_size, dtype='int64').unsqueeze(-1).tile([1, num_priors])
        prior_argmax_iou = paddle.stack([batch_ind, prior_argmax_iou], axis=-1)
        targets_bbox = paddle.gather_nd(gt_bbox, prior_argmax_iou)
        targets_label = paddle.gather_nd(gt_label, prior_argmax_iou)
        # Assign negative
        bg_index_tensor = paddle.full([batch_size, num_priors, 1], bg_index,
                                      'int64')
        targets_label = paddle.where(
            prior_max_iou.unsqueeze(-1) < self.overlap_threshold,
            bg_index_tensor, targets_label)

        # Ensure each GT can match the max IoU prior box.
        for i in range(batch_size):
            if num_object[i] > 0:
                targets_bbox[i] = paddle.scatter(
                    targets_bbox[i], gt_argmax_iou[i, :int(num_object[i])],
                    gt_bbox[i, :int(num_object[i])])
                targets_label[i] = paddle.scatter(
                    targets_label[i], gt_argmax_iou[i, :int(num_object[i])],
                    gt_label[i, :int(num_object[i])])

        # Encode box
        prior_boxes = prior_boxes.unsqueeze(0).tile([batch_size, 1, 1])
        targets_bbox = bbox2delta(prior_boxes.reshape([-1, 4]),
                                  targets_bbox.reshape([-1, 4]),
                                  self.prior_box_var)
        targets_bbox = targets_bbox.reshape([batch_size, -1, 4])

        return targets_bbox, targets_label
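
A quick sanity check of the batched gather_nd pattern above, with assumed toy shapes (B=1 image, N=2 ground-truth boxes, A=3 priors):

import paddle

gt_bbox = paddle.to_tensor([[[0., 0., 1., 1.], [2., 2., 3., 3.]]])  # [B, N, 4]
prior_argmax_iou = paddle.to_tensor([[1, 0, 1]], dtype='int64')     # [B, A]
batch_ind = paddle.arange(0, 1, dtype='int64').unsqueeze(-1).tile([1, 3])
idx = paddle.stack([batch_ind, prior_argmax_iou], axis=-1)          # [B, A, 2]
print(paddle.gather_nd(gt_bbox, idx))  # each prior picks its best-matching GT box, shape [B, A, 4]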
Example 3
def rpn_anchor_target(anchors,
                      gt_boxes,
                      rpn_batch_size_per_im,
                      rpn_positive_overlap,
                      rpn_negative_overlap,
                      rpn_fg_fraction,
                      use_random=True,
                      batch_size=1,
                      ignore_thresh=-1,
                      is_crowd=None,
                      weights=[1., 1., 1., 1.],
                      assign_on_cpu=False):
    tgt_labels = []
    tgt_bboxes = []
    tgt_deltas = []
    for i in range(batch_size):
        gt_bbox = gt_boxes[i]
        is_crowd_i = is_crowd[i] if is_crowd else None
        # Step1: match anchor and gt_bbox
        matches, match_labels = label_box(anchors, gt_bbox,
                                          rpn_positive_overlap,
                                          rpn_negative_overlap, True,
                                          ignore_thresh, is_crowd_i,
                                          assign_on_cpu)
        # Step2: sample anchor
        fg_inds, bg_inds = subsample_labels(match_labels,
                                            rpn_batch_size_per_im,
                                            rpn_fg_fraction, 0, use_random)
        # Fill with the ignore label (-1), then set positive and negative labels
        labels = paddle.full(match_labels.shape, -1, dtype='int32')
        if bg_inds.shape[0] > 0:
            labels = paddle.scatter(labels, bg_inds,
                                    paddle.zeros_like(bg_inds))
        if fg_inds.shape[0] > 0:
            labels = paddle.scatter(labels, fg_inds, paddle.ones_like(fg_inds))
        # Step3: make output
        if gt_bbox.shape[0] == 0:
            matched_gt_boxes = paddle.zeros([0, 4])
            tgt_delta = paddle.zeros([0, 4])
        else:
            matched_gt_boxes = paddle.gather(gt_bbox, matches)
            tgt_delta = bbox2delta(anchors, matched_gt_boxes, weights)
            matched_gt_boxes.stop_gradient = True
            tgt_delta.stop_gradient = True
        labels.stop_gradient = True
        tgt_labels.append(labels)
        tgt_bboxes.append(matched_gt_boxes)
        tgt_deltas.append(tgt_delta)

    return tgt_labels, tgt_bboxes, tgt_deltas
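
For reference, a toy version (with assumed indices) of the label assignment in Step 2: start from all-ignore (-1), then scatter 0 at sampled background anchors and 1 at sampled foreground anchors.

import paddle

labels = paddle.full([6], -1, dtype='int32')
bg_inds = paddle.to_tensor([0, 4], dtype='int64')
fg_inds = paddle.to_tensor([2, 3], dtype='int64')
labels = paddle.scatter(labels, bg_inds, paddle.zeros_like(bg_inds).astype('int32'))
labels = paddle.scatter(labels, fg_inds, paddle.ones_like(fg_inds).astype('int32'))
print(labels)  # [0, -1, 1, 1, 0, -1]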
Example 4
    def scatter_add(self, dim, index, updates):
        assert dim == 0, "scatter_add only supports dim == 0"
        if "64" in str(updates.dtype):
            updates = updates.astype("float32")
        ret = self
        if "64" in str(ret.dtype):
            ret = ret.astype("float32")
        if len(index.shape) == 1:
            ret = paddle.scatter(ret, index, updates, overwrite=False)
        else:
            for ii in range(index.shape[1]):
                ret = paddle.scatter(ret, index[:, ii], updates, overwrite=False)

        return ret
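
Note that with overwrite=False, paddle.scatter first zeroes the addressed rows and then sums the updates into them, so repeated indices accumulate. A small assumed check:

import paddle

x = paddle.zeros([4, 2])
index = paddle.to_tensor([1, 1, 3], dtype='int64')
updates = paddle.ones([3, 2])
print(paddle.scatter(x, index, updates, overwrite=False))
# rows with a repeated index receive the sum: [[0, 0], [2, 2], [0, 0], [1, 1]]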
Example 5
    def scatter_paddle(self, refined_seg_logits, point_indices, point_logits):
        """
        Paddle version of scatter, equivalent to the PyTorch call scatter(-1, point_indices, point_logits).

        Args:
            refined_seg_logits(Tensor): shape=[batch_size, channels, height * width]
            point_indices(Tensor): shape=[batch_size, channels, height * width]
            point_logits(Tensor): shape=[batch_size, channels, height * width]
        Returns:
            scattered refined_seg_logits(Tensor).
        """

        original_shape = paddle.shape(
            refined_seg_logits)  # [batch_size, channels, height * width]
        new_refined_seg_logits = refined_seg_logits.flatten(0, 1)  # [N*C,H*W]
        offsets = (paddle.arange(paddle.shape(new_refined_seg_logits)[0]) *
                   paddle.shape(new_refined_seg_logits)[1]).unsqueeze(
                       -1)  # [N*C,1]
        point_indices = point_indices.flatten(0, 1)  # [N*C,H*W]
        new_point_indices = (point_indices + offsets).flatten()
        point_logits = point_logits.flatten()  # [N*C*H*W]
        refined_seg_logits = paddle.scatter(refined_seg_logits.flatten(),
                                            new_point_indices,
                                            point_logits,
                                            overwrite=True)
        return refined_seg_logits.reshape(shape=original_shape)
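
A minimal sketch of the flatten-offset-scatter trick above, using assumed toy shapes ([N=1, C=2, H*W=4] logits with two sampled points per channel):

import paddle

refined = paddle.zeros([1, 2, 4])                                   # [N, C, H*W]
point_indices = paddle.to_tensor([[[0, 3], [1, 2]]], dtype='int64')
point_logits = paddle.to_tensor([[[5., 6.], [7., 8.]]])
flat = refined.flatten(0, 1)                                        # [N*C, H*W]
offsets = (paddle.arange(flat.shape[0]) * flat.shape[1]).unsqueeze(-1)
flat_indices = (point_indices.flatten(0, 1) + offsets).flatten()
out = paddle.scatter(refined.flatten(), flat_indices, point_logits.flatten(), overwrite=True)
print(out.reshape([1, 2, 4]))  # [[[5., 0., 0., 6.], [0., 7., 8., 0.]]]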
Example 6
    def forward(self, inp):
        if self.div_val == 1:
            embed = self.emb_layers[0](inp)
            if self.d_proj != self.d_embed:
                embed = F.linear(embed, self.emb_projs[0])
        else:
            inp_flat = paddle.reshape(inp, shape=[-1])
            emb_flat = paddle.zeros(
                [inp_flat.shape[0], self.d_proj], dtype=global_dtype)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]

                mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
                indices_i = paddle.nonzero(mask_i).squeeze([1])

                if indices_i.numel() == 0:
                    continue

                inp_i = paddle.gather(inp_flat, indices_i, axis=0) - l_idx
                emb_i = self.emb_layers[i](inp_i)
                emb_i = F.linear(emb_i, self.emb_projs[i])

                emb_flat = paddle.scatter(emb_flat, indices_i, emb_i)

            embed = paddle.reshape(
                emb_flat, shape=inp.shape + [self.d_proj])

        embed = embed * self.emb_scale

        return embed
Example 7
    def forward(self, x):
        topk_val, topk_idx, gate_score = super().forward(
            x, return_all_scores=True)
        s = gate_score.shape[0]
        top1_idx = topk_idx.flatten()
        c_e = paddle.scatter(paddle.zeros(shape=[self.tot_expert]),
                             top1_idx,
                             paddle.ones_like(top1_idx, dtype="float32"),
                             overwrite=False) / s
        m_e = paddle.mean(F.softmax(gate_score, axis=1), axis=0)
        loss = paddle.mean(c_e * m_e) * (self.num_expert**2)
        self.set_loss(loss)

        cap_rate = self.capacity[0 if self.training else 1]
        capacity = math.ceil(cap_rate * x.shape[0])
        _new_lec, _new_gec, topk_idx = limit_by_capacity(topk_idx,
                                                         self.num_expert,
                                                         self.world_size,
                                                         capacity,
                                                         group=self.group)

        if self.random_routing:
            rand_routing_prob = paddle.rand(shape=[gate_score.shape[0]],
                                            dtype="float32")
            topk_idx = paddle.distributed.models.moe.utils._random_routing(
                topk_idx, topk_val, rand_routing_prob)
        return topk_val, topk_idx
Example 8
        def TopPProcess(probs, top_p, min_tokens_to_keep):
            sorted_probs = paddle.sort(probs, descending=True)
            sorted_indices = paddle.argsort(probs, descending=True)
            cumulative_probs = paddle.cumsum(sorted_probs, axis=-1)

            # Remove tokens with cumulative probs above the top_p, but keep at
            # least min_tokens_to_keep tokens
            sorted_indices_to_remove = cumulative_probs > top_p
            if min_tokens_to_keep > 1:
                # Set 'min_tokens_to_keep - 1' because the first token is kept
                sorted_indices_to_remove[:, :min_tokens_to_keep - 1] = 0
            # Keep the first token
            sorted_indices_to_remove = paddle.cast(sorted_indices_to_remove,
                                                   dtype='int64')
            sorted_indices_to_remove[:, 1:] = (
                sorted_indices_to_remove[:, :-1].clone())
            sorted_indices_to_remove[:, 0] = 0

            # Scatter sorted tensors to original indexing
            sorted_indices = sorted_indices + paddle.arange(
                probs.shape[0]).unsqueeze(-1) * probs.shape[-1]
            condition = paddle.scatter(sorted_indices_to_remove.flatten(),
                                       sorted_indices.flatten(),
                                       sorted_indices_to_remove.flatten())
            condition = paddle.cast(condition, 'bool').reshape(probs.shape)
            probs = paddle.where(condition, paddle.full_like(probs, 0.0),
                                 probs)
            return probs
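
A toy check (assumed two-row example, not taken from the library's tests) of the offset-and-scatter step that maps the per-sorted-position removal flags back to the original column order:

import paddle

probs = paddle.to_tensor([[0.1, 0.6, 0.3], [0.5, 0.2, 0.3]])
sorted_indices = paddle.argsort(probs, descending=True)
flags = paddle.to_tensor([[0, 0, 1], [0, 1, 1]], dtype='int64')   # removal flags per sorted slot
flat_idx = (sorted_indices + paddle.arange(2).unsqueeze(-1) * 3).flatten()
condition = paddle.scatter(flags.flatten(), flat_idx, flags.flatten())
print(condition.reshape([2, 3]))  # [[1, 0, 0], [0, 1, 1]], i.e. the flags now follow the original columns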
Example 9
def generate_segment_id(index):
    zeros = paddle.zeros(index[-1] + 1, dtype="int32")
    index = index[:-1]
    segments = paddle.scatter(
        zeros, index, paddle.ones_like(
                index, dtype="int32"), overwrite=False)
    segments = paddle.cumsum(segments)[:-1] - 1
    return segments
Example 10
    def set_item(self, tensor_origin, value):

        if not isinstance(value, paddle.fluid.Variable):
            value = paddle.assign(value)
        tensor_type = None

        if tensor_origin.dtype in [
                core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP64
        ]:
            tensor = tensor_origin
        else:
            tensor_type = tensor_origin.dtype
            tensor = tensor_origin.astype(core.VarDesc.VarType.FP32)

        if value.dtype != tensor.dtype:
            value = value.astype(tensor.dtype)

        shape_transpose = self.get_offset_stride(tensor_origin.shape)
        index = paddle.assign(shape_transpose)

        gather_tensor_shape = get_list_index_shape(
            tensor.shape, [len(self.indexes), ] + list(self.indexes[-1].shape))

        value_dims_bd = [1, ] * len(gather_tensor_shape)
        value_dims_bd[-len(value.shape):] = list(value.shape)

        for i in range(len(gather_tensor_shape)):
            if not (value_dims_bd[i] == gather_tensor_shape[i] or
                    value_dims_bd[i] == 1):
                raise ValueError("{} can not broadcast into {}".format(
                    value.shape, gather_tensor_shape))

        value_broadcast = paddle.broadcast_to(value, gather_tensor_shape)

        value_1d = value_broadcast.reshape([-1] + gather_tensor_shape[len(
            index.shape) - 1:])

        index_1d = index.reshape([-1, index.shape[-1]])

        tensor_stride = paddle.assign(
            self.shape_stride(tensor.shape[:index.shape[-1]]))
        inds = []
        for i in range(index_1d.shape[0]):
            temp = (index_1d[i] * tensor_stride).sum()
            inds.append(temp)
        index_1d = paddle.stack(inds).reshape([-1])
        t_reshape = tensor.reshape([-1] + list(tensor.shape[index.shape[-1]:]))
        out = paddle.scatter(t_reshape, index_1d, value_1d)
        if tensor_type is not None:
            out = out.astype(tensor_type)
        tensor_origin[:] = out.reshape(tensor_origin.shape)

        return tensor_origin
Example 11
def index_copy(x: paddorch.Tensor, dim, index, tensor):
    query_key = []
    for k in range(dim):
        query_key.append(None)
    if isinstance(index, Tensor):
        index = index.long()
    query_key.append(index)
    # x[tuple(query_key)]=tensor

    query_key = paddle.concat(query_key)
    y = convertTensor(paddle.scatter(x, query_key, tensor))
    return y
Example 12
def rpn_anchor_target(anchors,
                      gt_boxes,
                      rpn_batch_size_per_im,
                      rpn_positive_overlap,
                      rpn_negative_overlap,
                      rpn_fg_fraction,
                      use_random=True,
                      batch_size=1,
                      weights=[1., 1., 1., 1.]):
    tgt_labels = []
    tgt_bboxes = []

    tgt_deltas = []
    for i in range(batch_size):
        gt_bbox = gt_boxes[i]

        # Step1: match anchor and gt_bbox
        matches, match_labels = label_box(anchors, gt_bbox,
                                          rpn_positive_overlap,
                                          rpn_negative_overlap, True)
        # Step2: sample anchor
        fg_inds, bg_inds = subsample_labels(match_labels,
                                            rpn_batch_size_per_im,
                                            rpn_fg_fraction, 0, use_random)
        # Fill with the ignore label (-1), then set positive and negative labels
        labels = paddle.full(match_labels.shape, -1, dtype='int32')
        labels = paddle.scatter(labels, fg_inds, paddle.ones_like(fg_inds))
        labels = paddle.scatter(labels, bg_inds, paddle.zeros_like(bg_inds))
        # Step3: make output
        matched_gt_boxes = paddle.gather(gt_bbox, matches)

        tgt_delta = bbox2delta(anchors, matched_gt_boxes, weights)
        labels.stop_gradient = True
        matched_gt_boxes.stop_gradient = True
        tgt_delta.stop_gradient = True
        tgt_labels.append(labels)
        tgt_bboxes.append(matched_gt_boxes)
        tgt_deltas.append(tgt_delta)

    return tgt_labels, tgt_bboxes, tgt_deltas
Example 13
def _local_gather(inp, pos, out_batch_size, maybe_overlap=True):
    if pos.shape != [0]:
        origin_dtype = inp.dtype
        inp = paddle.cast(inp, dtype="float32")
        inp_buf = paddle.scatter(paddle.zeros(
            shape=[out_batch_size, inp.shape[-1]], dtype="float32"),
                                 pos,
                                 inp,
                                 overwrite=True)
        inp_buf = paddle.cast(inp_buf, dtype=origin_dtype)
    else:
        inp_buf = paddle.zeros([out_batch_size, inp.shape[-1]],
                               dtype=inp.dtype)
    return inp_buf
Example 14
    def forward(self, input, targets):
        relations, texts, x = input
        node_nums, char_nums = [], []
        for text in texts:
            node_nums.append(text.shape[0])
            char_nums.append(paddle.sum((text > -1).astype(int), axis=-1))

        max_num = max([char_num.max() for char_num in char_nums])
        all_nodes = paddle.concat([
            paddle.concat(
                [text,
                 paddle.zeros((text.shape[0], max_num - text.shape[1]))], -1)
            for text in texts
        ])
        temp = paddle.clip(all_nodes, min=0).astype(int)
        embed_nodes = self.node_embed(temp)
        rnn_nodes, _ = self.rnn(embed_nodes)

        b, h, w = rnn_nodes.shape
        nodes = paddle.zeros([b, w])
        all_nums = paddle.concat(char_nums)
        valid = paddle.nonzero((all_nums > 0).astype(int))
        temp_all_nums = (paddle.gather(all_nums, valid) -
                         1).unsqueeze(-1).unsqueeze(-1)
        temp_all_nums = paddle.expand(temp_all_nums, [
            temp_all_nums.shape[0], temp_all_nums.shape[1], rnn_nodes.shape[-1]
        ])
        temp_all_nodes = paddle.gather(rnn_nodes, valid)
        N, C, A = temp_all_nodes.shape
        one_hot = F.one_hot(temp_all_nums[:, 0, :],
                            num_classes=C).transpose([0, 2, 1])
        one_hot = paddle.multiply(temp_all_nodes,
                                  one_hot.astype("float32")).sum(axis=1,
                                                                 keepdim=True)
        t = one_hot.expand([N, 1, A]).squeeze(1)
        nodes = paddle.scatter(nodes, valid.squeeze(1), t)

        if x is not None:
            nodes = self.fusion([x, nodes])

        all_edges = paddle.concat(
            [rel.reshape([-1, rel.shape[-1]]) for rel in relations])
        embed_edges = self.edge_embed(all_edges.astype('float32'))
        embed_edges = F.normalize(embed_edges)

        for gnn_layer in self.gnn_layers:
            nodes, cat_nodes = gnn_layer(nodes, embed_edges, node_nums)

        node_cls, edge_cls = self.node_cls(nodes), self.edge_cls(cat_nodes)
        return node_cls, edge_cls
Example 15
    def test_dygraph(self):
        for place in self.places:
            with fluid.dygraph.guard(place):
                x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float64)
                index_data = np.array([2, 1, 0, 1]).astype(np.int64)
                updates_data = np.array([[1, 1], [2, 2], [3, 3],
                                         [4, 4]]).astype(np.float64)

                x = fluid.dygraph.to_variable(x_data)
                index = fluid.dygraph.to_variable(index_data)
                updates = fluid.dygraph.to_variable(updates_data)

                output1 = paddle.scatter(x, index, updates, overwrite=False)
                self.assertEqual((output1.numpy() == \
                                  np.array([[3., 3.],[6., 6.],[1., 1.]])).all(), True)
Example 16
    def force_decoding(self, beam_search_output, beam_search_state, trg_word,
                       trg_length, time):
        batch_size = paddle.shape(beam_search_output.predicted_ids)[0]
        beam_size = paddle.shape(beam_search_output.predicted_ids)[1]

        ids_dtype = beam_search_output.predicted_ids.dtype
        scores_dtype = beam_search_output.scores.dtype
        parent_ids = paddle.zeros(shape=[batch_size, 1], dtype=ids_dtype)
        scores = paddle.ones(shape=[batch_size, beam_size],
                             dtype=scores_dtype) * -10e9
        scores = paddle.scatter(
            scores.flatten(),
            paddle.arange(0,
                          batch_size * beam_size,
                          step=beam_size,
                          dtype=scores_dtype),
            paddle.zeros([batch_size])).reshape([batch_size, beam_size])

        force_position = paddle.unsqueeze(trg_length > time, [1])
        # NOTE: When the data type of the input of paddle.tile is bool
        # and it is used in static mode, its stop_gradient must be True.
        force_position.stop_gradient = True
        force_position = paddle.tile(force_position, [1, beam_size])
        crt_trg_word = paddle.slice(trg_word,
                                    axes=[1],
                                    starts=[time],
                                    ends=[time + 1])
        crt_trg_word = paddle.tile(crt_trg_word, [1, beam_size])

        predicted_ids = paddle.where(force_position, crt_trg_word,
                                     beam_search_output.predicted_ids)
        scores = paddle.where(force_position, scores,
                              beam_search_output.scores)
        parent_ids = paddle.where(force_position, parent_ids,
                                  beam_search_output.parent_ids)

        cell_states = beam_search_state.cell_states
        log_probs = paddle.where(force_position, scores,
                                 beam_search_state.log_probs)
        finished = beam_search_state.finished
        lengths = beam_search_state.lengths

        return self.OutputWrapper(scores, predicted_ids,
                                  parent_ids), self.StateWrapper(
                                      cell_states, log_probs, finished,
                                      lengths)
Example 17
def perm_to_Pmat(perm, dim):
    pshape = perm.shape
    bs = int(np.product(perm.shape[:-1]).item())
    perm = perm.reshape((bs, pshape[-1]))
    oneslst = []
    for i in range(bs):
        idlst = np.arange(dim)
        perm_item = perm[i, :]
        for idx, p in enumerate(perm_item - 1):
            temp = idlst[idx]
            idlst[idx] = idlst[p]
            idlst[p] = temp

        ones = paddle.eye(dim)
        nmat = paddle.scatter(ones, paddle.to_tensor(idlst), ones)
        oneslst.append(nmat)
    return np.array(oneslst).reshape(list(pshape[:-1]) + [dim, dim])
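
The scatter-of-identity step above can be illustrated on its own: assuming a 3x3 case, scattering the rows of an identity matrix by a permutation index yields the corresponding permutation matrix.

import paddle

eye = paddle.eye(3)
perm = paddle.to_tensor([2, 0, 1])      # row i of the identity is sent to row perm[i]
print(paddle.scatter(eye, perm, eye))
# [[0., 1., 0.],
#  [0., 0., 1.],
#  [1., 0., 0.]]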
Example 18
    def forward(self, graph_list, feature, m2v_feature, label_y, label_idx):
        m2v_fc = self.input_drop(self.m2v_fc(m2v_feature))
        feature = feature + m2v_fc

        label_embed = self.label_embed(label_y)
        label_embed = self.input_drop(label_embed)
        feature_label = paddle.gather(feature, label_idx)
        label_embed = paddle.concat([label_embed, feature_label], axis=1)
        label_embed = self.label_mlp(label_embed)
        feature = paddle.scatter(feature,
                                 label_idx,
                                 label_embed,
                                 overwrite=True)

        for idx, (sg, sub_index) in enumerate(graph_list):
            temp_feat = []
            skip_feat = paddle.gather(feature, sub_index, axis=0)
            skip_feat = self.skips[idx](skip_feat)
            skip_feat = self.norms[idx][0](skip_feat)
            skip_feat = F.elu(skip_feat)
            temp_feat.append(skip_feat)

            for i in range(self.edge_type):
                masked = sg.edge_feat['edge_type'] == i
                m_sg = self.get_subgraph_by_masked(sg, masked)
                if m_sg is not None:
                    feature_temp = self.gats[idx][i](m_sg, feature)
                    feature_temp = paddle.gather(feature_temp,
                                                 sub_index,
                                                 axis=0)
                    feature_temp = self.norms[idx][i + 1](feature_temp)
                    feature_temp = F.elu(feature_temp)
                    #skip_feat += feature_temp
                    temp_feat.append(feature_temp)
            temp_feat = paddle.stack(temp_feat, axis=1)  # b x 3 x dim
            temp_feat_attn = self.path_attns[idx](temp_feat)  # b x 3 x 1
            temp_feat_attn = F.softmax(temp_feat_attn, axis=1)
            temp_feat_attn = paddle.transpose(temp_feat_attn,
                                              perm=[0, 2, 1])  # b x 1 x 3
            skip_feat = paddle.bmm(temp_feat_attn, temp_feat)[:, 0]
            skip_feat = self.path_norms[idx](skip_feat)
            #feature = F.elu(skip_feat)
            feature = self.dropout(skip_feat)
        output = self.mlp(feature)
        return output
Example 19
    def test_scatter_fp16(self):
        paddle.disable_static(place=paddle.CUDAPlace(0))
        x_tensor = paddle.to_tensor(self.x_np, stop_gradient=False)
        index_tensor = paddle.to_tensor(self.index_np)
        updates_tensor = paddle.to_tensor(self.updates_np, stop_gradient=False)
        out_tensor = paddle.scatter(x_tensor, index_tensor, updates_tensor)
        paddle.autograd.backward([out_tensor],
                                 [paddle.to_tensor(self.dout_np)],
                                 retain_graph=True)
        ref_grad_updates = self.compute_ref_grad_updates()
        np.testing.assert_allclose(ref_grad_updates.numpy(),
                                   updates_tensor.grad.numpy(),
                                   rtol=1e-5,
                                   atol=1e-5)
        np.testing.assert_allclose(self.ref_dx,
                                   x_tensor.grad.numpy(),
                                   rtol=1e-5,
                                   atol=1e-5)
Example 20
def index_fill_(self, dim, index, val):
    x_shape = self.shape
    index_shape = index.shape
    # Keep a handle to the caller's tensor: `self` may be rebound to a transposed
    # copy below, but the final in-place assign must target the original tensor.
    tensor_origin = self
    if dim != 0:
        perm_list = list(range(len(x_shape)))
        while dim < 0:
            dim += len(x_shape)
        perm_list.pop(dim)
        perm_list = [dim] + perm_list
        self = paddle.transpose(self, perm=perm_list)
        s = x_shape.pop(dim)
        x_shape = [s] + x_shape
    updates_shape = index_shape + x_shape[1:]
    updates = paddle.full(updates_shape, fill_value=val, dtype=self.dtype)
    out = paddle.scatter(self, index, updates)
    if dim != 0:
        perm_list = list(range(len(x_shape)))
        perm_list.pop(0)
        perm_list.insert(dim, 0)
        out = paddle.transpose(out, perm=perm_list)
    paddle.assign(out, output=tensor_origin)
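
Assumed usage of the helper above (dim 0, so no transpose is involved): fill rows 0 and 2 of a 4x3 tensor with -1 in place.

import paddle

x = paddle.arange(12, dtype='float32').reshape([4, 3])
index_fill_(x, 0, paddle.to_tensor([0, 2], dtype='int64'), -1.)
print(x)
# [[-1., -1., -1.],
#  [ 3.,  4.,  5.],
#  [-1., -1., -1.],
#  [ 9., 10., 11.]]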
Example 21
    def _get_loss_class(self, logits, gt_class, match_indices, bg_index,
                        num_gts):
        # logits: [b, query, num_classes], gt_class: list[[n, 1]]
        target_label = paddle.full(logits.shape[:2], bg_index, dtype='int64')
        bs, num_query_objects = target_label.shape
        if sum(len(a) for a in gt_class) > 0:
            index, updates = self._get_index_updates(num_query_objects,
                                                     gt_class, match_indices)
            target_label = paddle.scatter(target_label.reshape([-1, 1]), index,
                                          updates.astype('int64'))
            target_label = target_label.reshape([bs, num_query_objects])
        if self.use_focal_loss:
            target_label = F.one_hot(target_label,
                                     self.num_classes + 1)[..., :-1]
        return {
            'loss_class':
            self.loss_coeff['class'] *
            sigmoid_focal_loss(logits, target_label, num_gts /
                               num_query_objects)
            if self.use_focal_loss else F.cross_entropy(
                logits, target_label, weight=self.loss_coeff['class'])
        }
Example 22
    def check_static_result(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(name="input", shape=[3, 2], dtype="float64")
            index = fluid.data(name="index", shape=[4], dtype="int64")
            updates = fluid.data(name="updates", shape=[4, 2], dtype="float64")
            result = paddle.scatter(input, index, updates, False)

            input_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float64)
            index_data = np.array([2, 1, 0, 1]).astype(np.int64)
            updates_data = np.array([[1, 1], [2, 2], [3, 3],
                                     [4, 4]]).astype(np.float64)

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={
                                  "input": input_data,
                                  "index": index_data,
                                  "updates": updates_data
                              },
                              fetch_list=[result])
            self.assertEqual((fetches[0] == \
                              np.array([[3., 3.],[6., 6.],[1., 1.]])).all(), True)
Example 23
def aggr(batch, y, nxt_y, y0, alpha):
    pred = graph.predecessor(batch.numpy())
    self_label = paddle.to_tensor(y[batch.numpy()])
    self_label0 = paddle.to_tensor(y0[batch.numpy()])
    pred_id = []
    for n, p in enumerate(pred):
        if len(p) > 0:
            pred_id.append(np.ones(len(p)) * n)
    pred_cat = np.concatenate(pred)
    pred_id_cat = paddle.to_tensor(np.concatenate(pred_id), dtype="int64")
    pred_cat_pd = paddle.to_tensor(pred_cat)

    pred_label = paddle.to_tensor(y[pred_cat])

    pred_norm = paddle.gather(indegree, pred_cat_pd)
    self_norm = paddle.gather(indegree, paddle.to_tensor(batch, dtype="int64"))

    others = paddle.zeros_like(self_label)
    others = paddle.scatter(others, pred_id_cat, pred_label)
    others = (1 - alpha) * (others + self_label
                            ) * self_norm + alpha * self_label0
    others = others / paddle.sum(others, -1, keepdim=True)
    nxt_y[batch] = others.numpy()
Example 24
        def test_static_graph():
            with paddle.static.program_guard(paddle.static.Program(),
                                             paddle.static.Program()):
                x_t = paddle.static.data(name="x",
                                         dtype=x.dtype,
                                         shape=x.shape)
                index_t = paddle.static.data(name="index",
                                             dtype=index.dtype,
                                             shape=index.shape)
                updates_t = paddle.static.data(name="updates",
                                               dtype=updates.dtype,
                                               shape=updates.shape)
                out_t = paddle.scatter(x_t, index_t, updates_t)
                feed = {
                    x_t.name: x,
                    index_t.name: index,
                    updates_t.name: updates
                }
                fetch = [out_t]

                gpu_exe = paddle.static.Executor(paddle.CUDAPlace(0))
                gpu_value = gpu_exe.run(feed=feed, fetch_list=fetch)[0]
                return gpu_value
Example 25
    def forward(self, hidden, target, keep_order=False):
        assert (hidden.shape[0] == target.shape[0])

        if self.num_clusters == 0:
            logit = self._compute_logits(hidden, self.out_layers_weight[0],
                                         self.out_layers_bias[0],
                                         self.out_projs[0])
            nll = -paddle.log(F.softmax(logit, axis=-1))
            idx = paddle.concat(
                [
                    paddle.arange(0, nll.shape[0]).unsqueeze([1]),
                    target.unsqueeze(1)
                ],
                axis=1)
            nll = paddle.gather_nd(nll, idx)
        else:
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers_weight[0][l_idx:r_idx]
                    bias_i = self.out_layers_bias[0][l_idx:r_idx]
                else:
                    weight_i = self.out_layers_weight[i]
                    bias_i = self.out_layers_bias[i]

                if i == 0:
                    weight_i = paddle.concat(
                        [weight_i, self.cluster_weight], axis=0)
                    bias_i = paddle.concat([bias_i, self.cluster_bias], axis=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[
                0], self.out_projs[0]

            head_logit = self._compute_logits(hidden, head_weight, head_bias,
                                              head_proj)
            head_logprob = paddle.log(F.softmax(head_logit, axis=-1))

            nll = paddle.zeros_like(target, dtype=hidden.dtype)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                mask_i = paddle.cast(
                    target >= l_idx,
                    dtype=paddle.get_default_dtype()) * paddle.cast(
                        target < r_idx, dtype="int64")
                indices_i = paddle.nonzero(mask_i).squeeze([1])

                if paddle.numel(indices_i) == 0:
                    continue
                target_i = paddle.gather(target, indices_i, axis=0) - l_idx
                head_logprob_i = paddle.gather(head_logprob, indices_i, axis=0)
                if i == 0:
                    target_i_idx = paddle.concat(
                        [
                            paddle.arange(0, head_logprob_i.shape[0]).unsqueeze(
                                [1]), target_i.unsqueeze([1])
                        ],
                        axis=1)
                    logprob_i = head_logprob_i.gather_nd(target_i_idx)
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[
                        i], self.out_projs[i].weight if self.out_projs[
                            i] is not None else None

                    hidden_i = paddle.gather(hidden, indices_i, axis=0)

                    tail_logit_i = self._compute_logits(hidden_i, weight_i,
                                                        bias_i, proj_i)
                    tail_logprob_i = paddle.log(
                        F.softmax(
                            tail_logit_i, axis=-1))

                    target_i_idx = paddle.concat(
                        [
                            paddle.arange(0, tail_logprob_i.shape[0]).unsqueeze(
                                [1]), target_i.unsqueeze([1])
                        ],
                        axis=1)
                    logprob_i = tail_logprob_i.gather_nd(target_i_idx)

                    logprob_i = head_logprob_i[:, -i] + logprob_i

                if self.keep_order or keep_order:
                    nll = paddle.scatter(nll, indices_i, -logprob_i)
                else:
                    index = paddle.arange(offset, offset + logprob_i.shape[0],
                                          1)
                    nll = paddle.scatter(nll, index, -logprob_i)

                offset += logprob_i.shape[0]

        return nll
Example 26
 def test_dygraph():
     with fluid.dygraph.guard():
         gpu_out = paddle.scatter(paddle.to_tensor(x),
                                  paddle.to_tensor(index),
                                  paddle.to_tensor(updates))
         return gpu_out.numpy()
Example 27
File: conv.py Project: WenjinW/PGL
    def forward(self, feed_dict):
        g = feed_dict['graph']

        x = g.node_feat["feat"]
        edge_feat = g.edge_feat["feat"]
        h_list = [self.atom_encoder(x)]

        ### virtual node embeddings for graphs
        virtualnode_embedding = self.virtualnode_embedding.expand(
                [g.num_graph, self.virtualnode_embedding.shape[-1]])

        junc_feat = self.junc_embed(feed_dict['junc_graph'].node_feat['feat'])
        junc_feat = paddle.squeeze(junc_feat, axis=1)
        for layer in range(self.num_layers):
            ### add message from virtual nodes to graph nodes
            h_list[layer] = h_list[layer] + paddle.gather(virtualnode_embedding, g.graph_node_id)

            ### Message passing among graph nodes
            h = self.convs[layer](g, h_list[layer], edge_feat)

            h = self.batch_norms[layer](h)
            if layer == self.num_layers - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.swish(h), self.drop_ratio, training = self.training)

            if self.residual:
                h = h + h_list[layer]

            # junction tree aggr
            atom_index = feed_dict['mol2junc'][:, 0]
            junc_index = feed_dict['mol2junc'][:, 1]
            gather_h = paddle.gather(h, atom_index)
            out_dim = gather_h.shape[-1]
            num = feed_dict['junc_graph'].num_nodes
            init_h = paddle.zeros(shape=[num, out_dim], dtype=gather_h.dtype)
            junc_h = paddle.scatter(init_h, junc_index, gather_h, overwrite=False)
            # node feature of junction tree
            junc_h = junc_feat + junc_h

            junc_h = self.junc_convs[layer](feed_dict['junc_graph'], junc_h)

            junc_h = paddle.gather(junc_h, junc_index)
            init_h = paddle.zeros(shape=[feed_dict['graph'].num_nodes, out_dim], dtype=h.dtype)
            sct_h = paddle.scatter(init_h, atom_index, junc_h, overwrite=False)
            h = h + sct_h

            h_list.append(h)

            ### update the virtual nodes
            if layer < self.num_layers - 1:
                ### add message from graph nodes to virtual nodes
                virtualnode_embedding_temp = self.pool(g, h_list[layer]) + virtualnode_embedding
                ### transform virtual nodes using MLP

                if self.residual:
                    virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)

        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            node_representation = 0
            for layer in range(self.num_layers):
                node_representation += h_list[layer]
        
        return node_representation
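
The atom-to-junction pooling above boils down to a scatter with overwrite=False; a toy sketch with assumed sizes (three atoms mapped onto two junction nodes):

import paddle

atom_h = paddle.to_tensor([[1., 1.], [2., 2.], [3., 3.]])   # per-atom features
junc_index = paddle.to_tensor([0, 0, 1], dtype='int64')      # atom -> junction node id
init_h = paddle.zeros([2, 2])
print(paddle.scatter(init_h, junc_index, atom_h, overwrite=False))  # [[3., 3.], [3., 3.]]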
Example 28
    def forward(self, inputs, _index, _updates):
        """
        forward
        """
        x = paddle.scatter(inputs, _index, _updates, overwrite=self.overwrite)
        return x
Example 29
    def test_tensor_patch_method(self):
        paddle.disable_static()
        x_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
        y_np = np.random.uniform(-1, 1, [2, 3]).astype(self.dtype)
        z_np = np.random.uniform(-1, 1, [6, 9]).astype(self.dtype)

        x = paddle.to_tensor(x_np)
        y = paddle.to_tensor(y_np)
        z = paddle.to_tensor(z_np)

        a = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])
        b = paddle.to_tensor([[1, 1], [2, 2], [3, 3]])

        # 1. Unary operation for Tensor
        self.assertEqual(x.dim(), 2)
        self.assertEqual(x.ndimension(), 2)
        self.assertEqual(x.ndim, 2)
        self.assertEqual(x.size, 6)
        self.assertEqual(x.numel(), 6)
        self.assertTrue(np.array_equal(x.exp().numpy(), paddle.exp(x).numpy()))
        self.assertTrue(
            np.array_equal(x.tanh().numpy(),
                           paddle.tanh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.atan().numpy(),
                           paddle.atan(x).numpy()))
        self.assertTrue(np.array_equal(x.abs().numpy(), paddle.abs(x).numpy()))
        m = x.abs()
        self.assertTrue(
            np.array_equal(m.sqrt().numpy(),
                           paddle.sqrt(m).numpy()))
        self.assertTrue(
            np.array_equal(m.rsqrt().numpy(),
                           paddle.rsqrt(m).numpy()))
        self.assertTrue(
            np.array_equal(x.ceil().numpy(),
                           paddle.ceil(x).numpy()))
        self.assertTrue(
            np.array_equal(x.floor().numpy(),
                           paddle.floor(x).numpy()))
        self.assertTrue(np.array_equal(x.cos().numpy(), paddle.cos(x).numpy()))
        self.assertTrue(
            np.array_equal(x.acos().numpy(),
                           paddle.acos(x).numpy()))
        self.assertTrue(
            np.array_equal(x.asin().numpy(),
                           paddle.asin(x).numpy()))
        self.assertTrue(np.array_equal(x.sin().numpy(), paddle.sin(x).numpy()))
        self.assertTrue(
            np.array_equal(x.sinh().numpy(),
                           paddle.sinh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.cosh().numpy(),
                           paddle.cosh(x).numpy()))
        self.assertTrue(
            np.array_equal(x.round().numpy(),
                           paddle.round(x).numpy()))
        self.assertTrue(
            np.array_equal(x.reciprocal().numpy(),
                           paddle.reciprocal(x).numpy()))
        self.assertTrue(
            np.array_equal(x.square().numpy(),
                           paddle.square(x).numpy()))
        self.assertTrue(
            np.array_equal(x.rank().numpy(),
                           paddle.rank(x).numpy()))
        self.assertTrue(
            np.array_equal(x[0].t().numpy(),
                           paddle.t(x[0]).numpy()))
        self.assertTrue(
            np.array_equal(x.asinh().numpy(),
                           paddle.asinh(x).numpy()))
        ### acosh(x) = nan, need to change input
        t_np = np.random.uniform(1, 2, [2, 3]).astype(self.dtype)
        t = paddle.to_tensor(t_np)
        self.assertTrue(
            np.array_equal(t.acosh().numpy(),
                           paddle.acosh(t).numpy()))
        self.assertTrue(
            np.array_equal(x.atanh().numpy(),
                           paddle.atanh(x).numpy()))
        d = paddle.to_tensor([[1.2285208, 1.3491015, 1.4899898],
                              [1.30058, 1.0688717, 1.4928783],
                              [1.0958099, 1.3724753, 1.8926544]])
        d = d.matmul(d.t())
        # ROCM not support cholesky
        if not fluid.core.is_compiled_with_rocm():
            self.assertTrue(
                np.array_equal(d.cholesky().numpy(),
                               paddle.cholesky(d).numpy()))

        self.assertTrue(
            np.array_equal(x.is_empty().numpy(),
                           paddle.is_empty(x).numpy()))
        self.assertTrue(
            np.array_equal(x.isfinite().numpy(),
                           paddle.isfinite(x).numpy()))
        self.assertTrue(
            np.array_equal(
                x.cast('int32').numpy(),
                paddle.cast(x, 'int32').numpy()))
        self.assertTrue(
            np.array_equal(
                x.expand([3, 2, 3]).numpy(),
                paddle.expand(x, [3, 2, 3]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.tile([2, 2]).numpy(),
                paddle.tile(x, [2, 2]).numpy()))
        self.assertTrue(
            np.array_equal(x.flatten().numpy(),
                           paddle.flatten(x).numpy()))
        index = paddle.to_tensor([0, 1])
        self.assertTrue(
            np.array_equal(
                x.gather(index).numpy(),
                paddle.gather(x, index).numpy()))
        index = paddle.to_tensor([[0, 1], [1, 2]])
        self.assertTrue(
            np.array_equal(
                x.gather_nd(index).numpy(),
                paddle.gather_nd(x, index).numpy()))
        self.assertTrue(
            np.array_equal(
                x.reverse([0, 1]).numpy(),
                paddle.reverse(x, [0, 1]).numpy()))
        self.assertTrue(
            np.array_equal(
                a.reshape([3, 2]).numpy(),
                paddle.reshape(a, [3, 2]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.slice([0, 1], [0, 0], [1, 2]).numpy(),
                paddle.slice(x, [0, 1], [0, 0], [1, 2]).numpy()))
        self.assertTrue(
            np.array_equal(
                x.split(2)[0].numpy(),
                paddle.split(x, 2)[0].numpy()))
        m = paddle.to_tensor(
            np.random.uniform(-1, 1, [1, 6, 1, 1]).astype(self.dtype))
        self.assertTrue(
            np.array_equal(
                m.squeeze([]).numpy(),
                paddle.squeeze(m, []).numpy()))
        self.assertTrue(
            np.array_equal(
                m.squeeze([1, 2]).numpy(),
                paddle.squeeze(m, [1, 2]).numpy()))
        m = paddle.to_tensor([2, 3, 3, 1, 5, 3], 'float32')
        self.assertTrue(
            np.array_equal(m.unique()[0].numpy(),
                           paddle.unique(m)[0].numpy()))
        self.assertTrue(
            np.array_equal(
                m.unique(return_counts=True)[1],
                paddle.unique(m, return_counts=True)[1]))
        self.assertTrue(np.array_equal(x.flip([0]), paddle.flip(x, [0])))
        self.assertTrue(np.array_equal(x.unbind(0), paddle.unbind(x, 0)))
        self.assertTrue(np.array_equal(x.roll(1), paddle.roll(x, 1)))
        self.assertTrue(np.array_equal(x.cumsum(1), paddle.cumsum(x, 1)))
        m = paddle.to_tensor(1)
        self.assertTrue(np.array_equal(m.increment(), paddle.increment(m)))
        m = x.abs()
        self.assertTrue(np.array_equal(m.log(), paddle.log(m)))
        self.assertTrue(np.array_equal(x.pow(2), paddle.pow(x, 2)))
        self.assertTrue(np.array_equal(x.reciprocal(), paddle.reciprocal(x)))

        # 2. Binary operation
        self.assertTrue(
            np.array_equal(x.divide(y).numpy(),
                           paddle.divide(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.matmul(y, True, False).numpy(),
                paddle.matmul(x, y, True, False).numpy()))
        self.assertTrue(
            np.array_equal(
                x.norm(p='fro', axis=[0, 1]).numpy(),
                paddle.norm(x, p='fro', axis=[0, 1]).numpy()))
        self.assertTrue(
            np.array_equal(x.dist(y).numpy(),
                           paddle.dist(x, y).numpy()))
        self.assertTrue(
            np.array_equal(x.cross(y).numpy(),
                           paddle.cross(x, y).numpy()))
        m = x.expand([2, 2, 3])
        n = y.expand([2, 2, 3]).transpose([0, 2, 1])
        self.assertTrue(
            np.array_equal(m.bmm(n).numpy(),
                           paddle.bmm(m, n).numpy()))
        self.assertTrue(
            np.array_equal(
                x.histogram(5, -1, 1).numpy(),
                paddle.histogram(x, 5, -1, 1).numpy()))
        self.assertTrue(
            np.array_equal(x.equal(y).numpy(),
                           paddle.equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.greater_equal(y).numpy(),
                paddle.greater_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.greater_than(y).numpy(),
                paddle.greater_than(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.less_equal(y).numpy(),
                paddle.less_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.less_than(y).numpy(),
                paddle.less_than(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.not_equal(y).numpy(),
                paddle.not_equal(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.equal_all(y).numpy(),
                paddle.equal_all(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.allclose(y).numpy(),
                paddle.allclose(x, y).numpy()))
        m = x.expand([2, 2, 3])
        self.assertTrue(
            np.array_equal(
                x.expand_as(m).numpy(),
                paddle.expand_as(x, m).numpy()))
        index = paddle.to_tensor([2, 1, 0])
        self.assertTrue(
            np.array_equal(
                a.scatter(index, b).numpy(),
                paddle.scatter(a, index, b).numpy()))

        # 3. Bool tensor operation
        x = paddle.to_tensor([[True, False], [True, False]])
        y = paddle.to_tensor([[False, False], [False, True]])
        self.assertTrue(
            np.array_equal(
                x.logical_and(y).numpy(),
                paddle.logical_and(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_not(y).numpy(),
                paddle.logical_not(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_or(y).numpy(),
                paddle.logical_or(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_xor(y).numpy(),
                paddle.logical_xor(x, y).numpy()))
        self.assertTrue(
            np.array_equal(
                x.logical_and(y).numpy(),
                paddle.logical_and(x, y).numpy()))
        a = paddle.to_tensor([[1, 2], [3, 4]])
        b = paddle.to_tensor([[4, 3], [2, 1]])
        self.assertTrue(
            np.array_equal(
                x.where(a, b).numpy(),
                paddle.where(x, a, b).numpy()))

        x_np = np.random.randn(3, 6, 9, 7)
        x = paddle.to_tensor(x_np)
        x_T = x.T
        self.assertEqual(x_T.shape, [7, 9, 6, 3])
        self.assertTrue(np.array_equal(x_T.numpy(), x_np.T))

        self.assertTrue(inspect.ismethod(a.dot))
        self.assertTrue(inspect.ismethod(a.logsumexp))
        self.assertTrue(inspect.ismethod(a.multiplex))
        self.assertTrue(inspect.ismethod(a.prod))
        self.assertTrue(inspect.ismethod(a.scale))
        self.assertTrue(inspect.ismethod(a.stanh))
        self.assertTrue(inspect.ismethod(a.add_n))
        self.assertTrue(inspect.ismethod(a.max))
        self.assertTrue(inspect.ismethod(a.maximum))
        self.assertTrue(inspect.ismethod(a.min))
        self.assertTrue(inspect.ismethod(a.minimum))
        self.assertTrue(inspect.ismethod(a.floor_divide))
        self.assertTrue(inspect.ismethod(a.remainder))
        self.assertTrue(inspect.ismethod(a.floor_mod))
        self.assertTrue(inspect.ismethod(a.multiply))
        self.assertTrue(inspect.ismethod(a.logsumexp))
        self.assertTrue(inspect.ismethod(a.inverse))
        self.assertTrue(inspect.ismethod(a.log1p))
        self.assertTrue(inspect.ismethod(a.erf))
        self.assertTrue(inspect.ismethod(a.addmm))
        self.assertTrue(inspect.ismethod(a.clip))
        self.assertTrue(inspect.ismethod(a.trace))
        self.assertTrue(inspect.ismethod(a.kron))
        self.assertTrue(inspect.ismethod(a.isinf))
        self.assertTrue(inspect.ismethod(a.isnan))
        self.assertTrue(inspect.ismethod(a.concat))
        self.assertTrue(inspect.ismethod(a.broadcast_to))
        self.assertTrue(inspect.ismethod(a.scatter_nd_add))
        self.assertTrue(inspect.ismethod(a.scatter_nd))
        self.assertTrue(inspect.ismethod(a.shard_index))
        self.assertTrue(inspect.ismethod(a.chunk))
        self.assertTrue(inspect.ismethod(a.stack))
        self.assertTrue(inspect.ismethod(a.strided_slice))
        self.assertTrue(inspect.ismethod(a.unsqueeze))
        self.assertTrue(inspect.ismethod(a.unstack))
        self.assertTrue(inspect.ismethod(a.argmax))
        self.assertTrue(inspect.ismethod(a.argmin))
        self.assertTrue(inspect.ismethod(a.argsort))
        self.assertTrue(inspect.ismethod(a.masked_select))
        self.assertTrue(inspect.ismethod(a.topk))
        self.assertTrue(inspect.ismethod(a.index_select))
        self.assertTrue(inspect.ismethod(a.nonzero))
        self.assertTrue(inspect.ismethod(a.sort))
        self.assertTrue(inspect.ismethod(a.index_sample))
        self.assertTrue(inspect.ismethod(a.mean))
        self.assertTrue(inspect.ismethod(a.std))
        self.assertTrue(inspect.ismethod(a.numel))
Example 30
import paddle

a = paddle.zeros([1]).astype("float32")
b = paddle.zeros([5]).astype("int32")
c = paddle.arange(5, 10).astype("float32")

d = paddle.scatter(a, b, c, overwrite=False)
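# With overwrite=False the five updates all accumulate at index 0, so this should print a single value of 35.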
print(d)