Example #1
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[3, 2], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')
            res = fluid.data(name="output", shape=[3, 3], dtype="float32")
            y_1 = paddle.mm(x, y, out=res)
            exe = fluid.Executor(fluid.CPUPlace())
            data1 = np.random.rand(3, 2).astype('float32')
            data2 = np.random.rand(2, 3).astype('float32')
            np_res, np_y_1 = exe.run(feed={
                'x': data1,
                'y': data2
            },
                                     fetch_list=[res, y_1])
        self.assertEqual((np_res == np_y_1).all(), True)

        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2], dtype="float32")
            y = fluid.data(name='y', shape=[2], dtype='float32')
            res = fluid.data(name="output", shape=[1], dtype="float32")
            result = paddle.mm(x, y)
            exe = fluid.Executor(fluid.CPUPlace())
            data1 = np.random.rand(2).astype('float32')
            data2 = np.random.rand(2).astype('float32')
            np_res = exe.run(feed={
                'x': data1,
                'y': data2
            },
                             fetch_list=[result])
            expected_result = np.matmul(data1.reshape(1, 2),
                                        data2.reshape(2, 1))

        self.assertEqual((np_res == expected_result).all(), True)
Example #2
 def test_error3():
     with fluid.program_guard(fluid.Program(), fluid.Program()):
         data1 = fluid.data(name="data1",
                            shape=[10, 10, 2],
                            dtype="float32")
         data2 = fluid.data(name="data2",
                            shape=[3, 2, 10],
                            dtype="float32")
         # the batch dimensions (10 vs. 3) do not match, so this call should raise
         paddle.mm(data1, data2)
Example #3
    def forward(self, query, keys, values=None):
        if not values:
            values = keys

        query_t, keys_t, values_t = self.transform_arguments(
            query, keys, values)

        scores = paddle.t(paddle.mm(query_t, keys_t))  # len(key) x len(query)

        distribution = F.softmax(scores, axis=0)  # len(key) x len(query)

        context_vector = paddle.mm(
            values_t, distribution).squeeze()  # value_size x len(query)

        return AttentionResult(scores, distribution, context_vector)
Example #4
def score_query_tokens(previous_query, previous_query_states, scorer):
    scores = paddle.t(paddle.mm(paddle.t(scorer),
                                previous_query_states))  # num_tokens x 1
    if scores.shape[0] != len(previous_query):
        raise ValueError("Got " + str(scores.shape[0]) + " scores for " +
                         str(len(previous_query)) + " query tokens")
    return scores, previous_query
Example #5
def score_schema_tokens(input_schema, schema_states, scorer):
    # schema_states: emb_dim x num_tokens
    scores = paddle.t(paddle.mm(paddle.t(scorer),
                                schema_states))  # num_tokens x 1
    if scores.shape[0] != len(input_schema):
        raise ValueError("Got " + str(scores.shape[0]) + " scores for " +
                         str(len(input_schema)) + " schema tokens")
    return scores, input_schema.column_names_surface_form
Example #6
def pdist(e, squared=False, eps=1e-12):
    e_square = e.pow(2).sum(axis=1)
    prod = paddle.mm(e, e.t())
    res = (e_square.unsqueeze(1) + e_square.unsqueeze(0) -
           2 * prod).clip(min=eps)

    if not squared:
        res = res.sqrt()
    return res
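
A minimal usage sketch for pdist (the tensor below is made up for illustration): it returns the pairwise Euclidean distance matrix of the rows, so the diagonal is numerically zero.

    e = paddle.randn([5, 8])          # 5 embeddings of dimension 8
    dist = pdist(e)                   # shape [5, 5]; dist[i, j] ~ ||e[i] - e[j]||
    dist_sq = pdist(e, squared=True)  # same matrix without the final sqrt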
Example #7
    def forward(self, x, mask=None):
        """
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = self.qkv(x).reshape(
            [B_, N, 3, self.num_heads,
             C // self.num_heads]).transpose([2, 0, 3, 1, 4])
        q, k, v = qkv[0], qkv[1], qkv[2]

        q = q * self.scale
        attn = paddle.mm(q, k.transpose([0, 1, 3, 2]))

        index = self.relative_position_index.reshape([-1])

        relative_position_bias = paddle.index_select(
            self.relative_position_bias_table, index)
        relative_position_bias = relative_position_bias.reshape([
            self.window_size[0] * self.window_size[1],
            self.window_size[0] * self.window_size[1], -1
        ])  # Wh*Ww,Wh*Ww,nH

        relative_position_bias = relative_position_bias.transpose(
            [2, 0, 1])  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.reshape([B_ // nW, nW, self.num_heads, N, N
                                 ]) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.reshape([-1, self.num_heads, N, N])
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        # x = (attn @ v).transpose(1, 2).reshape([B_, N, C])
        x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([B_, N, C])
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
Example #8
 def test_dygraph_without_out(self):
     device = fluid.CPUPlace()
     with fluid.dygraph.guard(device):
         input_array1 = np.random.rand(3, 4).astype("float64")
         input_array2 = np.random.rand(4, 3).astype("float64")
         data1 = fluid.dygraph.to_variable(input_array1)
         data2 = fluid.dygraph.to_variable(input_array2)
         out = paddle.mm(data1, data2)
         expected_result = np.matmul(input_array1, input_array2)
     self.assertTrue(np.allclose(expected_result, out.numpy()))
Example #9
 def test_mm(self):
     with _test_eager_guard():
         np_input = np.random.random([16, 32]).astype('float32')
         np_mat2 = np.random.random([32, 32]).astype('float32')
         input = paddle.to_tensor(np_input)
         mat2 = paddle.to_tensor(np_mat2)
         out = paddle.mm(input, mat2)
         out_arr = out.numpy()
         out_arr_expected = np.matmul(np_input, np_mat2)
         self.assertTrue(np.allclose(out_arr, out_arr_expected))
Example #10
    def prm_exp(self, x):
        # ==== positive random features for gaussian kernels ====
        # x = (B, T, hs)
        # w = (m, hs)
        # returns: (B, T, m)
        # SM(x, y) = E_w[exp(w^T x - |x|^2/2) exp(w^T y - |y|^2/2)]
        # hence we return exp(w^T x - |x|^2/2) / sqrt(m)
        xd = ((x * x).sum(axis=-1, keepdim=True)).tile([1, 1, self.m]) / 2
        wtx = paddle.mm(x, self.w.transpose((1, 0)))

        return paddle.exp(wtx - xd) / math.sqrt(self.m)
Example #11
 def test_dygraph_with_out(self):
     device = fluid.CPUPlace()
     with fluid.dygraph.guard(device):
         input_array1 = np.random.rand(3, 4).astype("float64")
         input_array2 = np.random.rand(4, 3).astype("float64")
         out_array = np.random.rand(3, 3).astype("float64")
         data1 = fluid.dygraph.to_variable(input_array1)
         data2 = fluid.dygraph.to_variable(input_array2)
         paddle_out_holder = fluid.dygraph.to_variable(out_array)
         out = paddle.mm(data1, data2, out=paddle_out_holder)
     self.assertTrue(np.allclose(paddle_out_holder.numpy(), out.numpy()))
Example #12
 def forward(self, prev_hidden, batch_H, char_onehots):
     batch_H_proj = self.i2h(batch_H)
     prev_hidden_proj = paddle.unsqueeze(self.h2h(prev_hidden), axis=1)
     res = paddle.add(batch_H_proj, prev_hidden_proj)
     res = paddle.tanh(res)
     e = self.score(res)
     alpha = F.softmax(e, axis=1)
     alpha = paddle.transpose(alpha, [0, 2, 1])
     context = paddle.squeeze(paddle.mm(alpha, batch_H), axis=1)
     concat_context = paddle.concat([context, char_onehots], 1)
     cur_hidden = self.rnn(concat_context, prev_hidden)
     return cur_hidden, alpha
Example #13
    def test_out(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[3, 2], dtype="float64")
            y = fluid.data(name='y', shape=[2, 3], dtype='float64')
            res = fluid.data(name="output", shape=[3, 3], dtype="float64")
            y_1 = paddle.mm(x, y, out=res)
            exe = fluid.Executor(fluid.CPUPlace())
            data1 = np.random.rand(3, 2)
            data2 = np.random.rand(2, 3)
            np_res, expected_result = exe.run(feed={
                'x': data1,
                'y': data2
            },
                                              fetch_list=[res, y_1])
        self.assertTrue(
            np.allclose(np.array(np_res), np.array(expected_result),
                        atol=1e-5), "two values are\
            {}\n{}, check diff!".format(np_res, expected_result))

        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2], dtype="float64")
            y = fluid.data(name='y', shape=[2], dtype='float64')
            res = fluid.data(name="output", shape=[1], dtype="float64")
            result = paddle.mm(x, y)
            exe = fluid.Executor(fluid.CPUPlace())
            data1 = np.random.rand(2)
            data2 = np.random.rand(2)
            np_res = exe.run(feed={
                'x': data1,
                'y': data2
            },
                             fetch_list=[result])
            expected_result = np.matmul(data1.reshape(1, 2),
                                        data2.reshape(2, 1))

        self.assertTrue(
            np.allclose(np_res, expected_result, atol=1e-5), "two values are\
            {}\n{}, check diff!".format(np_res, expected_result))
Example #14
def compute_pointer_with_align(model, node_type, prev_state, prev_action_emb,
                               parent_h, parent_action_emb, desc_enc):
    """compute_pointer_with_align"""
    new_state, attention_weights = model._update_state(node_type, prev_state,
                                                       prev_action_emb,
                                                       parent_h,
                                                       parent_action_emb,
                                                       desc_enc)
    # output shape: batch (=1) x emb_size
    output = new_state[0]
    memory_pointer_logits = model.pointers[node_type](output, desc_enc.memory)
    memory_pointer_probs = paddle.nn.functional.softmax(memory_pointer_logits,
                                                        axis=1)
    # pointer_logits shape: batch (=1) x num choices
    if node_type == "column":
        pointer_probs = paddle.mm(memory_pointer_probs, desc_enc.m2c_align_mat)
    elif node_type == 'table':
        pointer_probs = paddle.mm(memory_pointer_probs, desc_enc.m2t_align_mat)
    else:  # value
        pointer_probs = paddle.mm(memory_pointer_probs, desc_enc.m2v_align_mat)
    pointer_probs = pointer_probs.clip(min=1e-9)
    pointer_logits = paddle.log(pointer_probs)
    return output, new_state, pointer_logits, attention_weights
Example #15
File: paddle.py  Project: paddlelaw/jina
def euclidean(x_mat: 'tensor',
              y_mat: 'tensor',
              device: str = 'cpu') -> 'numpy.ndarray':
    """Euclidean distance between each row in x_mat and each row in y_mat.

    :param x_mat: paddle tensor with ndim=2
    :param y_mat: paddle tensor with ndim=2
    :param device: the computational device, either `cpu` or `cuda`.
    :return: np.ndarray with ndim=2
    """
    paddle.set_device(device)

    return paddle.sqrt(
        paddle.sum(y_mat**2, axis=1) + paddle.sum(x_mat**2, axis=1)[:, None] -
        2 * paddle.mm(x_mat, y_mat.transpose(perm=[1, 0]))).numpy()
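
A hedged usage sketch (shapes are illustrative): entry d[i, j] is the Euclidean distance between row i of x_mat and row j of y_mat.

    x = paddle.rand([4, 8])
    y = paddle.rand([6, 8])
    d = euclidean(x, y)  # numpy.ndarray with shape (4, 6)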
Example #16
 def forward(self, feature, label):
     cos_theta = paddle.mm(F.normalize(feature, axis=1),
                           F.normalize(self.weight, axis=0))
     sin_theta = paddle.sqrt(
         paddle.clip(1.0 - paddle.pow(cos_theta, 2), min=0, max=1))
     cos_theta_m = cos_theta * self.cos_m - sin_theta * self.sin_m
     cos_theta_m = paddle.where(cos_theta > self.threshold, cos_theta_m,
                                cos_theta - self.mm)
     one_hot = paddle.nn.functional.one_hot(label, self.class_dim)
     output = (one_hot * cos_theta_m) + (paddle.abs(
         (1.0 - one_hot)) * cos_theta)
     output *= self.s
      # Simple classification approach; the learning rate needs to be set to 0.1
     # cosine = self.cosine_sim(feature, self.weight)
     # one_hot = paddle.nn.functional.one_hot(label, self.class_dim)
     # output = self.s * (cosine - one_hot * self.m)
     return output
Example #17
File: paddle.py  Project: paddlelaw/jina
def cosine(x_mat: 'tensor',
           y_mat: 'tensor',
           eps: float = 1e-7,
           device: str = 'cpu') -> 'numpy.ndarray':
    """Cosine distance between each row in x_mat and each row in y_mat.

    :param x_mat: paddle tensor with ndim=2
    :param y_mat: paddle tensor with ndim=2
    :param eps: a small jitter to avoid divide by zero
    :param device: the computational device, either `cpu` or `cuda`.
    :return: np.ndarray with ndim=2
    """
    paddle.set_device(device)

    a_n, b_n = x_mat.norm(axis=1)[:, None], y_mat.norm(axis=1)[:, None]
    a_norm = x_mat / paddle.clip(a_n, min=eps)
    b_norm = y_mat / paddle.clip(b_n, min=eps)
    sim_mt = 1 - paddle.mm(a_norm, b_norm.transpose(perm=[1, 0]))
    return sim_mt.numpy()
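
A hedged usage sketch (shapes are illustrative). Note that the function returns a cosine distance, so rows pointing in the same direction map to 0 and opposite rows map to 2.

    x = paddle.rand([4, 8])
    y = paddle.rand([6, 8])
    d = cosine(x, y)  # numpy.ndarray with shape (4, 6), values in [0, 2]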
Example #18
        def test_out(self):
            with fluid.program_guard(fluid.Program()):
                x = fluid.data(name="x", shape=[2], dtype=self.in_type)
                y = fluid.data(name='y', shape=[2], dtype=self.in_type)
                res = fluid.data(name="output", shape=[1], dtype=self.in_type)
                result = paddle.mm(x, y)
                exe = fluid.Executor(fluid.XPUPlace(0))
                data1 = np.random.rand(2).astype(self.in_type)
                data2 = np.random.rand(2).astype(self.in_type)
                np_res = exe.run(feed={
                    'x': data1,
                    'y': data2
                },
                                 fetch_list=[result])
                expected_result = np.matmul(data1.reshape(1, 2),
                                            data2.reshape(2, 1))

                self.assertTrue(
                    np.allclose(np_res, expected_result, atol=1e-3),
                    "two values are\
                    {}\n{}, check diff!".format(np_res, expected_result))
Example #19
def linreg(X, w, b):
    return paddle.mm(X, w) + b
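
A minimal sketch of the linear-regression forward pass y = Xw + b (all tensors below are made up):

    X = paddle.rand([16, 3])   # 16 samples, 3 features
    w = paddle.rand([3, 1])    # weight column vector
    b = paddle.zeros([1])      # bias, broadcast across all rows
    y_hat = linreg(X, w, b)    # predictions, shape [16, 1]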
Example #20
 def cosine_sim(feature, weight, eps=1e-8):
     ip = paddle.mm(feature, weight)
     w1 = paddle.norm(feature, 2, axis=1).unsqueeze(1)
     w2 = paddle.norm(weight, 2, axis=0).unsqueeze(0)
     outer = paddle.matmul(w1, w2)
     return ip / outer.clip(min=eps)
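
A hedged usage sketch (shapes are illustrative): with features as rows and class weights as columns, the result is the batch-by-class cosine-similarity matrix.

    feature = paddle.rand([8, 64])      # 8 samples, 64-dim embeddings
    weight = paddle.rand([64, 10])      # 10 class centers as columns
    sims = cosine_sim(feature, weight)  # shape [8, 10], values in [-1, 1]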
Example #21
    def __call__(self,
                 seg_preds,
                 seg_masks,
                 cate_labels,
                 cate_scores,
                 sum_masks=None):
        # sort and keep top nms_pre
        sort_inds = self._sort_score(cate_scores, self.pre_nms_top_n)
        seg_masks = paddle.gather(seg_masks, index=sort_inds)
        seg_preds = paddle.gather(seg_preds, index=sort_inds)
        sum_masks = paddle.gather(sum_masks, index=sort_inds)
        cate_scores = paddle.gather(cate_scores, index=sort_inds)
        cate_labels = paddle.gather(cate_labels, index=sort_inds)

        seg_masks = paddle.flatten(seg_masks, start_axis=1, stop_axis=-1)
        # inter.
        inter_matrix = paddle.mm(seg_masks,
                                 paddle.transpose(seg_masks, [1, 0]))
        n_samples = paddle.shape(cate_labels)
        # union.
        sum_masks_x = paddle.expand(sum_masks, shape=[n_samples, n_samples])
        # iou.
        iou_matrix = (inter_matrix /
                      (sum_masks_x + paddle.transpose(sum_masks_x, [1, 0]) -
                       inter_matrix))
        iou_matrix = paddle.triu(iou_matrix, diagonal=1)
        # label_specific matrix.
        cate_labels_x = paddle.expand(cate_labels,
                                      shape=[n_samples, n_samples])
        label_matrix = paddle.cast(
            (cate_labels_x == paddle.transpose(cate_labels_x, [1, 0])),
            'float32')
        label_matrix = paddle.triu(label_matrix, diagonal=1)

        # IoU compensation
        compensate_iou = paddle.max((iou_matrix * label_matrix), axis=0)
        compensate_iou = paddle.expand(compensate_iou,
                                       shape=[n_samples, n_samples])
        compensate_iou = paddle.transpose(compensate_iou, [1, 0])

        # IoU decay
        decay_iou = iou_matrix * label_matrix

        # matrix nms
        if self.kernel == 'gaussian':
            decay_matrix = paddle.exp(-1 * self.sigma * (decay_iou**2))
            compensate_matrix = paddle.exp(-1 * self.sigma *
                                           (compensate_iou**2))
            decay_coefficient = paddle.min(decay_matrix / compensate_matrix,
                                           axis=0)
        elif self.kernel == 'linear':
            decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
            decay_coefficient = paddle.min(decay_matrix, axis=0)
        else:
            raise NotImplementedError

        # update the score.
        cate_scores = cate_scores * decay_coefficient
        y = paddle.zeros(shape=paddle.shape(cate_scores), dtype='float32')
        keep = paddle.where(cate_scores >= self.update_threshold, cate_scores,
                            y)
        keep = paddle.nonzero(keep)
        keep = paddle.squeeze(keep, axis=[1])
        # Prevent empty and increase fake data
        keep = paddle.concat(
            [keep,
             paddle.cast(paddle.shape(cate_scores)[0] - 1, 'int64')])

        seg_preds = paddle.gather(seg_preds, index=keep)
        cate_scores = paddle.gather(cate_scores, index=keep)
        cate_labels = paddle.gather(cate_labels, index=keep)

        # sort and keep top_k
        sort_inds = self._sort_score(cate_scores, self.post_nms_top_n)
        seg_preds = paddle.gather(seg_preds, index=sort_inds)
        cate_scores = paddle.gather(cate_scores, index=sort_inds)
        cate_labels = paddle.gather(cate_labels, index=sort_inds)
        return seg_preds, cate_scores, cate_labels
Example #22
 def sim(self, zi, zj):
     # zi = F.normalize(zi)
     # zj = F.normalize(zj)
     zi = zi / paddle.sqrt((zi * zi).sum(1)).unsqueeze(1)
     zj = zj / paddle.sqrt((zj * zj).sum(1)).unsqueeze(1)
     return paddle.mm(zi, zj.t())
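
A hedged usage sketch, assuming `model` is an instance of the enclosing module: since both inputs are row-normalized first, the result is a cosine-similarity matrix.

    zi = paddle.rand([4, 16])
    zj = paddle.rand([4, 16])
    s = model.sim(zi, zj)  # shape [4, 4]; s[i, j] = cos(zi[i], zj[j])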