Example #1
def bev_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box encode for VoxelNet
    Args:
        boxes ([N, 7] Tensor): normal boxes: x, y, z, l, w, h, r
        anchors ([N, 7] Tensor): anchors
    """
    xa, ya, wa, la, ra = paddle.split(anchors, 5, axis=-1)
    xg, yg, wg, lg, rg = paddle.split(boxes, 5, axis=-1)
    diagonal = paddle.sqrt(la**2 + wa**2)
    xt = (xg - xa) / diagonal
    yt = (yg - ya) / diagonal
    if smooth_dim:
        lt = lg / la - 1
        wt = wg / wa - 1
    else:
        lt = paddle.log(lg / la)
        wt = paddle.log(wg / wa)
    if encode_angle_to_vector:
        rgx = paddle.cos(rg)
        rgy = paddle.sin(rg)
        rax = paddle.cos(ra)
        ray = paddle.sin(ra)
        rtx = rgx - rax
        rty = rgy - ray
        return paddle.concat([xt, yt, wt, lt, rtx, rty], axis=-1)
    else:
        rt = rg - ra
        return paddle.concat([xt, yt, wt, lt, rt], axis=-1)
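A minimal usage sketch (values are made up; sizes must stay positive for the log branch):

import paddle

boxes = paddle.rand([3, 5]) + 0.5      # (x, y, w, l, r)
anchors = paddle.rand([3, 5]) + 0.5
targets = bev_box_encode(boxes, anchors)
print(targets.shape)                   # [3, 5] ([3, 6] with encode_angle_to_vector=True)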
Example #2
    def __measure_parameterized(self, state, which_qubits, result_desired,
                                theta):
        r"""进行参数化的测量。

        Args:
            state (Tensor): 输入的量子态
            which_qubits (list): 测量作用的量子比特编号
            result_desired (str): 期望得到的测量结果
            theta (Tensor): 测量运算的参数

        Returns:
            Tensor: 测量坍塌后的量子态
            Tensor:测量坍塌得到的概率
            str: 测量得到的结果
        """
        n = self.get_qubit_number()
        assert len(which_qubits) == len(result_desired), \
            "the number of qubits to measure must equal the length of result_desired"
        op_list = [paddle.to_tensor(np.eye(2, dtype=np.complex128))] * n
        for idx in range(0, len(which_qubits)):
            i = which_qubits[idx]
            ele = result_desired[idx]
            if int(ele) == 0:
                basis0 = paddle.to_tensor(
                    np.array([[1, 0], [0, 0]], dtype=np.complex128))
                basis1 = paddle.to_tensor(
                    np.array([[0, 0], [0, 1]], dtype=np.complex128))
                rho0 = multiply(basis0, cos(theta[idx]))
                rho1 = multiply(basis1, sin(theta[idx]))
                rho = add(rho0, rho1)
                op_list[i] = rho
            elif int(ele) == 1:
                basis0 = paddle.to_tensor(
                    np.array([[1, 0], [0, 0]], dtype=np.complex128))
                basis1 = paddle.to_tensor(
                    np.array([[0, 0], [0, 1]], dtype=np.complex128))
                rho0 = multiply(basis0, sin(theta[idx]))
                rho1 = multiply(basis1, cos(theta[idx]))
                rho = add(rho0, rho1)
                op_list[i] = rho
            else:
                raise ValueError("result_desired must be a string of '0's and '1's")
        measure_operator = op_list[0]
        if n > 1:
            for idx in range(1, len(op_list)):
                measure_operator = kron(measure_operator, op_list[idx])
        state_measured = matmul(matmul(measure_operator, state),
                                dagger(measure_operator))
        prob = real(
            trace(
                matmul(matmul(dagger(measure_operator), measure_operator),
                       state)))
        state_measured = divide(state_measured, prob)
        return state_measured, prob, result_desired
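To make the operator algebra concrete, here is a standalone single-qubit version of the same computation, with the dagger written inline as a conjugate transpose (the state and angle here are illustrative, not part of the class):

import numpy as np
import paddle

theta = np.pi / 3
plus = np.array([[1], [1]], dtype=np.complex128) / np.sqrt(2)
state = paddle.to_tensor(plus @ plus.conj().T)          # rho = |+><+|
# M = cos(theta)|0><0| + sin(theta)|1><1|, i.e. the ele == 0 branch above
M = paddle.to_tensor(
    np.diag([np.cos(theta), np.sin(theta)]).astype(np.complex128))
prob = paddle.real(paddle.trace(
    paddle.matmul(paddle.matmul(M.conj().t(), M), state)))
state_measured = paddle.matmul(paddle.matmul(M, state),
                               M.conj().t()) / float(prob)
print(float(prob))  # 0.5, since (cos^2 + sin^2) / 2 = 0.5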
Example #3
def compute_rot_loss(output, target_bin, target_res, mask):
    # output: (B, 128, 8) [bin1_cls[0], bin1_cls[1], bin1_sin, bin1_cos,
    #                 bin2_cls[0], bin2_cls[1], bin2_sin, bin2_cos]
    # target_bin: (B, 128, 2) [bin1_cls, bin2_cls]
    # target_res: (B, 128, 2) [bin1_res, bin2_res]
    # mask: (B, 128, 1)

    output = output.reshape([-1, 8])
    target_bin = target_bin.reshape([-1, 2])
    target_res = target_res.reshape([-1, 2])
    mask = mask.reshape([-1, 1])
    loss_bin1 = compute_bin_loss(output[:, 0:2], target_bin[:, 0], mask)
    loss_bin2 = compute_bin_loss(output[:, 4:6], target_bin[:, 1], mask)
    loss_res = paddle.zeros_like(loss_bin1, dtype='float32')
    if target_bin[:, 0].nonzero().shape[0] > 0:
        idx1 = target_bin[:, 0].nonzero()[:, 0]
        valid_output1 = paddle.index_select(output, idx1.cast('int32'), 0)
        valid_target_res1 = paddle.index_select(target_res, idx1.cast('int32'),
                                                0)
        loss_sin1 = compute_res_loss(valid_output1[:, 2],
                                     paddle.sin(valid_target_res1[:, 0]))
        loss_cos1 = compute_res_loss(valid_output1[:, 3],
                                     paddle.cos(valid_target_res1[:, 0]))
        loss_res += loss_sin1 + loss_cos1
    if target_bin[:, 1].nonzero().shape[0] > 0:
        idx2 = target_bin[:, 1].nonzero()[:, 0]
        valid_output2 = paddle.index_select(output, idx2.cast('int32'), 0)
        valid_target_res2 = paddle.index_select(target_res, idx2.cast('int32'),
                                                0)
        loss_sin2 = compute_res_loss(valid_output2[:, 6],
                                     paddle.sin(valid_target_res2[:, 1]))
        loss_cos2 = compute_res_loss(valid_output2[:, 7],
                                     paddle.cos(valid_target_res2[:, 1]))
        loss_res += loss_sin2 + loss_cos2
    return loss_bin1 + loss_bin2 + loss_res
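compute_bin_loss and compute_res_loss are referenced but not shown here; in CenterNet-style ports they are typically a masked cross-entropy and a smooth-L1 loss. A minimal sketch of what they usually look like (assumed for illustration, not taken verbatim from this codebase):

import paddle
import paddle.nn.functional as F

def compute_bin_loss(output, target, mask):
    # zero out logits of masked-out rows before averaging
    mask = mask.expand_as(output).astype(output.dtype)
    output = output * mask
    return F.cross_entropy(output, target.astype('int64'), reduction='mean')

def compute_res_loss(output, target):
    return F.smooth_l1_loss(output, target, reduction='mean')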
Example #4
def delta2rbox(rrois,
               deltas,
               means=[0., 0., 0., 0., 0.],
               stds=[1., 1., 1., 1., 1.],
               wh_ratio_clip=1e-6):
    """
    :param rrois: (cx, cy, w, h, theta)
    :param deltas: (dx, dy, dw, dh, dtheta)
    :param means:
    :param stds:
    :param wh_ratio_clip:
    :return:
    """
    means = paddle.to_tensor(means)
    stds = paddle.to_tensor(stds)
    deltas = paddle.reshape(deltas, [-1, deltas.shape[-1]])
    denorm_deltas = deltas * stds + means

    dx = denorm_deltas[:, 0]
    dy = denorm_deltas[:, 1]
    dw = denorm_deltas[:, 2]
    dh = denorm_deltas[:, 3]
    dangle = denorm_deltas[:, 4]

    max_ratio = np.abs(np.log(wh_ratio_clip))
    dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)
    dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)

    rroi_x = rrois[:, 0]
    rroi_y = rrois[:, 1]
    rroi_w = rrois[:, 2]
    rroi_h = rrois[:, 3]
    rroi_angle = rrois[:, 4]

    gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(
        rroi_angle) + rroi_x
    gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(
        rroi_angle) + rroi_y
    gw = rroi_w * dw.exp()
    gh = rroi_h * dh.exp()
    ga = np.pi * dangle + rroi_angle
    ga = (ga + np.pi / 4) % np.pi - np.pi / 4

    gw = gw.astype('float32')
    gh = gh.astype('float32')
    bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1)
    return bboxes
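A minimal sanity check: with zero deltas, decoding should reproduce the input RoI (values are illustrative):

import paddle

rrois = paddle.to_tensor([[50., 50., 20., 10., 0.3]])
deltas = paddle.zeros([1, 5])
print(delta2rbox(rrois, deltas))
# ~[[50., 50., 20., 10., 0.3]]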
Example #5
def rotation_3d_in_axis(points, angles, axis=0):
    # points: [N, point_size, 3]
    # angles: [N]
    rot_sin = paddle.sin(angles)
    rot_cos = paddle.cos(angles)
    ones = paddle.ones_like(rot_cos)
    zeros = paddle.zeros_like(rot_cos)
    if axis == 1:
        rot_mat_T = paddle.stack([
            paddle.stack([rot_cos, zeros, -rot_sin]),
            paddle.stack([zeros, ones, zeros]),
            paddle.stack([rot_sin, zeros, rot_cos])
        ])
    elif axis == 2 or axis == -1:
        rot_mat_T = paddle.stack([
            paddle.stack([rot_cos, -rot_sin, zeros]),
            paddle.stack([rot_sin, rot_cos, zeros]),
            paddle.stack([zeros, zeros, ones])
        ])
    elif axis == 0:
        rot_mat_T = paddle.stack([
            paddle.stack([zeros, rot_cos, -rot_sin]),
            paddle.stack([zeros, rot_sin, rot_cos]),
            paddle.stack([ones, zeros, zeros])
        ])
    else:
        raise ValueError("axis must be one of 0, 1, 2 or -1")

    return paddle.einsum('aij,jka->aik', points, rot_mat_T)
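For instance, rotating the x-axis unit vector by 90° about the z-axis (axis=2) follows the same clockwise convention as rotation_2d in Example #22:

import math
import paddle

points = paddle.to_tensor([[[1., 0., 0.]]])   # [N=1, point_size=1, 3]
angles = paddle.to_tensor([math.pi / 2])
print(rotation_3d_in_axis(points, angles, axis=2))
# ~[[[0., -1., 0.]]] : clockwise about the z-axis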
Example #6
def bev_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):
    """box decode for VoxelNet in lidar
    Args:
        boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r
        anchors ([N, 7] Tensor): anchors
    """
    xa, ya, wa, la, ra = paddle.split(anchors, 5, axis=-1)
    if encode_angle_to_vector:
        xt, yt, wt, lt, rtx, rty = paddle.split(box_encodings, 6, axis=-1)
    else:
        xt, yt, wt, lt, rt = paddle.split(box_encodings, 5, axis=-1)

    diagonal = paddle.sqrt(la**2 + wa**2)
    xg = xt * diagonal + xa
    yg = yt * diagonal + ya
    if smooth_dim:
        lg = (lt + 1) * la
        wg = (wt + 1) * wa
    else:
        lg = paddle.exp(lt) * la
        wg = paddle.exp(wt) * wa
    if encode_angle_to_vector:
        rax = paddle.cos(ra)
        ray = paddle.sin(ra)
        rgx = rtx + rax
        rgy = rty + ray
        rg = paddle.atan2(rgy, rgx)
    else:
        rg = rt + ra
    return paddle.concat([xg, yg, wg, lg, rg], axis=-1)
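A round-trip sanity check against bev_box_encode from Example #1 (assuming both functions are in scope):

import paddle

boxes = paddle.rand([4, 5]) + 0.5      # (x, y, w, l, r) with positive sizes
anchors = paddle.rand([4, 5]) + 0.5
enc = bev_box_encode(boxes, anchors)
print(paddle.allclose(bev_box_decode(enc, anchors), boxes))  # True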
Example #7
    def get_neg_score(self,
                      heads,
                      relations,
                      tails,
                      batch_size,
                      mini_batch_size,
                      neg_sample_size,
                      neg_head=True):
        mini_batch_num = int(batch_size / mini_batch_size)
        if neg_head:
            hidden_dim = heads.shape[-1]
            re_tail, im_tail = paddle.chunk(tails, chunks=2, axis=-1)

            phase_rel = relations / (self.emb_init / np.pi)
            re_rel, im_rel = paddle.cos(phase_rel), paddle.sin(phase_rel)
            real = re_tail * re_rel + im_tail * im_rel
            imag = -re_tail * im_rel + im_tail * re_rel

            emb_complex = paddle.concat([real, imag], axis=-1)
            score = emb_complex.reshape([mini_batch_num, -1, 1, hidden_dim])
            heads = heads.reshape([mini_batch_num, 1, -1, hidden_dim])
            score = score - heads
            # split into real/imag halves along the embedding axis
            re_score, im_score = paddle.chunk(score, chunks=2, axis=-1)
            score = paddle.stack([re_score, im_score],
                                 axis=-1).norm(p=2, axis=-1)
            return self.gamma - score.sum(-1)
        else:
            hidden_dim = heads.shape[-1]
            re_head, im_head = paddle.chunk(heads, chunks=2, axis=-1)

            phase_rel = relations / (self.emb_init / np.pi)
            re_rel, im_rel = paddle.cos(phase_rel), paddle.sin(phase_rel)
            real = re_head * re_rel - im_head * im_rel
            imag = re_head * im_rel + im_head * re_rel

            emb_complex = paddle.concat([real, imag], axis=-1)
            score = emb_complex.reshape([mini_batch_num, -1, 1, hidden_dim])
            tails = tails.reshape([mini_batch_num, 1, -1, hidden_dim])
            score = score - tails
            # split into real/imag halves along the embedding axis
            re_score, im_score = paddle.chunk(score, chunks=2, axis=-1)
            score = paddle.stack([re_score, im_score],
                                 axis=-1).norm(p=2, axis=-1)
            return self.gamma - score.sum(-1)
Example #8
    def create_matrix(self):
        cs = paddle.cos(self.theta / 2)
        sn = paddle.sin(self.theta / 2)
        self.matrix = paddle.concat([
            paddle.concat([cs, -sn], axis=1),
            paddle.concat([sn, cs], axis=1),
        ], axis=0)
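For the concatenation above to produce a 2×2 matrix, self.theta must have shape [1, 1]; a standalone sketch:

import paddle

theta = paddle.to_tensor([[0.5]])                 # shape [1, 1]
cs, sn = paddle.cos(theta / 2), paddle.sin(theta / 2)
ry = paddle.concat([paddle.concat([cs, -sn], axis=1),
                    paddle.concat([sn, cs], axis=1)], axis=0)
print(ry.shape)  # [2, 2] -- the real rotation matrix R_y(theta)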
Example #9
def u_gate_matrix(params):
    """
    U3
    :param params:
    :return:
    """
    theta, phi, lam = params

    if (type(theta) is paddle.Tensor and type(phi) is paddle.Tensor
            and type(lam) is paddle.Tensor):
        re_a = paddle.cos(theta / 2)
        re_b = -paddle.cos(lam) * paddle.sin(theta / 2)
        re_c = paddle.cos(phi) * paddle.sin(theta / 2)
        re_d = paddle.cos(phi + lam) * paddle.cos(theta / 2)
        im_a = paddle.zeros([1], 'float64')
        im_b = -paddle.sin(lam) * paddle.sin(theta / 2)
        im_c = paddle.sin(phi) * paddle.sin(theta / 2)
        im_d = paddle.sin(phi + lam) * paddle.cos(theta / 2)

        re = paddle.reshape(paddle.concat([re_a, re_b, re_c, re_d]), [2, 2])
        im = paddle.reshape(paddle.concat([im_a, im_b, im_c, im_d]), [2, 2])

        return re + im * paddle.to_tensor([1j], 'complex128')
    elif (type(theta) is float and type(phi) is float and type(lam) is float):
        return np.array(
            [[np.cos(theta / 2), -np.exp(1j * lam) * np.sin(theta / 2)],
             [
                 np.exp(1j * phi) * np.sin(theta / 2),
                 np.exp(1j * phi + 1j * lam) * np.cos(theta / 2)
             ]])
    else:
        raise TypeError("params must be three floats or three paddle Tensors")
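A quick check that the two branches agree (three made-up angles):

import numpy as np
import paddle

params = (0.3, 0.5, 0.7)
m_np = u_gate_matrix(params)
m_pd = u_gate_matrix([paddle.to_tensor([p], 'float64') for p in params])
print(np.allclose(m_np, m_pd.numpy()))  # True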
Example #10
    def delta2rbox(self, rrois, deltas, wh_ratio_clip=1e-6):
        """
        :param rrois: (cx, cy, w, h, theta)
        :param deltas: (dx, dy, dw, dh, dtheta)
        :param means: means of anchor
        :param stds: stds of anchor
        :param wh_ratio_clip: clip threshold of wh_ratio
        :return:
        """
        deltas = paddle.reshape(deltas, [-1, 5])
        rrois = paddle.reshape(rrois, [-1, 5])
        # fix dy2st bug denorm_deltas = deltas * self.stds + self.means
        denorm_deltas = paddle.add(
            paddle.multiply(deltas, self.stds), self.means)

        dx = denorm_deltas[:, 0]
        dy = denorm_deltas[:, 1]
        dw = denorm_deltas[:, 2]
        dh = denorm_deltas[:, 3]
        dangle = denorm_deltas[:, 4]
        max_ratio = np.abs(np.log(wh_ratio_clip))
        dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)
        dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)

        rroi_x = rrois[:, 0]
        rroi_y = rrois[:, 1]
        rroi_w = rrois[:, 2]
        rroi_h = rrois[:, 3]
        rroi_angle = rrois[:, 4]

        gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(
            rroi_angle) + rroi_x
        gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(
            rroi_angle) + rroi_y
        gw = rroi_w * dw.exp()
        gh = rroi_h * dh.exp()
        ga = np.pi * dangle + rroi_angle
        ga = (ga + np.pi / 4) % np.pi - np.pi / 4
        gw = gw.astype('float32')
        gh = gh.astype('float32')
        bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1)
        return bboxes
Example #11
    def xywhr2xyrs(self, xywhr):
        xywhr = paddle.reshape(xywhr, [-1, 5])
        xy = xywhr[:, :2]
        wh = paddle.clip(xywhr[:, 2:4], min=1e-7, max=1e7)
        r = xywhr[:, 4]
        cos_r = paddle.cos(r)
        sin_r = paddle.sin(r)
        R = paddle.stack(
            (cos_r, -sin_r, sin_r, cos_r), axis=-1).reshape([-1, 2, 2])
        S = 0.5 * paddle.nn.functional.diag_embed(wh)
        return xy, R, S
Example #12
    def positional_embedding(self, inputs):
        seq_len = inputs.shape[1]
        pos_seq = paddle.arange(0, seq_len, dtype=dtype_float)
        indices = paddle.arange(0, self.head_dim, 2, dtype=dtype_float)
        indices = 1 / 10000**(indices / self.head_dim)
        sinusoid_inp = paddle.einsum("i,d->id", pos_seq, indices)
        pos_emb = paddle.concat(
            [paddle.sin(sinusoid_inp),
             paddle.cos(sinusoid_inp)], axis=-1)
        pos_emb = paddle.reshape(pos_emb, (1, 1, seq_len, self.head_dim))
        pos_emb.stop_gradient = True
        return pos_emb
Example #13
    def delta2rbox(self, rrois, deltas, means, stds, wh_ratio_clip=1e-6):
        """
        :param rrois: (cx, cy, w, h, theta)
        :param deltas: (dx, dy, dw, dh, dtheta)
        :param means: means of anchor
        :param stds: stds of anchor
        :param wh_ratio_clip: clip threshold of wh_ratio
        :return:
        """
        deltas = paddle.reshape(deltas, [-1, 5])
        rrois = paddle.reshape(rrois, [-1, 5])
        pd_means = paddle.ones(shape=[5]) * means
        pd_stds = paddle.ones(shape=[5]) * stds
        denorm_deltas = deltas * pd_stds + pd_means

        dx = denorm_deltas[:, 0]
        dy = denorm_deltas[:, 1]
        dw = denorm_deltas[:, 2]
        dh = denorm_deltas[:, 3]
        dangle = denorm_deltas[:, 4]
        max_ratio = np.abs(np.log(wh_ratio_clip))
        dw = paddle.clip(dw, min=-max_ratio, max=max_ratio)
        dh = paddle.clip(dh, min=-max_ratio, max=max_ratio)

        rroi_x = rrois[:, 0]
        rroi_y = rrois[:, 1]
        rroi_w = rrois[:, 2]
        rroi_h = rrois[:, 3]
        rroi_angle = rrois[:, 4]

        gx = dx * rroi_w * paddle.cos(rroi_angle) - dy * rroi_h * paddle.sin(
            rroi_angle) + rroi_x
        gy = dx * rroi_w * paddle.sin(rroi_angle) + dy * rroi_h * paddle.cos(
            rroi_angle) + rroi_y
        gw = rroi_w * dw.exp()
        gh = rroi_h * dh.exp()
        ga = np.pi * dangle + rroi_angle
        ga = (ga + np.pi / 4) % np.pi - np.pi / 4
        bboxes = paddle.stack([gx, gy, gw, gh, ga], axis=-1)
        return bboxes
Example #14
def get_sinusoid_encoding(n_position, feat_dim, wave_length=10000):
    # [n_position]
    positions = paddle.arange(0, n_position).astype("float32")
    # [feat_dim]
    dim_range = paddle.arange(0, feat_dim).astype("float32")
    # wave_length ** (2 * (i // 2) / feat_dim); paddle.pow needs a Tensor base
    dim_range = paddle.pow(
        paddle.to_tensor(wave_length, dtype="float32"),
        2.0 * paddle.floor(dim_range / 2) / feat_dim)
    # [n_position, feat_dim]
    angles = paddle.unsqueeze(positions, axis=1) / paddle.unsqueeze(dim_range,
                                                                    axis=0)
    angles[:, 0::2] = paddle.sin(angles[:, 0::2])
    angles[:, 1::2] = paddle.cos(angles[:, 1::2])
    return angles
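A short usage sketch (sizes are illustrative):

pos_enc = get_sinusoid_encoding(n_position=16, feat_dim=8)
print(pos_enc.shape)          # [16, 8]
print(float(pos_enc[0, 0]))   # sin(0) = 0.0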
Example #15
def positional_embedding(pos_seq, inv_freq, bsz=None):
    # sinusoid_inp = einsum("i,d->id", pos_seq, inv_freq), written as a matmul
    sinusoid_inp = paddle.matmul(pos_seq.reshape([-1, 1]),
                                 inv_freq.reshape([1, -1]))
    pos_emb = paddle.concat(
        [paddle.sin(sinusoid_inp),
         paddle.cos(sinusoid_inp)], axis=-1)
    pos_emb = paddle.unsqueeze(pos_emb, axis=1)
    if bsz is not None:
        pos_emb = pos_emb.expand([-1, bsz, -1])
    pos_emb.stop_gradient = True
    return pos_emb
Example #16
    def _test_static(self, place, kwargs):
        paddle.enable_static()

        best = float("-10000") if kwargs['mode'] == "max" else float("10000")
        current_lr = 1.0
        cooldown_counter = 0
        num_bad_epochs = 0
        var_list = [best, current_lr, cooldown_counter, num_bad_epochs]

        main_prog = paddle.static.Program()
        start_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, start_prog):
            x = fluid.layers.create_global_var([1],
                                               1,
                                               'float32',
                                               persistable=True)
            paddle.increment(x)
            loss = paddle.sin(x)
            scheduler = paddle.optimizer.lr.ReduceOnPlateau(**kwargs)
            adam = paddle.optimizer.Adam(learning_rate=scheduler)
            adam.minimize(loss)
            lr_var = adam._global_learning_rate()
            test_prog = main_prog.clone()

        exe = paddle.static.Executor(place)
        exe.run(start_prog)

        for epoch in range(20):
            for batch_id in range(1):
                out, actual_lr = exe.run(main_prog,
                                         fetch_list=[loss.name, lr_var.name])
                expected_lr = reduce_lr_on_plateau(
                    kwargs['factor'], kwargs['threshold'], kwargs['cooldown'],
                    kwargs['patience'], kwargs['mode'],
                    kwargs['threshold_mode'], out[0], var_list)

            scheduler.step(out[0])
            actual_lr = scheduler()
            self.assertEqual(actual_lr, np.array(expected_lr))

        for epoch in range(10):
            for batch_id in range(1):
                out, actual_lr = exe.run(test_prog,
                                         fetch_list=[loss.name, lr_var.name])
                expected_lr = reduce_lr_on_plateau(
                    kwargs['factor'], kwargs['threshold'], kwargs['cooldown'],
                    kwargs['patience'], kwargs['mode'],
                    kwargs['threshold_mode'], out[0], var_list)
            scheduler.step(out[0])
            actual_lr = scheduler()
            self.assertEqual(actual_lr, np.array(expected_lr))
Example #17
    def __init__(self, dropout, dim, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = paddle.zeros([max_len, dim])
        position = paddle.arange(0, max_len, dtype=paddle.float32).unsqueeze(1)
        div_term = paddle.exp(
            paddle.arange(0, dim, 2).astype('float32') *
            (-math.log(10000.0) / dim))
        pe[:, 0::2] = paddle.sin(position * div_term)
        pe[:, 1::2] = paddle.cos(position * div_term)
        pe = pe.unsqueeze(0)
        pe = pe.transpose([1, 0, 2])
        self.register_buffer('pe', pe)
Example #18
    def forward(self, pos_seq, bsz=None):
        sinusoid_inp = paddle.matmul(
            pos_seq.unsqueeze([1]), self.inv_freq.unsqueeze([0]))
        pos_emb = paddle.concat(
            [paddle.sin(sinusoid_inp), paddle.cos(sinusoid_inp)], axis=-1)

        pos_emb = pos_emb.unsqueeze([0])
        if bsz is not None:
            pos_emb = pos_emb.expand([bsz, -1, -1])
        pos_emb.stop_gradient = True
        return pos_emb
Example #19
    def __init__(self, position: int = 60, d_model: int = 30):
        super().__init__()

        pos_enc = paddle.zeros(shape=[position, d_model], dtype='float32')
        pos = paddle.arange(start=0, end=position,
                            dtype='float32').unsqueeze(1)
        dim = paddle.arange(start=0, end=d_model, step=2, dtype='float32')
        div_den = paddle.pow(
            paddle.to_tensor(np.array([10000]), dtype='float32'),
            -(dim / d_model))
        pos_enc[:, 0::2] = paddle.sin(pos * div_den)
        pos_enc[:, 1::2] = paddle.cos(pos * div_den)
        pos_enc.stop_gradient = True
        self.register_buffer('pos_enc', pos_enc)
Example #20
    def forward(self, x):
        matrix = paddle.ones([1, 1], dtype='float32')
        for t in self.thetas:
            cs = paddle.cos(t / 2)
            sn = paddle.sin(t / 2)
            m = paddle.concat([
                paddle.concat([cs, -sn], axis=1),
                paddle.concat([sn, cs], axis=1),
            ], axis=0)
            matrix = paddle.kron(matrix, m)

        x = paddle.matmul(matrix, x)
        return x
Example #21
    def get_score(self, head, rel, tail):
        re_head, im_head = paddle.chunk(head, chunks=2, axis=-1)
        re_tail, im_tail = paddle.chunk(tail, chunks=2, axis=-1)

        phase_rel = rel / (self.emb_init / np.pi)
        re_rel, im_rel = paddle.cos(phase_rel), paddle.sin(phase_rel)
        re_score = re_rel * re_tail + im_rel * im_tail
        im_score = re_rel * im_tail - im_rel * re_tail
        re_score = re_score - re_head
        im_score = im_score - im_head

        score = paddle.stack([re_score, im_score], axis=0)
        score = self.gamma - paddle.sum(paddle.norm(score, p=2, axis=0),
                                        axis=-1)
        return score
Example #22
def rotation_2d(points, angles):
    """rotation 2d points based on origin point clockwise when angle positive.

    Args:
        points (float array, shape=[N, point_size, 2]): points to be rotated.
        angles (float array, shape=[N]): rotation angle.

    Returns:
        float array: same shape as points
    """
    rot_sin = paddle.sin(angles)
    rot_cos = paddle.cos(angles)
    rot_mat_T = paddle.stack(
        [paddle.stack([rot_cos, -rot_sin]),
         paddle.stack([rot_sin, rot_cos])])
    return paddle.einsum('aij,jka->aik', points, rot_mat_T)
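For example, with a 90° angle the x-axis unit vector maps onto -y, confirming the clockwise convention:

import math
import paddle

points = paddle.to_tensor([[[1., 0.], [0., 1.]]])   # [N=1, point_size=2, 2]
angles = paddle.to_tensor([math.pi / 2])
print(rotation_2d(points, angles))
# ~[[[0., -1.], [1., 0.]]]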
Example #23
def dft_matrix(n: int,
               return_complex: bool = False,
               dtype: str = 'float64') -> Tensor:
    """Compute discrete Fourier transform matrix.

    Parameters:
        n(int): the size of dft matrix.
        return_complex(bool): whether to return a complex matrix. If True, the matrix
            will be complex-typed. Otherwise, the real and imaginary parts are stored
            in the last axis of the returned tensor.
        dtype(str): the datatype of the returned dft matrix.

    Shape:
        output: [n, n] or [n,n,2]

    Returns:
        Complex tensor of shape (n,n) if return_complex=True, and of shape (n,n,2) otherwise.

    Examples:

        .. code-block:: python

            import paddle
            import paddleaudio.functional as F
            m = F.dft_matrix(512)
            print(m.shape)
            >> [512, 512, 2]
            m = F.dft_matrix(512, return_complex=True, dtype='float32')
            print(m.shape)
            >> [512, 512]

    """
    # As of paddle 2.1.0, complex128 is not supported, so a complex return
    # requires dtype='float32'.
    if return_complex and dtype == 'float64':
        raise ValueError(
            'return_complex=True is not supported with dtype float64')

    x, y = paddle.meshgrid(paddle.arange(0, n), paddle.arange(0, n))
    z = x.astype(dtype) * y.astype(dtype) * paddle.to_tensor(
        (-2 * math.pi / n), dtype)
    cos = paddle.cos(z)
    sin = paddle.sin(z)

    if return_complex:
        return cos + paddle.to_tensor([1j]) * sin
    cos = cos.unsqueeze(-1)
    sin = sin.unsqueeze(-1)
    return paddle.concat([cos, sin], -1)
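A sketch verifying the real/imaginary layout against numpy's FFT (assuming dft_matrix is in scope):

import numpy as np

m = dft_matrix(8)                                  # [8, 8, 2]
w = m[..., 0].numpy() + 1j * m[..., 1].numpy()
x = np.random.randn(8)
print(np.allclose(w @ x, np.fft.fft(x)))           # True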
Example #24
def get_timestep_embedding(timesteps, embedding_dim):
    """
    This matches the implementation in Denoising Diffusion Probabilistic Models:
    From Fairseq.
    Build sinusoidal embeddings.
    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = paddle.exp(paddle.arange(half_dim, dtype='float32') * -emb)
    emb = timesteps.astype('float32').unsqueeze(1) * emb.unsqueeze(0)
    emb = paddle.concat([paddle.sin(emb), paddle.cos(emb)], 1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = paddle.nn.functional.pad(emb, [0, 1, 0, 0])
    return emb
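Usage is straightforward; timesteps is a 1-D integer tensor (values are illustrative):

import paddle

t = paddle.arange(0, 4)                 # four diffusion timesteps
emb = get_timestep_embedding(t, embedding_dim=8)
print(emb.shape)                        # [4, 8]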
Example #25
def cosine(M: int, sym: bool = True, dtype: str = 'float64') -> Tensor:
    """Compute a window with a simple cosine shape.
    Parameters:
        M(int): window size.
        sym(bool):whether to return symmetric window.
            The default value is True
        dtype(str): the datatype of returned tensor.
    Returns:
        Tensor: the window tensor
    Notes:
        This function is consistent with scipy.signal.windows.cosine().
    """
    if _len_guards(M):
        return paddle.ones((M, ), dtype=dtype)
    M, needs_trunc = _extend(M, sym)
    w = paddle.sin(math.pi / M * (paddle.arange(0, M, dtype=dtype) + .5))

    return _truncate(w, needs_trunc)
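Assuming scipy is available and the private helpers above are in scope, a quick cross-check mirroring the note in the docstring:

import numpy as np
from scipy.signal import windows

print(np.allclose(cosine(128).numpy(), windows.cosine(128)))  # True
print(np.allclose(cosine(128, sym=False).numpy(),
                  windows.cosine(128, sym=False)))            # True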
Example #26
    def _test_dygraph(self, place, kwargs):
        paddle.disable_static(place)

        best = float("-10000") if kwargs['mode'] == "max" else float("10000")
        current_lr = 1.0
        cooldown_counter = 0
        num_bad_epochs = 0
        var_list = [best, current_lr, cooldown_counter, num_bad_epochs]

        linear = paddle.nn.Linear(10, 10)
        scheduler = paddle.optimizer.lr.ReduceOnPlateau(**kwargs)
        adam = paddle.optimizer.Adam(learning_rate=scheduler,
                                     parameters=linear.parameters())

        for epoch in range(20):
            for batch_id in range(1):
                x = paddle.to_tensor(epoch).astype('float32')
                loss = paddle.sin(x)
                loss.backward()
                adam.step()
                adam.clear_grad()

            scheduler.step(loss)
            # get lr from paddle
            current_lr = adam.get_lr()
            # get lr from python
            expected_lr = reduce_lr_on_plateau(
                kwargs['factor'], kwargs['threshold'], kwargs['cooldown'],
                kwargs['patience'], kwargs['mode'], kwargs['threshold_mode'],
                loss, var_list)
            self.assertEqual(current_lr, expected_lr)
        state_dict = adam.state_dict()
        scheduler1 = paddle.optimizer.lr.ReduceOnPlateau(**kwargs)
        adam1 = paddle.optimizer.Adam(learning_rate=scheduler1,
                                      parameters=linear.parameters())
        adam1.set_state_dict(state_dict)
        self.assertEqual(scheduler.cooldown_counter,
                         scheduler1.cooldown_counter)
        self.assertEqual(scheduler.best.numpy()[0], scheduler1.best)
        self.assertEqual(scheduler.num_bad_epochs, scheduler1.num_bad_epochs)
        self.assertEqual(scheduler.last_epoch, scheduler1.last_epoch)
        self.assertEqual(scheduler.last_lr, scheduler1.last_lr)
Example #27
def test_paddle_param():
    U = paddle.to_tensor(
        np.array([[1, -1], [1, 1]], dtype=np.float32) / np.sqrt(2))
    U.stop_gradient = True
    theta = paddle.static.create_parameter(shape=[1, 1], dtype='float32')
    cs = paddle.cos(theta / 2)
    sn = paddle.sin(theta / 2)
    matrix = paddle.concat([
        paddle.concat([cs, -sn], axis=1),
        paddle.concat([sn, cs], axis=1),
    ],
                           axis=0)
    loss = 1 - paddle.trace(paddle.matmul(U, paddle.transpose(matrix,
                                                              [1, 0]))) / 2
    dt = paddle.grad(outputs=[loss],
                     inputs=[theta],
                     create_graph=False,
                     retain_graph=True)[0]
    print(dt)
Example #28
    def __init__(self, dropout, dim, max_len=5000):
        super(PositionalEncoding_2d, self).__init__()
        self.dropout = nn.Dropout(p=dropout)

        pe = paddle.zeros([max_len, dim])
        position = paddle.arange(0, max_len, dtype=paddle.float32).unsqueeze(1)
        div_term = paddle.exp(
            paddle.arange(0, dim, 2).astype('float32') *
            (-math.log(10000.0) / dim))
        pe[:, 0::2] = paddle.sin(position * div_term)
        pe[:, 1::2] = paddle.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose([1, 0, 2])
        self.register_buffer('pe', pe)

        self.avg_pool_1 = nn.AdaptiveAvgPool2D((1, 1))
        self.linear1 = nn.Linear(dim, dim)
        # paddle Parameters have no .data.fill_(); use set_value instead
        self.linear1.weight.set_value(paddle.full_like(self.linear1.weight, 1.))
        self.avg_pool_2 = nn.AdaptiveAvgPool2D((1, 1))
        self.linear2 = nn.Linear(dim, dim)
        self.linear2.weight.set_value(paddle.full_like(self.linear2.weight, 1.))
Example #29
def rzz_gate_matrix(params):
    """
    RZZ gate
    :return:
    """
    theta = params
    re_a = paddle.cos(theta / 2)
    re_b = paddle.zeros([1], 'float64')
    im_a = paddle.sin(theta / 2)
    im_b = paddle.zeros([1], 'float64')
    re = paddle.reshape(
        paddle.concat([
            re_a, re_b, re_b, re_b, re_b, re_a, re_b, re_b, re_b, re_b, re_a,
            re_b, re_b, re_b, re_b, re_a
        ]), [4, 4])
    im = paddle.reshape(
        paddle.concat([
            -im_a, im_b, im_b, im_b, im_b, im_a, im_b, im_b, im_b, im_b, im_a,
            im_b, im_b, im_b, im_b, -im_a
        ]), [4, 4])

    return re + im * paddle.to_tensor([1j], 'complex128')
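As a sanity check, the returned matrix should be unitary (M Mᴴ = I):

import paddle

theta = paddle.to_tensor([0.7], 'float64')
m = rzz_gate_matrix(theta)
ident = paddle.matmul(m, m.conj().t())
print(paddle.allclose(paddle.real(ident), paddle.eye(4, dtype='float64')))
# True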
Example #30
def bohman(M: int, sym: bool = True, dtype: str = 'float64') -> Tensor:
    """Compute a Bohman window.
    The Bohman window is the autocorrelation of a cosine window.
    Parameters:
        M(int): window size.
        sym(bool):whether to return symmetric window.
            The default value is True
        dtype(str): the datatype of returned tensor.
    Returns:
        Tensor: the window tensor
    Notes:
        This function is consistent with scipy.signal.windows.bohman().
    """
    if _len_guards(M):
        return paddle.ones((M, ), dtype=dtype)
    M, needs_trunc = _extend(M, sym)

    fac = paddle.abs(paddle.linspace(-1, 1, M, dtype=dtype)[1:-1])
    w = (1 - fac) * paddle.cos(math.pi * fac) + 1.0 / math.pi * paddle.sin(
        math.pi * fac)
    w = _cat([0, w, 0], dtype)

    return _truncate(w, needs_trunc)