Code example #1
def test_batch_symeig_forward():
    xs = torch.randn(3, 5, 5).double()
    ws, vs = S.symeig(xs)
    for i in range(xs.shape[0]):
        w, v = S.symeig(xs[i])
        torch.testing.assert_allclose(ws[i], w)
        torch.testing.assert_allclose(vs[i], v)
Code example #2
File: test.py  Project: tolgabirdal/qecnetworks
def test_batch_symeig_forward():
    xs = torch.randn(8, 4, 4).float().cuda()
    ws, vs = S.symeig(xs)
    #    test=torch.symeig(xs[0])
    eigenValues, eigenVectors = torch.symeig(xs[0], eigenvectors=True)
    for i in range(xs.shape[0]):
        w, v = S.symeig(xs[i])
        torch.testing.assert_allclose(ws[i], w)
        torch.testing.assert_allclose(vs[i], v)
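
Note: the torch.symeig reference used here (and in the per-matrix loops further down) was deprecated and has since been removed from PyTorch. On newer builds the same reference can come from torch.linalg.eigh, which is batched, reads only the lower triangle like torch.symeig's default, and returns eigenvalues in ascending order. A minimal sketch of the forward check, under the assumption that S.symeig follows those same conventions:

import torch

def test_batch_symeig_forward_eigh():
    xs = torch.randn(8, 4, 4).float().cuda()
    ws, vs = S.symeig(xs)
    w_ref, v_ref = torch.linalg.eigh(xs)          # batched reference
    torch.testing.assert_close(ws, w_ref)
    # eigenvectors are only defined up to sign, so compare magnitudes
    torch.testing.assert_close(vs.abs(), v_ref.abs())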
Code example #3
File: test.py  Project: tolgabirdal/qecnetworks
def test_batch_symeig_backward():
    input = torch.randn(8 * 64 * 32, 4, 4).float()
    input1 = input.clone()
    input.requires_grad = True
    w, v = S.symeig(input)
    (w.sum() + v.sum()).backward()
    # print(input.grad)
    for i in range(input1.size(0)):
        in1 = input1[i]
        in1.requires_grad = True
        wi, vi = S.symeig(in1)
        (wi.sum() + vi.sum()).backward()
        # print(in1.grad)
        torch.testing.assert_allclose(input.grad[i], in1.grad)
Code example #4
    def averageQuaternions(self, _input_lrf, _input_a):
        num_q = _input_lrf.size(0)

        _input_lrf = _input_lrf.view(-1, 4)
        cov_matrix = torch.bmm(_input_lrf.unsqueeze(2),
                               _input_lrf.unsqueeze(1))
        cov_matrix = cov_matrix.view(num_q, self.num_neighbours, 4, 4)
        cov_matrix_ave = torch.mean(cov_matrix, 1)

        _input_a = _input_a.transpose(-2, -1).contiguous().view(
            -1, self.num_neighbours)
        mask = torch.sign(torch.abs(_input_a))
        mask_4_matrix = torch.sum(mask, -1)
        mask_4_matrix = mask_4_matrix.nonzero().squeeze()
        cov_matrix_ave_none_zero = cov_matrix_ave[mask_4_matrix]

        noise = (1e-6) * torch.randn_like(cov_matrix_ave_none_zero).cuda()
        cov_matrix_ave_none_zero = cov_matrix_ave_none_zero + noise

        e_w, e_v = S.symeig(cov_matrix_ave_none_zero)
        v_max_ = e_v[:, 3].clone()
        v_max = torch.zeros((cov_matrix_ave.size(0), 4),
                            device=cov_matrix_ave.device,
                            dtype=torch.float32)
        v_max[mask_4_matrix, :] = v_max_

        vmax_mask = torch.sign(v_max[:, 0])
        vmax_mask = vmax_mask.contiguous().view(-1, 1).expand(v_max.size(0), 4)
        v_max = v_max * vmax_mask
        return v_max
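
This is the standard eigenvector-based quaternion average: the mean of the outer products q q^T is formed per point, the eigenvector of the largest eigenvalue (here e_v[:, 3], assuming ascending eigenvalue order) is taken as the averaged quaternion, and the final sign mask resolves the q / -q ambiguity. A stripped-down, unmasked version of the same idea, written against torch.linalg.eigh rather than the project's S.symeig (illustrative only):

import torch

def average_quaternions(q):                  # q: (N, K, 4) unit quaternions
    m = torch.einsum('nki,nkj->nij', q, q) / q.size(1)   # mean outer products
    _, v = torch.linalg.eigh(m)              # eigenvalues in ascending order
    q_avg = v[..., -1]                       # eigenvector of the largest eigenvalue
    return torch.where(q_avg[:, :1] < 0, -q_avg, q_avg)  # pin the q / -q sign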
Code example #5
File: test.py  Project: tolgabirdal/qecnetworks
def test_batch_symeig_top():
    #    input = torch.randn(8, 4, 4).float().cuda()
    num_q = 800
    input_lrf = torch.randn(num_q, 8, 4).cuda()
    input_lrf = F.normalize(input_lrf, p=2, dim=-1)
    input_lrf = input_lrf.view(-1, 4)
    test = torch.bmm(input_lrf.unsqueeze(2), input_lrf.unsqueeze(1))
    test = test.view(num_q, 8, 4, 4)
    input = torch.sum(test, 1)

    input3 = input.clone()
    input3.requires_grad = True

    input.requires_grad = True

    # batch wise back
    w, v = S.symeig(input)
    v_max = v[:, 3].clone()
    bool_vmax = v_max[:, 0] / torch.abs(v_max[:, 0])
    bool_vmax = bool_vmax.contiguous().view(-1, 1)
    bool_vmax = bool_vmax.expand(v_max.size(0), 4)
    v_max = v_max * bool_vmax
    (v_max.mean()).backward()

    # pytorch version loop back
    averaged_Q4 = torch.rand(num_q, 4)
    for i in range(input3.size(0)):
        eigenValues, eigenVectors = torch.eig(input3[i], eigenvectors=True)
        # torch.eig returns eigenvalues as an (n, 2) real/imaginary tensor,
        # so select the eigenvector of the largest real eigenvalue
        e_values, e_indices = torch.max(eigenValues[:, 0], 0)
        averaged_Q4[i] = eigenVectors[:, e_indices]
        if (averaged_Q4[i][0] < 0):
            averaged_Q4[i] = -averaged_Q4[i]
    (averaged_Q4.mean()).backward()

    torch.testing.assert_allclose(input.grad, input3.grad)
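
torch.eig was deprecated alongside torch.symeig and has been removed from newer PyTorch releases; it also returned eigenvalues as an (n, 2) real/imaginary tensor, which is why the max above is taken over the first column (the real parts). On current builds the reference loop is simpler with torch.linalg.eigh, where the last column is already the eigenvector of the largest eigenvalue. A hypothetical replacement for the loop, not part of the project:

def max_eigenvector_loop(mats):              # mats: (N, 4, 4), symmetric
    out = torch.empty(mats.size(0), 4, dtype=mats.dtype, device=mats.device)
    for i in range(mats.size(0)):
        w, v = torch.linalg.eigh(mats[i])    # ascending eigenvalues
        q = v[:, -1]                         # eigenvector of the largest one
        out[i] = torch.where(q[0] < 0, -q, q)  # same sign convention as above
    return out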
Code example #6
def test_generalized_symeig_forward():
    a = torch.randn(3, 3).double()
    a = a.t().mm(a)
    b = torch.randn(3, 3).double()
    b = b.t().mm(b)
    w, v = S.symeig(a, b)
    torch.testing.assert_allclose(a.mm(v), w * b.mm(v))
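
The assertion verifies the generalized eigenvalue relation A v_i = w_i B v_i column by column (w broadcasts across the rows of b.mm(v)). If SciPy is available, the eigenvalues can also be cross-checked against scipy.linalg.eigh, which solves the same generalized problem and sorts them in ascending order (assuming S.symeig uses the same ordering, as the other tests suggest):

import numpy as np
from scipy.linalg import eigh

w_ref, v_ref = eigh(a.numpy(), b.numpy())    # generalized problem A v = w B v
np.testing.assert_allclose(w.numpy(), w_ref, rtol=1e-6, atol=1e-8)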
Code example #7
File: test.py  Project: tolgabirdal/qecnetworks
def test_runtime():
    """test that there are no runtime errors"""
    import torch.nn as nn
    import torch.nn.functional as F
    x = torch.randn(30, 10)
    w = nn.Parameter(torch.rand(30, 10), requires_grad=True)
    xw = F.linear(x, w)
    a, b = S.symeig(xw)
    asum = a.sum()
    asum.backward()
Code example #8
def ave_pose(pose):
    batch_size = pose.size(0)
    ave_out = torch.zeros(batch_size, 4)
    for b_id in range(batch_size):
        # keep only the non-zero pose rows of this batch element
        mask_4_pose = torch.sum(pose[b_id], -1).nonzero().squeeze()
        pose_none_zero = pose[b_id, mask_4_pose]
        # average of the quaternion outer products
        cov_matrix = torch.bmm(pose_none_zero.unsqueeze(2),
                               pose_none_zero.unsqueeze(1))
        cov_matrix_ave = torch.mean(cov_matrix, 0)
        # averaged pose = eigenvector of the largest eigenvalue (index 3, ascending order)
        e_w, e_v = S.symeig(cov_matrix_ave)
        v_max = e_v[:, 3].clone()
        # resolve the q / -q ambiguity via the sign of the first component
        vmax_mask = torch.sign(v_max[0])
        vmax_mask = vmax_mask.contiguous().view(-1, 1).expand(1, 4)
        ave_out[b_id] = v_max * vmax_mask
    return ave_out
Code example #9
File: test.py  Project: tolgabirdal/qecnetworks
def test_bug():
    test1 = torch.tensor([[2.7378, -2.7378, -0.4963, -0.2952],
                          [-2.7378, 2.7378, 0.4963, 0.2952],
                          [-0.4963, 0.4963, 1.8648, 1.1091],
                          [-0.2952, 0.2952, 1.1091, 0.6596]]).cuda()
    #    test1=torch.randn( 4, 4).cuda()

    #    test1_V = Variable(test1.data, requires_grad=True)

    test2 = test1.unsqueeze(0)
    test2_V = Variable(test2.data, requires_grad=True)
    w, v = S.symeig(test2_V)
    loss = w.mean() + v.mean()
    loss.backward()
    print(test2_V.grad.max())
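
torch.autograd.Variable (presumably imported at module scope in the original test.py) has been a thin alias for Tensor since PyTorch 0.4; an equivalent grad-enabled leaf can be built directly, for example:

test2_V = test2.detach().clone().requires_grad_(True)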
Code example #10
File: test_speed.py  Project: tolgabirdal/qecnetworks
def test():
    seconds = time.time()
    for i in range(10):
        a = torch.rand(65535, 4, 4).cuda()
        #    b = a.clone()
        #    c = a.clone()
        a.requires_grad = True
        #    b.requires_grad = True
        #    c.requires_grad = True

        U, V = S.symeig(a)
        loss = U.mean() + V.mean()
        loss.backward()

    seconds2 = time.time()

    print("time of eig ", seconds2 - seconds)
Code example #11
File: test.py  Project: tolgabirdal/qecnetworks
def test_batch_symeig_top_init():
    #    input = torch.randn(8, 4, 4).float().cuda()
    num_q = 80

    input1 = torch.randn(num_q, 8, 4).cuda()
    input2 = input1.clone()

    input1.requires_grad = True
    input_lrf = F.normalize(input1, p=2, dim=-1)
    input_lrf = input_lrf.view(-1, 4)
    input_cov = torch.bmm(input_lrf.unsqueeze(2), input_lrf.unsqueeze(1))
    input_cov = input_cov.view(num_q, 8, 4, 4)
    input_cov_sum = torch.sum(input_cov, 1)

    w, v = S.symeig(input_cov_sum)
    v_max = v[:, 3].clone()

    bool_vmax = v_max[:, 0] / torch.abs(v_max[:, 0])
    bool_vmax = bool_vmax.contiguous().view(-1, 1)
    bool_vmax = bool_vmax.expand(v_max.size(0), 4)
    v_max = v_max * bool_vmax

    (v_max.mean()).backward()

    #    input3.requires_grad = True
    averaged_Q4 = torch.rand(num_q, 4)
    input2.requires_grad = True
    input_lrf2 = F.normalize(input2, p=2, dim=-1)
    input_lrf2 = input_lrf2.view(-1, 4)
    input_cov2 = torch.bmm(input_lrf2.unsqueeze(2), input_lrf2.unsqueeze(1))
    input_cov2 = input_cov2.view(num_q, 8, 4, 4)
    input_cov_sum2 = torch.sum(input_cov2, 1)
    for i in range(input_cov_sum2.size(0)):
        eigenValues, eigenVectors = torch.symeig(input_cov_sum2[i],
                                                 eigenvectors=True)
        e_values, e_indices = torch.max(eigenValues, 0)
        averaged_Q4[i] = eigenVectors[:, e_indices]
        if (averaged_Q4[i][0] < 0):
            averaged_Q4[i] = -averaged_Q4[i]
    (averaged_Q4.mean()).backward()
    torch.testing.assert_allclose(input1.grad, input2.grad)
Code example #12
    def weightedAverageQuaternions(self, pose, b_ij, input_a):
        weights = b_ij.float()
        mask = torch.sign(torch.abs(input_a))
        weights = weights * mask
        weights = F.normalize(weights, p=1, dim=-1)  # replace with softmax
        pose = pose.view(-1, self.num_neighbours * self.in_channels, 4)
        num_q = pose.size(0)

        mask_4_matrix = torch.sum(torch.sum(pose, -1), -1)
        mask_4_matrix = mask_4_matrix.nonzero().squeeze()

        pose = pose.view(-1, 4)
        weights = weights.contiguous().view(-1, 1, 1)
        weights = weights.expand(pose.size(0), 4, 4).contiguous()

        cov_matrix = torch.bmm(pose.unsqueeze(2), pose.unsqueeze(1))

        weighted_cov = weights * cov_matrix
        weighted_cov = weighted_cov.view(
            num_q, self.num_neighbours * self.in_channels, 4, 4)
        cov_matrix_sum = torch.sum(weighted_cov, 1)

        cov_matrix_sum_none_zero = cov_matrix_sum[mask_4_matrix]

        noise = (1e-6) * torch.randn_like(cov_matrix_sum_none_zero).cuda()
        cov_matrix_sum_none_zero = cov_matrix_sum_none_zero + noise

        e_w, e_v = S.symeig(cov_matrix_sum_none_zero)
        v_max_ = e_v[:, 3].clone()
        v_max = torch.zeros((cov_matrix_sum.size(0), 4),
                            device=cov_matrix_sum.device,
                            dtype=torch.float32)
        v_max[mask_4_matrix, :] = v_max_
        vmax_mask = torch.sign(v_max[:, 0])
        vmax_mask = vmax_mask.contiguous().view(-1, 1).expand(v_max.size(0), 4)
        v_max = v_max * vmax_mask

        return v_max
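
As with averageQuaternions above, this is the eigenvector-based quaternion average, here weighted by the masked, L1-normalized routing coefficients b_ij before the outer products are summed. The core computation, reduced to a hedged sketch with torch.linalg.eigh standing in for S.symeig (illustrative only, not the project's implementation):

import torch

def weighted_average_quaternions(q, w):      # q: (N, K, 4), w: (N, K), rows of w sum to 1
    m = torch.einsum('nk,nki,nkj->nij', w, q, q)   # weighted sum of outer products
    _, v = torch.linalg.eigh(m)              # eigenvalues in ascending order
    q_avg = v[..., -1]                       # eigenvector of the largest eigenvalue
    return torch.where(q_avg[:, :1] < 0, -q_avg, q_avg)  # fix the q / -q sign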