Example #1
def gen_look_at_matrix(pos, look, up):
    d = normalize(look - pos)
    right = normalize(torch.cross(d, normalize(up)))
    new_up = normalize(torch.cross(right, d))
    z = torch.zeros([1], dtype=torch.float32)
    o = torch.ones([1], dtype=torch.float32)
    return torch.transpose(torch.stack([torch.cat([right , z], 0),
                                        torch.cat([new_up, z], 0),
                                        torch.cat([d     , z], 0),
                                        torch.cat([pos   , o], 0)]), 0, 1).contiguous()
Example #2
def qrot(q, v):
    """
    Rotate vector(s) v about the rotation described by quaternion(s) q.
    Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
    where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).
    """
    assert q.shape[-1] == 4
    assert v.shape[-1] == 3
    assert q.shape[:-1] == v.shape[:-1]

    qvec = q[..., 1:]
    uv = torch.cross(qvec, v, dim=len(q.shape)-1)
    uuv = torch.cross(qvec, uv, dim=len(q.shape)-1)
    return (v + 2 * (q[..., :1] * uv + uuv))
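A quick sanity check of qrot (a hypothetical usage sketch, not part of the original snippet): the unit quaternion for a 90-degree rotation about the z-axis should map the x-axis onto the y-axis.

import math
import torch

# q = (cos(a/2), 0, 0, sin(a/2)) encodes a rotation by a = pi/2 about z
q = torch.tensor([math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4)])
v = torch.tensor([1.0, 0.0, 0.0])
qrot(q, v)  # ~tensor([0., 1., 0.])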
Example #3
    def test_remote_var_binary_methods(self):
        ''' Unit tests for methods mentioned on issue 1385
            https://github.com/OpenMined/PySyft/issues/1385'''
        hook = TorchHook(verbose=False)
        local = hook.local_worker
        remote = VirtualWorker(hook, 1)
        local.add_worker(remote)

        x = Var(torch.FloatTensor([1, 2, 3, 4])).send(remote)
        y = Var(torch.FloatTensor([[1, 2, 3, 4]])).send(remote)
        z = torch.matmul(x, y.t())
        assert (torch.equal(z.get(), Var(torch.FloatTensor([30]))))
        z = torch.add(x, y)
        assert (torch.equal(z.get(), Var(torch.FloatTensor([[2, 4, 6, 8]]))))
        x = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
        y = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
        z = torch.cross(x, y, dim=1)
        assert (torch.equal(z.get(), Var(torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]]))))
        x = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
        y = Var(torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])).send(remote)
        z = torch.dist(x, y)
        assert (torch.equal(z.get(), Var(torch.FloatTensor([0.]))))
        x = Var(torch.FloatTensor([1, 2, 3])).send(remote)
        y = Var(torch.FloatTensor([1, 2, 3])).send(remote)
        z = torch.dot(x, y)
        assert (torch.equal(z.get(), Var(torch.FloatTensor([14]))))
        z = torch.eq(x, y)
        assert (torch.equal(z.get(), Var(torch.ByteTensor([1, 1, 1]))))
        z = torch.ge(x, y)
        assert (torch.equal(z.get(), Var(torch.ByteTensor([1, 1, 1]))))
Example #4
    def test_local_var_binary_methods(self):
        ''' Unit tests for methods mentioned on issue 1385
            https://github.com/OpenMined/PySyft/issues/1385'''
        x = torch.FloatTensor([1, 2, 3, 4])
        y = torch.FloatTensor([[1, 2, 3, 4]])
        z = torch.matmul(x, y.t())
        assert (torch.equal(z, torch.FloatTensor([30])))
        z = torch.add(x, y)
        assert (torch.equal(z, torch.FloatTensor([[2, 4, 6, 8]])))
        x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
        y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
        z = torch.cross(x, y, dim=1)
        assert (torch.equal(z, torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]])))
        x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
        y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
        z = torch.dist(x, y)
        t = torch.FloatTensor([z])
        assert (torch.equal(t, torch.FloatTensor([0.])))
        x = torch.FloatTensor([1, 2, 3])
        y = torch.FloatTensor([1, 2, 3])
        z = torch.dot(x, y)
        t = torch.FloatTensor([z])
        assert torch.equal(t, torch.FloatTensor([14]))
        z = torch.eq(x, y)
        assert (torch.equal(z, torch.ByteTensor([1, 1, 1])))
        z = torch.ge(x, y)
        assert (torch.equal(z, torch.ByteTensor([1, 1, 1])))
Example #5
    def parallel_self(self, kpts_est, **kwargs):
        "encourage parallel to self"
        # kpts_est: (nFramesxnViews, nJoints, 3)
        nFrames = kpts_est.shape[0] // 2
        kpts_out = kpts_est[:nFrames, ...]
        kpts_in = kpts_est[nFrames:, ...]
        kpts_in = flipPoint2D(kpts_in)
        direct = kpts_in - kpts_out
        direct_norm = direct / torch.norm(direct, dim=-1, keepdim=True)
        loss = torch.sum(
            torch.norm(torch.cross(direct_norm[:, self.idx0, :],
                                   direct_norm[:, self.idx1, :]),
                       dim=2)) / self.idx0.shape[0]
        return loss / nFrames
Example #6
    def parallel_mirror(self, kpts_est, **kwargs):
        "encourage parallel to mirror"
        # kpts_est: (nFramesxnViews, nJoints, 3)
        if self.normal is None:
            return torch.tensor(0.).to(self.device)
        nFrames = kpts_est.shape[0] // 2
        kpts_out = kpts_est[:nFrames, ...]
        kpts_in = kpts_est[nFrames:, ...]
        kpts_in = flipPoint2D(kpts_in)
        direct = kpts_in - kpts_out
        direct_norm = direct / torch.norm(direct, dim=-1, keepdim=True)
        loss = torch.sum(
            torch.norm(torch.cross(self.normal, direct_norm), dim=2))
        return loss / nFrames / kpts_est.shape[1]
Example #7
def rotate_points_with_rotvec(points, rot_vec):  # [Bs, 3], [Bs, 3]
    """
    Rotate points by given rotation vectors.
    Rodrigues' rotation formula is used.
    """
    theta = torch.norm(rot_vec, dim=-1, keepdim=True)  # [Bs, 1, 1]
    mask = (theta < 1e-8).float()
    v = rot_vec / torch.max(theta, mask)  # [Bs, 1, 1]
    dot = torch.sum(points * v, dim=-1, keepdim=True)  # [Bs, N, 1]
    cos_theta = torch.cos(theta)
    sin_theta = torch.sin(theta)

    return cos_theta * points + sin_theta * torch.cross(
        v, points, dim=-1) + dot * (1 - cos_theta) * v
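A minimal usage sketch for rotate_points_with_rotvec (assumed call pattern, matching the [Bs, 3] shapes in the signature comment): a rotation vector of magnitude pi/2 along z should carry the x-axis to the y-axis.

import math
import torch

points = torch.tensor([[1.0, 0.0, 0.0]])           # [Bs, 3]
rot_vec = torch.tensor([[0.0, 0.0, math.pi / 2]])  # [Bs, 3]
rotate_points_with_rotvec(points, rot_vec)  # ~tensor([[0., 1., 0.]])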
Example #8
    def forward(self, vertices, eps=1e-6):
        # make v0s, v1s, v2s, v3s
        batch_size = 1

        v0s = vertices[self.v0s, :]
        v1s = vertices[self.v1s, :]
        v2s = vertices[self.v2s, :]
        v3s = vertices[self.v3s, :]

        c10 = v1s - v0s
        c20 = v2s - v0s
        c30 = v3s - v0s

        n0 = torch.cross(c10, c20)
        n1 = -torch.cross(c10, c30)

        n0n = torch.norm(n0, dim=1)
        n1n = torch.norm(n1, dim=1)

        cos = torch.sum(n0 * n1, dim=1) / (n0n * n1n)
        # cos_ = cos[cos < 0.70710678118]  # 0.70710678118 = cos(45 deg)
        loss = (1.0 - cos).mean()
        return loss
Example #9
def lighting_th(faces,
                textures,
                intensity_ambient=0.5,
                intensity_directional=0.5,
                color_ambient=(1, 1, 1),
                color_directional=(1, 1, 1),
                direction=(0, 1, 0)):
    bs, nf = faces.shape[:2]

    # arguments
    if isinstance(color_ambient, tuple) or isinstance(color_ambient, list):
        color_ambient = faces.new(color_ambient).float()
    if isinstance(color_directional, tuple) or isinstance(
            color_directional, list):
        color_directional = faces.new(color_directional).float()
    if isinstance(direction, tuple) or isinstance(direction, list):
        direction = faces.new(direction).float()
    if color_ambient.dim() == 1:
        color_ambient = color_ambient.unsqueeze(0).repeat(bs, 1)
    if color_directional.dim() == 1:
        color_directional = color_directional.unsqueeze(0).repeat(bs, 1)
    if direction.dim() == 1:
        direction = direction.unsqueeze(0).repeat(bs, 1)

    # create light
    light = faces.new_full((bs, nf, 3), fill_value=0)

    # ambient light
    if intensity_ambient != 0:
        light = light + intensity_ambient * color_ambient.unsqueeze(1)

    # directional light
    if intensity_directional != 0:
        faces = faces.view((bs * nf, 3, 3))
        v10 = faces[:, 0] - faces[:, 1]
        v12 = faces[:, 2] - faces[:, 1]
        normals = torch.cross(v10, v12)
        normals_norm = normals / torch.norm(normals, p=2, dim=1).unsqueeze(1)
        normals = normals_norm.reshape((bs, nf, 3))

        if direction.dim() == 2:
            direction = direction.unsqueeze(1)
        cos = torch.nn.functional.relu(torch.sum(normals * direction, dim=2))
        light = (light + intensity_directional * color_directional.unsqueeze(1)
                 * cos.unsqueeze(2))

    # apply
    light = light.unsqueeze(2).unsqueeze(3).unsqueeze(4)
    textures = textures * light
    return textures
Example #10
def get_angles(a, b):
    '''
    calculate the angle between vector a and b
    :param a: Bx3xMxK tensor
    :param b: Bx3xMxK tensor
    :return: Bx1xMxK tensor
    '''
    axb = torch.cross(a, b, dim=1)  # Bx3xMxK
    a_1x3 = a.permute(0, 2, 3, 1).contiguous().unsqueeze(3)  # BxMxKx3 -> BxMxKx1x3
    b_3x1 = b.permute(0, 2, 3, 1).contiguous().unsqueeze(4)  # BxMxKx3 -> BxMxKx3x1
    ab = torch.matmul(a_1x3, b_3x1).squeeze(3).squeeze(3)  # BxMxKx1x1

    angle = torch.atan2(torch.norm(axb, dim=1, keepdim=False), ab).unsqueeze(1)
    return angle
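A hypothetical usage sketch for get_angles: two orthogonal unit vectors reshaped to the Bx3xMxK layout should give an angle of pi/2.

import torch

a = torch.tensor([1.0, 0.0, 0.0]).view(1, 3, 1, 1)  # Bx3xMxK
b = torch.tensor([0.0, 1.0, 0.0]).view(1, 3, 1, 1)  # Bx3xMxK
get_angles(a, b)  # ~pi/2, shape Bx1xMxK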
Example #11
def pix2world(vpix, hpix, height, aspect, fov, forward, up, pos):
    forward = totensor(forward)
    up = totensor(up)
    pos = totensor(pos)
    width = int(height * aspect)
    fov_scaler = np.tan(np.pi/180 * fov/2)
    pos_view = torch.tensor([1.0,
                             fov_scaler*aspect*(1.0-(hpix/width)*2),
                             fov_scaler*(1.0-(vpix/height)*2)])
    camera_forward = forward / forward.norm()
    camera_up = up / up.norm()
    camera_left = torch.cross(camera_up, camera_forward)
    R = torch.stack([camera_forward, camera_left, camera_up], dim=1)
    pos_world = R @ pos_view + pos
    vec = pos_world - pos
    vec = vec / vec.norm()
    return vec
Example #12
def random_quat_from_ray(forward, up=None):
    """
    Sample uniformly random quaternions that orient the camera's forward direction.

    Args:
        forward: a vector representing the forward direction.
        up: optional up direction; if None, a random downward direction is sampled.

    Returns:
        Quaternions that orient the camera along `forward`.
    """
    n = forward.shape[0]
    if up is None:
        down = three.uniform_unit_vector(n)
    else:
        up = torch.tensor(up).unsqueeze(0).expand(n, 3)
        up = up + forward
        down = -up
    right = three.normalize(torch.cross(down, forward))
    down = three.normalize(torch.cross(forward, right))

    mat = torch.stack([right, down, forward], dim=1)

    return three.quaternion.mat_to_quat(mat)
Example #13
def look_at_rh(eyes, centers, ups):
    """look at (rh)
    Inputs:
    - eyes, centers, ups: float, [batch x 3]

    Returns:
    - view_mat: float, [batch x 4 x 4]
    """
    f = normalize(centers - eyes, dim=1)
    s = normalize(torch.cross(f, ups, dim=1), dim=1)
    u = torch.cross(s, f, dim=1)

    zeros_pl = torch.zeros([eyes.size(0)],
                           dtype=eyes.dtype,
                           device=eyes.device)
    ones_pl = torch.ones([eyes.size(0)], dtype=eyes.dtype, device=eyes.device)

    return torch.stack([
        s[:, 0], s[:, 1], s[:, 2], zeros_pl,
        u[:, 0], u[:, 1], u[:, 2], zeros_pl,
        -f[:, 0], -f[:, 1], -f[:, 2], zeros_pl,
        -dot(s, eyes), -dot(u, eyes), dot(f, eyes), ones_pl
    ], -1).view(-1, 4, 4).permute(0, 2, 1)
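A minimal usage sketch for look_at_rh, assuming the undefined helpers behave as normalize = torch.nn.functional.normalize and dot(a, b) = torch.sum(a * b, dim=1):

import torch

eyes = torch.tensor([[0.0, 0.0, 3.0]])
centers = torch.zeros(1, 3)
ups = torch.tensor([[0.0, 1.0, 0.0]])
view_mat = look_at_rh(eyes, centers, ups)  # (1, 4, 4) right-handed view matrix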
Example #14
def vote_axis_loss(keypoint_offsets, proposals, ori_depths, gt_keypoints,
                   keypoint_matched_idxs, masks_for_vote, gt_labels):
    N, _, _, H, W = keypoint_offsets.shape
    assert H == W
    gt_axis = []
    vote_offset_targets = []
    voter_labels = []

    for proposals_per_image, gt_kp_in_image, midx, ori_depth, label in zip(
            proposals, gt_keypoints, keypoint_matched_idxs, ori_depths,
            gt_labels):
        kp = gt_kp_in_image[midx].view(-1, 2, 3, 1, 1).repeat(1, 1, 1, H, W)
        voter = gt_voters(H, ori_depth, proposals_per_image,
                          config.Unreal_camera_mat)
        gt_offset = kp - voter.unsqueeze(1).repeat(1, 2, 1, 1, 1)
        gt_axis.append(F.normalize((kp[:, 1] - kp[:, 0]), dim=1))
        voter_labels.append(label[midx] - 1)
        vote_offset_targets.append(gt_offset)

    voter_labels = torch.cat(voter_labels, dim=0)
    keypoint_offsets = keypoint_offsets[torch.arange(N), voter_labels]

    vote_offset_targets_all = torch.cat(vote_offset_targets, dim=0)
    voter_axis_labels = torch.norm(vote_offset_targets_all,
                                   dim=2).argmin(dim=1).float()
    label_loss = F.binary_cross_entropy_with_logits(keypoint_offsets[:, 3],
                                                    voter_axis_labels)

    gt_axis = torch.cat(gt_axis, dim=0)

    axis_loss = torch.cross(
        (keypoint_offsets[:, :3] - vote_offset_targets_all[:, 1]),
        gt_axis,
        dim=1)
    axis_loss = torch.norm(axis_loss, dim=1)
    # gt_axis_distance = torch.cross(votes_targets[:, 1], gt_axis, dim=1)
    # gt_axis_distance = torch.norm(gt_axis_distance, dim=1)
    # axis_loss = axis_loss/gt_axis_distance

    norm_loss = torch.abs(torch.sum(keypoint_offsets[:, :3] * gt_axis, dim=1))
    axis_loss = (axis_loss + norm_loss) * masks_for_vote
    non_zero_index = torch.sum(masks_for_vote, (1, 2)).nonzero()
    axis_loss = torch.mean(
        torch.sum(axis_loss,
                  (1, 2))[non_zero_index] / torch.sum(masks_for_vote,
                                                      (1, 2))[non_zero_index])

    # norm_loss = torch.mean(torch.norm(keypoint_offsets[:, :3],dim=1))
    # norm_loss = torch.mean(torch.abs(torch.sum(F.normalize(keypoint_offsets[:, :3], dim=1)*gt_axis, dim=1)))
    return axis_loss + label_loss
Example #15
    def edge_init(self, edges):
        # Calculate angles k -> j -> i
        R1, R2 = edges.src['o'], edges.dst['o']
        x = torch.sum(R1 * R2, dim=-1)
        y = torch.cross(R1, R2)
        y = torch.norm(y, dim=-1)
        angle = torch.atan2(y, x)
        # Transform via angles
        cbf = [f(angle) for f in self.sbf_layer.get_sph_funcs()]
        cbf = torch.stack(cbf, dim=1)  # [None, 7]
        cbf = cbf.repeat_interleave(self.num_radial, dim=1)  # [None, 42]
        # Notice: it's dst, not src
        sbf = edges.dst['rbf_env'] * cbf  # [None, 42]
        return {'sbf': sbf}
Example #16
def qrot(q, v):
    #TODO can I change this function to also work with constant v and changing quaternions?
    # if not just tile/stack v accordingly
    """
    Rotate vector(s) v about the rotation described by quaternion(s) q.
    Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
    where * denotes any number of dimensions.
    Returns a tensor of shape (*, 3).

    source: https://github.com/facebookresearch/QuaterNet/blob/master/common/quaternion.py
    """
    assert q.shape[-1] == 4
    assert v.shape[-1] == 3
    assert q.shape[:-1] == v.shape[:-1]

    original_shape = list(v.shape)
    q = q.view(-1, 4)
    v = v.view(-1, 3)

    qvec = q[:, 1:]
    uv = torch.cross(qvec, v, dim=1)
    uuv = torch.cross(qvec, uv, dim=1)
    return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape)
Example #17
def compute_centers_and_normals(points, connectivity):
    dimension = points.shape[1]
    a = points[connectivity[:, 0]]
    b = points[connectivity[:, 1]]
    if dimension == 2:
        centers = (a + b) / 2.
        normals = b - a
    elif dimension == 3:
        c = points[connectivity[:, 2]]
        centers = (a + b + c) / 3.
        normals = torch.cross(b - a, c - a) / 2.
    else:
        raise RuntimeError('Not expected')
    return centers, normals
Example #18
    def grad(f):
        gf = torch.zeros(m, 3, f.shape[-1], device=device, dtype=dtype)
        for i in range(3):
            s = (i + 1) % 3
            t = (i + 2) % 3
            v = -torch.cross(XF[t] - XF[s], N)
            if SAVE_MEMORY:
                gf.add_(f[T[:, i], None, :] *
                        (dA[:, 0, None, None] *
                         v[:, :, None]))  # slower, less memory
            else:
                gf.add_(f[T[:, i], None, :] *
                        (dA[:, 0, None, None] * v[:, :, None]))
        return gf
Example #19
def get_rotation_from_two_vecs(rotation):
    # rotation: Nx3x2
    rotvec1 = rotation[:, :, 0] / torch.norm(
        rotation[:, :, 0], dim=1, keepdim=True)
    rotvec2_proj = torch.sum(rotvec1 * rotation[:, :, 1], dim=1,
                             keepdim=True) * rotvec1
    rotvec2 = rotation[:, :, 1] - rotvec2_proj
    rotvec2 = rotvec2 / torch.norm(rotvec2, dim=1, keepdim=True)
    rotvec3 = torch.cross(rotvec1, rotvec2, dim=1)
    rotmat = torch.cat(
        (rotvec1.view(-1, 3, 1), rotvec2.view(-1, 3, 1), rotvec3.view(
            -1, 3, 1)),
        dim=2)
    return rotmat
Example #20
    def forward(self, inputs):
        atom_mask = inputs[Properties.atom_mask]
        neighbor_mask = inputs[Properties.neighbor_mask]
        distances = inputs["representation"]

        # Compute Lennard-Jones potential
        power_6 = torch.where(
            neighbor_mask == 1,
            (self.r_equilibrium / distances)**6,
            torch.zeros_like(distances),
        )
        r_cut = self.cutoff(distances) * neighbor_mask

        yi = 0.5 * torch.sum((power_6**2 - power_6) * r_cut, dim=2)[:, :, None]

        y = self.well_depth * self.atom_pool(yi, atom_mask)

        # collect results
        result = {self.property: y}

        if self.derivative is not None:
            sign = -1.0 if self.negative_dr else 1.0
            dy = grad(
                result[self.property],
                inputs[Properties.R],
                grad_outputs=torch.ones_like(result[self.property]),
                create_graph=self.create_graph,
                retain_graph=True,
            )[0]
            result[self.derivative] = sign * dy

        if self.stress is not None:
            cell = inputs[Properties.cell]
            # Compute derivative with respect to cell displacements
            stress = grad(
                result[self.property],
                inputs["displacement"],
                grad_outputs=torch.ones_like(result[self.property]),
                create_graph=self.create_graph,
                retain_graph=True,
            )[0]
            # Compute cell volume
            volume = torch.sum(cell[:, 0] *
                               torch.cross(cell[:, 1], cell[:, 2]),
                               dim=1,
                               keepdim=True)[..., None]
            # Finalize stress tensor
            result[self.stress] = stress / volume

        return result
Example #21
    def surface_normals(self):
        if self._surface_normals_update:
            v10 = self.face_vertices[:, :, 0] - self.face_vertices[:, :, 1]
            v12 = self.face_vertices[:, :, 2] - self.face_vertices[:, :, 1]
            # v10, v12 are batch_size x #faces x 3
            self._surface_normals = F.normalize(torch.cross(v12, v10),
                                                p=2,
                                                dim=2,
                                                eps=1e-6)
            self._surface_normals_update = False
        # if a face consists of the vertices v0 v1 v2 (anticlockwise), then
        # this calculates the normal as norm((v2 - v1) x (v0 - v1))
        # the return value is a 3D matrix with batch_size x #faces x normal
        return self._surface_normals
Example #22
def compute_outer_normal(vert, triv, samples):
    edge_1 = torch.index_select(vert, 0, triv[:, 1]) - torch.index_select(
        vert, 0, triv[:, 0])
    edge_2 = torch.index_select(vert, 0, triv[:, 2]) - torch.index_select(
        vert, 0, triv[:, 0])

    face_norm = torch.cross(1e4 * edge_1, 1e4 * edge_2)

    normal = my_zeros(vert.shape)
    for d in range(3):
        normal = torch.index_add(normal, 0, triv[:, d], face_norm)
    normal = normal / (1e-5 + normal.norm(dim=1, keepdim=True))

    return normal[samples, :]
Example #23
    def forward(self, data):
        pos = data.pos
        if self.regress_forces:
            pos = pos.requires_grad_(True)
        batch = data.batch
        x = self.embedding(data.atomic_numbers.long())
        edge_index = radius_graph(pos, r=self.cutoff, batch=batch)

        j, i = edge_index
        idx_i, idx_j, idx_k, idx_kj, idx_ji = self.triplets(
            edge_index, num_nodes=x.size(0))

        # Calculate distances.
        dist = (pos[i] - pos[j]).pow(2).sum(dim=-1).sqrt()

        # Calculate angles.
        pos_i = pos[idx_i].detach()
        pos_ji, pos_ki = (
            pos[idx_j].detach() - pos_i,
            pos[idx_k].detach() - pos_i,
        )
        a = (pos_ji * pos_ki).sum(dim=-1)
        b = torch.cross(pos_ji, pos_ki).norm(dim=-1)
        angle = torch.atan2(b, a)

        rbf = self.rbf(dist)
        sbf = self.sbf(dist, angle, idx_kj)

        # Embedding block.
        x = self.emb(x, rbf, i, j)
        P = self.output_blocks[0](x, rbf, i, num_nodes=pos.size(0))

        # Interaction blocks.
        for interaction_block, output_block in zip(self.interaction_blocks,
                                                   self.output_blocks[1:]):
            x = interaction_block(x, rbf, sbf, idx_kj, idx_ji)
            P += output_block(x, rbf, i)

        energy = P.sum(dim=0) if batch is None else scatter(P, batch, dim=0)

        if self.regress_forces:
            forces = -1 * (torch.autograd.grad(
                energy,
                pos,
                grad_outputs=torch.ones_like(energy),
                create_graph=True,
            )[0])
            return energy, forces
        else:
            return energy
Example #24
def rot6d(x_raw, y_raw):
    """Convert 6D rotation representation to 3x3 rotation matrix.
    Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019
    Input:
        (B,6) Batch of 6-D rotation representations
    Output:
        (B,3,3) Batch of corresponding rotation matrices
    """
    a1 = x_raw
    a2 = y_raw
    b1 = F.normalize(a1)
    b2 = F.normalize(a2 - torch.einsum('bi,bi->b', b1, a2).unsqueeze(-1) * b1)
    b3 = torch.cross(b1, b2, dim=1)
    return torch.stack((b1, b2, b3), dim=-1)
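A quick check of rot6d (a hypothetical usage sketch, assuming F is torch.nn.functional): the first two canonical basis vectors should recover the identity rotation.

import torch

x_raw = torch.tensor([[1.0, 0.0, 0.0]])  # (B, 3)
y_raw = torch.tensor([[0.0, 1.0, 0.0]])  # (B, 3)
rot6d(x_raw, y_raw)  # (1, 3, 3), here the 3x3 identity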
Example #25
    def get_surface_normal(self, cam_points, nei=1):
        cam_points_ctr = cam_points[:, :-1, nei:-nei, nei:-nei]
        cam_points_x0 = cam_points[:, :-1, nei:-nei, 0:-(2 * nei)]
        cam_points_y0 = cam_points[:, :-1, 0:-(2 * nei), nei:-nei]
        cam_points_x1 = cam_points[:, :-1, nei:-nei, 2 * nei:]
        cam_points_y1 = cam_points[:, :-1, 2 * nei:, nei:-nei]
        cam_points_x0y0 = cam_points[:, :-1, 0:-(2 * nei), 0:-(2 * nei)]
        cam_points_x0y1 = cam_points[:, :-1, 2 * nei:, 0:-(2 * nei)]
        cam_points_x1y0 = cam_points[:, :-1, 0:-(2 * nei), 2 * nei:]
        cam_points_x1y1 = cam_points[:, :-1, 2 * nei:, 2 * nei:]

        vector_x0 = cam_points_x0 - cam_points_ctr
        vector_y0 = cam_points_y0 - cam_points_ctr
        vector_x1 = cam_points_x1 - cam_points_ctr
        vector_y1 = cam_points_y1 - cam_points_ctr
        vector_x0y0 = cam_points_x0y0 - cam_points_ctr
        vector_x0y1 = cam_points_x0y1 - cam_points_ctr
        vector_x1y0 = cam_points_x1y0 - cam_points_ctr
        vector_x1y1 = cam_points_x1y1 - cam_points_ctr

        normal_0 = F.normalize(torch.cross(vector_x0, vector_y0, dim=1),
                               dim=1).unsqueeze(0)
        normal_1 = F.normalize(torch.cross(vector_x1, vector_y1, dim=1),
                               dim=1).unsqueeze(0)
        normal_2 = F.normalize(torch.cross(vector_x0y0, vector_x0y1, dim=1),
                               dim=1).unsqueeze(0)
        normal_3 = F.normalize(torch.cross(vector_x1y0, vector_x1y1, dim=1),
                               dim=1).unsqueeze(0)

        normals = torch.cat((normal_0, normal_1, normal_2, normal_3),
                            dim=0).mean(0)
        normals = F.normalize(normals, dim=1)

        refl = nn.ReflectionPad2d(nei)
        normals = refl(normals)

        return normals
Example #26
def generate_scenes(camLocs, objects, envmap=None):
    scenes = []
    up = torch.tensor([0.0, 1.0, 0.0])
    offset_factor = 2.0
    light_intensity = 500.0

    for ind, loc in enumerate(camLocs):
        camera = pyredner.Camera(position=camera0.look_at + radius * loc,
                                 look_at=camera0.look_at,
                                 up=camera0.up,
                                 fov=camera0.fov,
                                 resolution=camera0.resolution)

        normal = camera.position.div(torch.norm(camera.position))
        tangent = torch.cross(normal, up)
        tangent = tangent.div(torch.norm(tangent))
        bitangent = torch.cross(normal, tangent)
        bitangent = bitangent.div(torch.norm(bitangent))

        lightPos = camera.position + offset_factor * tangent
        light = pyredner.generate_quad_light(position=lightPos,
                                             look_at=camera0.look_at,
                                             size=torch.tensor([0.1, 0.1]),
                                             intensity=torch.tensor([
                                                 light_intensity,
                                                 light_intensity,
                                                 light_intensity
                                             ]))

        # Camera data for voxel carving
        #print(str(ind) + " " + str(camera.position.data[0].item()) + " " + str(camera.position.data[1].item()) + " " + str(camera.position.data[2].item()) + " " + str(camera.look_at.data[0].item()) + " " + str(camera.look_at.data[1].item()) + " " + str(camera.look_at.data[2].item()))

        scenes.append(
            pyredner.Scene(camera=camera,
                           objects=[objects[0], light],
                           envmap=envmap))
    return scenes
Example #27
    def forward(self, gt_depth, pred_depth, select=True):
        """
        Virtual normal loss.
        :param gt_depth: ground-truth depth map, [B, W, H, C]
        :param pred_depth: predicted depth map, [B, W, H, C]
        :param select: if True, discard the smallest 25% of per-sample losses before averaging
        :return:
        """
        gt_points, dt_points = self.select_points_groups(gt_depth, pred_depth)

        gt_p12 = gt_points[:, :, :, 1] - gt_points[:, :, :, 0]
        gt_p13 = gt_points[:, :, :, 2] - gt_points[:, :, :, 0]
        dt_p12 = dt_points[:, :, :, 1] - dt_points[:, :, :, 0]
        dt_p13 = dt_points[:, :, :, 2] - dt_points[:, :, :, 0]

        gt_normal = torch.cross(gt_p12, gt_p13, dim=2)
        dt_normal = torch.cross(dt_p12, dt_p13, dim=2)
        dt_norm = torch.norm(dt_normal, 2, dim=2, keepdim=True)
        gt_norm = torch.norm(gt_normal, 2, dim=2, keepdim=True)
        dt_mask = dt_norm == 0.0
        gt_mask = gt_norm == 0.0
        dt_mask = dt_mask.to(torch.float32)
        gt_mask = gt_mask.to(torch.float32)
        dt_mask *= 0.01
        gt_mask *= 0.01
        gt_norm = gt_norm + gt_mask
        dt_norm = dt_norm + dt_mask
        gt_normal = gt_normal / gt_norm
        dt_normal = dt_normal / dt_norm
        loss = torch.abs(gt_normal - dt_normal)
        # print(loss.shape)
        loss = torch.sum(torch.sum(loss, dim=2), dim=0)
        # print(loss.shape)
        if select:
            loss, indices = torch.sort(loss, dim=0, descending=False)
            loss = loss[int(loss.size(0) * 0.25):]
        loss = torch.mean(loss)
        return loss
Example #28
    def forward(self, t, input):
        # Calculate quadrotor state derivative for numerical integration
        commands = input[-4:, :]
        state = input[:-4, :]
        ang = state[0:3, 0]
        rate = state[6:9, :]
        vel = state[9:12, :]

        thrusts = self.param_net(commands)  # update thrusts
        torques = torch.mm(self.torque_mat, thrusts)  # update torques

        # Calculate rotation matrix
        s_phi = (torch.sin(ang[0])).item()
        c_phi = (torch.cos(ang[0])).item()
        s_theta = (torch.sin(ang[1])).item()
        c_theta = (torch.cos(ang[1])).item()
        s_psi = (torch.sin(ang[2])).item()
        c_psi = (torch.cos(ang[2])).item()

        rbi = torch.tensor([[
            c_theta * c_psi, c_psi * s_theta * s_phi - c_phi * s_psi,
            c_phi * c_psi * s_theta + s_phi * s_psi
        ],
                            [
                                c_theta * s_psi,
                                s_psi * s_theta * s_phi + c_phi * c_psi,
                                c_phi * s_psi * s_theta - s_phi * c_psi
                            ], [-s_theta, c_theta * s_phi, c_theta * c_phi]])

        # Calculate Euler angle time derivatives
        M = torch.tensor([[1, 0, -s_phi], [0, c_phi, s_phi * c_theta],
                          [0, -s_phi, c_theta * c_phi]])
        m_inv = torch.inverse(M)
        ang_dot = torch.mm(m_inv, rate)

        # Linear Acceleration
        vel_dot = torch.mm(rbi, torch.mm(self.select,
                                         torques)) - self.kt * vel - self.g

        # Rotational Acceleration
        rate_dot = torch.mm(
            torch.inverse(self.I), torques[1:] -
            torch.cross(rate, torch.mm(self.I, rate), dim=0) - self.kr * rate)

        # Concatenate into final state derivative vector
        state_dot = torch.cat(
            [ang_dot, vel, rate_dot, vel_dot,
             torch.zeros((4, 1))])
        return state_dot
Example #29
def depth_to_normals(depth: torch.Tensor,
                     camera_matrix: torch.Tensor,
                     normalize_points: bool = False) -> torch.Tensor:
    """Compute the normal surface per pixel.

    Args:
        depth: image tensor containing a depth value per pixel with shape :math:`(B, 1, H, W)`.
        camera_matrix: tensor containing the camera intrinsics with shape :math:`(B, 3, 3)`.
        normalize_points: whether to normalise the pointcloud. This must be set to `True` when the depth is
        represented as the Euclidean ray length from the camera position.

    Return:
        tensor with a normal surface vector per pixel of the same resolution as the input :math:`(B, 3, H, W)`.

    Example:
        >>> depth = torch.rand(1, 1, 4, 4)
        >>> K = torch.eye(3)[None]
        >>> depth_to_normals(depth, K).shape
        torch.Size([1, 3, 4, 4])
    """
    if not isinstance(depth, torch.Tensor):
        raise TypeError(
            f"Input depht type is not a torch.Tensor. Got {type(depth)}.")

    if not (len(depth.shape) == 4 and depth.shape[-3] == 1):
        raise ValueError(
            f"Input depth musth have a shape (B, 1, H, W). Got: {depth.shape}")

    if not isinstance(camera_matrix, torch.Tensor):
        raise TypeError(f"Input camera_matrix type is not a torch.Tensor. "
                        f"Got {type(camera_matrix)}.")

    if not (len(camera_matrix.shape) == 3
            and camera_matrix.shape[-2:] == (3, 3)):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")

    # compute the 3d points from depth
    xyz: torch.Tensor = depth_to_3d(depth, camera_matrix,
                                    normalize_points)  # Bx3xHxW

    # compute the pointcloud spatial gradients
    gradients: torch.Tensor = spatial_gradient(xyz)  # Bx3x2xHxW

    # compute normals
    a, b = gradients[:, :, 0], gradients[:, :, 1]  # Bx3xHxW

    normals: torch.Tensor = torch.cross(a, b, dim=1)  # Bx3xHxW
    return F.normalize(normals, dim=1, p=2)
Example #30
    def __init__(self, fdir, filename):
        """Loads a Wavefront OBJ file. """
        self.vertices = []
        self.faces = []
        self.normals = []
        self.vcount = 0
        self.fcount = 0
        self.light = torch.Tensor([0, 0, 0]).requires_grad_(
            False)  # by default, no light
        for line in open(fdir + filename, "r"):
            if line.startswith('#'):
                continue
            if line.startswith('o'):
                continue
            values = line.split()
            if not values:
                continue
            if values[0] == 'v':
                self.vertices.append(float(values[1]))
                self.vertices.append(float(values[2]))
                self.vertices.append(float(values[3]))
                self.vcount += 1
            elif values[0] == 'f':
                self.faces.append(int(values[1]) - 1)
                self.faces.append(int(values[2]) - 1)
                self.faces.append(int(values[3]) - 1)
                self.fcount += 1
        for i in range(self.fcount):
            face_idx = 3 * i
            p0 = [
                self.vertices[3 * (self.faces[face_idx] + 0)],
                self.vertices[3 * (self.faces[face_idx] + 0) + 1],
                self.vertices[3 * (self.faces[face_idx] + 0) + 2]
            ]
            p1 = [
                self.vertices[3 * (self.faces[face_idx + 1])],
                self.vertices[3 * (self.faces[face_idx + 1]) + 1],
                self.vertices[3 * (self.faces[face_idx + 1]) + 2]
            ]
            p2 = [
                self.vertices[3 * (self.faces[face_idx + 2])],
                self.vertices[3 * (self.faces[face_idx + 2]) + 1],
                self.vertices[3 * (self.faces[face_idx + 2]) + 2]
            ]
            d0 = torch.Tensor(p1) - torch.Tensor(p0)
            d1 = torch.Tensor(p2) - torch.Tensor(p1)
            n = pykay.normalize(torch.cross(d0, d1))
            for j in range(3):  # use j so the outer face-loop variable is not shadowed
                self.normals.append(n[j])
Example #31
def compute_splitting_faces(meshes, index, angle=50, show=False):
    eps = .00001

    # extract vertex coordinates for each vertex in face
    faces = meshes['face_archive'][index]
    verts = meshes['update'][index].vertices
    face_list = meshes['face_lists'][index]
    p1 = torch.index_select(verts, 0, faces[:, 1])
    p2 = torch.index_select(verts, 0, faces[:, 0])
    p3 = torch.index_select(verts, 0, faces[:, 2])

    # calculate normals of each face
    e1 = p2 - p1
    e2 = p3 - p1
    face_normals = torch.cross(e1, e2)
    qn = torch.norm(face_normals, p=2, dim=1).detach().view(-1, 1)
    face_normals = face_normals.div(qn.expand_as(face_normals))
    main_face_normals = torch.index_select(face_normals, 0, face_list[:, 0, 2])

    # calculate the curvature with the 3 neighbor faces
    # 1
    face_1_normals = torch.index_select(face_normals, 0, face_list[:, 0, 0])
    curvature_proxi_rad = torch.sum(main_face_normals * face_1_normals,
                                    dim=1).clamp(-1.0 + eps, 1.0 - eps).acos()
    curvature_proxi_1 = (curvature_proxi_rad).view(-1, 1)
    # 2
    face_2_normals = torch.index_select(face_normals, 0, face_list[:, 1, 0])
    curvature_proxi_rad = torch.sum(main_face_normals * face_2_normals,
                                    dim=1).clamp(-1.0 + eps, 1.0 - eps).acos()
    curvature_proxi_2 = (curvature_proxi_rad).view(-1, 1)
    # 3
    face_3_normals = torch.index_select(face_normals, 0, face_list[:, 2, 0])
    curvature_proxi_rad = torch.sum(main_face_normals * face_3_normals,
                                    dim=1).clamp(-1.0 + eps, 1.0 - eps).acos()
    curvature_proxi_3 = (curvature_proxi_rad).view(-1, 1)

    # get average over neighbors
    curvature_proxi_full = torch.cat(
        (curvature_proxi_1, curvature_proxi_2, curvature_proxi_3), dim=1)
    curvature_proxi = torch.mean(curvature_proxi_full, dim=1)

    # select faces with high curvature and return their index
    splitting_faces = np.where(curvature_proxi.cpu() * 180 / np.pi > angle)[0]

    if splitting_faces.shape[0] < 3:
        splitting_faces = curvature_proxi.topk(3, sorted=False)[1]
    else:
        splitting_faces = torch.LongTensor(splitting_faces).to(faces.device)
    return splitting_faces
Example #32
def get_orthog(vectors):
    out = torch.zeros(vectors.size(0), 3, 3)
    for i in range(vectors.size(0)):
        vec0 = vectors[i]
        if torch.norm(vec0) == 0:
            out[i, :, :] = torch.eye(3)
            continue

        vec0 = vec0 / torch.norm(vec0)

        vec1 = torch.tensor([vec0[1], vec0[2],
                             vec0[0]])  # find a non-parallel vector
        vec2 = torch.cross(vec0, vec1)  # find an orthogonal vector
        vec2 = vec2 / torch.norm(vec2)  # normalize it
        vec1 = torch.cross(
            vec0, vec2
        )  # find the last orthogonal vector, overwrite vec1 which was only non-parallel
        vec1 = vec1 / torch.norm(vec1)  # normalize it

        out[i, 0, :] = vec0
        out[i, 1, :] = vec1
        out[i, 2, :] = vec2

    return out
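A hedged sanity check for get_orthog: for any nonzero input vector, the returned rows should be orthonormal, so R @ R.t() is approximately the identity.

import torch

vectors = torch.tensor([[0.0, 0.0, 2.0]])
R = get_orthog(vectors)[0]
R @ R.t()  # ~3x3 identity: rows are orthonormal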
Example #33
def depth2normal_perse(depth, intrinsics):
    global flag_XY, X, Y
    # depth: [B,1,H,W]
    # intrinsics: [fx, fy, cx, cy]
    fx, fy, cx, cy = intrinsics
    B, _, H, W = depth.shape
    inv_fx = 1.0 / fx
    inv_fy = 1.0 / fy
    depth = depth[:, 0, :, :]

    if flag_XY:
        Y, X = torch.meshgrid(torch.tensor(range(H)), torch.tensor(range(W)))
        X = X.unsqueeze(0).repeat(B, 1, 1).float().cuda()  # (B,H,W)
        Y = Y.unsqueeze(0).repeat(B, 1, 1).float().cuda()
        flag_XY = False

    x_cord_p = (X - cx) * inv_fx * depth
    y_cord_p = (Y - cy) * inv_fy * depth

    p = torch.stack([x_cord_p, y_cord_p, depth], dim=3)  # (B,H,W,3)

    # vector of p_3d in west, south, east, north direction
    p_ctr = p[:, 1:-1, 1:-1, :]
    vw = p_ctr - p[:, 1:-1, 2:, :]
    vs = p[:, 2:, 1:-1, :] - p_ctr
    ve = p_ctr - p[:, 1:-1, :-2, :]
    vn = p[:, :-2, 1:-1, :] - p_ctr
    normal_1 = torch.cross(vs, vw)  # (B,H-2,W-2,3)
    normal_2 = torch.cross(vn, ve)
    normal_1 = normalize(normal_1)
    normal_2 = normalize(normal_2)
    normal = normal_1 + normal_2
    normal = normalize(normal)
    paddings = (0, 0, 1, 1, 1, 1, 0, 0)
    normal = torch.nn.functional.pad(normal, paddings, 'constant')  # (B,H,W,3)
    return normal  # (B,H,W,3)
Example #34
    def backface_culling(self, vert, bfm):
        N, nver, _ = vert.shape

        # Compute normal loss
        pt0 = vert[:, bfm.model['tri'][:, 0], :]  # (N, ntri, 3)
        pt1 = vert[:, bfm.model['tri'][:, 1], :]  # (N, ntri, 3)
        pt2 = vert[:, bfm.model['tri'][:, 2], :]  # (N, ntri, 3)
        tri_normal = torch.cross(pt0 - pt1, pt0 - pt2, dim=-1)  # (N, ntri, 3). normal of each triangle
        tri_normal = torch.cat([tri_normal, torch.zeros_like(tri_normal[:, :1, :])], dim=1)  # (N, ntri + 1, 3)
        vert_tri_normal = tri_normal[:, bfm.tri_idx.ravel(), :].view(N, nver, bfm.tri_idx.shape[1], 3)
        normal = torch.sum(vert_tri_normal, dim=2)  # (N, nver, 3)

        # Compute mask
        vis_mask = torch.lt(normal[:, :, 2:3], 0.0)
        return vis_mask
Example #35
    def test_remote_tensor_binary_methods(self):

        hook = TorchHook(verbose=False)
        local = hook.local_worker
        remote = VirtualWorker(hook, 0)
        local.add_worker(remote)

        x = torch.FloatTensor([1, 2, 3, 4, 5]).send(remote)
        y = torch.FloatTensor([1, 2, 3, 4, 5]).send(remote)
        assert (x.add_(y).get() == torch.FloatTensor([2, 4, 6, 8, 10])).all()

        x = torch.FloatTensor([1, 2, 3, 4]).send(remote)
        y = torch.FloatTensor([[1, 2, 3, 4]]).send(remote)
        z = torch.matmul(x, y.t())
        assert (torch.equal(z.get(), torch.FloatTensor([30])))

        z = torch.add(x, y)
        assert (torch.equal(z.get(), torch.FloatTensor([[2, 4, 6, 8]])))

        x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]]).send(remote)
        y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]]).send(remote)
        z = torch.cross(x, y, dim=1)
        assert (torch.equal(z.get(), torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]])))

        x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]]).send(remote)
        y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]]).send(remote)
        z = torch.dist(x, y)
        t = torch.FloatTensor([z])
        assert (torch.equal(t, torch.FloatTensor([0.])))

        x = torch.FloatTensor([1, 2, 3]).send(remote)
        y = torch.FloatTensor([1, 2, 3]).send(remote)
        z = torch.dot(x, y)
        t = torch.FloatTensor([z])
        assert torch.equal(t, torch.FloatTensor([14]))

        z = torch.eq(x, y)
        assert (torch.equal(z.get(), torch.ByteTensor([1, 1, 1])))

        z = torch.ge(x, y)
        assert (torch.equal(z.get(), torch.ByteTensor([1, 1, 1])))
Example #36
    def test_local_tensor_binary_methods(self):
        ''' Unit tests for methods mentioned on issue 1385
        https://github.com/OpenMined/PySyft/issues/1385'''

        x = torch.FloatTensor([1, 2, 3, 4])
        y = torch.FloatTensor([[1, 2, 3, 4]])
        z = torch.matmul(x, y.t())
        assert (torch.equal(z, torch.FloatTensor([30])))

        z = torch.add(x, y)
        assert (torch.equal(z, torch.FloatTensor([[2, 4, 6, 8]])))

        x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
        y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
        z = torch.cross(x, y, dim=1)
        assert (torch.equal(z, torch.FloatTensor([[0, 0, 0], [0, 0, 0], [0, 0, 0]])))

        x = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
        y = torch.FloatTensor([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
        z = torch.dist(x, y)
        assert (torch.equal(torch.FloatTensor([z]), torch.FloatTensor([0])))

        x = torch.FloatTensor([1, 2, 3])
        y = torch.FloatTensor([1, 2, 3])
        z = torch.dot(x, y)
        # There is an issue with some Macs getting 0.0 instead
        # Solved here: https://github.com/pytorch/pytorch/issues/5609
        assert torch.equal(torch.FloatTensor([z]), torch.FloatTensor([14]))

        z = torch.eq(x, y)
        assert (torch.equal(z, torch.ByteTensor([1, 1, 1])))

        z = torch.ge(x, y)
        assert (torch.equal(z, torch.ByteTensor([1, 1, 1])))

        x = torch.FloatTensor([1, 2, 3, 4, 5])
        y = torch.FloatTensor([1, 2, 3, 4, 5])
        assert (x.add_(y) == torch.FloatTensor([2, 4, 6, 8, 10])).all()
Example #37
    def forward(ctx, input, other, dim=-1):
        ctx.dim = dim
        ctx.save_for_backward(input, other)
        return torch.cross(input, other, ctx.dim)