Code example #1
    def compute_dr_wrt(self, wrt):

        if wrt is not self.camera and wrt is not self.v:
            return None

        visibility = self.visibility_image
        visible = np.nonzero(visibility.ravel() != 4294967295)[0]
        barycentric = self.barycentric_image
        if wrt is self.camera:
            shape = visibility.shape
            depth = self.depth_image

            if self.overdraw:
                result1 = common.dImage_wrt_2dVerts_bnd(
                    depth, visible, visibility, barycentric,
                    self.frustum['width'], self.frustum['height'],
                    self.v.r.size // 3, self.f,
                    self.boundaryid_image != 4294967295)
            else:
                result1 = common.dImage_wrt_2dVerts(depth, visible, visibility,
                                                    barycentric,
                                                    self.frustum['width'],
                                                    self.frustum['height'],
                                                    self.v.r.size // 3, self.f)

            # result1 = common.dImage_wrt_2dVerts(depth, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size/3, self.f)

            return result1

        elif wrt is self.v:

            IS = np.tile(col(visible), (1, 9)).ravel()
            JS = col(self.f[visibility.ravel()[visible]].ravel())
            JS = np.hstack((JS * 3, JS * 3 + 1, JS * 3 + 2)).ravel()

            # FIXME: there should be a faster way to get the camera axis.
            # But it should be carefully tested with distortion present!
            pts = np.array([[self.camera.c.r[0], self.camera.c.r[1], 2],
                            [self.camera.c.r[0], self.camera.c.r[1], 1]])
            pts = self.camera.unproject_points(pts)
            cam_axis = pts[0, :] - pts[1, :]

            if True:  # use barycentric coordinates (correct way)
                w = visibility.shape[1]
                pxs = np.asarray(visible % w, np.int32)
                pys = np.asarray(visible // w, np.int32)
                bc0 = col(barycentric[pys, pxs, 0])
                bc1 = col(barycentric[pys, pxs, 1])
                bc2 = col(barycentric[pys, pxs, 2])
                bc = np.hstack(
                    (bc0, bc0, bc0, bc1, bc1, bc1, bc2, bc2, bc2)).ravel()
            else:  # each vert contributes equally (an approximation)
                bc = 1. / 3.

            data = np.tile(row(cam_axis), (IS.size // 3, 1)).ravel() * bc
            result2 = sp.csc_matrix(
                (data, (IS, JS)),
                shape=(self.frustum['height'] * self.frustum['width'],
                       self.v.r.size))
            return result2
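Note: every example on this page leans on the row and col helpers from chumpy.utils. They are simple reshape shorthands; a minimal sketch of what they do (matching the chumpy definitions as far as I have checked):

import numpy as np

def row(A):
    # view A as a 1 x N row vector
    return np.asarray(A).reshape((1, -1))

def col(A):
    # view A as an N x 1 column vector
    return np.asarray(A).reshape((-1, 1))

a = np.arange(3)
assert row(a).shape == (1, 3)
assert col(a).shape == (3, 1)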
Code example #2
File: geometry.py  Project: chenliu0831/opendr
def SecondFundamentalForm(v, f):
    from chumpy import hstack, vstack
    from chumpy.linalg import Pinv
    nbrs = MatVecMult(FirstEdgesMtx(v, f, want_big=True), v.ravel()).reshape(
        (-1, 3))

    b0 = NormalizedNx3(VertNormalsScaled(f=f, v=v)).reshape((-1, 3))
    b1 = NormalizedNx3(CrossProduct(b0, nbrs - v)).reshape((-1, 3))
    b2 = NormalizedNx3(CrossProduct(b0, b1)).reshape((-1, 3))

    cnct = get_vert_connectivity(v.r, f)
    ffs = []
    for i in range(v.size // 3):
        nbrs = v[np.nonzero(np.asarray(cnct[i].todense()).ravel())[0]] - row(
            v[i])
        us = nbrs.dot(b2[i])
        vs = nbrs.dot(b1[i])
        hs = nbrs.dot(b0[i])
        coeffs = Pinv(
            hstack((col((us * .5)**2), col(us * vs), col(
                (vs * .5)**2)))).dot(hs)
        ffs.append(row(coeffs))
        # if i == 3586:
        #     import pdb; pdb.set_trace()

    ffs = vstack(ffs)
    return ffs
Code example #3
File: camera.py  Project: minar09/pix2surf_windows
    def r_and_derivatives(self):
        tmp = self.v.dot(Rodrigues(self.rt)) + self.t

        return ch.hstack((
            col(2. / (self.right - self.left) * tmp[:, 0] - (self.right + self.left) / (
                self.right - self.left) + 1.) * self.width / 2.,
            col(2. / (self.bottom - self.top) * tmp[:, 1] - (self.bottom + self.top) / (
                self.bottom - self.top) + 1.) * self.height / 2.,
        ))
Code example #4
def flow_to(self, v_next, cam_next):
    from chumpy.ch import MatVecMult

    color_image = self.r
    visibility = self.visibility_image
    pxpos = np.zeros_like(self.color_image)
    pxpos[:, :, 0] = np.tile(row(np.arange(self.color_image.shape[1])),
                             (self.color_image.shape[0], 1))
    pxpos[:, :, 2] = np.tile(col(np.arange(self.color_image.shape[0])),
                             (1, self.color_image.shape[1]))

    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    num_visible = len(visible)

    barycentric = self.barycentric_image

    # map 3d to 3d
    JS = col(self.f[visibility.ravel()[visible]]).ravel()
    IS = np.tile(col(np.arange(JS.size // 3)), (1, 3)).ravel()
    data = barycentric.reshape((-1, 3))[visible].ravel()

    # replicate to xyz
    IS = np.concatenate((IS * 3, IS * 3 + 1, IS * 3 + 2))
    JS = np.concatenate((JS * 3, JS * 3 + 1, JS * 3 + 2))
    data = np.concatenate((data, data, data))

    verts_to_visible = sp.csc_matrix((data, (IS, JS)),
                                     shape=(np.max(IS) + 1, self.v.r.size))

    v_old = self.camera.v
    cam_old = self.camera

    if cam_next is None:
        cam_next = self.camera

    self.camera.v = MatVecMult(verts_to_visible, self.v.r)
    r1 = self.camera.r.copy()

    self.camera = cam_next
    self.camera.v = MatVecMult(verts_to_visible, v_next)
    r2 = self.camera.r.copy()

    n_channels = self.camera.shape[1]
    flow = r2 - r1
    flow_im = np.zeros(
        (self.frustum['height'], self.frustum['width'], n_channels)).reshape(
            (-1, n_channels))

    flow_im[visible] = flow
    flow_im = flow_im.reshape(
        (self.frustum['height'], self.frustum['width'], n_channels))

    self.camera = cam_old
    self.camera.v = v_old
    return flow_im
Code example #5
File: topology.py  Project: Hutaimu1/cv-
def get_vertices_per_edge(mesh_v, mesh_f):
    """Returns an Ex2 array of adjacencies between vertices, where
    each element in the array is a vertex index. Each edge is included
    only once. If output of get_faces_per_edge is provided, this is used to
    avoid call to get_vert_connectivity()"""

    vc = sp.coo_matrix(get_vert_connectivity(mesh_v, mesh_f))
    result = np.hstack((col(vc.row), col(vc.col)))
    result = result[result[:,0] < result[:,1]] # for uniqueness

    return result
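A self-contained check of the uniqueness trick above, with the adjacency matrix built inline for a toy two-triangle mesh instead of via get_vert_connectivity() (my sketch, not project code):

import numpy as np
import scipy.sparse as sp

col = lambda A: A.reshape((-1, 1))

# Two triangles sharing the edge (1, 2).
mesh_f = np.array([[0, 1, 2], [1, 3, 2]])

# Symmetric vertex-adjacency matrix, built directly from the faces.
i = mesh_f[:, [0, 1, 2]].ravel()
j = mesh_f[:, [1, 2, 0]].ravel()
ones = np.ones(2 * i.size)
vc = sp.coo_matrix((ones, (np.r_[i, j], np.r_[j, i])),
                   shape=(4, 4)).tocsr().tocoo()  # csr pass merges duplicates

result = np.hstack((col(vc.row), col(vc.col)))
result = result[result[:, 0] < result[:, 1]]  # keep each edge once
print(result.tolist())  # [[0, 1], [0, 2], [1, 2], [1, 3], [2, 3]]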
Code example #6
File: optimization.py  Project: vstarlinger/opendr
def predicted_improvement(d, e, J, sqnorm_e, JTJ, JTe):
    d = col(d)
    e = col(e)
    aa = .5 * sqnorm_e
    bb = JTe.T.dot(d)
    c1 = .5 * d.T
    c2 = JTJ
    c3 = d
    cc = c1.dot(c2.dot(c3))
    result = 2. * (aa - bb + cc)[0, 0]
    return sqnorm_e - result
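For context, the return value works out to ||e||^2 - ||e - J d||^2, the improvement the linearized model predicts for step d. A quick sanity check (my addition, reusing predicted_improvement as defined above): with a purely linear residual the prediction is exact.

import numpy as np

col = lambda A: A.reshape((-1, 1))

rng = np.random.default_rng(0)
J = rng.standard_normal((5, 3))
e = rng.standard_normal(5)
d = rng.standard_normal(3)

sqnorm_e = e.dot(e)
pred = predicted_improvement(d, e, J, sqnorm_e, J.T.dot(J), col(J.T.dot(e)))
actual = sqnorm_e - np.sum((e - J.dot(d)) ** 2)  # actual drop in squared error
assert np.isclose(pred, actual)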
Code example #7
File: topology.py  Project: cadik/opendr
def get_vertices_per_edge(mesh_v, mesh_f):
    """Returns an Ex2 array of adjacencies between vertices, where
    each element in the array is a vertex index. Each edge is included
    only once. If output of get_faces_per_edge is provided, this is used to
    avoid call to get_vert_connectivity()"""

    vc = sp.coo_matrix(get_vert_connectivity(mesh_v, mesh_f))
    result = np.hstack((col(vc.row), col(vc.col)))
    result = result[result[:, 0] < result[:, 1]]  # for uniqueness

    return result
Code example #8
    def unproject_points(self, uvd, camera_space=False):
        tmp = np.hstack((
            col(2. * uvd[:, 0] / self.width - 1 + (self.right + self.left) / (self.right - self.left)).r * (self.right - self.left).r / 2.,
            col(2. * uvd[:, 1] / self.height - 1 + (self.bottom + self.top) / (self.bottom - self.top)).r * (self.bottom - self.top).r / 2.,
            np.ones((uvd.shape[0], 1))
        ))

        if camera_space:
            return tmp
        tmp -= self.t.r  # translate

        return tmp.dot(Rodrigues(self.rt).r.T)  # rotate
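This is the inverse of the projection in code example #3. A pure-numpy round trip for the x coordinate (illustrative values; rotation and translation ignored):

import numpy as np

left, right, width = -2.0, 3.0, 640.0
x = np.linspace(left, right, 7)
# forward mapping from r_and_derivatives (example #3)
u = (2. / (right - left) * x - (right + left) / (right - left) + 1.) * width / 2.
# inverse mapping from unproject_points above
x_back = (2. * u / width - 1. + (right + left) / (right - left)) * (right - left) / 2.
assert np.allclose(x, x_back)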
Code example #9
File: renderer.py  Project: cadik/opendr
    def compute_dr_wrt(self, wrt):
        
        if wrt is not self.camera and wrt is not self.v:
            return None
        
        visibility = self.visibility_image
        visible = np.nonzero(visibility.ravel() != 4294967295)[0]
        barycentric = self.barycentric_image
        if wrt is self.camera:
            shape = visibility.shape
            depth = self.depth_image

            if self.overdraw:
                result1 = common.dImage_wrt_2dVerts_bnd(depth, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size//3, self.f, self.boundaryid_image != 4294967295)
            else:
                result1 = common.dImage_wrt_2dVerts(depth, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size//3, self.f)

            # result1 = common.dImage_wrt_2dVerts(depth, visible, visibility, barycentric, self.frustum['width'], self.frustum['height'], self.v.r.size/3, self.f)

            return result1

        elif wrt is self.v:

            IS = np.tile(col(visible), (1, 9)).ravel()
            JS = col(self.f[visibility.ravel()[visible]].ravel())
            JS = np.hstack((JS*3, JS*3+1, JS*3+2)).ravel()

            # FIXME: there should be a faster way to get the camera axis.
            # But it should be carefully tested with distortion present!
            pts = np.array([
                [self.camera.c.r[0], self.camera.c.r[1], 2],
                [self.camera.c.r[0], self.camera.c.r[1], 1]
            ])
            pts = self.camera.unproject_points(pts)
            cam_axis = pts[0,:] - pts[1,:]

            if True: # use barycentric coordinates (correct way)
                w = visibility.shape[1]
                pxs = np.asarray(visible % w, np.int32)
            pys = np.asarray(visible // w, np.int32)
                bc0 = col(barycentric[pys, pxs, 0])
                bc1 = col(barycentric[pys, pxs, 1])
                bc2 = col(barycentric[pys, pxs, 2])
                bc = np.hstack((bc0,bc0,bc0,bc1,bc1,bc1,bc2,bc2,bc2)).ravel()
            else: # each vert contributes equally (an approximation)
                bc = 1. / 3.

            data = np.tile(row(cam_axis), (IS.size//3,1)).ravel() * bc
            result2 = sp.csc_matrix((data, (IS, JS)), shape=(self.frustum['height']*self.frustum['width'], self.v.r.size))
            return result2
Code example #10
File: common.py  Project: chenliu0831/opendr
def flow_to(self, v_next, cam_next):
    from chumpy.ch import MatVecMult

    color_image = self.r
    visibility = self.visibility_image
    pxpos = np.zeros_like(self.color_image)
    pxpos[:,:,0] = np.tile(row(np.arange(self.color_image.shape[1])), (self.color_image.shape[0], 1))
    pxpos[:,:,2] = np.tile(col(np.arange(self.color_image.shape[0])), (1, self.color_image.shape[1]))

    visible = np.nonzero(visibility.ravel() != 4294967295)[0]
    num_visible = len(visible)

    barycentric = self.barycentric_image


    # map 3d to 3d
    JS = col(self.f[visibility.ravel()[visible]]).ravel()
    IS = np.tile(col(np.arange(JS.size//3)), (1, 3)).ravel()
    data = barycentric.reshape((-1,3))[visible].ravel()

    # replicate to xyz
    IS = np.concatenate((IS*3, IS*3+1, IS*3+2))
    JS = np.concatenate((JS*3, JS*3+1, JS*3+2))
    data = np.concatenate((data, data, data))

    verts_to_visible = sp.csc_matrix((data, (IS, JS)), shape=(np.max(IS)+1, self.v.r.size))

    v_old = self.camera.v
    cam_old = self.camera

    if cam_next is None:
        cam_next = self.camera

    self.camera.v = MatVecMult(verts_to_visible, self.v.r)
    r1 = self.camera.r.copy()

    self.camera = cam_next
    self.camera.v = MatVecMult(verts_to_visible, v_next)
    r2 = self.camera.r.copy()

    n_channels = self.camera.shape[1]
    flow = r2 - r1
    flow_im = np.zeros((self.frustum['height'], self.frustum['width'], n_channels)).reshape((-1,n_channels))

    flow_im[visible] = flow
    flow_im = flow_im.reshape((self.frustum['height'], self.frustum['width'], n_channels))

    self.camera = cam_old
    self.camera.v = v_old
    return flow_im
Code example #11
File: renderer.py  Project: cadik/opendr
    def getDepthMesh(self, depth_image=None):
        self._call_on_changed() # make sure everything is up to date
        v = self.glb.getDepthCloud(depth_image)
        w = self.frustum['width']
        h = self.frustum['height']
        idxs = np.arange(w*h).reshape((h, w))
    
        # v0 is upperleft, v1 is upper right, v2 is lowerleft, v3 is lowerright
        v0 = col(idxs[:-1,:-1])
        v1 = col(idxs[:-1,1:])
        v2 = col(idxs[1:,:-1])
        v3 = col(idxs[1:,1:])

        f = np.hstack((v0, v1, v2, v1, v3, v2)).reshape((-1,3))        
        return v, f
Code example #12
File: renderer.py  Project: classner/opendr
    def getDepthMesh(self, depth_image=None):
        self._call_on_changed()  # make sure everything is up to date
        v = self.glb.getDepthCloud(depth_image)
        w = self.frustum['width']
        h = self.frustum['height']
        idxs = np.arange(w * h).reshape((h, w))

        # v0 is upperleft, v1 is upper right, v2 is lowerleft, v3 is lowerright
        v0 = col(idxs[:-1, :-1])
        v1 = col(idxs[:-1, 1:])
        v2 = col(idxs[1:, :-1])
        v3 = col(idxs[1:, 1:])

        f = np.hstack((v0, v1, v2, v1, v3, v2)).reshape((-1, 3))
        return v, f
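What the face construction above produces on a tiny 2x3 pixel grid (illustration only): each quad of neighboring pixels becomes two triangles.

import numpy as np

col = lambda A: A.reshape((-1, 1))

w, h = 3, 2
idxs = np.arange(w * h).reshape((h, w))
v0 = col(idxs[:-1, :-1])  # upper-left corner of each quad
v1 = col(idxs[:-1, 1:])   # upper-right
v2 = col(idxs[1:, :-1])   # lower-left
v3 = col(idxs[1:, 1:])    # lower-right
f = np.hstack((v0, v1, v2, v1, v3, v2)).reshape((-1, 3))
print(f.tolist())  # [[0, 1, 3], [1, 4, 3], [1, 2, 4], [2, 5, 4]]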
Code example #13
File: extras.py  Project: vstarlinger/opendr
    def compute_dr_wrt(self, wrt):
        if wrt is self.locations:
            locations = self.locations.r.copy()
            for i in range(3):
                locations[:,i] = np.clip(locations[:,i], 0, self.image.shape[i]-1)
            locations = locations.astype(np.uint32)

            xc = col(self.gx[locations[:,0], locations[:,1], locations[:,2]])
            yc = col(self.gy[locations[:,0], locations[:,1], locations[:,2]])
            zc = col(self.gz[locations[:,0], locations[:,1], locations[:,2]])

            data = np.vstack([xc.ravel(), yc.ravel(), zc.ravel()]).T.copy()
            JS = np.arange(locations.size)
            IS = JS // 3

            return sp.csc_matrix((data.ravel(), (IS, JS)))
Code example #14
File: lighting.py  Project: Hutaimu1/cv-
def LightDotNormal(num_verts):

    normalize_rows = lambda v : v / col(ch.sqrt(ch.sum(v.reshape((-1,3))**2, axis=1)))
    sum_rows = lambda v :  ch.sum(v.reshape((-1,3)), axis=1)

    return Ch(lambda light_pos, v, vn :
        sum_rows(normalize_rows(light_pos.reshape((1,3)) - v.reshape((-1,3))) * vn.reshape((-1,3))))
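A plain-numpy equivalent of the lambda above (my sketch): per vertex, the dot product of the unit vector pointing from the vertex to the light with the vertex normal.

import numpy as np

def light_dot_normal_np(light_pos, v, vn):
    d = light_pos.reshape((1, 3)) - v.reshape((-1, 3))  # vertex -> light
    d = d / np.linalg.norm(d, axis=1, keepdims=True)    # normalize rows
    return np.sum(d * vn.reshape((-1, 3)), axis=1)      # row-wise dot product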
Code example #15
File: camera.py  Project: cadik/opendr
    def unproject_points(self, uvd, camera_space=False):
        cam = ProjectPoints3D(**{k: getattr(self, k)  for k in self.dterms if hasattr(self, k)})

        try:
            xy_undistorted_camspace = cv2.undistortPoints(np.asarray(uvd[:,:2].reshape((1,-1,2)).copy()), np.asarray(cam.camera_mtx), cam.k.r)
            xyz_camera_space = np.hstack((xy_undistorted_camspace.squeeze(), col(uvd[:,2])))
            xyz_camera_space[:,:2] *= col(xyz_camera_space[:,2]) # scale x,y by z
            if camera_space:
                return xyz_camera_space
            other_answer = xyz_camera_space - row(cam.view_mtx[:,3]) # translate
            result = other_answer.dot(cam.view_mtx[:,:3]) # rotate
        except: # slow way, probably not so good. But doesn't require cv2.undistortPoints.
            cam.v = np.ones_like(uvd)
            ch.minimize(cam - uvd, x0=[cam.v], method='dogleg', options={'disp': 0})
            result = cam.v.r
        return result
Code example #16
File: lighting.py  Project: mattloper/opendr
def LightDotNormal(num_verts):

    normalize_rows = lambda v : v / col(ch.sqrt(ch.sum(v.reshape((-1,3))**2, axis=1)))
    sum_rows = lambda v :  ch.sum(v.reshape((-1,3)), axis=1)

    return Ch(lambda light_pos, v, vn :
        sum_rows(normalize_rows(light_pos.reshape((1,3)) - v.reshape((-1,3))) * vn.reshape((-1,3))))
Code example #17
File: geometry.py  Project: Hutaimu1/cv-
    def compute_dr_wrt(self, wrt):
        if wrt is not self.v:
            return None

        v = self.v.r.reshape(-1, 3)
        blocks = -np.einsum('ij,ik->ijk', v, v) * (self.ss
                                                   **(-3. / 2.)).reshape(
                                                       (-1, 1, 1))
        for i in range(3):
            blocks[:, i, i] += self.s_inv

        if True:
            data = blocks.ravel()
            indptr = np.arange(0, (self.v.r.size + 1) * 3, 3)
            indices = col(np.arange(0, self.v.r.size))
            indices = np.hstack([indices, indices, indices])
            indices = indices.reshape((-1, 3, 3))
            indices = indices.transpose((0, 2, 1)).ravel()
            result = sp.csc_matrix((data, indices, indptr),
                                   shape=(self.v.r.size, self.v.r.size))
            return result
        else:
            matvec = lambda x: np.einsum('ijk,ik->ij', blocks,
                                         x.reshape(
                                             (blocks.shape[0], 3))).ravel()
            return sp.linalg.LinearOperator((self.v.r.size, self.v.r.size),
                                            matvec=matvec)
Code example #18
File: lighting.py  Project: bearpaw/opendr
 def compute_r(self):
     comps = self.components.r
     n = len(comps)
     result = self.mtx.dot(self.sh_coeffs[:, :n].dot(col(
         self.components.r)))
     result[result < 0] = 0
     return result.reshape((-1, self.num_channels))
Code example #19
 def compute_dr_wrt(self, wrt):
     if wrt is self.v:
         IS = np.tile(col(np.arange(self.v.r.size//3)), (1, 3)).ravel()
         JS = np.arange(self.v.r.size)
         data = np.ones_like(JS)
         result = sp.csc_matrix((data, (IS, JS)), shape=(self.v.r.size//3, self.v.r.size))
         return result
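The pattern being built is d(per-vertex sum)/d(flattened vertices): row i carries ones in columns 3i, 3i+1, 3i+2. A two-vertex check (my addition):

import numpy as np
import scipy.sparse as sp

col = lambda A: A.reshape((-1, 1))

size = 6  # two vertices, flattened
IS = np.tile(col(np.arange(size // 3)), (1, 3)).ravel()  # [0 0 0 1 1 1]
JS = np.arange(size)
J = sp.csc_matrix((np.ones(size), (IS, JS)), shape=(size // 3, size))

v = np.arange(6.)
assert np.allclose(J.dot(v), v.reshape((-1, 3)).sum(axis=1))  # [3., 12.]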
Code example #20
File: common.py  Project: vstarlinger/opendr
def dr_wrt_vc(visible, visibility, f, barycentric, frustum, vc_size, num_channels):

    # Each pixel relies on three verts
    IS = np.tile(col(visible), (1, 3)).ravel()
    JS = col(f[visibility.ravel()[visible]].ravel())

    bc = barycentric.reshape((-1,3))
    data = np.asarray(bc[visible,:], order='C').ravel()

    IS = np.concatenate([IS*num_channels+k for k in range(num_channels)])
    JS = np.concatenate([JS*num_channels+k for k in range(num_channels)])
    data = np.concatenate([data for i in range(num_channels)])

    ij = np.vstack((IS.ravel(), JS.ravel())).astype(np.int32)
    result = sp.csc_matrix((data, ij), shape=(frustum['width']*frustum['height']*num_channels, vc_size))
    return result
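The concatenations above interleave channels: pixel row i becomes rows i*num_channels+k, one row per channel k. In isolation (illustrative indices, my addition):

import numpy as np

num_channels = 2
IS = np.array([0, 1, 3])
IS_rep = np.concatenate([IS * num_channels + k for k in range(num_channels)])
print(IS_rep)  # [0 2 6 1 3 7]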
Code example #22
    def compute_dr_wrt(self, obj):
        if obj not in (self.a, self.b):
            return None
            
        sz = self.a.r.size
        if not hasattr(self, 'indices') or self.indices.size != sz*3:
            self.indptr = np.arange(0,(sz+1)*3,3)
            idxs = col(np.arange(0,sz))
            idxs = np.hstack([idxs, idxs, idxs])
            idxs = idxs.reshape((-1,3,3))
            idxs = idxs.transpose((0,2,1)).ravel()
            self.indices = idxs

        if obj is self.a:
            # m = self.Bx
            # matvec = lambda x : _call_einsum_matvec(m, x)
            # matmat = lambda x : _call_einsum_matmat(m, x)
            # return sp.linalg.LinearOperator((self.a1.size*3, self.a1.size*3), matvec=matvec, matmat=matmat)
            data = self.Bx.ravel()
            result = sp.csc_matrix((data, self.indices, self.indptr), shape=(sz, sz))
            return -result


        elif obj is self.b:
            # m = self.Ax
            # matvec = lambda x : _call_einsum_matvec(m, x)
            # matmat = lambda x : _call_einsum_matmat(m, x)
            # return sp.linalg.LinearOperator((self.a1.size*3, self.a1.size*3), matvec=matvec, matmat=matmat)
            data = self.Ax.ravel()
            result = sp.csc_matrix((data, self.indices, self.indptr), shape=(sz, sz))
            return -result
Code example #23
File: ch_ops.py  Project: bearpaw/chumpy
    def compute_d1(self):
        # To stay consistent with numpy, we must upgrade 1D arrays to 2D
        ar = row(self.a.r) if len(self.a.r.shape)<2 else self.a.r.reshape((-1, self.a.r.shape[-1]))
        br = col(self.b.r) if len(self.b.r.shape)<2 else self.b.r.reshape((self.b.r.shape[0], -1))

        if ar.ndim <= 2:
            return sp.kron(sp.eye(ar.shape[0], ar.shape[0]),br.T)
        else:
            raise NotImplementedError
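The Kronecker identity being used: with C-order (row-major) flattening, d vec(a.dot(b)) / d vec(a) = kron(I, b.T). A numeric check (mine, not project code):

import numpy as np
import scipy.sparse as sp

rng = np.random.default_rng(1)
a = rng.standard_normal((2, 3))
b = rng.standard_normal((3, 4))
J = sp.kron(sp.eye(a.shape[0], a.shape[0]), b.T).toarray()

da = rng.standard_normal(a.shape)
lhs = ((a + da).dot(b) - a.dot(b)).ravel()  # exact: the product is linear in a
rhs = J.dot(da.ravel())
assert np.allclose(lhs, rhs)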
Code example #24
File: common.py  Project: chenliu0831/opendr
def dr_wrt_vc(visible, visibility, f, barycentric, frustum, vc_size, num_channels):
    # Each pixel relies on three verts
    IS = np.tile(col(visible), (1, 3)).ravel()
    JS = col(f[visibility.ravel()[visible]].ravel())

    bc = barycentric.reshape((-1,3))
    data = np.asarray(bc[visible,:], order='C').ravel()

    IS = np.concatenate([IS*num_channels+k for k in range(num_channels)])
    JS = np.concatenate([JS*num_channels+k for k in range(num_channels)])
    data = np.concatenate([data for i in range(num_channels)])
    # IS = np.concatenate((IS*3, IS*3+1, IS*3+2))
    # JS = np.concatenate((JS*3, JS*3+1, JS*3+2))
    # data = np.concatenate((data, data, data))

    ij = np.vstack((IS.ravel(), JS.ravel()))
    result = sp.csc_matrix((data, ij), shape=(frustum['width']*frustum['height']*num_channels, vc_size))
    return result
Code example #26
File: topology.py  Project: chenliu0831/opendr
def get_vertices_per_edge(mesh_v, mesh_f):
    """Returns an Ex2 array of adjacencies between vertices, where
    each element in the array is a vertex index. Each edge is included
    only once. If output of get_faces_per_edge is provided, this is used to
    avoid call to get_vert_connectivity()"""

    faces = mesh_f
    cache_fname = '/tmp/verts_per_edge_cache_' + str(
        zlib.crc32(faces.flatten())) + '.pkl'
    try:
        with open(cache_fname, 'rb') as fp:
            return (pickle.load(fp))
    except:
        vc = sp.coo_matrix(get_vert_connectivity(mesh_v, mesh_f))
        result = np.hstack((col(vc.row), col(vc.col)))
        result = result[result[:, 0] < result[:, 1]]  # for uniqueness

        with open(cache_fname, 'wb') as fp:
            pickle.dump(result, fp, -1)
        return result
Code example #27
def face_bases(v, f):
    t1 = TriEdges(f, 1, 0, v).reshape((-1,3))
    t2 = TriEdges(f, 2, 0, v).reshape((-1,3))
    #t3 = NormalizedNx3(CrossProduct(t1, t2)).reshape((-1,3))
    #t3 = CrossProduct(t1, t2).reshape((-1,3))
    
    # Problem: cross-product is proportional in length to len(t1)*len(t2)
    # Solution: divide by sqrt(sqrt(len(cross-product)))
    t3 = CrossProduct(t1, t2).reshape((-1,3)); t3 = t3 / col(ch.sum(t3**2., axis=1)**.25)
    result = ch.hstack((t1, t2, t3)).reshape((-1,3,3))
    return result
Code example #28
    def compute_d1(self):
        # To stay consistent with numpy, we must upgrade 1D arrays to 2D
        mtx1r = row(self.mtx1.r) if len(self.mtx1.r.shape)<2 else self.mtx1.r
        mtx2r = col(self.mtx2.r) if len(self.mtx2.r.shape)<2 else self.mtx2.r

        if mtx1r.ndim <= 2:
            return sp.kron(sp.eye(mtx1r.shape[0], mtx1r.shape[0]),mtx2r.T)
        else:
            mtx2f = mtx2r.reshape((-1, mtx2r.shape[-2], mtx2r.shape[-1]))
            mtx2f = np.rollaxis(mtx2f, -1, -2) #transpose basically            
            result = sp.block_diag([np.kron(np.eye(mtx1r.shape[-2], mtx1r.shape[-2]),m2) for m2 in mtx2f])
            assert(result.shape[0] == self.r.size)
            return result
Code example #29
File: common.py  Project: vstarlinger/opendr
def dImage_wrt_2dVerts(observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
    """Construct a sparse jacobian that relates 2D projected vertex positions
    (in the columns) to pixel values (in the rows). This can be done
    in two steps."""
    num_verts = np.int32(num_verts)
    n_channels = np.atleast_3d(observed).shape[2]
    shape = visibility.shape

    # Step 1: get the structure ready, ie the IS and the JS
    IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()

    JS = f[visibility.ravel()[visible]].reshape((-1,1))
    JS = np.hstack((JS*2, JS*2+1)).ravel()

    pxs = np.asarray(visible % shape[1], np.int32)
    pys = np.asarray(np.floor(np.floor(visible) / shape[1]), np.int32)

    if n_channels > 1:
        IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
        JS = np.concatenate([JS for i in range(n_channels)])

    # Step 2: get the data ready, ie the actual values of the derivatives
    ksize=1
    sobel_normalizer = cv2.Sobel(np.asarray(np.tile(row(np.arange(10)), (10, 1)), np.float64), cv2.CV_64F, dx=1, dy=0, ksize=ksize)[5,5]
    xdiff = -cv2.Sobel(observed, cv2.CV_64F, dx=1, dy=0, ksize=ksize) / sobel_normalizer
    ydiff = cv2.Sobel(observed, cv2.CV_64F, dx=0, dy=1, ksize=ksize) / sobel_normalizer

    xdiff = np.atleast_3d(xdiff)
    ydiff = np.atleast_3d(ydiff)

    datas = []

    # The data is weighted according to barycentric coordinates
    bc0 = barycentric[pys, pxs, 0].reshape((-1,1))
    bc1 = barycentric[pys, pxs, 1].reshape((-1,1))
    bc2 = barycentric[pys, pxs, 2].reshape((-1,1))
    for k in range(n_channels):
        dxs = xdiff[pys, pxs, k]
        dys = ydiff[pys, pxs, k]
        if f.shape[1] == 3:
            datas.append(np.hstack((dxs.reshape((-1,1))*bc0,dys.reshape((-1,1))*bc0,dxs.reshape((-1,1))*bc1,dys.reshape((-1,1))*bc1,dxs.reshape((-1,1))*bc2,dys.reshape((-1,1))*bc2)).ravel())
        else:
            datas.append(np.hstack((dxs.reshape((-1,1))*bc0,dys.reshape((-1,1))*bc0,dxs.reshape((-1,1))*bc1,dys.reshape((-1,1))*bc1)).ravel())

    data = np.concatenate(datas)

    ij = np.vstack((IS.ravel(), JS.ravel())).astype(np.int32)
    result = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))

    return result
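How the pixel coordinates are recovered above: visible holds flat indices into the image, so x = idx % width and y = idx // width (the double np.floor in the original is just integer division for nonnegative indices). For example:

import numpy as np

width = 5
visible = np.array([0, 4, 7, 12])             # flat pixel indices
pxs = np.asarray(visible % width, np.int32)   # x: [0 4 2 2]
pys = np.asarray(visible // width, np.int32)  # y: [0 0 1 2]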
Code example #30
    def test_maximum(self):
        from chumpy.utils import row, col
        from chumpy import maximum

        # Make sure that when we compare the max of two *identical* numbers,
        # we get the right derivatives wrt both
        the_max = maximum(ch.Ch(1), ch.Ch(1))
        self.assertTrue(the_max.r.ravel()[0] == 1.)
        self.assertTrue(the_max.dr_wrt(the_max.a)[0, 0] == 1.)
        self.assertTrue(the_max.dr_wrt(the_max.b)[0, 0] == 1.)

        # Now test given that all numbers are different, by allocating from
        # a pool of randomly permuted numbers.
        # We test combinations of scalars and 2d arrays.
        rnd = np.asarray(np.random.permutation(np.arange(20)), np.float64)
        c1 = ch.Ch(rnd[:6].reshape((2, 3)))
        c2 = ch.Ch(rnd[6:12].reshape((2, 3)))
        s1 = ch.Ch(rnd[12])
        s2 = ch.Ch(rnd[13])

        eps = .1
        for first in [c1, s1]:
            for second in [c2, s2]:
                the_max = maximum(first, second)

                for which_to_change in [first, second]:

                    max_r0 = the_max.r.copy()
                    max_r_diff = np.max(
                        np.abs(max_r0 - np.maximum(first.r, second.r)))
                    self.assertTrue(max_r_diff == 0)
                    max_dr = the_max.dr_wrt(which_to_change).copy()
                    which_to_change.x = which_to_change.x + eps
                    max_r1 = the_max.r.copy()

                    emp_diff = (the_max.r - max_r0).ravel()
                    pred_diff = max_dr.dot(col(
                        eps * np.ones(max_dr.shape[1]))).ravel()

                    #print('comparing the following numbers/vectors:')
                    #print(first.r)
                    #print(second.r)
                    #print('empirical vs predicted difference:')
                    #print(emp_diff)
                    #print(pred_diff)
                    #print('-----')

                    max_dr_diff = np.max(np.abs(emp_diff - pred_diff))
                    #print('max dr diff: %.2e' % (max_dr_diff,))
                    self.assertTrue(max_dr_diff < 1e-14)
Code example #31
File: geometry.py  Project: cadik/opendr
def SecondFundamentalForm(v, f):    
    from chumpy import hstack, vstack
    from chumpy.linalg import Pinv
    nbrs = MatVecMult(FirstEdgesMtx(v, f, want_big=True), v.ravel()).reshape((-1,3))
    
    b0 = NormalizedNx3(VertNormalsScaled(f=f, v=v)).reshape((-1,3))
    b1 = NormalizedNx3(CrossProduct(b0, nbrs-v)).reshape((-1,3))
    b2 = NormalizedNx3(CrossProduct(b0, b1)).reshape((-1,3))
    
    cnct = get_vert_connectivity(v.r, f)
    ffs = []
    for i in range(v.size//3):
        nbrs = v[np.nonzero(np.asarray(cnct[i].todense()).ravel())[0]] - row(v[i])
        us = nbrs.dot(b2[i])
        vs = nbrs.dot(b1[i])
        hs = nbrs.dot(b0[i])
        coeffs = Pinv(hstack((col((us*.5)**2), col(us*vs), col((vs*.5)**2)))).dot(hs)
        ffs.append(row(coeffs))
        # if i == 3586:
        #     import pdb; pdb.set_trace()

    ffs = vstack(ffs)
    return ffs
Code example #32
    def compute(self):

        # To stay consistent with numpy, we must upgrade 1D arrays to 2D
        ar = sp.csr_matrix((self.a.data, self.a.indices, self.a.indptr),
                           shape=(max(np.sum(self.a.shape[:-1]),
                                      1), self.a.shape[-1]))
        br = col(self.b.r) if len(self.b.r.shape) < 2 else self.b.r.reshape(
            (self.b.r.shape[0], -1))

        if br.ndim <= 1:
            return ar
        elif br.ndim <= 2:
            return sp.kron(ar, sp.eye(br.shape[1], br.shape[1]))
        else:
            raise NotImplementedError
Code example #33
    def compute_d2(self):
        
        # To stay consistent with numpy, we must upgrade 1D arrays to 2D
        mtx1r = row(self.mtx1.r) if len(self.mtx1.r.shape)<2 else self.mtx1.r
        mtx2r = col(self.mtx2.r) if len(self.mtx2.r.shape)<2 else self.mtx2.r

        if mtx2r.ndim <= 1:
            return mtx1r
        elif mtx2r.ndim <= 2:
            return sp.kron(mtx1r, sp.eye(mtx2r.shape[1],mtx2r.shape[1]))
        else:
            mtx1f = mtx1r.reshape((-1, mtx1r.shape[-2], mtx1r.shape[-1]))            
            result = sp.block_diag([np.kron(m1, np.eye(mtx2r.shape[-1],mtx2r.shape[-1])) for m1 in mtx1f])
            assert(result.shape[0] == self.r.size)
            return result
Code example #34
File: common.py  Project: chenliu0831/opendr
def draw_visibility_image_internal(gl, v, f):
    """Assumes camera is set up correctly in gl context."""
    gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    fc = np.arange(1, len(f)+1)
    fc = np.tile(col(fc), (1, 3))
    fc[:, 0] = fc[:, 0] & 255
    fc[:, 1] = (fc[:, 1] >> 8 ) & 255
    fc[:, 2] = (fc[:, 2] >> 16 ) & 255
    fc = np.asarray(fc, dtype=np.uint8)
    
    draw_colored_primitives(gl, v, f, fc)
    raw = np.asarray(gl.getImage(), np.uint32)
    raw = raw[:,:,0] + raw[:,:,1]*256 + raw[:,:,2]*256*256 - 1
    return raw
Code example #35
def draw_visibility_image_internal(gl, v, f):
    """Assumes camera is set up correctly in gl context."""
    gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    fc = np.arange(1, len(f)+1)
    fc = np.tile(col(fc), (1, 3))
    fc[:, 0] = fc[:, 0] & 255
    fc[:, 1] = (fc[:, 1] >> 8 ) & 255
    fc[:, 2] = (fc[:, 2] >> 16 ) & 255
    fc = np.asarray(fc, dtype=np.uint8)

    draw_colored_primitives(gl, v, f, fc)
    raw = np.asarray(gl.getImage(), np.uint32)
    raw = raw[:,:,0] + raw[:,:,1]*256 + raw[:,:,2]*256*256 - 1
    return raw
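Round trip of the byte packing used above (my check, not project code): face id i+1 is split across the three color channels, and the rendered image is decoded back with r + g*256 + b*256*256 - 1.

import numpy as np

col = lambda A: A.reshape((-1, 1))

f_count = 70000  # enough faces to exercise the third byte
fc = np.tile(col(np.arange(1, f_count + 1)), (1, 3))
fc[:, 0] = fc[:, 0] & 255           # low byte
fc[:, 1] = (fc[:, 1] >> 8) & 255    # middle byte
fc[:, 2] = (fc[:, 2] >> 16) & 255   # high byte

decoded = fc[:, 0] + fc[:, 1] * 256 + fc[:, 2] * 256 * 256 - 1
assert np.array_equal(decoded, np.arange(f_count))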
Code example #36
    def on_changed(self, which):
        if 'a' in which:
            a_csr = sp.csr_matrix(self.a)
            # To stay consistent with numpy, we must upgrade 1D arrays to 2D
            self.ar = sp.csr_matrix((a_csr.data, a_csr.indices, a_csr.indptr),
                                    shape=(max(np.sum(a_csr.shape[:-1]),
                                               1), a_csr.shape[-1]))

        if 'b' in which:
            self.br = col(
                self.b.r) if len(self.b.r.shape) < 2 else self.b.r.reshape(
                    (self.b.r.shape[0], -1))

        if 'a' in which or 'b' in which:
            self.k = sp.kron(self.ar, sp.eye(self.br.shape[1],
                                             self.br.shape[1]))
Code example #37
    def test_transpose(self):
        from chumpy.utils import row, col
        from copy import deepcopy
        for which in ('C', 'F'):  # test in fortran and contiguous mode
            a = ch.Ch(
                np.require(np.zeros(8).reshape((4, 2)), requirements=which))
            b = a.T

            b1 = b.r.copy()
            #dr = b.dr_wrt(a).copy()
            dr = deepcopy(b.dr_wrt(a))

            diff = np.arange(a.size).reshape(a.shape)
            a.x = np.require(a.r + diff, requirements=which)
            b2 = b.r.copy()

            diff_pred = dr.dot(col(diff)).ravel()
            diff_emp = (b2 - b1).ravel()
            np.testing.assert_array_equal(diff_pred, diff_emp)
Code example #38
    def compute_dr_wrt(self, wrt):
        if wrt is not self.v:
            return None
            
        cplus = self.cplus
        cminus = self.cminus
        vplus  = self.f[:,cplus]
        vminus = self.f[:,cminus]
        vplus3 = row(np.hstack([col(vplus*3), col(vplus*3+1), col(vplus*3+2)]))
        vminus3 = row(np.hstack([col(vminus*3), col(vminus*3+1), col(vminus*3+2)]))

        IS = row(np.arange(0,vplus3.size))
        ones = np.ones(vplus3.size)
        shape = (self.f.size, self.v.r.size)
        return sp.csc_matrix((ones, np.vstack([IS, vplus3])), shape=shape) - sp.csc_matrix((ones, np.vstack([IS, vminus3])), shape=shape)
Code example #39
def draw_texcoord_image(glf, v, f, vt, ft, boundarybool_image=None):
    gl = glf
    gl.Disable(GL_TEXTURE_2D)
    gl.DisableClientState(GL_TEXTURE_COORD_ARRAY)

    gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    # want vtc: texture-coordinates per vertex (not per element in vc)
    colors = vt[ft.ravel()]

    colors = np.asarray(np.hstack((colors, col(colors[:,0]*0))), np.float64, order='C')
    draw_colored_primitives(gl, v, f, colors)

    if boundarybool_image is not None:
        gl.PolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        draw_colored_primitives(gl, v, f, colors)
        gl.PolygonMode(GL_FRONT_AND_BACK, GL_FILL)

    result = np.asarray(deepcopy(gl.getImage()), np.float64, order='C')[:,:,:2].copy()
    result[:,:,1] = 1. - result[:,:,1]
    return result
Code example #40
File: geometry.py  Project: cadik/opendr
    def compute_dr_wrt(self, wrt):
        if wrt is not self.v:
            return None

        v = self.v.r.reshape(-1,3)
        blocks = -np.einsum('ij,ik->ijk', v, v) * (self.ss**(-3./2.)).reshape((-1,1,1))
        for i in range(3):
            blocks[:,i,i] += self.s_inv

        if True:
            data = blocks.ravel()
            indptr = np.arange(0,(self.v.r.size+1)*3,3)
            indices = col(np.arange(0,self.v.r.size))
            indices = np.hstack([indices, indices, indices])
            indices = indices.reshape((-1,3,3))
            indices = indices.transpose((0,2,1)).ravel()
            result = sp.csc_matrix((data, indices, indptr), shape=(self.v.r.size, self.v.r.size))
            return result
        else:
            matvec = lambda x : np.einsum('ijk,ik->ij', blocks, x.reshape((blocks.shape[0],3))).ravel()
            return sp.linalg.LinearOperator((self.v.r.size,self.v.r.size), matvec=matvec)
Code example #41
File: common.py  Project: chenliu0831/opendr
def draw_texcoord_image(glf, v, f, vt, ft, boundarybool_image=None):
    gl = glf
    gl.Disable(GL_TEXTURE_2D)
    gl.DisableClientState(GL_TEXTURE_COORD_ARRAY)

    gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    # want vtc: texture-coordinates per vertex (not per element in vc)
    colors = vt[ft.ravel()]

    colors = np.asarray(np.hstack((colors, col(colors[:,0]*0))), np.float64, order='C')
    draw_colored_primitives(gl, v, f, colors)

    if boundarybool_image is not None:
        gl.PolygonMode(GL_FRONT_AND_BACK, GL_LINE)
        draw_colored_primitives(gl, v, f, colors)
        gl.PolygonMode(GL_FRONT_AND_BACK, GL_FILL)

    result = np.asarray(deepcopy(gl.getImage()), np.float64, order='C')[:,:,:2].copy()
    result[:,:,1] = 1. - result[:,:,1]
    return result
Code example #42
File: renderer.py  Project: classner/opendr
def draw_edge_visibility(gl, v, e, f, hidden_wireframe=True):
    """Assumes camera is set up correctly in gl context."""
    gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    ec = np.arange(1, len(e) + 1)
    ec = np.tile(col(ec), (1, 3))
    ec[:, 0] = ec[:, 0] & 255
    ec[:, 1] = (ec[:, 1] >> 8) & 255
    ec[:, 2] = (ec[:, 2] >> 16) & 255
    ec = np.asarray(ec, dtype=np.uint8)

    draw_colored_primitives(gl, v, e, ec)

    if hidden_wireframe:
        gl.Enable(GL_POLYGON_OFFSET_FILL)
        gl.PolygonOffset(10.0, 1.0)
        draw_colored_primitives(gl, v, f, fc=np.zeros(f.shape))
        gl.Disable(GL_POLYGON_OFFSET_FILL)

    raw = np.asarray(gl.getImage(), np.uint32)
    raw = raw[:, :, 0] + raw[:, :, 1] * 256 + raw[:, :, 2] * 256 * 256 - 1
    return raw
Code example #43
File: renderer.py  Project: cadik/opendr
def draw_edge_visibility(gl, v, e, f, hidden_wireframe=True):
    """Assumes camera is set up correctly in gl context."""
    gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    ec = np.arange(1, len(e)+1)
    ec = np.tile(col(ec), (1, 3))
    ec[:, 0] = ec[:, 0] & 255
    ec[:, 1] = (ec[:, 1] >> 8 ) & 255
    ec[:, 2] = (ec[:, 2] >> 16 ) & 255
    ec = np.asarray(ec, dtype=np.uint8)
    
    draw_colored_primitives(gl, v, e, ec)
    
    if hidden_wireframe:
        gl.Enable(GL_POLYGON_OFFSET_FILL)
        gl.PolygonOffset(10.0, 1.0)
        draw_colored_primitives(gl, v, f, fc=np.zeros(f.shape))
        gl.Disable(GL_POLYGON_OFFSET_FILL)
    
    raw = np.asarray(gl.getImage(), np.uint32)
    raw = raw[:,:,0] + raw[:,:,1]*256 + raw[:,:,2]*256*256 - 1
    return raw
Code example #44
File: camera.py  Project: cadik/opendr
 def compute_r(self):
     result = ProjectPoints.compute_r(self)
     return np.hstack((result, col(self.z_coords.r)))
Code example #45
File: camera.py  Project: cadik/opendr
 def compute_r(self):
     return (cv2.Rodrigues(self.rt.r)[0].dot(self.v.r.T) + col(self.t.r)).T.copy()
Code example #46
File: common.py  Project: chenliu0831/opendr
def dImage_wrt_2dVerts(observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
    """Construct a sparse jacobian that relates 2D projected vertex positions
    (in the columns) to pixel values (in the rows). This can be done
    in two steps."""

    n_channels = np.atleast_3d(observed).shape[2]
    shape = visibility.shape

    # Step 1: get the structure ready, ie the IS and the JS
    IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
    JS = col(f[visibility.ravel()[visible]].ravel())
    JS = np.hstack((JS*2, JS*2+1)).ravel()

    pxs = np.asarray(visible % shape[1], np.int32)
    pys = np.asarray(np.floor(np.floor(visible) / shape[1]), np.int32)

    if n_channels > 1:
        IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
        JS = np.concatenate([JS for i in range(n_channels)])

    # Step 2: get the data ready, ie the actual values of the derivatives
    ksize=1
    sobel_normalizer = cv2.Sobel(np.asarray(np.tile(row(np.arange(10)), (10, 1)), np.float64), cv2.CV_64F, dx=1, dy=0, ksize=ksize)[5,5]
    xdiff = -cv2.Sobel(observed, cv2.CV_64F, dx=1, dy=0, ksize=ksize) / sobel_normalizer
    ydiff = -cv2.Sobel(observed, cv2.CV_64F, dx=0, dy=1, ksize=ksize) / sobel_normalizer

    xdiff = np.atleast_3d(xdiff)
    ydiff = np.atleast_3d(ydiff)

    datas = []

    # The data is weighted according to barycentric coordinates
    bc0 = col(barycentric[pys, pxs, 0])
    bc1 = col(barycentric[pys, pxs, 1])
    bc2 = col(barycentric[pys, pxs, 2])
    for k in range(n_channels):
        dxs = xdiff[pys, pxs, k]
        dys = ydiff[pys, pxs, k]
        if f.shape[1] == 3:
            datas.append(np.hstack((col(dxs)*bc0,col(dys)*bc0,col(dxs)*bc1,col(dys)*bc1,col(dxs)*bc2,col(dys)*bc2)).ravel())
        else:
            datas.append(np.hstack((col(dxs)*bc0,col(dys)*bc0,col(dxs)*bc1,col(dys)*bc1)).ravel())

    data = np.concatenate(datas)

    ij = np.vstack((IS.ravel(), JS.ravel()))
    result = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))

    return result
Code example #47
File: camera.py  Project: cadik/opendr
 def view_matrix(self):
     R = cv2.Rodrigues(self.rt.r)[0]
     return np.hstack((R, col(self.t.r)))
Code example #48
File: test_sh.py  Project: cadik/opendr
    def test_spherical_harmonics(self):
        global visualize
        if visualize:
            plt.ion()
    
        # Get mesh
        v, f = get_sphere_mesh()

        from geometry import VertNormals
        vn = VertNormals(v=v, f=f)
        #vn =  Ch(mesh.estimate_vertex_normals())

        # Get camera
        cam, frustum = getcam()
    
        # Get renderer
        from renderer import ColoredRenderer
        cam.v = v
        cr = ColoredRenderer(f=f, camera=cam, frustum=frustum, v=v)
    
        sh_red = SphericalHarmonics(vn=vn, light_color=np.array([1,0,0]))
        sh_green = SphericalHarmonics(vn=vn, light_color=np.array([0,1,0]))
    
        cr.vc = sh_red + sh_green
        
        ims_baseline = []
        for comp_idx, subplot_idx in enumerate([3,7,8,9,11,12,13,14,15]):
        
            sh_comps = np.zeros(9)
            sh_comps[comp_idx] = 1
            sh_red.components =  Ch(sh_comps)
            sh_green.components =  Ch(-sh_comps)
            
            newim = cr.r.reshape((frustum['height'], frustum['width'], 3))
            ims_baseline.append(newim)

            if visualize:
                plt.subplot(3,5,subplot_idx)
                plt.imshow(newim)
                plt.axis('off')
            
        offset = row(.4 * (np.random.rand(3)-.5))
        #offset = row(np.array([1.,1.,1.]))*.05
        vn_shifted = (vn.r + offset)
        vn_shifted = vn_shifted / col(np.sqrt(np.sum(vn_shifted**2, axis=1)))
        vn_shifted = vn_shifted.ravel()
        vn_shifted[vn_shifted>1.] = 1
        vn_shifted[vn_shifted<-1.] = -1
        vn_shifted = Ch(vn_shifted)
        cr.replace(sh_red.vn, vn_shifted)
        if True:
            for comp_idx in range(9):
                if visualize:
                    plt.figure(comp_idx+2)
        
                sh_comps = np.zeros(9)
                sh_comps[comp_idx] = 1
                sh_red.components =  Ch(sh_comps)
                sh_green.components =  Ch(-sh_comps)
        
                pred = cr.dr_wrt(vn_shifted).dot(col(vn_shifted.r.reshape(vn.r.shape) - vn.r)).reshape((frustum['height'], frustum['width'], 3))
                if visualize:
                    plt.subplot(1,2,1)
                    plt.imshow(pred)
                    plt.title('pred (comp %d)' % (comp_idx,))        
                    plt.subplot(1,2,2)
                    
                newim = cr.r.reshape((frustum['height'], frustum['width'], 3))
                emp = newim - ims_baseline[comp_idx]
                if visualize:
                    plt.imshow(emp)
                    plt.title('empirical (comp %d)' % (comp_idx,))
                pred_flat = pred.ravel()
                emp_flat = emp.ravel()
                nnz = np.unique(np.concatenate((np.nonzero(pred_flat)[0], np.nonzero(emp_flat)[0])))
                
                if comp_idx != 0:
                    med_diff = np.median(np.abs(pred_flat[nnz]-emp_flat[nnz]))
                    med_obs = np.median(np.abs(emp_flat[nnz]))
                    if comp_idx == 4 or comp_idx == 8:
                        self.assertTrue(med_diff / med_obs < .6)
                    else:
                        self.assertTrue(med_diff / med_obs < .3)
                if visualize:
                    plt.axis('off')
Code example #49
File: common.py  Project: chenliu0831/opendr
def dImage_wrt_2dVerts_bnd(observed, visible, visibility, barycentric, image_width, image_height, num_verts, f, bnd_bool):
    """Construct a sparse jacobian that relates 2D projected vertex positions
    (in the columns) to pixel values (in the rows). This can be done
    in two steps."""

    n_channels = np.atleast_3d(observed).shape[2]
    shape = visibility.shape

    # Step 1: get the structure ready, ie the IS and the JS
    IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
    JS = col(f[visibility.ravel()[visible]].ravel())
    JS = np.hstack((JS*2, JS*2+1)).ravel()

    pxs = np.asarray(visible % shape[1], np.int32)
    pys = np.asarray(np.floor(np.floor(visible) / shape[1]), np.int32)

    if n_channels > 1:
        IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
        JS = np.concatenate([JS for i in range(n_channels)])

    # Step 2: get the data ready, ie the actual values of the derivatives
    ksize = 1
    bndf = bnd_bool.astype(np.float64)
    nbndf = np.logical_not(bnd_bool).astype(np.float64)
    sobel_normalizer = cv2.Sobel(np.asarray(np.tile(row(np.arange(10)), (10, 1)), np.float64), cv2.CV_64F, dx=1, dy=0, ksize=ksize)[5,5]

    bnd_nan = bndf.reshape((observed.shape[0], observed.shape[1], -1)).copy()
    bnd_nan.ravel()[bnd_nan.ravel()>0] = np.nan
    bnd_nan += 1
    obs_nonbnd = np.atleast_3d(observed) * bnd_nan

    ydiffnb, xdiffnb = nangradients(obs_nonbnd)

    observed = np.atleast_3d(observed)
    
    if observed.shape[2] > 1:
        ydiffbnd, xdiffbnd, _ = np.gradient(observed)        
    else:
        ydiffbnd, xdiffbnd = np.gradient(observed.squeeze())
        ydiffbnd = np.atleast_3d(ydiffbnd)
        xdiffbnd = np.atleast_3d(xdiffbnd)

    # This corrects for a bias imposed when boundary differences are spread
    # over two pixels (by np.gradient or similar) but only counted once
    # (since OpenGL's line drawing spans 1 pixel)
    xdiffbnd *= 2.0
    ydiffbnd *= 2.0

    xdiffnb = -xdiffnb
    ydiffnb = -ydiffnb
    xdiffbnd = -xdiffbnd
    ydiffbnd = -ydiffbnd
    # ydiffnb *= 0
    # xdiffnb *= 0

    if False:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.subplot(121)
        plt.imshow(xdiffnb)
        plt.title('xdiffnb')
        plt.subplot(122)
        plt.imshow(xdiffbnd)
        plt.title('xdiffbnd')
        # import pdb; pdb.set_trace()

    idxs = np.isnan(xdiffnb.ravel())
    xdiffnb.ravel()[idxs] = xdiffbnd.ravel()[idxs]

    idxs = np.isnan(ydiffnb.ravel())
    ydiffnb.ravel()[idxs] = ydiffbnd.ravel()[idxs]

    if True: # should be right thing
        xdiff = xdiffnb
        ydiff = ydiffnb
    else:  #should be old way
        xdiff = xdiffbnd
        ydiff = ydiffbnd


    # TODO: NORMALIZER IS WRONG HERE
    # xdiffnb = -cv2.Sobel(obs_nonbnd, cv2.CV_64F, dx=1, dy=0, ksize=ksize) / np.atleast_3d(cv2.Sobel(row(np.arange(obs_nonbnd.shape[1])).astype(np.float64), cv2.CV_64F, dx=1, dy=0, ksize=ksize))
    # ydiffnb = -cv2.Sobel(obs_nonbnd, cv2.CV_64F, dx=0, dy=1, ksize=ksize) / np.atleast_3d(cv2.Sobel(col(np.arange(obs_nonbnd.shape[0])).astype(np.float64), cv2.CV_64F, dx=0, dy=1, ksize=ksize))
    #
    # xdiffnb.ravel()[np.isnan(xdiffnb.ravel())] = 0.
    # ydiffnb.ravel()[np.isnan(ydiffnb.ravel())] = 0.
    # xdiffnb.ravel()[np.isinf(xdiffnb.ravel())] = 0.
    # ydiffnb.ravel()[np.isinf(ydiffnb.ravel())] = 0.

    # xdiffnb = np.atleast_3d(xdiffnb)
    # ydiffnb = np.atleast_3d(ydiffnb)
    #
    # xdiffbnd = -cv2.Sobel(observed, cv2.CV_64F, dx=1, dy=0, ksize=ksize) / sobel_normalizer
    # ydiffbnd = -cv2.Sobel(observed, cv2.CV_64F, dx=0, dy=1, ksize=ksize) / sobel_normalizer
    #
    # xdiff = xdiffnb * np.atleast_3d(nbndf)
    # xdiff.ravel()[np.isnan(xdiff.ravel())] = 0
    # xdiff += xdiffbnd*np.atleast_3d(bndf)
    #
    # ydiff = ydiffnb * np.atleast_3d(nbndf)
    # ydiff.ravel()[np.isnan(ydiff.ravel())] = 0
    # ydiff += ydiffbnd*np.atleast_3d(bndf)

    #import pdb; pdb.set_trace()

    #xdiff = xdiffnb
    #ydiff = ydiffnb

    #import pdb; pdb.set_trace()

    datas = []

    # The data is weighted according to barycentric coordinates
    bc0 = col(barycentric[pys, pxs, 0])
    bc1 = col(barycentric[pys, pxs, 1])
    bc2 = col(barycentric[pys, pxs, 2])
    for k in range(n_channels):
        dxs = xdiff[pys, pxs, k]
        dys = ydiff[pys, pxs, k]
        if f.shape[1] == 3:
            datas.append(np.hstack((col(dxs)*bc0,col(dys)*bc0,col(dxs)*bc1,col(dys)*bc1,col(dxs)*bc2,col(dys)*bc2)).ravel())
        else:
            datas.append(np.hstack((col(dxs)*bc0,col(dys)*bc0,col(dxs)*bc1,col(dys)*bc1)).ravel())

    data = np.concatenate(datas)

    ij = np.vstack((IS.ravel(), JS.ravel()))
    result = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))

    return result
Code example #50
File: geometry.py  Project: cadik/opendr
 def compute_r(self):
     return (self.v.r.reshape(-1,3) / col(self.s)).reshape(self.v.r.shape)
Code example #51
File: test_renderer.py  Project: dimatura/opendr
    def test_cam_derivatives(self):
        mesh, lightings, camera, frustum, renderers = self.load_basics()

        camparms = {
            'c': {'mednz' : 2.2e-2, 'meannz': 4.2e-2, 'desc': 'center of proj diff', 'eps0': 4., 'eps1': .1},
            #'f': {'mednz' : 2.5e-2, 'meannz': 6e-2, 'desc': 'focal diff', 'eps0': 100., 'eps1': .1},
            't': {'mednz' : 1.2e-1, 'meannz': 3.0e-1, 'desc': 'trans diff', 'eps0': .25, 'eps1': .1},
            'rt': {'mednz' : 8e-2, 'meannz': 1.8e-1, 'desc': 'rot diff', 'eps0': 0.02, 'eps1': .5},
            'k': {'mednz' : 7e-2, 'meannz': 5.1e-1, 'desc': 'distortion diff', 'eps0': .5, 'eps1': .05}
        }

        for renderer in renderers:

            im_shape = renderer.shape
            lighting = lightings[renderer.num_channels]

            # Render a rotating mesh
            mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([math.pi/2.,0,0]))        
            mesh_verts = Ch(mesh.v.flatten())
            camera.v = mesh_verts
            lighting.v = mesh_verts
            renderer.vc = lighting
            renderer.camera = camera


            for atrname, info in camparms.items():

                # Get pixels and derivatives
                r = renderer.r

                atr = lambda : getattr(camera, atrname)
                satr = lambda x : setattr(camera, atrname, x)

                atr_size = atr().size
                dr = renderer.dr_wrt(atr())

                # Establish a random direction
                tmp = np.random.rand(atr().size) - .5
                direction = (tmp / np.linalg.norm(tmp))*info['eps0']
                #direction = np.sin(np.ones(atr_size))*info['eps0']
                #direction = np.zeros(atr_size)
                # try:
                #     direction[4] = 1.
                # except: pass
                #direction *= info['eps0']
                eps = info['eps1']

                # Render going forward in that direction
                satr(atr().r + direction*eps/2.)
                rfwd = renderer.r

                # Render going backward in that direction
                satr(atr().r - direction*eps/1.)
                rbwd = renderer.r

                # Put back
                satr(atr().r + direction*eps/2.)

                # Establish empirical and predicted derivatives
                dr_empirical = (np.asarray(rfwd, np.float64) - np.asarray(rbwd, np.float64)).ravel() / eps
                dr_predicted = dr.dot(col(direction.flatten())).reshape(dr_empirical.shape)

                images = OrderedDict()
                images['shifted %s' % (atrname,)] = np.asarray(rfwd, np.float64)-.5
                images[r'empirical %s' % (atrname,)] = dr_empirical
                images[r'predicted %s' % (atrname,)] = dr_predicted
                images[info['desc']] = dr_predicted - dr_empirical

                nonzero = images[info['desc']][np.nonzero(images[info['desc']]!=0)[0]]

                mederror = np.median(np.abs(nonzero))
                meanerror = np.mean(np.abs(nonzero))
                if visualize:
                    matplotlib.rcParams.update({'font.size': 18})
                    plt.figure(figsize=(6*3, 2*3))
                    for idx, title in enumerate(images.keys()):
                        plt.subplot(1,len(images.keys()), idx+1)
                        im = process(images[title].reshape(im_shape), vmin=-.5, vmax=.5)
                        plt.title(title)
                        plt.imshow(im)

                    print('%s: median nonzero %.2e' % (atrname, mederror))
                    print('%s: mean nonzero %.2e' % (atrname, meanerror))
                    plt.draw()
                    plt.show()

                self.assertLess(meanerror, info['meannz'])
                self.assertLess(mederror, info['mednz'])
Code example #52
File: test_renderer.py  Project: dimatura/opendr
    def test_vert_derivatives(self):

        mesh, lightings, camera, frustum, renderers = self.load_basics()

        for renderer in renderers:

            lighting = lightings[renderer.num_channels]
            im_shape = renderer.shape

            # Render a rotating mesh
            mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([math.pi/2.,0,0]))        
            mesh_verts = Ch(mesh.v.flatten())  
            camera.set(v=mesh_verts)
            lighting.set(v=mesh_verts)
            renderer.set(camera=camera)
            renderer.set(vc=lighting)

            # Get pixels and derivatives
            r = renderer.r
            dr = renderer.dr_wrt(mesh_verts)
            
            # Establish a random direction
            direction = (np.random.rand(mesh.v.size).reshape(mesh.v.shape)-.5)*.1 + np.sin(mesh.v*10)*.2
            direction *= .5
            eps = .2

            # Render going forward in that direction
            mesh_verts = Ch(mesh.v+direction*eps/2.)
            lighting.set(v=mesh_verts)
            renderer.set(v=mesh_verts, vc=lighting)
            rfwd = renderer.r
            
            # Render going backward in that direction
            mesh_verts = Ch(mesh.v-direction*eps/2.)
            lighting.set(v=mesh_verts)
            renderer.set(v=mesh_verts, vc=lighting)
            rbwd = renderer.r

            # Establish empirical and predicted derivatives
            dr_empirical = (np.asarray(rfwd, np.float64) - np.asarray(rbwd, np.float64)).ravel() / eps
            dr_predicted = dr.dot(col(direction.flatten())).reshape(dr_empirical.shape) 

            images = OrderedDict()
            images['shifted verts'] = np.asarray(rfwd, np.float64)-.5
            images[r'empirical verts $\left(\frac{dI}{dV}\right)$'] = dr_empirical
            images[r'predicted verts $\left(\frac{dI}{dV}\right)$'] = dr_predicted
            images['difference verts'] = dr_predicted - dr_empirical

            nonzero = images['difference verts'][np.nonzero(images['difference verts']!=0)[0]]

            if visualize:
                matplotlib.rcParams.update({'font.size': 18})
                plt.figure(figsize=(6*3, 2*3))
                for idx, title in enumerate(images.keys()):
                    plt.subplot(1,len(images.keys()), idx+1)
                    im = process(images[title].reshape(im_shape), vmin=-.5, vmax=.5)
                    plt.title(title)
                    plt.imshow(im)
                    
                print('verts: median nonzero %.2e' % (np.median(np.abs(nonzero)),))
                print('verts: mean nonzero %.2e' % (np.mean(np.abs(nonzero)),))
                plt.draw()
                plt.show()

            self.assertLess(np.mean(np.abs(nonzero)), 7e-2)
            self.assertLess(np.median(np.abs(nonzero)), 4e-2)
コード例 #53
0
ファイル: test_renderer.py プロジェクト: dimatura/opendr
    def test_lightpos_derivatives(self):
        
        mesh, lightings, camera, frustum, renderers = self.load_basics()
        

        for renderer in renderers:

            im_shape = renderer.shape
            lighting = lightings[renderer.num_channels]

            # Render a rotating mesh
            mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([math.pi/2.,0,0]))        
            mesh_verts = Ch(mesh.v.flatten())
            camera.set(v=mesh_verts)


            # Get predicted derivatives wrt light pos
            light1_pos = Ch(np.array([-1000,-1000,-1000]))
            lighting.set(light_pos=light1_pos, v=mesh_verts)
            renderer.set(vc=lighting, v=mesh_verts)
            
            dr = renderer.dr_wrt(light1_pos).copy()            

            # Establish a random direction for the light
            direction = (np.random.rand(3)-.5)*1000.
            eps = 1.
        
            # Find empirical forward derivatives in that direction
            lighting.set(light_pos = light1_pos.r + direction*eps/2.)
            renderer.set(vc=lighting)
            rfwd = renderer.r
        
            # Find empirical backward derivatives in that direction
            lighting.set(light_pos = light1_pos.r - direction*eps/2.)
            renderer.set(vc=lighting)
            rbwd = renderer.r
        
            # Establish empirical and predicted derivatives
            dr_empirical = (np.asarray(rfwd, np.float64) - np.asarray(rbwd, np.float64)).ravel() / eps
            dr_predicted = dr.dot(col(direction.flatten())).reshape(dr_empirical.shape)

            images = OrderedDict()
            images['shifted lightpos'] = np.asarray(rfwd, np.float64)-.5
            images[r'empirical lightpos $\left(\frac{dI}{dL_p}\right)$'] = dr_empirical
            images[r'predicted lightpos $\left(\frac{dI}{dL_p}\right)$'] = dr_predicted
            images['difference lightpos'] = dr_predicted-dr_empirical

            nonzero = images['difference lightpos'][np.nonzero(images['difference lightpos']!=0)[0]]

            if visualize:
                matplotlib.rcParams.update({'font.size': 18})
                plt.figure(figsize=(6*3, 2*3))
                for idx, title in enumerate(images.keys()):
                    plt.subplot(1,len(images.keys()), idx+1)
                    im = process(images[title].reshape(im_shape), vmin=-.5, vmax=.5)
                    plt.title(title)
                    plt.imshow(im)
                
                plt.show()
                print('lightpos: median nonzero %.2e' % (np.median(np.abs(nonzero)),))
                print('lightpos: mean nonzero %.2e' % (np.mean(np.abs(nonzero)),))
            self.assertLess(np.mean(np.abs(nonzero)), 2.4e-2)
            self.assertLess(np.median(np.abs(nonzero)), 1.2e-2)
Code example #54
File: test_renderer.py  Project: dimatura/opendr
    def test_color_derivatives(self):
        
        mesh, lightings, camera, frustum, renderers = self.load_basics()
        
        for renderer in renderers:

            im_shape = renderer.shape
            lighting = lightings[renderer.num_channels]

            # Get pixels and dI/dC
            mesh = get_earthmesh(trans=np.array([0,0,5]), rotation = np.array([math.pi/2.,0,0]))        
            mesh_verts = Ch(mesh.v)
            mesh_colors = Ch(mesh.vc)

            camera.set(v=mesh_verts)            

            # import pdb; pdb.set_trace()
            # print '-------------------------------------------'
            #lighting.set(vc=mesh_colors, v=mesh_verts)

            lighting.vc = mesh_colors[:,:renderer.num_channels]
            lighting.v = mesh_verts

            renderer.set(v=mesh_verts, vc=lighting)

            r = renderer.r
            dr = renderer.dr_wrt(mesh_colors).copy()

            # Establish a random direction
            eps = .4
            direction = (np.random.randn(mesh.v.size).reshape(mesh.v.shape)*.1 + np.sin(mesh.v*19)*.1).flatten()

            # Find empirical forward derivatives in that direction
            mesh_colors = Ch(mesh.vc+direction.reshape(mesh.vc.shape)*eps/2.)
            lighting.set(vc=mesh_colors[:,:renderer.num_channels])
            renderer.set(vc=lighting)
            rfwd = renderer.r

            # Find empirical backward derivatives in that direction
            mesh_colors = Ch(mesh.vc-direction.reshape(mesh.vc.shape)*eps/2.)
            lighting.set(vc=mesh_colors[:,:renderer.num_channels])
            renderer.set(vc=lighting)
            rbwd = renderer.r

            dr_empirical = (np.asarray(rfwd, np.float64) - np.asarray(rbwd, np.float64)).ravel() / eps
            dr_predicted = dr.dot(col(direction.flatten())).reshape(dr_empirical.shape)

            images = OrderedDict()
            images['shifted colors'] = np.asarray(rfwd, np.float64)-.5
            images[r'empirical colors $\left(\frac{dI}{dC}\right)$'] = dr_empirical
            images[r'predicted colors $\left(\frac{dI}{dC}\right)$'] = dr_predicted
            images['difference colors'] = dr_predicted-dr_empirical

            nonzero = images['difference colors'][np.nonzero(images['difference colors']!=0)[0]]

            if visualize:
                matplotlib.rcParams.update({'font.size': 18})
                plt.figure(figsize=(6*3, 2*3))
                for idx, title in enumerate(images.keys()):
                    plt.subplot(1,len(images.keys()), idx+1)
                    im = process(images[title].reshape(im_shape), vmin=-.5, vmax=.5)
                    plt.title(title)
                    plt.imshow(im)
                    
                plt.show()
                print('color: median nonzero %.2e' % (np.median(np.abs(nonzero)),))
                print('color: mean nonzero %.2e' % (np.mean(np.abs(nonzero)),))
            self.assertLess(np.mean(np.abs(nonzero)), 2e-2)
            self.assertLess(np.median(np.abs(nonzero)), 4.5e-3)