Example #1
def pose_skeleton(Gs, skel_dict, cvs=None, x_mat=None):
    Xm = skel_dict.get('rootMat', np.eye(
        3, 4, dtype=np.float32)) if x_mat is None else x_mat
    if cvs is None: cvs = skel_dict['chanValues']
    ISCV.pose_skeleton(Gs, skel_dict['Ls'], skel_dict['jointParents'],
                       skel_dict['jointChans'], skel_dict['jointChanSplits'],
                       cvs, Xm)
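
A note on the default: np.eye(3, 4) is the 3x4 identity transform [R|t] with R = I and t = 0, which is the root-matrix convention these examples use throughout.

import numpy as np

# np.eye(3, 4) gives the 3x4 identity [R|t]: identity rotation, zero translation.
print(np.eye(3, 4, dtype=np.float32))
# [[1. 0. 0. 0.]
#  [0. 1. 0. 0.]
#  [0. 0. 1. 0.]]
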
Example #2
def detect_dots_with_box(data, pixel_threshold, opts, x1, y1, x2, y2):
    """
	Extract the dots from data contained in a single frame. The detected dots are then filtered based on
	the min/max dot size, circularity, etc.
	"""
    min_min, max_max = 2, opts['max_dot_size'] * 2

    if isinstance(pixel_threshold, tuple) and len(pixel_threshold) == 3:
        dots = ISCV.detect_bright_dots(data, pixel_threshold[0],
                                       pixel_threshold[1], pixel_threshold[2],
                                       x1, y1, x2, y2)
    else:
        dots = ISCV.detect_bright_dots(data, pixel_threshold, pixel_threshold,
                                       pixel_threshold, x1, y1, x2, y2)

    min_ds, max_ds, circ = opts['min_dot_size']**4, opts[
        'max_dot_size']**4, opts['circularity_threshold']**2
    filtered_dots = [
        dot for dot in dots
        if (dot.x1 - dot.x0 + dot.y1 -
            dot.y0) > min_min and (dot.x1 - dot.x0 + dot.y1 - dot.y0) < max_max
        and (dot.sxx * dot.syy) > min_ds and (dot.sxx * dot.syy) < max_ds
        and dot.sxx < circ * dot.syy and dot.syy < circ * dot.sxx
    ]

    height, width, chans = data.shape
    psc = np.array([2.0 / width, -2.0 / width], dtype=np.float32)
    pof = np.array([-1.0, height / float(width)], dtype=np.float32)

    dotScreenCoords = np.array([[dot.sx, dot.sy] for dot in filtered_dots],
                               dtype=np.float32).reshape(-1, 2) * psc + pof

    return filtered_dots, dotScreenCoords
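
The psc/pof pair maps pixel coordinates into a width-normalised screen space: x covers [-1, 1) and y is flipped and scaled by the same 2/width factor so the aspect ratio is preserved. A standalone sketch of that mapping (assuming the usual top-left pixel origin):

import numpy as np

def pixels_to_screen(pts, width, height):
    # x in [0, width) -> [-1, 1); y is negated and offset so that (0, 0) maps
    # to (-1, height/width) and the image centre maps to (0, 0).
    psc = np.array([2.0 / width, -2.0 / width], dtype=np.float32)
    pof = np.array([-1.0, height / float(width)], dtype=np.float32)
    return np.float32(pts).reshape(-1, 2) * psc + pof

print(pixels_to_screen([[640, 360]], 1280, 720))  # centre -> [[0. 0.]]
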
Example #3
def make_quad_distortion_mesh(ooa=1.0,
                              w=64,
                              h=64,
                              Kox=0,
                              Koy=0,
                              dist=(0.29, 0.22)):
    startGL()
    xsc, ysc, w4 = 1.0 / w, 1.0 / h, w * 4
    vs, vts = [], []
    #quads2 = list(range(0,w4,4)) + list(range(w4-3,w4*h,w4)) + list(range(w4*h-2,w4*(h-1),-4)) + list(range(w4*(h-1)+3,0,-w4))
    quads = np.arange(w4 * h, dtype=np.int32)
    v0 = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]],
                  dtype=np.float32)
    for y in range(h):
        for x in range(w):
            xs = v0 + [x, y, 0]
            vs.extend(xs)
            vts.extend(xs[:, :2])
    vs = np.float32(
        (np.float32(vs) * [2 * xsc, 2 * ysc, 0] - [1, 1, 1]) * [1, ooa, 1])
    vs[:, 2] *= -1  # TODO
    vts = np.float32(np.float32(vts) * [xsc, ysc])
    ISCV.undistort_points(vs, -float(Kox), -float(Koy), float(dist[0]),
                          float(dist[1]), vs)
    vs = vbo.VBO(vs, usage='GL_STATIC_DRAW_ARB')
    vts = vbo.VBO(vts, usage='GL_STATIC_DRAW_ARB')
    quads = vbo.VBO(quads,
                    target=GL.GL_ELEMENT_ARRAY_BUFFER,
                    usage='GL_STATIC_DRAW_ARB')
    return vs, vts, quads
Example #4
def testDerivatives(skelDict, chanValues, effectorData, effectorTargets):
    ''' skelDict specifies a skeleton.
		Given an initial skeleton pose (chanValues), effectors (i.e. constraints: joint, offset, weight, target), solve for the skeleton pose.
		Effector weights and targets are 3x4 matrices.
		* Setting 1 in the weight's 4th column makes a position constraint.
		* Setting 100 in the weight's first 3 columns makes an orientation constraint.
	'''
    rootMat = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]],
                       dtype=np.float32)
    effectorJoints, effectorOffsets, effectorWeights, usedChannels, usedCAEs, usedCAEsSplits = effectorData
    jointParents = skelDict['jointParents']
    Gs = skelDict['Gs']
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    numChannels = jointChanSplits[-1]
    numEffectors = len(effectorJoints)
    numUsedChannels = len(usedChannels)
    channelMats = np.zeros((numChannels, 3, 4), dtype=np.float32)
    usedEffectors = np.where(effectorWeights.reshape(-1) != 0)[0]
    numUsedEffectors = len(usedEffectors)
    effectors = np.zeros((numEffectors, 3, 4), dtype=np.float32)
    residual = np.zeros((numEffectors, 3, 4), dtype=np.float32)
    derrors = np.zeros((numUsedChannels, numEffectors, 3, 4), dtype=np.float32)
    steps = np.ones((numUsedChannels), dtype=np.float32) * 1.0
    steps[np.where(jointChans[usedChannels] < 3)[0]] = 30.
    delta = np.zeros((numUsedChannels), dtype=np.float32)
    JJTB = np.zeros((numEffectors * 12), dtype=np.float32)
    Character.pose_skeleton_with_chan_mats(channelMats, Gs, chanValues,
                                           rootMat)
    bestScore = ISCV.pose_effectors(effectors, residual, Gs, effectorJoints,
                                    effectorOffsets, effectorWeights,
                                    effectorTargets)
    print(bestScore, chanValues[usedChannels])
    ISCV.derror_dchannel(derrors, channelMats, usedChannels, usedCAEs,
                         usedCAEsSplits, jointChans, effectors,
                         effectorWeights)
    JT = derrors.reshape(numUsedChannels, -1)
    B = residual.reshape(-1)
    residual2 = np.zeros(residual.shape, dtype=np.float32)
    # JT[ci] = dresidual_dci
    for uci in xrange(numUsedChannels):
        ci = usedChannels[uci]
        tmp = chanValues[ci]
        d_ci = max(0.001, abs(chanValues[ci] * 0.001))
        chanValues[ci] += d_ci
        Character.pose_skeleton(Gs, skelDict, chanValues, rootMat)
        bestScore = ISCV.pose_effectors(effectors, residual2, Gs,
                                        effectorJoints, effectorOffsets,
                                        effectorWeights, effectorTargets)
        #print (bestScore)
        diff = (residual2.reshape(-1) - B) / -d_ci
        if np.dot(JT[uci], diff) / np.sqrt(1e-10 + np.dot(JT[uci], JT[uci]) *
                                           np.dot(diff, diff)) < 0.99:
            print(
                uci, ci,
                np.dot(JT[uci], diff) /
                np.sqrt(1e-10 + np.dot(JT[uci], JT[uci]) * np.dot(diff, diff)))
        chanValues[ci] = tmp
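
The loop above validates the analytic derivatives in derrors against forward finite differences, scoring each channel by cosine similarity (values near 1 mean agreement). The same check pattern in a self-contained form:

import numpy as np

def cosine_check(f, J, x, i, eps=1e-3):
    # Compare column i of the analytic Jacobian J(x) against a forward finite
    # difference of f; returns ~1.0 when the two directions agree.
    x1 = x.copy(); x1[i] += eps
    diff = (f(x1) - f(x)) / eps
    g = J(x)[:, i]
    return np.dot(g, diff) / np.sqrt(1e-10 + np.dot(g, g) * np.dot(diff, diff))

f = lambda x: np.array([x[0] ** 2, x[0] * x[1]])
J = lambda x: np.array([[2 * x[0], 0.0], [x[1], x[0]]])
print(cosine_check(f, J, np.array([1.0, 2.0]), 0))  # ~1.0
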
Example #5
def animateHead(newFrame):
    global ted_geom, ted_geom2, ted_shape, tony_geom, tony_shape, tony_geom2, tony_obj, ted_obj, diff_geom, c3d_frames
    global tony_shape_vector, tony_shape_mat, ted_lo_rest, ted_lo_mat, ted_lo_which, c3d_points
    global md, movies
    tony_geom.image, tony_geom.bindImage, tony_geom.bindId = ted_geom.image, ted_geom.bindImage, ted_geom.bindId  # reuse the texture!
    fo = 55
    MovieReader.readFrame(md, seekFrame=((newFrame + fo) / 2))
    view = QApp.view()
    frac = (newFrame % 200) / 100.
    if (frac > 1.0): frac = 2.0 - frac
    fi = newFrame % len(c3d_frames)

    frame = c3d_frames[fi][ted_lo_which]
    which = np.where(frame[:, 3] == 0)[0]
    x3ds = frame[which, :3]
    #print which,x3ds.shape,ted_lo_rest.shape,ted_lo_mat.shape
    bnds = np.array([[0, 1]] * ted_lo_mat.shape[0], dtype=np.float32)
    #print len(ted_lo_which),len(which),ted_lo_which,which
    tony_shape_vector[:] = fitLoResShapeMat(ted_lo_rest,
                                            ted_lo_mat,
                                            x3ds,
                                            Aoffset=10.0,
                                            Boffset=3.0,
                                            x_0=tony_shape_vector,
                                            indices=which,
                                            bounds=bnds)
    #global tony_shape_vectors; tony_shape_vector[:] = tony_shape_vectors[newFrame%len(tony_shape_vectors)]

    #tony_shape_vector *= 0.
    #tony_shape_vector += (np.random.random(len(tony_shape_vector)) - 0.5)*0.2
    if 1:
        ted_shape_v = np.dot(ted_shape_mat_T, tony_shape_vector).reshape(-1, 3)
    else:
        import ISCV
        ted_shape_v = np.zeros_like(ted_obj['v'])
        ISCV.dot(ted_shape_mat_T, tony_shape_vector, ted_shape_v.reshape(-1))
    tony_shape_v = ted_shape_v
    #tony_shape_v = tony_shape['v']*frac
    ted_geom.setVs(ted_obj['v'] + ted_shape_v)  #ted_shape['v'] * frac)
    tony_geom.setVs(tony_obj['v'] + tony_shape_v -
                    np.array([200, 0, 0], dtype=np.float32))
    ted_geom2.setVs(ted_obj['v'] * (1.0 - frac) +
                    tony_tedtopo_obj['v'] * frac +
                    np.array([200, 0, 0], dtype=np.float32))
    #if len(ted_shape_v) == len(tony_shape_v):
    #	tony_geom2.setVs(tony_obj['v'] + ted_shape_v - [400,0,0])
    #	diff_geom.setVs(ted_obj['v'] + tony_shape_v - ted_shape_v - [600,0,0])

    #print [c3d_labels[i] for i in which]
    surface_points.vertices = np.dot(ted_lo_mat.T,
                                     tony_shape_vector).T + ted_lo_rest
    surface_points.colour = [0, 1, 0, 1]  # green
    c3d_points.vertices = x3ds
    c3d_points.colour = [1, 0, 0, 1]  # red
    QApp.app.updateGL()
Example #6
def find_labels(x2ds, model, x2ds_indices, model_indices, threshold,
                labels_out):
    A, B = fit_points(x2ds[x2ds_indices], model[model_indices])
    print 'cf', np.dot(x2ds[x2ds_indices], A) + B - model[model_indices]
    cloud = ISCV.HashCloud2D(model, threshold)
    L = np.dot(x2ds, A) + B
    scores, matches, matches_splits = cloud.score(L)
    sc = ISCV.min_assignment_sparse(scores, matches, matches_splits,
                                    threshold**2, labels_out)
    ms = np.sum(labels_out != -1)
    return ((sc - (len(x2ds) - len(model)) * (threshold**2)) /
            len(model))**0.5, ms
Example #7
def eval_shape_predictor(predictor, img, rect):
    rect = np.array(rect, dtype=np.int32)
    ref_pinv, ref_shape, forest_splits, forest_leaves, anchor_idx, deltas = predictor
    if 1:  # all-in-one C 1.8ms
        current_shape = ref_shape.copy()
        forest_leaves2 = forest_leaves.reshape(forest_leaves.shape[0],
                                               forest_leaves.shape[1],
                                               forest_leaves.shape[2], -1)
        ISCV.eval_shape_predictor(ref_pinv, ref_shape, forest_splits,
                                  forest_leaves2, anchor_idx, deltas, img,
                                  rect, current_shape)
        return current_shape
Example #8
def process_frame(deinterlacing, detectingWands, frame, opts, pair):
    ci, md = pair
    img = get_movie_frame(md, frame, deinterlacing)
    #data = filter_movie_frame(img, small_blur, large_blur)
    #img, data = get_processed_movie_frame(md, frame, small_blur, large_blur, deinterlacing)
    QApp.view().cameras[ci + 1].invalidateImageData()
    """
	if 1:  # show the filtered image
		img[:] = data
		pass
	if 0:  # crush the image to see the blobs
		lookup = np.zeros(256, dtype=np.uint8)
		lookup[threshold_bright:] = 255
		lookup[255 - threshold_dark_inv:threshold_bright] = 128
		img[:] = lookup[img]
	"""
    if 1:
        good_darks, pts0, good_lights, pts1, data = get_dark_and_light_points(
            img, frame, ci, opts)
        if 1:  # show the filtered image
            #print "data before insertion", type(data), data.shape
            #sys.exit(0)
            img[:] = data
        if 0:  # crush the image to see the blobs
            lookup = np.zeros(256, dtype=np.uint8)
            lookup[threshold_bright:] = 255
            lookup[255 - threshold_dark_inv:threshold_bright] = 128
            img[:] = lookup[img]
        # good_darks, pts0 = Detect.detect_dots(255-data, opts['threshold_dark_inv'], opts)
        # good_lights,pts1 = Detect.detect_dots(data, opts['threshold_bright'], opts)
        print ci, frame, len(pts0), len(pts1), 'good points (darks,lights)'

        if detectingWands:
            ratio = 2.0
            x2d_threshold = 0.5
            straightness_threshold = 0.01 * 2
            match_threshold = 0.07 * 2
            x2ds_labels = -np.ones(pts1.shape[0], dtype=np.int32)
            x2ds_splits = np.array([0, pts1.shape[0]], dtype=np.int32)
            ISCV.label_T_wand(pts1, x2ds_splits, x2ds_labels, ratio,
                              x2d_threshold, straightness_threshold,
                              match_threshold)
            print x2ds_labels

            for r, li in zip(good_lights, x2ds_labels):
                if li != -1:  # make some red boxes
                    dx, dy = 10, 10
                    img[int(r.sy - dy):int(r.sy + dy),
                        int(r.sx - dx):int(r.sx + dx), 0] = 128
    else:
        pts0 = pts1 = []
    return (pts0, pts1)
Example #9
def getMapping(hi_geo, triangles, lo_geo, threshold=20.0):
    '''given a hi-res geometry and topology, and a lo-res geometry, find the triangles and barycentric weights that
	when applied to the hi-res geometry, best fit the lo-res geometry.
	The mapping is returned as a list of weight triples and a list of index triples, per vertex.
	The output vertex is the weighted sum of the extracted indicated source vertices.'''
    is3D = (hi_geo.shape[1] == 3)
    lunterp = lunterp3D if is3D else lunterp2D
    numVertsHi = hi_geo.shape[0]
    numVertsLo = lo_geo.shape[0]
    weights = np.zeros((numVertsLo, 3), dtype=np.float32)
    indices = -np.ones((numVertsLo, 3), dtype=np.int32)
    import ISCV
    cloud = ISCV.HashCloud3D(hi_geo, threshold) if is3D else ISCV.HashCloud2D(
        hi_geo, threshold)
    scores, matches, matches_splits = cloud.score(lo_geo.copy())
    # the indices of the closest 3 hi verts to each lo vert
    D = [
        matches[m0 + np.argsort(scores[m0:m1])[:3]] if m0 != m1 else []
        for m0, m1 in zip(matches_splits[:-1], matches_splits[1:])
    ]
    # for speed-up, compute all the triangles involving each hi vertex.
    T = [[] for x in xrange(numVertsHi)]
    for ti, tri in enumerate(triangles):
        for tj in tri:
            T[tj].append(tri)
    bads = []
    for vi, (lo_x, nearIndices, ws,
             xis) in enumerate(zip(lo_geo, D, weights, indices)):
        best = -10
        for ni in nearIndices:
            for tri in T[ni]:
                xws = lunterp(hi_geo[tri], lo_x)
                sc = np.min(xws)
                if sc > best:  # pick the best triangle (the one that it's closest to being inside)
                    best = sc
                    xis[:] = tri
                    ws[:] = xws
                    if best >= 0: break
            if best >= 0: break
        # the vertex *might* not be inside any of these triangles
        if best < -0.1:
            bads.append(vi)
            ws[:] = 0.0  # ensure there's no weight
            xis[:] = -1  # and no label
    if len(bads):
        print 'vertices outside', len(bads)
        print bads[:10], '...'
    which = np.where(indices[:, 0] != -1)[0]
    print len(which), 'vertices inside'
    return which, weights[which], indices[which]
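
lunterp2D/lunterp3D are not shown here; all the loop needs from them is a triple of barycentric weights whose minimum goes negative when the point falls outside the triangle. A standalone 2D sketch of that computation:

import numpy as np

def barycentric2D(tri, p):
    # Solve p = a + u*(b - a) + v*(c - a); the weights (1-u-v, u, v) are all
    # non-negative exactly when p lies inside triangle (a, b, c).
    a, b, c = np.float64(tri)
    u, v = np.linalg.solve(np.column_stack((b - a, c - a)), p - a)
    return np.array([1.0 - u - v, u, v])

print(barycentric2D([[0, 0], [1, 0], [0, 1]], np.float64([0.25, 0.25])))
# [0.5 0.25 0.25] -> min weight >= 0, so the point is inside
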
Example #10
def copy_joints(src, tgt, copyData):
    src_Ls = src.Ls
    src_jointChans = src.jointChans
    src_jointChanSplits = src.jointChanSplits
    src_chanValues = src.chanValues
    tgt_Ls = tgt.Ls
    tgt_jointChans = tgt.jointChans
    tgt_jointChanSplits = tgt.jointChanSplits
    tgt_chanValues = tgt.chanValues
    jointMapping, jointSwizzles, jointOffsets = copyData
    ISCV.copy_joints(src_Ls, src_jointChans, src_jointChanSplits,
                     src_chanValues, tgt_Ls, tgt_jointChans,
                     tgt_jointChanSplits, tgt_chanValues, jointMapping,
                     jointSwizzles, jointOffsets)
Example #11
def errorFunction(X, n_cameras, n_points, x2d_splits, x2ds_labels, x2ds):
	camera_params = X[:n_cameras * 11].reshape((n_cameras, 11))
	x3ds = X[n_cameras * 11:].reshape((n_points, 3))

	projected_x2ds = np.zeros_like(x2ds)
	for camVec, c0, c1 in zip(camera_params, x2d_splits[:-1], x2d_splits[1:]):
		P, distortion = vecToMat(camVec)
		x3d_labels = np.int32([x2ds_labels[i] for i in xrange(c0, c1)])
		proj_x2ds, proj_splits, proj_labels = ISCV.project(np.float32(x3ds[x3d_labels]), x3d_labels, np.float32([P]))
		assert np.all(x3d_labels == proj_labels)
		ISCV.distort_points(proj_x2ds, float(camVec[9]), float(camVec[10]), float(distortion[0]), float(distortion[1]), proj_x2ds)
		projected_x2ds[c0:c1, :] = proj_x2ds

	return (projected_x2ds - x2ds).ravel()
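
errorFunction is shaped for a standard least-squares driver: the first n_cameras*11 entries of X are the per-camera parameters (as consumed by vecToMat, defined elsewhere) and the rest are the 3D points. A sketch of the packing, with the solver call left commented since it needs real detection data:

import numpy as np

n_cameras, n_points = 2, 5
camera_params = np.zeros((n_cameras, 11), dtype=np.float32)  # hypothetical initial cameras
x3ds0 = np.random.rand(n_points, 3).astype(np.float32)       # hypothetical initial points
X0 = np.concatenate([camera_params.ravel(), x3ds0.ravel()])
assert X0.shape == (n_cameras * 11 + n_points * 3,)
# from scipy.optimize import least_squares
# res = least_squares(errorFunction, X0,
#                     args=(n_cameras, n_points, x2d_splits, x2ds_labels, x2ds))
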
Example #12
 def test_trianglesToEdgeList(self, triangles, numVerts=None):
     '''Convert a list of triangle indices to an array of up-to-10 neighbouring vertices per vertex (following anticlockwise order).'''
     if numVerts is None:
         numVerts = np.max(triangles) + 1 if len(triangles) else 1
     if numVerts < 1: numVerts = 1  # avoid empty arrays
     T = [dict() for t in xrange(numVerts)]
     P = [dict() for t in xrange(numVerts)]
     for t0, t1, t2 in triangles:
         T[t0][t1], T[t1][t2], T[t2][t0] = t2, t0, t1
         P[t1][t0], P[t2][t1], P[t0][t2] = t2, t0, t1
     S = np.zeros((numVerts, 10), dtype=np.int32)
     for vi, (Si, es, ps) in enumerate(zip(S, T, P)):
         Si[:] = vi
         if not es: continue
         v = min(es.keys())
         while v in ps:
             v = ps.pop(v)
         for li in xrange(10):
             Si[li] = v
             if v not in es: break
             v = es.pop(v, vi)
     import ISCV
     ret = ISCV.trianglesToEdgeList(triangles, numVerts)
     assert np.all(S == ret), repr(S) + repr(ret)
     return ret
Example #13
def detect_wand(x2ds_data, x2ds_splits, mats, thresh=20. / 2000., x3d_threshold=1000000.):
	Ps = np.array([m[2] / np.linalg.norm(m[2][0, :3]) for m in mats], dtype=np.float32)
	wand_x3ds = np.array([[160, 0, 0], [0, 0, 0], [-80, 0, 0], [0, 0, -120], [0, 0, -240]], dtype=np.float32)
	x2ds_labels = -np.ones(x2ds_data.shape[0], dtype=np.int32)
	ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
	x2ds_labels2 = x2ds_labels.copy()
	count = np.sum(x2ds_labels2 != -1) / 5
	if count < 3: return None, None, None
	x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps)
	count = ISCV.project_and_clean(x3ds, Ps, x2ds_data, x2ds_splits, x2ds_labels, x2ds_labels2, thresh ** 2, thresh ** 2, x3d_threshold)
	if count < 3: return None, None, None
	x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps)
	assert np.all(x3ds_labels == [0, 1, 2, 3, 4]), 'ERROR: Labels do not match' # skip if somehow not all points seen
	assert np.max(x3ds ** 2) < 1e9, 'ERROR: Values out of bounds' + repr(x3ds)
	mat = rigid_align_points(wand_x3ds, x3ds)
	x3ds = np.dot(wand_x3ds, mat[:3, :3].T) + mat[:, 3]
	return x3ds, x3ds_labels, x2ds_labels2
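
rigid_align_points is used here to snap the reconstruction onto the known wand geometry. Assuming it computes the best rigid [R|t] in the usual least-squares sense, a Kabsch-style sketch would be:

import numpy as np

def rigid_align(A, B):
    # Best rigid transform (rotation + translation) mapping points A onto B.
    ca, cb = A.mean(axis=0), B.mean(axis=0)
    U, S, Vt = np.linalg.svd(np.dot((A - ca).T, (B - cb)))
    if np.linalg.det(np.dot(Vt.T, U.T)) < 0: Vt[-1] *= -1  # avoid reflections
    R = np.dot(Vt.T, U.T)
    return np.hstack([R, (cb - np.dot(R, ca)).reshape(3, 1)])  # 3x4 [R|t]

A = np.random.rand(5, 3)
print(np.round(rigid_align(A, A + [1.0, 2.0, 3.0])[:, 3], 6))  # recovers [1. 2. 3.]
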
Example #14
def bake_ball_joints(skelDict):
    """
	For every 3 DoF joint, multiply in matrices to reduce gimbal lock.
	Includes pre-conversion python code.
	
	Args:
		skelDict (GskelDict): The Skeleton to process.
		
	Requires:
		ISCV.bake_ball_joints
	"""
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    chanValues = skelDict['chanValues']
    if 'Ls_orig' not in skelDict: skelDict['Ls_orig'] = Ls.copy()
    Ls_orig = skelDict['Ls_orig']
    ISCV.bake_ball_joints(Ls, jointChans, jointChanSplits, chanValues)
Example #15
    def project(self, x3ds, aspect):
        # set the buffer as the texture
        K, RT, P, ks, T, wh = self.mat

        #num_pts = len(x3ds)
        #x2ds, splits, labels = ISCV.project(x3ds, np.arange(num_pts,dtype=np.int32), P[:3,:4].reshape(1,3,4))
        #x2s = 1e10*np.ones((num_pts,2),dtype=np.float32)
        #x2s[labels,:] = x2ds

        # project the 3D vertices into the camera using the projection matrix
        proj = np.dot(x3ds, P[:3, :3].T) + P[:3, 3]
        ds = -proj[:, 2]
        x2s = proj[:, :2] / ds.reshape(-1, 1)
        # distort the projections using the camera lens
        ISCV.distort_points(x2s, float(-K[0, 2]), float(-K[1, 2]),
                            float(ks[0]), float(ks[1]), x2s)
        # convert to texture coordinates
        x2s *= [0.5, -0.5 * aspect]
        x2s += 0.5
        self.x2ds = x2s
        if 0:  # wip
            self.ds = ds
            GL.glClearColor(0, 0, 0, 1)
            GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)

            GL.glMatrixMode(GL.GL_PROJECTION)
            GL.glLoadIdentity()
            GL.glMultMatrixf(
                np.array([[1, 0, 0, 0], [0, aspect, 0, 0], [0, 0, -1, -1],
                          [0, 0, cameraInterest * -0.02, 0]],
                         dtype=np.float32))

            GL.glMatrixMode(GL.GL_MODELVIEW)
            GL.glLoadIdentity()
            GL.glMultMatrixd(P.T)
            GL.glDisable(GL.GL_TEXTURE_2D)
            GL.glEnable(GL.GL_DEPTH_TEST)
            GL.glShadeModel(GL.GL_FLAT)
            GL.glDisable(GL.GL_LIGHTING)
            GL.glEnableClientState(GL.GL_VERTEX_ARRAY)
            GL.glVertexPointerf(x3ds)
            self.tris.bind()
            GL.glDrawElementsui(GL.GL_TRIANGLES, self.tris)
            self.tris.unbind()
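
Minus the ISCV lens call, the projection step above is a plain pinhole projection followed by a texture-space remap. A compact restatement:

import numpy as np

def project_to_texture(x3ds, P, aspect):
    # Apply the 3x4 projection, divide by depth (-z in this convention),
    # then map to texture coordinates exactly as the method above does.
    proj = np.dot(x3ds, P[:3, :3].T) + P[:3, 3]
    x2s = proj[:, :2] / -proj[:, 2].reshape(-1, 1)
    return x2s * [0.5, -0.5 * aspect] + 0.5

P = np.float32(np.hstack([np.eye(3), np.zeros((3, 1))]))
print(project_to_texture(np.float32([[0, 0, -2]]), P, 9 / 16.0))  # [[0.5 0.5]]
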
Example #16
    def cook(self, location, interface, attrs):
        if not self.useFrame(interface.frame(), attrs['frameRange']): return
        x3ds = interface.attr('x3ds')
        if x3ds is None:
            self.logger.error('No x3ds found at: %s' % location)
            return

        x3ds_labels = interface.attr('x3ds_labels')
        if x3ds_labels is None:
            self.logger.error('No 3D labels found at: %s' % location)
            return

        x2dsLocation = attrs['x2ds']
        x2ds, splits = interface.attr('x2ds',
                                      atLocation=x2dsLocation), interface.attr(
                                          'x2ds_splits',
                                          atLocation=x2dsLocation)
        if x2ds is None or splits is None:
            self.logger.error('No detections found at: %s' % x2dsLocation)
            return

        calLocation = attrs['calibration']
        Ps = interface.attr('Ps', atLocation=calLocation)
        if Ps is None:
            self.logger.error('No calibration data found at: %s' % calLocation)
            return

        import ISCV
        x2d_threshold, pred_2d_threshold = 6. / 2000., 100. / 2000
        clouds = ISCV.HashCloud2DList(x2ds, splits,
                                      max(pred_2d_threshold, x2d_threshold))
        sc, labels, _ = clouds.project_assign(
            np.ascontiguousarray(x3ds, dtype=np.float32), x3ds_labels, Ps,
            x2d_threshold)

        mats = interface.attr('mats', atLocation=calLocation)
        camPositions = np.array([m[4] for m in mats], dtype=np.float32)
        normals = np.zeros_like(x3ds)
        for xi, (x3d, label3d) in enumerate(zip(x3ds, x3ds_labels)):
            camIds = [
                interface.findCameraIdFromRayId(rayId, splits)
                for rayId in np.where(labels == label3d)[0]
            ]
            if not camIds: continue
            # camPos = Ps[camIds][:, :3, 3]
            camPos = camPositions[camIds]
            rays = camPos - [x3d] * len(camPos)
            rays = np.float32([ray / np.linalg.norm(ray) for ray in rays])
            raysDps = np.dot(rays, rays.T)
            bestRay = np.sum(raysDps > 0, axis=0).argmax()
            # goodRays = np.where(raysDps[bestRay] > 0.05)[0]
            normals[xi] = rays[bestRay]

        interface.setAttr('normals', normals)
Example #17
def unbake_ball_joints(skelDict):
    """
		
	Args:
		skelDict (GskelDict): The Skeleton to process.
		Ls_orig (float[?}): Unbaked arrangement of Local Matrices of the skeleton's 3-DoF joints.
		
	Returns:
		None: Results are a transformation of the skelDict
		
	Requires:
		ISCV.unbake_ball_joints
	"""
    Ls = skelDict['Ls']
    jointChans = skelDict['jointChans']
    jointChanSplits = skelDict['jointChanSplits']
    chanValues = skelDict['chanValues']
    if 'Ls_orig' not in skelDict: skelDict['Ls_orig'] = Ls.copy()
    Ls_orig = skelDict['Ls_orig']
    ISCV.unbake_ball_joints(Ls, jointChans, jointChanSplits, chanValues,
                            Ls_orig)
Example #18
def findCloseVerts(xs, threshold=80.0):
    import ISCV
    cloud = ISCV.HashCloud3D(xs, threshold)
    scores, matches, matches_splits = cloud.score(xs)
    good = (scores < (threshold**2))
    D = [
        matches[m0:m1][np.where(good[m0:m1])[0]]
        for m0, m1 in zip(matches_splits[:-1], matches_splits[1:])
    ]
    print 'avg verts', np.mean(map(len, D))
    #for vi,Di in enumerate(D): Di.discard(vi); D[vi] = np.array(list(Di),dtype=np.int32)
    return D
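
ISCV.HashCloud3D plus score is a radius query: for each query point it returns candidate matches and their squared distances, which the list comprehension then filters to the threshold. For illustration only, an equivalent query with scipy:

import numpy as np
from scipy.spatial import cKDTree

def findCloseVerts_kdtree(xs, threshold=80.0):
    # Neighbour indices within `threshold` of each vertex (self included),
    # using a k-d tree instead of ISCV's spatial hash.
    tree = cKDTree(xs)
    return [np.int32(nbrs) for nbrs in tree.query_ball_point(xs, threshold)]
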
Example #19
def pointsToEdges(points, mapping_list=None):
    # mapping_list is such that i is mapped to mapping_list[i]
    from scipy.spatial import Delaunay
    tris = Delaunay(points).simplices
    edges = ISCV.trianglesToEdgeList(
        np.max(tris) + 1 if len(tris) else 1, tris)  # (numVerts,10)
    edgeList = set()
    for vi, el in enumerate(edges):
        which = np.where(el > vi)[0]
        edgeList.update(zip(which, el[which]))
    edgeList = np.int32(list(edgeList))
    if mapping_list is not None: edgeList = np.int32(mapping_list)[edgeList]
    return edgeList
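
The same edge set can be computed without ISCV by sorting each triangle edge into (lo, hi) order and de-duplicating; a pure numpy/scipy sketch:

import numpy as np
from scipy.spatial import Delaunay

def points_to_edges(points):
    # Unique undirected edges of the Delaunay triangulation of `points`.
    tris = Delaunay(points).simplices
    pairs = np.vstack([tris[:, [0, 1]], tris[:, [1, 2]], tris[:, [2, 0]]])
    pairs.sort(axis=1)  # undirected: store each edge as (lo, hi)
    return np.unique(pairs, axis=0)

print(points_to_edges(np.random.rand(10, 2)).shape)  # (numEdges, 2)
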
Example #20
def solve_x3ds_normals(x2ds, splits, labels, Ps, rays, robust=True):
    x3ds, x3ds_labels, E, x2ds_labels = ISCV.solve_x3ds(
        x2ds, splits, labels, Ps, robust)

    x3ds_normals = np.ones_like(x3ds)
    for xi, label in enumerate(x3ds_labels):
        rayIndices = np.where(labels == label)[0]
        x3ds_normals[xi] = np.sum(rays[rayIndices], axis=0)

    # Normalise the ray directions
    x3ds_normals /= (np.sum(x3ds_normals * x3ds_normals,
                            axis=1)**0.5).reshape(-1, 1)

    return x3ds, x3ds_labels, x3ds_normals, E, x2ds_labels
Example #21
def test_2D(frames, x3ds, detections, mats, x2d_threshold=0.025):
    '''Test the labelling of a 2d point sequence by propagating the labels to the next frame.'''
    import IO
    print 'loading 2d'
    print 'num frames', len(frames)
    prev_x2ds, prev_splits = detections[frames[0]]
    prev_vels = np.zeros_like(prev_x2ds)
    clouds = ISCV.HashCloud2DList(prev_x2ds, prev_splits, 6. / 2000.)
    x3ds_labels = np.arange(len(x3ds), dtype=np.int32)
    Ps = np.array([m[2] / (m[0][0, 0]) for m in mats], dtype=np.float32)
    sc, prev_labels, _ = Label.project_assign(clouds, x3ds, x3ds_labels, Ps,
                                              6. / 2000.)

    ret = []
    for fi in frames:
        x2ds, splits = detections[fi]
        clouds = ISCV.HashCloud2DList(x2ds, splits, x2d_threshold)
        sc, labels, vels = clouds.assign_with_vel(prev_x2ds, prev_vels,
                                                  prev_splits, prev_labels,
                                                  x2d_threshold)
        prev_x2ds, prev_splits, prev_labels, prev_vels = x2ds, splits, labels, vels
        ret.append(labels)
    return ret
Example #22
def scoreIK(skelDict, chanValues, effectorData, effectorTargets, rootMat=None):
    """
	Args:
		skelDict (GskelDict): The Skeleton to process

	Returns:
		float: Weighted RMS effector error for the given pose.

	Requires:
		Character.pose_skeleton
		ISCV.score_effectors
	"""
    Character.pose_skeleton(skelDict['Gs'], skelDict, chanValues, rootMat)
    return (
        ISCV.score_effectors(skelDict['Gs'], effectorData[0], effectorData[1],
                             effectorData[2], effectorTargets) /
        np.sum(effectorData[1]))**0.5
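
Assuming ISCV.score_effectors returns the weighted sum of squared residuals (consistent with the division by the summed weights and the square root here), the value returned is a weighted RMS error: sqrt(sum(w*r^2) / sum(w)). A tiny numeric illustration:

import numpy as np

r = np.float32([1.0, 2.0, 0.0])  # made-up per-effector residuals
w = np.float32([1.0, 1.0, 2.0])  # made-up effector weights (effectorData[1])
print((np.sum(w * r * r) / np.sum(w)) ** 0.5)  # 1.118...
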
Example #23
def setFrame(newFrame):
    global frame, view, allSkels, points, joints, bones
    frame = newFrame
    for Gs3, Ls3, skelDict3, animData3, skel3 in allSkels:
        dofs3 = animData3[frame % len(animData3)]
        Gs3 = ASFReader.pose_skeleton(Gs3, Ls3, skelDict3['jointParents'],
                                      skelDict3['jointDofs'],
                                      skelDict3['dofSplits'], dofs3)
        skel3.vertices[:] = Gs3[:, :, 3]

    global md, img, g_detectingDots, g_readingMovie
    if g_readingMovie and md is not None:
        try:
            MovieReader.readFrame(md, seekFrame=(frame - videoFrameOffset) / 4)
        except:
            frame = videoFrameOffset
            MovieReader.readFrame(md, seekFrame=(frame - videoFrameOffset) / 4)
        if g_detectingDots:
            ret = ISCV.detect_bright_dots(img, 254, 200, 190)
            good = [
                r for r in ret
                if min(r.sxx, r.syy) > 0.1 and min(r.sxx, r.syy) < 100.0
            ]  # and r.sxy*r.sxy<=0.01*r.sxx*r.syy]
            print len(good), 'good points'
            for r in good:
                #print r.sx,r.sy,r.sxx,r.sxy,r.syy
                img[int(r.sy - 5):int(r.sy + 5),
                    int(r.sx - 5):int(r.sx + 5), :] = [0, 255, 0]
        view.refreshImageData()
    global animJoints, stablePointsGroups, displayFrames, groupRepresentatives
    pfr = np.searchsorted(goodFrames, frame)
    points.vertices = displayFrames[pfr % len(displayFrames)]
    if animJoints is not None:
        joints.vertices[:] = animJoints[pfr % len(animJoints)]
    bones.vertices[::2] = joints.vertices
    bones.vertices[1::2] = points.vertices[
        groupRepresentatives[stablePointsGroups]]

    view.updateGL()
Example #24
def get_labels(frames, x3ds_seq, detections_seq, mats, x2d_threshold=0.01):
    '''Project all the 3d points in all the views and label the detections.'''
    num_cameras = len(mats)
    ret = {}
    Ps = np.array([m[2] / (m[0][0, 0]) for m in mats], dtype=np.float32)
    for fi in frames:
        print fi, '\r',
        x3ds, x3ds_labels = x3ds_seq[fi]
        x2ds_raw_data, splits = detections_seq[fi][0]
        assert (num_cameras + 1 == len(splits))
        x2ds_labels = -np.ones(len(x2ds_raw_data), dtype=np.int32)
        x2ds_data, _ = Calibrate.undistort_dets(x2ds_raw_data, splits, mats)
        if len(x2ds_data):
            clouds = ISCV.HashCloud2DList(x2ds_data, splits, x2d_threshold)
            sc, x2ds_labels, x2ds_vels = Label.project_assign(
                clouds, x3ds, x3ds_labels, Ps, x2d_threshold)
            zeros = np.where(x2ds_labels == -1)[0]
            # these lines remove all the data for the unlabelled points
            x2ds_data[zeros] = -1
            x2ds_raw_data[zeros] = -1
        ret[fi] = x2ds_raw_data, splits, x2ds_labels
    return ret
Example #25
def skeleton_marker_positions(skelDict,
                              rootMat,
                              chanValues,
                              effectorLabels,
                              effectorData,
                              markerWeights=None):
    """
	Based on the pose implied by the chanValues and rootMat, compute the 3D world-space
	positions of the markers.
	
	Multiple effectors may determine the position of the marker. effectorLabels provides this mapping.
	
	The weights for the markers, if any, are set by markerWeights.
	
	Args:
		skelDict (GskelDict): the skeleton
		rootMat (float[3][4]): reference frame of the Skeleton.
		chanValues (float[]): List of channel values to pose the skeleton.
		effectorLabels : the marker that each effector determines
		effectorData : (effectorJoints, effectorOffsets, ...)
		markerWeights : the weight that each effector has on its marker
		
	Returns:
		int[]: Labels for the 3D positions of the markers.
		float[][3]: 3D positions of where the target would be in the pose.
		
	Requires:
		Character.pose_skeleton
		ISCV.marker_positions
		
	"""
    Character.pose_skeleton(skelDict['Gs'], skelDict, chanValues, rootMat)
    labels = np.unique(effectorLabels)
    els2 = np.int32([list(labels).index(x) for x in effectorLabels])
    x3ds = ISCV.marker_positions(skelDict['Gs'], effectorData[0],
                                 effectorData[1], els2, markerWeights)
    return x3ds, labels
Example #26
def solve_x3ds(x2ds, splits, labels, Ps, robust=True):
    """
	Given some labelled 2d points, generate labelled 3d positions for every multiply-labelled point and equations for
	every single-labelled point.
	
	Args:
		x2ds (float[][2]): 2D Detections.
		splits (int[]): Indices of ranges of 2Ds per camera.
		labels (int[]): Labels of x2ds.
		Ps (float[][3][4]): Camera projection matrices.
		robust (bool): Robustness flag (requires more rays to reconstruct). Default = True.

	Returns:
		float[][3]: "x3ds" - the resulting 3D reconstructions.
		int[]: "x3d_labels" - the labels for the 3D points.
		??: "E[singles]" - Equations describing 2D detections not born of the 3D yet.
		int[] "singles_labels" - labels for the 2D contributions.
		
	Requires:
		ISCV.solve_x3ds
	"""
    return ISCV.solve_x3ds(
        x2ds, splits, labels, Ps,
        robust)  # x3ds, x3d_labels, E[single_rays], single_ray_labels
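
The linear step behind this (see the reference implementation in Example #29) stacks, per label, the two equations E.x + e = 0 contributed by each ray and solves the regularised normal equations:

import numpy as np

def linsolve_rays(E0, e0):
    # Least-squares 3D point from stacked ray equations E0 (N x 3) and e0 (N,),
    # with a tiny Tikhonov term for stability, as in Example #28's intersect code.
    return np.linalg.solve(np.dot(E0.T, E0) + np.eye(3) * 1e-7, -np.dot(E0.T, e0))

E0, e0 = np.random.rand(4, 3), np.random.rand(4)
x = linsolve_rays(E0, e0)
print(np.allclose(np.dot(np.dot(E0.T, E0) + np.eye(3) * 1e-7, x), -np.dot(E0.T, e0)))  # True
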
Example #27
	def undistort_points(self, x2ds, x2ds_out=None):
		if self.cameraDistortion is None and x2ds_out is not None: x2ds_out[:] = x2ds
		if self.cameraDistortion is None: return
		if x2ds_out is None: x2ds_out = x2ds
		ISCV.undistort_points(x2ds, -float(self.cameraKox), -float(self.cameraKoy),
		                      float(self.cameraDistortion[0]), float(self.cameraDistortion[1]), x2ds_out)
Example #28
def intersect_rays(x2ds,
                   splits,
                   Ps,
                   mats,
                   seed_x3ds=None,
                   tilt_threshold=0.0002,
                   x2d_threshold=0.01,
                   x3d_threshold=30.0,
                   min_rays=3,
                   numPolishIts=3,
                   forceRayAgreement=False,
                   visibility=None):
    """
	Given 2D detections, we would like to find bundles of rays from different cameras that have a common solution.
	For each pair of rays, we can solve for a 3D point. Each such solve has a residual: we want to find low residual pairs.

	Closer together camera pairs and cameras with more unlabelled markers should have more matches.
	Visit the camera pairs by order of distance-per-unlabelled-marker score (lower is better).

	For a given camera pair, each ray can be given an order which is the tilt (angle between the ray from the camera to
	that ray and a line perpendicular to a reference plane containing both camera centres).

	tilt = asin(norm(raydir^(c2-c1)).ocdir)
	TODO: compare atan2(raydir^(c2-c1).ocdir,|raydir^(c2-c1)^ocdir|)

	Precisely the rays with the same tilt (within tolerance) intersect.
	This fails only if the first camera is looking directly at the second.

	For each pair of cameras, sort the unassigned rays by tilt and read off the matches.
	(DON'T match if there are two candidates with the same tilt on the same camera.)
	For each match, solve the 3D point.
	Naively, this costs ~NumDetections^2.
	However, if we project the point in all the cameras and assign rays then we can soak up all the rays in the other cameras.
	The maximum number of matches should be ~NumPoints.
	So the dominant cost becomes project assign (NumPoints * NumCameras using hash).

	Polish all the 3D points.
	Check for any 3D merges (DON'T merge if there are two rays from the same camera).
	Project all the points in all the cameras and reassign.
	Cull any points with fewer than 2 rays.
	Potentially repeat for the remaining unassigned rays.

	Args:
		x2ds (float[][2]): 2D Detections.
		splits (int): Indices of ranges of 2Ds per camera.
		Ps (float[][3][4]): Camera projection matrices.
		mats (GcameraMat[]): Camera Matrices.
		seed_x3ds (float[][3]): existing 3D data? Default = None.
		tilt_threshold (float): Slack factor for tilt pairing = 0.0002
		x2d_threshold (float): 2D assignment threshold. Default = 0.01.
		x3d_threshold (float): 3D merge/cleaning threshold. Default = 30.0.
		min_rays (int): Min number of rays to reconstruct. Default = 3.

	Returns:
		float[][3]: (x3ds_ret) List of 3D points produced as a result of intersecting the 2Ds
		int[]: (labels) List of labels corresponding to the x3ds.

	Requires:
		ISCV.compute_E
		ISCV.HashCloud2DList
		ISCV.HashCloud3D
		clouds.project_assign

	"""
    Ks = np.array(zip(*mats)[0], dtype=np.float32)
    RTs = np.array(zip(*mats)[1], dtype=np.float32)
    Ts = np.array(zip(*mats)[4], dtype=np.float32)
    if visibility is not None:
        ret2 = ISCV.intersect_rays_base(x2ds, splits, Ps, Ks, RTs, Ts,
                                        seed_x3ds, tilt_threshold,
                                        x2d_threshold, x3d_threshold, min_rays,
                                        numPolishIts, forceRayAgreement,
                                        visibility)
    else:
        ret2 = ISCV.intersect_rays2(x2ds, splits, Ps, Ks, RTs, Ts, seed_x3ds,
                                    tilt_threshold, x2d_threshold,
                                    x3d_threshold, min_rays, numPolishIts,
                                    forceRayAgreement)
    return ret2

    # NOTE: everything below is the original pure-Python implementation, kept
    # for reference; it is unreachable after the return above.
    import itertools
    numCameras = len(splits) - 1
    numDets = splits[-1]
    labels = -np.ones(numDets, dtype=np.int32)
    E = ISCV.compute_E(x2ds, splits, Ps)
    rays = dets_to_rays(x2ds, splits, mats)
    Ts = np.array([m[4] for m in mats], dtype=np.float32)

    def norm(a):
        return a / (np.sum(a**2)**0.5)

    tilt_axes = np.array([
        norm(np.dot([-m[0][0, 2], -m[0][1, 2], m[0][0, 0]], m[1][:3, :3]))
        for m in mats
    ],
                         dtype=np.float32)
    corder = np.array(list(itertools.combinations(range(numCameras), 2)),
                      dtype=np.int32)  # all combinations ci < cj
    #corder = np.array(np.concatenate([zip(range(ci),[ci]*ci) for ci in xrange(1,numCameras)]),dtype=np.int32)
    clouds = ISCV.HashCloud2DList(x2ds, splits, x2d_threshold)
    x3ds_ret = []
    if seed_x3ds is not None:
        x3ds_ret = list(seed_x3ds)
        # initialise labels from seed_x3ds
        _, labels, _ = clouds.project_assign_visibility(
            seed_x3ds, np.arange(len(x3ds_ret), dtype=np.int32), Ps,
            x2d_threshold, visibility)
    # visit the camera pairs by distance-per-unlabelled-marker score
    #camDists = np.array([np.sum((Ts - Ti)**2, axis=1) for Ti in Ts],dtype=np.float32)
    #for oit in range(10):
    #if len(corder) == 0: break
    #urcs = np.array([1.0/(np.sum(labels[splits[ci]:splits[ci+1]]==-1)+1e-10) for ci in xrange(numCameras)],dtype=np.float32)
    #scmat = camDists*np.array([np.maximum(urcs,uci) for uci in urcs],dtype=np.float32)
    #scores = scmat[corder[:,0],corder[:,1]]
    #so = np.argsort(scores)
    #corder = corder[so]
    #for it in range(10):
    #if len(corder) == 0: break
    #ci,cj = corder[0]
    #corder = corder[1:]
    for ci in xrange(numCameras):
        for cj in xrange(ci + 1, numCameras):
            ui, uj = np.where(
                labels[splits[ci]:splits[ci + 1]] == -1)[0], np.where(
                    labels[splits[cj]:splits[cj + 1]] == -1)[0]
            if len(ui) == 0 or len(uj) == 0: continue
            ui += splits[ci]
            uj += splits[cj]
            axis = Ts[cj] - Ts[ci]
            tilt_i = np.dot(map(norm, np.cross(rays[ui], axis)), tilt_axes[ci])
            tilt_j = np.dot(map(norm, np.cross(rays[uj], axis)),
                            tilt_axes[ci])  # NB tilt_axes[ci] not a bug
            io = np.argsort(tilt_i)
            jo = np.argsort(tilt_j)
            ii, ji = 0, 0
            data = []
            while ii < len(io) and ji < len(jo):
                d0, d1 = tilt_i[io[ii]], tilt_j[jo[ji]]
                diff = d0 - d1
                if abs(diff) < tilt_threshold:
                    # test for colliding pairs
                    # if ii+1 < len(io) and tilt_i[io[ii+1]]-d0 < tilt_threshold: ii+=2; continue
                    # if ji+1 < len(jo) and tilt_j[jo[ji+1]]-d1 < tilt_threshold: ji+=2; continue
                    # test for colliding triples
                    # if ii > 0 and d0-tilt_i[io[ii-1]] < tilt_threshold: ii+=1; continue
                    # if ji > 0 and d1-tilt_j[jo[ji-1]] < tilt_threshold: ji+=1; continue
                    d = [ui[io[ii]], uj[jo[ji]]]
                    data.append(d)
                    ii += 1
                    ji += 1
                elif diff < 0:
                    ii += 1
                else:
                    ji += 1
            if len(data) != 0:
                # intersect rays
                for d in data:
                    E0, e0 = E[d, :, :3].reshape(-1, 3), E[d, :, 3].reshape(-1)
                    x3d = np.linalg.solve(
                        np.dot(E0.T, E0) + np.eye(3) * 1e-7, -np.dot(E0.T, e0))
                    sc, labels_out, _ = clouds.project_assign_visibility(
                        np.array([x3d], dtype=np.float32),
                        np.array([0], dtype=np.int32), Ps, x2d_threshold,
                        visibility)
                    tmp = np.where(labels_out == 0)[0]
                    if len(tmp) >= min_rays:
                        tls_empty = np.where(labels[tmp] == -1)[0]
                        if len(tls_empty) >= min_rays:
                            labels[tmp[tls_empty]] = len(x3ds_ret)
                            x3ds_ret.append(x3d)
    # TODO: polish, merge, reassign, cull, repeat
    # merge
    if False:
        x3ds_ret = np.array(x3ds_ret, dtype=np.float32).reshape(-1, 3)
        cloud = ISCV.HashCloud3D(x3ds_ret, x3d_threshold)
        scores, matches, matches_splits = cloud.score(x3ds_ret)
        mergers = np.where(matches_splits[1:] - matches_splits[:-1] > 1)[0]
        for li in mergers:
            i0, i1 = matches_splits[li:li + 2]
            collisions = np.where(scores[i0:i1] < x3d_threshold**2)[0]
            if len(collisions) > 1:
                collisions += i0
                #print 'merger',li,i0,i1,scores[i0:i1] # TODO merge these (frame 7854)

    # now cull the seed_x3ds, because they could confuse matters
    if seed_x3ds is not None:
        labels[np.where(labels < len(seed_x3ds))] = -1

    minNumRays1 = np.min(
        [len(np.where(labels == l)[0]) for l in np.unique(labels)])
    maxNumRays1 = np.max(
        [len(np.where(labels == l)[0]) for l in np.unique(labels) if l != -1])

    # final polish
    x3ds_ret, x3ds_labels, E_x2ds_single, x2ds_single_labels = solve_x3ds(
        x2ds, splits, labels, Ps)
    # throw away the single rays and their 3d points by renumbering the generated 3d points
    # _,labels,_ = clouds.project_assign_visibility(x3ds_ret, None, Ps, x2d_threshold, visibility)
    minNumRays3 = np.min(
        [len(np.where(labels == l)[0]) for l in np.unique(labels)])
    maxNumRays3 = np.max(
        [len(np.where(labels == l)[0]) for l in np.unique(labels) if l != -1])
    _, labels, _ = clouds.project_assign(x3ds_ret, None, Ps, x2d_threshold)
    minNumRays2 = np.min(
        [len(np.where(labels == l)[0]) for l in np.unique(labels)])
    maxNumRays2 = np.max(
        [len(np.where(labels == l)[0]) for l in np.unique(labels) if l != -1])
    x3ds_ret, x3ds_labels, E_x2ds_single, x2ds_single_labels = solve_x3ds(
        x2ds, splits, labels, Ps)
    ret = x3ds_ret, labels
    return ret
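
The tilt measure from the docstring, tilt = asin(norm(raydir^(c2-c1)).ocdir), as a standalone helper:

import numpy as np

def tilt(ray_dir, baseline, oc_dir):
    # Angle used to order rays about the camera-pair baseline; rays from the two
    # cameras whose tilts match (within tolerance) are candidates to intersect.
    n = np.cross(ray_dir, baseline)
    n /= np.linalg.norm(n)
    return np.arcsin(np.clip(np.dot(n, oc_dir), -1.0, 1.0))
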
Example #29
def test_solve_x3ds(x2ds, splits, labels, Ps):
    """
	Given some labelled 2d points, generate labelled 3d positions for every multiply-labelled point and equations for
	every single-labelled point.
	
	This is the reference Python implementation, used to cross-check ISCV.solve_x3ds.
	
	Args:
		x2ds (float[][2]): 2D Detections.
		splits (int[]): Indices of ranges of 2Ds per camera.
		labels (int[]): Labels of x2ds.
		Ps (float[][3][4]): Camera projection matrices.

	Returns:
		float[][3]: "x3ds" - the resulting 3D reconstructions.
		int[]: "x3d_labels" - the labels for the 3D points.
		??: "E[singles]" - Equations describing 2D detections not born of the 3D yet.
		int[] "singles_labels" - labels for the 2D contributions.
		
	Requires:
		ISCV.compute_E
		linsolveN3
	"""
    x3ds2, x3d_labels2, E2, x2d_labels2 = solve_x3ds(x2ds, splits, labels, Ps)

    numLabels = max(labels) + 1
    counts = np.zeros(numLabels, dtype=np.int32)
    # take care to throw away unlabelled rays
    counts[:] = np.bincount(labels + 1,
                            minlength=numLabels + 1)[1:numLabels + 1]
    # find all the 2+ ray labels
    x3d_labels = np.array(np.where(counts >= 2)[0], dtype=np.int32)
    # find all the single ray labels
    x2d_labels = np.array(np.where(counts == 1)[0], dtype=np.int32)
    E = ISCV.compute_E(x2ds, splits, Ps)
    label_dis = -np.ones(
        len(x2ds),
        dtype=np.int32)  # the indices of the detection for each label
    label_splits = np.zeros(numLabels + 1, dtype=np.int32)
    np.cumsum(counts, out=label_splits[1:], dtype=np.int32)
    index = label_splits.copy()
    for c0, c1 in zip(splits[:-1], splits[1:]):
        ls = labels[c0:c1]
        label_dis[index[ls]] = range(c0, c1)
        index[ls] += 1
    # compute the 3d points
    x3ds = np.zeros((len(x3d_labels), 3), dtype=np.float32)
    for li, x in zip(x3d_labels, x3ds):
        dis = label_dis[label_splits[li]:label_splits[li + 1]]
        linsolveN3(E, dis, x)
        err = np.dot(E[dis, :, :3].reshape(-1, 3), x) + E[dis, :,
                                                          3].reshape(-1)
        err = err.reshape(-1, 2)
        err = np.sum(err**2, axis=1)
        err = 50.0 / (50.0 + err)
        #print err
        E[dis, :] *= err.reshape(-1, 1, 1)
        linsolveN3(E, dis, x)

    #print 'diff',np.sum((x3ds-x3ds2)**2,axis=1)
    assert (np.allclose(x3ds, x3ds2, 1e-6, 1e-3))
    assert (np.allclose(x3d_labels, x3d_labels2))
    #assert(np.allclose(E[label_splits[x2d_labels]],E2,1e-6,1e-6))
    assert (np.allclose(x2d_labels, x2d_labels2))
    return x3ds, x3d_labels2, E2, x2d_labels2
Example #30
    def cook(self, location, interface, attrs):
        if not self.useFrame(interface.frame(), attrs['frameRange']): return
        if not attrs['calibration'] or not attrs['x3ds']: return

        calibrationLocation = attrs['calibration']
        Ps = interface.attr('Ps', atLocation=calibrationLocation)
        mats = interface.attr('mats', atLocation=calibrationLocation)
        if Ps is None:
            if mats is None:
                self.logger.warning('Could not find calibration data at: %s' %
                                    calibrationLocation)
                return

            Ps = interface.getPsFromMats(mats)
            if Ps is None: return

        # Get the x3ds (and labels if available) from the cooked location
        x3ds = interface.attr('x3ds', atLocation=attrs['x3ds'])
        if x3ds is None:
            self.logger.error('No 3D points found at: %s' % attrs['x3ds'])
            return

        which_labels = interface.attr('which_labels')
        if which_labels is None:
            which_labels = np.arange(len(x3ds))

        x3ds = np.ascontiguousarray(x3ds, dtype=np.float32)
        normals = interface.attr('normals', atLocation=attrs['x3ds'])

        if 'x3dIndex' in attrs and attrs['x3dIndex'] >= 0:
            idx = attrs['x3dIndex']
            x3ds = x3ds[idx].reshape(1, -1)
            which_labels = [idx]

        # Check if we've got visibility lods
        visibilityLod = None
        if 'skeleton' in attrs and attrs['skeleton']:
            skeletonLoc = attrs['skeleton']
            skelDict = interface.attr('skelDict', atLocation=skeletonLoc)
            visibilityLod = interface.getChild('visibilityLod',
                                               parent=skeletonLoc)
            if attrs['useVisibility'] and visibilityLod is None:
                self.logger.error('No visibility LODs found at skeleton: %s' %
                                  attrs['skeleton'])
                return

            mats = interface.attr('mats', atLocation=calibrationLocation)
            cameraPositions = np.array([m[4] for m in mats], dtype=np.float32)

        if self.visibility is None:
            self.visibility = ISCV.ProjectVisibility.create()

        # if attrs['useVisibility'] and normals is not None and visibilityLod is not None:
        if attrs['useVisibility'] and visibilityLod is not None:
            lodNames = visibilityLod['names']
            lodTris = visibilityLod['tris']
            lodVerts = visibilityLod['verts']
            lodNormals = visibilityLod['faceNormals']
            tris = lodVerts[lodTris]

            if attrs['useNormals'] and normals is not None:
                self.visibility.setNormalsAndLods(normals, tris,
                                                  cameraPositions,
                                                  np.concatenate((lodNormals)),
                                                  attrs['intersect_threshold'],
                                                  attrs['generateNormals'])
            else:
                self.visibility.setLods(tris, cameraPositions,
                                        np.concatenate((lodNormals)),
                                        attrs['intersect_threshold'],
                                        attrs['generateNormals'])

            x2ds, x2ds_splits, x2d_labels = ISCV.project_visibility(
                x3ds, which_labels, Ps, self.visibility)
        elif attrs['useNormals'] and normals is not None:
            self.visibility.setNormals(normals)
            x2ds, x2ds_splits, x2d_labels = ISCV.project_visibility(
                x3ds, which_labels, Ps, self.visibility)
        else:
            x2ds, x2ds_splits, x2d_labels = ISCV.project(
                x3ds, which_labels, Ps)

        # Distort if needed
        if 'distort' in attrs and attrs['distort']:
            for ci, (s, e) in enumerate(zip(x2ds_splits[:-1],
                                            x2ds_splits[1:])):
                K, RT, P, ks, T, wh = mats[ci]
                dets = x2ds[s:e]
                ISCV.distort_points(dets, float(-K[0, 2]), float(-K[1, 2]),
                                    float(ks[0]), float(ks[1]), dets)
                x2ds[s:e] = dets

        detsAttrs = {
            'x2ds': x2ds,
            'x2ds_splits': x2ds_splits,
            'labels': x2d_labels,
            'x2ds_colour': eval(attrs['colour']),
            'x2ds_pointSize': attrs['pointSize']
        }

        if 'cameraOffset' in attrs and attrs['cameraOffset'] > 0:
            x2ds_splits_render = np.insert(
                x2ds_splits, np.zeros(attrs['cameraOffset'], dtype=np.int32),
                0)
            detsAttrs['x2ds_splits_render'] = x2ds_splits_render

        interface.createChild(interface.name(),
                              'detections',
                              atLocation=interface.parentPath(),
                              attrs=detsAttrs)