Example #1
import numpy as np
import freenect  # libfreenect Python wrapper for the Kinect

# QApp, IO, Face, Calibrate, depths_to_points and extract_depths,
# plus the g_* globals, come from the host project.
def cb(frame):
    global g_record, g_frame
    g_frame = frame
    global g_camera_rays, g_camera_mat
    #print('in cb')
    img = freenect.sync_get_video()[0]
    geom_mesh = QApp.app.getLayer('geom_mesh')
    geom_mesh.setImage(img)

    if True:
        # fetch a depth frame registered (aligned) to the RGB image
        depths = freenect.sync_get_depth(format=freenect.DEPTH_REGISTERED)[0]
        #print('depths', np.median(depths))

        if False:  # recording (disabled)
            if frame not in g_record: return
            img, depths = g_record[frame]['video'], g_record[frame]['depths']
            g_record[frame] = {'video': img.copy(), 'depths': depths.copy()}
            if frame == 99: IO.save('dump', g_record)

        # zero depth marks a hole; count the valid samples so the 2x2 block
        # average below divides by the number of non-holes
        # (lookup[n] = 1/n, with lookup[0] = 0 so empty blocks stay zero)
        depths_sum = np.array(depths != 0, dtype=np.int32)
        lookup = np.array([0, 1, 0.5, 1.0 / 3, 0.25], dtype=np.float32)
        if True:  # average down to half resolution
            depths_lo = np.array(depths[::2, ::2] + depths[1::2, ::2] +
                                 depths[::2, 1::2] + depths[1::2, 1::2],
                                 dtype=np.float32)
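            # scale each block sum by 1/(number of valid samples in the block)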
            depths_lo = depths_lo * lookup[
                (depths_sum[::2, ::2] + depths_sum[1::2, ::2] +
                 depths_sum[::2, 1::2] +
                 depths_sum[1::2, 1::2]).reshape(-1)].reshape(depths_lo.shape)
        else:  # fullsize: just zero out the holes
            depths_lo = depths * lookup[depths_sum.reshape(-1)].reshape(
                depths.shape)

        K, RT, P, ks, T, wh = g_camera_mat
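        # unproject: scale the precomputed per-pixel camera rays by the
        # (averaged) depths to get one 3D point per depth pixel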
        vs = depths_to_points(g_camera_rays, T, depths_lo)
        geom_mesh.setVs(vs.reshape(-1, 3))

    #QApp.view().setImage(img, img.shape[0], img.shape[1], img.shape[2])
    #camera = QApp.view().camera
    #geom_mesh.image = camera.image
    #geom_mesh.bindImage = camera.bindImage
    #geom_mesh.bindId = camera.bindId
    global g_predictor, reference_3d, geo_vs, geo_vts
    h, w, _3 = img.shape

    global g_prev_vs
    try:
        g_prev_vs
    except NameError:  # first call: the global does not exist yet
        g_prev_vs = None
    use_prev_vs = True

    if g_prev_vs is None:
        reference_3d[:, :2] = g_predictor['ref_shape'] * [100, 100]
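    # detect the face from scratch when there is no previous shape;
    # otherwise track from the previous frame's landmarks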
    tmp = Face.detect_face(img,
                           g_predictor) if g_prev_vs is None else g_prev_vs
    tmp = Face.track_face(img, g_predictor, tmp)
    if use_prev_vs: g_prev_vs = tmp
    if frame == 0 or Face.test_reboot(img, g_prev_vs): g_prev_vs = None
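    # store the landmarks as texture coords, flipping y from image convention
    # (origin top-left) to GL convention (origin bottom-left)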
    geo_vts[:len(tmp)] = tmp
    geo_vts[:, 1] = img.shape[0] - geo_vts[:, 1]

    current_shape = geo_vts[:len(tmp)].copy()

    if True:
        # sample the depth image (half resolution) at the tracked landmarks
        ds = extract_depths(vs, current_shape * 0.5)
        M, inliers = Calibrate.rigid_align_points_inliers(ds,
                                                          reference_3d,
                                                          scale=True,
                                                          threshold_ratio=5.0)
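        # apply the recovered rigid(+scale) transform to the sampled points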
        ds = np.dot(ds, M[:3, :3].T) + M[:, 3]
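        # slowly blend the current samples into the reference; trust the
        # alignment inliers more (faster blend)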
        which = np.where(np.sum((reference_3d - ds)**2, axis=1) < 100 * 100)[0]
        reference_3d[which] = reference_3d[which] * 0.99 + ds[which] * 0.01
        reference_3d[
            inliers] = reference_3d[inliers] * 0.95 + ds[inliers] * 0.05
        ds[:] = reference_3d[:]
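        # shift the pose (translation offsets, apparently for display layout)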
        M[1, 3] += 1000
        M[0, 3] -= 300
    else:
        M = np.eye(3, 4, dtype=np.float32)
        M[1, 3] += 1000
    geom_mesh.setPose(M.reshape(1, 3, 4))

    # Procrustes-style normalisation: estimate the similarity transform
    # between the reference shape and the tracked shape via SVD, then undo it
    ref_pinv = g_predictor['ref_pinv']
    xform = np.dot(ref_pinv, current_shape)
    u, s, vt = np.linalg.svd(xform)
    scale = (s[0] * s[1])**-0.5
    xform_inv = np.dot(vt.T, u.T) * scale
    current_shape = np.dot(current_shape - np.mean(current_shape, axis=0),
                           xform_inv) * 100.
    geo_vs[:] = 0
    geo_vs[:len(current_shape), :2] = current_shape
    geo_vs[:70] = reference_3d  # overwrites the 2d shape just written above
    #geo_vs[:68,:] += [0,100,5500]
    #print(geo_vts[:4], w, h)
    geo_mesh = QApp.app.getLayer('geo_mesh')
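    # texture coordinates are normalised to [0, 1] by the image dimensions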
    geo_mesh.setVs(geo_vs,
                   vts=geo_vts *
                   np.array([1.0 / w, 1.0 / h], dtype=np.float32))
    geo_mesh.setImage(img)
    #geo_mesh.transforms[0][:,:3] = [[1,0,0],[0,1,0],[0,0,1],[0,1000,0.1]]

    if True:
        global g_model
        w, h = 160, 160  # note: unused below (shadows the image size above)
        shp = geo_vs[:68, :2]  # note: unused below
        # fit the active appearance model (AAM) to the tracked landmarks and
        # render it back into the video image
        shape_u, tex_u, A_inv, mn = Face.fit_aam(g_model, tmp, img)
        Face.render_aam(g_model, A_inv * 0.5, mn * 0.5, shape_u, tex_u, img)

    img_mesh = QApp.app.getLayer('img_mesh')
    img_mesh.setImage(img)

    QApp.view().updateGL()
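
The subtlest step in Example #1 is the hole-aware 2x2 depth averaging: Kinect depth frames use 0 for missing data, so a plain block average would drag values toward zero wherever there are holes. Below is a minimal self-contained sketch of the same technique using plain NumPy only; the function name downsample_depth and the demo array are illustrative, not from the original project.

import numpy as np

def downsample_depth(depths):
    """Average each 2x2 block of a depth image, ignoring zero (hole) pixels."""
    valid = (depths != 0).astype(np.int32)
    # number of valid samples per 2x2 block: 0..4
    counts = (valid[::2, ::2] + valid[1::2, ::2] +
              valid[::2, 1::2] + valid[1::2, 1::2])
    sums = (depths[::2, ::2] + depths[1::2, ::2] +
            depths[::2, 1::2] + depths[1::2, 1::2]).astype(np.float32)
    # lookup[n] = 1/n, with lookup[0] = 0 so all-hole blocks stay at zero
    lookup = np.array([0, 1, 0.5, 1.0 / 3, 0.25], dtype=np.float32)
    return sums * lookup[counts]

d = np.array([[0, 4, 1, 1],
              [2, 6, 1, 1]], dtype=np.uint16)
print(downsample_depth(d))  # [[4. 1.]] -- the hole is excluded from the mean
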
Example #2
 x2d_threshold = 0.08  # - it * 0.04/50.
 Ps = np.array([m[2] / (m[0][0, 0]) for m in mats], dtype=np.float32)
 u2s, _ = Calibrate.undistort_dets(x2s, x2s_splits, mats)
 x3s, x3s_labels, E, x2d_labels = Recon.solve_x3ds(
     u2s, x2s_splits, x2s_labels_original, Ps, True)
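 # hash the (undistorted) 2d detections for fast lookup, then re-assign
 # labels by projecting the reconstructed 3d points into every camera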
 clouds = ISCV.HashCloud2DList(u2s, x2s_splits, x2d_threshold)
 sc, x2s_labels, _ = Label.project_assign(clouds, x3s, x3s_labels, Ps,
                                          x2d_threshold)
 print('it', it, sc)
 tiara_xis = np.where(x3s_labels < len(VICON_tiara_x3ds))[0]
 tiara_lis = x3s_labels[tiara_xis]
 tiara_true = VICON_tiara_x3ds[tiara_lis] + [0, 1000, 0]
 tiara_xs = x3s[tiara_xis]
 # now solve the tiara into place by finding a rigid transform
 RT, inliers = Calibrate.rigid_align_points_inliers(tiara_xs,
                                                    tiara_true,
                                                    scale=True)
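 # apply the recovered transform to all points and pin the tiara markers
 # to their known positions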
 x3s = np.dot(x3s, RT[:3, :3].T) + RT[:, 3]
 x3s[tiara_xis] = tiara_true
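 # unlabel detections whose label was seen by only a single camera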
 singles = np.where([x in list(x2d_labels) for x in x2s_labels])[0]
 x2s_labels[singles] = -1
 for ci, P in enumerate(
         Ps):  # first pass: solve cameras from 2d-3d correspondences
     x2s_which = np.where(
         x2s_labels[x2s_splits[ci]:x2s_splits[ci + 1]] != -1
     )[0] + x2s_splits[ci]
     xls = x2s_labels[x2s_which]
     x3s_which = [list(x3s_labels).index(xi) for xi in xls]
     cv2_mat = Calibrate.cv2_solve_camera_from_3d(
         x3s[x3s_which],