Code example #1
0
File: demo_icp.py  Project: amiller/rtmodel
def perturb(max_iters=100, mod=10):
    """Apply a random rigid perturbation to the object, render both the
    perturbed and original views, and run an alignment step to estimate
    the perturbation.

    Relies on module globals: obj, model, camera, rtmodel, window, pylab,
    random_perturbation, np.

    max_iters: maximum alignment iterations (effectively unused, see note
        in the loop below).
    mod: refresh interval in iterations (also effectively unused).
    """
    global pnew, uv, err, points_range, rimg, range_image

    # Center the object at the origin, then push it 3 units away from the
    # camera along -Z so it falls inside the rendered frustum.
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]

    # Rotated object view: temporarily compose a random perturbation onto
    # obj.RT, render, then restore the original pose.
    RT = obj.RT
    rp = random_perturbation().astype('f')
    obj.RT = np.dot(rp, obj.RT)
    range_image = obj.range_render(camera.kinect_camera())
    obj.RT = RT

    points_range = range_image.point_model(True)

    # Original (unperturbed) object view
    rimg = obj.range_render(camera.kinect_camera())
    pnew = rimg.point_model()

    # Estimate the transformation rp

    for iters in range(max_iters):
        npairs, pnew = model.align(range_image, pnew, rtmodel.RATE1, rtmodel.DIST1, 6)
        #pnew, err, npairs, uv = fasticp.fast_icp(range_image, pnew, 0.1, dist=0.05)
        # NOTE(review): `or 1` makes this condition always true, and the
        # unconditional `break` below means the loop body runs exactly once,
        # so max_iters and mod are effectively dead parameters here.
        # Looks like a debugging leftover -- confirm before "fixing".
        if iters % mod == 0 or 1:
            #print '%d iterations, [%d] RMS: %.3f' % (iters, npairs, np.sqrt(err))
            window.Refresh()
            pylab.waitforbuttonpress(0.02)
            break

    window.Refresh()
Code example #2
0
File: demo_icp.py  Project: amiller/rtmodel
def animate_random(max_iters=1000, mod=100):
    """Render the object from its canonical pose and (optionally) animate a
    fast-ICP alignment against a transformed view.

    The ICP animation section below is disabled with `if 0:`; only the
    initial render and point-model extraction currently run.

    Relies on module globals: obj, camera, window, pylab, fasticp, np.

    max_iters: ICP iteration budget (only used by the disabled section).
    mod: window-refresh interval in iterations (disabled section only).
    """
    global pnew, points_range
    # Center the object at the origin and move it 3 units down -Z so the
    # camera can see it.
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]
    RT = obj.RT

    prev_rimg = obj.range_render(camera.kinect_camera())
    window.canvas.SetCurrent()
    pnew = prev_rimg.point_model(True)
    points_range = pnew

    # Disabled ICP animation.  NOTE(review): if re-enabled, `M` is not
    # defined anywhere in this function -- it presumably comes from a
    # sequence of motions (cf. `once()` below); verify before enabling.
    if 0:
        obj.RT = np.dot(RT, M)
        rimg = obj.range_render(camera.kinect_camera())
        window.canvas.SetCurrent()
        pm = rimg.point_model(True)
        points_range = pm

        for iters in range(max_iters):
            pnew, err, npairs, uv = fasticp.fast_icp(rimg, pnew, 1000, dist=0.005)
            if iters % mod == 0:
                # print '%d iterations, [%d] RMS: %.3f' % (iters, npairs, np.sqrt(err))
                window.Refresh()
                pylab.waitforbuttonpress(0.02)

        pnew = pm

        window.Refresh()
        pylab.waitforbuttonpress(0.02)        
Code example #3
0
File: calib.py  Project: amiller/quartet
 def metric_points(depth, points):
     """Convert three pixel locations in a depth image to metric XYZ.

     depth: uint16 depth image.
     points: exactly three (x, y) pixel coordinates.
     Returns a (6, 3) array: the three XYZ points, duplicated.
     """
     assert len(points) == 3
     assert depth.dtype == np.uint16
     rimg = RangeImage(depth, kinect_camera())
     rimg.compute_points()
     xyz = [rimg.xyz[v, u, :] for (u, v) in points]
     # Duplicate the triple so the result stacks to six rows.
     pts = np.concatenate((xyz, xyz))
     assert pts.shape == (6, 3)
     return pts
Code example #4
0
File: demo_icp.py  Project: amiller/rtmodel
def once():
    """Advance the motion sequence one step: render the object from the
    next camera pose, feed the range image to the model, and update the
    pose guess from the model's alignment result.

    Relies on module globals: seqiter, obj, camera, model, window, pylab,
    np, and the globals declared below.

    NOTE(review): `seqiter.next()` is Python 2 iterator syntax; under
    Python 3 this would need `next(seqiter)`.
    """
    global range_image, points_range, RTold, RTguess
    # ts: timestamp (unused here); M: incremental motion for this step.
    ts, M = seqiter.next()

    # Take the image from an alternate camera location
    obj.RT = np.dot(RTold, M)
    range_image = obj.range_render(camera.kinect_camera())

    points_range = range_image.point_model()
    # Seed the camera pose with the previous guess, or identity on the
    # first call.
    if RTguess is None:
        range_image.camera.RT = np.eye(4, dtype='f')
    else:
        range_image.camera.RT = RTguess

    # model.add returns the aligned point model (falsy on failure); only
    # then do we accept its pose as the new guess.
    p = model.add(range_image)
    if p:
        points_range = p
        RTguess = p.RT

    window.Refresh()
    pylab.waitforbuttonpress(0.02)    
Code example #5
0
File: demo_icp.py  Project: amiller/rtmodel
def load_obj(name='blueghost'):
    """Load a mesh by name and initialize the demo's global state: the
    object pose, a sampled point cloud, a rendered range image with
    normals, and the display window's look-at target.

    Relies on module globals: window, mesh, camera, np.

    name: mesh name understood by mesh.load().
    """
    global obj, points, points_p, range_image, points_range, pnew

    #obj = mesh.load_random()
    # GL context must be current before loading (mesh.load presumably
    # creates GL resources -- confirm).
    window.canvas.SetCurrent()
    obj = mesh.load(name)

    obj.calculate_area_lookup()
    # Center the object at the origin, then push it 3 units down -Z into
    # the camera's view.
    obj.RT = np.eye(4, dtype='f')
    obj.RT[:3,3] = -obj.vertices[:,:3].mean(0)
    obj.RT[:3,3] += [0,0,-3.0]

    points = obj.sample_point_cloud(10000)

    # Range image of the original points
    range_image = obj.range_render(camera.kinect_camera())
    range_image.compute_normals()
    points_range = range_image.point_model()
    
    pnew = points_p = points
    #range_image = obj.range_render(np.dot(camera.kinect_camera(), rp))

    # Aim the viewer at the object's (pre-centering) centroid position.
    window.lookat = obj.RT[:3,3] + obj.vertices[:,:3].mean(0)
    window.Refresh()
Code example #6
0
File: demo_view3dfull.py  Project: amiller/quartet
import numpy as np
import os
import shutil
import cv2
import datasetfull as dataset
import pylab
from wxpy3d import PointWindow
from OpenGL.GL import *
from rtmodel import pointmodel
from rtmodel.rangeimage import RangeImage
from rtmodel.camera import kinect_camera
# Shared Kinect camera model used throughout this demo.
cam = kinect_camera()

# Create the display window only once so re-running the script in the same
# interpreter session reuses the existing window.
if not 'window' in globals():
    window = PointWindow(size=(640,480))

def show_rgb(name, image):
    """Show an RGB image in an OpenCV window (flips channels to BGR)."""
    bgr = np.ascontiguousarray(image[:, :, ::-1])
    cv2.imshow(name, bgr)

def show_depth(name, depth):
    """Show a depth image in an OpenCV window, inverse-scaled by 1024."""
    scaled = 1024. / depth
    cv2.imshow(name, scaled)

@window.event
def pre_draw():
    """Window pre-draw hook: configure a single positional light and enable
    lighting with color-material tracking before the scene is drawn."""
    # Directional-ish light (w=0.0 makes the position a direction vector).
    glLightfv(GL_LIGHT0, GL_POSITION, (-40, 200, 100, 0.0))
    glLightfv(GL_LIGHT0, GL_AMBIENT, (0.3, 0.3, 0.3, 0.0))
    glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.3, 0.3, 0.3, 0.0))
    glEnable(GL_LIGHT0)
    glEnable(GL_LIGHTING)
    #glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
    # Let glColor* calls drive the material color while lighting is on.
    glEnable(GL_COLOR_MATERIAL)
Code example #7
0
File: demo_objrender.py  Project: amiller/rtmodel
def fwd_cam():
    """Return a Kinect camera model translated 3 units along +Z."""
    kc = camera.kinect_camera()
    kc.RT[:3, 3] += [0, 0, 3.0]
    return kc