Example #1
def get_movie_frame(md, frame, deinterlacing):
    '''Read a MovieReader frame, optionally deinterlacing it in place, and return the image (or None on failure).'''
    if deinterlacing:
        field = frame & 1
        frame /= 2
    try:
        MovieReader.readFrame(md, seekFrame=frame)
    except Exception:
        print 'oops', frame
        return None
    img = np.frombuffer(md['vbuffer'],
                        dtype=np.uint8).reshape(md['vheight'], md['vwidth'], 3)

    if deinterlacing:  # TODO check even/odd
        y = np.arange(0, md['vheight'], 2)
        if field: img[y, :] = img[y + 1, :]  # odd
        else: img[y + 1, :] = img[y, :]  # even
    return img
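
A minimal, self-contained sketch of the line-doubling idea used above (a toy array, not the IMS MovieReader buffer): slicing every second row lets one field overwrite the other in place.

import numpy as np

img = np.arange(6 * 4 * 3, dtype=np.uint8).reshape(6, 4, 3)  # toy 6x4 RGB frame
y = np.arange(0, img.shape[0], 2)  # even scanline indices
img[y + 1, :] = img[y, :]          # duplicate the even field over the odd lines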
Example #2
def setFrame(newFrame):
    global frame, view, allSkels, points, joints, bones
    frame = newFrame
    for Gs3, Ls3, skelDict3, animData3, skel3 in allSkels:
        dofs3 = animData3[frame % len(animData3)]
        Gs3 = ASFReader.pose_skeleton(Gs3, Ls3, skelDict3['jointParents'],
                                      skelDict3['jointDofs'],
                                      skelDict3['dofSplits'], dofs3)
        skel3.vertices[:] = Gs3[:, :, 3]

    global md, img, g_detectingDots, g_readingMovie
    if g_readingMovie and md is not None:
        try:
            MovieReader.readFrame(md, seekFrame=(frame - videoFrameOffset) / 4)
        except Exception:
            frame = videoFrameOffset
            MovieReader.readFrame(md, seekFrame=(frame - videoFrameOffset) / 4)
        if g_detectingDots:
            ret = ISCV.detect_bright_dots(img, 254, 200, 190)
            good = [r for r in ret if 0.1 < min(r.sxx, r.syy) < 100.0]
            # and r.sxy*r.sxy <= 0.01*r.sxx*r.syy
            print len(good), 'good points'
            for r in good:
                #print r.sx,r.sy,r.sxx,r.sxy,r.syy
                img[int(r.sy - 5):int(r.sy + 5),
                    int(r.sx - 5):int(r.sx + 5), :] = [0, 255, 0]
        view.refreshImageData()
    global animJoints, stablePointsGroups, displayFrames, groupRepresentatives
    pfr = np.searchsorted(goodFrames, frame)
    points.vertices = displayFrames[pfr % len(displayFrames)]
    if animJoints is not None:
        joints.vertices[:] = animJoints[pfr % len(animJoints)]
    bones.vertices[::2] = joints.vertices
    bones.vertices[1::2] = points.vertices[
        groupRepresentatives[stablePointsGroups]]

    view.updateGL()
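
setFrame looks up the current frame in the sorted goodFrames array with np.searchsorted. A tiny self-contained illustration of that lookup (the values are made up):

import numpy as np

goodFrames = np.array([10, 20, 30, 40])
print np.searchsorted(goodFrames, 25)  # -> 2, the first index whose value >= 25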
Example #3
def retrack_remap_rbfn():
	grip_dir = os.environ['GRIP_DATA']
	movie_fn,_ = QApp.app.loadFilename('Choose a movie to open', grip_dir, 'Movie Files (*.mp4 *.mov *.avi *.flv *.mpg)')
	txt_fn,_ = QApp.app.loadFilename('Choose a text file of frame indices to open', grip_dir, 'Text Files (*.txt)')
	md = MovieReader.open_file(movie_fn, audio=False)
	lines = map(str.strip, open(txt_fn, 'r').readlines())
	mapping_file = {}
	for l in lines:
		pose_name,frame_number,group_names = l.split(':')
		for gn in group_names.split(','):
			mapping_file.setdefault(gn,{})[pose_name] = int(frame_number)
	print mapping_file.keys()
	print mapping_file
	update_rbfn(md, mapping_file=mapping_file)
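
Each line of the text file is expected to be 'pose_name:frame_number:group_names', with the group names comma-separated. A self-contained sketch of that parsing (the sample line is hypothetical):

line = 'Smile:1234:browGroup,mouthGroup'  # hypothetical input line
pose_name, frame_number, group_names = line.split(':')
mapping = {}
for gn in group_names.split(','):
    mapping.setdefault(gn, {})[pose_name] = int(frame_number)
print mapping  # {'browGroup': {'Smile': 1234}, 'mouthGroup': {'Smile': 1234}}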
Example #4
def import_movie_frames():
    movie_fn, _ = QApp.app.loadFilename(
        'Choose a movie to open', cwd(),
        'Movie Files (*.mp4 *.mov *.avi *.flv *.mpg)')
    if movie_fn == '': return  # cancel
    set_cwd(movie_fn)
    txt_fn, _ = QApp.app.loadFilename(
        'Choose a text file of frame indices to open', cwd(),
        'Text Files (*.txt)')
    md = MovieReader.open_file(movie_fn, audio=False)
    images, shapes = [], []
    if txt_fn == '':
        frames = range(0, md['vmaxframe'], 100)
        #if txt_fn == '': frames = range(30000, 38300, 100)
    else:
        frames = [int(l.split(':')[1]) for l in open(txt_fn, 'r').readlines()]
    for fi in frames:
        print fi, '/', frames[-1]
        MovieReader.readFrame(md, fi)
        add_image(np.frombuffer(md['vbuffer'], dtype=np.uint8)
                  .reshape(md['vheight'], md['vwidth'], 3).copy())
    State.push('Import movie frames')
Example #5
def dirty_cb(dirty):
	if '/root/ui/attrs/movie_filename' in dirty:
		fn = State.getKey('/root/ui/attrs/movie_filename')
		global g_md
		g_md = MovieReader.open_file(fn)
		QApp.app.qtimeline.setRange(0,g_md['vmaxframe'])
	for dk in dirty:
		if dk.startswith('/root/ui/attrs/'):
			QApp.app.refresh()
	global g_mode, g_frame, g_rbfn
	if g_mode == 1 and '/root/sliders/attrs' not in dirty: # RBFN view; changing frame sets all the sliders; we avoid that case
		for key in dirty:
			if key.startswith('/root/sliders/attrs'):
				si = g_rbfn['slider_names'].index(key[len('/root/sliders/attrs/'):])
				group,gn,pn,slider_indices,slider_names,pose_splits = rbfn_info_from_frame(g_frame[g_mode])
				print 'rbfn slider value changed:',key,si,'from',group['slider_data'][pn][si],'to',State.getKey(key)
				group['slider_data'][pn][si] = State.getKey(key)
				rbfn_view_cb(g_frame[g_mode]) # TODO, force an update of the geo
Example #6
def main():
    global State, mats, movieFilenames, primitives
    global movies, primitives2D, deinterlacing, detectingWands, detectingTiara, dot_detections
    import IO
    import sys, os
    deinterlacing = False
    detectingWands = False
    detectingTiara = False
    dot_detections = None
    detections_filename = None
    frame_offsets = None
    firstFrame, lastFrame = 0, 5000
    drawDotSize = 4.0
    fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = \
        50., (0, 0), (0, 0, 0), (0, 1250, 0), (0, 0)
    mats = []
    grip_directory = os.environ['GRIP_DATA']

    if 0:
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 37.9, (0, 0), (
            -66.0, 3.5, -0.2), (4850, 1330, 3280), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat0 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 55.8, (0, 0), (
            -103.6, 3.5, -0.3), (2980, 1380, -2180), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat1 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 49.3, (0, 0), (
            27.9, 4.0, -0.2), (-5340, 1150, 5030), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat2 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        fovX, (ox, oy), pan_tilt_roll, tx_ty_tz, distortion = 50.6, (0, 0), (
            -156.6, 4.9, 0.2), (-105, 1400, -4430), (0, 0)  # roughed in
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mat3 = [
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]
        mats = [mat0, mat1, mat2, mat3]
        xcp_filename = '154535_Cal168_Floor_Final.xcp'
        directory = os.path.join(grip_directory, 'REFRAME')
        movieFilenames = [
            '001E0827_01.MP4', '001F0813_01.MP4', '001G0922_01.MP4',
            '001H0191_01.MP4'
        ]
        #mats,movieFilenames = mats[:1],movieFilenames[:1] # restrict to single-view
        frame_offsets = [119 + 160, 260, 339, 161]
        small_blur, large_blur = 1, 25
        min_dot_size = 1.0
        max_dot_size = 20.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 250, 250  #135,135
    elif 0:
        xcp_filename = '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_01.xcp'
        detections_filename = 'detections.dat'
        detectingTiara = True
        pan_tilt_roll = (0, 0, 90)
        distortion = (0.291979, 0.228389)
        directory = os.path.join(os.environ['GRIP_DATA'], 'ted')
        movieFilenames = [
            '201401211653-4Pico-32_Quad_Dialogue_01_%d.mpg' % xi
            for xi in range(1)
        ]
        firstFrame = 511
        small_blur, large_blur = 1, 20
        min_dot_size = 1.0
        max_dot_size = 16.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 0, 170
    elif 1:
        xcp_filename = '50_Grip_RoomCont_AA_02.xcp'
        detections_filename = 'detections.dat'
        pan_tilt_roll = (0, 0, 0)
        distortion = (0.291979, 0.228389)
        directory = os.path.join(os.environ['GRIP_DATA'], '151110')
        movieFilenames = ['50_Grip_RoomCont_AA_02.v2.mov']
        firstFrame = 0
        small_blur, large_blur = 1, 20
        min_dot_size = 1.0
        max_dot_size = 16.0
        circularity_threshold = 3.0
        threshold_bright, threshold_dark_inv = 170, 170

    attrs = dict([(v, eval(v)) for v in [
        'small_blur', 'large_blur', 'threshold_bright', 'threshold_dark_inv',
        'circularity_threshold', 'min_dot_size', 'max_dot_size'
    ]])
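    # NOTE: eval(v) simply looks up each local variable named in the list, so
    # attrs maps each parameter name to the value chosen in the branch above;
    # a plain dict literal would do the same without eval.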

    primitives2D = QGLViewer.makePrimitives2D(([], []), ([], []))
    primitives = []
    if len(movieFilenames) == 1:
        # TODO: time_base, timecode
        K, RT = Calibrate.composeK(fovX, ox, oy), Calibrate.composeRT(
            Calibrate.composeR(pan_tilt_roll), tx_ty_tz, 0)
        mats = [[
            K[:3, :3], RT[:3, :4],
            np.dot(K, RT)[:3, :], distortion, -np.dot(RT[:3, :3].T, RT[:3, 3]),
            [1920, 1080]
        ]]
        camera_ids = ['video']
        movies = [
            MovieReader.open_file(os.path.join(directory, movieFilenames[0]),
                                  audio=False)
        ]
    else:  # hard coded cameras
        if xcp_filename.endswith('.xcp'):
            if detectingTiara:  # gruffalo
                c3d_filename = os.path.join(
                    directory,
                    '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
                from IO import C3D
                c3d_dict = C3D.read(c3d_filename)
                global c3d_frames
                c3d_frames, c3d_fps, c3d_labels = c3d_dict['frames'], c3d_dict['fps'], c3d_dict['labels']
                c3d_subject = ''  #'TedFace'
                which = np.where(
                    [s.startswith(c3d_subject) for s in c3d_labels])[0]
                c3d_frames = c3d_frames[:, which, :]
                c3d_labels = [c3d_labels[i] for i in which]
                print len(c3d_frames)
            xcp, xcp_data = ViconReader.loadXCP(
                os.path.join(directory, xcp_filename))
            mats.extend(xcp)
        elif xcp_filename.endswith('.cal'):
            from IO import OptitrackReader
            xcp, xcp_data = OptitrackReader.load_CAL(
                os.path.join(directory, xcp_filename))
            mats = xcp
            print 'mats', len(mats), len(movieFilenames)
            assert (len(mats) == len(movieFilenames))
        camera_ids = []
        movies = []
        for ci, mf in enumerate(movieFilenames):
            fo = 0 if frame_offsets is None else frame_offsets[ci]
            movies.append(
                MovieReader.open_file(os.path.join(directory, mf),
                                      audio=False,
                                      frame_offset=fo))
        camera_ids = ['cam_%d' % ci for ci in xrange(len(mats))]
        print len(mats), len(movies), len(camera_ids)
    primitives.append(GLPoints3D([]))
    primitives.append(GLPoints3D([]))
    primitives.append(GLPoints3D([]))
    primitives[0].colour = (0, 1, 1, 0.5)  # back-projected "cyan" points
    primitives[1].colour = (0, 0, 1, 0.5)
    primitives[1].pointSize = 5
    primitives[2].colour = (1, 0, 0, 0.99)

    if len(movieFilenames) != 1 and detections_filename is not None:
        try:
            dot_detections = IO.load(detections_filename)[1]
        except Exception:
            numFrames = len(c3d_frames)  # TODO HACK HACK
            dot_detections = movies_to_detections(movies, range(numFrames),
                                                  deinterlacing, attrs)
            IO.save(detections_filename, dot_detections)

        if detectingTiara:
            x3ds_seq = {}
            for fi in dot_detections.keys():
                frame = c3d_frames[(fi - 55) % len(c3d_frames)]
                which = np.array(np.where(frame[:, 3] == 0)[0], dtype=np.int32)
                x3ds_seq[fi] = np.concatenate((VICON_tiara_x3ds + np.array([150,-100,0],dtype=np.float32),frame[which,:3])), \
                      np.concatenate((np.arange(len(VICON_tiara_x3ds),dtype=np.int32),which+len(VICON_tiara_x3ds)))
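                # each entry pairs the stacked 3D points (tiara template points,
                # then the visible c3d markers) with matching integer labels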

            dot_labels = get_labels(dot_detections.keys(),
                                    x3ds_seq,
                                    dot_detections,
                                    mats,
                                    x2d_threshold=0.05)

            calibration_fi = 546 - 2 - 6

            RT = tighten_calibration(x3ds_seq[calibration_fi],
                                     dot_labels[calibration_fi], mats)
            for v in c3d_frames:
                v[:, :3] = np.dot(v[:, :3], RT[:3, :3].T) + RT[:, 3]

            if True:
                dot_detections = IO.load(detections_filename)[1]
                x3ds_seq = {}
                for fi in dot_detections.keys():
                    frame = c3d_frames[(fi - 55) % len(c3d_frames)]
                    which = np.array(np.where(frame[:, 3] == 0)[0],
                                     dtype=np.int32)
                    x3ds_seq[fi] = np.concatenate((VICON_tiara_x3ds + np.array([0,1000,0],dtype=np.float32),frame[which,:3])), \
                          np.concatenate((np.arange(len(VICON_tiara_x3ds),dtype=np.int32),which+len(VICON_tiara_x3ds)))

                #dot_labels = get_labels(dot_detections.keys(), x3ds_seq, dot_detections, mats, x2d_threshold = 0.05)

    if detectingTiara:
        primitives.append(GLPoints3D(VICON_tiara_x3ds + [0, 1000, 0]))
        primitives[-1].pointSize = 5

    global track3d, prev_frame, booting, trackGraph
    track3d = Label.Track3D(mats[:len(movies)],
                            x2d_threshold=0.03,
                            x3d_threshold=5.0,
                            min_rays=3,
                            boot_interval=2)  #tilt_threshold = 0.01, gruffalo
    trackGraph = Label.TrackGraph()
    prev_frame = 0
    booting = 1

    from UI import QApp
    from PySide import QtGui
    from GCore import State
    # Modified the options parameter for fields to be the range of acceptable values for the box
    # Previously would crash if small_blur got too low
    QApp.fields = {
        'image filter': [
            ('small_blur', 'Small blur radius',
             'This is part of the image filter which controls the size of smallest detected features.',
             'int', small_blur, {
                 "min": 0,
                 "max": None
             }),
            ('large_blur', 'Large blur radius',
             'This is part of the image filter which controls the size of largest detected features.',
             'int', large_blur, {
                 "min": 0,
                 "max": None
             }),
            ('threshold_bright', 'threshold_bright',
             'This is part of the image filter which controls the size of smallest detected features.',
             'int', threshold_bright, {
                 "min": 0,
                 "max": 255
             }),
            ('threshold_dark_inv', 'threshold_dark_inv',
             'This is part of the image filter which controls the size of largest detected features.',
             'int', threshold_dark_inv, {
                 "min": 0,
                 "max": 255
             }),
            ('circularity_threshold', 'circularity_threshold',
             'How circular?.', 'float', circularity_threshold, {
                 "min": 0,
                 "max": 100
             }),
            ('min_dot_size', 'min_dot_size',
             'min_dot_size smallest detected features.', 'float', min_dot_size,
             {
                 "min": 0,
                 "max": 100
             }),
            ('max_dot_size', 'max_dot_size',
             'max_dot_size largest detected features.', 'float', max_dot_size,
             {
                 "min": 0,
                 "max": 100
             }),
        ]
    }
    State.addKey('dotParams', {'type': 'image filter', 'attrs': attrs})
    State.setSel('dotParams')
    appIn = QtGui.QApplication(sys.argv)
    appIn.setStyle('plastique')
    win = QApp.QApp()
    win.setWindowTitle('Imaginarium Dots Viewer')
    QGLViewer.makeViewer(primitives=primitives,
                         primitives2D=primitives2D,
                         timeRange=(firstFrame, lastFrame),
                         callback=setFrame,
                         mats=mats,
                         camera_ids=camera_ids,
                         movies=movies,
                         pickCallback=picked,
                         appIn=appIn,
                         win=win)
Example #7
def setFrame(frame):
    global State, mats, movieFilenames, primitives
    global movies, primitives2D, deinterlacing, detectingWands, dot_detections, track3d, prev_frame, booting, trackGraph
    key = State.getKey('dotParams/attrs')

    skipping, prev_frame = (frame != prev_frame and frame - 1 != prev_frame), frame
    booting = 10 if skipping else booting - 1
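    # skipping is True on any timeline jump; booting then counts down 10 frames
    # of re-initialisation before the tracker is trusted again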

    p0, p1 = [], []

    if True:  #dot_detections is None:

        for pair in enumerate(movies):
            pts = process_frame(deinterlacing, detectingWands, frame, key,
                                pair)
            p0.append(pts[0])
            p1.append(pts[1])

        def make_bounds(lens):
            return np.array([sum(lens[:x]) for x in xrange(len(lens) + 1)],
                            dtype=np.int32)
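        # make_bounds turns per-camera point counts into cumulative split
        # indices, e.g. [3, 2, 4] -> [0, 3, 5, 9]; np.concatenate(([0],
        # np.cumsum(lens))) computes the same thing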

        data0 = np.array(np.concatenate(p0), dtype=np.float32).reshape(-1, 2), make_bounds(map(len, p0))
        data1 = np.array(np.concatenate(p1), dtype=np.float32).reshape(-1, 2), make_bounds(map(len, p1))
    else:
        #dot_detections = movies_to_detections(movies, [frame], deinterlacing, key)
        data0, data1 = dot_detections[frame] if frame in dot_detections else dot_detections.values()[0]
        for ci, md in enumerate(movies):
            try:
                MovieReader.readFrame(md, seekFrame=frame)
            except Exception:
                print 'oops', frame
                return None, None
            #img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
            QApp.view().cameras[ci + 1].invalidateImageData()
            data0 = data0[0].copy(), data0[
                1]  # so that undistort doesn't modify the raw detections
            data1 = data1[0].copy(), data1[1]
    # TODO, move this to the viewer...
    data0 = ViconReader.frameCentroidsToDets(data0, mats)
    data1 = ViconReader.frameCentroidsToDets(data1, mats)

    primitives2D[0].setData(data0[0], data0[1])
    primitives2D[1].setData(data1[0], data1[1])

    #print x2ds_labels
    if len(movieFilenames) != 1:
        if 1:
            #x2ds_data, x2ds_splits = data0 # dark points only
            x2ds_data, x2ds_splits = data1  # light points only
            if skipping:
                x3ds, x3ds_labels = track3d.boot(x2ds_data, x2ds_splits)
                #trackGraph = Label.TrackGraph()
            else:
                x3ds, x3ds_labels = track3d.push(x2ds_data, x2ds_splits)
                # coarse bounding box
                if False:
                    for xi, x in zip(x3ds_labels, x3ds):
                        if x[0] < -200 or x[0] > 200 or x[1] < 800 or x[1] > 1200 or x[2] < -50 or x[2] > 300:
                            track3d.x2ds_labels[np.where(track3d.x2ds_labels == xi)[0]] = -1
                            x[:] = 0
            primitives[0].setData(x3ds)
            #trackGraph.push(x3ds,x3ds_labels)
            #primitives[0].graph = trackGraph.drawing_graph()
        elif False:
            Ps = np.array([m[2] / (m[0][0, 0]) for m in mats],
                          dtype=np.float32)
            data = data0  # dark points
            #data = data1 # light points
            x3ds, x2ds_labels = Recon.intersect_rays(data[0],
                                                     data[1],
                                                     Ps,
                                                     mats,
                                                     tilt_threshold=0.003,
                                                     x2d_threshold=0.02,
                                                     x3d_threshold=5.0,
                                                     min_rays=2)
            primitives[0].setData(x3ds)
        if detectingTiara:
            global c3d_frames
            frame = c3d_frames[(frame - 55) % len(c3d_frames)]
            which = np.where(frame[:, 3] == 0)[0]
            x3ds = frame[which, :3]
            #print frame,'len',len(x3ds)
            primitives[1].setData(x3ds)
    QApp.app.refreshImageData()
    QApp.app.updateGL()
Example #8
def main(x2d_filename, xcp_filename, c3d_filename=None):
    '''Generate a 3D view of an x2d file, using the calibration.'''
    global x2d_frames, mats, Ps, c3d_frames, primitives, primitives2D, track3d, prev_frame, track_orn, orn_graph, boot, orn_mapper, mar_mapper
    prev_frame = None
    c3d_frames = None
    if c3d_filename is not None:
        c3d_dict = C3D.read(c3d_filename)
        c3d_frames, c3d_fps, c3d_labels = c3d_dict['frames'], c3d_dict['fps'], c3d_dict['labels']
    mats, xcp_data = ViconReader.loadXCP(xcp_filename)
    camera_ids = [int(x['DEVICEID']) for x in xcp_data]
    print 'loading 2d'
    x2d_dict = ViconReader.loadX2D(x2d_filename)
    x2d_frames = x2d_dict['frames']
    cameras_info = ViconReader.extractCameraInfo(x2d_dict)
    print 'num frames', len(x2d_frames)
    Ps = [m[2] / (m[0][0, 0]) for m in mats]
    track3d = Label.Track3D(mats)

    primitives = QGLViewer.makePrimitives(vertices=[], altVertices=[])
    primitives2D = QGLViewer.makePrimitives2D(([], [0]))

    global g_all_skels, md
    directory = os.path.join(os.environ['GRIP_DATA'], '151110')
    _, orn_skel_dict = IO.load(os.path.join(directory, 'orn.skel'))
    movie_fn = os.path.join(directory, '50_Grip_RoomCont_AA_02.v2.mov')
    md = MovieReader.open_file(movie_fn,
                               audio=True,
                               frame_offset=0,
                               volume_ups=10)

    asf_filename = os.path.join(directory, 'Martha.asf')
    amc_filename = os.path.join(directory, 'Martha.amc')
    asf_dict = ASFReader.read_ASF(asf_filename)
    mar_skel_dict = ASFReader.asfDict_to_skelDict(asf_dict)
    mar_skel_dict['anim_dict'] = ASFReader.read_AMC(amc_filename, asf_dict)
    for k in ('geom_Vs', 'geom_vsplits', 'geom_Gs'):
        mar_skel_dict[k] = orn_skel_dict[k].copy()
    mar_skel_dict['shape_weights'] = orn_skel_dict['shape_weights']
    mar_skel_dict['geom_dict'] = orn_skel_dict['geom_dict']

    orn_vss = ViconReader.loadVSS(os.path.join(directory, 'Orn.vss'))
    orn_vss_chan_mapping = [
        orn_vss['chanNames'].index(n) for n in orn_skel_dict['chanNames']
    ]
    orn_anim_dict = orn_skel_dict['anim_dict']
    orn_vss_anim = np.zeros(
        (orn_anim_dict['dofData'].shape[0], orn_vss['numChans']),
        dtype=np.float32)
    orn_vss_anim[:, orn_vss_chan_mapping] = orn_anim_dict['dofData']
    orn_anim_dict['dofData'] = orn_vss_anim
    orn_vss['anim_dict'] = orn_anim_dict
    for x in [
            'geom_dict', 'geom_Vs', 'geom_vsplits', 'geom_Gs', 'shape_weights'
    ]:
        orn_vss[x] = orn_skel_dict[x]
    orn_skel_dict = orn_vss

    g_all_skels = {}
    orn_mesh_dict, orn_skel_mesh, orn_geom_mesh = orn_t = Character.make_geos(
        orn_skel_dict)
    g_all_skels['orn'] = (orn_skel_dict, orn_t)
    orn_skel_dict['chanValues'][:] = 0
    Character.updatePoseAndMeshes(orn_skel_dict, orn_skel_mesh, orn_geom_mesh)

    mar_mesh_dict, mar_skel_mesh, mar_geom_mesh = mar_t = Character.make_geos(
        mar_skel_dict)
    g_all_skels['mar'] = (mar_skel_dict, mar_t)

    #ted_mesh_dict, ted_skel_mesh, ted_geom_mesh = ted_t = Character.make_geos(ted_skel_dict)
    #g_all_skels['ted'] = (ted_skel_dict, ted_t)
    #ted_skel_dict['chanValues'][0] += 1000
    #Character.updatePoseAndMeshes(ted_skel_dict, ted_skel_mesh, ted_geom_mesh)

    mnu = orn_skel_dict['markerNamesUnq']
    mns = orn_skel_dict['markerNames']
    effectorLabels = np.array([mnu.index(n) for n in mns], dtype=np.int32)
    orn_graph = Label.graph_from_skel(orn_skel_dict, mnu)
    boot = -10

    track_orn = Label.TrackModel(orn_skel_dict, effectorLabels, mats)

    #ted = GLSkel(ted_skel_dict['Bs'], ted_skel_dict['Gs']) #, mvs=ted_skel_dict['markerOffsets'], mvis=ted_skel_dict['markerParents'])
    #ted = GLSkeleton(ted_skel_dict['jointNames'],ted_skel_dict['jointParents'], ted_skel_dict['Gs'][:,:,3])
    #ted.setName('ted')
    #ted.color = (1,1,0)
    #orn = GLSkeleton(orn_skel_dict['jointNames'],orn_skel_dict['jointParents'], orn_skel_dict['Gs'][:,:,3])
    #orn.setName('orn')
    #orn.color = (0,1,1)

    #square = GLMeshes(names=['square'],verts=[[[0,0,0],[1000,0,0],[1000,1000,0],[0,1000,0]]],vts=[[[0,0],[1,0],[1,1],[0,1]]],faces=[[[0,1,2,3]]],fts=[[[0,1,2,3]]])
    #square.setImageData(np.array([[[0,0,0],[255,255,255]],[[255,255,255],[0,0,0]]],dtype=np.uint8))
    #orn_geom_mesh.setImageData(np.array([[[0,0,0],[255,255,255]],[[255,255,255],[0,0,0]]],dtype=np.uint8))

    P = Calibrate.composeP_fromData((60.8, ), (-51.4, 14.7, 3.2),
                                    (6880, 2860, 5000),
                                    0)  # roughed in camera for 151110
    ks = (0.06, 0.0)
    mat = Calibrate.makeMat(P, ks, (1080, 1920))
    orn_mapper = Opengl.ProjectionMapper(mat)
    orn_mapper.setGLMeshes(orn_geom_mesh)
    orn_geom_mesh.setImage((md['vbuffer'], (md['vheight'], md['vwidth'], 3)))

    mar_mapper = Opengl.ProjectionMapper(mat)
    mar_mapper.setGLMeshes(mar_geom_mesh)
    mar_geom_mesh.setImage((md['vbuffer'], (md['vheight'], md['vwidth'], 3)))

    global g_screen
    g_screen = Opengl.make_quad_distortion_mesh()

    QGLViewer.makeViewer(mat=mat, md=md,
                         layers={
                             #'ted':ted, 'orn':orn,
                             #'ted_skel':ted_skel_mesh, 'ted_geom':ted_geom_mesh,
                             #'square':square,
                             'orn_skel': orn_skel_mesh, 'orn_geom': orn_geom_mesh,
                             'mar_skel': mar_skel_mesh, 'mar_geom': mar_geom_mesh,
                         },
                         primitives=primitives, primitives2D=primitives2D,
                         timeRange=(0, len(x2d_frames) - 1, 4, 25.0),
                         callback=intersectRaysCB, mats=mats, camera_ids=camera_ids)
Example #9
def intersectRaysCB(fi):
    global x2d_frames, mats, Ps, c3d_frames, view, primitives, primitives2D, track3d, prev_frame, track_orn, orn_graph, boot, g_all_skels, md, orn_mapper, mar_mapper
    skipping = prev_frame is None or np.abs(fi - prev_frame) > 10
    prev_frame = fi
    view = QApp.view()
    points, altpoints = primitives
    g2d = primitives2D[0]
    frame = x2d_frames[fi]
    x2ds_data, x2ds_splits = ViconReader.frameCentroidsToDets(frame, mats)
    g2d.setData(x2ds_data, x2ds_splits)
    if skipping:
        x3ds, x3ds_labels = track3d.boot(x2ds_data, x2ds_splits)
        #trackGraph = Label.TrackGraph()
        boot = -10
    else:
        x3ds, x3ds_labels = track3d.push(x2ds_data, x2ds_splits)
    if False:
        boot = boot + 1
        if boot == 0:
            x2d_threshold_hash = 0.01
            penalty = 10.0  # penalty for unlabelled points; about 10 is typical. Set it higher to force more complete labellings.
            maxHyps = 500  # the number of hypotheses to maintain.
            print "booting:"
            numLabels = len(orn_graph[0])
            l2x = -np.ones(numLabels, dtype=np.int32)
            label_score = ISCV.label_from_graph(x3ds, orn_graph[0],
                                                orn_graph[1], orn_graph[2],
                                                orn_graph[3], maxHyps, penalty,
                                                l2x)
            clouds = ISCV.HashCloud2DList(x2ds_data, x2ds_splits,
                                          x2d_threshold_hash)
            which = np.array(np.where(l2x != -1)[0], dtype=np.int32)
            pras_score, x2d_labels, vels = Label.project_assign(
                clouds,
                x3ds[l2x[which]],
                which,
                Ps,
                x2d_threshold=x2d_threshold_hash)
            print fi, label_score, pras_score
            labelled_x3ds = x3ds[l2x[which]]
            print track_orn.bootPose(x2ds_data, x2ds_splits, x2d_labels)
        if boot > 0:
            track_orn.push(x2ds_data, x2ds_splits, its=4)
    #x3ds,x2ds_labels = Recon.intersect_rays(x2ds_data, x2ds_splits, Ps, mats, seed_x3ds = None)
    points.setData(x3ds)
    if c3d_frames is not None:
        c3ds = c3d_frames[(fi - 832) / 2]
        true_labels = np.array(np.where(c3ds[:, 3] == 0)[0], dtype=np.int32)
        x3ds_true = c3ds[true_labels, :3]
        altpoints.setData(x3ds_true)

    ci = view.cameraIndex() - 1
    if True:  #ci == -1:
        MovieReader.readFrame(md, seekFrame=max((fi - 14) / 4, 0))
        QApp.app.refreshImageData()
    (orn_skel_dict, orn_t) = g_all_skels['orn']
    orn_mesh_dict, orn_skel_mesh, orn_geom_mesh = orn_t
    orn_anim_dict = orn_skel_dict['anim_dict']
    orn_skel_dict['chanValues'][:] = orn_anim_dict['dofData'][fi]
    Character.updatePoseAndMeshes(orn_skel_dict, orn_skel_mesh, orn_geom_mesh)
    (mar_skel_dict, mar_t) = g_all_skels['mar']
    mar_anim_dict = mar_skel_dict['anim_dict']
    mar_mesh_dict, mar_skel_mesh, mar_geom_mesh = mar_t
    Character.updatePoseAndMeshes(mar_skel_dict, mar_skel_mesh, mar_geom_mesh,
                                  mar_anim_dict['dofData'][fi])

    from PIL import Image
    #orn_geom_mesh.setImage((md['vbuffer'],(md['vheight'],md['vwidth'],3)))
    #orn_geom_mesh.refreshImage()

    w, h = 1024, 1024
    cam = view.cameras[0]
    cam.refreshImageData(view)
    aspect = (float(max(1, cam.bindImage.width())) / float(cam.bindImage.height())
              if cam.bindImage is not None else 1.0)
    orn_mapper.project(orn_skel_dict['geom_Vs'], aspect)
    data = Opengl.renderGL(w, h, orn_mapper.render, cam.bindId)
    orn_geom_mesh.setImage(data)
    mar_mapper.project(mar_skel_dict['geom_Vs'], aspect)
    data = Opengl.renderGL(w, h, mar_mapper.render, cam.bindId)
    mar_geom_mesh.setImage(data)
    #image = Image.fromstring(mode='RGB', size=(w, h), data=data)
    #image = image.transpose(Image.FLIP_TOP_BOTTOM)
    #image.save('screenshot.png')

    if 0:
        global g_screen
        image = Opengl.renderGL(1920, 1080, Opengl.quad_render,
                                (cam.bindId, g_screen))
        import pylab as pl
        pl.imshow(image)
        pl.show()
    view.updateGL()
Example #10
    def initialise(self, interface, attrs):
        directory = self.resolvePath(attrs['directory'])
        if not directory: return False

        prefix = attrs['prefix']
        prefixFilename = self.resolvePath(attrs['prefixFilename'])
        if prefix and not prefixFilename: return

        calibration = attrs['calibration']
        calibrationFilename = self.resolvePath(attrs['calibrationFilename'])
        calibrationLocation = self.resolvePath(attrs['calibrationLocation'])
        if calibration and (not calibrationFilename
                            and not calibrationLocation):
            return False

        movieFilenames = []
        try:
            for fn in os.listdir(directory):
                if prefixFilename and not fn.startswith(prefixFilename):
                    continue
                if fn.endswith(('.avi', '.mov', '.mp4')):
                    movieFilenames.append(os.path.join(directory, fn))
        except OSError as e:  # portable; WindowsError is a subclass of OSError
            self.logger.error('Could not find videos: %s' % str(e))

        if not movieFilenames:
            # TODO: Here we'll have to clear the cameras etc.
            return False

        # Windows will produce a wonky order, i.e. 1, 10, 11, .., 2, 3, ..
        # Use natural sorting to rectify
        movieFilenames.sort(key=self.alphaNumKey)

        self.camera_ids = []
        self.camera_names = []
        self.movies = []
        self.mats = []
        vheights = []
        vwidths = []
        timecodes = []
        hasTimecode = False
        useTimecode = attrs.get('useTimecode', True)

        offset = attrs['offset']
        if 'offsets' in attrs and attrs['offsets']:
            offsets = eval(attrs['offsets'])
        else:
            offsets = [offset] * len(movieFilenames)

        for ci, mf in enumerate(movieFilenames):
            self.logger.info('Loading MovieReader: %s' % mf)
            movieData = MovieReader.open_file(mf,
                                              audio=False,
                                              frame_offset=offsets[ci])

            if movieData['vbuffer'] is not None:
                self.movies.append(movieData)

                self.timecodeOffsets.append(0)
                if 'timecode' in movieData and movieData['timecode']:
                    hasTimecode = True
                    timecodes.append(movieData['timecode'])

        # Make sure we have all the cameras before continuing
        if len(self.movies) != len(movieFilenames):
            self.logger.error('Could not load all movies in sequence')
            return

        # Make sure we have as many time codes as movies (if we have any)
        if hasTimecode and len(self.movies) != len(timecodes):
            self.logger.error('Not all movie files have a time code')
            return

        # See if we can get the offsets using the time codes
        if hasTimecode and useTimecode:
            print 'Video timecodes:', timecodes
            fps_all = [round(m['fps']) for m in self.movies]
            print 'FPS:', fps_all
            timecodeValues = [
                Timecode.TCFtoInt(tc, fps)
                for tc, fps in zip(timecodes, fps_all)
            ]
            tcOrderDesc = [
                timecodes.index(tc) for tc in sorted(timecodes, reverse=True)
            ]

            # Set the first offset to 0
            firstTcIndex = tcOrderDesc[0]
            self.timecodeOffsets[firstTcIndex] = 0
            largestTc = timecodes[firstTcIndex]
            offsetStartIndex = 1

            # We can also get the timecode destination from an incoming location, e.g. 2D detections
            if 'timecodeLocation' in attrs and attrs['timecodeLocation']:
                tcSyncTime = interface.attr(
                    'timecode', atLocation=attrs['timecodeLocation'])
                if tcSyncTime is not None:
                    tcSyncValue = Timecode.TCFtoInt(tcSyncTime, fps_all[0])
                    if tcSyncValue < timecodeValues[firstTcIndex]:
                        self.logger.error(
                            'Sync timecode %s is smaller than video timecodes (%s).'
                            % (tcSyncTime, largestTc))
                        return

                    largestTc = tcSyncTime
                    offsetStartIndex = 0

            self.timecode = largestTc
            self.logger.info('Setting timecode to: %s' % (largestTc))

            # Calculate the offset for each camera to get it up to speed with the target timecode
            # TODO: Replace hard coded timecode fps and multiplier
            timecodeFps, timecodeMultiplier = 25., 2.
            for tcInd in tcOrderDesc[offsetStartIndex:]:
                diff = Timecode.TCSub(largestTc, timecodes[tcInd], timecodeFps)
                self.timecodeOffsets[tcInd] = Timecode.TCFtoInt(
                    diff, timecodeFps) * timecodeMultiplier

        if self.timecodeOffsets:
            print 'Video timecode offsets:', self.timecodeOffsets

        self.camera_ids = ['Camera %d' % ci for ci in xrange(len(movieFilenames))]

        if not calibrationLocation: calibrationLocation = interface.root()
        if calibrationFilename or interface.hasAttr(
                'mats', atLocation=calibrationLocation):
            if calibrationFilename:
                # TODO: Detect filetype, e.g. .cal and .xcp and handle accordingly
                try:
                    self.mats, rawCalData = OptitrackReader.load_CAL(
                        calibrationFilename)
                    if not self.mats: return False
                except IOError as e:
                    self.logger.error('Could not load calibration file: %s' %
                                      str(e))
                    return False
            else:
                self.mats = interface.attr('mats',
                                           atLocation=calibrationLocation)
                if not self.mats:
                    self.logger.error('Could not find calibration mats: %s' %
                                      calibrationLocation)
                    return False

        else:
            from GCore import Calibrate
            for ci, (cid, md) in enumerate(zip(self.camera_ids, self.movies)):
                if md is not None:
                    self.mats.append(
                        Calibrate.makeUninitialisedMat(
                            ci, (md['vheight'], md['vwidth'])))

        for md in self.movies:
            vheights.append(md['vheight'])
            vwidths.append(md['vwidth'])

        Ps = interface.getPsFromMats(self.mats)
        self.attrs = {
            'vheight': vheights,
            'vwidth': vwidths,
            'camera_ids': self.camera_ids,
            'Ps': Ps,
            'mats': self.mats,
            'colour': eval(attrs['colour'])
        }

        if self.camera_names:
            self.attrs['camera_names'] = self.camera_names

        self.initialised = True
        return True
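
The offset logic above relies on the project's Timecode helpers (TCFtoInt, TCSub). A self-contained sketch of the underlying arithmetic, assuming simple non-drop-frame 'HH:MM:SS:FF' timecode (tc_to_frames is a hypothetical stand-in; the 25 fps and 2x multiplier mirror the hard-coded values above):

def tc_to_frames(tc, fps):
    # 'HH:MM:SS:FF' -> absolute frame count at an integer fps (non-drop-frame)
    hh, mm, ss, ff = map(int, tc.split(':'))
    return ((hh * 60 + mm) * 60 + ss) * fps + ff

timecodeFps, timecodeMultiplier = 25, 2
diff = tc_to_frames('10:00:05:00', timecodeFps) - tc_to_frames('10:00:00:00', timecodeFps)
print diff * timecodeMultiplier  # 250 frames: 5 seconds at 25 fps, doubled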
Example #11
def track_view_cb(fi, attrs):
	# g_mode = 0
	global g_webcam, g_md, g_rbfn, g_predictor
	# runtime options and state
	global g_prev_smooth_shape, g_prev_vs, g_hmc_boot, g_settle, g_head_pan_tilt_roll

	if attrs['using_webcam']:
		if g_webcam is None:
			g_webcam = WebCam()
			g_webcam.Open(State.getKey('/root/ui/attrs/cam_offset') + State.getKey('/root/ui/attrs/webcam_index'))
			g_webcam.SetProperty('FPS', State.getKey('/root/ui/attrs/cam_fps'))
			g_webcam.SetProperty('FRAME_WIDTH', State.getKey('/root/ui/attrs/cam_width'))
			g_webcam.SetProperty('FRAME_HEIGHT', State.getKey('/root/ui/attrs/cam_height'))
		if g_webcam is None:
			img = np.zeros((16,16,3),dtype=np.uint8)
		else:
			img = g_webcam.GetFrame()
			if img is None:
				img = np.zeros((16,16,3),dtype=np.uint8)
	elif g_md is not None:
		MovieReader.readFrame(g_md, seekFrame=fi) # only update the visible camera
		img = np.frombuffer(g_md['vbuffer'], dtype=np.uint8).reshape(g_md['vheight'],g_md['vwidth'],3)
		#QApp.app.qtimeline.setRange(0, g_md['vmaxframe'])
	else:
		img = np.zeros((16,16,3),dtype=np.uint8)
	
	mirror_scale = -1 if attrs['mirroring'] else 1
	rotate = attrs['rotate']

	if g_settle >= 0:
		if g_settle == 0 and g_prev_vs is not None:
			g_hmc_boot = g_prev_vs.copy()
		g_settle = g_settle - 1
	else:
		if attrs['HMC_mode'] and g_hmc_boot is not None: g_prev_vs = g_hmc_boot.copy()
		if attrs['booting'] or Face.test_reboot(img, g_prev_vs):
			g_prev_vs = Face.detect_face(img, g_predictor, 2, rotate)
			g_hmc_boot = None # in case we didn't detect a face
			g_settle = 10 # go into settle mode (10 frames)
			if g_prev_vs is not None:
				State.setKey('/root/ui/attrs/booting',False)
				if attrs['HMC_mode']: g_hmc_boot = g_prev_vs.copy()
	g_prev_vs = Face.track_face(img, g_predictor, g_prev_vs, rotate=rotate)

	# compensate for roll, translation and scale
	norm_shape, head_pan, head_tilt, A = stabilize_shape(g_prev_vs, setting_neutral=attrs['setting_neutral'])
	# dejitter
	if attrs['filtering']:
		g_prev_smooth_shape = filter_data(norm_shape, g_prev_smooth_shape)
	else:
		g_prev_smooth_shape = norm_shape.copy()
	# extract angles from the measured values
	head_pan_tilt_roll = np.degrees(np.arctan2([head_pan*mirror_scale, head_tilt, -mirror_scale*A[1][0]],[2,2,A[1][1]]))
	g_head_pan_tilt_roll = filter_data(head_pan_tilt_roll, g_head_pan_tilt_roll, 3.0)

	camera = QApp.view().camera
	camera.lockedUpright = False
	camera.cameraRoll = (-90*rotate if rotate != -1 else g_head_pan_tilt_roll[2])

	ret = g_prev_smooth_shape.copy()
	if attrs['mirroring']:
		flip_order = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0, 26,25,24,23,22,21,20,19,18,17, 27,28,29,30, 35,34,33,32,31, \
			  45,44,43,42, 47,46, 39,38,37,36, 41,40, 54,53,52,51,50,49,48, 59,58,57,56,55, 64,63,62,61,60, 67,66,65, 69,68]
		ret = ret[flip_order]
	slider_names, slider_values = applyRetarget(g_rbfn, ret)
	#State._setKey('/root/sliders/attrs', dict(zip(slider_names, slider_values))) # NO UNDO
	slider_names.extend(['NeckPan','NeckTilt','NeckRoll'])
	slider_values = np.float32(list(slider_values)+list(g_head_pan_tilt_roll))

	return g_head_pan_tilt_roll.copy(),g_prev_vs.copy(),norm_shape,img,slider_names,slider_values,A
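
The roll term above comes from arctan2 over entries of the affine matrix A returned by stabilize_shape. A self-contained sketch of recovering an angle from a 2x2 rotation matrix the same way (pure NumPy, illustrative values):

import numpy as np

theta = np.radians(20.0)
A = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]], dtype=np.float32)
print np.degrees(np.arctan2(A[1][0], A[1][1]))  # ~20.0, the recovered roll angle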
Example #12
def retrack_refresh_rbfn():
	grip_dir = os.environ['GRIP_DATA']
	movie_fn,_ = QApp.app.loadFilename('Choose a movie to open', grip_dir, 'Movie Files (*.mp4 *.mov *.avi *.flv *.mpg)')
	md = MovieReader.open_file(movie_fn, audio=False)
	update_rbfn(md)
Example #13
File: Face.py Project: davidsoncolin/IMS
def animateHead(newFrame):
    global ted_geom, ted_geom2, ted_shape, tony_geom, tony_shape, tony_geom2, tony_obj, ted_obj, diff_geom, c3d_frames, extract
    global tony_shape_vector, tony_shape_mat, ted_lo_rest, ted_lo_mat, c3d_points
    global md, movies
    tony_geom.image, tony_geom.bindImage, tony_geom.bindId = ted_geom.image, ted_geom.bindImage, ted_geom.bindId  # reuse the texture!
    fo = 55
    MovieReader.readFrame(md, seekFrame=((newFrame + fo) / 2))
    view = QApp.view()
    for ci in range(0, 4):
        view.cameras[ci + 1].invalidateImageData()
    ci = view.cameras.index(view.camera) - 1
    if ci >= 0:
        MovieReader.readFrame(movies[ci],
                              seekFrame=(newFrame +
                                         fo))  # only update the visible camera
    frac = (newFrame % 200) / 100.
    if (frac > 1.0): frac = 2.0 - frac
    fi = newFrame % len(c3d_frames)

    if ted_skel:  # move the skeleton
        dofs = ted_anim['dofData'][fi * 2 - 120]
        Character.pose_skeleton(ted_skel['Gs'], ted_skel, dofs)
        ted_glskel.setPose(ted_skel['Gs'])
        offset = ted_skel['Gs'][13]  # ted_skel['jointNames'].index('VSS_Head')

        cams = QApp.app.getLayers()['cameras']
        tmp = np.eye(4, 4, dtype=np.float32)
        tmp[:3, :] = offset
        cams.setTransform(tmp)

        if ci >= 0:  # move the camera view to be correct
            camRT = mats[ci][1]
            RT = np.dot(camRT, np.linalg.inv(tmp))
            view.cameras[ci + 1].setRT(RT)

        # update the face geometries to fit the skeleton
        ted_geom.setPose(offset.reshape(1, 3, 4))
        tony_geom.setPose(offset.reshape(1, 3, 4))
        #TODO head_points,c3d_points,surface_points,ted_geom2

    frame = c3d_frames[fi][extract]
    which = np.where(frame[:, 3] == 0)[0]
    x3ds = frame[which, :3]
    #print which,x3ds.shape,ted_lo_rest.shape,ted_lo_mat.shape
    bnds = np.array([[0, 1]] * ted_lo_mat.shape[0], dtype=np.float32)
    tony_shape_vector[:] = OBJReader.fitLoResShapeMat(ted_lo_rest,
                                                      ted_lo_mat,
                                                      x3ds,
                                                      Aoffset=10.0,
                                                      Boffset=3.0,
                                                      x_0=tony_shape_vector,
                                                      indices=which,
                                                      bounds=bnds)
    #global tony_shape_vectors; tony_shape_vector[:] = tony_shape_vectors[newFrame%len(tony_shape_vectors)]

    #tony_shape_vector *= 0.
    #tony_shape_vector += (np.random.random(len(tony_shape_vector)) - 0.5)*0.2
    if 1:
        ted_shape_v = np.dot(ted_shape_mat_T, tony_shape_vector).reshape(-1, 3)
    else:
        ted_shape_v = np.zeros_like(ted_obj['v'])
        ISCV.dot(ted_shape_mat_T, tony_shape_vector, ted_shape_v.reshape(-1))
    tony_shape_v = ted_shape_v
    #tony_shape_v = tony_shape['v']*frac
    ted_geom.setVs(ted_obj['v'] + ted_shape_v)  #ted_shape['v'] * frac)
    tony_geom.setVs(tony_obj['v'] + tony_shape_v -
                    np.array([200, 0, 0], dtype=np.float32))
    ted_geom2.setVs(ted_obj['v'] * (1.0 - frac) +
                    tony_tedtopo_obj['v'] * frac +
                    np.array([200, 0, 0], dtype=np.float32))
    #if len(ted_shape_v) == len(tony_shape_v):
    #	tony_geom2.setVs(tony_obj['v'] + ted_shape_v - [400,0,0])
    #	diff_geom.setVs(ted_obj['v'] + tony_shape_v - ted_shape_v - [600,0,0])

    #print [c3d_labels[i] for i in which]
    surface_points.vertices = np.dot(ted_lo_mat.T,
                                     tony_shape_vector).T + ted_lo_rest
    surface_points.colour = [0, 1, 0, 1]  # green
    c3d_points.vertices = x3ds
    c3d_points.colour = [1, 0, 0, 1]  # red

    QApp.app.refreshImageData()
    QApp.app.updateGL()
Example #14
File: Face.py Project: davidsoncolin/IMS
            print 'generating ted skel and anim'
            ASFReader.convertASFAMC_to_SKELANIM(asf_filename, amc_filename,
                                                skelFilename, animFilename)
            ted_skel = IO.load(skelFilename)[1]
            ted_anim = IO.load(animFilename)[1]
        ted_xcp_mats, ted_xcp_data = ViconReader.loadXCP(xcp_filename)

    if True:  # facial animation

        global ted_geom, ted_geom2, ted_shape, tony_geom, tony_shape, tony_geom2, tony_obj, ted_obj, diff_geom, c3d_frames
        global tony_shape_vector, tony_shape_mat, ted_lo_rest, ted_lo_mat, c3d_points
        global md, movies

        ted_dir = os.path.join(os.environ['GRIP_DATA'], 'ted')
        wavFilename = os.path.join(ted_dir, '32T01.WAV')
        md = MovieReader.open_file(wavFilename)
        c3d_filename = os.path.join(
            ted_dir, '201401211653-4Pico-32_Quad_Dialogue_01_Col_wip_02.c3d')
        c3d_dict = C3D.read(c3d_filename)
        c3d_frames, c3d_fps, c3d_labels = c3d_dict['frames'], c3d_dict['fps'], c3d_dict['labels']
        if False:  # only for cleaned-up data
            c3d_subject = 'TedFace'
            which = np.where([s.startswith(c3d_subject)
                              for s in c3d_labels])[0]
            c3d_frames = c3d_frames[:, which, :]
            c3d_labels = [c3d_labels[i] for i in which]
            print c3d_labels
        if False:  # this is for the cleaned-up data (don't apply the other offset...)
            offset = Calibrate.composeRT(Calibrate.composeR((0.0, 0.0, 0)),
                                         (0, 0, -8), 0)  # 0.902
Example #15
def import_movie_cb():
	grip_dir = os.environ['GRIP_DATA']
	movie_fn, _ = QApp.app.loadFilename('Choose a movie to open', grip_dir, 'Movie Files (*.mp4 *.mov *.avi *.flv *.mpg)')
	global md
	md = MovieReader.open_file(movie_fn)
Example #16
def set_frame_cb2(frame):
	global g_predictor, g_predictor_dlib, g_detector
	size = (len(g_predictor['ref_shape'])+4)
	geo_vs = np.zeros((size,3), dtype=np.float32)
	ref_vs = np.zeros((size,3), dtype=np.float32)

	global g_prev_vs
	try: g_prev_vs
	except NameError: g_prev_vs = None
	if 0: # show_images
		global g_jpgs; fn = g_jpgs[frame%len(g_jpgs)]
		img = Face.load_image(fn)
		img = Face.fix_image(img, max_size=640)
		use_prev_vs = False # images need booting every frame
	else: # show_movies
		global md; MovieReader.readFrame(md, seekFrame=frame) # only update the visible camera
		img = np.frombuffer(md['vbuffer'], dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3)
		use_prev_vs = True
		
	if 0: # undistort_stuff
		global g_screen
		global g_tid, g_bid
		g_tid,g_bid = Opengl.bind_streaming_image(img, g_tid, g_bid)
		img = Opengl.renderGL(img.shape[1], img.shape[0], Opengl.quad_render, (g_tid, g_screen, 0.85))
		#Opengl.unbind_image(bid)

	if 0: # rotated_image
		img = img.transpose((1,0,2)).copy()
	if 0: # gamma_image
		lookup = np.array([int(((x/255.0)**0.4545)*255.0) for x in range(256)], dtype=np.uint8)
		img = lookup[img]
	#img[:,600:1000] = 0 #img[:,200:600].copy()
	if 0: # test_rotate
		import scipy; img = scipy.misc.imrotate(img, frame, interp='bilinear')
	if 0: # test_rotate_right
		import scipy; img[:,-img.shape[0]:] = scipy.misc.imrotate(img[:,-img.shape[0]:], frame, interp='bilinear')
	if 0: # test_filter_image
		img = ISCV.filter_image(img,4,16)

	w,h = img.shape[1]*0.5,img.shape[0]*0.5

	boot = g_prev_vs
	if boot is None: boot = Face.detect_face(img, g_predictor, 2) # ,-1) # put -1 at end to boot at any angle
	tmp = Face.track_face(img, g_predictor, boot)
	if use_prev_vs and boot is not None: g_prev_vs = tmp
	if frame == 0 or Face.test_reboot(img, g_prev_vs): g_prev_vs = None
	global template_vs
	geo_vs[:size-4,:2] = tmp
	geo_vs[size-4:size,:2] = Face.get_boundary(geo_vs[:size-4,:2], template_vs)

	if 0: # show_aam
		global g_aam_model
		shape_u, tex_u, A_inv, mn  = Face.fit_aam(g_aam_model, tmp, img)
		Face.render_aam(g_aam_model, A_inv*0.1, mn*0.1, shape_u, tex_u, img)
		su,tu = Face.normalized_aam_coords(g_aam_model, shape_u, tex_u)
		res = Face.aam_residual(g_aam_model, tmp, img)
		QApp.view().displayText = [(10,100,'%f' % np.linalg.norm(tu)),(10,125,'%f' % np.linalg.norm(su)),(10,150,'%f'%res)]

	if 0: # show_extracted_texture
		global g_aam_model_indices,g_aam_model_weights
		pixels = Face.extract_texture(img, geo_vs[:size,:2], g_aam_model_indices, g_aam_model_weights)
		global template_vs
		Face.render_texture(pixels, img, template_vs, g_aam_model_indices, g_aam_model_weights)

	geo_mesh = QApp.app.getLayer('geo_mesh')
	geo_mesh.setVs(geo_vs)
	geo_mesh.transforms[0][:,:3] = [[1,0,0],[0,1,0],[0,0,1],[-w,1000-h,0.1]]
	image_mesh = QApp.app.getLayer('image_mesh')
	image_mesh.setVs(np.array([[-w,-h,0],[w,-h,0],[w,h,0],[-w,h,0]], dtype=np.float32))
	image_mesh.setImage(img)
	QApp.view().updateGL()
Example #17
def update_rbfn(md, short_name='Take', mapping_file=None):
	global g_rbfn, g_predictor
	# TODO these groups must have weights, this can't initialise weights
	groups, slider_splits, slider_names, marker_names = extract_groups(g_rbfn)

	# update the neutral
	if mapping_file:
		fi = mapping_file[mapping_file.keys()[0]]['Neutral']
	else:
		g = groups[0][1]
		print g.keys()
		active_poses = [pn for pn in g['marker_data'].keys() if pn not in g.get('disabled', [])]
		ni = [ap.rsplit('_',2)[1]=='Neutral' for ap in active_poses].index(True)
		fi = int(active_poses[ni].rsplit('_',2)[2])
	print 'neutral on frame',fi
	MovieReader.readFrame(md, fi)
	img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3).copy()
	vs = Face.detect_face(img, g_predictor)
	vs = Face.track_face(img, g_predictor, vs)
	clear_neutral()
	g_rbfn['neutral'] = stabilize_shape(vs)[0]
	for (gn,group) in groups:
		gmd,gsd,gis = {},{},{}
		for pose_key,pose_data in group['marker_data'].iteritems():
			sd = group['slider_data'][pose_key]
			test_short_name,pose_name,frame_number = pose_key.rsplit('_',2)
			assert(test_short_name == short_name)
			fi = int(frame_number)
			print fi
			if mapping_file:
				if pose_name not in mapping_file[gn]:
					print 'WARNING: pose %s missing; removing from rbfn' % pose_name
					continue
				fi = mapping_file[gn].pop(pose_name)
				print 'remapping to',fi
			MovieReader.readFrame(md, fi)
			img = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3).copy()
			vs = Face.detect_face(img, g_predictor)
			if vs is None:
				print 'failed to boot'
				for vi in range(max(fi-300,0),fi):
					MovieReader.readFrame(md, vi)
					img2 = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3).copy()
					vs = Face.detect_face(img2, g_predictor)
					if vs is not None:
						print 'booted on frame',vi
						for vi2 in range(vi+1,fi):
							MovieReader.readFrame(md, vi2)
							img2 = np.frombuffer(md['vbuffer'],dtype=np.uint8).reshape(md['vheight'],md['vwidth'],3).copy()
							vs = Face.track_face(img2, g_predictor, vs)
						break
					if vi == fi-1: print 'don\'t know what to do'
			vs = Face.track_face(img, g_predictor, vs)
			#Face.show_image(img,vs)
			#vs, head_pan, head_tilt, A = stabilize_shape(vs)
			print pose_name
			#tmp = pose_data.reshape(-1,3)[:,:2]
			#Face.show_image(None,tmp-np.mean(tmp,axis=0),(vs-np.mean(vs,axis=0))*5)
			pose_data = np.hstack((vs,np.zeros((vs.shape[0],1),dtype=np.float32)))
			pose_key = '_'.join((short_name,pose_name,str(fi)))
			gmd[pose_key] = pose_data
			gsd[pose_key] = sd
			gis[pose_key] = JPEG.compress(img)
		group['marker_data'] = gmd
		group['slider_data'] = gsd
		group['images'] = gis
	if mapping_file: print 'left overs:',mapping_file
Example #18
def generateSkeleton(cacheId=622,
                     x3d_filename='',
                     perc=0.9,
                     triangleThreshold=1000,
                     thresholdDistance=25.,
                     useFrames=range(0, 514),
                     numComps=30,
                     labelGraphThreshold=4,
                     stepSize=1):

    directory = os.path.join(os.environ['GRIP_DATA'],
                             '140113_A2_GRIP_GenPeople')
    c3d_filename = 'ROM.c3d'
    cameraB = 7292, 34, (47, 2.3, -0.2), (67, 788, 79)
    cameraA = 5384, 49.8, (5.4, 1.1, -0.7), (4, 1135, 0)  # 5045, 52, (5.6, 0.9, -0.7), (0, 1130, 0)
    camera = cameraA
    startFrame = 0

    tempDir = os.environ['GRIP_TEMP']

    #import logging
    #logging.basicConfig(level=logging.DEBUG)
    from IO import C3D

    graph_out_fn = None

    # labelGraphThreshold = 4
    # stepSize = 1

    if True:  # bo data
        c = C3D.read(os.path.join(directory, c3d_filename))
        c3d_frames, c3d_fps = c['frames'], c['fps']
        pointLabels = c['labels']
        print 'c3d fps = ', c3d_fps
        numFramesVisiblePerPoint = np.sum(c3d_frames[:, :, 3] == 0, axis=0)
        numPointsVisiblePerFrame = np.sum(c3d_frames[:, :, 3] == 0, axis=1)
        print 'threshold', 0.90 * len(c3d_frames)
        goodPoints = np.where(
            numFramesVisiblePerPoint > 0.90 * len(c3d_frames))[0]
        goodFrames = np.where(
            np.sum(c3d_frames[:, goodPoints,
                              3] == 0, axis=1) == len(goodPoints))[0]
        print len(goodPoints), len(goodFrames)  # 290 x 6162 (80%), 283 x 8729 (90%), 275 x 10054 (96%)
        frames = c3d_frames[goodFrames, :, :][:, goodPoints, :][:, :, :3]
        pointLabels = [pointLabels[g] for g in goodPoints]
        #badPoint = pointLabels.index('BoDense:A_Neck_1')

        data_fn = 'W90-28-10.IO'
        skel_out_fn = None
        triangleThreshold = 1000.
    else:  # orn data
        # cacheId = 622
        # perc = 0.9
        # triangleThreshold = 1000.      # Bone threshold
        # thresholdDistance = 25.        # Joint threshold
        # useFrames = range(2370, 3500)
        # useFrames = range(2650, 3500)
        # useFrames = range(2600, 3480)
        # useFrames = range(2650, 3480)
        # useFrames = range(0, 2000)
        # useFrames = range(0, 514)
        #useFrames = [] #range(0, 1000)
        #useFrames.extend(range(2650, 3480))
        #useFrames.extend(range(4824, 5253))
        # numComps = 30

        # useFrames = range(0, 333)
        # useFrames = range(4824, 5253)

        print 'CacheId:', cacheId
        print 'Good point percentage:', perc
        print 'Triangle threshold:', triangleThreshold
        print 'Distance threshold:', thresholdDistance
        print 'Frames:', useFrames[0], '-', useFrames[-1]

        _, x3d_data = IO.load(x3d_filename)
        data_fn = 'W90-28-8.romtracks_T%d.IO' % cacheId
        location = '/root/tracks'
        # location = '/root/skeleton/reconstruction/collection/c3ds'
        c3d_frames = x3d_data[location]['x3ds']
        print c3d_frames.shape
        c3d_frames = np.transpose(c3d_frames, axes=(1, 0, 2))
        #frames = frames[:, blueIds, :]
        print c3d_frames.shape
        pointLabels = x3d_data[location]['x3ds_labels']

        if False:
            goodPoints = np.arange(c3d_frames.shape[1])
            goodFrames = np.arange(len(c3d_frames))
        else:
            numFramesVisiblePerPoint = np.sum(c3d_frames[useFrames, :, 3] == 0, axis=0)
            numPointsVisiblePerFrame = np.sum(c3d_frames[useFrames, :, 3] == 0, axis=1)
            goodPoints = np.where(numFramesVisiblePerPoint > perc * len(useFrames))[0]
            goodFrames = np.where(np.sum(c3d_frames[:, goodPoints, 3] == 0, axis=1) == len(goodPoints))[0]

        print '# Good points: %d | # Good frames: %d' % (len(goodPoints),
                                                         len(goodFrames))
        print goodFrames[:4]
        frames = c3d_frames[goodFrames, :, :][:, goodPoints, :][:, :, :3]
        pointLabels = [int(pointLabels[g]) for g in goodPoints]

        skel_out_fn = None
        graph_out_fn = None

    data = frames[::stepSize, :, :].copy()
    first_time_only = not os.path.exists(os.path.join(tempDir, data_fn))
    if first_time_only:  # generate the file
        M = ASFReader.greedyTriangles(
            data,
            numComps,
            triangleThreshold=triangleThreshold,
            thresholdDistance=thresholdDistance**2)  # only every Nth frame
        IO.save(os.path.join(tempDir, 'M90_T%d.IO' % cacheId), M)
        _, M = IO.load(os.path.join(tempDir, 'M90_T%d.IO' % cacheId))
        stabilizedPointToGroup, stabilizedPointResiduals, stabilizedFrames = ASFReader.assignAndStabilize(
            data,
            M['RTs'][M['triIndices'][:28]],
            thresholdDistance=thresholdDistance**2)
        W = {
            'stabilizedPointToGroup': stabilizedPointToGroup,
            'stabilizedPointResiduals': stabilizedPointResiduals,
            'stabilizedFrames': stabilizedFrames
        }
        IO.save(os.path.join(tempDir, data_fn), W)
    else:
        _data = IO.load(os.path.join(tempDir, data_fn))[1]
        stabilizedPointToGroup = _data['stabilizedPointToGroup']
        stabilizedPointResiduals = _data['stabilizedPointResiduals']
        stabilizedFrames = _data['stabilizedFrames']

    print 'numFrames = %d' % len(stabilizedFrames)
    print 'number of labelled points %d' % np.sum(stabilizedPointToGroup != -1)
    print 'RMS of labelled points %fmm' % np.sqrt(
        np.mean(
            stabilizedPointResiduals[np.where(stabilizedPointToGroup != -1)]))
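    # The residuals appear to be squared distances (thresholds are passed as
    # thresholdDistance**2 throughout), so sqrt(mean(...)) is an RMS in mm.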
    first_time_only = True  # force the fit-tightening pass below, even when loading from cache
    print stabilizedPointToGroup
    num_groups = max(stabilizedPointToGroup) + 1
    stabilized_groups = [
        np.where(stabilizedPointToGroup == gi)[0] for gi in range(num_groups)
    ]
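    # stabilized_groups[gi] holds the indices of the points assigned to rigid
    # group gi; points labelled -1 are unassigned and excluded.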
    if first_time_only:
        if True:  # tighten the fit
            # thresh = [10,10,9,9] #,10,10,9,7,9,9,6,9,9,9,]
            thresh = [thresholdDistance, thresholdDistance,
                      thresholdDistance - 1, thresholdDistance - 1,
                      thresholdDistance - 2, thresholdDistance - 2]
            # thresh = [20, 20, 19, 19, 10, 10, 9, 9]
            for t in thresh:
                #stabilizedPointToGroup[badPoint] = -1 # unlabel
                RTs = ASFReader.stabilizeAssignment(data,
                                                    stabilizedPointToGroup)
                stabilizedPointToGroup, stabilizedPointResiduals, stabilizedFrames = ASFReader.assignAndStabilize(
                    data, RTs, thresholdDistance=float(t)**2)
                print 'number of labelled points %d' % np.sum(
                    stabilizedPointToGroup != -1)
                print 'RMS of labelled points %fmm' % np.sqrt(
                    np.mean(stabilizedPointResiduals[np.where(
                        stabilizedPointToGroup != -1)]))
        else:
            RTs = ASFReader.stabilizeAssignment(data, stabilizedPointToGroup)
            stabilizedPointToGroup, stabilizedPointResiduals, stabilizedFrames = ASFReader.assignAndStabilize(
                data, RTs, thresholdDistance=10.**2)

        global animJoints, stablePointsGroups, displayFrames, groupRepresentatives
        stablePointsData = ASFReader.sharedStablePoints(RTs, threshold=3.**2)
        stablePointsGroups = [sp[0] for sp in stablePointsData]
        stablePoints = np.array([sp[2] for sp in stablePointsData],
                                dtype=np.float32)
        print 'num stable points', len(stablePoints)

    def residual(gi, leaf_indices, RTs):
        '''given a group and a list of attachment points, choose the best attachment point and return the residual.'''
        tmp = [(ASFReader.transform_pair_residual(RTs[gi], RTs[gj]), gj)
               for gj in leaf_indices]
        return min(tmp)
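    # min() over ((residual, offset), gj) tuples compares residuals first, so the
    # helper returns both the best residual/offset and the parent it came from.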

    # make a skeleton from stabilizedPointToGroup
    root_group = 0
    leaf_nodes = set([root_group])
    skel_group_indices = [root_group]
    skel_joint_parents = [-1]
    groups = set(range(stabilizedPointToGroup.max() + 1))
    groups.remove(root_group)
    RTs = ASFReader.stabilizeAssignment(data, stabilizedPointToGroup)
    joints = []
    joints.append(np.mean(data[0, stabilized_groups[root_group]], axis=0))
    bones = []
    bones.append([])
    G = np.eye(3, 4, dtype=np.float32)
    G[:, 3] = np.mean(data[0, stabilized_groups[root_group]], axis=0)
    Gs = [G]
    while groups:
        residuals = [(residual(gi, leaf_nodes, RTs), gi) for gi in groups]
        (((res, O), parent), group) = min(residuals)
        groups.remove(group)
        leaf_nodes.add(group)
        skel_group_indices.append(group)
        pi = skel_group_indices.index(parent)
        skel_joint_parents.append(pi)
        joint_world = np.float32(O)
        joints.append(joint_world)
        bones.append([np.mean(data[0, stabilized_groups[group]], axis=0) - O])
        bones[pi].append(joint_world - joints[pi])
        print group, parent
        G = np.eye(3, 4, dtype=np.float32)
        G[:, 3] = O
        Gs.append(G)
    print skel_group_indices
    print skel_joint_parents

    numJoints = len(skel_joint_parents)
    jointNames = map(str, skel_group_indices)
    jointIndex = dict(zip(jointNames, range(len(jointNames))))
    jointParents = skel_joint_parents
    jointChans = [0, 1, 2] + [3, 4, 5] * numJoints
    jointChanSplits = [0, 3, 6]
    for x in range(numJoints - 1):
        jointChanSplits.append(jointChanSplits[-1])
        jointChanSplits.append(jointChanSplits[-1] + 3)
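    # Channel layout: the root gets 6 dofs (tx,ty,tz,rx,ry,rz), every other joint
    # 3 rotation dofs. jointChanSplits stores, per joint, the start of its
    # translation and rotation channels, terminated by numDofs; e.g. for 3 joints:
    # [0, 3, 6, 6, 9, 9, 12].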
    dofNames = [
        jointNames[ji] +
        [':tx', ':ty', ':tz', ':rx', ':ry', ':rz'][jointChans[di]]
        for ji in range(numJoints)
        for di in range(jointChanSplits[2 * ji], jointChanSplits[2 * ji + 2])
    ]
    numDofs = len(dofNames)

    def mult_inv(Gs_pi, Gs_gi):
        # Gs_pi^-1 Gs_gi = Ls_gi
        R = np.linalg.inv(Gs_pi[:3, :3])
        ret = np.dot(R, Gs_gi)
        ret[:, 3] -= np.dot(R, Gs_pi[:, 3])
        return ret

    Ls = np.float32([
        mult_inv(Gs[pi], Gs[gi]) if pi != -1 else Gs[gi]
        for gi, pi in enumerate(skel_joint_parents)
    ])
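    # Ls holds each joint's transform local to its parent: composing Gs[pi] with
    # Ls[gi] recovers Gs[gi]; the root (pi == -1) keeps its global transform.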
    Bs = bones

    print map(len, Bs)

    markerParents = [
        skel_group_indices.index(gi) for gi in stabilizedPointToGroup
        if gi != -1
    ]
    markerNames = [('%d' % pi) for pi, gi in enumerate(stabilizedPointToGroup)
                   if gi != -1]
    labelNames = [('%d' % pointLabels[pi])
                  for pi, gi in enumerate(stabilizedPointToGroup) if gi != -1]
    markerOffsets = [
        np.dot(Gs[skel_group_indices.index(gi)][:3, :3].T,
               data[0][pi] - Gs[skel_group_indices.index(gi)][:3, 3])
        for pi, gi in enumerate(stabilizedPointToGroup) if gi != -1
    ]

    skel_dict = {
        'name': 'skeleton',
        'numJoints': int(numJoints),
        'jointNames': jointNames,  # list of strings
        'jointIndex': jointIndex,  # dict of string:int
        'jointParents': np.int32(jointParents),
        'jointChans': np.int32(jointChans),  # 0 to 5 : tx,ty,tz,rx,ry,rz
        'jointChanSplits': np.int32(jointChanSplits),
        'chanNames': dofNames,  # list of strings
        'chanValues': np.zeros(numDofs, dtype=np.float32),
        'numChans': int(numDofs),
        'Bs': Bs,
        'Ls': np.float32(Ls),
        'Gs': np.float32(Gs),
        'markerParents': np.int32(markerParents),
        'markerNames': markerNames,
        'markerOffsets': np.float32(markerOffsets),
        'markerWeights': np.ones(len(markerNames), dtype=np.float32),
        'rootMat': np.eye(3, 4, dtype=np.float32),
        'labelNames': labelNames
    }

    if graph_out_fn is not None and labelGraphThreshold != -1:
        print 'Generating labelling graph...'
        from GCore import Label as GLabel
        c3d_data = c3d_frames[goodFrames, :, :][:, goodPoints, :][:, :, :]
        c3d_data = c3d_data[::stepSize, :, :]
        # graph = GLabel.graph_from_c3ds(skel_dict, markerNames, c3d_data, threshold=3)
        graph = GLabel.graph_from_c3ds(skel_dict,
                                       markerNames,
                                       c3d_data,
                                       threshold=labelGraphThreshold)
        IO.save(graph_out_fn, {'/root/graph': {'label_graph': graph}})
        print 'Labelling graph saved to:', graph_out_fn

    if skel_out_fn is not None: IO.save(skel_out_fn, skel_dict)

    def test_skeleton(sd):
        '''Basic sanity checks that a dict is a skeleton (still partial; extend as needed).'''
        assert isinstance(sd['name'], str), 'name key should be a string'
        numJoints = sd['numJoints']
        assert isinstance(numJoints, int), 'numJoints key should be an int'
        assert len(sd['jointNames']) == numJoints, 'jointNames should have numJoints entries'
        assert len(sd['jointParents']) == numJoints, 'jointParents should have numJoints entries'
        assert sd['jointParents'][0] == -1, 'first joint should be the root'

    animJoints = None
    showStabilized = False
    if showStabilized:
        displayFrames = stabilizedFrames
        pointToGroup = stabilizedPointToGroup
    else:  # show animated
        displayFrames = frames  #c3d_frames[:,:,:3]
        displayLabels = pointLabels
        if first_time_only:  # generate the file
            framesRTs = ASFReader.stabilizeAssignment(displayFrames,
                                                      stabilizedPointToGroup)
            IO.save(
                os.path.join(tempDir, 'tmp90-28.IO'), {
                    'framesRTs': framesRTs,
                    'stabilizedPointToGroup': stabilizedPointToGroup,
                    'stablePoints': stablePoints,
                    'stablePointsGroups': stablePointsGroups
                })
        # assigning into locals() does not rebind function locals in CPython, so unpack explicitly
        _cached = IO.load(os.path.join(tempDir, 'tmp90-28.IO'))[1]
        framesRTs = _cached['framesRTs']
        stabilizedPointToGroup = _cached['stabilizedPointToGroup']
        stablePoints = _cached['stablePoints']
        stablePointsGroups = _cached['stablePointsGroups']
        animJoints = ASFReader.unstabilize(stablePoints,
                                           framesRTs[stablePointsGroups])
        print 'animJoints shape', animJoints.shape
        pointToGroup = -np.ones(displayFrames.shape[1], dtype=np.int32)
        print goodPoints.shape, pointToGroup.shape, stabilizedPointToGroup.shape
        pointToGroup = stabilizedPointToGroup
        #pointToGroup[goodPoints] = stabilizedPointToGroup # for displayFrames = c3d_frames[:,:,:3]
    groupRepresentatives = ASFReader.groupRepresentatives(
        data, stabilizedPointToGroup)
    numJoints = len(stablePoints)
    boneEdges = np.array(range(2 * numJoints), dtype=np.int32)
    boneVertices = np.zeros((numJoints * 2, 3), dtype=np.float32)
    boneVertices[::2] = stablePoints
    boneVertices[1::2] = displayFrames[
        0, groupRepresentatives[stablePointsGroups]]
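    # boneVertices interleaves segment endpoints: even entries are the stable
    # (joint) points, odd entries the representative marker of the owning group,
    # so each consecutive pair draws one joint-to-marker bone.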

    #import cv2
    #movie = cv2.VideoCapture(directory+movieFilename)
    #frameOk, frameData = movie.read()
    #global md
    #md = {'buffer':frameData, 'height':frameData.shape[0], 'width':frameData.shape[1]}

    global app, win, view, frame, points, joints, bones
    app = QtGui.QApplication(sys.argv)
    app.setStyle('plastique')
    win = QtGui.QMainWindow()
    win.setFocusPolicy(QtCore.Qt.StrongFocus)  # get keyboard events
    win.setWindowTitle('Imaginarium Skeleton Reconstruction Test %d' % cacheId)
    panel = GViewer.QGLPanel()
    view = panel.view
    view.setMinimumWidth(640)
    view.setMinimumHeight(480)
    win.setCentralWidget(panel)
    timelineDock = QtGui.QDockWidget('Timeline')
    timeline = UI.QTimeline(win)
    timeline.cb = setFrame
    timeline.setRange(0, goodFrames[-1])
    timelineDock.setWidget(timeline)
    timelineDock.setFeatures(QtGui.QDockWidget.DockWidgetMovable
                             | QtGui.QDockWidget.DockWidgetFloatable)

    frame = startFrame
    view.addCamera(UI.QGLViewer.Camera('default'))
    grid = GLGrid()
    view.primitives.append(grid)

    points = GLPoints3D(displayFrames[frame])
    from colorsys import hsv_to_rgb
    # golden-ratio hue stepping spreads the group colours; the extra entry maps -1 (unassigned) to black
    colorTable = np.array([hsv_to_rgb((h * 0.618033988749895) % 1, 0.5, 0.95)
                           for h in xrange(max(pointToGroup) + 2)], dtype=np.float32)
    colorTable[-1] = 0
    points.colours = colorTable[pointToGroup]
    #points.names = displayLabels
    #points.pointSize = 3
    view.primitives.append(points)
    joints = GLPoints3D(stablePoints)
    joints.names = map(str, xrange(len(stablePoints)))
    view.primitives.append(joints)
    bones = GLBones(boneVertices, boneEdges)
    view.primitives.append(bones)
    win.addDockWidget(QtCore.Qt.BottomDockWidgetArea, timelineDock)
    win.show()

    global md, img, g_detectingDots, g_readingMovie
    md, img, g_detectingDots = None, None, False
    g_readingMovie = False
    if g_readingMovie:
        # NB: movieFilename is not defined in this example; set it before enabling g_readingMovie
        md = MovieReader.open_file(os.path.join(directory, movieFilename))
        img = np.frombuffer(md['vbuffer'],
                            dtype=np.uint8).reshape(md['vheight'],
                                                    md['vwidth'], 3)
        view.setImageData(md['vbuffer'], md['vheight'], md['vwidth'], 3)

    global allSkels
    allSkels = []

    app.connect(app, QtCore.SIGNAL('lastWindowClosed()'), app.quit)
    sys.exit(app.exec_())
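Since generateSkeleton is driven entirely by keyword arguments plus the GRIP_DATA/GRIP_TEMP environment variables, a driver can be a few lines. A hedged sketch with placeholder paths (note the function opens a Qt viewer and calls sys.exit when its window closes):

if __name__ == '__main__':
    import os
    os.environ.setdefault('GRIP_DATA', '/data/grip')  # hypothetical locations;
    os.environ.setdefault('GRIP_TEMP', '/tmp/grip')   # point these at your data
    # coarse pass over the ROM capture: every 4th frame, default thresholds
    generateSkeleton(cacheId=622, x3d_filename='', perc=0.9,
                     triangleThreshold=1000, thresholdDistance=25.,
                     useFrames=range(0, 514), numComps=30,
                     labelGraphThreshold=4, stepSize=4)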
Example #19
    def cook(self, location, interface, attrs):
        if not self.initialised: return
        self.frame = interface.frame()
        imgs = []

        offset = attrs.get('offset', 0)
        stepSize = attrs.get('step', 1)

        # If a single active camera is selected, only that camera's movie is decoded,
        # which is cheaper than reading every camera. The lookup below is allowed to
        # fail quietly (log=False); a missing attr simply means no active camera.
        activeCameraIdx = interface.attr('activeCameraIdx',
                                         atLocation=interface.root(),
                                         log=False)
        if attrs.get('onlyActiveCamera') and activeCameraIdx is not None and activeCameraIdx != -1:
            frameNum = max(
                (self.frame + offset + self.timecodeOffsets[activeCameraIdx]) *
                stepSize, 0)
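            # Map the op's frame to this movie's frame: apply the user offset and
            # the per-camera timecode offset, scale by the step, and clamp at 0.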
            md = self.movies[activeCameraIdx]

            try:
                MovieReader.readFrame(md,
                                      seekFrame=frameNum,
                                      playingAudio=False)
            except:
                self.logger.error(
                    'Could not read frame: %d for active camera %d' %
                    (self.frame, activeCameraIdx))
                return

            img = np.frombuffer(md['vbuffer'], dtype=np.uint8).reshape(
                md['vheight'], md['vwidth'], 3)
            imgs.append(img)

        else:
            # Process all cameras (slower but necessary for processes/Ops that need all the data)
            for ci, md in enumerate(self.movies):
                try:
                    frameNum = max(
                        (self.frame + offset + self.timecodeOffsets[ci]) *
                        stepSize, 0)
                    MovieReader.readFrame(md,
                                          seekFrame=frameNum,
                                          playingAudio=False)
                    img = np.frombuffer(md['vbuffer'], dtype=np.uint8).reshape(
                        md['vheight'], md['vwidth'], 3)
                    imgs.append(img)

                except:
                    self.logger.error(
                        'Could not read frame: %d for camera %d' %
                        (self.frame, ci))
                    return

        self.attrs['imgs'] = imgs
        interface.createChild(interface.name(),
                              'cameras',
                              atLocation=interface.parentPath(),
                              attrs=self.attrs)

        if self.timecode: interface.setAttr('timecode', self.timecode)
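The same frame-mapping rule appears in both branches of cook; a standalone restatement for clarity (the helper name and example values are illustrative only):

def camera_frame(frame, offset, timecode_offset, step_size):
    '''Map an interface frame to a movie frame for one camera, as in cook above.'''
    return max((frame + offset + timecode_offset) * step_size, 0)

# e.g. a camera whose timecode lags by 3 frames, sampled every 2nd frame:
# camera_frame(10, 0, 3, 2) -> 26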