def main(self):
    """Wire up the ATB bar and glumpy event handlers, then run the main loop."""
    self.setup_atb()
    window = self.fig.window
    # Input handlers first, then the AntTweakBar proxy, then drawing.
    window.push_handlers(
        self.on_init,
        self.on_mouse_drag,
        self.on_mouse_scroll,
        self.on_key_press,
        self.on_key_release,
    )
    window.push_handlers(atb.glumpy.Handlers(window))
    window.push_handlers(self.on_draw)
    window.set_title(self.title)
    glumpy.show()
def main(self):
    """Register all window event handlers and enter the glumpy event loop."""
    self.setup_atb()
    win = self.fig.window
    # Push handlers in three batches, preserving registration order:
    # raw input callbacks, the AntTweakBar bridge, then the draw callback.
    handler_batches = (
        (self.on_init, self.on_mouse_drag, self.on_mouse_scroll,
         self.on_key_press, self.on_key_release),
        (atb.glumpy.Handlers(win),),
        (self.on_draw,),
    )
    for batch in handler_batches:
        win.push_handlers(*batch)
    win.set_title(self.title)
    glumpy.show()
def play(z, T=5.):
    """Animate a 3-D array ``z`` (N_X, N_Y, N_frame) as a looping movie.

    T: duration in seconds of one full period through the frames.
    TODO: currently failing on MacOsX - use numpyGL?
    """
    global t, t0, frames
    N_X, N_Y, N_frame = z.shape
    import glumpy
    fig = glumpy.figure((N_X, N_Y))
    # Z is the mutable display buffer; frames are copied into it in-place.
    Z = z[:, :, 0].T.astype(np.float32)
    image = glumpy.image.Image(Z)
    t0, frames, t = 0, 0, 0

    @fig.event
    def on_draw():
        fig.clear()
        image.draw(x=0, y=0, z=0, width=fig.width, height=fig.height)

    @fig.event
    def on_key_press(symbol, modifiers):
        # TAB toggles fullscreen; ESC quits.
        if symbol == glumpy.window.key.TAB:
            if fig.window.get_fullscreen():
                fig.window.set_fullscreen(0)
            else:
                fig.window.set_fullscreen(1)
        if symbol == glumpy.window.key.ESCAPE:
            import sys
            sys.exit()

    @fig.event
    def on_idle(dt):
        global t, t0, frames
        t += dt
        frames = frames + 1
        if t - t0 > 5.0:
            # Report FPS every five seconds, then reset the counters.
            fps = float(frames) / (t - t0)
            print('FPS: %.2f (%d frames in %.2f seconds)' % (fps, frames, t - t0))
            frames, t0 = 0, t
        # Pick the frame closest to the actual wall-clock time within the
        # period T.  np.mod(t, T)/T < 1, so the index stays < N_frame.
        # BUG FIX: np.int was removed in NumPy 1.24; use the builtin int.
        Z[...] = z[:, :, int(np.mod(t, T) / T * N_frame)].T.astype(np.float32)
        image.update()
        fig.redraw()

    glumpy.show()
def dantien(feed_func, layout, update_rate=5):
    """Lay out a grid of sub-figures described by ``layout`` and animate them.

    layout is a list of rows; each cell is a constructor called with
    (subfigure, time_series).  update_rate drives the data-feed timer.
    """
    series = TimeSeries(feed_func)
    n_rows = len(layout)
    n_cols = len(layout[0])
    fig = glumpy.figure()
    # Column-major traversal, matching product(range(cols), range(rows)).
    for col in range(n_cols):
        for row in range(n_rows):
            constructor = layout[row][col]
            # glumpy positions count from the bottom, layout rows from the top.
            panel = fig.add_figure(cols=n_cols, rows=n_rows,
                                   position=[col, n_rows - row - 1])
            constructor(panel, series)

    @fig.timer(update_rate)
    def _feed(_):
        series.eat()

    @fig.event('on_idle')
    def _refresh(_):
        fig.redraw()

    glumpy.show()
# Demo: split a single glumpy figure into four colored quadrants.
import OpenGL.GL as gl
from glumpy import figure, show

# Start from one figure and carve it up with split(); each split returns a
# new sub-figure.  NOTE(review): the differing size fractions (.75 / .25)
# are presumably relative to the figure being split — confirm against the
# glumpy figure.split API before relying on exact proportions.
top_left = figure()
top_right = top_left.split('right')
bottom_left = top_left.split('bottom', size=.75)
bottom_right = top_right.split('bottom', size=.25)

# Each sub-figure repaints itself with a solid RGBA color; the handler name
# 'on_draw' is significant — fig.event dispatches on the function name.
@top_left.event
def on_draw():
    top_left.clear(1, 0, 0, 1)   # red

@top_right.event
def on_draw():
    top_right.clear(0, 0, 1, 1)  # blue

@bottom_right.event
def on_draw():
    bottom_right.clear(0, 1, 0, 1)  # green

@bottom_left.event
def on_draw():
    bottom_left.clear(1, 1, 1, 1)   # white

# Enter the glumpy event loop.
show()
# Event handlers for the training visualization.  `fig`, `sim`, `opts` and
# `save_frame` are defined earlier in the file, outside this excerpt.

@fig.event
def on_draw():
    # Repaint: clear the figure and let the simulation draw itself.
    fig.clear()
    sim.draw()


@fig.event
def on_idle(dt):
    # Advance the simulation unless paused (frames_until_pause == 0).
    # A negative value means "run forever"; a positive value counts down
    # to an automatic pause (used for single-stepping).
    if sim.frames_until_pause != 0:
        sim.frames_until_pause -= 1
        try:
            # NOTE(review): '.next()' is the Python 2 iterator protocol;
            # under Python 3 the learner must expose a next() method.
            sim.learner.next()
        except Exception:
            # Was a bare 'except:': narrowed so SystemExit and
            # KeyboardInterrupt are not swallowed before the explicit exit.
            logging.exception('error while training !')
            sys.exit()
        if opts.save_frames and 0 == sim.frames % opts.save_frames:
            save_frame(sim.frames_saved, fig.width, fig.height)
            sim.frames_saved += 1
        fig.redraw()


@fig.event
def on_key_press(key, modifiers):
    # ESC quits, SPACE toggles run/pause, ENTER single-steps while paused.
    if key == glumpy.window.key.ESCAPE:
        sys.exit()
    if key == glumpy.window.key.SPACE:
        # Replaced the legacy 'cond and -1 or 0' idiom with a conditional
        # expression: -1 (run forever) when currently paused, else 0 (pause).
        sim.frames_until_pause = -1 if sim.frames_until_pause == 0 else 0
    if key == glumpy.window.key.ENTER:
        if sim.frames_until_pause >= 0:
            sim.frames_until_pause = 1


glumpy.show()
# NOTE(review): fig3, fig1, fig2 and fig31 are created earlier in the file,
# outside this excerpt.  Two more sub-figures are added to fig3's 2x2 grid:
# one single cell and one cell spanning the full right column (size=[1, 2]).
fig32 = fig3.add_figure(cols=2, rows=2, position=[0, 1], size=[1, 1])
fig33 = fig3.add_figure(cols=2, rows=2, position=[1, 0], size=[1, 2])

# Each figure repaints itself with a solid RGBA color; the handler name
# 'on_draw' is significant — fig.event dispatches on the function name.
@fig1.event
def on_draw():
    fig1.clear(1, 0, 0, 1)   # red

@fig2.event
def on_draw():
    fig2.clear(0, 1, 0, 1)   # green

@fig31.event
def on_draw():
    fig31.clear(0, 0, 1, 1)  # blue

@fig32.event
def on_draw():
    fig32.clear(0, 0, 0, 1)  # black

@fig33.event
def on_draw():
    fig33.clear(1, 1, 1, 1)  # white

# Enter the event loop.
show()
# NOTE(review): fragment — the enclosing function and the 'if' that pairs
# with the trailing 'else:' (GUI vs. headless mode, presumably) are outside
# this excerpt.  The indentation below is inferred; confirm against the
# full file before relying on it.  Python 2 syntax (print statements).
Z[...] = FD.SV.transpose()   # copy the solver field into the display buffer
I.update()
fig.redraw()
if EnableVideo:
    # Dump the current figure as a numbered JPEG frame for video assembly.
    fileName = FILE + str(FD.t / 100) + ".jpg"
    FD.save_video(fig, fileName)
else:
    #Retrieve the receiver signals from the computing device to the host
    FD.saveOutput() #FD.receivers_signals
    # Save simulation data: receivers, dt, dr, dz
    FD.save_data(FILE)
    sys.exit()
glumpy.show()
else: # main loop — headless: run the OpenCL solver with no display
    while (FD.t < TimeIter):
        FD.RunCL()
        if FD.t % 500 == 0:
            # Progress report every 500 iterations.
            print FD.t, " of total iterations: ", TimeIter
            print time.time() - start
    #Retrieve the receiver signals from the computing device to the host
    FD.saveOutput() #FD.receivers_signals
    # Save simulation data: receivers, dt, dr, dz
    FD.save_data(FILE)
def main(simulator):
    """Run the glumpy visualization loop for a driving ``simulator``.

    Draws the world quad, lane polylines and the two cars, steps the
    simulator in wall-clock time, and handles keyboard control.
    Relies on module globals ``elapsed`` and ``ESTIMATE``.
    """
    fig = glumpy.figure()
    world = fig.add_figure()

    @fig.event
    def on_init():
        # FIX: GL_BLEND was enabled twice; the duplicate call is removed.
        gl.glEnable(gl.GL_BLEND)
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_COLOR_MATERIAL)
        gl.glColorMaterial(gl.GL_FRONT_AND_BACK, gl.GL_AMBIENT_AND_DIFFUSE)
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)

    @fig.event
    def on_draw():
        fig.clear()
        # draw the opengl driving world (a gray background quad).
        w = world.width
        h = world.height
        x = world.x
        y = world.y
        gl.glBegin(gl.GL_QUADS)
        gl.glColor(0.2, 0.2, 0.2)
        gl.glNormal(0, 0, 1)
        gl.glVertex(x, y, 0)
        gl.glVertex(x + w, y, 0)
        gl.glVertex(x + w, y + h, 0)
        gl.glVertex(x, y + h, 0)
        gl.glEnd()
        # Center the view on the agent and scale to the smaller dimension.
        z = min(w, h) / 300.
        gl.glPushMatrix()
        gl.glLoadIdentity()
        gl.glTranslate(x + w / 2., y + h / 2., 10)
        gl.glScale(z, z, 1)
        a, b = simulator.agent.position
        gl.glTranslate(-a, -b, 0)
        gl.glLight(gl.GL_LIGHT0, gl.GL_POSITION, (0, 0, 100, 1))
        # draw lanes.
        gl.glLineWidth(2)
        gl.glColor(0, 0, 0)
        for lane in simulator.lanes:
            gl.glBegin(gl.GL_LINE_STRIP)
            for a, b in lane:
                gl.glVertex(a, b, 1)
            gl.glEnd()
        # draw cars (this module is passed in so cars can call its GL helpers).
        simulator.leader.draw(sys.modules[__name__], 1, 0, 0)
        simulator.agent.draw(sys.modules[__name__], 1, 1, 0)
        gl.glPopMatrix()

    @fig.event
    def on_idle(dt):
        # Fixed-timestep stepping: consume wall-clock time in dt-sized chunks.
        global elapsed
        elapsed += dt
        while elapsed > simulator.dt:
            elapsed -= simulator.dt
            try:
                simulator.step()
            except StopIteration:
                sys.exit()
        fig.redraw()

    @fig.event
    def on_key_press(key, modifiers):
        if key == glumpy.window.key.ESCAPE:
            sys.exit()
        elif key == glumpy.window.key.SPACE:
            global ESTIMATE
            # BUG FIX: 'ESTIMATE ^= ESTIMATE' always yields 0/False, so the
            # flag could never be switched on; toggle it instead.
            ESTIMATE = not ESTIMATE
        else:
            simulator.reset()
            fig.redraw()

    glumpy.show()
def browser(data_path, video_path, pts_path, cam_intrinsics_path):
    """Interactive browser for a recorded world video with gaze overlay.

    data_path: recording directory (used when exporting video).
    video_path: list of one or two video files (world video, optional source).
    pts_path: .npy file of gaze samples; each row ends with (…, frame_index)
        and starts with (x, y, dt) — see the bucketing loop below.
    cam_intrinsics_path: None, or [camera_matrix.npy, dist_coefs.npy].

    Fixes applied: Python 3 print calls, `!= 0` instead of the identity
    test `is not 0`, narrowed bare excepts, explicit VideoWriter release.
    """
    # video recorder state
    record = Temp()
    record.path = None
    record.writer = None

    # capture objects
    c = Temp()
    c.captures = [cv2.VideoCapture(path) for path in video_path]
    total_frames = min([cap.get(7) for cap in c.captures])  # CV_CAP_PROP_FRAME_COUNT
    record.fps = min([cap.get(5) for cap in c.captures])    # CV_CAP_PROP_FPS

    r, img_arr = c.captures[0].read()
    if len(c.captures) == 2:
        r, img_arr2 = c.captures[1].read()
    img_arr = cv2.cvtColor(img_arr, cv2.COLOR_BGR2RGB)
    fig = glumpy.figure((img_arr.shape[1], img_arr.shape[0]))
    image = glumpy.Image(img_arr)
    image.x, image.y = 0, 0

    # gaze object: bucket the gaze samples by their video frame index
    gaze = Temp()
    gaze.list = np.load(pts_path)
    gaze_list = list(gaze.list)
    gaze_point = Point(color=(255, 0, 0, 0.3), scale=40.0)
    positions_by_frame = [[] for frame in range(int(gaze_list[-1][-1]) + 1)]
    while gaze_list:
        s = gaze_list.pop(0)
        frame = int(s[-1])
        positions_by_frame[frame].append({'x': s[0], 'y': s[1], 'dt': s[2]})
    gaze.map = positions_by_frame

    # keyframe list object
    framelist = Temp()
    framelist.keyframes = []
    framelist.otherframes = []

    cam_intrinsics = Temp()
    cam_intrinsics.H_map = []
    # NOTE(review): g_pool is never used in this function — confirm it is
    # not consumed elsewhere before removing.
    g_pool = Temp()
    if cam_intrinsics_path is not None:
        cam_intrinsics.K = np.load(cam_intrinsics_path[0])
        cam_intrinsics.dist_coefs = np.load(cam_intrinsics_path[1])

    atb.init()
    bar = Bar("Browser", data_path, total_frames, framelist,
              dict(label="Controls", help="Scene controls",
                   color=(50, 50, 50), alpha=50, text='light',
                   position=(10, 10), size=(200, 440)))

    def draw():
        gaze_point.draw()

    def on_draw():
        fig.clear(0.0, 0.0, 0.0, 1.0)
        image.draw(x=image.x, y=image.y, z=0.0,
                   width=fig.width, height=fig.height)
        draw()

    def on_close():
        print("Close event !")

    def on_idle(dt):
        bar.update_fps(dt)
        sleep(0.03)  # throttle the idle loop
        if bar.play or bar.get_single:
            # load new images
            r, img1 = c.captures[0].read()
            if len(c.captures) == 2:
                r, img2 = c.captures[1].read()
                if r and img1.shape != img2.shape:
                    img2 = cv2.resize(img2, (img1.shape[1], img1.shape[0]))
            if not r:
                # end of file: stop playback
                bar.play.value = 0
                return
            bar.frame_num.value += 1
            # NOTE(review): guard kept from the original; a frame counter of
            # 0 after incrementing suggests wrap-around — confirm intent.
            if bar.frame_num.value == 0:
                bar.play.value = 0
            # Extract corresponding pupil positions; only the first sample of
            # the frame is used (index 0).
            try:
                x_screen, y_screen = denormalize(
                    (gaze.map[bar.frame_num.value][0]['x'],
                     gaze.map[bar.frame_num.value][0]['y']),
                    fig.width, fig.height, flip_y=True)
                img1[int(y_screen), int(x_screen)] = [255, 255, 255]
                # update gaze.x_screen, gaze.y_screen / OPENGL COORDINATE SYSTEM
                gaze.x_screen, gaze.y_screen = flip_horizontal(
                    (x_screen, y_screen), fig.height)
                gaze_point.update((gaze.x_screen, gaze.y_screen))
                print(x_screen, y_screen)
            except Exception:
                # best-effort: no gaze sample for this frame (was bare except)
                pass
            # BUG FIX: 'is not 0' tested object identity on an int; use '!='.
            if cam_intrinsics_path is not None and bar.display.value != 0:
                # undistort the world image
                img1 = cv2.undistort(img1, cam_intrinsics.K,
                                     cam_intrinsics.dist_coefs)
                # Undistort the gaze point based on the distortion coefs
                x_screen, y_screen = undistort_point(
                    (x_screen, y_screen),
                    cam_intrinsics.K, cam_intrinsics.dist_coefs)
                if bar.display.value in (2, 3):
                    # homography mapping: map the world onto the source video
                    overlay, H = homography_map(img2, img1)
                    if overlay is not None:
                        # carry the gaze point through the homography
                        pt_homog = np.array([x_screen, y_screen, 1])
                        pt_homog = np.dot(H, pt_homog)
                        pt_homog /= pt_homog[-1]  # normalize
                        x_screen, y_screen, _ = pt_homog
                        img1 = overlay  # overwrite img with the overlay
                    if bar.display.value == 3:
                        img1 = img2  # overwrite with the source video
            # update the img array shown by glumpy
            img_arr[...] = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)

            # recorder logic: open writer on first recorded frame
            if bar.record_video.value and not bar.record_running.value:
                record.path = os.path.join(bar.data_path, "out.avi")
                record.writer = cv2.VideoWriter(
                    record.path, cv2.cv.CV_FOURCC(*'DIVX'), record.fps,
                    (img1.shape[1], img1.shape[0]))
                bar.record_running.value = 1
            if bar.record_video.value and bar.record_running.value:
                # Save image frames to the video writer, gaze circle included.
                try:
                    cv2.circle(img1, (int(x_screen), int(y_screen)),
                               20, (0, 255, 0, 100), 1)
                except Exception:
                    pass  # best-effort: gaze point may be undefined this frame
                record.writer.write(img1)
            # Finish all recordings, clean up.
            if not bar.record_video.value and bar.record_running.value:
                # FIX: release the writer explicitly instead of relying on GC.
                record.writer.release()
                record.writer = None
                bar.record_running.value = 0
            # just grab one image.
            bar.get_single = 0
            image.update()
            fig.redraw()
        if bar.exit:
            on_close()
            fig.window.stop()

    fig.window.push_handlers(on_idle)
    fig.window.push_handlers(atb.glumpy.Handlers(fig.window))
    fig.window.push_handlers(on_draw)
    fig.window.push_handlers(on_close)
    fig.window.set_title("Browser")
    fig.window.set_position(0, 0)
    glumpy.show()
def glumpy_viewer(img_array,
                  arrays_to_print=[],
                  commands=None,
                  cmap=None,
                  window_shape=(512, 512),
                  contrast_norm=None):
    """Setup and start glumpy main loop to visualize Image array `img_array`.

    img_array - an array-like object whose elements are float32 or uint8
                ndarrays that glumpy can show.  larray objects work here.

    arrays_to_print - arrays whose elements will be printed to stdout
                      after a keypress changes the current position.

    commands - keyboard dispatch table {char: fn(state)}; defaults to the
               module-level _commands.
    contrast_norm - None, 'each' (normalize each image on display), or
               'all' (normalize the whole array up front).

    Raises ValueError for an unrecognized contrast_norm.
    """
    if contrast_norm not in (None, 'each', 'all'):
        raise ValueError('contrast_norm', contrast_norm)

    if contrast_norm == 'all':
        # BUG FIX: the float32 copy was computed and discarded, so the
        # in-place normalization below ran on the original (possibly uint8)
        # array; bind the copy before normalizing.
        img_array = np.array(img_array, 'float32')
        img_array -= img_array.min()
        img_array /= max(img_array.max(), 1e-12)

    try:
        n_imgs = len(img_array)
    except TypeError:
        n_imgs = None

    # Mutable view state shared with the command callbacks.
    state = dict(
        pos=0,
        fig=glumpy.figure((window_shape[1], window_shape[0])),
        I=glumpy.Image(img_array[0], colormap=cmap),
        len=n_imgs,
    )
    fig = state['fig']

    if commands is None:
        commands = _commands

    @fig.event
    def on_draw():
        fig.clear()
        state['I'].draw(x=0, y=0, z=0, width=fig.width, height=fig.height)

    @fig.event
    def on_key_press(symbol, modifiers):
        if chr(symbol) not in commands:
            print('unused key', chr(symbol), modifiers)
            return
        pos = state['pos']
        commands[chr(symbol)](state)
        if pos == state['pos']:
            # the command did not move the cursor; nothing to refresh
            return
        img_i = img_array[state['pos']]
        if contrast_norm == 'each':
            # -- force copy so normalization never mutates the source
            img_i = np.array(img_i, 'float32')
            img_i -= img_i.min()
            img_i /= max(img_i.max(), 1e-12)
        state['I'] = glumpy.Image(img_i, colormap=cmap, vmin=0.0, vmax=1.0)
        print(state['pos'], [o[state['pos']] for o in arrays_to_print])
        fig.redraw()

    glumpy.show()