def trigger_video(self):
    """Toggle video recording of the UI screen; always returns 1.

    NOTE(review): this top-level copy takes ``self`` and duplicates
    ``User.trigger_video`` — presumably a leftover; kept for compatibility.
    """
    if self.recording:
        # Recording in progress: stop and release the writer.
        self.recording = False
        print('Video off')
        self.video.close()
        self.video = None
    else:
        # Imported lazily so ffmpeg is only required when recording is used.
        from ffmpegwriter import FFMPEG_VideoWriter
        print('Video on')
        self.recording = True
        self.video = FFMPEG_VideoWriter(
            os.path.join(database['basepath'], 'Video{}.mp4'.format(time.strftime("%c"))),
            self.ui.screen.get_rect().size,
            ergonomy['animation_fps'], "libx264")
    return 1
class User():
    """Holds the states that must be unique across the UI: a single object can be
    grabbed, or focused for keyboard actions, at any one time.
    Non-unique states such as hovering or selecting are controlled by each widget.
    """
    debug_mode = 0
    profile_mode = 0
    paused = 0
    recording = 0  # for video making
    last_recorded = 0
    use_pil = 0

    def __init__(self, **kwargs):
        self.state = 'idle'
        # Tiny 4x4 sprite+mask used as the cursor for collision tests.
        self.arrow = pgsprite.Sprite()
        self.arrow.rect = pgrect.Rect(0, 0, 4, 4)
        self.arrow.radius = 2
        self.arrow.mask = pgmask.Mask((4, 4))
        self.arrow.mask.fill()
        self.focused_on = None
        self.grabbed = None
        self.just_clicked = None  # for double clicks
        self.status = None
        self.ui = None
        self.evt = EventCommander(self)
        self.evt_per_ui = {}  # one EventCommander per UI (see set_ui)
        self.mouseover = None  # Emote used for mouseover text
        self.screen_scale = 1
        self.screen_trans = array((0, 0))
        self.dragging = False  # position from which user starts dragging on the screen
        self.started_click = 0

    def pause(self, do=True):
        """Pause (or resume with do=False) the user and its event commander."""
        self.paused = do
        self.evt.paused = do

    def setpos(self, pos):
        """Move the cursor collision sprite to pos."""
        self.arrow.rect.center = pos

    def focus_on(self, item):
        """Transfer keyboard focus to item (clears it when item is falsy)."""
        if self.focused_on:
            self.focused_on.rm_state('focus')
        self.focused_on = item
        if item:
            return item.set_state('focus')
        return False

    def unfocus(self):
        """Clear keyboard focus; returns True if something was focused."""
        if self.focused_on:
            self.focused_on.rm_state('focus')
            self.focused_on = None
            return True
        return False

    def grab(self, item):
        """Make item the unique grabbed object (release with a falsy item)."""
        self.kill_mouseover()
        if self.grabbed:
            self.grabbed.rm_state('grab')
        self.grabbed = item
        if item:
            return item.set_state('grab')
        return False

    def ungrab(self):
        """Release the grabbed object; returns True if something was grabbed."""
        if self.grabbed:
            self.grabbed.rm_state('grab')
            self.grabbed = None
            return True
        return False

    def set_ui(self, ui, kill=True, **kwargs):
        """Switch the active UI, keeping one EventCommander per UI.

        kill: kill (and deregister) the previous UI first.
        no_launch (kwarg): skip calling ui.launch().
        """
        if kill:
            self.kill_ui(self.ui)
        self.ui = ui
        if self.ui not in self.evt_per_ui:
            self.evt_per_ui[self.ui] = EventCommander(self)
        self.evt = self.evt_per_ui[self.ui]
        if not kwargs.get('no_launch', False):
            self.ui.launch()
        return True

    def kill_ui(self, ui):
        """Kill ui and forget its event commander (no-op for a falsy ui)."""
        if ui:
            ui.kill()
            # pop() instead of del: tolerate a ui that was never registered
            self.evt_per_ui.pop(ui, None)

    def set_status(self, status):
        """Set the status text and broadcast a 'status' event (best effort)."""
        if self.status == status:
            return
        self.status = status
        try:
            evt = Event(affects=self, type='status')
            self.evt.pass_event(evt, self, True)
        except Exception as e:
            print(e)

    def set_mouseover(self, txt, anim=None, **kwargs):
        """Delegate mouseover text display to the active UI."""
        self.ui.set_mouseover(txt, anim, **kwargs)

    def kill_mouseover(self):
        """Delegate mouseover removal to the active UI."""
        self.ui.kill_mouseover()

    def react(self, evt):
        """Drop the mouseover emote when its animation stops."""
        if 'anim' in evt.type:
            if evt.args[0].item == self.mouseover and 'stop' in evt.type:
                self.mouseover.kill()
                self.mouseover = None

    def mouse_pos(self):
        """Mouse position in scene coordinates (scaled, then translated)."""
        return self.scale_vec(pg.mouse.get_pos()) - self.screen_trans

    def mouse_rel(self):
        """Mouse displacement since the previous call, in scene coordinates."""
        return self.scale_vec(pg.mouse.get_rel())

    def scale_vec(self, vec, invert=True):
        """Convert a 2-vector between screen and scene coordinates.

        invert=True divides by the zoom factor (screen -> scene);
        invert=False multiplies (scene -> screen, only for blitting).
        """
        if self.screen_scale != 1:
            # bugfix: previously read the module-level global `user` here
            if invert:
                return tuple(rint(vec[i] / self.screen_scale) for i in (0, 1))
            else:  # only for blitting
                return tuple(rint(vec[i] * self.screen_scale) for i in (0, 1))
        return vec

    def trigger_video(self):
        """Toggle video recording of the UI screen; always returns 1."""
        if self.recording:
            self.recording = False
            print('Video off')
            self.video.close()
            self.video = None
        else:
            # Imported lazily so ffmpeg is only required when recording is used.
            from ffmpegwriter import FFMPEG_VideoWriter
            print('Video on')
            self.recording = True
            self.video = FFMPEG_VideoWriter(
                os.path.join(database['basepath'], 'Video{}.mp4'.format(time.strftime("%c"))),
                self.ui.screen.get_rect().size,
                ergonomy['animation_fps'], "libx264")
        return 1

    def screenshot(self):
        """Save a screenshot of the current screen; returns 1 even on failure."""
        try:
            print('Screenshot taken')
            pg.image.save(
                self.ui.screen,
                os.path.join(database['basepath'], 'Screenshot{}.png'.format(time.strftime("%c"))))
        except Exception as e:
            print(e)
        return 1

    def add_video_frame(self, img=None, enhance=False):
        """Append img (default: the whole screen) as a frame of the current video.

        enhance=True sharpens the frame through PIL before writing it.
        """
        if img is None:
            img = self.ui.screen
        # bugfix: the original tested the literal list ['show_cursor_on_video'],
        # which is always true; presumably the ergonomy option was meant — TODO
        # confirm. Defaulting to True preserves the previous always-on behavior.
        if ergonomy.get('show_cursor_on_video', True):
            img.blit(ICONLIB["mycursor"], pg.mouse.get_pos())
        pil_string_image = pg.image.tostring(img, "RGB")
        if enhance:
            pil_image = pilImage.fromstring("RGB", img.get_rect().size, pil_string_image)
            enhancer = pilImageEnhance.Sharpness(pil_image)
            pil_image = enhancer.enhance(1.2)
            pil_string_image = pil_image.tostring()
        self.video.write_frame(pil_string_image)

    def debug(self, item):
        """Open a floating menu listing debug info for item.

        Uses item.debug() when available (dict or iterable of entries),
        otherwise falls back to item.__dict__.
        """
        struct = None
        dic = None
        if hasattr(item, 'debug'):
            deb = item.debug()
            if deb is None:
                return
            if hasattr(deb, 'keys'):
                dic = deb
            elif hasattr(deb, '__iter__'):
                struct = tuple(deb)
        if not struct:
            if not dic:
                dic = item.__dict__
            # Menu entries are (label, callback); the callback is a no-op.
            struct = tuple(('{}:{}'.format(i, j), lambda: 1) for i, j in dic.items())
        self.ui.float_menu(struct, scrollable='v')

    def accessibility_input_scheme(self, event):
        '''Options for converting the input scheme, e.g. for tablet:
        a long left click without dragging is converted into a right click.'''
        if event.type == pg.MOUSEBUTTONDOWN:
            # Danger of using real time: if there is lag, click becomes long!
            self.started_click = pg.time.get_ticks()
        if event.type == pg.MOUSEMOTION:
            if not self.dragging and pg.mouse.get_pressed()[0]:
                if event.rel[0] or event.rel[1]:
                    self.dragging = event.pos
        if event.type == pg.MOUSEBUTTONUP:
            click_duration = pg.time.get_ticks() - self.started_click
            if event.button == 1:
                if click_duration > ergonomy['long_click_duration'] and ergonomy['long_left_equals_right_click']:
                    # Only convert when the cursor barely moved (mouse tremor).
                    if not self.dragging or max((array(self.dragging) - event.pos)**2) < ergonomy['mouse_tremor']**2:
                        dic = event.dict
                        dic['button'] = 3
                        event = pg.event.Event(event.type, dic)
            # AFTER EVERYTHING, STOP DRAGGING
            if self.dragging:
                self.dragging = False
                if self.state == 'drag':
                    self.state = 'idle'
        return event

    def keymap(self, event):
        """Global keyboard shortcuts; returns a truthy value if consumed."""
        interp = interpret_input(event)
        if interp == 'CTRL+p':
            return self.screenshot()
        if interp == 'CTRL+ALT+v' and database['edit_mode']:
            return self.trigger_video()
        if interp == 'CTRL+d' and database['edit_mode']:
            self.debug_mode = 1 - self.debug_mode
            print('Debug: {}'.format(self.debug_mode))
            return True