class Gui(QtGui.QMainWindow):
    """Main window that shows a live webcam feed and lets the user click points onto it."""

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Device 0 is the default webcam; Video wraps the OpenCV capture.
        self.video = Video(cv2.VideoCapture(0))
        # Poll a new frame roughly every 27 ms (~37 fps).
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self.play)
        self._timer.start(27)
        self.update()

    def play(self):
        '''Capture the next frame and display it in the videoFrame label.'''
        try:
            self.video.capture_next_frame()
            self.ui.videoFrame.setPixmap(
                self.video.convert_frame())
            self.ui.videoFrame.setScaledContents(True)
        except TypeError:
            # convert_frame() returns None when no frame is available.
            print "No frame"

    def mousePressEvent(self, event):
        '''On mouse click, forward the click position (and the label geometry,
        so it can be mapped into image coordinates) to the video layer.'''
        self.video.add_point(QtCore.QPoint(event.pos()), self.ui.videoFrame.geometry())
def set_webcam(self, device):
    u"""Configure and start webcam capture on *device*.

    Records the webcam settings, rebuilds the Video object in webcam mode,
    and — if the webcam is actually reachable — pushes all detection settings
    to it and starts playback. Otherwise the view is switched to its
    no-webcam state.
    """
    self.settei.settings["webcam"] = True
    self.settei.settings["device"] = device
    # Drop any previous video object before building the webcam-backed one.
    self.video = None
    outdir = self.settei.settings["outdir"]
    playfile = self.playfile = device
    logfunc = self.main_view.write_log
    self.video = Video(playfile, outdir, logfunc, webcam=True)
    # If the webcam is connected, apply the initial settings.
    if self.video.check_webcam() is True:
        self.video.set_bounding(self.settei.settings["bounding"])
        self.video.set_verbose(self.settei.settings["verbose"])
        self.video.set_detecttype(self.settei.settings["detecttype"])
        self.main_view.set_webcam_view()
        top = self.settei.settings["detectionTop"]
        bottom = self.settei.settings["detectionBottom"]
        left = self.settei.settings["detectionLeft"]
        right = self.settei.settings["detectionRight"]
        self.set_detectionarea(top, bottom, left, right)
        self.set_imgscale()
        self.set_speed()
        self.control_video("START")
    else:
        self.main_view.set_webcam_view(webcam=False)
def download(self, index, **kwargs):
    '''Resume the download of the playlist item at 1-based position *index*.

    Extra keyword arguments are forwarded unchanged to Video.download().
    '''
    entry = self.res[index - 1]
    return Video(entry).download(**kwargs)
def make_movie_list(self):
    """Scan the current directory for movie files, pair each with its sidecar
    description/bullet text files, record (name, description, bullet, length)
    tuples in self.movie_list, and build a cache directory for new entries."""
    files = os.listdir('.')
    suffix_list = ['avi', 'mp4']
    # print(files)
    for item in files:
        s = item
        if s.split('.')[-1] in suffix_list:
            # NOTE(review): s[0] is only the FIRST CHARACTER of the filename,
            # so the sidecar is looked up as "<char>_des.txt". Confirm this is
            # not meant to be the filename stem (s.split('.')[0]).
            description_name = s[0] + '_des.txt'
            #print(description_name)
            if os.path.exists(description_name):
                f = open(description_name, 'r')
                description = f.read()
                f.close()
            else:
                description = ''
            bullet_name = s[0] + '_bullet.txt'
            #print(bullet_name)
            if os.path.exists(bullet_name):
                f = open(bullet_name, 'r')
                bullet = f.read()
                f.close()
            else:
                bullet = ''
            video = Video(item)
            length = video.get_time()
            full_item = (item, description, bullet, length)
            self.movie_list.append(full_item)
    for item in self.movie_list:
        file_path = os.path.join(self.base_cache, item[0])
        if not os.path.exists(file_path):
            os.makedirs(file_path)
            # Only newly created cache directories are (re)populated.
            make_cache(item[0], file_path)
def read_fovs(file): data = np.genfromtxt(file, dtype=None, unpack=True, delimiter='\t') prev_vid = data[0][0] fovs = [] idx = 0 videos = [] for d in data: vid = d[0] # print str(vid), str(prev_vid) if vid == prev_vid: # lat, lon, compass, R, alpha fov = FOV(data[idx][2], data[idx][3], data[idx][4], data[idx][5], data[idx][6]) fovs.append(fov) else: # new video v = Video(fovs) v.id = prev_vid videos.append(v) # print v.to_str() # new fovs fovs = [] fov = FOV(data[idx][2], data[idx][3], data[idx][4], data[idx][5], data[idx][6]) fovs.append(fov) idx = idx + 1 prev_vid = vid print "number of videos", len(videos) return videos
def procVideo(s, tweet_json, outputDir):
    """Extract video/GIF metadata from a tweet's extended entities and attach a
    Video object to *s* (the status wrapper). No-op when the tweet has no media
    or the first media item is neither a video nor an animated GIF."""
    if 'extended_entities' in tweet_json:
        media = tweet_json['extended_entities'].get('media', [])
        # NOTE(review): media[0] raises IndexError when 'media' is empty —
        # confirm callers only pass tweets that actually carry media.
        vid_type = media[0]['type']
        if vid_type == "video":
            title = None
            if 'title' in media[0]['additional_media_info']:
                title = media[0]['additional_media_info']['title']
            duration = None
            if 'duration_millis' in media[0]['video_info']:
                duration = media[0]['video_info']['duration_millis']
            url = ''
            # Keeps the LAST variant with a positive bitrate (not necessarily
            # the highest-bitrate one).
            for v in media[0]['video_info']['variants']:
                if 'bitrate' in v:
                    if v['bitrate'] > 0:
                        url = v['url']
            if url != '':
                file_name = str(s.tweet_id) + '.mp4'
                file = outputDir + file_name
                s.video = Video(s.tweet_id, file, url, title, duration, vid_type)
        elif vid_type == "animated_gif":
            title = None
            duration = None
            # GIFs expose a single variant.
            url = media[0]['video_info']['variants'][0]['url']
            file_name = str(s.tweet_id) + '.gif'
            file = outputDir + file_name
            s.video = Video(s.tweet_id, file, url, title, duration, vid_type)
def download(self, index, **kwargs):
    '''Resume downloading the playlist entry at 1-based position *index*,
    forwarding any keyword arguments to Video.download().'''
    item = self.res[index - 1]
    video = Video(item)
    result = video.download(**kwargs)
    return result
def __init__(self, title, rating, duration, cast, storyline, poster_image,
             trailer_url):
    """Build a movie record.

    Delegates the shared fields (title, duration, cast) to the Video parent
    constructor, then stores the movie-specific metadata on the instance.
    """
    Video.__init__(self, title, duration, cast)
    self.rating = rating                    # MPAA-style movie rating
    self.storyline = storyline              # short plot summary
    self.poster_image_url = poster_image    # URL of the poster artwork
    self.trailer_youtube_url = trailer_url  # YouTube trailer URL
def __init__(self, parent=None):
    """Set up the streaming window: base-class UI scaffolding, this window's
    own widgets, and a Video surface sized to the frame widget."""
    super(StreamingWindow, self).__init__(parent)
    # NOTE(review): mixes super(Class, self) and bare super(); both resolve to
    # the same parent in Python 3 — presumably intentional, confirm.
    super().setTitleName("Streaming Window")
    super().basicUI()
    self.initUi()
    self.video = Video(self, QSize(self.frm.width(), self.frm.height()))
def read_data(file):
    """Read a tab-separated table and group consecutive rows (keyed by the
    video id in column 1) into Video objects holding FOV lists.

    Columns 2..6 per row are: lat, long, compass, R, alpha.
    Returns the list of Video objects.
    """
    data = np.genfromtxt(file, unpack=True, delimiter='\t')
    prev_vid = 1
    fovs = []
    idx = 0
    videos = []
    for vid in data[1]:
        if vid == prev_vid:
            # lat, long, compass, R, alpha
            fov = FOV(data[2][idx], data[3][idx], data[4][idx],
                      data[5][idx], data[6][idx])
            fovs.append(fov)
        else:
            # Flush the finished group; it belongs to prev_vid, not vid.
            v = Video(fovs)
            v.id = prev_vid  # BUG FIX: was `vid`, labelling the group with the NEXT video's id
            videos.append(v)
            fovs = []
            fov = FOV(data[2][idx], data[3][idx], data[4][idx],
                      data[5][idx], data[6][idx])
            fovs.append(fov)
        idx += 1
        prev_vid = vid
    # BUG FIX: flush the trailing group — the last video was dropped.
    if fovs:
        v = Video(fovs)
        v.id = prev_vid
        videos.append(v)
    return videos
def openImage(self, *args):
    """Prompt for a single image file and load it straight into the sharpen
    stage (skipping align/stack), guarding against oversized images."""
    self.openDialog.set_current_folder(path.expanduser("~"))
    self.openDialog.set_select_multiple(False)
    self.openDialog.set_filter(self.builder.get_object("imageFilter"))
    response = self.openDialog.run()
    self.openDialog.hide()
    if (response == Gtk.ResponseType.OK):
        self.disableUI()
        g.file = self.openDialog.get_filename()
        try:
            self.video = Video()
            img = cv2.imread(g.file)
            h, w = img.shape[:2]
            # Abort the load when the image would exceed available memory.
            if (not self.checkMemory(w, h)):
                raise MemoryError()
            self.window.set_title(path.split(g.file)[1] + " - " + UI.TITLE)
            self.saveDialog.set_current_name("")
            self.sharpen = Sharpen(g.file, True)
            self.builder.get_object("alignTab").set_sensitive(False)
            self.builder.get_object("stackTab").set_sensitive(False)
            self.builder.get_object("processTab").set_sensitive(True)
            self.tabs.set_current_page(UI.SHARPEN_TAB)
            self.frame.set_from_file(g.file)
        except MemoryError as error:
            # NOTE(review): on MemoryError the UI is left disabled — enableUI()
            # only runs in the generic handler below. Confirm this is intended
            # (checkMemory may already have shown its own dialog).
            pass
        except:
            # Open Failed — invalid image, unreadable file, etc.
            self.showErrorDialog(
                "There was an error opening the image, make sure it is a valid image."
            )
            self.enableUI()
def main():
    """Upload ./vid.mp4 to YouTube, taking title/category/privacy from the
    module-level TITLE, CATEGORYID and STATUS constants."""
    # Disable OAuthlib's HTTPS verification when running locally.
    # *DO NOT* leave this option enabled in production.
    os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
    vid = Video(TITLE, CATEGORYID, STATUS)
    youtube = get_authenticated_service()
    request = youtube.videos().insert(
        part="snippet,status",
        body={
            "snippet": {
                "categoryId": vid.getCategoryId(),
                "description": "Description of uploaded video.",
                "title": vid.getTitle()
            },
            "status": {
                "privacyStatus": vid.getStatus()
            }
        },
        # Pointer to video file
        media_body=MediaFileUpload("vid.mp4"))
    response = request.execute()
    print(response)
def read_data(file):
    """Read a whitespace-delimited table and group consecutive rows (keyed by
    the video id in column 1) into Video objects holding FOV lists.

    FOV fields come from columns 3, 4, 10, 12, 13 of each row.
    Returns the list of Video objects.
    """
    data = np.genfromtxt(file, unpack=True)
    prev_vid = 1
    fovs = []
    idx = 0
    videos = []
    for vid in data[1]:
        if vid == prev_vid:
            fov = FOV(data[3][idx], data[4][idx], data[10][idx],
                      data[12][idx], data[13][idx])
            fovs.append(fov)
        else:
            # Flush the finished group; it belongs to prev_vid, not vid.
            v = Video(fovs)
            v.id = prev_vid  # BUG FIX: was `vid`, labelling the group with the NEXT video's id
            videos.append(v)
            fovs = []
            fov = FOV(data[3][idx], data[4][idx], data[10][idx],
                      data[12][idx], data[13][idx])
            fovs.append(fov)
        idx = idx + 1
        prev_vid = vid
    # BUG FIX: flush the trailing group — the last video was dropped.
    if fovs:
        v = Video(fovs)
        v.id = prev_vid
        videos.append(v)
    return videos
def getListofVideos(self):
    """
    This method is used to get a list of all available videos in the VHH-MMSI system.

    Queries the video-search endpoint, skips frame-counter/eyeland/am- test
    entries, and wraps each remaining record in a Video instance.

    :return: This method returns a list of video objects (Class-type: Video)
             which holds all video specific meta-data.
    """
    print("load list of videos ... ")
    print("send request: " + str(self.API_VIDEO_SEARCH_ENDPOINT))
    res = requests.get(self.API_VIDEO_SEARCH_ENDPOINT
                       )  #, verify=self.__pem_path) # params=params,
    print("receive response")
    res_json = res.json()
    video_instance_list = []
    for i in range(0, len(res_json)):
        entry = res_json[i]
        vid = int(entry['id'])
        originalFileName = entry['originalFileName']
        url = entry['url']
        processed_flag = entry['processed']
        # filter frame_counter videos and amX videos
        if not "video-framecounter" in originalFileName and not "eyeland" in originalFileName and not "am-" in originalFileName:
            video_instance = Video(self.__core_config)
            video_instance.create_video(
                vid=vid,
                originalFileName=originalFileName,
                url=url,
                download_path=self.__video_download_path,
                processed_flag=processed_flag)
            video_instance_list.append(video_instance)
    return video_instance_list
def videos_by_pl(self, youtube, pl_id):
    """Fetch up to 50 items of playlist *pl_id* and build Video objects with a
    placeholder thumbnail when the real one is missing.

    NOTE(review): `Videos` is assembled but never returned — the caller gets
    None. Confirm whether a trailing `return Videos` was lost.
    """
    plvideos_response = youtube.playlistItems().list(
        part="snippet",
        playlistId=pl_id,
        maxResults=50).execute()
    Videos = []
    for item in plvideos_response["items"]:
        snippet = item["snippet"]
        if snippet is not None:
            plid = item["id"]
            title = snippet["title"]
            desc = snippet["description"]
            resourceId = snippet["resourceId"]
            if resourceId is not None:
                vidId = resourceId["videoId"]
                try:
                    thumbnails = snippet["thumbnails"]
                    defUrl = thumbnails["default"]["url"]
                    medUrl = thumbnails["medium"]["url"]
                    Videos.append(
                        Video(vidId, title, desc, plid, defUrl, medUrl))
                except KeyError, e:
                    # Missing thumbnail: substitute the local 404 image.
                    defUrl = "/static/images/404.jpg"
                    Videos.append(Video(vidId, title, desc, plid, defUrl, None))
                    pass
                except:
                    # Best-effort: skip entries with any other malformed field.
                    pass
def read_fovs(file): data = np.genfromtxt(file, dtype=None, unpack=True, delimiter='\t') prev_vid = data[0][0] fovs = [] idx = 0 videos = [] for d in data: vid = d[0] # print str(vid), str(prev_vid) if vid == prev_vid: # lat, lon, compass, R, alpha fov = FOV(data[idx][2],data[idx][3],data[idx][4],data[idx][5],data[idx][6]) fovs.append(fov) else: # new video v = Video(fovs) v.id = prev_vid videos.append(v) # print v.to_str() # new fovs fovs = [] fov = FOV(data[idx][2],data[idx][3],data[idx][4],data[idx][5],data[idx][6]) fovs.append(fov) idx = idx + 1 prev_vid = vid print "number of videos", len(videos) return videos
def setVideo(self, dataPoint, storage):
    """Make *dataPoint* the active data point.

    Releases the previous data point's features, loads the new one's features
    from *storage*, rebinds the video to the new data point's path, and
    refreshes the duration label.
    """
    previous = self.dataPoint
    if previous is not None:
        previous.clearFeatures()
    dataPoint.loadFeatures(storage)
    self.dataPoint = dataPoint
    self.video = Video(dataPoint.videoPath)
    self.setFullTimeLabel()
def __init__(self, frames):
    """Initialize alignment state for the given frame list and record the
    source frame dimensions."""
    self.frames = frames
    self.tmats = []  # (frame, M, diff) transformation tuples
    self.count = 0
    self.total = 0
    # Bounding box of the aligned region, grown during alignment.
    self.minX = 0
    self.minY = 0
    self.maxX = 0
    self.maxY = 0
    video = Video()
    # g.file is the module-level global holding the currently opened file;
    # frame 0 supplies the reference dimensions.
    self.height, self.width = video.getFrame(g.file, 0).shape[:2]
def generate_video_traffic(job):
    ''' Generate traffic file for the whole video.

    Args:
      in_db_file  - should have all the images for which traffic is generated
      job         - the same as for process_video

    Returns a dict with the source db path and one traffic entry per frame in
    the requested frame range.
    '''
    assertParamIsThere(job, 'in_db_file')
    assertParamIsThere(job, 'out_video_dir')
    setParamUnlessThere(job, 'frame_range', '[::]')
    assertParamIsThere(job, 'video_dir')
    video = Video(video_dir=job['video_dir'])
    camera = video.build_camera()

    assert op.exists(atcity(job['in_db_file'])), \
        'in db %s does not exist' % atcity(job['in_db_file'])
    # Only the per-image timestamps are needed from the input db.
    conn_in = sqlite3.connect(atcity(job['in_db_file']))
    c_in = conn_in.cursor()
    c_in.execute('SELECT time FROM images')
    timestamps = c_in.fetchall()
    conn_in.close()

    cad = Cad()
    # Pick the traffic model: fixed speed vs. random car count.
    if 'speed_kph' in job:
        model = TrafficModel(camera, video, cad=cad, speed_kph=job['speed_kph'])
    elif 'num_cars' in job:
        model = TrafficModelRandom(camera, video, cad=cad, num_cars_mean=job['num_cars'])
    else:
        assert False

    diapason = Diapason(len(timestamps), job['frame_range'])
    traffic = {'in_db_file': job['in_db_file']}
    traffic['frames'] = []
    for frame_id in diapason.frame_range:
        logging.info('generating traffic for frame %d' % frame_id)
        timestamp = timestamps[frame_id][0]
        time = parseTimeString(timestamp)
        traffic_frame = model.get_next_frame(time)
        traffic_frame['frame_id'] = frame_id  # for validating
        traffic['frames'].append(traffic_frame)
    return traffic
def blendAverage(frames, file, ref, minX, maxX, minY, maxY, drizzleFactor,
                 drizzleInterpolation, conn):
    """Accumulate every aligned frame into a single float64 image.

    Each (frame, M, diff) entry is read from *file*, warped by transform()
    into the common bounding box, and summed. A single progress message is
    sent on *conn*; the raw (un-normalized) sum is returned.
    """
    video = Video()
    accumulator = None
    for frameIndex, M, diff in frames:
        raw = video.getFrame(file, frameIndex)
        aligned = transform(raw, ref, M, minX, maxX, minY, maxY,
                            drizzleFactor, drizzleInterpolation).astype(np.float64)
        if accumulator is None:
            accumulator = aligned
        else:
            accumulator += aligned
    conn.send("Stacking Frames")
    return accumulator
def readConfiguration(self):
    """Parse the problem file named by self.theFile.

    Layout: a header line with five counts, one line of video sizes, then per
    endpoint a latency/cache-count line followed by its cache-latency lines,
    and finally the request-description lines.

    Populates numberOfVideos/Endpoints/RequestDescription/CacheServer,
    cacheServerCapacity, and the videos/endpoints/requests collections.
    """
    # `with` guarantees the handle is closed even if parsing raises; the
    # original leaked the file object on any exception before close().
    with open(self.theFile) as theFile:
        values = theFile.readline().split(' ')
        self.numberOfVideos = int(values[0])
        self.numberOfEndpoints = int(values[1])
        self.numberOfRequestDescription = int(values[2])
        self.numberOfCacheServer = int(values[3])
        self.cacheServerCapacity = int(values[4])
        # One size entry per video.
        values = theFile.readline().split(' ')
        self.videos = [Video(int(el)) for el in values]
        for i in range(0, self.numberOfEndpoints):
            values = theFile.readline().split(' ')
            latency = int(values[0])
            n = int(values[1])
            theList = []
            # n lines of (cache_server_id, latency) pairs for this endpoint.
            for j in range(0, n):
                values = theFile.readline().split(' ')
                theList.append([int(values[0]), int(values[1])])
            self.endpoints.append(Endpoint(latency, theList))
        for i in range(0, self.numberOfRequestDescription):
            values = theFile.readline().split(' ')
            # (request_count, video_id, endpoint_id) per the original argument order.
            self.requests.append(
                EndpointRequests(int(values[2]), int(values[0]), int(values[1])))
def _retrieve_videos(self, limit=20, offset=0, session=None, order=None, private=0):
    """Fetch this album's videos from the API.

    Returns a list of Video wrappers, or False when the album id is unknown
    or the API returns nothing.
    """
    from Video import Video
    album_id = self._get_safe('album_id')
    if not album_id:
        return False
    response = api.request('/album/' + album_id + '/videos',
                           method="GET",
                           params=dict(private=private,
                                       order=order,
                                       session=session,
                                       offset=offset,
                                       limit=limit,
                                       album=album_id))
    if not response:
        return False
    return [Video(meta={'video': entry}) for entry in response['videos']]
def inputVideo(self, startTime=0, endTime=0):
    """Build the upload video (optionally cropped) and feed its absolute path
    into the page's file-input element via Selenium."""
    WebDriverWait(self.bot, 10).until(
        EC.presence_of_element_located(
            (By.CLASS_NAME, "upload-btn-input")))
    file_input_element = self.bot.find_elements_by_class_name(
        "upload-btn-input")[0]
    # Check if file has correct .mp4 extension, else throw error.
    self.video = Video(self.userRequest["dir"], self.userRequest["vidTxt"])
    self.video.createVideo()
    # NOTE(review): this parses as (startTime != 0) and (endTime == 0), i.e.
    # crop only when a start is given but no end — confirm it isn't meant to
    # be `not (startTime == 0 and endTime == 0)`.
    if not startTime == 0 and endTime == 0:
        self.video.customCrop(startTime, endTime)
    while not os.path.exists(
            self.userRequest["dir"]):  # Wait for path to exist
        time.sleep(1)
    abs_path = os.path.join(os.getcwd(), self.userRequest["dir"])
    file_input_element.send_keys(abs_path)
def getTrendingData():
    """Scrape the YouTube trending page and return a list of
    Video(title, description, tags) objects, skipping malformed entries."""
    url = "https://www.youtube.com/feed/trending"
    response = requests.get(url)
    soup = BeautifulSoup(response.text, features="lxml")
    #list that will store Video objects
    videos_list = []
    #finds all the videos in the html
    # NOTE(review): CSS class "yt-lockup-content" is tied to a specific YouTube
    # page layout — markup changes will make this return an empty list silently.
    videos_in_html = soup.findAll("div", class_="yt-lockup-content")
    #for every video get title, desc and tags then store in a list
    for content in videos_in_html:
        try:
            title = getTitle(content)
            description = getDesc(content)
            video_href = getHref(content)
            video_url = "https://www.youtube.com{}".format(video_href)
            tags = getTags(video_url)
            curr_video = Video(title, description, tags)
            videos_list.append(curr_video)
        except Exception as e:
            # Best-effort scrape: skip any entry whose markup doesn't match.
            ...
    return videos_list
def openImageSequence(self, *args):
    """Prompt for multiple image files and start the Video loader on a
    background thread, disabling the UI until loading completes."""
    self.openDialog.set_current_folder(path.expanduser("~"))
    self.openDialog.set_select_multiple(True)
    self.openDialog.set_filter(self.builder.get_object("imageFilter"))
    response = self.openDialog.run()
    self.openDialog.hide()
    if (response == Gtk.ResponseType.OK):
        try:
            # g.file holds the list of selected paths for the rest of the app.
            g.file = self.openDialog.get_filenames()
            self.video = Video()
            # Raises MemoryError if the sequence would not fit in memory.
            self.video.checkMemory()
            thread = Thread(target=self.video.run, args=())
            thread.start()
            self.disableUI()
        except MemoryError as error:
            self.enableUI()
def _get_videos(videos_type, **kwargs):
    """Query the '/videos/<type>' API endpoint.

    Returns a list of Video wrappers built from the response, or False when
    the request fails or carries no 'videos' payload.
    """
    response = api.request('/videos/' + videos_type, method="GET", params=kwargs)
    if not (response and 'videos' in response):
        return False
    return [Video(meta={'video': entry}) for entry in response['videos']]
def __init__(self):
    """Populate the global CardCatalog from the four pipe-delimited data files.

    Note: the original per-file `if inf.closed: print("Error opening file.")`
    checks were dead code — open() either returns an open handle or raises —
    so they are removed; `with` now guarantees each handle is closed.
    """
    self._load_catalog('book.txt', Book)
    self._load_catalog('periodic.txt', Periodical)
    self._load_catalog('video.txt', Video)
    self._load_catalog('film.txt', Film)

def _load_catalog(self, filename, factory):
    """Append one `factory(*fields)` entry to CardCatalog per '|'-delimited
    line of *filename*."""
    with open(filename, 'r') as inf:
        for line in inf.readlines():
            fields = line.strip().split('|')
            CardCatalog.append(factory(*fields))
def read_Video():
    """Build 50 Video records cycling through the three local demo clips,
    each tagged with a random label in [0, 11]."""
    base_dir = "F:\\Desktop\\opencv_detection\\"
    videos = []
    for index in range(50):
        label = random.randint(0, 11)
        clip_path = base_dir + "video" + str(index % 3 + 1) + ".mp4"
        videos.append(Video(index, clip_path, "label" + str(label), label, 0))
    return videos
def get_videos_details(youtube, video_id):
    """Fetch snippet/contentDetails/statistics for one video, build a Video
    record, and append its add_video() result to the module-level updates_list."""
    results = youtube.videos().list(
        part="snippet,contentDetails,statistics",
        id=video_id,
    ).execute()
    video_url = "https://www.youtube.com/watch?v=" + video_id
    title = results["items"][0]["snippet"]["title"]
    thumbnail_url = results["items"][0]["snippet"]["thumbnails"]["default"][
        "url"]
    original_image_url = results["items"][0]["snippet"]["thumbnails"]["high"][
        "url"]
    # ISO-8601 duration string, e.g. "PT4M13S".
    duration = results["items"][0]["contentDetails"]["duration"]
    views = results["items"][0]["statistics"]["viewCount"]
    video = Video(video_id, views, duration, title, video_url, thumbnail_url,
                  original_image_url)
    updates_list.append(video.add_video())
def __init__(self, stackedImage, isFile=False):
    """Prepare the processing pipeline from either an image file reference
    (isFile=True) or an in-memory stacked image."""
    if (isFile):
        # Single image provided
        video = Video()
        stackedImage = video.getFrame(None, stackedImage)
    else:
        # Use the higher bit depth version from the stacking process
        pass
    stackedImage = cv2.cvtColor(stackedImage, cv2.COLOR_BGR2RGB)
    self.h, self.w = stackedImage.shape[:2]
    # All stages start from the same source image until processed.
    self.sharpenedImage = stackedImage
    self.debluredImage = stackedImage
    self.finalImage = stackedImage
    self.calculateCoefficients(stackedImage)
    # Dirty flags for each processing stage.
    self.processAgain = False
    self.processDeblurAgain = False
    self.processColorAgain = False
def __init__(self):
    """Create the Tk root window, the View, and the Video model, and reset
    session state."""
    self.root = Tk.Tk()
    self.view = View(self.root, self)
    self.video_model = Video()
    self.user_input = ''    # last text/command entered by the user
    self.recording = False  # True while a capture is in progress
    self.video_data = ''
    self.output = ''
def set_video_cli(self, playfile):
    u"""Initial video setup for CLI mode.

    Mirrors the play directory structure under the output directory, builds
    the Video object for *playfile*, and pushes all detection settings to it.
    """
    print(playfile)
    # Map the playfile's location under playdir to the same relative path under outdir.
    tmppath = self.settei.settings["outdir"] + playfile.replace(
        self.settei.settings["playdir"], "")
    recursive_outdir = str(Path(tmppath).parent)
    self.playfile = playfile
    self.video = Video(playfile, recursive_outdir)
    self.video.set_bounding(self.settei.settings["bounding"])
    self.video.set_detecttype(self.settei.settings["detecttype"])
    self.video.set_verbose(self.settei.settings["verbose"])
    top = self.settei.settings["detectionTop"]
    bottom = self.settei.settings["detectionBottom"]
    left = self.settei.settings["detectionLeft"]
    right = self.settei.settings["detectionRight"]
    # Order matters: the image scale must be set before the detection area.
    self.video.set_imgscale(self.settei.settings["imgscale"])
    self.video.set_detectionarea(top, bottom, left, right)
def start(self):
    """Begin live capture: open the Video source and drive play() from a
    zero-interval timer (fires as fast as the event loop allows)."""
    self.play_flag = 1
    self.video = Video()
    self._timer1 = QTimer(self)
    # NOTE(review): try/finally starts the timer even if connect() raises,
    # leaving a timer ticking with no slot — confirm this is intentional.
    try:
        self._timer1.timeout.connect(self.play)
    finally:
        self._timer1.start(0)
    self.update()
def __init__(self, parent=None):
    """Set up the generated UI, open webcam device 0 through the Video
    wrapper, and start a ~27 ms (≈37 fps) refresh timer driving play()."""
    QtGui.QWidget.__init__(self, parent)
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)
    self.video = Video(cv2.VideoCapture(0))
    self._timer = QtCore.QTimer(self)
    self._timer.timeout.connect(self.play)
    self._timer.start(27)
    self.update()
def __init__( self, motors, target=None, ): rospy.init_node('naf') #port = "/dev/ttyUSB1" self.publishers = self.initialize_publisher(motors) self.angle = np.zeros((len(self.publishers), )) self.velocity = np.zeros((len(self.publishers), )) self.initialize_subscriber(motors) self.video = Video([84, 84]) if target is not None: self.target = target self.rate = rospy.Rate(5)
def Main():
    # When True, run a 100-frame background-sampling pass before the UI starts.
    train = False
    print 'training started'
    if train:
        v = Video()
        n = NormalizedRGB()
        for i in range(100):
            frm = v.outFrame()
            n.getRGB(frm)
            norm = n.normalized()
            plain = v.imagePlanes(norm)
            # Plane index 1 feeds the background model.
            sample_bg(plain[1])
        print 'training ends...'
        # Release the capture device before the UI opens its own.
        del v
    m = MainUI()
    gtk.main()
def Main(): train=False print 'training started' if train: v=Video() n=NormalizedRGB() for i in range(100): frm=v.outFrame() n.getRGB(frm) norm=n.normalized() plain=v.imagePlanes(norm) sample_bg(plain[1]) print 'training ends...' del v m=MainUI() gtk.main()
def __init__(self, config = None):
    """Bring up the whole game engine: logging, config, video/audio devices,
    OpenGL viewport, input, resources, animated-stage configuration, and the
    active theme. *config* is an optional pre-loaded Config instance."""
    Log.debug("GameEngine class init (GameEngine.py)...")
    self.mainMenu = None    #placeholder for main menu object - to prevent reinstantiation
    self.createdGuitarScene = False   #MFH - so we only create ONE guitarscene...!
    self.currentScene = None
    self.versionString = version  #stump: other version stuff moved to allow full version string to be retrieved without instantiating GameEngine
    self.uploadVersion = "%s-4.0" % Version.PROGRAM_NAME  #akedrou - the version passed to the upload site.
    self.dataPath = Version.dataPath()
    # Log the full environment up front to make bug reports diagnosable.
    Log.debug(self.versionString + " starting up...")
    Log.debug("Python version: " + sys.version.split(' ')[0])
    Log.debug("Pygame version: " + str(pygame.version.ver))
    Log.debug("PyOpenGL version: " + OpenGL.__version__)
    Log.debug("Numpy version: " + np.__version__)
    Log.debug("PIL version: " + Image.VERSION)
    Log.debug("sys.argv: " + repr(sys.argv))
    Log.debug("os.name: " + os.name)
    Log.debug("sys.platform: " + sys.platform)
    if os.name == 'nt':
        import win32api
        Log.debug("win32api.GetVersionEx(1): " + repr(win32api.GetVersionEx(1)))
    elif os.name == 'posix':
        Log.debug("os.uname(): " + repr(os.uname()))

    """
    Constructor.
    @param config: L{Config} instance for settings
    """

    self.tutorialFolder = "tutorials"

    if not config:
        config = Config.load()

    self.config = config

    fps = self.config.get("video", "fps")

    self.tasks = []
    self.frameTasks = []
    self.fps = fps
    self.currentTask = None
    self.paused = []
    self.running = True
    self.clock = pygame.time.Clock()

    self.title = self.versionString
    self.restartRequested = False

    # evilynux - Check if theme icon exists first, then fallback on FoFiX icon.
    themename = self.config.get("coffee", "themename")
    themeicon = os.path.join(Version.dataPath(), "themes", themename, "icon.png")
    fofixicon = os.path.join(Version.dataPath(), "fofix_icon.png")
    icon = None
    if os.path.exists(themeicon):
        icon = themeicon
    elif os.path.exists(fofixicon):
        icon = fofixicon

    self.video = Video(self.title, icon)
    if self.config.get("video", "disable_screensaver"):
        self.video.disableScreensaver()

    self.audio = Audio()
    self.frames = 0
    self.fpsEstimate = 0
    self.priority = self.config.get("engine", "highpriority")
    self.show_fps = self.config.get("video", "show_fps")
    self.advSettings = self.config.get("game", "adv_settings")
    self.restartRequired = False
    self.quicksetRestart = False
    self.quicksetPerf = self.config.get("quickset", "performance")
    self.scrollRate = self.config.get("game", "scroll_rate")
    self.scrollDelay = self.config.get("game", "scroll_delay")

    Log.debug("Initializing audio.")
    frequency = self.config.get("audio", "frequency")
    bits = self.config.get("audio", "bits")
    stereo = self.config.get("audio", "stereo")
    bufferSize = self.config.get("audio", "buffersize")
    self.audio.open(frequency = frequency, bits = bits, stereo = stereo, bufferSize = bufferSize)

    self.cmdPlay = 0
    self.cmdMode = None
    self.cmdDiff = None
    self.cmdPart = None

    self.gameStarted = False
    self.world = None

    self.audioSpeedFactor = 1.0

    Log.debug("Initializing video.")
    #myfingershurt: ensuring windowed mode starts up in center of the screen instead of cascading positions:
    os.environ['SDL_VIDEO_WINDOW_POS'] = 'center'

    width, height = [int(s) for s in self.config.get("video", "resolution").split("x")]
    fullscreen = self.config.get("video", "fullscreen")
    multisamples = self.config.get("video", "multisamples")
    self.video.setMode((width, height), fullscreen = fullscreen, multisamples = multisamples)
    Log.debug("OpenGL version: " + glGetString(GL_VERSION))
    Log.debug("OpenGL vendor: " + glGetString(GL_VENDOR))
    Log.debug("OpenGL renderer: " + glGetString(GL_RENDERER))
    Log.debug("OpenGL extensions: " + ' '.join(sorted(glGetString(GL_EXTENSIONS).split())))

    # A default-mode fallback means the requested mode failed; reset config.
    if self.video.default:
        self.config.set("video", "fullscreen", False)
        self.config.set("video", "resolution", "800x600")

    if self.config.get("video", "shader_use"):
        shaders.set(os.path.join(Version.dataPath(), "shaders"))

    # Enable the high priority timer if configured
    if self.priority:
        Log.debug("Enabling high priority timer.")
        self.fps = 0  # High priority

    # evilynux - This was generating an error on the first pass (at least under
    # GNU/Linux) as the Viewport was not set yet.
    try:
        viewport = glGetIntegerv(GL_VIEWPORT)
    except:
        viewport = [0, 0, width, height]
    h = viewport[3] - viewport[1]
    w = viewport[2] - viewport[0]
    geometry = (0, 0, w, h)
    self.svg = SvgContext(geometry)
    glViewport(int(viewport[0]), int(viewport[1]), int(viewport[2]), int(viewport[3]))

    self.startupMessages = self.video.error
    self.input = Input()
    self.view = View(self, geometry)
    self.resizeScreen(w, h)

    self.resource = Resource(Version.dataPath())
    self.mainloop = self.loading
    self.menuMusic = False

    self.setlistMsg = None

    # Load game modifications
    Mod.init(self)
    self.addTask(self.input, synchronized = False)
    self.addTask(self.view, synchronized = False)
    self.addTask(self.resource, synchronized = False)
    self.data = Data(self.resource, self.svg)

    ##MFH: Animated stage folder selection option
    #<themename>\Stages still contains the backgrounds for when stage rotation is off, and practice.png
    #subfolders under Stages\ will each be treated as a separate animated stage set
    self.stageFolders = []
    currentTheme = themename

    stagespath = os.path.join(Version.dataPath(), "themes", currentTheme, "backgrounds")
    themepath = os.path.join(Version.dataPath(), "themes", currentTheme)
    if os.path.exists(stagespath):
        self.stageFolders = []
        allFolders = os.listdir(stagespath)
        #this also includes all the stage files - so check to see if there is at least one .png file inside each folder to be sure it's an animated stage folder
        for name in allFolders:
            aniStageFolderListing = []
            thisIsAnAnimatedStageFolder = False
            try:
                aniStageFolderListing = os.listdir(os.path.join(stagespath, name))
            except Exception:
                # Not a directory (plain stage file) — leave the flag False.
                thisIsAnAnimatedStageFolder = False
            for aniFile in aniStageFolderListing:
                if os.path.splitext(aniFile)[1] == ".png" or os.path.splitext(aniFile)[1] == ".jpg" or os.path.splitext(aniFile)[1] == ".jpeg":
                    #we've found at least one .png file here, chances are this is a valid animated stage folder
                    thisIsAnAnimatedStageFolder = True
            if thisIsAnAnimatedStageFolder:
                self.stageFolders.append(name)

        i = len(self.stageFolders)
        if i > 0:
            #only set default to first animated subfolder if one exists - otherwise use Normal!
            defaultAniStage = str(self.stageFolders[0])
        else:
            defaultAniStage = "Normal"
        Log.debug("Default animated stage for " + currentTheme + " theme = " + defaultAniStage)
        aniStageOptions = dict([(str(self.stageFolders[n]), self.stageFolders[n]) for n in range(0, i)])
        aniStageOptions.update({"Normal": _("Slideshow")})
        if i > 1:
            #only add Random setting if more than one animated stage exists
            aniStageOptions.update({"Random": _("Random")})
        Config.define("game", "animated_stage_folder", str, defaultAniStage, text = _("Animated Stage"), options = aniStageOptions )

        #MFH: here, need to track and check a new ini entry for last theme - so when theme changes we can re-default animated stage to first found
        lastTheme = self.config.get("game", "last_theme")
        if lastTheme == "" or lastTheme != currentTheme:
            #MFH - no last theme, and theme just changed:
            self.config.set("game", "animated_stage_folder", defaultAniStage)  #force defaultAniStage
            self.config.set("game", "last_theme", currentTheme)

        selectedAnimatedStage = self.config.get("game", "animated_stage_folder")
        if selectedAnimatedStage != "Normal" and selectedAnimatedStage != "Random":
            if not os.path.exists(os.path.join(stagespath, selectedAnimatedStage)):
                Log.warn("Selected animated stage folder " + selectedAnimatedStage + " does not exist, forcing Normal.")
                self.config.set("game", "animated_stage_folder", "Normal")  #MFH: force "Standard" currently selected animated stage folder is invalid
    else:
        Config.define("game", "animated_stage_folder", str, "None", text = _("Animated Stage"), options = ["None", _("None")])
        Log.warn("No stages\ folder found, forcing None setting for Animated Stage.")
        self.config.set("game", "animated_stage_folder", "None")  #MFH: force "None" when Stages folder can't be found

    # Prefer the theme's CustomTheme module; fall back to the generic Theme.
    try:
        fp, pathname, description = imp.find_module("CustomTheme", [themepath])
        theme = imp.load_module("CustomTheme", fp, pathname, description)
        self.theme = theme.CustomTheme(themepath, themename)
    except ImportError:
        self.theme = Theme(themepath, themename)
    self.addTask(self.theme)

    self.input.addKeyListener(FullScreenSwitcher(self), priority = True)
    self.input.addSystemEventListener(SystemEventHandler(self))

    self.debugLayer = None
    self.startupLayer = None
    self.loadingScreenShown = False
    self.graphicMenuShown = False

    Log.debug("Ready.")
class MainWidget(QMainWindow):
    """Full-screen pose-estimation main window: live capture, recording, demo
    playback, camera calibration, and chessboard-based pose estimation.

    play_flag encodes the playback source: 0 idle, 1 live capture,
    2 demo video, 3 recording.
    """

    def __init__(self, parent=None):
        super(MainWidget, self).__init__(parent)
        self.raw_img = np.array([])
        self.play_flag = 0
        self.demo_path = './demoVideo/Demo.avi'
        # Set up the main window.
        self.setWindowIcon(QIcon('./icons/windowIcon.png'))
        self.showFullScreen()
        self.setWindowTitle(self.tr("位姿估计"))
        self.imageLabel = QLabel()
        #self.img = QPixmap.fromImage(QImage("640_4800.jpg"))
        #te.setPixmap(self.img)
        self.imageLabel.setMaximumSize(480, 360)
        self.imageLabel.setAlignment(Qt.AlignCenter)
        self.setCentralWidget(self.imageLabel)
        # Build the toolbar.
        self.toolbar()
        # Build the menu bar.
        self.menu()
        # Dock 1: the frame used for pose measurement.
        dock1 = QDockWidget(self.tr("进行位姿测量的帧"), self)
        dock1.setFeatures(QDockWidget.DockWidgetMovable)
        dock1.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
        self.te1 = QLabel()
        dock1.setWidget(self.te1)
        dock1.setMaximumSize(320, 240)
        dock1.setMinimumSize(320, 240)
        self.addDockWidget(Qt.RightDockWidgetArea, dock1)
        # Dock 2: pose measurement results.
        dock2 = QDockWidget(self.tr("位姿测量结果"), self)
        dock2.setFeatures(QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetClosable)
        self.te2 = QTextEdit(self.tr("本窗口将显示位姿处理结果"))
        dock2.setWidget(self.te2)
        dock2.setMinimumSize(320, 240)
        self.addDockWidget(Qt.RightDockWidgetArea, dock2)

    def toolbar(self):
        """Create the start/stop/estimate/quit/photo/record toolbar actions."""
        # Define the quit, start, stop, and estimate actions.
        quitAction = QAction(QIcon('./icons/quit.png'), u'退出', self)
        quitAction.triggered.connect(self.quit)
        startAction = QAction(QIcon('./icons/start.png'), u'开始采集', self)
        startAction.triggered.connect(self.start)
        stopAction = QAction(QIcon('./icons/stop.png'), u'停止采集', self)
        stopAction.triggered.connect(self.stop)
        estimateAction = QAction(QIcon('./icons/estimate.png'), u'估计位姿', self)
        estimateAction.triggered.connect(self.estimate)
        # Action: capture calibration photos.
        calibCaptureAction = QAction(QIcon('./icons/camera.png'), u'拍照', self)
        self.connect(calibCaptureAction, SIGNAL("triggered()"), self.calibCapture)
        # Action: record video.
        recordVideoAction = QAction(QIcon('./icons/record.png'), u'录像', self)
        self.connect(recordVideoAction, SIGNAL("triggered()"), self.recordVideo)
        # Attach each action to its own toolbar.
        toolBar = self.addToolBar("开始")
        toolBar.addAction(startAction)
        toolBar = self.addToolBar("停止")
        toolBar.addAction(stopAction)
        toolBar = self.addToolBar("退出")
        toolBar.addAction(quitAction)
        toolBar = self.addToolBar("估计")
        toolBar.addAction(estimateAction)
        toolBar = self.addToolBar("拍照")
        toolBar.addAction(calibCaptureAction)
        toolBar = self.addToolBar("录像")
        toolBar.addAction(recordVideoAction)

    def menu(self):
        """Create the calibration / demo / settings menus."""
        # Action: calibrate the camera.
        calibAction = QAction(self.tr("标定相机"), self)
        calibAction.setStatusTip(self.tr("标定相机"))
        self.connect(calibAction, SIGNAL("triggered()"), self.calib_Cam)
        # Action: play the daytime demo video.
        dayDemoAction = QAction(self.tr("演示1"), self)
        self.connect(dayDemoAction, SIGNAL("triggered()"), self.dayDemo)
        # Action: open the pose-measurement settings dialog.
        estimateSettingAction = QAction(self.tr("位姿测量设置"), self)
        self.connect(estimateSettingAction, SIGNAL("triggered()"), self.estimateSetting)
        # Action: Select demo_path
        demoPathSettingAction = QAction(self.tr("演示路径设置"), self)
        self.connect(demoPathSettingAction, SIGNAL("triggered()"), self.setDemoPath)
        menubar = self.menuBar()
        calibMenu = menubar.addMenu(u'&标定')
        demoMenu = menubar.addMenu(u'&演示视频')
        settingMenu = menubar.addMenu(u'&设置')
        calibMenu.addAction(calibAction)
        demoMenu.addAction(dayDemoAction)
        demoMenu.addAction(demoPathSettingAction)
        settingMenu.addAction(estimateSettingAction)

    def start(self):
        """Start live capture, refreshing the display as fast as possible."""
        self.play_flag = 1
        self.video = Video()
        self._timer1 = QTimer(self)
        try:
            self._timer1.timeout.connect(self.play)
        finally:
            self._timer1.start(0)
        self.update()

    def play(self):
        """Grab a frame from the current source and show it; TypeError means no frame."""
        try:
            self.video.captureRawFrame()
            self.imageLabel.setPixmap(self.video.convertFrame())
        except TypeError:
            print "No frame"

    def playAndRecord(self):
        """Grab a frame, write it to the recording, and show it."""
        try:
            self.rec_video.captureRawFrame()
            self.rec_video.record()
            self.imageLabel.setPixmap(self.rec_video.convertFrame())
        except TypeError:
            print "No frame"

    def stop(self):
        """Stop the timer and release whichever source matches play_flag."""
        if self.play_flag == 1:
            self._timer1.stop()
            self.video.vs.stop()
        elif self.play_flag == 2:
            self._timer1.stop()
            self.video.capture.release()
        elif self.play_flag == 3:
            self._timer1.stop()
            self.rec_video.vs.stop()
            self.rec_video.writer.release()
        self.play_flag = 0

    def quit(self):
        """Stop the timer (if any) and exit the process."""
        try:
            self._timer1.stop()
        finally:
            sys.exit(0)

    def estimate(self):
        """Run chessboard pose estimation on the current frame, save the result
        to the experiment log, and display it in the dock widgets."""
        img = self.video.raw_img
        try:
            chessboard_h = self.settingDialog.height
            chessboard_w = self.settingDialog.width
            chessboard_size = self.settingDialog.size
            cmxDir = self.settingDialog.cmxDir
        except:
            # Settings not configured yet: open the settings dialog.
            # NOTE(review): the locals above are then undefined and the
            # poseEstimate call below raises NameError — confirm an early
            # return is missing here.
            self.settingDialog = settingDlg()
            self.settingDialog.show()
        pos = poseEstimate(img, chessboard_h, chessboard_w, chessboard_size, cmxDir)
        img = pos.solvePnP()
        # Save result to txt file in experiment_data
        self.saveData('test', pos.rvec_str, pos.tvec_str, pos.err_str)
        try:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            height, width = img.shape[:2]
            img = QImage(img, width, height, QImage.Format_RGB888)
            img = QPixmap.fromImage(img)
        except:
            return None
        img = img.scaledToHeight(320)
        img = img.scaledToWidth(240)
        self.te1.setPixmap(img)
        if pos.ret == 1:
            self.te2.setText(self.tr("旋转向量为:"))
            #self.te2.append('')
            self.te2.append(pos.rvec_str)
            self.te2.append(self.tr("平移向量为:"))
            #self.te2.append('')
            self.te2.append(pos.tvec_str)
            self.te2.append('')
            self.te2.append(self.tr("重投影误差是:") + pos.err_str)
        else:
            self.te2.setText(self.tr("No Chessboard!"))

    def calib_Cam(self):
        '''
        Open the calibration dialog; enter the parameters there to calibrate.
        The calibration result is saved to ./calibFile/calib.npz.
        '''
        self.inputDialog = InputDlg()
        self.inputDialog.show()

    def calibCapture(self):
        '''
        Capture one calibration photo into its own folder (intended usage:
        one shot every ~3 seconds, 15 shots total, while the user holds the
        chessboard at varying positions/angles; the resulting image paths
        feed calib_Cam).
        '''
        img = self.video.raw_img
        cv2.imwrite('./calibFile/calibImages/' + str(int(time.time())) + '.jpg', img)

    def recordVideo(self):
        """Start recording: open the recorder and refresh/record at ~10 fps."""
        self.play_flag = 3
        self.rec_video = recordVideo()
        self.rec_video.initRecord()
        self._timer1 = QTimer(self)
        try:
            self._timer1.timeout.connect(self.playAndRecord)
        finally:
            self._timer1.start(100)
        self.update()

    def dayDemo(self):
        '''Play the pre-recorded daytime video of the moving chessboard.'''
        self.startDemo(self.demo_path)

    def nightDemo(self):
        '''Play the pre-recorded night-time video of the moving chessboard.'''
        pass

    def startDemo(self, file):
        """Play the demo video *file* at ~10 fps through the shared play() slot."""
        self.play_flag = 2
        self.video = playDemoVideo(file)
        self._timer1 = QTimer(self)
        try:
            self._timer1.timeout.connect(self.play)
        finally:
            self._timer1.start(100)
        self.update()

    def saveData(self, experiment_name, rvec, tvec, error):
        """Append one 'rvec,tvec,error' line to ./experiment_data/<name>.txt."""
        f = open('./experiment_data/' + str(experiment_name) + '.txt', 'a')
        f.writelines(rvec + "," + tvec + "," + error + '\n')
        f.close()

    def estimateSetting(self):
        """Open the pose-measurement settings dialog."""
        self.settingDialog = settingDlg()
        self.settingDialog.show()

    def setDemoPath(self):
        """Let the user pick demo file(s); the first selection becomes demo_path."""
        demo_QStringList = QFileDialog.getOpenFileNames(self, "多文件选择", "./demoVideo", "All Files (*);;AVI Files (*.avi)")
        self.demo_path = str(demo_QStringList.join("<join>")).split("<join>")[0]
def __init__(self,parent=None):
    """Construct the labeling main window: two synced media players, a graphics
    scene with two video items plus an overlay canvas, UI signal wiring, and
    state initialisation. When debugMode is True, a hard-coded test video is
    loaded immediately."""
    self.debugMode = True
    # Hard-coded developer path; only used by debugLoadVideo().
    self.debugVideoPath = '/Users/071cht/Desktop/Lab/jaabagui/testt.mjpeg.avi'
    QMainWindow.__init__(self,parent)
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)
    self.installEventFilter(self)
    self.setFocusPolicy(Qt.StrongFocus)
    # (disabled) standalone position slider prototype:
    # self.positionSlider=QSlider(Qt.Horizontal)
    # self.positionSlider.setGeometry (800,800,100,30)
    # self.positionSlider.setRange(0, 0)
    # self.positionSlider.sliderMoved.connect(self.setPosition)
    # Two media players play the same file; player 2 notifies every 10 ms
    # so the overlay can track position closely.
    self.mediaPlayer1 = QMediaPlayer(None, QMediaPlayer.VideoSurface)
    self.mediaPlayer2 = QMediaPlayer(None, QMediaPlayer.VideoSurface)
    self.mediaPlayer2.setNotifyInterval(10)
    #self.mediaPlayer.metaDataChanged.connect(self.metaDataChanged)
    self.mediaPlayer1.durationChanged.connect(self.durationChanged)
    self.mediaPlayer1.positionChanged.connect(self.positionChanged)
    self.mediaPlayer2.positionChanged.connect(self.positionChanged)
    # Visualization: scene holds a plain video item and a custom Video item.
    self.scene = QGraphicsScene()
    self.ui.graphicsView.setScene(self.scene)
    self.videoItem1 = QGraphicsVideoItem()
    self.videoItem2 = Video()
    self.scene.addItem(self.videoItem1)
    self.scene.addItem(self.videoItem2)
    self.mediaPlayer1.setVideoOutput(self.videoItem1)
    self.mediaPlayer2.setVideoOutput(self.videoItem2)
    # Slider bar; range is set later by durationChanged().
    self.ui.horizontalSlider.setRange(0, 0)
    self.ui.horizontalSlider.sliderMoved.connect(self.setPosition)
    # Overlay canvas drawn on top of the video; it needs a back-reference
    # to this window.
    self.flyCanvas= TargetView()
    self.scene.addItem(self.flyCanvas)
    self.flyCanvas.setWindowReference(self)
    # lineEdit signals:
    self.ui.lineEdit.returnPressed.connect(self.lineEditChanged)
    # Menu/button callbacks.
    self.ui.actionQuit.triggered.connect(self.quit)
    self.ui.actionLoad_Project.triggered.connect(self.loadVideo)
    self.ui.actionImport_Labels.triggered.connect(self.loadLabels)
    self.ui.buttonPlay.clicked.connect(self.play)
    self.ui.actionSave.triggered.connect(self.saveLabels)
    # Behavior labeling buttons.
    self.ui.buttonBehavior.clicked.connect(self.behaviorButtonClick)
    self.ui.buttonNone.clicked.connect(self.noneButtonClick)
    # State initialisation (filled in when a video is loaded).
    self.loaded = False
    self.videoFilename = None
    self.frame_count=None
    self.width=None
    self.height=None
    self.frame_trans=None          # ms-per-frame conversion factor
    self.previous_frame=0
    self.current_frame=0
    self.behaviorButtonStart = False
    self.noneButtonStart = False
    self.currentFly=1
    # Register fly-id changed callback, and sync the canvas' current fly.
    self.flyCanvas.onCurrentFlyIdChanged(self.currentFlyIdChangedCallback)
    self.flyCanvas.setCurrentFlyId(self.currentFly)
    # Double-click on the video changes the fly id in the target view.
    self.videoItem2.onDoubleClick(self.flyCanvas.setCurrentFlyIdByXY)
    ########################
    # DEBUG PART HERE!!!!! #
    ########################
    if (self.debugMode):
        self.debugLoadVideo()
def __init__(self, config = None):
    """
    Constructor.

    Initialises audio, video/OpenGL, input, view, resources, networking
    state and background tasks, then enters the loading main loop.

    @param config: L{Config} instance for settings
    """
    if not config:
        config = Config.load()
    self.config = config
    fps = self.config.get("video", "fps")
    tickrate = self.config.get("engine", "tickrate")
    Engine.__init__(self, fps = fps, tickrate = tickrate)
    pygame.init()
    self.title = _("Frets on Fire")
    self.restartRequested = False
    self.handlingException = False
    self.video = Video(self.title)
    self.audio = Audio()
    Log.debug("Initializing audio.")
    frequency = self.config.get("audio", "frequency")
    bits = self.config.get("audio", "bits")
    stereo = self.config.get("audio", "stereo")
    bufferSize = self.config.get("audio", "buffersize")
    # Pre-open before pygame.init so the mixer settings take effect.
    self.audio.pre_open(frequency = frequency, bits = bits, stereo = stereo, bufferSize = bufferSize)
    pygame.init()
    self.audio.open(frequency = frequency, bits = bits, stereo = stereo, bufferSize = bufferSize)
    Log.debug("Initializing video.")
    # Resolution is stored as e.g. "640x480".
    width, height = [int(s) for s in self.config.get("video", "resolution").split("x")]
    fullscreen = self.config.get("video", "fullscreen")
    multisamples = self.config.get("video", "multisamples")
    self.video.setMode((width, height), fullscreen = fullscreen, multisamples = multisamples)
    # Enable the high priority timer if configured.
    if self.config.get("engine", "highpriority"):
        Log.debug("Enabling high priority timer.")
        self.timer.highPriority = True
    # Derive drawing geometry from the actual GL viewport.
    viewport = glGetIntegerv(GL_VIEWPORT)
    h = viewport[3] - viewport[1]
    w = viewport[2] - viewport[0]
    geometry = (0, 0, w, h)
    self.img = ImgContext(geometry)
    glViewport(int(viewport[0]), int(viewport[1]), int(viewport[2]), int(viewport[3]))
    self.input = Input()
    self.view = View(self, geometry)
    self.resizeScreen(w, h)
    self.resource = Resource(Version.dataPath())
    self.server = None
    self.sessions = []
    # Start in the loading loop; switches to main() once resources load.
    self.mainloop = self.loading
    # Load game modifications.
    Mod.init(self)
    theme = Config.load(self.resource.fileName("theme.ini"))
    Theme.open(theme)
    # Make sure we are using the new upload URL.
    if self.config.get("game", "uploadurl").startswith("http://kempele.fi"):
        self.config.set("game", "uploadurl", "http://fretsonfire.sourceforge.net/play")
    self.addTask(self.audio, synchronized = False)
    self.addTask(self.input, synchronized = False)
    self.addTask(self.view)
    self.addTask(self.resource, synchronized = False)
    self.data = Data(self.resource, self.img)
    self.input.addKeyListener(FullScreenSwitcher(self), priority = True)
    self.input.addSystemEventListener(SystemEventHandler(self))
    self.debugLayer = None
    self.startupLayer = None
    self.loadingScreenShown = False
    Log.debug("Ready.")
class GameEngine(Engine):
    """The main game engine."""
    def __init__(self, config = None):
        """
        Constructor.

        @param config: L{Config} instance for settings
        """
        if not config:
            config = Config.load()
        self.config = config
        fps = self.config.get("video", "fps")
        tickrate = self.config.get("engine", "tickrate")
        Engine.__init__(self, fps = fps, tickrate = tickrate)
        pygame.init()
        self.title = _("Frets on Fire")
        self.restartRequested = False
        self.handlingException = False
        self.video = Video(self.title)
        self.audio = Audio()
        Log.debug("Initializing audio.")
        frequency = self.config.get("audio", "frequency")
        bits = self.config.get("audio", "bits")
        stereo = self.config.get("audio", "stereo")
        bufferSize = self.config.get("audio", "buffersize")
        # Pre-open before pygame.init so the mixer settings take effect.
        self.audio.pre_open(frequency = frequency, bits = bits, stereo = stereo, bufferSize = bufferSize)
        pygame.init()
        self.audio.open(frequency = frequency, bits = bits, stereo = stereo, bufferSize = bufferSize)
        Log.debug("Initializing video.")
        # Resolution is stored as e.g. "640x480".
        width, height = [int(s) for s in self.config.get("video", "resolution").split("x")]
        fullscreen = self.config.get("video", "fullscreen")
        multisamples = self.config.get("video", "multisamples")
        self.video.setMode((width, height), fullscreen = fullscreen, multisamples = multisamples)
        # Enable the high priority timer if configured.
        if self.config.get("engine", "highpriority"):
            Log.debug("Enabling high priority timer.")
            self.timer.highPriority = True
        # Derive drawing geometry from the actual GL viewport.
        viewport = glGetIntegerv(GL_VIEWPORT)
        h = viewport[3] - viewport[1]
        w = viewport[2] - viewport[0]
        geometry = (0, 0, w, h)
        self.img = ImgContext(geometry)
        glViewport(int(viewport[0]), int(viewport[1]), int(viewport[2]), int(viewport[3]))
        self.input = Input()
        self.view = View(self, geometry)
        self.resizeScreen(w, h)
        self.resource = Resource(Version.dataPath())
        self.server = None
        self.sessions = []
        # Start in the loading loop; switches to main() once resources load.
        self.mainloop = self.loading
        # Load game modifications.
        Mod.init(self)
        theme = Config.load(self.resource.fileName("theme.ini"))
        Theme.open(theme)
        # Make sure we are using the new upload URL.
        if self.config.get("game", "uploadurl").startswith("http://kempele.fi"):
            self.config.set("game", "uploadurl", "http://fretsonfire.sourceforge.net/play")
        self.addTask(self.audio, synchronized = False)
        self.addTask(self.input, synchronized = False)
        self.addTask(self.view)
        self.addTask(self.resource, synchronized = False)
        self.data = Data(self.resource, self.img)
        self.input.addKeyListener(FullScreenSwitcher(self), priority = True)
        self.input.addSystemEventListener(SystemEventHandler(self))
        self.debugLayer = None
        self.startupLayer = None
        self.loadingScreenShown = False
        Log.debug("Ready.")

    def setStartupLayer(self, startupLayer):
        """
        Set the L{Layer} that will be shown when all the resources have been
        loaded. See L{Data}.

        @param startupLayer: Startup L{Layer}
        """
        self.startupLayer = startupLayer

    def isDebugModeEnabled(self):
        """Return True when the debug layer is currently active."""
        return bool(self.debugLayer)

    def setDebugModeEnabled(self, enabled):
        """
        Show or hide the debug layer.

        @type enabled: bool
        """
        if enabled:
            self.debugLayer = DebugLayer(self)
        else:
            self.debugLayer = None

    def toggleFullscreen(self):
        """
        Toggle between fullscreen and windowed mode.

        @return: True on success
        """
        if not self.video.toggleFullscreen():
            # on windows, the fullscreen toggle kills our textures, se we must
            # restart the whole game
            self.input.broadcastSystemEvent("restartRequested")
            self.config.set("video", "fullscreen", not self.video.fullscreen)
            return True
        self.config.set("video", "fullscreen", self.video.fullscreen)
        return True

    def restart(self):
        """Restart the game."""
        if not self.restartRequested:
            self.restartRequested = True
            self.input.broadcastSystemEvent("restartRequested")
        else:
            # evilynux - With self.audio.close(), calling self.quit() results in
            # a crash. Calling the parent directly as a workaround.
            Engine.quit(self)

    def quit(self):
        """Close audio and shut the engine down."""
        self.audio.close()
        Engine.quit(self)

    def resizeScreen(self, width, height):
        """
        Resize the game screen.

        @param width:  New width in pixels
        @param height: New height in pixels
        """
        self.view.setGeometry((0, 0, width, height))
        self.img.setGeometry((0, 0, width, height))

    def isServerRunning(self):
        """Return True when the game server task exists."""
        return bool(self.server)

    def startServer(self):
        """Start the game server."""
        if not self.server:
            Log.debug("Starting server.")
            self.server = Server(self)
            self.addTask(self.server, synchronized = False)

    def connect(self, host):
        """
        Connect to a game server.

        @param host: Name of host to connect to
        @return: L{Session} connected to remote server
        """
        Log.debug("Connecting to host %s." % host)
        session = ClientSession(self)
        session.connect(host)
        self.addTask(session, synchronized = False)
        self.sessions.append(session)
        return session

    def stopServer(self):
        """Stop the game server."""
        if self.server:
            Log.debug("Stopping server.")
            self.removeTask(self.server)
            self.server = None

    def disconnect(self, session):
        """
        Disconnect a L{Session}.

        @param session: L{Session} to disconnect
        """
        if session in self.sessions:
            Log.debug("Disconnecting.")
            self.removeTask(session)
            self.sessions.remove(session)

    def loadImgDrawing(self, target, name, fileName, textureSize = None):
        """
        Load an SVG drawing synchronously.

        @param target:      An object that will own the drawing
        @param name:        The name of the attribute the drawing will be assigned to
        @param fileName:    The name of the file in the data directory
        @param textureSize: Either None or (x, y), in which case the file will
                            be rendered to an x by y texture
        @return: L{ImgDrawing} instance
        """
        return self.data.loadImgDrawing(target, name, fileName, textureSize)

    def loading(self):
        """Loading state loop."""
        done = Engine.run(self)
        self.clearScreen()
        if self.data.essentialResourcesLoaded():
            if not self.loadingScreenShown:
                self.loadingScreenShown = True
                Dialogs.showLoadingScreen(self, self.data.resourcesLoaded)
                if self.startupLayer:
                    self.view.pushLayer(self.startupLayer)
                # Hand over control to the main loop from the next run() call.
                self.mainloop = self.main
            self.view.render()
        self.video.flip()
        return done

    def clearScreen(self):
        """Clear the frame buffer to the theme's background color."""
        self.img.clear(*Theme.backgroundColor)

    def main(self):
        """Main state loop."""
        # Tune the scheduler priority so that transitions are as smooth as
        # possible.
        if self.view.isTransitionInProgress():
            self.boostBackgroundThreads(False)
        else:
            self.boostBackgroundThreads(True)
        done = Engine.run(self)
        self.clearScreen()
        self.view.render()
        if self.debugLayer:
            self.debugLayer.render(1.0, True)
        self.video.flip()
        return done

    def run(self):
        """Run one iteration of the current main loop, recovering the GL state
        and showing a dialog on unexpected exceptions."""
        try:
            return self.mainloop()
        except KeyboardInterrupt:
            sys.exit(0)
        except SystemExit:
            sys.exit(0)
        except Exception, e:
            def clearMatrixStack(stack):
                # Best-effort unwinding of a possibly deep matrix stack.
                try:
                    glMatrixMode(stack)
                    for i in range(16):
                        glPopMatrix()
                except:
                    pass
            if self.handlingException:
                # A recursive exception is fatal as we can't reliably reset
                # the GL state.
                sys.exit(1)
            self.handlingException = True
            Log.error("%s: %s" % (e.__class__, e))
            import traceback
            traceback.print_exc()
            clearMatrixStack(GL_PROJECTION)
            clearMatrixStack(GL_MODELVIEW)
            Dialogs.showMessage(self, unicode(e))
            self.handlingException = False
            return True
__author__ = 'Rik Smit' parser = argparse.ArgumentParser(description='Creating cutous from video using Vatic annotations.') parser.add_argument('videofile', help='Location of the video file') parser.add_argument('annotationsfile', help='Location of the annotations file') parser.add_argument('outputdir', help='Location of the directory to store the cutouts') parser.add_argument('--cutoutsize', help='Dimensions of the cutouts.', default=(100, 100)) parser.add_argument('--framestep', help='Use every <framestep>th frame.', default=25) parser.add_argument('--negativescount', help='Amount of negatives to cutout for each frame.', default=50) parser.add_argument('--pos', action='store_false', help="Whether to exclude extracting positives") parser.add_argument('--neg', action='store_false', help="Whether to exclude extracting negatives") args = parser.parse_args() videoFileName = args.videofile cutoutsDirectory = args.outputdir annotationsFileName = args.annotationsfile video = Video(videoFileName, annotationsFileName) video.load() video.cutoutSize = args.cutoutsize video.negExamplesPerFrame = args.negativescount video.framesStep = args.framestep video.extractObjects(cutoutsDirectory, extractPositives=args.pos, extractNegatives=args.neg)
def testSetMode(self): v = Video() assert v.setMode((640, 480)) assert v.screen v.flip()
def __init__(self, filename, filenameFormat, extractor, n=2): Video.__init__(self, filename, n) self.filename = filename self.format = filenameFormat self.extractor = extractor self.currentIndex = 0
plt.show() print "Estimate of camera motion -- x: %s y: %s" % (f.hor,f.vert) print 'im adjusted' f.show_adjust(f.hor,f.vert) return f def plot_flow(): im1 = vid.step() hor = [] vert = [] for i in range(200): im2 = vid.step() f = Flow(im1,im2) print (f.hor,f.vert) hor.append(f.hor) vert.append(f.vert) im1 = im2.copy() return (hor,vert) if __name__ == "__main__": vid = Video(DATA_PATH) #(hor,vert) = plot_flow() #plt.figure() #plt.plot(hor) #plt.hold() #plt.plot(vert) #plt.show() vid.step(stepsize=172) f = next_flow()
class MainUI:
    """PyGTK front-end for the virtual dressing-room demo.

    Shows a banner until the user applies an avatar, then streams frames
    (from a demo video in TEST_MODE, otherwise from the camera) through
    background subtraction, shirt detection and color/design replacement.
    """
    def __init__(self):
        self.isBanner_mode=True      # True while the static banner is shown
        self.final=None              # last processed output frame (RGB)
        self.design=7                # 7 = plain (no design overlay)
        self.color=None              # currently selected replacement color
        # Initialize processing pipeline objects.
        if TEST_MODE:
            self.vid=cv2.VideoCapture(os.path.join(tests, "demo.avi"))
        self.v=Video()
        # ---------------- testing purpose --------------#
        self.back=RemoveBackground()
        self.norm=NormalizedRGB()
        self.replace=Replace()
        self.tshirt=DetectShirt()
        # Destination quad for perspective-warping the design template.
        self.p_mat=np.array(np.mat([[0,0],[100,0],[100,100],[0,100]],np.float32))
        self.design_template=cv2.imread(templates+"Green.png")
        # -----------------------------------------------------
        # Build the UI from the Glade file.
        self.__glade__=gtk.Builder()
        self.__ui__=self.__glade__.add_from_file(os.path.join(assets, "main_ui.glade"))
        self.__main_win__=self.__glade__.get_object("vdr_main")
        self.drawing_area=self.__glade__.get_object("da1")
        self.about=self.__glade__.get_object("abt")
        self.about.connect("clicked",self.show_abt)
        self.about_dia=self.__glade__.get_object("about_dia")
        # Init drawing area and its graphics context.
        self.drawing_area.realize()
        self.drawing_area.set_size_request(config.width, config.height)
        self.canvas=self.drawing_area.window
        self.gc=self.canvas.new_gc()
        self.gc.set_background(gdk.Color(0,0,0,0))
        self.gc.set_foreground(gdk.Color(255,0,0,0))
        # Initial test drawing.
        self.canvas.draw_rectangle(self.gc,False,10,10,100,200)
        self.canvas.draw_line(self.gc,10,10,100,100)
        # Decorate main window.
        self.__main_win__.modify_bg(gtk.STATE_NORMAL,gtk.gdk.Color(0,0,0,0))
        self.__main_win__.modify_fg(gtk.STATE_NORMAL,gtk.gdk.Color(0,255,0,0))
        # Two tree views: tview1 = colors, tview2 = designs.
        self.tview1=self.__glade__.get_object("tv1")
        self.lstore1=self.__glade__.get_object("ls1")
        self.tview2=self.__glade__.get_object("tv2")
        self.lstore2=self.__glade__.get_object("ls2")
        self.apply_btn=self.__glade__.get_object("apply")
        self.apply_btn.connect("clicked",self.change_avtar)
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Red.png'),red])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Green.png'),green])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Blue.png'),blue])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Yellow.png'),yellow])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Pink.png'),pink])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Brown.png'),brown])
        self.lstore2.append([gtk.gdk.pixbuf_new_from_file(templates+'Blue.png'),plain])
        self.lstore2.append([gtk.gdk.pixbuf_new_from_file(templates+'Nike.png'),temp1])
        self.lstore2.append([gtk.gdk.pixbuf_new_from_file(templates+'Reebok.png'),temp2])
        self.tview1.set_model(self.lstore1)
        self.tview2.set_model(self.lstore2)
        self.tview1.modify_base(gtk.STATE_NORMAL,gtk.gdk.Color(0,0,0,0))
        self.tview2.modify_base(gtk.STATE_NORMAL,gtk.gdk.Color(0,0,0,0))
        cell = gtk.CellRendererPixbuf()
        column = gtk.TreeViewColumn("Pixbuf", cell)
        column.add_attribute(cell, "pixbuf", 0)
        self.tview1.append_column(column)
        cell.set_padding(30,50)
        cell2=gtk.CellRendererPixbuf()
        column2 = gtk.TreeViewColumn("Pixbuf", cell2)
        column2.add_attribute(cell2, "pixbuf", 0)
        self.tview2.append_column(column2)
        cell2.set_padding(30,50)
        # Repaint via display_frame on every expose event.
        self.drawing_area.set_app_paintable(True)
        self.drawing_area.connect("expose-event",self.display_frame)
        selection_color=self.tview1.get_selection()
        selection_color.connect("changed",self.change_cloth_plain)
        selection_design=self.tview2.get_selection()
        selection_design.connect("changed",self.change_cloth_design)
        self.canvas.set_background(gtk.gdk.Color(0,0,0,0))
        self.__main_win__.connect("delete-event",gtk.mainquit)
        self.img=cv2.cv.LoadImage(os.path.join(assets, "Banner.png"))
        self.__main_win__.maximize()

    def show_abt(self,event):
        """Show the About dialog modally."""
        self.about_dia.run()
        self.about_dia.hide()

    def change_avtar(self,event):
        """Apply button: switch from banner to live mode for 20 seconds."""
        print 'ok'
        if self.isBanner_mode:
            self.img=cv2.cv.LoadImage(os.path.join(assets, "Banner_2.png"))
            # reset() restores the banner after 20 s.
            self.timer_id=gtk.timeout_add(20*1000,self.reset)
            self.isBanner_mode=False
        # NOTE(review): indentation was lost in the source dump; this redraw is
        # assumed to run unconditionally — confirm against the original file.
        self.drawing_area.queue_draw()

    def reset(self):
        """Timer callback: restore the banner and cancel the timer."""
        self.img=cv2.cv.LoadImage(templates+"Banner.png")
        self.isBanner_mode=True
        self.drawing_area.queue_draw()
        gtk.timeout_remove(self.timer_id)

    def display_frame(self,a,b):
        """Expose-event handler: draw either the banner or the latest
        processed frame, then request the next repaint."""
        self.drawing_area.window.draw_rectangle(self.drawing_area.get_style().white_gc,False,0,0,799,599)
        if self.isBanner_mode:
            self.canvas.draw_rgb_image(self.gc,1,1,798,598,gtk.gdk.RGB_DITHER_NORMAL,self.img.tostring(),2400)
        elif not self.isBanner_mode:
            # NOTE(review): `== None` should be `is None`; comparing a numpy
            # array with == is ambiguous once self.final holds a frame.
            if self.final==None:
                # First live frame: compute it, then schedule a repaint.
                self.final=self.getOutput_frames()
                self.final=cv2.cvtColor(self.final,cv2.cv.CV_BGR2RGB)
                self.drawing_area.queue_draw()
                return
            else:
                # Draw the previous frame, then compute the next one.
                self.canvas.draw_rgb_image(self.gc,1,1,798,598,gtk.gdk.RGB_DITHER_NORMAL,self.final.tostring(),2400)
                self.final=self.getOutput_frames()
                self.final=cv2.cvtColor(self.final,cv2.cv.CV_BGR2RGB)
                self.drawing_area.queue_draw()

    def getOutput_frames(self):
        """Grab one frame, run the full pipeline (normalize, background
        subtraction, shirt detection, color/design replacement) and return
        the processed BGR frame."""
        # Using VideoCapture for test.
        if TEST_MODE:
            _,frame=self.vid.read() # NOTE: testing without camera.
        else:
            frame=self.v.outFrame() # NOTE: feeding from camera.
        self.norm.getRGB(frame)            # input to Normalized RGB
        norm_rgb=self.norm.normalized()    # normalized RGB
        print 'Got normalized RGB '
        rgb_planes=self.v.imagePlanes(norm_rgb)
        # ------- bg subtraction part -----------
        # Load background samples.
        self.back.loadBackground()
        self.back.getFrames(rgb_planes[1])
        self.back.subtract_back(norm_rgb)
        subtracted=self.back.remove(frame)
        print 'background subtracted now'
        self.tshirt.getFrames(norm_rgb)
        mask,cntr=self.tshirt.detect_shirt()
        print 'found tshirt'
        self.replace.getFrames(subtracted,mask)
        res=self.replace.replace_color(self.color)
        # NOTE(review): `is not 7` relies on CPython small-int caching;
        # this should be `!= 7` (7 means "plain", i.e. no design overlay).
        if self.design is not 7:
            res=self.replace.replace_design(cntr,self.p_mat, self.design_template, res)
        return res

    def change_cloth_plain(self,selected):
        """Color tree-view selection changed: remember the chosen color."""
        global color
        model,iter=selected.get_selected()
        color=model.get_value(iter,1)
        self.color=color

    def change_cloth_design(self,selected):
        """Design tree-view selection changed: remember the design id and
        load the matching template image (8=Nike, 9=Reebok)."""
        global design
        model,iter=selected.get_selected()
        self.design=model.get_value(iter,1)
        if self.design==8:
            self.design_template=cv2.imread(templates+"nike.png",cv2.cv.CV_LOAD_IMAGE_UNCHANGED)
        if self.design==9:
            self.design_template=cv2.imread(templates+"Rbk.png",cv2.cv.CV_LOAD_IMAGE_UNCHANGED)
class jaabaGUI(QMainWindow):
    """Controller for the blob labeling GUI.

    Drives two synchronized QMediaPlayers (plain view + overlay view), a
    frame-indexed label timeline, and keyboard/slider/line-edit navigation.
    """
    def __init__(self,parent=None):
        self.debugMode = True
        # Hard-coded developer path; only used by debugLoadVideo().
        self.debugVideoPath = '/Users/071cht/Desktop/Lab/jaabagui/testt.mjpeg.avi'
        QMainWindow.__init__(self,parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.installEventFilter(self)
        self.setFocusPolicy(Qt.StrongFocus)
        # (disabled) standalone position slider prototype:
        # self.positionSlider=QSlider(Qt.Horizontal)
        # self.positionSlider.setGeometry (800,800,100,30)
        # self.positionSlider.setRange(0, 0)
        # self.positionSlider.sliderMoved.connect(self.setPosition)
        # Two media players play the same file; player 2 notifies every
        # 10 ms so the overlay tracks position closely.
        self.mediaPlayer1 = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.mediaPlayer2 = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.mediaPlayer2.setNotifyInterval(10)
        #self.mediaPlayer.metaDataChanged.connect(self.metaDataChanged)
        self.mediaPlayer1.durationChanged.connect(self.durationChanged)
        self.mediaPlayer1.positionChanged.connect(self.positionChanged)
        self.mediaPlayer2.positionChanged.connect(self.positionChanged)
        # Visualization: scene holds a plain video item and a custom Video item.
        self.scene = QGraphicsScene()
        self.ui.graphicsView.setScene(self.scene)
        self.videoItem1 = QGraphicsVideoItem()
        self.videoItem2 = Video()
        self.scene.addItem(self.videoItem1)
        self.scene.addItem(self.videoItem2)
        self.mediaPlayer1.setVideoOutput(self.videoItem1)
        self.mediaPlayer2.setVideoOutput(self.videoItem2)
        # Slider bar; range is set later by durationChanged().
        self.ui.horizontalSlider.setRange(0, 0)
        self.ui.horizontalSlider.sliderMoved.connect(self.setPosition)
        # Overlay canvas drawn on the video; it needs a back-reference.
        self.flyCanvas= TargetView()
        self.scene.addItem(self.flyCanvas)
        self.flyCanvas.setWindowReference(self)
        # lineEdit signals:
        self.ui.lineEdit.returnPressed.connect(self.lineEditChanged)
        # Menu/button callbacks.
        self.ui.actionQuit.triggered.connect(self.quit)
        self.ui.actionLoad_Project.triggered.connect(self.loadVideo)
        self.ui.actionImport_Labels.triggered.connect(self.loadLabels)
        self.ui.buttonPlay.clicked.connect(self.play)
        self.ui.actionSave.triggered.connect(self.saveLabels)
        # Behavior labeling buttons.
        self.ui.buttonBehavior.clicked.connect(self.behaviorButtonClick)
        self.ui.buttonNone.clicked.connect(self.noneButtonClick)
        # State initialisation (filled in when a video is loaded).
        self.loaded = False
        self.videoFilename = None
        self.frame_count=None
        self.width=None
        self.height=None
        self.frame_trans=None          # ms-per-frame conversion factor
        self.previous_frame=0
        self.current_frame=0
        self.behaviorButtonStart = False
        self.noneButtonStart = False
        self.currentFly=1
        # Register flyid changed callback and sync the canvas' current fly.
        self.flyCanvas.onCurrentFlyIdChanged(self.currentFlyIdChangedCallback)
        self.flyCanvas.setCurrentFlyId(self.currentFly)
        # When double clicking on the video, change fly id in target view.
        self.videoItem2.onDoubleClick(self.flyCanvas.setCurrentFlyIdByXY)
        ########################
        # DEBUG PART HERE!!!!! #
        ########################
        if (self.debugMode):
            self.debugLoadVideo()

    # Label UI is created when a video is loaded (see loadVideo/debugLoadVideo).
    def showEvent(self, evt):
        """Window shown — the widget geometry is now valid."""
        super(jaabaGUI, self).showEvent(evt)
        ##### HERE THE WINDOW IS LOADED!!!!!!!!
        # self.loadLabelUI()

    def loadLabelUI(self):
        """Create the label timeline scene sized to the loaded video's
        frame count, plus the fixed middle-line marker."""
        self.labelScene = QGraphicsScene()
        self.ui.graphLabels.setScene(self.labelScene)
        # The size is only accurate after the window is fully displayed.
        labelUIWidth = self.ui.graphLabels.width()
        labelUIHeight = self.ui.graphLabels.height()-1
        self.labelScene.setSceneRect(0,0,labelUIWidth,labelUIHeight)
        self.labelUI = LabelUI()
        # 850 px was the original width of graphLabels; one pixel per frame.
        self.labelUI.setWidthPerFrame(850.0/850.0)
        total_length= self.labelUI.widthPerFrame * self.frame_count
        self.labelUI.setVisiableSize(total_length,30)
        # Set start position (centered).
        self.labelUI.setPos(labelUIWidth/2,0)
        print 'frame_count is ', self.frame_count
        print 'total length is', total_length
        self.labelScene.addItem(self.labelUI)
        # Middle line marker stays fixed while the timeline scrolls under it.
        self.labelUIMiddleLine = LabelUIMiddleLine()
        self.labelScene.addItem(self.labelUIMiddleLine)
        self.labelUIMiddleLine.setPos(labelUIWidth/2,0)
        self.writeLog('Label UI loaded')

    def eventFilter(self, obj, event):
        """Keyboard navigation: Left/Right step one frame, Up/Down jump 30
        frames; both players are repositioned. Returns True to swallow the
        key event. See http://qt-project.org/doc/qt-4.8/qt.html#Key-enum"""
        if (event.type() == PyQt5.QtCore.QEvent.KeyPress):
            key = event.key()
            if (key == Qt.Key_Up):
                # Jump back 30 frames.
                curr_frame= int(float(self.ui.lineEdit.text()))
                curr_frame= curr_frame-30
                media_position= int(round(curr_frame*self.frame_trans))
                self.mediaPlayer1.setPosition(media_position)
                self.mediaPlayer2.setPosition(media_position)
            elif (key == Qt.Key_Right):
                # Advance one frame.
                curr_frame= int(float(self.ui.lineEdit.text()))
                curr_frame= curr_frame+1
                media_position= int(round(curr_frame*self.frame_trans))
                self.mediaPlayer1.setPosition(media_position)
                self.mediaPlayer2.setPosition(media_position)
            elif (key == Qt.Key_Left):
                # Step back one frame.
                curr_frame= int(float(self.ui.lineEdit.text()))
                curr_frame= curr_frame-1
                media_position= int(round(curr_frame*self.frame_trans))
                self.mediaPlayer1.setPosition(media_position)
                self.mediaPlayer2.setPosition(media_position)
            elif (key == Qt.Key_Down):
                # Jump forward 30 frames.
                curr_frame= int(float(self.ui.lineEdit.text()))
                curr_frame= curr_frame+30
                media_position= int(round(curr_frame*self.frame_trans))
                self.mediaPlayer1.setPosition(media_position)
                self.mediaPlayer2.setPosition(media_position)
            return True
        return False

    # ### actions start from here ###
    def quit(self):
        """Exit the application."""
        QApplication.quit()

    def loadVideo(self):
        """Ask the user for a video file, read its frame count/size via
        OpenCV, feed it to both media players, and build the label UI."""
        self.writeLog("Loading video...")
        self.videoFilename = QFileDialog.getOpenFileName(self, 'Open File', '.')[0]
        if not self.videoFilename:
            self.writeLog("User cancelled - no video loaded")
            return
        else:
            cap=cv2.VideoCapture(self.videoFilename)
            self.frame_count=cap.get(cv2.CAP_PROP_FRAME_COUNT)
            self.width=cap.get(3)      # CAP_PROP_FRAME_WIDTH
            self.height=cap.get(4)     # CAP_PROP_FRAME_HEIGHT
            self.mediaPlayer2.setMedia(QMediaContent(QUrl.fromLocalFile(self.videoFilename )))
            self.mediaPlayer1.setMedia(QMediaContent(QUrl.fromLocalFile(self.videoFilename )))
            self.ui.buttonPlay.setEnabled(True)
            self.writeLog("Video loaded!")
            # Init label related UI.
            self.loadLabelUI()

    def debugLoadVideo(self):
        """Like loadVideo(), but loads the hard-coded debug path and defers
        loadLabelUI by 1 s so widget geometry is valid."""
        self.videoFilename = self.debugVideoPath
        cap=cv2.VideoCapture(self.videoFilename)
        self.frame_count=cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self.width=cap.get(3)
        self.height=cap.get(4)
        self.mediaPlayer2.setMedia(QMediaContent(QUrl.fromLocalFile(self.videoFilename )))
        self.mediaPlayer1.setMedia(QMediaContent(QUrl.fromLocalFile(self.videoFilename )))
        self.ui.buttonPlay.setEnabled(True)
        self.writeLog("Video loaded!")
        QTimer.singleShot(1000, self.loadLabelUI)

    def play(self):
        """Toggle play/pause: lay out the two video items side by side,
        scale the overlay canvas, and start or pause both players."""
        self.videoItem1.setAspectRatioMode(0)
        self.videoItem2.setAspectRatioMode(0)
        self.scene.setSceneRect(0,0,self.ui.graphicsView.width(),self.ui.graphicsView.height())
        # Each video item takes half the view width.
        self.videoItem1.setSize(QSizeF(self.ui.graphicsView.width()/2,self.ui.graphicsView.height()))
        self.videoItem2.setSize(QSizeF(self.ui.graphicsView.width()/2,self.ui.graphicsView.height()))
        self.videoItem1.setPos(QPointF(0,0))
        self.videoItem2.setPos(QPointF(self.ui.graphicsView.width()/2,0))
        self.flyCanvas.setPos(QPointF(self.ui.graphicsView.width()/2,0))
        # Custom function setXYScale maps video coords to view coords.
        self.videoItem2.setXYScale(self.width,self.height,self.ui.graphicsView.width()/2,self.ui.graphicsView.height())
        self.flyCanvas.setXYScale(self.width,self.height,self.ui.graphicsView.width()/2,self.ui.graphicsView.height())
        if self.mediaPlayer1.state() == QMediaPlayer.PlayingState:
            self.ui.buttonPlay.setIcon(self.ui.style().standardIcon(PyQt5.QtWidgets.QStyle.SP_MediaPlay))
            self.ui.buttonPlay.setText("Play")
            self.mediaPlayer1.pause()
            self.writeLog("Video paused")
        else:
            self.ui.buttonPlay.setIcon(self.ui.style().standardIcon(PyQt5.QtWidgets.QStyle.SP_MediaPause))
            self.ui.buttonPlay.setText("Stop")
            self.mediaPlayer1.play()
            self.writeLog("Playing video")
        # Keep player 2 in lockstep with player 1.
        if self.mediaPlayer2.state() == QMediaPlayer.PlayingState:
            self.mediaPlayer2.pause()
        else:
            self.mediaPlayer2.play()

    def loadLabels(self):
        """Load pickled label data chosen by the user into the label UI."""
        self.writeLog("Loading labels from file...")
        self.labelFilename = QFileDialog.getOpenFileName(self, 'Open File', '.')[0]
        # NOTE(review): file handle is never closed and pickle on an
        # untrusted file is unsafe — consider a `with` block and validation.
        self.labelUI.labelData = pickle.load(open(self.labelFilename,"rb"))
        self.writeLog("Label loaded from file:" + self.labelFilename)

    def saveLabels(self):
        # TODO: currently saves only to a fixed file ("newLabels.p");
        # add a save-path dialog later.
        pickle.dump( self.labelUI.labelData, open( "newLabels.p", "wb" ) )

    def setPosition(self, position):
        """Slider moved: seek both players to `position` (ms)."""
        self.mediaPlayer1.setPosition(position)
        self.mediaPlayer2.setPosition(position)

    # When the media position changes, update slider, text box and timeline.
    def positionChanged(self, position):
        """Scroll the label timeline by the number of frames advanced and
        refresh the dependent widgets."""
        previous_frame= self.previous_frame
        curr_frame= int(round(position/self.frame_trans))
        self.current_frame=curr_frame
        frame_change= previous_frame-curr_frame
        move_width= frame_change * self.labelUI.widthPerFrame
        self.previous_frame= curr_frame
        self.labelUI.moveBy(move_width,0)
        self.labelUI.setCurrentFrame(curr_frame)
        # Enforce one labelUI repaint.
        self.labelUI.update()
        self.updateLineEdit(position)
        self.updateSliderAndGraph(position)

    def updateSliderAndGraph(self, position):
        """Sync the slider and the overlay canvas to `position` (ms)."""
        self.ui.horizontalSlider.setValue(position)
        # frame_trans is only a float once a video is loaded.
        if isinstance(self.frame_trans,float):
            self.flyCanvas.getFrame(int(round(position/self.frame_trans)))
            self.flyCanvas.isManualCalled = True
            self.flyCanvas.update()

    def updateLineEdit(self, position):
        """Show the current frame number in the line edit."""
        if isinstance(self.frame_trans,float):
            self.ui.lineEdit.setText(str(int(round(position/self.frame_trans))))

    def durationChanged(self, duration):
        """Media duration known: size the slider and compute ms-per-frame."""
        self.ui.horizontalSlider.setRange(0, duration)
        self.frame_trans=self.mediaPlayer1.duration()/self.frame_count

    def writeLog(self,text):
        """Write a status message to the log widget."""
        self.ui.log.setText(text)

    def lineEditChanged(self):
        """Return pressed in the frame box: seek both players there."""
        curr_frame= int(float(self.ui.lineEdit.text()))
        media_position= int(round(curr_frame*self.frame_trans))
        self.mediaPlayer1.setPosition(media_position)
        self.mediaPlayer2.setPosition(media_position)

    def behaviorButtonClick(self):
        """Toggle behavior labeling: first click starts a label interval at
        the current frame, second click closes it."""
        self.behaviorButtonStart = not self.behaviorButtonStart
        if (self.behaviorButtonStart):
            self.labelUI.startLabel(self.ui.comboBox.currentIndex(),'',self.current_frame)
            self.writeLog('start labeling')
        else:
            self.labelUI.stopLabel()
            self.writeLog('stop labeling')

    def noneButtonClick(self):
        """Toggle 'none' labeling, same protocol as behaviorButtonClick."""
        self.noneButtonStart = not self.noneButtonStart
        if (self.noneButtonStart):
            self.labelUI.startLabel(self.ui.comboBox.currentIndex(),'_none',self.current_frame)
            self.writeLog('start labeling')
        else:
            self.labelUI.stopLabel()
            self.writeLog('stop labeling')

    # Set currentFly when the fly selection changes.
    def setCurrentFly(self,fly):
        self.currentFly = fly
        self.ui.flyInfo.setPlainText('FlyID:' + str(self.currentFly))
        self.flyCanvas.currentFly=fly

    def currentFlyIdChangedCallback(self,fly):
        """Canvas callback: mirror the new fly id into the info panel."""
        print 'callback!!!!!';
        self.currentFly = fly
        self.ui.flyInfo.setPlainText('FlyID:' + str(self.currentFly))
class GameEngine(object):
    """The main game engine."""
    def __init__(self, config = None):
        """
        Constructor.

        @param config: L{Config} instance for settings
        """
        if not config:
            config = Config.load()
        self.config = config
        self.fps = self.config.get("video", "fps")
        pygame.init()
        self.title = _("Frets on Fire")
        self.restartRequested = False
        self.handlingException = False
        self.video = Video(self.title)
        self.audio = Audio()
        log.debug("Initializing audio.")
        frequency = self.config.get("audio", "frequency")
        bits = self.config.get("audio", "bits")
        stereo = self.config.get("audio", "stereo")
        bufferSize = self.config.get("audio", "buffersize")
        # pre_open configures the mixer before the second pygame.init below
        # makes the settings take effect.
        self.audio.pre_open(frequency = frequency, bits = bits, stereo = stereo, bufferSize = bufferSize)
        pygame.init()
        self.audio.open(frequency = frequency, bits = bits, stereo = stereo, bufferSize = bufferSize)
        log.debug("Initializing video.")
        # Resolution is stored as e.g. "800x600".
        width, height = [int(s) for s in self.config.get("video", "resolution").split("x")]
        fullscreen = self.config.get("video", "fullscreen")
        multisamples = self.config.get("video", "multisamples")
        self.video.setMode((width, height), fullscreen = fullscreen, multisamples = multisamples)
        # Derive the drawing geometry from the actual GL viewport.
        viewport = glGetIntegerv(GL_VIEWPORT)
        h = viewport[3] - viewport[1]
        w = viewport[2] - viewport[0]
        geometry = (0, 0, w, h)
        self.img = ImgContext(geometry)
        glViewport(int(viewport[0]), int(viewport[1]), int(viewport[2]), int(viewport[3]))
        self.input = Input()
        self.view = View(self, geometry)
        self.resizeScreen(w, h)
        self.resource = Resource(Version.dataPath())
        self.server = None
        # Start in the loading state; switched to self.main once resources load.
        self.mainloop = self.loading
        # Load game modifications
        Mod.init(self)
        theme = Config.load(self.resource.fileName("theme.ini"))
        Theme.open(theme)
        # Make sure we are using the new upload URL
        if self.config.get("game", "uploadurl").startswith("http://kempele.fi"):
            self.config.set("game", "uploadurl", "http://fretsonfire.sourceforge.net/play")
        self.running = True
        self.timer = FpsTimer()
        self.tickDelta = 0
        self.task = TaskEngine(self)
        self.task.addTask(self.input, synced = False)
        self.task.addTask(self.view)
        self.task.addTask(self.resource, synced = False)
        self.data = Data(self.resource, self.img)
        self.input.addKeyListener(FullScreenSwitcher(self), priority = True)
        self.input.addSystemEventListener(SystemEventHandler(self))
        self.debugLayer = None
        self.startupLayer = None
        self.loadingScreenShown = False
        log.debug("Ready.")

    def enableGarbageCollection(self, enabled):
        """
        Enable or disable garbage collection whenever a random garbage
        collection run would be undesirable. Disabling the garbage collector
        has the unfortunate side-effect that your memory usage will skyrocket.
        """
        if enabled:
            gc.enable()
        else:
            gc.disable()

    def collectGarbage(self):
        """
        Run a garbage collection run.
        """
        gc.collect()

    def setStartupLayer(self, startupLayer):
        """
        Set the L{Layer} that will be shown when the all the resources
        have been loaded.  See L{Data}

        @param startupLayer:    Startup L{Layer}
        """
        self.startupLayer = startupLayer

    def isDebugModeEnabled(self):
        # Truthy only when a debug layer has been installed.
        return bool(self.debugLayer)

    def setDebugModeEnabled(self, enabled):
        """
        Show or hide the debug layer.

        @type enabled: bool
        """
        if enabled:
            self.debugLayer = DebugLayer(self)
        else:
            self.debugLayer = None

    def toggleFullscreen(self):
        """
        Toggle between fullscreen and windowed mode.

        @return: True on success
        """
        if not self.video.toggleFullscreen():
            # on windows, the fullscreen toggle kills our textures, so we must restart the whole game
            self.input.broadcastSystemEvent("restartRequested")
            self.config.set("video", "fullscreen", not self.video.fullscreen)
            return True
        self.config.set("video", "fullscreen", self.video.fullscreen)
        return True

    def restart(self):
        """Restart the game."""
        if not self.restartRequested:
            self.restartRequested = True
            self.input.broadcastSystemEvent("restartRequested")
        else:
            # A second restart request while one is pending quits instead.
            self.quit()

    def quit(self):
        # Shut down audio and tasks, then let run() return control.
        self.audio.close()
        self.task.exit()
        self.running = False

    def resizeScreen(self, width, height):
        """
        Resize the game screen.

        @param width:  New width in pixels
        @param height: New height in pixels
        """
        self.view.setGeometry((0, 0, width, height))
        self.img.setGeometry((0, 0, width, height))

    def startWorld(self):
        # Create the game world.
        self.world = World(self)

    def finishGame(self):
        # Tear down the world and return to the main menu layer.
        self.world.finishGame()
        self.world = None
        self.view.pushLayer(MainMenu.MainMenu(self))

    def loadImgDrawing(self, target, name, fileName, textureSize = None):
        """
        Load an SVG drawing synchronously.

        @param target:      An object that will own the drawing
        @param name:        The name of the attribute the drawing will be assigned to
        @param fileName:    The name of the file in the data directory
        @param textureSize: Either None or (x, y), in which case the file
                            will be rendered to an x by y texture
        @return:            L{ImgDrawing} instance
        """
        return self.data.loadImgDrawing(target, name, fileName, textureSize)

    def loading(self):
        """Loading state loop."""
        if self.data.essentialResourcesLoaded():
            if not self.loadingScreenShown:
                self.loadingScreenShown = True
                Dialogs.showLoadingScreen(self, self.data.resourcesLoaded)
                if self.startupLayer:
                    self.view.pushLayer(self.startupLayer)
                # Switch the mainloop to the normal rendering state.
                self.mainloop = self.main
        self.view.render()

    def clearScreen(self):
        self.img.clear(*Theme.backgroundColor)

    def main(self):
        """Main state loop."""
        self.view.render()
        if self.debugLayer:
            self.debugLayer.render(1.0, True)

    def run(self):
        """Run one iteration of the engine: tick tasks, render, flip.

        @return: whatever the task engine reports (loop-continuation flag).
        """
        try:
            self.tickDelta = self.timer.tick()
            done = self.task.run()
            self.clearScreen()
            self.mainloop()
            self.video.flip()
            # Calculate FPS every 2 seconds
            if self.timer.fpsTime >= 2000:
                self.fpsEstimate = self.timer.get_fps()
                print ("%.2f fps" % self.fpsEstimate)
            self.timer.delay(self.fps)
            return done
        except KeyboardInterrupt:
            sys.exit(0)
        except SystemExit:
            sys.exit(0)
        except Exception, e:
            def clearMatrixStack(stack):
                # Best-effort unwinding of a possibly unbalanced GL matrix stack.
                try:
                    glMatrixMode(stack)
                    for i in range(16):
                        glPopMatrix()
                except:
                    pass
            if self.handlingException:
                # A recursive exception is fatal as we can't reliably reset the GL state
                sys.exit(1)
            self.handlingException = True
            log.error("%s: %s" % (e.__class__, e))
            import traceback
            traceback.print_exc()
            clearMatrixStack(GL_PROJECTION)
            clearMatrixStack(GL_MODELVIEW)
            Dialogs.showMessage(self, unicode(e))
            self.handlingException = False
            return True
    def test_detect(self, trainsetIdxs, testsetIdx, harvest=False, autoAnnotate=False, displayOptions=None):
        """
        Detect objects in a video and score the detections against the
        labeled ground truth, printing average score/precision/recall.

        :param trainsetIdxs: dataset indices whose samples train the classifier
        :param testsetIdx: dataset index of the video to run detection on
        :param harvest: whether to use harvesting (annotate detections and
            retrain the classifier while the video plays)
        :param autoAnnotate: whether to automate harvesting (for testing purposes)
        :param displayOptions: some option about what to display in the video
        :return: None (results are printed)
        """
        self.log.info("Performing detection test")
        framesStep = 10  # take every frameSteps-th frame
        if not displayOptions:
            displayOptions = {
                'enabled': True,                # display anything at all
                'frame': True,                  # video frame image
                'detections': False,            # detected objects
                'suppressed_detections': True,
                'sliding_window': False,
                'ground_truth': False
            }
        samples = self.getTrainSamples(trainsetIdxs)
        clsf = self.trainFromSamples(samples)  # train a classifier based on the given dataset samples
        datasetId = self.datasetIds[testsetIdx]  # get the dataset id given the dataset index
        videoFileName = os.path.join(self.projectDirectory, 'videos', '%s.MOV' % datasetId)
        annotationsFileName = os.path.join(self.projectDirectory, 'labels', "%s_output.txt" % datasetId)
        video = Video(videoFileName, annotationsFileName)
        video.cutoutSize = self.cutoutSize
        detectionsTruth = video.getAnnotations()  # get the labeled ground truth for this video
        self.log.debug("Using video %s" % videoFileName)
        self.suppressor = Suppressor()
        cap = cv2.VideoCapture(videoFileName)  # load the video file
        results = []  # results for each frame
        frameIdx = 0
        while cap.isOpened():
            ret, frame = cap.read()
            if ret is False:
                # End of stream or read failure: stop processing.
                break
            if ((frameIdx+1) % framesStep) == 0:  # use every <frameStep>th frame
                self.log.debug("Frame %d" % frameIdx)
                truth = []
                for truthDetection in detectionsTruth[frameIdx]:
                    if truthDetection['lost'] is True:  # if the detection is not visible in this frame
                        continue
                    # dimension of the truth in the frame
                    truthDetectionCutoutDimensions = video.getCutoutDimensions(truthDetection, frame.shape[:2])
                    truth.append(truthDetectionCutoutDimensions)
                detections = self.detectObjects(frame, clsf, truth, display=displayOptions)  # detect objects in frame
                result = self.matchDetections(detections, truth, frame.shape[:2])  # match the detections against the truth
                results.append(result)
                if harvest:
                    detectionsOverlapRatios = result['detectionsOverlapRatios']  # how much the truth overlaps with each detection
                    # (manually) annotate all the found detections
                    newSamples = self.annotateDetections(frame, detections, detectionsOverlapRatios, autoAnnotate=autoAnnotate, overlapThreshold=0.3)
                    if len(newSamples):
                        samples.extend(newSamples)
                        # retrain a classifier based on the given dataset samples and new samples
                        clsf = self.trainFromSamples(samples)
            frameIdx += 1
        cap.release()
        # NOTE(review): if the video yields fewer than framesStep frames,
        # results is empty and these divisions raise ZeroDivisionError — confirm
        # callers only pass sufficiently long videos.
        totalScore = sum([result['score'] for result in results])
        totalPrecision = sum([result['precision'] for result in results])
        totalRecall = sum([result['recall'] for result in results])
        print ("= Average score:")
        pprint(totalScore/len(results))
        print ("= Average precision:")
        pprint(totalPrecision/len(results))
        print ("= Average recall:")
        pprint(totalRecall/len(results))
def fileProcessing(self): with open("book.txt" , 'r') as bookObj: for line in bookObj: fields = line.split('|') l0 = fields[0] l1 = fields[1] l2 = fields[2] l3 = fields[3] l4 = fields[4] l5 = fields[5] l6 = fields[6] l7 = fields[7] l8 = fields[8] l9 = fields[9] book = Book(l0,l1,l2,l3,l4,l5,l6,l7,l8,l9) book.__class__ = Book cardCatalog.append(book) #for obj in cardCatalog: #obj.display() #print len(cardCatalog) with open("periodic.txt" , 'r') as periodicObj: for line in periodicObj: fields = line.split('|') l0 = fields[0] l1 = fields[1] l2 = fields[2] l3 = fields[3] l4 = fields[4] l5 = fields[5] l6 = fields[6] l7 = fields[7] l8 = fields[8] l9 = fields[9] l10 = fields[10] l11 = fields[11] periodic = Periodic(l0,l1,l2,l3,l4,l5,l6,l7,l8,l9,l10,l11) periodic.__class__ = Periodic cardCatalog.append(periodic) with open("video.txt" , 'r') as videoObj: for line in videoObj: fields = line.split('|') l0 = fields[0] l1 = fields[1] l2 = fields[2] l3 = fields[3] l4 = fields[4] l5 = fields[5] l6 = fields[6] l7 = fields[7] video = Video(l0,l1,l2,l3,l4,l5,l6,l7) video.__class__ = Video cardCatalog.append(video) with open("film.txt" , 'r') as filmObj: for line in filmObj: fields = line.split('|') l0 = fields[0] l1 = fields[1] l2 = fields[2] l3 = fields[3] l4 = fields[4] l5 = fields[5] film = Film(l0,l1,l2,l3,l4,l5) film.__class__ = Film cardCatalog.append(film)
    def __init__(self):
        """Build the main PyGTK window, the video-processing objects and the
        color/design template lists, then wire up all signal handlers."""
        self.isBanner_mode=True
        self.final=None
        self.design=7
        self.color=None
        #Initialize systems objects
        if TEST_MODE:
            # In test mode, frames come from a canned demo clip instead of a camera.
            self.vid=cv2.VideoCapture(os.path.join(tests, "demo.avi"))
        self.v=Video()
        #----------------testing purpose--------------#
        self.back=RemoveBackground()
        self.norm=NormalizedRGB()
        self.replace=Replace()
        #print self.replace
        self.tshirt=DetectShirt()
        # Unit square used as the perspective-transform source quad.
        self.p_mat=np.array(np.mat([[0,0],[100,0],[100,100],[0,100]],np.float32))
        self.design_template=cv2.imread(templates+"Green.png")
        #-----------------------------------------------------
        # Load the Glade UI description and grab the widgets we need.
        self.__glade__=gtk.Builder()
        self.__ui__=self.__glade__.add_from_file(os.path.join(assets, "main_ui.glade"))
        self.__main_win__=self.__glade__.get_object("vdr_main")
        self.drawing_area=self.__glade__.get_object("da1")
        self.about=self.__glade__.get_object("abt")
        self.about.connect("clicked",self.show_abt)
        self.about_dia=self.__glade__.get_object("about_dia")
        #init Drawing Area
        self.drawing_area.realize()
        self.drawing_area.set_size_request(config.width, config.height)
        #get canvas
        self.canvas=self.drawing_area.window
        #get GC (graphics context)
        self.gc=self.canvas.new_gc()
        # Draw a placeholder rectangle/line so the area isn't blank at start.
        #print self.canvas,self.gc
        self.gc.set_background(gdk.Color(0,0,0,0))
        self.gc.set_foreground(gdk.Color(255,0,0,0))
        #print dir(self.canvas)
        self.canvas.draw_rectangle(self.gc,False,10,10,100,200)
        self.canvas.draw_line(self.gc,10,10,100,100)
        #decorate Mainwindows
        self.__main_win__.modify_bg(gtk.STATE_NORMAL,gtk.gdk.Color(0,0,0,0))
        self.__main_win__.modify_fg(gtk.STATE_NORMAL,gtk.gdk.Color(0,255,0,0))
        # Two tree views: tv1 lists plain shirt colors, tv2 lists design templates.
        self.tview1=self.__glade__.get_object("tv1")
        self.lstore1=self.__glade__.get_object("ls1")
        self.tview2=self.__glade__.get_object("tv2")
        self.lstore2=self.__glade__.get_object("ls2")
        self.apply_btn=self.__glade__.get_object("apply")
        self.apply_btn.connect("clicked",self.change_avtar)
        # Populate the color list (thumbnail pixbuf + associated value).
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Red.png'),red])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Green.png'),green])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Blue.png'),blue])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Yellow.png'),yellow])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Pink.png'),pink])
        self.lstore1.append([gtk.gdk.pixbuf_new_from_file(templates+'Brown.png'),brown])
        # Populate the design list.
        self.lstore2.append([gtk.gdk.pixbuf_new_from_file(templates+'Blue.png'),plain])
        self.lstore2.append([gtk.gdk.pixbuf_new_from_file(templates+'Nike.png'),temp1])
        self.lstore2.append([gtk.gdk.pixbuf_new_from_file(templates+'Reebok.png'),temp2])
        #treeview = gtk.TreeView(self.lstore1)
        self.tview1.set_model(self.lstore1)
        self.tview2.set_model(self.lstore2)
        self.tview1.modify_base(gtk.STATE_NORMAL,gtk.gdk.Color(0,0,0,0))
        self.tview2.modify_base(gtk.STATE_NORMAL,gtk.gdk.Color(0,0,0,0))
        # Pixbuf cell renderers show the thumbnails (column 0 of each store).
        cell = gtk.CellRendererPixbuf()
        column = gtk.TreeViewColumn("Pixbuf", cell)
        column.add_attribute(cell, "pixbuf", 0)
        self.tview1.append_column(column)
        cell.set_padding(30,50)
        cell2=gtk.CellRendererPixbuf()
        column2 = gtk.TreeViewColumn("Pixbuf", cell2)
        column2.add_attribute(cell2, "pixbuf", 0)
        self.tview2.append_column(column2)
        cell2.set_padding(30,50)
        #self.canvas.draw_
        # Repaint frames ourselves on every expose event.
        self.drawing_area.set_app_paintable(True)
        self.drawing_area.connect("expose-event",self.display_frame)
        # Selection changes drive the cloth color / design switch.
        selection_color=self.tview1.get_selection()
        selection_color.connect("changed",self.change_cloth_plain)
        selection_design=self.tview2.get_selection()
        selection_design.connect("changed",self.change_cloth_design)
        self.canvas.set_background(gtk.gdk.Color(0,0,0,0))
        self.__main_win__.connect("delete-event",gtk.mainquit)
        self.img=cv2.cv.LoadImage(os.path.join(assets, "Banner.png"))
        # self.__main_win__.show()
        self.__main_win__.maximize()
class GameEngine(object): """The main game engine.""" def __init__(self, config = None): Log.debug("GameEngine class init (GameEngine.py)...") self.mainMenu = None #placeholder for main menu object - to prevent reinstantiation self.createdGuitarScene = False #MFH - so we only create ONE guitarscene...! self.currentScene = None self.versionString = version #stump: other version stuff moved to allow full version string to be retrieved without instantiating GameEngine self.uploadVersion = "%s-4.0" % Version.PROGRAM_NAME #akedrou - the version passed to the upload site. self.dataPath = Version.dataPath() Log.debug(self.versionString + " starting up...") Log.debug("Python version: " + sys.version.split(' ')[0]) Log.debug("Pygame version: " + str(pygame.version.ver) ) Log.debug("PyOpenGL version: " + OpenGL.__version__) Log.debug("Numpy version: " + np.__version__) Log.debug("PIL version: " + Image.VERSION) Log.debug("sys.argv: " + repr(sys.argv)) Log.debug("os.name: " + os.name) Log.debug("sys.platform: " + sys.platform) if os.name == 'nt': import win32api Log.debug("win32api.GetVersionEx(1): " + repr(win32api.GetVersionEx(1))) elif os.name == 'posix': Log.debug("os.uname(): " + repr(os.uname())) """ Constructor. @param config: L{Config} instance for settings """ self.tutorialFolder = "tutorials" if not config: config = Config.load() self.config = config fps = self.config.get("video", "fps") self.tasks = [] self.frameTasks = [] self.fps = fps self.currentTask = None self.paused = [] self.running = True self.clock = pygame.time.Clock() self.title = self.versionString self.restartRequested = False # evilynux - Check if theme icon exists first, then fallback on FoFiX icon. 
themename = self.config.get("coffee", "themename") themeicon = os.path.join(Version.dataPath(), "themes", themename, "icon.png") fofixicon = os.path.join(Version.dataPath(), "fofix_icon.png") icon = None if os.path.exists(themeicon): icon = themeicon elif os.path.exists(fofixicon): icon = fofixicon self.video = Video(self.title, icon) if self.config.get("video", "disable_screensaver"): self.video.disableScreensaver() self.audio = Audio() self.frames = 0 self.fpsEstimate = 0 self.priority = self.config.get("engine", "highpriority") self.show_fps = self.config.get("video", "show_fps") self.advSettings = self.config.get("game", "adv_settings") self.restartRequired = False self.quicksetRestart = False self.quicksetPerf = self.config.get("quickset", "performance") self.scrollRate = self.config.get("game", "scroll_rate") self.scrollDelay = self.config.get("game", "scroll_delay") Log.debug("Initializing audio.") frequency = self.config.get("audio", "frequency") bits = self.config.get("audio", "bits") stereo = self.config.get("audio", "stereo") bufferSize = self.config.get("audio", "buffersize") self.audio.open(frequency = frequency, bits = bits, stereo = stereo, bufferSize = bufferSize) self.cmdPlay = 0 self.cmdMode = None self.cmdDiff = None self.cmdPart = None self.gameStarted = False self.world = None self.audioSpeedFactor = 1.0 Log.debug("Initializing video.") #myfingershurt: ensuring windowed mode starts up in center of the screen instead of cascading positions: os.environ['SDL_VIDEO_WINDOW_POS'] = 'center' width, height = [int(s) for s in self.config.get("video", "resolution").split("x")] fullscreen = self.config.get("video", "fullscreen") multisamples = self.config.get("video", "multisamples") self.video.setMode((width, height), fullscreen = fullscreen, multisamples = multisamples) Log.debug("OpenGL version: " + glGetString(GL_VERSION)) Log.debug("OpenGL vendor: " + glGetString(GL_VENDOR)) Log.debug("OpenGL renderer: " + glGetString(GL_RENDERER)) Log.debug("OpenGL 
extensions: " + ' '.join(sorted(glGetString(GL_EXTENSIONS).split()))) if self.video.default: self.config.set("video", "fullscreen", False) self.config.set("video", "resolution", "800x600") if self.config.get("video", "shader_use"): shaders.set(os.path.join(Version.dataPath(), "shaders")) # Enable the high priority timer if configured if self.priority: Log.debug("Enabling high priority timer.") self.fps = 0 # High priority # evilynux - This was generating an error on the first pass (at least under # GNU/Linux) as the Viewport was not set yet. try: viewport = glGetIntegerv(GL_VIEWPORT) except: viewport = [0, 0, width, height] h = viewport[3] - viewport[1] w = viewport[2] - viewport[0] geometry = (0, 0, w, h) self.svg = SvgContext(geometry) glViewport(int(viewport[0]), int(viewport[1]), int(viewport[2]), int(viewport[3])) self.startupMessages = self.video.error self.input = Input() self.view = View(self, geometry) self.resizeScreen(w, h) self.resource = Resource(Version.dataPath()) self.mainloop = self.loading self.menuMusic = False self.setlistMsg = None # Load game modifications Mod.init(self) self.addTask(self.input, synchronized = False) self.addTask(self.view, synchronized = False) self.addTask(self.resource, synchronized = False) self.data = Data(self.resource, self.svg) ##MFH: Animated stage folder selection option #<themename>\Stages still contains the backgrounds for when stage rotation is off, and practice.png #subfolders under Stages\ will each be treated as a separate animated stage set self.stageFolders = [] currentTheme = themename stagespath = os.path.join(Version.dataPath(), "themes", currentTheme, "backgrounds") themepath = os.path.join(Version.dataPath(), "themes", currentTheme) if os.path.exists(stagespath): self.stageFolders = [] allFolders = os.listdir(stagespath) #this also includes all the stage files - so check to see if there is at least one .png file inside each folder to be sure it's an animated stage folder for name in allFolders: 
aniStageFolderListing = [] thisIsAnAnimatedStageFolder = False try: aniStageFolderListing = os.listdir(os.path.join(stagespath,name)) except Exception: thisIsAnAnimatedStageFolder = False for aniFile in aniStageFolderListing: if os.path.splitext(aniFile)[1] == ".png" or os.path.splitext(aniFile)[1] == ".jpg" or os.path.splitext(aniFile)[1] == ".jpeg": #we've found at least one .png file here, chances are this is a valid animated stage folder thisIsAnAnimatedStageFolder = True if thisIsAnAnimatedStageFolder: self.stageFolders.append(name) i = len(self.stageFolders) if i > 0: #only set default to first animated subfolder if one exists - otherwise use Normal! defaultAniStage = str(self.stageFolders[0]) else: defaultAniStage = "Normal" Log.debug("Default animated stage for " + currentTheme + " theme = " + defaultAniStage) aniStageOptions = dict([(str(self.stageFolders[n]),self.stageFolders[n]) for n in range(0, i)]) aniStageOptions.update({"Normal":_("Slideshow")}) if i > 1: #only add Random setting if more than one animated stage exists aniStageOptions.update({"Random":_("Random")}) Config.define("game", "animated_stage_folder", str, defaultAniStage, text = _("Animated Stage"), options = aniStageOptions ) #MFH: here, need to track and check a new ini entry for last theme - so when theme changes we can re-default animated stage to first found lastTheme = self.config.get("game","last_theme") if lastTheme == "" or lastTheme != currentTheme: #MFH - no last theme, and theme just changed: self.config.set("game","animated_stage_folder",defaultAniStage) #force defaultAniStage self.config.set("game","last_theme",currentTheme) selectedAnimatedStage = self.config.get("game", "animated_stage_folder") if selectedAnimatedStage != "Normal" and selectedAnimatedStage != "Random": if not os.path.exists(os.path.join(stagespath,selectedAnimatedStage)): Log.warn("Selected animated stage folder " + selectedAnimatedStage + " does not exist, forcing Normal.") 
self.config.set("game","animated_stage_folder","Normal") #MFH: force "Standard" currently selected animated stage folder is invalid else: Config.define("game", "animated_stage_folder", str, "None", text = _("Animated Stage"), options = ["None",_("None")]) Log.warn("No stages\ folder found, forcing None setting for Animated Stage.") self.config.set("game","animated_stage_folder", "None") #MFH: force "None" when Stages folder can't be found try: fp, pathname, description = imp.find_module("CustomTheme",[themepath]) theme = imp.load_module("CustomTheme", fp, pathname, description) self.theme = theme.CustomTheme(themepath, themename) except ImportError: self.theme = Theme(themepath, themename) self.addTask(self.theme) self.input.addKeyListener(FullScreenSwitcher(self), priority = True) self.input.addSystemEventListener(SystemEventHandler(self)) self.debugLayer = None self.startupLayer = None self.loadingScreenShown = False self.graphicMenuShown = False Log.debug("Ready.") # evilynux - This stops the crowd cheers if they're still playing (issue 317). def quit(self): # evilynux - self.audio.close() crashes when we attempt to restart if not self.restartRequested: self.audio.close() Player.savePlayers() for t in list(self.tasks + self.frameTasks): self.removeTask(t) self.running = False def setStartupLayer(self, startupLayer): """ Set the L{Layer} that will be shown when the all the resources have been loaded. See L{Data} @param startupLayer: Startup L{Layer} """ self.startupLayer = startupLayer def isDebugModeEnabled(self): return bool(self.debugLayer) def setDebugModeEnabled(self, enabled): """ Show or hide the debug layer. @type enabled: bool """ if enabled: self.debugLayer = DebugLayer(self) else: self.debugLayer = None def toggleFullscreen(self): """ Toggle between fullscreen and windowed mode. 
@return: True on success """ if not self.video.toggleFullscreen(): # on windows, the fullscreen toggle kills our textures, se we must restart the whole game self.input.broadcastSystemEvent("restartRequested") self.config.set("video", "fullscreen", not self.video.fullscreen) return True self.config.set("video", "fullscreen", self.video.fullscreen) return True def restart(self): """Restart the game.""" if not self.restartRequested: self.restartRequested = True self.input.broadcastSystemEvent("restartRequested") else: self.quit() def resizeScreen(self, width, height): """ Resize the game screen. @param width: New width in pixels @param height: New height in pixels """ self.view.setGeometry((0, 0, width, height)) self.svg.setGeometry((0, 0, width, height)) def startWorld(self, players, maxplayers = None, gameMode = 0, multiMode = 0, allowGuitar = True, allowDrum = True, allowMic = False, tutorial = False): self.world = World(self, players, maxplayers, gameMode, multiMode, allowGuitar, allowDrum, allowMic, tutorial) def finishGame(self): if not self.world: Log.notice("GameEngine.finishGame called before World created.") return self.world.finishGame() self.world = None self.gameStarted = False self.view.pushLayer(self.mainMenu) def loadImgDrawing(self, target, name, fileName, textureSize = None): """ Load an SVG drawing synchronously. 
        @param target: An object that will own the drawing
        @param name: The name of the attribute the drawing will be assigned to
        @param fileName: The name of the file in the data directory
        @param textureSize: Either None or (x, y), in which case the file
                            will be rendered to an x by y texture
        @return: L{ImgDrawing} instance
        """
        return self.data.loadImgDrawing(target, name, fileName, textureSize)

    #volshebnyi
    def drawStarScore(self, screenwidth, screenheight, xpos, ypos, stars, scale = None, horiz_spacing = 1.2, space = 1.0, hqStar = False, align = LEFT):
        """
        Draw the row of five star images that represents a score rating.

        @param screenwidth:   Viewport width in pixels (used to scale x coords)
        @param screenheight:  Viewport height in pixels (used to scale y coords)
        @param xpos:          Normalized x position of the star row
        @param ypos:          Normalized y position of the star row
        @param stars:         Star rating; > 5 selects the perfect/FC star art,
                              7 with fcStars enabled selects the FC star
        @param scale:         Star scale; falsy or sub-minimum values are
                              clamped up to the 0.02 minimum
        @param horiz_spacing: Horizontal spacing factor between stars
        @param space:         Extra spacing factor (applied as space**4 to x)
        @param hqStar:        If True use the high-quality star art
        @param align:         LEFT/CENTER/RIGHT alignment of the 5-star row
        """
        minScale = 0.02
        w = screenwidth
        h = screenheight
        # Clamp the scale to the minimum (also catches scale=None / 0).
        if not scale:
            scale = minScale
        elif scale < minScale:
            scale = minScale
        # Pick the "all five stars" art used by the stars > 5 branch below.
        if self.data.fcStars and stars == 7:
            star = self.data.starFC
        else:
            star = self.data.starPerfect
        wide = scale * horiz_spacing
        if align == CENTER: #center - akedrou (simplifying the alignment...)
            xpos -= (2 * wide)
        elif align == RIGHT: #right
            xpos -= (4 * wide)
        if stars > 5:
            # Perfect / FC rating: draw the same star image five times,
            # optionally tinted when the theme masks stars.
            for j in range(5):
                if self.data.maskStars:
                    if self.data.theme == 2:
                        self.drawImage(star, scale = (scale,-scale), coord = (w*(xpos+wide*j)*space**4,h*ypos), color = (1, 1, 0, 1), stretched=11)
                    else:
                        self.drawImage(star, scale = (scale,-scale), coord = (w*(xpos+wide*j)*space**4,h*ypos), color = (0, 1, 0, 1), stretched=11)
                else:
                    self.drawImage(star, scale = (scale,-scale), coord = (w*(xpos+wide*j)*space**4,h*ypos), stretched=11)
        else:
            # Normal rating: first `stars` slots get the filled star art,
            # the rest get the empty star art (hqStar selects the HQ set).
            for j in range(5):
                if j < stars:
                    if hqStar:
                        star = self.data.star4
                    else:
                        star = self.data.star2
                else:
                    if hqStar:
                        star = self.data.star3
                    else:
                        star = self.data.star1
                self.drawImage(star, scale = (scale,-scale), coord = (w*(xpos+wide*j)*space**4,h*ypos), stretched=11)

    def drawImage(self, image, scale = (1.0, -1.0), coord = (0, 0), rot = 0, \
                  color = (1,1,1,1), rect = (0,1,0,1), stretched = 0, fit = 0, \
                  alignment = CENTER, valignment = 1):
        """
        Draws the image/surface to screen

        @param image: The openGL surface
        @param scale: Scale factor (between 0.0 and 1.0, second value must be
                      negative due to texture flipping)
        @param coord: Where the image will be translated to on the screen
        @param rot: How many degrees it will be rotated
        @param color: The color of the image (values are between 0.0 and 1.0)
                      (can have 3 values or 4, if 3 are given the alpha is
                      automatically set to 1.0)
        @param rect: The surface rectangle, this is used for cropping the texture
        @param stretched: Stretches the image in one of 5 ways according to
                          the following passed values:
                            1) fits it to the width of the viewport
                            2) fits it to the height of the viewport
                            11) fits it to the width of the viewport and scales
                                the height while keeping the aspect ratio
                            12) fits it to the height of the viewport and scales
                                the width while keeping the aspect ratio
                            0) the image keeps the size passed by scale
                          Any other value stretches it so it fits the whole
                          viewport
        @param fit: Adjusts the texture so the coordinate for the y-axis
                    placement can be on the top side (1), bottom side (2), or
                    center point (any other value) of the image
        @param alignment: Adjusts the texture so the coordinate for x-axis
                          placement can either be on the left side (0), center
                          point (1), or right (2) side of the image
        @param valignment: Adjusts the texture so the coordinate for y-axis
                           placement can either be on the bottom side (0),
                           center point (1), or top (2) side of the image
        """
        if not isinstance(image, ImgDrawing):
            return

        width, height = scale
        x, y = coord

        # Convert the scale factors into viewport-relative sizes according to
        # the requested stretch mode (see docstring for the mode values).
        if stretched == 1: # fit to width
            width = width / image.pixelSize[0] * self.view.geometry[2]
        elif stretched == 2: # fit to height
            height = height / image.pixelSize[1] * self.view.geometry[3]
        elif stretched == 11: # fit to width and keep ratio
            width = width / image.pixelSize[0] * self.view.geometry[2]
            height = height / image.pixelSize[0] * self.view.geometry[2]
        elif stretched == 12: # fit to height and keep ratio
            width = width / image.pixelSize[1] * self.view.geometry[3]
            height = height / image.pixelSize[1] * self.view.geometry[3]
        elif not stretched == 0: # fit to screen
            width = width / image.pixelSize[0] * self.view.geometry[2]
            height = height / image.pixelSize[1] * self.view.geometry[3]

        # Shift y by half the scaled image height so the given y lands on the
        # requested edge; the /480.0 factor normalizes to the reference
        # 480-pixel-high viewport.
        if fit == 1: #y is on top (not center)
            y = y - ((image.pixelSize[1] * abs(scale[1]))*.5*(self.view.geometry[3]/480.0))
        elif fit == 2: #y is on bottom
            y = y + ((image.pixelSize[1] * abs(scale[1]))*.5*(self.view.geometry[3]/480.0))

        # Push all computed parameters into the drawing object, then render.
        image.setRect(rect)
        image.setScale(width, height)
        image.setPosition(x, y)
        image.setAlignment(alignment)
        image.setVAlignment(valignment)
        image.setAngle(rot)
        image.setColor(color)
        image.draw()

    #blazingamer
    def draw3Dtex(self, image, vertex, texcoord, coord = None, scale = None, rot = None, color = (1,1,1), multiples = False, alpha = False, depth = False, vertscale = 0):
        '''
        Simplifies tex rendering

        @param image: self.xxx - tells the system which image/resource should
                      be mapped to the plane
        @param vertex: (Left, Top, Right, Bottom) - sets the points that define
                       where the plane will be drawn
        @param texcoord: (Left, Top, Right, Bottom) - sets where the texture
                         should be drawn on the plane
        @param coord: (x,y,z) - where on the screen the plane will be rendered
                      within the 3d field
        @param scale: (x,y,z) - scales a glplane how far in each direction
        @param rot: (degrees, x-axis, y-axis, z-axis) - a digit in the axis is
                    how many times you want to rotate degrees around that axis
        @param color: (r,g,b) - sets the color of the image when rendered:
                      0 = No Color, 1 = Full color (a 4th alpha value is also
                      accepted; otherwise alpha defaults to 1)
        @param multiples: True/False - defines whether or not there should be
                          multiples of the plane drawn at the same time; only
                          really used with the rendering of the notes, keys,
                          and flames
        @param alpha: True/False - defines whether or not the image should have
                      black turned into transparent; only really used with
                      hitglows and flames
        @param depth: True/False - sets the depth by which the object is
                      rendered; only really used by keys and notes
        @param vertscale: # - changes the yscale when setting vertex points;
                          only really used by notes
        '''
        if not isinstance(image, ImgDrawing):
            return

        # Additive blending makes black texels transparent (hitglows/flames).
        if alpha == True:
            glBlendFunc(GL_SRC_ALPHA, GL_ONE)

        # One RGBA row per corner of the quad; alpha defaults to 1 when the
        # caller passed an RGB triple.
        if len(color) == 4:
            col_array = np.array([[color[0],color[1],color[2], color[3]],
                                  [color[0],color[1],color[2], color[3]],
                                  [color[0],color[1],color[2], color[3]],
                                  [color[0],color[1],color[2], color[3]]], dtype=np.float32)
        else:
            col_array = np.array([[color[0],color[1],color[2], 1],
                                  [color[0],color[1],color[2], 1],
                                  [color[0],color[1],color[2], 1],
                                  [color[0],color[1],color[2], 1]], dtype=np.float32)

        glEnable(GL_TEXTURE_2D)
        image.texture.bind()

        # Isolate the transform so repeated draws don't accumulate.
        if multiples == True:
            glPushMatrix()

        if coord != None:
            glTranslate(coord[0], coord[1], coord[2])
        if rot != None:
            glRotate(rot[0], rot[1], rot[2], rot[3])
        if scale != None:
            glScalef(scale[0], scale[1], scale[2])

        if depth == True:
            glDepthMask(1)

        # Expand (Left, Top, Right, Bottom) tuples into the 4 corner vertices
        # of a triangle strip; callers may also pass prebuilt ndarrays.
        if not isinstance(vertex, np.ndarray):
            vertex = np.array(
                [[ vertex[0],  vertscale, vertex[1]],
                 [ vertex[2],  vertscale, vertex[1]],
                 [ vertex[0], -vertscale, vertex[3]],
                 [ vertex[2], -vertscale, vertex[3]]], dtype=np.float32)

        if not isinstance(texcoord, np.ndarray):
            texcoord = np.array(
                [[texcoord[0], texcoord[1]],
                 [texcoord[2], texcoord[1]],
                 [texcoord[0], texcoord[3]],
                 [texcoord[2], texcoord[3]]], dtype=np.float32)

        cmgl.drawArrays(GL_TRIANGLE_STRIP, vertices=vertex, colors=col_array, texcoords=texcoord)

        # Restore the GL state changed above, in reverse order.
        if depth == True:
            glDepthMask(0)

        if multiples == True:
            glPopMatrix()

        glDisable(GL_TEXTURE_2D)

        if alpha == True:
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

    #glorandwarf: renamed to retrieve the path of the file
    def fileExists(self, fileName):
        # Thin delegation to the data object's file lookup.
        return self.data.fileExists(fileName)

    def getPath(self, fileName):
        # Thin delegation: resolve fileName to its path in the data directory.
        return self.data.getPath(fileName)

    def loading(self):
        """Loading state loop.

        Runs scheduler tasks each frame while resources load; once the
        essential resources are ready it shows the loading screen (once),
        pushes the startup layer, and switches self.mainloop over to main().
        """
        done = self.doRun()
        self.clearScreen()
        if self.data.essentialResourcesLoaded():
            if not self.loadingScreenShown:
                self.loadingScreenShown = True
                Dialogs.showLoadingScreen(self, self.data.resourcesLoaded)
                if self.startupLayer:
                    self.view.pushLayer(self.startupLayer)
                # Hand subsequent iterations over to the main state loop.
                self.mainloop = self.main
            self.view.render()
        self.video.flip()
        return done

    def clearScreen(self):
        # Clear the frame to the theme's background color.
        self.svg.clear(*self.theme.backgroundColor)

    def addTask(self, task, synchronized = True):
        """
        Add a task to the engine.

        @param task: L{Task} to add
        @type synchronized: bool
        @param synchronized: If True, the task will be run with small
                             timesteps tied to the engine clock. Otherwise,
                             the task will be run once per frame.
        """
        if synchronized:
            queue = self.tasks
        else:
            queue = self.frameTasks

        # Avoid double-registering; only notify the task on first add.
        if not task in queue:
            queue.append(task)
            task.started()

    def removeTask(self, task):
        """
        Remove a task from the engine.

        @param task: L{Task} to remove
        """
        queues = self._getTaskQueues(task)
        for q in queues:
            q.remove(task)
        # Only notify the task if it was actually registered somewhere.
        if queues:
            task.stopped()

    def _getTaskQueues(self, task):
        # Return every queue (synchronized and per-frame) containing task.
        queues = []
        for queue in [self.tasks, self.frameTasks]:
            if task in queue:
                queues.append(queue)
        return queues

    def pauseTask(self, task):
        """
        Pause a task.

        @param task: L{Task} to pause
        """
        self.paused.append(task)

    def resumeTask(self, task):
        """
        Resume a paused task.

        @param task: L{Task} to resume
        """
        self.paused.remove(task)

    def enableGarbageCollection(self, enabled):
        """
        Enable or disable garbage collection whenever a random garbage
        collection run would be undesirable. Disabling the garbage collector
        has the unfortunate side-effect that your memory usage will skyrocket.
        """
        if enabled:
            gc.enable()
        else:
            gc.disable()

    def collectGarbage(self):
        """
        Run a garbage collection run.
        """
        gc.collect()

    def _runTask(self, task, ticks = 0):
        # Skip paused tasks; track the running task in self.currentTask.
        if not task in self.paused:
            self.currentTask = task
            task.run(ticks)
            self.currentTask = None

    def main(self):
        """Main state loop."""
        done = self.doRun()
        self.clearScreen()
        self.view.render()
        if self.debugLayer:
            self.debugLayer.render(1.0, True)
        self.video.flip()
        # evilynux - Estimate the rendered frames per second.
        self.frames = self.frames+1
        # Estimate every 120 frames when highpriority is True.
        # Estimate every 2*config.fps when highpriority is False,
        # if you are on target, that should be every 2 seconds.
        if( not self.priority and self.frames == (self.fps << 1) ) or ( self.priority and self.frames == 120 ):
            self.fpsEstimate = self.clock.get_fps()
            # evilynux - Printing on the console with a frozen binary may cause a crash.
            if self.show_fps and not Version.isWindowsExe():
                print("%.2f fps" % self.fpsEstimate)
            self.frames = 0
        return done

    def doRun(self):
        """Run one cycle of the task scheduler engine."""
        if not self.frameTasks and not self.tasks:
            return False

        # Per-frame tasks run once with no tick; synchronized tasks receive
        # the elapsed clock time, then the clock is throttled to self.fps.
        for task in self.frameTasks:
            self._runTask(task)
        tick = self.clock.get_time()
        for task in self.tasks:
            self._runTask(task, tick)
        self.clock.tick(self.fps)
        return True

    def run(self):
        # Dispatch to the current state loop (loading() or main()).
        return self.mainloop()