def _reflash(self, path):
    """
    Execute the upgrade operation in another process.

    The SSH connection may hang indefinitely while reflashing and would
    block the program; setting a timeout to `exec_command` doesn't seem
    to take effect on some OpenWRT versions, so at least we can stop the
    process using `subprocess.join(timeout=self.UPGRADE_TIMEOUT)`.
    """
    self.disconnect()
    command = self.get_upgrade_command(path)

    # FIX: the worker previously accepted an unused `path` parameter;
    # the command string is captured via closure, so only the connection
    # and the timeout are needed.
    def upgrade(conn, timeout):
        conn.connect()
        conn.exec_command(command, timeout=timeout)
        conn.disconnect()

    subprocess = Process(target=upgrade, args=[self, self.UPGRADE_TIMEOUT])
    subprocess.start()
    self.log('Upgrade operation in progress...')
    subprocess.join(timeout=self.UPGRADE_TIMEOUT)
    self.log(
        f'SSH connection closed, will wait {self.RECONNECT_DELAY} seconds before '
        'attempting to reconnect...')
    sleep(self.RECONNECT_DELAY)
    # kill the subprocess if it has hanged
    if subprocess.is_alive():
        subprocess.terminate()
        subprocess.join()
def _fork_and_submit_job(self, job):
    """
    Submit *job* to LSF in a forked child process and return the result
    received over a pipe.

    Raises:
        SubmitError: if the child reports an error string, or if the
            pipe closes before a result arrives (EOFError).
    """
    parent_pipe, child_pipe = Pipe()
    try:
        p = Process(target=self._submit_job_to_lsf,
                    args=(child_pipe, parent_pipe, job,))
        p.start()
    except:
        # Bare except is deliberate: close our pipe end on ANY failure
        # (including KeyboardInterrupt) and re-raise unchanged.
        parent_pipe.close()
        raise
    finally:
        # The child holds its own copy of this end; close ours either way.
        child_pipe.close()
    try:
        p.join()
        result = parent_pipe.recv()
        # NOTE(review): `basestring` implies Python 2 — confirm before
        # running this module under Python 3.
        if isinstance(result, basestring):
            raise SubmitError(result)
    except EOFError:
        raise SubmitError('Unknown exception submitting job')
    finally:
        parent_pipe.close()
    return result
def crawl(self, origin_oj, solution_id, problem_id, language, code,
          username, nickname, password):
    """Run the submission crawl in a child process, blocking until done."""
    worker = Process(
        target=self._crawl,
        args=[origin_oj, solution_id, problem_id, language, code,
              username, nickname, password],
    )
    worker.start()
    worker.join()
def crawl(self, origin_oj):
    """Crawl *origin_oj* in a dedicated process and wait for completion."""
    worker = Process(target=self._crawl, args=[origin_oj])
    worker.start()
    worker.join()
def start_process(self, command=None, billiard=False, *args, **kwargs):
    """
    Run a task either as a billiard Process or as a shell subprocess.

    :param command: command passed to subprocess.Popen when ``billiard``
        is False.
    :param billiard: when True, build a billiard ``Process`` from
        *args/**kwargs instead of spawning a shell subprocess.
    :raises CancelException: if the associated ExportTask was canceled.
    """
    from .models import ExportTask
    from ..tasks.export_tasks import TaskStates
    if billiard:
        proc = Process(daemon=False, *args, **kwargs)
        proc.start()
        self.store_pid(pid=proc.pid)
        proc.join()
        self.exitcode = proc.exitcode
    else:
        proc = subprocess.Popen(command, **kwargs)
        (self.stdout, self.stderr) = proc.communicate()
        self.store_pid(pid=proc.pid)
        self.exitcode = proc.wait()
    # We need to close the existing connection because the logger could be
    # using a forked process which will be invalid and throw an error.
    connection.close()
    export_task = ExportTask.objects.get(uid=self.task_uid)
    if export_task.status == TaskStates.CANCELED.value:
        from ..tasks.exceptions import CancelException
        raise CancelException(
            task_name=export_task.export_provider_task.name,
            user_name=export_task.cancel_user.username)
def _reflash(self, path):
    """
    Run the sysupgrade in a separate process.

    The SSH connection may hang indefinitely while reflashing and would
    block the program; a timeout on `exec_command` doesn't seem to take
    effect, so at least we can stop the worker via
    `join(timeout=self.UPGRADE_TIMEOUT)`.
    """
    def upgrade(conn, path, timeout):
        conn.connect()
        conn.exec_command('sysupgrade -v -c {0}'.format(path), timeout=timeout)
        conn.close()

    worker = Process(target=upgrade, args=[self, path, self.UPGRADE_TIMEOUT])
    worker.start()
    self.log('Upgrade operation in progress...')
    worker.join(timeout=self.UPGRADE_TIMEOUT)
    self.log('SSH connection closed, will wait {0} seconds before '
             'attempting to reconnect...'.format(self.SLEEP_TIME))
    sleep(self.SLEEP_TIME)
    # terminate the worker if it is still hanging around
    if worker.is_alive():
        worker.terminate()
        worker.join()
def clone_with_timeout(src: str, dest: str, clone_func: Callable[[], None], timeout: float) -> None:
    """Clone a repository, enforcing a hard timeout on the worker process.

    Args:
        src: clone source
        dest: clone destination
        clone_func: callable that does the actual cloning
        timeout: timeout in seconds
    """
    errors: Queue = Queue()
    worker = Process(target=_clone_task, args=(clone_func, errors))
    worker.start()
    worker.join(timeout)

    if worker.is_alive():
        worker.terminate()
        # Give it literally a second (in successive steps of 0.1 second),
        # then kill it.
        # Can't use `process.join(1)` here, billiard appears to be bugged
        # https://github.com/celery/billiard/issues/270
        was_killed = False
        for _ in range(10):
            time.sleep(0.1)
            if not worker.is_alive():
                break
        else:
            was_killed = True
            os.kill(worker.pid, signal.SIGKILL)
        raise CloneTimeout(src, timeout, was_killed)

    if not errors.empty():
        raise CloneFailure(src, dest, errors.get())
def display_graph(self, graphdef):
    '''display a graph'''
    # Echo the expression being graphed: direct console in single-process
    # mode, pipe when running as a child process.
    if 'mestate' in globals():
        self.mestate.console.write("Expression: %s\n" % ' '.join(graphdef.expression.split()))
    else:
        self.mestate.child_pipe_send_console.send("Expression: %s\n" % ' '.join(graphdef.expression.split()))
    #mestate.mlog.reduce_by_flightmodes(mestate.flightmode_selections)
    #setup the graph, then pass to a new process and display
    self.mg = grapher.MavGraph()
    self.mg.set_marker(self.mestate.settings.marker)
    self.mg.set_condition(self.mestate.settings.condition)
    self.mg.set_xaxis(self.mestate.settings.xaxis)
    self.mg.set_linestyle(self.mestate.settings.linestyle)
    self.mg.set_show_flightmode(self.mestate.settings.show_flightmode)
    self.mg.set_legend(self.mestate.settings.legend)
    self.mg.add_mav(self.mestate.mlog)
    for f in graphdef.expression.split():
        self.mg.add_field(f)
    self.mg.process(self.mestate.flightmode_selections, self.mestate.mlog._flightmodes)
    self.lenmavlist = len(self.mg.mav_list)
    # NOTE(review): presumably forking is disabled on macOS because GUI
    # frameworks are not fork-safe there — confirm.
    if platform.system() == 'Darwin':
        forking_enable(False)
    #Important - mg.mav_list is the full logfile and can be very large in size
    #To avoid slowdowns in Windows (which copies the vars to the new process)
    #We need to empty this var when we're finished with it
    self.mg.mav_list = []
    child = Process(target=self.mg.show, args=[self.lenmavlist, ])
    child.start()
    self.mestate.mlog.rewind()
def fetch_models(channel_name, tuple_type, authorized_types, input_models, directory):
    """Fetch each input model in its own process; raise if any worker failed."""
    workers = []
    exceptions = []

    # Close django connection to force each Process to create its own as
    # django orm connection is not fork safe https://code.djangoproject.com/ticket/20562
    from django import db
    db.connections.close_all()

    for input_model in input_models:
        args = (channel_name, tuple_type, authorized_types, input_model, directory)
        worker = Process(target=fetch_model, args=args)
        workers.append((worker, args))
        worker.start()

    # Wait for every worker; a non-zero exit code marks a failed fetch.
    for worker, args in workers:
        worker.join()
        if worker.exitcode != 0:
            exceptions.append(Exception(f'fetch model failed for args {args}'))

    # Close django old connections to avoid potential leak
    db.close_old_connections()

    if exceptions:
        raise Exception(exceptions)
def crawl(*args, **kwargs):
    """Run a scrapy crawl in a child process so the reactor can restart."""
    crawler = CrawlerProcess(get_project_settings())
    crawler.crawl(*args, **kwargs)
    worker = Process(target=crawler.start)
    worker.start()
    worker.join()
    crawler.stop()
def main():
    """Capture entry point: parse CLI args, choose camera sources, set up
    shared IPC state, spawn the eye subprocess and run world capture in
    the main process."""
    # To assign camera by name: put string(s) in list
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='GUI for gaze tracking and pupillometry')
    parser.add_argument('-eye', dest='eye_file', type=str, help="Work with existing video recording, instead of live feed", default='')
    parser.add_argument('-world', dest='world_file', type=str, help="Work with existing video recording, instead of live feed", default='')
    args = parser.parse_args()
    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    if args.eye_file == '':
        eye_src = ["UI154xLE-M", "USB Camera-B4.09.24.1", "FaceTime Camera (Built-in)", "Microsoft", "6000","Integrated Camera"]
        # to assign cameras directly, using integers as demonstrated below
        # eye_src = 1
    else:
        # print "Using provide file: %s" % args.filename
        eye_src = args.eye_file
    if args.world_file == '':
        world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]
        # to assign cameras directly, using integers as demonstrated below
        # world_src = 0
    else:
        world_src = args.world_file
    # Camera video size in pixels (width,height)
    eye_size = (260,216) #(1280,1024)
    world_size = (640,480)
    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)
    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # this value will be substracted form the capture timestamp
    g_pool.timebase = RawValue(c_double,0)
    # make some constants avaiable
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))
    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)
    # world capture runs in the main process and blocks until quit
    world(g_pool,world_src,world_size)
    # Exit / clean-up
    p_eye.join()
def display_graph(self, graphdef):
    '''display a graph'''
    # Echo the expression being graphed: direct console in single-process
    # mode, pipe when running as a child process.
    if 'mestate' in globals():
        self.mestate.console.write("Expression: %s\n" % ' '.join(graphdef.expression.split()))
    else:
        self.mestate.child_pipe_send_console.send(
            "Expression: %s\n" % ' '.join(graphdef.expression.split()))
    #mestate.mlog.reduce_by_flightmodes(mestate.flightmode_selections)
    #setup the graph, then pass to a new process and display
    self.mg = grapher.MavGraph()
    self.mg.set_marker(self.mestate.settings.marker)
    self.mg.set_condition(self.mestate.settings.condition)
    self.mg.set_xaxis(self.mestate.settings.xaxis)
    self.mg.set_linestyle(self.mestate.settings.linestyle)
    self.mg.set_show_flightmode(self.mestate.settings.show_flightmode)
    self.mg.set_legend(self.mestate.settings.legend)
    self.mg.add_mav(self.mestate.mlog)
    for f in graphdef.expression.split():
        self.mg.add_field(f)
    self.mg.process(self.mestate.flightmode_selections, self.mestate.mlog._flightmodes)
    self.lenmavlist = len(self.mg.mav_list)
    # NOTE(review): presumably forking is disabled on macOS because GUI
    # frameworks are not fork-safe there — confirm.
    if platform.system() == 'Darwin':
        forking_enable(False)
    #Important - mg.mav_list is the full logfile and can be very large in size
    #To avoid slowdowns in Windows (which copies the vars to the new process)
    #We need to empty this var when we're finished with it
    self.mg.mav_list = []
    child = Process(target=self.mg.show, args=[
        self.lenmavlist,
    ])
    child.start()
    self.mestate.mlog.rewind()
def crawl(self, origin_oj, username):
    """Crawl *username* on *origin_oj* in a child process; block until done."""
    worker = Process(target=self._crawl, args=[origin_oj, username])
    worker.start()
    worker.join()
def screenshot(
    task,
    url,
    width=settings.BASE_WEBPAGE_PREVIEW_WIDTH,
    height=settings.BASE_WEBPAGE_PREVIEW_HEIGHT,
    lifetime=settings.BASE_WEBPAGE_PREVIEW_LIFETIME,
):
    """
    Render a screenshot of *url* at width x height in a worker process,
    store the PNG in the cache, and return the cache key (or None if the
    renderer produced nothing).
    """
    url_id = sha256()
    url_id.update(url.encode("utf-8"))
    # NOTE(review): bytes(width) yields `width` zero bytes, not the digits —
    # it still distinguishes sizes, but confirm this was intended.
    url_id.update(bytes(width))
    url_id.update(bytes(height))
    key = url_id.hexdigest()
    logger.info(f"Screenshot for {url} @ {width}x{height}: {key}")
    if key in cache:
        logger.info(f"Found {key} in cache.")
        return key
    logger.info(f"Locking {key}")
    lock = cache.lock(key)
    lock.acquire()
    # BUG FIX: the lock was acquired but never released, so any later
    # request for the same key would block forever.
    try:
        logger.info("Starting WebEngineScreenshot app")
        parent_conn, child_conn = Pipe()
        p = Process(target=WebpageTasks.worker, args=(url, width, height, child_conn))
        p.start()
        image = parent_conn.recv()
        p.join()
        if not image:
            logger.info("WebEngineScreenshot app returned nothing")
            return None
        logger.info("Writing WebEngineScreenshot app result to cache")
        cache.set(key, image, timeout=lifetime)
        logger.info("Removing WebEngineScreenshot app singleton")
        return key
    finally:
        lock.release()
def check_user_presence():
    """Kick off a background presence scan for each device whose lock is free."""
    for device in Device.objects.all():
        lock_id = 'scan-device-lock-{}'.format(device.id)
        if not acquire_lock(lock_id):
            continue
        scanner = Process(target=presence.scan_device, args=(device, lock_id))
        scanner.start()
    return "ok"
def main():
    """Wire the tick generator to the signal generator and print its output."""
    timePipe, sigPipe = Pipe()
    q = Queue()
    clock = Process(target=tickTock, args=(timePipe, ))
    testSignal = Process(target=signal, args=(q, sigPipe, [1]))
    # BUG FIX: the clock process was created but never started, so the
    # signal process would block forever waiting on its end of the pipe.
    clock.start()
    testSignal.start()
    while True:
        print(q.get())
def cmd_save(args):
    '''save a graph'''
    # Hand the save work to a child process so the UI stays responsive.
    saver = Process(
        target=save_process,
        args=[
            mestate.last_graph,
            mestate.child_pipe_send_console,
            mestate.child_pipe_send_graph,
            mestate.status.msgs,
        ],
    )
    saver.start()
def test_set_pdeathsig(self):
    """Child must die (via pdeathsig) once its parent is terminated."""
    return_pid = Value('i')
    p = Process(target=parent_task, args=(return_pid,))
    p.start()
    sleep(3)  # wait for setting pdeathsig
    p.terminate()
    sleep(3)  # wait for process termination
    # FIX: dropped the unused `proc = ...` binding; constructing the
    # psutil.Process is itself what raises NoSuchProcess.
    with pytest.raises(psutil.NoSuchProcess):
        psutil.Process(return_pid.value)
def test_set_pdeathsig(self):
    """Child must die (via pdeathsig) once its parent is terminated."""
    return_pid = Value('i')
    p = Process(target=parent_task, args=(return_pid, ))
    p.start()
    sleep(3)  # wait for setting pdeathsig
    p.terminate()
    sleep(3)  # wait for process termination
    # FIX: dropped the unused `proc = ...` binding; constructing the
    # psutil.Process is itself what raises NoSuchProcess.
    with pytest.raises(psutil.NoSuchProcess):
        psutil.Process(return_pid.value)
def telnet_client(server_id, port):
    """(Re)start the telnet client for *server_id*, killing any previous one."""
    key = 'server-{0}-pid'.format(server_id)
    previous_pid = cache.get(key)
    # Terminate the prior client, but only if that PID is still alive.
    if previous_pid and previous_pid in psutil.get_pid_list():
        os.kill(previous_pid, signal.SIGTERM)
    client = Process(target=TelnetClient, args=(port, 'uptee', server_id))
    client.start()
    cache.set(key, client.pid)
    client.join()
def crawl(self, oj, solution_id, problem_id, language, code, username, password):
    """Submit-and-crawl in a separate process, blocking until it exits."""
    worker = Process(
        target=self._crawl,
        args=[oj, solution_id, problem_id, language, code, username, password],
    )
    worker.start()
    worker.join()
def screenshot_endpoint(
    self,
    ip_address=None,
    port=None,
    hostname=None,
    use_ssl=False,
    use_sni=False,
    path="/",
    in_separate_process=False,
):
    """
    Take a screenshot of the given endpoint, save it to a local temporary file, and return
    the local file path.
    :param ip_address: The IP address where the web service resides.
    :param port: The port where the web service resides.
    :param hostname: The hostname to request.
    :param use_ssl: Whether or not to use SSL to request the endpoint.
    :param use_sni: Whether or not the endpoint uses SNI.
    :param path: The path of the resource to screenshot.
    :param in_separate_process: Whether or not to take the screenshot in a separate process. This
    is to address the incredibly long time that the Selenium webdriver can take when it hangs.
    :return: A tuple containing (1) the local file path where the screenshot was saved and (2)
    whether or not the screenshot was taken successfully.
    """
    logger.debug(
        "Now attempting to take a screenshot of the web service at %s:%s (%s). Hostname is %s, SNI support is %s."
        % (ip_address, port, "using SSL" if use_ssl else "plain HTTP", hostname, use_sni))
    self.__set_endpoint(
        ip_address=ip_address,
        port=port,
        hostname=hostname,
        use_ssl=use_ssl,
        use_sni=use_sni,
        path=path,
    )
    self._output_file_path = self.get_temporary_file_path()
    if in_separate_process:
        # Run the screenshot in a child process so a hung webdriver cannot
        # block this process indefinitely.
        process = Process(target=self.__take_screenshot)
        try:
            process.start()
            # Wait for the screenshot delay plus a join grace period.
            process.join(config.selenium_screenshot_delay + config.inspection_screenshot_join_timeout)
        except IOError as e:
            # EINTR can interrupt join(); anything else is unexpected.
            if e.errno == errno.EINTR:
                logger.warning("Interrupted system call error received.")
            else:
                raise e
        finally:
            # Child outlived its deadline: kill it so we never leak it.
            if process.is_alive():
                print("PROCESS IS ALIVE - PID IS %s" % (process.pid, ))
                os.kill(process.pid, signal.SIGTERM)
    else:
        self.__take_screenshot()
    return self.output_file_path, FilesystemHelper.does_file_exist(
        self.output_file_path)
def crawl(self, oj, run_id):
    """Fetch run *run_id* from *oj* in a child process; block until done."""
    worker = Process(target=self._crawl, args=[oj, run_id])
    worker.start()
    worker.join()
def crawl(self, number, passwd):
    """Run the crawl for *number*/*passwd* in a child process and wait."""
    worker = Process(target=self._crawl, args=[number, passwd])
    worker.start()
    worker.join()
def crawl(self, origin_oj, username):
    """Crawl *username* on *origin_oj* in a child process; block until done."""
    worker = Process(target=self._crawl, args=[origin_oj, username])
    worker.start()
    worker.join()
class NewBilliardMPWorker(CommonWorker):
    """Worker that runs `common_run` in a daemonized billiard Process."""

    def __init__(self, callback=None):
        # BUG FIX: the original called super(NewBilliardMPWorker).__init__(callback).
        # `super(Cls)` (without `self`) builds an *unbound* super object, so
        # CommonWorker.__init__ never ran and `callback` was silently dropped.
        super(NewBilliardMPWorker, self).__init__(callback)

    def start(self):
        """Launch the shared run loop in a background daemon process."""
        self.task = Process(target=common_run, daemon=True)
        self.task.start()

    def stop(self):
        # No explicit teardown: the daemon process dies with the parent.
        pass
def test_set_pdeathsig(self):
    """Terminating the parent must propagate (pdeathsig) to the child."""
    success = "done"
    q = Queue()
    parent = Process(target=parent_task, args=(q, success))
    parent.start()
    # The child reports its PID first, then the success token after the
    # parent's death signal fires.
    child_proc = psutil.Process(q.get(timeout=3))
    try:
        parent.terminate()
        assert q.get(timeout=3) == success
    finally:
        child_proc.terminate()
def crawl(self, oj, username, password):
    """Crawl *oj* with the given credentials in a child process; wait."""
    worker = Process(target=self._crawl, args=[oj, username, password])
    worker.start()
    worker.join()
def main():
    """Capture entry point: configure video sources, build shared IPC
    state, spawn one eye process per eye (binocular-aware) plus the world
    process, then wait for them all to exit."""
    # To assign camera by name: put string(s) in list
    eye_cam_names = ["USB 2.0 Camera","Microsoft", "6000","Integrated Camera","HD USB Camera"]
    world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]
    eye_src = (eye_cam_names,0),(eye_cam_names,1) #first match for eye0 and second match for eye1
    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 4 , 5 #second arg will be ignored for monocular eye trackers
    # world_src = 1
    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Downloads/000/eye0.mkv' , '/Users/mkassner/Downloads/eye.avi'
    # world_src = "/Users/mkassner/Downloads/000/world.mkv"
    # Camera video size in pixels (width,height)
    eye_size = (640,480)
    world_size = (1280,720)
    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)
    #g_pool holds variables. Only if added here they are shared across processes.
    g_pool = Global_Container()
    # Create and initialize IPC
    g_pool.pupil_queue = Queue()
    g_pool.quit = Value(c_bool,0)
    g_pool.timebase = Value(c_double,0)
    g_pool.eye_tx = []
    # make some constants avaiable
    g_pool.user_dir = user_dir
    g_pool.version = get_version(version_file)
    g_pool.app = 'capture'
    g_pool.binocular = binocular
    p_eye = []
    # one eye process per eye: two when binocular, one otherwise
    for eye_id in range(1+1*binocular):
        rx,tx = Pipe(False)
        p_eye += [Process(target=eye, args=(g_pool,eye_src[eye_id],eye_size,rx,eye_id))]
        g_pool.eye_tx += [tx]
        p_eye[-1].start()
    p_world = Process(target=world,args=(g_pool,world_src,world_size))
    # world(g_pool,world_src,world_size)
    p_world.start()
    p_world.join()
    # Exit / clean-up
    for p in p_eye:
        p.join()
def start_launch_servide_process(ld):
    """Starts a Launch Service process. To be called from subclasses.

    Args:
        ld : LaunchDescription obj.
    """
    # Create the LauchService and feed the LaunchDescription obj. to it.
    launch_service = LaunchService()
    launch_service.include_launch_description(ld)
    runner = Process(target=launch_service.run)
    # The daemon process is terminated automatically before the main
    # program exits, to avoid leaving orphaned processes running.
    runner.daemon = True
    runner.start()
def main():
    """Launcher: spawn the world process, then spawn eye processes on
    demand (per command received over the pipe) until the world process
    sends "Exit"."""
    # IPC
    pupil_queue = Queue()
    timebase = Value(c_double, 0)
    cmd_world_end, cmd_launcher_end = Pipe()
    com0 = Pipe(True)
    eyes_are_alive = Value(c_bool, 0), Value(c_bool, 0)
    com1 = Pipe(True)
    # one duplex pipe per eye: world keeps one end, each eye process the other
    com_world_ends = com0[0], com1[0]
    com_eye_ends = com0[1], com1[1]
    p_world = Process(
        target=world,
        args=(
            pupil_queue,
            timebase,
            cmd_world_end,
            com_world_ends,
            eyes_are_alive,
            user_dir,
            app_version,
            video_sources["world"],
        ),
    )
    p_world.start()
    while True:
        # block and listen for commands from world process.
        cmd = cmd_launcher_end.recv()
        if cmd == "Exit":
            break
        else:
            # any command other than "Exit" is the id of an eye to launch
            eye_id = cmd
            p_eye = Process(
                target=eye,
                args=(
                    pupil_queue,
                    timebase,
                    com_eye_ends[eye_id],
                    eyes_are_alive[eye_id],
                    user_dir,
                    app_version,
                    eye_id,
                    video_sources["eye%s" % eye_id],
                ),
            )
            p_eye.start()
    # wait for all spawned children before exiting
    for p in active_children():
        p.join()
    logger.debug("Laucher exit")
def run_player(self, graphdef):
    """Launch MavPlay on the current log in a separate process."""
    if 'mestate' in globals():
        self.mestate.console.write("Running Player...")
    else:
        self.mestate.child_pipe_send_console.send("Running Player...")
    self.player = player.MavPlay()
    self.player.add_mav(self.mlog)
    # forking is unsafe for GUI code on macOS; disable it there
    if platform.system() == 'Darwin':
        forking_enable(False)
    runner = Process(target=self.player.run)
    runner.start()
class CrawlerScript():
    """Drives a scrapy crawl in a child process, collecting scraped items
    via dispatcher signals and tracing progress to "t.txt"."""

    def __init__(self):
        self.process = None  # child Process running the crawl
        self.items = []      # items collected by the item_scraped handler
        self._count = 0      # running count of scraped items
        self.queue = None    # queue the signal handler pushes items onto
        self._init_signals()

    def _init_signals(self):
        # Hook scrapy dispatcher signals to the tracing callbacks below.
        dispatcher.connect(self._so, signals.spider_opened)
        dispatcher.connect(self._item_scraped, signals.item_scraped)
        dispatcher.connect(self._sc, signals.spider_closed)

    def _so(self):
        # NOTE(review): `self.x` is never assigned anywhere in this class —
        # this handler looks like it would raise AttributeError when the
        # spider opens; confirm.
        write_in_a_file('spider_opened 1', {'open': 'open!', 'x': self.x, 'process': self.process, 'process-pid': self.process and self.process.pid}, "t.txt")

    def _sc(self):
        write_in_a_file('spider_closed', {'scraped items': len(self.items)}, "t.txt")

    def _item_scraped(self, item, **kwargs):
        self._count = self._count + 1
        write_in_a_file('item scraped', {'count':self._count, 'item': item, 'kwargs':kwargs, 'process': self.process, 'process-pid': self.process and self.process.pid}, "t.txt")
        self.items.append(item)
        self.queue.put_nowait(item)

    def _crawl(self, queue, spider):
        # Runs in the child process: start the crawl, then push the
        # collected items back to the parent through *queue*.
        crawler = CrawlerProcess(get_project_settings())
        crawler.crawl(spider)
        write_in_a_file('signals', {'signals': dir(signals)}, 'task.txt')
        write_in_a_file('._crawl start', {'process': self.process, 'process-pid': self.process and self.process.pid, 'db': dir(db), 'db.connection': dir(db.connection)}, "t.txt")
        print(dir(db.connection))
        # Close the inherited Django connection: it is not fork safe.
        db.connection.close()
        crawler.start()
        crawler.stop()
        write_in_a_file('._crawl ended 1', {'qsize': self.queue.qsize() }, "t.txt")
        queue.put_nowait(self.items)
        write_in_a_file('._crawlended after q 2', {'qsize': queue.qsize()}, "t.txt")

    def crawl(self, spider):
        # NOTE(review): the local `queue` handed to the child is distinct
        # from `self.queue`; items placed on the child's copies never reach
        # the parent's `self.items`/`self.queue` — confirm intended.
        queue = Queue()
        self.queue = Queue()
        self.process = Process(target=self._crawl, args=(queue, spider))
        self.process.start()
        write_in_a_file('.crawl 1', {'process': self.process, 'process-pid': self.process and self.process.pid, 'queue': self.queue.qsize()}, "t.txt")
        self.process.join()
        write_in_a_file('.crawl 2', {'process': self.process, 'process-pid': self.process and self.process.pid, 'queue': self.queue.qsize()}, "t.txt")
def run_crawler_process(params: dict) -> Process:
    """
    Start scrapy spider from a separate process
    :param dict params: scrapy spider parameters
    :return: process instance
    """
    worker = Process(target=run_crawler, args=(params, ))
    worker.start()
    return worker
def run_crawler(params):
    """
    :param params: scrapy spider parameters
    :return: process instance
    """
    worker = Process(target=crawler_process, args=(params, ))
    worker.start()
    return worker
class LongCalculation(QProgressDialog):
    """
    Multiprocessing based worker for mesh and eigenvalue calculations.
    This is necessary to make sure GUI is not blocked while mesh is built,
    or when eigenvalue calculations are performed.
    Transformations do not need as much time, unless there is one
    implemented without numpy vectorized coordinate calculations.
    """
    # holds the worker's result once the calculation finishes
    res = None

    def __init__(self, fun, args, postprocess, job):
        """ Build multiprocessing queues and start worker. """
        super(LongCalculation, self).__init__(job, "Cancel", 0, 0)
        self.setModal(True)
        self.input = Queue()
        self.output = Queue()
        self.input.put((fun, args, postprocess))
        self.proc = Process(target=worker, args=(self.input, self.output))
        self.proc.start()
        # Poll the output queue every 10 ms on the GUI thread.
        self.timer = QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(10)

    def update(self):
        """ Check if worker is done, and close dialog. """
        try:
            out = self.output.get(block=False)
            # Strings are progress messages, not results.
            # NOTE(review): `basestring` implies Python 2 — confirm.
            if isinstance(out, basestring):
                self.setLabelText(out)
                return
            # None signals failure/cancellation.
            if out is None:
                self.done(0)
                return
            self.res = out
            self.timer.stop()
            self.proc.join()
            del self.proc
            self.done(1)
        except:
            # queue.Empty is expected on most ticks; swallowed silently.
            pass

    def cleanUp(self):
        """ Kill the running processes if cancelled/failed. """
        # NOTE(review): after a successful run `self.proc` was deleted in
        # update(), so `if self.proc` raises AttributeError here — confirm.
        if self.proc:
            while self.proc.is_alive():
                self.proc.terminate()
            del self.proc
        self.timer.stop()
def crawl_endpoint_to_file(
    self,
    ip_address=None,
    port=None,
    hostname=None,
    use_ssl=False,
    use_sni=False,
    start_urls=[],
    in_separate_process=True,
):
    """
    Start crawling the given endpoint using the given list of URLs and write the results
    to a local file.
    :param ip_address: The IP address to crawl.
    :param port: The port where the application resides.
    :param hostname: The hostname to submit alongside all requests to the remote endpoint.
    :param use_ssl: Whether or not to use SSL to connect to the remote web service.
    :param use_sni: Whether or not to use SNI to connect to the remote web service.
    :param start_urls: A list of URLs to start crawling from.
    :param in_separate_process: Whether or not to spawn off a separate process for the crawl.
    This enables us to call this method multiple times in the same process, as a Twisted
    reactor can only be started and stopped once per process.
    :return: A tuple containing (1) the string containing the local file path where crawling
    results are stored and (2) a ScrapyResultWrapper configured to process the contents of
    the file.
    """
    # NOTE(review): mutable default `start_urls=[]` is shared across calls
    # if ever mutated — confirm callers never append to it.
    temp_file_path = FilesystemHelper.get_temporary_file_path()
    local_file_path = "%s-%s-%s:%s" % (temp_file_path, self.bot_name, ip_address, port)
    spider_kwargs = {
        "input_ip_address": ip_address,
        "input_start_urls": start_urls,
        "input_file_path": local_file_path,
        "input_hostname": hostname,
        "input_use_ssl": use_ssl,
        "input_use_sni": use_sni,
        "input_port": port,
    }
    pipeline_settings = self.__get_local_storage_item_pipeline()
    requested_hostname = hostname if hostname is not None else ip_address
    settings = self.get_scrapy_settings(item_pipeline=pipeline_settings, hostname=requested_hostname)
    crawling_config = {
        "spider_kwargs": spider_kwargs,
        "settings": settings,
    }
    if in_separate_process:
        # Run the crawl in a child process: a Twisted reactor can only be
        # started/stopped once per process.
        process = Process(target=self.__crawl, kwargs=crawling_config)
        process.start()
        process.join()
        process.terminate()
    else:
        self.__crawl(**crawling_config)
    return local_file_path, ScrapyResultWrapper.from_file(local_file_path)
def main():
    """Capture entry point: pick camera sources, build shared IPC state,
    spawn the eye subprocess and run world capture in the main process."""
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000", "Integrated Camera"]
    world_src = [
        "Logitech Camera", "(046d:081d)", "C510", "B525", "C525", "C615",
        "C920", "C930e"
    ]
    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0
    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Pupil/datasets/p1-left/frames/test.avi'
    # world_src = "/Users/mkassner/Desktop/2014_01_21/000/world.avi"
    # Camera video size in pixels (width,height)
    eye_size = (640, 360)
    world_size = (1280, 720)
    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)
    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool, 0)
    # this value will be substracted form the capture timestamp
    g_pool.timebase = RawValue(c_double, 0)
    # make some constants avaiable
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool, eye_src, eye_size))
    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)
    # world capture runs in the main process and blocks until quit
    world(g_pool, world_src, world_size)
    # Exit / clean-up
    p_eye.join()
def main():
    """Capture entry point: pick camera sources, build shared IPC state,
    spawn the eye subprocess and run world capture in the main process."""
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000","Integrated Camera"]
    world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]
    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0
    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Pupil/datasets/p1-left/frames/test.avi'
    # world_src = "/Users/mkassner/Desktop/2014_01_21/000/world.avi"
    # Camera video size in pixels (width,height)
    eye_size = (640,360)
    world_size = (1280,720)
    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)
    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # this value will be substracted form the capture timestamp
    g_pool.timebase = RawValue(c_double,0)
    # make some constants avaiable
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))
    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)
    # world capture runs in the main process and blocks until quit
    world(g_pool,world_src,world_size)
    # Exit / clean-up
    p_eye.join()
def run(self, jobs):
    '''Start the Scrapy engine, and execute all jobs.

    Parms:
      jobs ([Job]) - one or more Job objects to be processed.
    '''
    # FIX: `collections.Iterable` was removed in Python 3.10; the ABC
    # lives in `collections.abc`.
    from collections.abc import Iterable
    if not isinstance(jobs, Iterable):
        jobs = [jobs]
    self.validate(jobs)
    # The crawl runs in its own process; caller may join/inspect it.
    p = Process(target=self._crawl, args=[jobs])
    p.start()
    return p
def cmd_map(args):
    '''map command'''
    from mavflightview import mavflightview_mav, mavflightview_options
    #mestate.mlog.reduce_by_flightmodes(mestate.flightmode_selections)
    #setup and process the map
    options = mavflightview_options()
    options.condition = mestate.settings.condition
    options._flightmodes = mestate.mlog._flightmodes
    options.show_flightmode_legend = mestate.settings.show_flightmode
    if len(args) > 0:
        options.types = ','.join(args)
    path, wp, fen, used_flightmodes, mav_type = mavflightview_mav(
        mestate.mlog, options, mestate.flightmode_selections)
    # Render the map in a child process so the console stays responsive.
    renderer = Process(target=map_process,
                       args=[path, wp, fen, used_flightmodes, mav_type, options])
    renderer.start()
    mestate.mlog.rewind()
def run_in_childprocess(target, codec=None, *args, **kwargs):
    """Run *target* in a child process and return its result.

    codec, when given, is an (encode, decode) pair: the child encodes the
    return value and we decode it here before returning.
    """
    assert codec is None or len(codec) == 2, codec
    queue = Queue()
    child = Process(target=_wrapper, args=(target, codec, queue, args, kwargs))
    child.start()
    # The child always puts an (exception, result) pair on the queue.
    error, result = queue.get()
    child.join()
    if error:
        raise error
    if codec:
        result = codec[1](result)
    return result
def test_value(self, raw=False):
    """Values written by a child process must be visible in the parent."""
    ctor = RawValue if raw else Value
    shared = [ctor(code, value) for code, value, _ in self.codes_values]
    # initial values round-trip through the shared memory ctypes
    for sv, cv in zip(shared, self.codes_values):
        assert sv.value == cv[1]
    worker = Process(target=self._test, args=(shared,))
    worker.daemon = True
    worker.start()
    worker.join()
    # the child rewrote each value to the third tuple element
    for sv, cv in zip(shared, self.codes_values):
        assert sv.value == cv[2]
def main():
    """Capture entry point (older variant): choose camera sources, build
    shared IPC state, spawn the eye subprocess, run world capture here."""
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000","Integrated Camera"]
    world_src = ["Logitech Camera","B525", "C525","C615","C920","C930e"]
    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0
    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = "/Users/mkassner/Pupil/datasets/eye2_fieldtest/eye 10.avi"
    # world_src = "/Users/mkassner/Downloads/2013_10_22_M25/000/world.avi"
    # Camera video size in pixels (width,height)
    eye_size = (640,360)
    world_size = (1280,720)
    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)
    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # make some constants avaiable
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))
    # Spawn subprocess:
    p_eye.start()
    # On Linux, we need to give the camera driver some time before requesting another camera.
    sleep(0.5)
    # On MacOS cameras using MJPG compression (world camera) need to run in the main process.
    world(g_pool,world_src,world_size)
    # Exit / clean-up
    p_eye.join()
def run(self, jobs):
    '''Start the Scrapy engine, and execute all jobs.  Return consolidated
    results in a single list.

    Parms:
      jobs ([Job]) - one or more Job objects to be processed.

    Returns:
      List of objects yielded by the spiders after all jobs have run.
    '''
    # FIX: `collections.Iterable` was removed in Python 3.10; the ABC
    # lives in `collections.abc`.
    from collections.abc import Iterable
    if not isinstance(jobs, Iterable):
        jobs = [jobs]
    self.validate(jobs)
    p = Process(target=self._crawl, args=[jobs])
    p.start()
    p.join()
    p.terminate()
    return self.results.get()
def start_process(self, command=None, billiard=False, *args, **kwargs):
    """Run work as a billiard Process or a shell subprocess, record its pid
    and exit code, then raise if the owning export task was canceled."""
    from eventkit_cloud.tasks.models import ExportTaskRecord
    from eventkit_cloud.tasks import TaskStates
    if billiard:
        worker = Process(daemon=False, *args, **kwargs)
        worker.start()
        self.store_pid(pid=worker.pid)
        worker.join()
        self.exitcode = worker.exitcode
    else:
        worker = subprocess.Popen(command, **kwargs)
        (self.stdout, self.stderr) = worker.communicate()
        self.store_pid(pid=worker.pid)
        self.exitcode = worker.wait()
    # We need to close the existing connection because the logger could be
    # using a forked process which will be invalid and throw an error.
    connection.close()
    export_task = ExportTaskRecord.objects.filter(uid=self.task_uid).first()
    if export_task and export_task.status == TaskStates.CANCELED.value:
        from eventkit_cloud.tasks.exceptions import CancelException
        raise CancelException(task_name=export_task.export_provider_task.name,
                              user_name=export_task.cancel_user.username)
def start(self):
    """Run self._crawl in its own process and wait for it to finish."""
    worker = Process(target=self._crawl)
    worker.start()
    worker.join()
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with videofile source.
    It uses a seperate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are build up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """
    def __init__(self,g_pool,menu_conf=None,mode="Show Markers and Frames"):
        super(Offline_Marker_Detector, self).__init__(g_pool)
        # NOTE(review): the original default was a mutable dict literal shared
        # across all instances; use a fresh per-instance dict with the same
        # values instead (passing an explicit menu_conf is unchanged).
        if menu_conf is None:
            menu_conf = {'pos':(300,200),'size':(300,300),'collapsed':False}
        self.menu_conf = menu_conf
        self.order = .2

        # all markers that are detected in the most recent frame
        self.markers = []

        # all registered surfaces
        if g_pool.app == 'capture':
            raise Exception('For Player only.')
        #in player we load from the rec_dir: but we have a couple options:
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.rec_dir,'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

        # ui mode settings
        self.mode = mode
        # edit surfaces
        self.edit_surfaces = []

        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache',[False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()

        #debug vars
        self.show_surface_idx = c_int(0)
        self.img_shape = None
        self.img = None

    def init_gui(self):
        """Build the plugin menu and the quickbar 'add surface' button."""
        self.menu = ui.Scrolling_Menu('Offline Marker Tracker')
        self.menu.configuration = self.menu_conf
        self.g_pool.gui.append(self.menu)
        self.add_button = ui.Thumb('add_surface',setter=self.add_surface,getter=lambda:False,label='Add Surface',hotkey='a')
        self.g_pool.quickbar.append(self.add_button)
        self.update_gui_markers()
        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))

    def deinit_gui(self):
        """Tear down menu and quickbar button, preserving the menu layout."""
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu_conf = self.menu.configuration
            self.menu = None
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
        """Rebuild the menu contents, including one sub-menu per surface."""
        self.menu.elements[:] = []
        self.menu.append(ui.Info_Text('The offline marker tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'))
        self.menu.append(ui.Button('Close',self.close))
        self.menu.append(ui.Selector('mode',self,label='Mode',selection=["Show Markers and Frames","Show marker IDs", "Surface edit mode","Show Heatmaps","Show Metrics"] ))
        self.menu.append(ui.Info_Text('To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'))
        self.menu.append(ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(ui.Button("Export gaze and surface data", self.save_surface_statsics_to_file))
        self.menu.append(ui.Button("Add surface", lambda:self.add_surface('_')))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface %s"%idx)
            s_menu.collapsed = True
            s_menu.append(ui.Text_Input('name',s))
            s_menu.append(ui.Text_Input('x',s.real_world_size,label='X size'))
            s_menu.append(ui.Text_Input('y',s.real_world_size,label='Y size'))
            s_menu.append(ui.Button('Open Debug Window',s.open_close_window))
            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)
            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove',remove_s))
            self.menu.append(s_menu)

    def close(self):
        self.alive = False

    def on_window_resize(self,window,w,h):
        self.win_size = w,h

    def on_click(self,pos,button,action):
        """In 'Surface edit mode', grab/release surface corner vertices near
        the click position so the user can drag them."""
        if self.mode == "Surface edit mode":
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surfaces verts in edit mode, lets see if the curser is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0.,0.),(1.,0.),(1.,1.),(0.,1.))
                    x,y = pos
                    for s in self.surfaces:
                        if s.detected and s.defined:
                            for (vx,vy),i in zip(s.ref_surface_to_img(np.array(surf_verts)),range(4)):
                                vx,vy = denormalize((vx,vy),(self.img_shape[1],self.img_shape[0]),flip_y=True)
                                if sqrt((x-vx)**2 + (y-vy)**2) < 15: #img pixels
                                    self.edit_surfaces.append((s,i))

    def advance(self):
        pass

    def add_surface(self,_):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.update_gui_markers()

    def remove_surface(self,i):
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_gui_markers()

    def recalculate(self):
        """Recompute per-surface heatmaps and the cross-surface gaze-count
        metric textures for the currently trimmed section."""
        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc metrics: gaze-point count per surface
        results = []
        for s in self.surfaces:
            gaze_on_srf = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            # scale counts to 0..255 for false-color mapping
            results *= 255./max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s,c_map in zip(self.surfaces,results_c_maps):
            heatmap = np.ones((1,1,4),dtype=np.uint8)*125
            heatmap[:,:,:3] = c_map
            s.metrics_texture = create_named_texture(heatmap.shape)
            update_named_texture(s.metrics_texture,heatmap)

    def update(self,frame,events):
        """Per-frame hook: pull cached markers, locate surfaces, and handle
        surface-editing via the mouse cursor."""
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers == False:
            # frame not yet analyzed by the cacher
            self.markers = []
            self.seek_marker_cacher(frame.index) # tell precacher that it better have every thing from here on analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                pass

        if self.mode == "Show marker IDs":
            draw_markers(frame.img,self.markers)

        # edit surfaces by user
        if self.mode == "Surface edit mode":
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)
            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    new_pos = s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)
                    # invalidate cached detections for the edited surface
                    s.cache = None
                    self.heatmap = None
        else:
            # update srf with no or invald cache:
            for s in self.surfaces:
                if s.cache == None:
                    s.init_cache(self.cache)

        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()

    def init_marker_cacher(self):
        """Spawn the background process that scans the whole video for
        markers and feeds results back through self.cache_queue."""
        forking_enable(0) #for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path = os.path.join(self.g_pool.rec_dir,'world.mkv')
        if not os.path.isfile(video_file_path):
            video_file_path = os.path.join(self.g_pool.rec_dir,'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i',0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
        """Drain the cacher queue into self.cache and per-surface caches."""
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,idx=idx)

    def seek_marker_cacher(self,idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        """Flush pending results, then stop and join the cacher process."""
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()

        for s in self.surfaces:
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.mode == "Show Markers and Frames":
            for m in self.markers:
                hat = np.array([[[0,0],[0,1],[1,1],[1,0],[0,0]]],dtype=np.float32)
                hat = cv2.perspectiveTransform(hat,m_marker_to_screen(m))
                draw_polyline(hat.reshape((5,2)),color=RGBA(0.1,1.,1.,.3),line_type=GL_POLYGON)
                draw_polyline(hat.reshape((5,2)),color=RGBA(0.1,1.,1.,.6))
            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)

        if self.mode == "Surface edit mode":
            for s in self.surfaces:
                s.gl_draw_frame(self.img_shape)
                s.gl_draw_corners()

        if self.mode == "Show Heatmaps":
            for s in self.surfaces:
                s.gl_display_heatmap()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """Draw horizontal timeline bars showing which frames the marker
        cacher has visited and where each surface was found."""
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        glOrtho(-h_pad, (self.cache.length-1)+h_pad, -v_pad, 1+v_pad,-1,1) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = RGBA(8.,.6,.2,8.)
        draw_polyline(cached_ranges,color=color,line_type=GL_LINES,thickness=4)

        color = RGBA(0.,.7,.3,8.)

        for s in cached_surfaces:
            glTranslatef(0,.02,0)
            draw_polyline(s,color=color,line_type=GL_LINES,thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

    def save_surface_statsics_to_file(self):
        """Export metrics for the trimmed section to a per-section directory:

        surface_visibility.csv       - frame counts and per-surface visibility
        surface_gaze_distribution.csv- gaze counts per surface / off-surface
        surface_events.csv           - surface enter/exit events
        per surface:
            srf_positions<name>      - pickled surface position cache
            srf_positons<name>.csv   - surface homographies per frame
            gaze_positions_on_surface<name>.csv - gaze mapped onto surface
            heatmap<name>.png        - heatmap image, if available
        """
        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)

        metrics_dir = os.path.join(self.g_pool.rec_dir,"metrics_%s-%s"%(in_mark,out_mark))
        logger.info("exporting metrics to %s"%metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s"%metrics_dir)
                return

        with open(os.path.join(metrics_dir,'surface_visibility.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])

            csv_writer.writerow(('frame_count',frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','visible_frame_count'))
            for s in self.surfaces:
                if s.cache == None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count = s.visible_count_in_section(section)
                csv_writer.writerow( (s.name, visible_count) )
            logger.info("Created 'surface_visibility.csv' file")

        with open(os.path.join(metrics_dir,'surface_gaze_distribution.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.gaze_positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])

            csv_writer.writerow(('total_gaze_point_count',len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','gaze_count'))

            for s in self.surfaces:
                gaze_on_srf = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp["timestamp"] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow( (s.name, len(gaze_on_srf)) )

            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf) ) )
            logger.info("Created 'surface_gaze_distribution.csv' file")

        with open(os.path.join(metrics_dir,'surface_events.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface events report
            csv_writer.writerow(('frame_number','timestamp','surface_name','surface_uid','event_type'))

            events = []
            for s in self.surfaces:
                for enter_frame_id,exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id':enter_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'enter'})
                    events.append({'frame_id':exit_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'exit'})

            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow( ( e['frame_id'],self.g_pool.timestamps[e['frame_id']],e['srf_name'],e['srf_uid'],e['event'] ) )
            logger.info("Created 'surface_events.csv' file")

        for s in self.surfaces:
            # per surface names:
            surface_name = '_'+s.name.replace('/','')+'_'+s.uid

            # save surface_positions as pickle file
            save_object(s.cache.to_list(),os.path.join(metrics_dir,'srf_positions'+surface_name))

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,'srf_positons'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx','timestamp','m_to_screen','m_from_screen','detected_markers'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow( (idx,ts,ref_srf_data['m_to_screen'],ref_srf_data['m_from_screen'],ref_srf_data['detected_markers']) )

            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir,'gaze_positions_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('world_frame_idx','world_timestamp','eye_timestamp','x_norm','y_norm','x_scaled','y_scaled','on_srf'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
                                gp_x,gp_y = gp['norm_pos']
                                on_srf = (0 <= gp_x <= 1) and (0 <= gp_y <= 1)
                                # BUGFIX: 'y_scaled' previously used gp_x
                                # (copy-paste error); it must scale gp_y.
                                csv_writer.writerow( (idx,ts,gp['timestamp'],gp_x,gp_y,gp_x*s.real_world_size['x'],gp_y*s.real_world_size['y'],on_srf) )

            logger.info("Saved surface positon data and gaze on surface data for '%s' with uid:'%s'"%(s.name,s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir,'heatmap'+surface_name+'.png'),s.heatmap)

    def get_init_dict(self):
        """Return the session-persistent settings for this plugin."""
        if self.menu:
            d = {'menu_conf':self.menu.configuration,'mode':self.mode}
        else:
            d = {'menu_conf':self.menu_conf,'mode':self.mode}
        return d

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """
        self.surface_definitions["offline_square_marker_surfaces"] = [rs.save_to_dict() for rs in self.surfaces if rs.defined]
        self.surface_definitions.close()

        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()

        for s in self.surfaces:
            s.close_window()
        self.deinit_gui()
def run_crawl(path):
    """Run _crawl(path) in a dedicated process and wait for it to finish.

    BUGFIX: the original ignored its *path* argument and always passed the
    hard-coded placeholder string 'hahahahha' to _crawl.
    """
    worker = Process(target=_crawl, args=[path])
    worker.start()
    worker.join()
class Offline_Screen_Tracker(Offline_Surface_Tracker,Screen_Tracker): """ Special version of screen tracker for use with videofile source. It uses a seperate process to search all frames in the world.avi file for markers. - self.cache is a list containing marker positions for each frame. - self.surfaces[i].cache is a list containing surface positions for each frame Both caches are build up over time. The marker cache is also session persistent. See marker_tracker.py for more info on this marker tracker. """ def __init__(self,g_pool,mode="Show Markers and Surfaces", min_marker_perimeter = 100,robust_detection=True, matrix=None): #self.g_pool = g_pool Trim_Marks_Extended_Exist = False for p in g_pool.plugins: if p.class_name == 'Trim_Marks_Extended': Trim_Marks_Extended_Exist = True break if not Trim_Marks_Extended_Exist: from trim_marks_patch import Trim_Marks_Extended g_pool.plugins.add(Trim_Marks_Extended) del Trim_Marks_Extended # heatmap self.matrix = matrix self.heatmap_blur = True self.heatmap_blur_gradation = 0.12 self.heatmap_colormap = "viridis" self.gaze_correction_block_size = '1000' self.gaze_correction_min_confidence = 0.98 self.gaze_correction_k = 2 self.heatmap_use_kdata = False super(Offline_Screen_Tracker, self).__init__(g_pool,mode,min_marker_perimeter,robust_detection) def load_surface_definitions_from_file(self): self.surface_definitions = Persistent_Dict(os.path.join(self.g_pool.rec_dir,'surface_definitions')) if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []: logger.debug("Found ref surfaces defined or copied in previous session.") self.surfaces = [Offline_Reference_Surface_Extended(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)] elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []: logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. 
Loading surfaces defined during capture.") self.surfaces = [Offline_Reference_Surface_Extended(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)] else: logger.debug("No surface defs found. Please define using GUI.") self.surfaces = [] def init_gui(self): self.menu = ui.Scrolling_Menu('Offline Screen Tracker') self.g_pool.gui.append(self.menu) self.update_gui_markers() self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext())) def init_marker_cacher(self): forking_enable(0) #for MacOs only from screen_detector_cacher import fill_cache visited_list = [False if x == False else True for x in self.cache] video_file_path = self.g_pool.capture.source_path timestamps = self.g_pool.capture.timestamps self.cache_queue = Queue() self.cacher_seek_idx = Value('i',0) self.cacher_run = Value(c_bool,True) self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,timestamps,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher)) self.cacher.start() def update_marker_cache(self): while not self.cache_queue.empty(): idx,c_m = self.cache_queue.get() self.cache.update(idx,c_m) for s in self.surfaces: s.update_cache(self.cache,camera_calibration=self.camera_calibration,min_marker_perimeter=self.min_marker_perimeter,min_id_confidence=self.min_id_confidence,idx=idx) # if self.cacher_run.value == False: # self.recalculate() # function TMatrixForm.GetMatrix(AMonitor: integer): TStmMatrix; # var # i,j, # LRowCount,LColCount,SHeight,SWidth,SYGap,SXGap,SLeft,STop: integer; # begin # SHeight := 150; # SWidth := 150; # SYGap := 100; # SXGap := 100; # SLeft := 0; # STop := 0; # LRowCount := 3; # LColCount := 3; # SetLength(Result, LRowCount,LColCount); # for i := Low(Result) to High(Result) do # begin # SLeft := ((SWidth+SXGap)*i)+(Screen.Monitors[AMonitor].Width div 2) # -(((SWidth+SXGap)*LColCount) div 2)+((SXGap) div 2); # for j:= 
Low(Result[i]) to High(Result[i]) do # begin # STop := ((SHeight+SYGap)*j)+(Screen.Monitors[AMonitor].Height div 2) # -(((SHeight+SYGap)*LRowCount) div 2)+(SYGap div 2); # Result[i][j].Left := SLeft; # Result[i][j].Top := STop; # Result[i][j].Width := SWidth; # Result[i][j].Height := SHeight; # end; # end; # end; def matrix_segmentation(self): if not self.mode == 'Show Markers and Surfaces': logger.error('Please, select the "Show Markers and Surfaces" option at the Mode Selector.') return screen_width = 1280 screen_height = -768 def move_srf_to_stm(s,p): """ ######### ######### # 0 . 1 # # lt.rt # # . . # # . . # # 3 . 2 # # lb.rb # # uv #### ######### ######### # 3 . 2 # # . . # # 0 . 1 # # sv #### """ sw = 150./screen_width sh = 150./screen_height before = s.markers.values()[0].uv_coords #before = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32) after = before.copy() after[0] = p after[1] = p + np.array([sw,0]) after[2] = p + np.array([sw,sh]) after[3] = p + np.array([0,sh]) transform = cv2.getPerspectiveTransform(after,before) for m in s.markers.values(): m.uv_coords = cv2.perspectiveTransform(m.uv_coords,transform) n = 3 namei = 0 for i in xrange(0,n): for j in xrange(0,n): namei += 1 sname = 'S'+str(namei) for s in self.surfaces: if s.name == sname: move_srf_to_stm(s, self.matrix[i][j]) for s in self.surfaces: s.invalidate() self.update_gui_markers() def add_matrix_surfaces(self): if not self.mode == 'Show Markers and Surfaces': logger.error('Please, select the "Show Markers and Surfaces" option at the Mode Selector.') return screen_width = 1280 screen_height = -768 def midpoint(v1, v2): return np.array([(v1[0]+v2[0])/2,(v1[1]+v2[1])/2]) def get_m(s, n=3): def get_coord(index,midxy, y=False): if y: rws = screen_height # must flip else: rws = screen_width return ((250./rws)*index)+midxy-(((250./rws)*n)/2)+((100./rws)/2) rwsx = screen_width rwsy = screen_height lt = s.left_top rt = s.right_top lb = s.left_bottom rb = s.right_bottom m = [[[] for _ in 
xrange(0,n)] for _ in xrange(0,n)] for j in xrange(0,n): xt = get_coord(j,midpoint(lt,rt)[0]) # yt = get_coord(j,midpoint(lt,rt)[1]) # xb = get_coord(j,midpoint(lb,rb)[0]) # yb = get_coord(j,midpoint(lb,rb)[1]) for i in xrange(0,n): yt = get_coord(i,midpoint(lt,rb)[1],True) # yt = get_coord(i,midpoint([xt,yt],[xb,yb])[1],True) m[i][j] = np.array([xt, yt]) return m def create_surface(name): self.surfaces.append(Offline_Reference_Surface_Extended(self.g_pool)) self.surfaces[-1].name = name self.surfaces[-1].real_world_size['x'] = 150 self.surfaces[-1].real_world_size['y'] = 150 # self.surfaces[-1].markers = markers n = 3 for s in self.surfaces: if s.name == 'Screen': self.matrix = get_m(s,n) # markers = s.markers for i in xrange(0,n*n): create_surface('S'+str(i+1)) for s in self.surfaces: s.invalidate() self.update_gui_markers() def screen_segmentation(self): """ no standards here, uv_coords ordering differing from the surface vertice one. 0 . 1 . . 3 . 2 uv 3 . 2 . . 0 . 1 sv """ if not self.mode == 'Show Markers and Surfaces': logger.error('Please, select the "Show Markers and Surfaces" option at the Mode Selector.') return correcly_named = [False, False] for s in self.surfaces: if s.name == 'Left': correcly_named[0] = (s.name == 'Left') if s.name == 'Right': correcly_named[1] = (s.name == 'Right') if not (correcly_named[0] and correcly_named[1]): logger.error('Please, create two identical surfaces and name them as "Left" and "Right".') return for s in self.surfaces: s.real_world_size['x'] = s.real_world_size['x']/2. 
lt = s.left_top rt = s.right_top lb = s.left_bottom rb = s.right_bottom midtop = np.array([(lt[0]+rt[0])/2,(lt[1]+rt[1])/2]) midbottom = np.array([(lb[0]+rb[0])/2,(lb[1]+rb[1])/2]) if s.name == 'Left': s.right_top = midtop s.right_bottom = midbottom if s.name == 'Right': s.left_top = midtop s.left_bottom = midbottom self.update_gui_markers() def raise_bug(self): raise 's' def update_gui_markers(self): def close(): self.alive = False def set_min_marker_perimeter(val): self.min_marker_perimeter = val self.notify_all_delayed({'subject':'min_marker_perimeter_changed'},delay=1) self.menu.elements[:] = [] self.menu.append(ui.Button('Close',close)) self.menu.append(ui.Slider('min_marker_perimeter',self,min=20,max=500,step=1,setter=set_min_marker_perimeter)) self.menu.append(ui.Info_Text('The offline screen tracker will look for a screen for each frame of the video. By default it uses surfaces defined in capture. You can change and add more surfaces here.')) self.menu.append(ui.Selector('mode',self,setter=self.set_mode,label='Mode',selection=["Show Markers and Surfaces","Show marker IDs","Show Heatmaps","Show Gaze Cloud", "Show Kmeans Correction","Show Mean Correction","Show Metrics"] )) if self.mode == 'Show Markers and Surfaces': self.menu.append(ui.Info_Text('To split the screen in two (left,right) surfaces 1) add two surfaces; 2) name them as "Left" and "Right"; 3) press Left Right segmentation')) self.menu.append(ui.Button("Left Right segmentation",self.screen_segmentation)) self.menu.append(ui.Button("Matrix segmentation", self.matrix_segmentation)) self.menu.append(ui.Button("Add M surfaces", self.add_matrix_surfaces)) self.menu.append(ui.Button("bug", self.raise_bug)) if self.mode == 'Show Kmeans Correction': self.menu.append(ui.Info_Text('Gaze Correction requires a non segmented screen. 
It requires k equally distributed stimuli on the screen.')) self.menu.append(ui.Text_Input('gaze_correction_block_size',self,label='Block Size')) self.menu.append(ui.Slider('gaze_correction_min_confidence',self,min=0.0,step=0.01,max=1.0,label='Minimun gaze confidence')) self.menu.append(ui.Slider('gaze_correction_k',self,min=1,step=1,max=24,label='K clusters')) if self.mode == 'Show Gaze Cloud': self.menu.append(ui.Slider('gaze_correction_min_confidence',self,min=0.0,step=0.01,max=1.0,label='Minimun gaze confidence')) self.menu.append(ui.Slider('gaze_correction_k',self,min=1,step=1,max=24,label='K clusters')) if self.mode == 'Show Heatmaps': self.menu.append(ui.Info_Text('Heatmap Settings')) self.menu.append(ui.Switch('heatmap_blur',self,label='Blur')) self.menu.append(ui.Slider('heatmap_blur_gradation',self,min=0.01,step=0.01,max=1.0,label='Blur Gradation')) self.menu.append(ui.Selector('heatmap_colormap',self,label='Color Map',selection=['magma', 'inferno', 'plasma', 'viridis', 'jet'])) self.menu.append(ui.Switch('heatmap_use_kdata',self,label='Use K Data')) self.menu.append(ui.Info_Text('Select a section. To see heatmap, surface metrics, gaze cloud or gaze correction visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.')) self.menu.append(ui.Button("(Re)-calculate gaze distributions",self.recalculate)) self.menu.append(ui.Info_Text('To use data from all sections to generate visualizations click the next button instead.')) self.menu.append(ui.Button("(Re)-calculate",self.recalculate_all_sections)) self.menu.append(ui.Button("Add screen surface",lambda:self.add_surface('_'))) self.menu.append(ui.Info_Text('Export gaze metrics. We recalculate metrics for each section when exporting all sections. 
Press the recalculate button before export the current selected section.')) self.menu.append(ui.Info_Text("Press the export button or type 'e' to start the export for the current section.")) self.menu.append(ui.Button("Export all sections", self.export_all_sections)) self.menu.append(ui.Info_Text('Requires segmentation plugin.')) self.menu.append(ui.Button("Export all distances", self.export_all_distances)) self.menu.append(ui.Button("Precision Report", self.precision_report)) self.menu.append(ui.Button("Slice 1.5 - precision", self.export_all_precision)) for s in self.surfaces: idx = self.surfaces.index(s) s_menu = ui.Growing_Menu("Surface %s"%idx) s_menu.collapsed=True s_menu.append(ui.Text_Input('name',s)) s_menu.append(ui.Text_Input('x',s.real_world_size,label='X size')) s_menu.append(ui.Text_Input('y',s.real_world_size,label='Y size')) s_menu.append(ui.Button('Open Debug Window',s.open_close_window)) #closure to encapsulate idx def make_remove_s(i): return lambda: self.remove_surface(i) remove_s = make_remove_s(idx) s_menu.append(ui.Button('remove',remove_s)) self.menu.append(s_menu) def set_mode(self, value): self.mode = value self.update_gui_markers() def add_surface(self,_): self.surfaces.append(Offline_Reference_Surface_Extended(self.g_pool)) self.surfaces[0].name = 'Screen' self.surfaces[0].real_world_size['x'] = 1280 self.surfaces[0].real_world_size['y'] = 768 # self.surfaces[0].name = 'Left' # self.surfaces[0].real_world_size['x'] = 1280 # self.surfaces[0].real_world_size['y'] = 768 # self.surfaces[1].name = 'Right' # self.surfaces[1].real_world_size['x'] = 1280 # self.surfaces[1].real_world_size['y'] = 768 self.update_gui_markers() # def update(self,frame,events): # super(Offline_Screen_Tracker, self).update(frame, events) # # locate surfaces # for s in self.surfaces: # if not s.locate_from_cache(frame.index): # s.locate(self.markers) # # if s.detected: # # pass # # # 
events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp,'gaze_on_srf':s.gaze_on_srf}) def recalculate(self): pass # def recalculate(self): # #super(Offline_Screen_Tracker, self).recalculate() # # calc heatmaps # in_mark = self.g_pool.trim_marks.in_mark # out_mark = self.g_pool.trim_marks.out_mark # section = slice(in_mark,out_mark) # for s in self.surfaces: # if s.defined: # s.heatmap_blur = self.heatmap_blur # s.heatmap_blur_gradation = self.heatmap_blur_gradation # s.heatmap_colormap = self.heatmap_colormap # s.heatmap_use_kdata = self.heatmap_use_kdata # s.gaze_correction_block_size = self.gaze_correction_block_size # s.gaze_correction_min_confidence = self.gaze_correction_min_confidence # s.gaze_correction_k = self.gaze_correction_k # s.generate_gaze_cloud(section) # s.generate_gaze_correction(section) # s.generate_mean_correction(section) # s.generate_heatmap(section) # # calc distirbution accross all surfaces. 
# results = [] # for s in self.surfaces: # gaze_on_srf = s.gaze_on_srf_in_section(section) # results.append(len(gaze_on_srf)) # self.metrics_gazecount = len(gaze_on_srf) # if results == []: # logger.warning("No surfaces defined.") # return # max_res = max(results) # results = np.array(results,dtype=np.float32) # if not max_res: # logger.warning("No gaze on any surface for this section!") # else: # results *= 255./max_res # results = np.uint8(results) # results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET) # for s,c_map in zip(self.surfaces,results_c_maps): # heatmap = np.ones((1,1,4),dtype=np.uint8)*125 # heatmap[:,:,:3] = c_map # s.metrics_texture = Named_Texture() # s.metrics_texture.update_from_ndarray(heatmap) def recalculate_all_sections(self): """ treats all sections as one should not be used to overlaid sections """ # for now, it requires trim_marks_patch.py sections_alive = False for p in self.g_pool.plugins: if p.class_name == 'Trim_Marks_Extended': sections_alive = True if sections_alive: sections = self.g_pool.trim_marks.sections for s in self.surfaces: if s.defined: # assign user defined variables s.heatmap_blur = self.heatmap_blur s.heatmap_blur_gradation = self.heatmap_blur_gradation s.heatmap_use_kdata = self.heatmap_use_kdata s.heatmap_colormap = self.heatmap_colormap s.gaze_correction_block_size = self.gaze_correction_block_size s.gaze_correction_min_confidence = self.gaze_correction_min_confidence s.gaze_correction_k = self.gaze_correction_k # generate visualizations s.generate_gaze_cloud(sections, True) s.generate_gaze_correction(sections, True) s.generate_heatmap(sections, True) s.generate_mean_correction(sections, True) logger.info("Recalculate visualizations done.") else: logger.error("Trim_Marks_Extended not found. 
Have you opened it?") def gl_display(self): """ Display marker and surface info inside world screen """ super(Offline_Screen_Tracker, self).gl_display() if self.mode == "Show Gaze Cloud": for s in self.surfaces: s.gl_display_gaze_cloud() if self.mode == "Show Kmeans Correction": for s in self.surfaces: s.gl_display_gaze_correction() if self.mode == "Show Heatmap Correction": for s in self.surfaces: s.gl_display_mean_correction() if self.mode == "Show Mean Correction": for s in self.surfaces: s.gl_display_mean_correction() def precision_report(self, custom_tag=None): sections_alive = False if self.g_pool.trim_marks.class_name == 'Trim_Marks_Extended': sections_alive = True segmentation = None for p in self.g_pool.plugins: if p.class_name == 'Segmentation': if p.alive: segmentation = p break if (segmentation is not None) and sections_alive: export_path = os.path.join(self.g_pool.rec_dir,'exports') save_path = os.path.join(export_path,"precision_report") if os.path.isdir(save_path): logger.info("Overwriting data on precision_report") else: try: os.mkdir(save_path) except: logger.warning("Could not make dir %s"%save_path) return angles,x1,y1 = segmentation.scapp_report['Angle'],segmentation.scapp_report['X1'],segmentation.scapp_report['Y1'] unique_distances = sorted(set(zip(angles,x1,y1))) unique_responses = sorted(set(segmentation.scapp_report['ExpcResp'])) segmentation.filter_by_expresp = True segmentation.filter_by_distance = True segmentation.filter_by_angle = False segmentation.mode = 'in out pairs' filtered_gaze = [] metadata=[] for unique_distance in unique_distances: segmentation.distance = str(unique_distance) for unique_response in unique_responses: (s1, s2, s3) = unique_distance metadata.append("r_%s_distance_%s-%s-%s"%(unique_response, s1, s2, s3)) segmentation.expected_response = str(unique_response) segmentation.clean_add_trim() sections = self.g_pool.trim_marks.sections gaze_no_confidence = 0 no_surface = 0 all_gaze = [] for s in self.surfaces: if 
s.defined: for sec in sections: in_mark = sec[0] out_mark = sec[1] sec = slice(in_mark,out_mark) for frame_idx,c_e in enumerate(s.cache[sec]): if c_e: frame_idx+=sec.start for i, gp in enumerate(s.gaze_on_srf_by_frame_idx(frame_idx,c_e['m_from_screen'])): if gp['base']['confidence'] >= self.gaze_correction_min_confidence: all_gaze.append({'frame':frame_idx,'i':i,'norm_pos':gp['norm_pos'],'metatag':'%s-%s-%s-%s'%(unique_response, s1, s2, s3)}) else: gaze_no_confidence += 1 else: no_surface += 1 if not all_gaze: logger.error("No Gaze points found.") metadata.append("No gaze points found.") return else: gaze_count = len(all_gaze) metadata.append('Found %s frames with no screen/surface.'%no_surface) metadata.append("Found %s gaze points."%gaze_count) metadata.append("Removed '{0}' with confidence < '{1}'".format(gaze_no_confidence, self.gaze_correction_min_confidence)) filtered_gaze.append(all_gaze) if custom_tag: np.save(os.path.join(save_path,'data_ordered_by_metatag'+custom_tag),filtered_gaze) else: np.save(os.path.join(save_path,'data_ordered_by_metatag'),filtered_gaze) #np.savetxt(os.path.join(save_path,'metadata.txt'),metadata) segmentation.clean_custom_events() for unique_distance in unique_distances: segmentation.distance = str(unique_distance) for unique_response in unique_responses: segmentation.expected_response = str(unique_response) segmentation.add_filtered_events() segmentation.auto_trim() filtered_gaze = [] mean_at_zero_cluster = [] norm_gaze = [] for s in self.surfaces: if s.defined: for sec in self.g_pool.trim_marks.sections: section_gaze = [] in_mark = sec[0] out_mark = sec[1] sec = slice(in_mark,out_mark) for frame_idx,c_e in enumerate(s.cache[sec]): if c_e: frame_idx+=sec.start for i, gp in enumerate(s.gaze_on_srf_by_frame_idx(frame_idx,c_e['m_from_screen'])): trial = segmentation.trial_from_timestamp(gp['base']['timestamp']) if gp['base']['confidence'] >= self.gaze_correction_min_confidence: 
section_gaze.append({'frame':frame_idx,'i':i,'norm_pos':gp['norm_pos'],'trial':trial}) filtered_gaze.append(section_gaze) if custom_tag: np.save(os.path.join(save_path,'data_ordered_by_trial'+custom_tag),filtered_gaze) else: np.save(os.path.join(save_path,'data_ordered_by_trial'),filtered_gaze) else: logger.error("Please, open the segmentation plugin.") def export_all_precision(self): segmentation = None for p in self.g_pool.plugins: if p.class_name == 'Segmentation': if p.alive: segmentation = p break segmentation.onset = 0.0 segmentation.offset = 1.5 for status in range(16): tag = '_%s-%s'%(segmentation.onset,segmentation.offset) tag = tag.replace('.','-') logger.info(str(status)+tag) self.precision_report(tag) segmentation.onset += 0.1 segmentation.offset -= 0.1 logger.info('end') def export_all_distances(self): segmentation = None for p in self.g_pool.plugins: if p.class_name == 'Segmentation': if p.alive: segmentation = p break if segmentation is not None: angles,x1,y1 = segmentation.scapp_report['Angle'],segmentation.scapp_report['X1'],segmentation.scapp_report['Y1'] unique_items = sorted(set(zip(angles,x1,y1))) for unique_distance in unique_items: segmentation.distance = str(unique_distance) segmentation.clean_add_trim() in_mark = self.g_pool.trim_marks.in_mark out_mark = self.g_pool.trim_marks.out_mark # generate visualizations and data self.recalculate_all_sections() export_path = os.path.join(self.g_pool.rec_dir,'exports') save_path = os.path.join(export_path,"distance_%s-%s-%s"%unique_distance) if os.path.isdir(save_path): logger.info("Overwriting data on distance %s-%s-%s"%unique_distance) else: try: os.mkdir(save_path) except: logger.warning("Could not make dir %s"%save_path) return for s in self.surfaces: surface_name = '_'+s.name.replace('/','')+'_'+s.uid if s.heatmap is not None: logger.info("Saved Heatmap as .png file.") cv2.imwrite(os.path.join(save_path,'heatmap'+surface_name+'.png'),s.heatmap) if s.gaze_cloud is not None: logger.info("Saved Gaze 
Cloud as .png file.") cv2.imwrite(os.path.join(save_path,'gaze_cloud'+surface_name+'.png'),s.gaze_cloud) if s.gaze_correction is not None: logger.info("Saved Gaze Correction as .png file.") cv2.imwrite(os.path.join(save_path,'gaze_correction'+surface_name+'.png'),s.gaze_correction) # export a surface image from the center of the first section for visualization purposes only self.export_section_image(save_path, s, in_mark, out_mark, os.path.join(save_path,'surface'+surface_name+'.png')) # if s.gaze_correction_mean is not None: # logger.info("Saved Gaze Correction Mean as .png file.") # cv2.imwrite(os.path.join(save_path,'gaze_correction_mean'+surface_name+'.png'),s.gaze_correction_mean) np.save(os.path.join(save_path,'source_data'),s.output_data) def export_section_image(self,save_path,s,in_mark,out_mark,surface_path): # lets save out the current surface image found in video seek_pos = in_mark + ((out_mark - in_mark)/2) self.g_pool.capture.seek_to_frame(seek_pos) new_frame = self.g_pool.capture.get_frame() frame = new_frame.copy() self.update(frame, {}) if s.detected and frame.img is not None: #here we get the verts of the surface quad in norm_coords mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2) screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2) #now we convert to image pixel coords screen_space[:,1] = 1-screen_space[:,1] screen_space[:,1] *= frame.img.shape[0] screen_space[:,0] *= frame.img.shape[1] s_0,s_1 = s.real_world_size['x'], s.real_world_size['y'] #now we need to flip vertically again by setting the mapped_space verts accordingly. mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32) M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled) #here we do the actual perspactive transform of the image. 
srf_in_video = cv2.warpPerspective(frame.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) ) cv2.imwrite(surface_path,srf_in_video) logger.info("Saved: '%s'"%surface_path) else: logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name) def export_all_sections(self): for section in self.g_pool.trim_marks.sections: self.g_pool.trim_marks.focus = self.g_pool.trim_marks.sections.index(section) in_mark = self.g_pool.trim_marks.in_mark out_mark = self.g_pool.trim_marks.out_mark export_path = export_path = os.path.join(self.g_pool.rec_dir,'exports') if os.path.isdir(export_path): logger.info("Will overwrite export_path") else: try: os.mkdir(export_path) except: logger.warning("Could not make metrics_dir %s"%export_path) return metrics_dir = os.path.join(export_path,"%s-%s"%(in_mark,out_mark)) if os.path.isdir(metrics_dir): logger.info("Will overwrite metrics_dir") else: try: os.mkdir(metrics_dir) except: logger.warning("Could not make metrics_dir %s"%metrics_dir) return self.recalculate() self.save_surface_statsics_to_file(slice(in_mark,out_mark), metrics_dir) surface_dir = os.path.join(metrics_dir,'surfaces') for s in self.surfaces: surface_name = '_'+s.name.replace('/','')+'_'+s.uid if s.heatmap is not None: logger.info("Saved Heatmap as .png file.") cv2.imwrite(os.path.join(surface_dir,'heatmap'+surface_name+'.png'),s.heatmap) if s.gaze_cloud is not None: logger.info("Saved Gaze Cloud as .png file.") cv2.imwrite(os.path.join(surface_dir,'gaze_cloud'+surface_name+'.png'),s.gaze_cloud) if s.gaze_correction is not None: logger.info("Saved Gaze Correction as .png file.") cv2.imwrite(os.path.join(surface_dir,'gaze_correction'+surface_name+'.png'),s.gaze_correction) surface_path = os.path.join(surface_dir,'surface'+surface_name+'.png') # export a surface image from the center of the section for visualization purposes only self.export_section_image(surface_dir, s, in_mark, out_mark, surface_path) # lets create 
alternative versions of the surfaces *.pngs src1 = cv2.imread(surface_path) for g in s.output_data['gaze']: cv2.circle(src1, (int(g[0]),int(g[1])), 3, (0, 0, 0), 0) for c in s.output_data['kmeans']: cv2.circle(src1, (int(c[0]),int(c[1])), 5, (0, 0, 255), -1) cv2.imwrite(os.path.join(surface_dir,'surface-gaze_cloud'+surface_name+'.png'),src1) np.savetxt(os.path.join(surface_dir,'surface-gaze_cloud'+surface_name+'.txt'), s.output_data['gaze']) #src2 = cv2.imread(os.path.join(surface_dir,'heatmap'+surface_name+'.png')) #dst = cv2.addWeighted(src1, .9, src2, .1, 0.0); #cv2.imwrite(os.path.join(surface_dir,'surface-heatmap'+surface_name+'.png'),dst) self.g_pool.capture.seek_to_frame(in_mark) logger.info("Done exporting reference surface data.") def export_raw_data(self): """ .surface_gaze_positions - gaze_timestamp, surface_norm_x, surface_norm_y """ sections_alive = False if self.g_pool.trim_marks.class_name == 'Trim_Marks_Extended': sections_alive = True segmentation = None for p in self.g_pool.plugins: if p.class_name == 'Segmentation': if p.alive: segmentation = p break def get_init_dict(self): return {'mode':self.mode, 'matrix':self.matrix} def on_notify(self,notification): if notification['subject'] == 'gaze_positions_changed': logger.info('Gaze positions changed. Please, recalculate.') #self.recalculate() if notification['subject'] == 'gaze_positions_changed': logger.info('Gaze postions changed. Please, recalculate..') #self.recalculate() elif notification['subject'] == 'surfaces_changed': logger.info('Surfaces changed. Please, recalculate..') #self.recalculate() elif notification['subject'] == 'min_marker_perimeter_changed': logger.info('min_marker.. not implemented') #logger.info('Min marper perimeter adjusted. Re-detecting surfaces.') #self.invalidate_surface_caches() elif notification['subject'] is "should_export": logger.info('should_export.. not implemented')
def output_monitor_queue(out): q = Queue() p = Process(target=enqueue_output, args=(out, q)) p.start() return q, p
class Offline_Surface_Tracker(Surface_Tracker):
    """
    Special version of surface tracker for use with videofile source.
    It uses a seperate process to search all frames in the world video file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are build up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """
    def __init__(self,g_pool,mode="Show Markers and Surfaces",min_marker_perimeter = 100):
        super(Offline_Surface_Tracker, self).__init__(g_pool,mode,min_marker_perimeter)
        self.order = .2
        if g_pool.app == 'capture':
            raise Exception('For Player only.')
        self.marker_cache_version = 1
        self.min_marker_perimeter_cacher = 20 #find even super small markers. The surface locater will filter using min_marker_perimeter
        #check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir,'square_marker_cache'))
        version = self.persistent_cache.get('version',0)
        cache = self.persistent_cache.get('marker_cache',None)
        if cache is None:
            # no cache yet: start with one False entry per world frame
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            self.persistent_cache['version'] = self.marker_cache_version
        elif version != self.marker_cache_version:
            # stale cache format: discard and rebuild from scratch
            self.persistent_cache['version'] = self.marker_cache_version
            self.cache = Cache_List([False for _ in g_pool.timestamps])
            logger.debug("Marker cache version missmatch. Rebuilding marker cache.")
        else:
            self.cache = Cache_List(cache)
            logger.debug("Loaded marker cache %s / %s frames had been searched before"%(len(self.cache)-self.cache.count(False),len(self.cache)) )
        self.init_marker_cacher()
        for s in self.surfaces:
            s.init_cache(self.cache,self.camera_calibration,self.min_marker_perimeter)
        self.recalculate()

    def load_surface_definitions_from_file(self):
        # surface defs edited in Player take precedence over those made in Capture
        self.surface_definitions = Persistent_Dict(os.path.join(self.g_pool.rec_dir,'surface_definitions'))
        if self.surface_definitions.get('offline_square_marker_surfaces',[]) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('offline_square_marker_surfaces',[]) if isinstance(d,dict)]
        elif self.surface_definitions.get('realtime_square_marker_surfaces',[]) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool,saved_definition=d) for d in self.surface_definitions.get('realtime_square_marker_surfaces',[]) if isinstance(d,dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

    def init_gui(self):
        # build the plugin menu and the quick-access "add surface" thumb button
        self.menu = ui.Scrolling_Menu('Offline Surface Tracker')
        self.g_pool.gui.append(self.menu)
        self.add_button = ui.Thumb('add_surface',setter=lambda x: self.add_surface(),getter=lambda:False,label='Add Surface',hotkey='a')
        self.g_pool.quickbar.append(self.add_button)
        self.update_gui_markers()
        self.on_window_resize(glfwGetCurrentContext(),*glfwGetWindowSize(glfwGetCurrentContext()))

    def deinit_gui(self):
        # remove GUI elements; idempotent thanks to the None checks
        if self.menu:
            self.g_pool.gui.remove(self.menu)
            self.menu= None
        if self.add_button:
            self.g_pool.quickbar.remove(self.add_button)
            self.add_button = None

    def update_gui_markers(self):
        # rebuild the menu from scratch so it reflects the current surface list
        def close():
            self.alive=False
        def set_min_marker_perimeter(val):
            self.min_marker_perimeter = val
            self.notify_all_delayed({'subject':'min_marker_perimeter_changed'},delay=1)
        self.menu.elements[:] = []
        self.menu.append(ui.Button('Close',close))
        self.menu.append(ui.Slider('min_marker_perimeter',self,min=20,max=500,step=1,setter=set_min_marker_perimeter))
        self.menu.append(ui.Info_Text('The offline surface tracker will look for markers in the entire video. By default it uses surfaces defined in capture. You can change and add more surfaces here.'))
        self.menu.append(ui.Info_Text("Press the export button or type 'e' to start the export."))
        self.menu.append(ui.Selector('mode',self,label='Mode',selection=["Show Markers and Surfaces","Show marker IDs","Show Heatmaps","Show Metrics"] ))
        self.menu.append(ui.Info_Text('To see heatmap or surface metrics visualizations, click (re)-calculate gaze distributions. Set "X size" and "Y size" for each surface to see heatmap visualizations.'))
        self.menu.append(ui.Button("(Re)-calculate gaze distributions", self.recalculate))
        self.menu.append(ui.Button("Add surface", lambda:self.add_surface()))
        for s in self.surfaces:
            idx = self.surfaces.index(s)
            s_menu = ui.Growing_Menu("Surface %s"%idx)
            s_menu.collapsed=True
            s_menu.append(ui.Text_Input('name',s))
            s_menu.append(ui.Text_Input('x',s.real_world_size,label='X size'))
            s_menu.append(ui.Text_Input('y',s.real_world_size,label='Y size'))
            s_menu.append(ui.Button('Open Debug Window',s.open_close_window))
            #closure to encapsulate idx
            def make_remove_s(i):
                return lambda: self.remove_surface(i)
            remove_s = make_remove_s(idx)
            s_menu.append(ui.Button('remove',remove_s))
            self.menu.append(s_menu)

    def on_notify(self,notification):
        # react to inter-plugin notifications; recalculation here runs automatically
        if notification['subject'] == 'gaze_positions_changed':
            logger.info('Gaze postions changed. Recalculating.')
            self.recalculate()
        elif notification['subject'] == 'surfaces_changed':
            logger.info('Surfaces changed. Recalculating.')
            self.recalculate()
        elif notification['subject'] == 'min_marker_perimeter_changed':
            logger.info('Min marper perimeter adjusted. Re-detecting surfaces.')
            self.invalidate_surface_caches()
        # NOTE(review): 'is' compares string identity and only works here via
        # CPython interning — should be '=='. Left unchanged in this doc pass.
        elif notification['subject'] is "should_export":
            self.save_surface_statsics_to_file(notification['range'],notification['export_dir'])

    def on_window_resize(self,window,w,h):
        # cache the window size for the cache-bar overlay projection
        self.win_size = w,h

    def add_surface(self):
        self.surfaces.append(Offline_Reference_Surface(self.g_pool))
        self.update_gui_markers()

    def recalculate(self):
        # regenerate heatmaps and cross-surface gaze distribution for the
        # currently trimmed section
        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark,out_mark)
        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)
        # calc distirbution accross all surfaces.
        results = []
        for s in self.surfaces:
            gaze_on_srf = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)
        if results == []:
            logger.warning("No surfaces defined.")
            return
        max_res = max(results)
        results = np.array(results,dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            # scale gaze counts to 0..255 and false-color them per surface
            results *= 255./max_res
            results = np.uint8(results)
            results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)
            for s,c_map in zip(self.surfaces,results_c_maps):
                heatmap = np.ones((1,1,4),dtype=np.uint8)*125
                heatmap[:,:,:3] = c_map
                s.metrics_texture = Named_Texture()
                s.metrics_texture.update_from_ndarray(heatmap)

    def invalidate_surface_caches(self):
        for s in self.surfaces:
            s.cache = None

    def update(self,frame,events):
        # per-frame work: pull new cacher results, locate surfaces, publish events
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        # self.markers = [m for m in self.cache[frame.index] if m['perimeter'>=self.min_marker_perimeter]
        self.markers = self.cache[frame.index]
        if self.markers == False:
            # not cached yet: show no markers and prioritize this frame
            self.markers = []
            self.seek_marker_cacher(frame.index) # tell precacher that it better have every thing from here on analyzed
        events['surfaces'] = []
        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers,self.camera_calibration,self.min_marker_perimeter)
            if s.detected:
                events['surfaces'].append({'name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp})
        if self.mode == "Show marker IDs":
            draw_markers(frame.img,self.markers)
        elif self.mode == "Show Markers and Surfaces":
            # edit surfaces by user
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)
                for s,v_idx in self.edit_surf_verts:
                    if s.detected:
                        new_pos = s.img_to_ref_surface(np.array(pos))
                        s.move_vertex(v_idx,new_pos)
            else:
                # update srf with no or invald cache:
                for s in self.surfaces:
                    if s.cache == None and s not in [s for s,i in self.edit_surf_verts]:
                        s.init_cache(self.cache,self.camera_calibration,self.min_marker_perimeter)
                        self.notify_all_delayed({'subject':'surfaces_changed'})
        #map recent gaze onto detected surfaces used for pupil server
        for s in self.surfaces:
            if s.detected:
                s.gaze_on_srf = []
                for p in events.get('gaze_positions',[]):
                    gp_on_s = tuple(s.img_to_ref_surface(np.array(p['norm_pos'])))
                    p['realtime gaze on ' + s.name] = gp_on_s
                    s.gaze_on_srf.append(gp_on_s)
        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()

    def init_marker_cacher(self):
        # launch the background process that scans the whole video for markers
        forking_enable(0) #for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path = self.g_pool.capture.src
        timestamps = self.g_pool.capture.timestamps
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value('i',0)
        self.cacher_run = Value(c_bool,True)
        self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,timestamps,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher))
        self.cacher.start()

    def update_marker_cache(self):
        # drain the cacher queue into our cache and propagate to surfaces
        while not self.cache_queue.empty():
            idx,c_m = self.cache_queue.get()
            self.cache.update(idx,c_m)
            for s in self.surfaces:
                s.update_cache(self.cache,camera_calibration=self.camera_calibration,min_marker_perimeter=self.min_marker_perimeter,idx=idx)
            if self.cacher_run.value == False:
                self.recalculate()

    def seek_marker_cacher(self,idx):
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        # drain remaining results, then signal the cacher process to stop
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()
        super(Offline_Surface_Tracker,self).gl_display()
        if self.mode == "Show Heatmaps":
            for s in self.surfaces:
                s.gl_display_heatmap()
        if self.mode == "Show Metrics":
            #todo: draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                #draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """
        Draw thin timeline bars: one for frames the marker cacher has visited,
        and one per surface for frames where that surface was found.
        """
        padding = 20.
        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges: # [[0,1],[3,4]]
            cached_ranges += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges: # [[0,1],[3,4]]
                    found_at += (r[0],0),(r[1],0) #[(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)
        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width,height = self.win_size
        h_pad = padding * (self.cache.length-2)/float(width)
        v_pad = padding* 1./(height-2)
        glOrtho(-h_pad, (self.cache.length-1)+h_pad, -v_pad, 1+v_pad,-1,1) # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()
        color = RGBA(.8,.6,.2,.8)
        draw_polyline(cached_ranges,color=color,line_type=GL_LINES,thickness=4)
        color = RGBA(0,.7,.3,.8)
        for s in cached_surfaces:
            # stack surface bars slightly above one another
            glTranslatef(0,.02,0)
            draw_polyline(s,color=color,line_type=GL_LINES,thickness=2)
        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

    def save_surface_statsics_to_file(self,export_range,export_dir):
        """
        between in and out mark

            report: gaze distribution:
                    - total gazepoints
                    - gaze points on surface x
                    - gaze points not on any surface

            report: surface visisbility

                - total frames
                - surface x visible framecount

            surface events:
                frame_no, ts, surface "name", "id" enter/exit

            for each surface:
                fixations_on_name.csv
                gaze_on_name_id.csv
                positions_of_name_id.csv
        """
        metrics_dir = os.path.join(export_dir,'surfaces')
        section = export_range
        in_mark = export_range.start
        out_mark = export_range.stop
        logger.info("exporting metrics to %s"%metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s"%metrics_dir)
                return

        with open(os.path.join(metrics_dir,'surface_visibility.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])
            csv_writer.writerow(('frame_count',frame_count))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','visible_frame_count'))
            for s in self.surfaces:
                if s.cache == None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count = s.visible_count_in_section(section)
                csv_writer.writerow( (s.name, visible_count) )
            logger.info("Created 'surface_visibility.csv' file")

        with open(os.path.join(metrics_dir,'surface_gaze_distribution.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.gaze_positions_by_frame[section]))
            # start with every gaze timestamp; remove those claimed by a surface
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])
            csv_writer.writerow(('total_gaze_point_count',len(gaze_in_section)))
            csv_writer.writerow((''))
            csv_writer.writerow(('surface_name','gaze_count'))
            for s in self.surfaces:
                gaze_on_srf = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp['base']['timestamp'] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow( (s.name, len(gaze_on_srf)) )
            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf) ) )
            logger.info("Created 'surface_gaze_distribution.csv' file")

        with open(os.path.join(metrics_dir,'surface_events.csv'),'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
            # surface events report
            csv_writer.writerow(('frame_number','timestamp','surface_name','surface_uid','event_type'))
            events = []
            for s in self.surfaces:
                for enter_frame_id,exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id':enter_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'enter'})
                    events.append({'frame_id':exit_frame_id,'srf_name':s.name,'srf_uid':s.uid,'event':'exit'})
            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow( ( e['frame_id'],self.g_pool.timestamps[e['frame_id']],e['srf_name'],e['srf_uid'],e['event'] ) )
            logger.info("Created 'surface_events.csv' file")

        for s in self.surfaces:
            # per surface names:
            surface_name = '_'+s.name.replace('/','')+'_'+s.uid

            # save surface_positions as pickle file
            save_object(s.cache.to_list(),os.path.join(metrics_dir,'srf_positions'+surface_name))

            #save surface_positions as csv
            with open(os.path.join(metrics_dir,'srf_positons'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer =csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx','timestamp','m_to_screen','m_from_screen','detected_markers'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow( (idx,ts,ref_srf_data['m_to_screen'],ref_srf_data['m_from_screen'],ref_srf_data['detected_markers']) )

            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir,'gaze_positions_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('world_timestamp','world_frame_idx','gaze_timestamp','x_norm','y_norm','x_scaled','y_scaled','on_srf'))
                for idx,ts,ref_srf_data in zip(range(len(self.g_pool.timestamps)),self.g_pool.timestamps,s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in s.gaze_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
                                csv_writer.writerow( (ts,idx,gp['base']['timestamp'],gp['norm_pos'][0],gp['norm_pos'][1],gp['norm_pos'][0]*s.real_world_size['x'],gp['norm_pos'][1]*s.real_world_size['y'],gp['on_srf']) )

            # save fixation on srf as csv.
            with open(os.path.join(metrics_dir,'fixations_on_surface'+surface_name+'.csv'),'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t',quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('id','start_timestamp','duration','start_frame','end_frame','norm_pos_x','norm_pos_y','x_scaled','y_scaled','on_srf'))
                fixations_on_surface = []
                for idx,ref_srf_data in zip(range(len(self.g_pool.timestamps)),s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for f in s.fixations_on_srf_by_frame_idx(idx,ref_srf_data['m_from_screen']):
                                fixations_on_surface.append(f)
                # a fixation can span many frames; keep one row per fixation id
                removed_dublicates = dict([(f['base']['id'],f) for f in fixations_on_surface]).values()
                for f_on_s in removed_dublicates:
                    f = f_on_s['base']
                    f_x,f_y = f_on_s['norm_pos']
                    f_on_srf = f_on_s['on_srf']
                    csv_writer.writerow( (f['id'],f['timestamp'],f['duration'],f['start_frame_index'],f['end_frame_index'],f_x,f_y,f_x*s.real_world_size['x'],f_y*s.real_world_size['y'],f_on_srf) )

            logger.info("Saved surface positon gaze and fixation data for '%s' with uid:'%s'"%(s.name,s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir,'heatmap'+surface_name+'.png'),s.heatmap)

        logger.info("Done exporting reference surface data.")

        # if s.detected and self.img is not None:
        #     #let save out the current surface image found in video
        #     #here we get the verts of the surface quad in norm_coords
        #     mapped_space_one = np.array(((0,0),(1,0),(1,1),(0,1)),dtype=np.float32).reshape(-1,1,2)
        #     screen_space = cv2.perspectiveTransform(mapped_space_one,s.m_to_screen).reshape(-1,2)
        #     #now we convert to image pixel coods
        #     screen_space[:,1] = 1-screen_space[:,1]
        #     screen_space[:,1] *= self.img.shape[0]
        #     screen_space[:,0] *= self.img.shape[1]
        #     s_0,s_1 = s.real_world_size
        #     #no we need to flip vertically again by setting the mapped_space verts accordingly.
        #     mapped_space_scaled = np.array(((0,s_1),(s_0,s_1),(s_0,0),(0,0)),dtype=np.float32)
        #     M = cv2.getPerspectiveTransform(screen_space,mapped_space_scaled)
        #     #here we do the actual perspactive transform of the image.
        #     srf_in_video = cv2.warpPerspective(self.img,M, (int(s.real_world_size['x']),int(s.real_world_size['y'])) )
        #     cv2.imwrite(os.path.join(metrics_dir,'surface'+surface_name+'.png'),srf_in_video)
        #     logger.info("Saved current image as .png file.")
        # else:
        #     logger.info("'%s' is not currently visible. Seek to appropriate frame and repeat this command."%s.name)

    def cleanup(self):
        """ called when the plugin gets terminated.
        This happens either voluntarily or forced.
        if you have a GUI or glfw window destroy it here.
        """
        # persist surface definitions and the marker cache for the next session
        self.surface_definitions["offline_square_marker_surfaces"] = [rs.save_to_dict() for rs in self.surfaces if rs.defined]
        self.surface_definitions.close()
        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()
        for s in self.surfaces:
            s.close_window()
        self.deinit_gui()
def parent_task(return_pid): p = Process(target=child_process) p.start() sleep(1) # Wait for starting process return_pid.value = p.pid
class Offline_Marker_Detector(Plugin):
    """
    Special version of marker detector for use with videofile source.
    It uses a seperate process to search all frames in the world.avi file for markers.
     - self.cache is a list containing marker positions for each frame.
     - self.surfaces[i].cache is a list containing surface positions for each frame
    Both caches are build up over time. The marker cache is also session persistent.
    See marker_tracker.py for more info on this marker tracker.
    """

    def __init__(self, g_pool, gui_settings=None):
        """Load persisted surface definitions and the marker cache, then start the cacher process.

        g_pool       -- global application container (must be the Player app).
        gui_settings -- dict with 'pos', 'size', 'iconified' for the ATB bar;
                        defaults to the historical values when omitted.
        """
        super(Offline_Marker_Detector, self).__init__()
        # NOTE(review): the original used a mutable dict literal as default
        # argument; replaced with the None-sentinel idiom (same effective default).
        if gui_settings is None:
            gui_settings = {'pos': (220, 200), 'size': (300, 300), 'iconified': False}
        self.g_pool = g_pool
        self.gui_settings = gui_settings
        self.order = .2

        # all markers that are detected in the most recent frame
        self.markers = []

        # this plugin only works on recorded data
        if g_pool.app == 'capture':
            raise Exception('For Player only.')

        # in player we load from the rec_dir: but we have a couple options:
        # surfaces defined in a previous player session take precedence over
        # surfaces defined live during capture.
        self.surface_definitions = Persistent_Dict(os.path.join(g_pool.rec_dir, 'surface_definitions'))
        if self.load('offline_square_marker_surfaces', []) != []:
            logger.debug("Found ref surfaces defined or copied in previous session.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool, saved_definition=d, gaze_positions_by_frame=self.g_pool.positions_by_frame)
                             for d in self.load('offline_square_marker_surfaces', []) if isinstance(d, dict)]
        elif self.load('realtime_square_marker_surfaces', []) != []:
            logger.debug("Did not find ref surfaces def created or used by the user in player from earlier session. Loading surfaces defined during capture.")
            self.surfaces = [Offline_Reference_Surface(self.g_pool, saved_definition=d, gaze_positions_by_frame=self.g_pool.positions_by_frame)
                             for d in self.load('realtime_square_marker_surfaces', []) if isinstance(d, dict)]
        else:
            logger.debug("No surface defs found. Please define using GUI.")
            self.surfaces = []

        # ui mode settings (see the mode enum in update_bar_markers)
        self.mode = c_int(0)
        # (surface, vertex_index) pairs currently grabbed in surface-edit mode
        self.edit_surfaces = []

        # detector vars
        self.robust_detection = c_bool(1)
        self.aperture = c_int(11)
        self.min_marker_perimeter = 80

        # check if marker cache is available from last session
        self.persistent_cache = Persistent_Dict(os.path.join(g_pool.rec_dir, 'square_marker_cache'))
        self.cache = Cache_List(self.persistent_cache.get('marker_cache', [False for _ in g_pool.timestamps]))
        logger.debug("Loaded marker cache %s / %s frames had been searched before" % (len(self.cache) - self.cache.count(False), len(self.cache)))
        self.init_marker_cacher()

        # debug vars
        self.show_surface_idx = c_int(0)
        self.recent_pupil_positions = []

        self.img_shape = None
        self.img = None

    def init_gui(self):
        """Create the ATB settings bar and size it to the current window."""
        import atb
        pos = self.gui_settings['pos']
        atb_label = "Marker Detector"
        self._bar = atb.Bar(name=self.__class__.__name__ + str(id(self)), label=atb_label,
                            help="circle", color=(50, 150, 50), alpha=50, text='light',
                            position=pos, refresh=.1, size=self.gui_settings['size'])
        self._bar.iconified = self.gui_settings['iconified']
        self.update_bar_markers()

        # set up bar display padding
        self.on_window_resize(glfwGetCurrentContext(), *glfwGetWindowSize(glfwGetCurrentContext()))

    def unset_alive(self):
        # Signals the plugin framework to terminate this plugin.
        self.alive = False

    def load(self, var_name, default):
        """Read a value from the persisted surface-definition store."""
        return self.surface_definitions.get(var_name, default)

    def save(self, var_name, var):
        """Write a value into the persisted surface-definition store."""
        self.surface_definitions[var_name] = var

    def on_window_resize(self, window, w, h):
        self.win_size = w, h

    def on_click(self, pos, button, action):
        """Grab/release surface corner vertices while in surface-edit mode (mode 1)."""
        if self.mode.value == 1:
            if self.edit_surfaces:
                if action == GLFW_RELEASE:
                    self.edit_surfaces = []
            # no surfaces verts in edit mode, lets see if the curser is close to one:
            else:
                if action == GLFW_PRESS:
                    surf_verts = ((0., 0.), (1., 0.), (1., 1.), (0., 1.))
                    x, y = pos
                    for s in self.surfaces:
                        if s.detected:
                            for (vx, vy), i in zip(s.ref_surface_to_img(np.array(surf_verts)), range(4)):
                                vx, vy = denormalize((vx, vy), (self.img_shape[1], self.img_shape[0]), flip_y=True)
                                if sqrt((x - vx) ** 2 + (y - vy) ** 2) < 15:  # img pixels
                                    self.edit_surfaces.append((s, i))

    def advance(self):
        pass

    def add_surface(self):
        """Register a new empty reference surface and refresh the GUI."""
        self.surfaces.append(Offline_Reference_Surface(self.g_pool, gaze_positions_by_frame=self.g_pool.positions_by_frame))
        self.update_bar_markers()

    def remove_surface(self, i):
        """Destroy surface ``i`` and refresh the GUI."""
        self.surfaces[i].cleanup()
        del self.surfaces[i]
        self.update_bar_markers()

    def update_bar_markers(self):
        """Rebuild the ATB bar: called when surfaces are added/removed or edited."""
        self._bar.clear()
        self._bar.add_button('close', self.unset_alive)
        self._bar.add_button(" add surface ", self.add_surface, key='a')
        # dropdown menu: markers and surface, surface edit mode, heatmaps, metrics
        self._bar.mode_enum = atb.enum("Mode", {"Show Markers and Frames": 0, "Show Marker Id's": 4, "Surface edit mode": 1, "Show Heatmaps": 2, "Show Metrics": 3})
        self._bar.add_var("Mode", self.mode, vtype=self._bar.mode_enum)
        self._bar.add_button(" (re)-calculate gaze distributions ", self.recalculate)
        self._bar.add_button(" Export Gaze and Surface Data ", self.save_surface_statsics_to_file)
        # iterate in reverse so the bar lists surfaces in creation order
        for i, s in reversed(list(enumerate(self.surfaces))):
            self._bar.add_var("%s_name" % i, create_string_buffer(512), getter=s.atb_get_name, setter=s.atb_set_name, group=str(i), label='name')
            self._bar.add_var("%s_markers" % i, create_string_buffer(512), getter=s.atb_marker_status, group=str(i), label='found/registered markers')
            self._bar.add_var("%s_x_scale" % i, vtype=c_float, getter=s.atb_get_scale_x, min=1, setter=s.atb_set_scale_x, group=str(i), label='real width',
                              help='this scale factor is used to adjust the coordinate space for your needs (think photo pixels or mm or whatever)')
            self._bar.add_var("%s_y_scale" % i, vtype=c_float, getter=s.atb_get_scale_y, min=1, setter=s.atb_set_scale_y, group=str(i), label='real height',
                              help='defining x and y scale factor you atumatically set the correct aspect ratio.')
            self._bar.add_var("%s_window" % i, setter=s.toggle_window, getter=s.window_open, group=str(i), label='open in window')
            self._bar.add_button("%s_remove" % i, self.remove_surface, data=i, label='remove', group=str(i))

    def recalculate(self):
        """Regenerate per-surface heatmaps and the gaze-count metric textures for the trimmed section."""
        # fix: without this guard, max() below raised ValueError when no
        # surfaces are registered.
        if not self.surfaces:
            logger.warning("No gaze on any surface for this section!")
            return

        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark, out_mark)

        # calc heatmaps
        for s in self.surfaces:
            if s.defined:
                s.generate_heatmap(section)

        # calc metrics: gaze counts per surface, mapped to a false-color texture
        results = []
        for s in self.surfaces:
            gaze_on_srf = s.gaze_on_srf_in_section(section)
            results.append(len(gaze_on_srf))
            self.metrics_gazecount = len(gaze_on_srf)

        max_res = max(results)
        results = np.array(results, dtype=np.float32)
        if not max_res:
            logger.warning("No gaze on any surface for this section!")
        else:
            results *= 255. / max_res
        results = np.uint8(results)
        results_c_maps = cv2.applyColorMap(results, cv2.COLORMAP_JET)

        for s, c_map in zip(self.surfaces, results_c_maps):
            heatmap = np.ones((1, 1, 4), dtype=np.uint8) * 125
            heatmap[:, :, :3] = c_map
            s.metrics_texture = create_named_texture(heatmap)

    def update(self, frame, recent_pupil_positions, events):
        """Per-frame hook: update caches, locate surfaces, emit surface events, handle edit mode."""
        self.img = frame.img
        self.img_shape = frame.img.shape
        self.update_marker_cache()
        self.markers = self.cache[frame.index]
        if self.markers == False:
            # cache entry is False when this frame was not yet searched
            self.markers = []
            self.seek_marker_cacher(frame.index)  # tell precacher that it better have every thing from here on analyzed

        # locate surfaces
        for s in self.surfaces:
            if not s.locate_from_cache(frame.index):
                s.locate(self.markers)
            if s.detected:
                events.append({'type': 'marker_ref_surface', 'name': s.name, 'uid': s.uid,
                               'm_to_screen': s.m_to_screen, 'm_from_screen': s.m_from_screen,
                               'timestamp': frame.timestamp, 'gaze_on_srf': s.gaze_on_srf})

        if self.mode.value == 4:
            draw_markers(frame.img, self.markers)

        # edit surfaces by user
        if self.mode.value == 1:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos, glfwGetWindowSize(window))
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # Position in img pixels

            for s, v_idx in self.edit_surfaces:
                if s.detected:
                    # NOTE(review): pos is re-normalized on every loop pass, which
                    # looks wrong for more than one grabbed vertex — confirm
                    # before changing, behavior kept as-is.
                    pos = normalize(pos, (self.img_shape[1], self.img_shape[0]), flip_y=True)
                    new_pos = s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx, new_pos)
                    s.cache = None
                    self.heatmap = None
        else:
            # update srf with no or invald cache:
            for s in self.surfaces:
                if s.cache is None:
                    s.init_cache(self.cache)

        # allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()

    def init_marker_cacher(self):
        """Start the background process that searches all video frames for markers."""
        forking_enable(0)  # for MacOs only
        from marker_detector_cacher import fill_cache
        visited_list = [False if x == False else True for x in self.cache]
        video_file_path = os.path.join(self.g_pool.rec_dir, 'world.avi')
        self.cache_queue = Queue()
        self.cacher_seek_idx = Value(c_int, 0)
        self.cacher_run = Value(c_bool, True)
        self.cacher = Process(target=fill_cache, args=(visited_list, video_file_path, self.cache_queue, self.cacher_seek_idx, self.cacher_run))
        self.cacher.start()

    def update_marker_cache(self):
        """Drain the cacher's result queue into self.cache and the surface caches."""
        while not self.cache_queue.empty():
            idx, c_m = self.cache_queue.get()
            self.cache.update(idx, c_m)
            for s in self.surfaces:
                s.update_cache(self.cache, idx=idx)

    def seek_marker_cacher(self, idx):
        # Ask the cacher to prioritize frames from idx onwards.
        self.cacher_seek_idx.value = idx

    def close_marker_cacher(self):
        """Drain remaining results, then signal the cacher to stop and wait for it."""
        self.update_marker_cache()
        self.cacher_run.value = False
        self.cacher.join()

    def gl_display(self):
        """
        Display marker and surface info inside world screen
        """
        self.gl_display_cache_bars()

        for s in self.surfaces:
            s.gl_display_in_window(self.g_pool.image_tex)

        if self.mode.value in (0, 1):
            for m in self.markers:
                hat = np.array([[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]], dtype=np.float32)
                hat = cv2.perspectiveTransform(hat, m_marker_to_screen(m))
                draw_gl_polyline(hat.reshape((5, 2)), (0.1, 1., 1., .3), type='Polygon')
                draw_gl_polyline(hat.reshape((5, 2)), (0.1, 1., 1., .6))
            for s in self.surfaces:
                s.gl_draw_frame()
        if self.mode.value == 1:
            for s in self.surfaces:
                s.gl_draw_corners()
        if self.mode.value == 2:
            for s in self.surfaces:
                s.gl_display_heatmap()
        if self.mode.value == 3:
            # draw a backdrop to represent the gaze that is not on any surface
            for s in self.surfaces:
                # draw a quad on surface with false color of value.
                s.gl_display_metrics()

    def gl_display_cache_bars(self):
        """Draw timeline bars showing which frames are cached and where surfaces were found."""
        padding = 20.

        # Lines for areas that have been cached
        cached_ranges = []
        for r in self.cache.visited_ranges:  # [[0,1],[3,4]]
            cached_ranges += (r[0], 0), (r[1], 0)  # [(0,0),(1,0),(3,0),(4,0)]

        # Lines where surfaces have been found in video
        cached_surfaces = []
        for s in self.surfaces:
            found_at = []
            if s.cache is not None:
                for r in s.cache.positive_ranges:  # [[0,1],[3,4]]
                    found_at += (r[0], 0), (r[1], 0)  # [(0,0),(1,0),(3,0),(4,0)]
                cached_surfaces.append(found_at)

        glMatrixMode(GL_PROJECTION)
        glPushMatrix()
        glLoadIdentity()
        width, height = self.win_size
        h_pad = padding * (self.cache.length - 2) / float(width)
        v_pad = padding * 1. / (height - 2)
        # ranging from 0 to cache_len-1 (horizontal) and 0 to 1 (vertical)
        gluOrtho2D(-h_pad, (self.cache.length - 1) + h_pad, -v_pad, 1 + v_pad)

        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        glLoadIdentity()

        color = (8., .6, .2, 8.)
        draw_gl_polyline(cached_ranges, color=color, type='Lines', thickness=4)
        color = (0., .7, .3, 8.)

        for s in cached_surfaces:
            glTranslatef(0, .02, 0)
            draw_gl_polyline(s, color=color, type='Lines', thickness=2)

        glMatrixMode(GL_PROJECTION)
        glPopMatrix()
        glMatrixMode(GL_MODELVIEW)
        glPopMatrix()

    def save_surface_statsics_to_file(self):
        """Export gaze/surface statistics for the trimmed section to a metrics dir.

        between in and out mark
            report: gaze distribution:
                - total gazepoints
                - gaze points on surface x
                - gaze points not on any surface
            report: surface visisbility
                - total frames
                - surface x visible framecount
            surface events:
                frame_no, ts, surface "name", "id" enter/exit
            for each surface:
                gaze_on_name_id.csv
                positions_of_name_id.csv
        """
        in_mark = self.g_pool.trim_marks.in_mark
        out_mark = self.g_pool.trim_marks.out_mark
        section = slice(in_mark, out_mark)

        metrics_dir = os.path.join(self.g_pool.rec_dir, "metrics_%s-%s" % (in_mark, out_mark))
        logger.info("exporting metrics to %s" % metrics_dir)
        if os.path.isdir(metrics_dir):
            logger.info("Will overwrite previous export for this section")
        else:
            try:
                os.mkdir(metrics_dir)
            except:
                logger.warning("Could not make metrics dir %s" % metrics_dir)
                return

        with open(os.path.join(metrics_dir, 'surface_visibility.csv'), 'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface visibility report
            frame_count = len(self.g_pool.timestamps[section])
            csv_writer.writerow(('frame_count', frame_count))
            csv_writer.writerow((''))  # blank separator row
            csv_writer.writerow(('surface_name', 'visible_frame_count'))
            for s in self.surfaces:
                if s.cache is None:
                    logger.warning("The surface is not cached. Please wait for the cacher to collect data.")
                    return
                visible_count = s.visible_count_in_section(section)
                csv_writer.writerow((s.name, visible_count))
            logger.info("Created 'surface_visibility.csv' file")

        with open(os.path.join(metrics_dir, 'surface_gaze_distribution.csv'), 'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # gaze distribution report
            gaze_in_section = list(chain(*self.g_pool.positions_by_frame[section]))
            not_on_any_srf = set([gp['timestamp'] for gp in gaze_in_section])
            csv_writer.writerow(('total_gaze_point_count', len(gaze_in_section)))
            csv_writer.writerow((''))  # blank separator row
            csv_writer.writerow(('surface_name', 'gaze_count'))
            for s in self.surfaces:
                gaze_on_srf = s.gaze_on_srf_in_section(section)
                gaze_on_srf = set([gp["timestamp"] for gp in gaze_on_srf])
                not_on_any_srf -= gaze_on_srf
                csv_writer.writerow((s.name, len(gaze_on_srf)))
            csv_writer.writerow(('not_on_any_surface', len(not_on_any_srf)))
            logger.info("Created 'surface_gaze_distribution.csv' file")

        with open(os.path.join(metrics_dir, 'surface_events.csv'), 'wb') as csvfile:
            csv_writer = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)

            # surface events report: one enter and one exit row per visibility range
            csv_writer.writerow(('frame_number', 'timestamp', 'surface_name', 'surface_uid', 'event_type'))
            events = []
            for s in self.surfaces:
                for enter_frame_id, exit_frame_id in s.cache.positive_ranges:
                    events.append({'frame_id': enter_frame_id, 'srf_name': s.name, 'srf_uid': s.uid, 'event': 'enter'})
                    events.append({'frame_id': exit_frame_id, 'srf_name': s.name, 'srf_uid': s.uid, 'event': 'exit'})
            events.sort(key=lambda x: x['frame_id'])
            for e in events:
                csv_writer.writerow((e['frame_id'], self.g_pool.timestamps[e['frame_id']], e['srf_name'], e['srf_uid'], e['event']))
            logger.info("Created 'surface_events.csv' file")

        for s in self.surfaces:
            # per surface names:
            surface_name = '_' + s.name.replace('/', '') + '_' + s.uid

            # save surface_positions as pickle file
            save_object(s.cache.to_list(), os.path.join(metrics_dir, 'srf_positions' + surface_name))

            # save surface_positions as csv
            # NOTE(review): 'srf_positons' (sic) kept byte-for-byte so existing
            # consumers of the exported files keep working.
            with open(os.path.join(metrics_dir, 'srf_positons' + surface_name + '.csv'), 'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('frame_idx', 'timestamp', 'm_to_screen', 'm_from_screen', 'detected_markers'))
                for idx, ts, ref_srf_data in zip(range(len(self.g_pool.timestamps)), self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            csv_writer.writerow((idx, ts, ref_srf_data['m_to_screen'], ref_srf_data['m_from_screen'], ref_srf_data['detected_markers']))

            # save gaze on srf as csv.
            with open(os.path.join(metrics_dir, 'gaze_positions_on_surface' + surface_name + '.csv'), 'wb') as csvfile:
                csv_writer = csv.writer(csvfile, delimiter='\t', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                csv_writer.writerow(('world_frame_idx', 'world_timestamp', 'eye_timestamp', 'x_norm', 'y_norm', 'x_scaled', 'y_scaled', 'on_srf'))
                for idx, ts, ref_srf_data in zip(range(len(self.g_pool.timestamps)), self.g_pool.timestamps, s.cache):
                    if in_mark <= idx <= out_mark:
                        if ref_srf_data is not None and ref_srf_data is not False:
                            for gp in ref_srf_data['gaze_on_srf']:
                                gp_x, gp_y = gp['norm_gaze_on_srf']
                                on_srf = (0 <= gp_x <= 1) and (0 <= gp_y <= 1)
                                # fix: the y_scaled column was computed from gp_x
                                # in the original, writing x data into the y column.
                                csv_writer.writerow((idx, ts, gp['timestamp'], gp_x, gp_y, gp_x * s.scale_factor[0], gp_y * s.scale_factor[1], on_srf))

            logger.info("Saved surface positon data and gaze on surface data for '%s' with uid:'%s'" % (s.name, s.uid))

            if s.heatmap is not None:
                logger.info("Saved Heatmap as .png file.")
                cv2.imwrite(os.path.join(metrics_dir, 'heatmap' + surface_name + '.png'), s.heatmap)

    def get_init_dict(self):
        """Return constructor kwargs needed to restore this plugin next session."""
        d = {}
        if hasattr(self, '_bar'):
            gui_settings = {'pos': self._bar.position, 'size': self._bar.size, 'iconified': self._bar.iconified}
            d['gui_settings'] = gui_settings
        return d

    def cleanup(self):
        """Called when the plugin gets terminated (voluntarily or forced).

        Persists surface definitions and the marker cache, stops the cacher
        process, and destroys any ATB bar or glfw windows owned by surfaces.
        """
        self.save("offline_square_marker_surfaces", [rs.save_to_dict() for rs in self.surfaces if rs.defined])
        self.close_marker_cacher()
        self.persistent_cache["marker_cache"] = self.cache.to_list()
        self.persistent_cache.close()
        self.surface_definitions.close()

        for s in self.surfaces:
            s.close_window()
        self._bar.destroy()
def cmd_save(args):
    '''save a graph'''
    # Run the save in a separate process so the interactive console stays
    # responsive; the child is deliberately not joined (fire-and-forget).
    save_args = [
        mestate.last_graph,
        mestate.child_pipe_send_console,
        mestate.child_pipe_send_graph,
        mestate.status.msgs,
    ]
    worker = Process(target=save_process, args=save_args)
    worker.start()
def run_crawl(path=None):
    """Run ``_crawl`` in a child process and wait for it to finish.

    path -- origin identifier forwarded to ``_crawl``; falls back to the
            historical hard-coded default 'hahahahha' when omitted.
    """
    # fix: the original accepted ``path`` but ignored it, always crawling the
    # hard-coded origin. Callers passing no argument see identical behavior.
    p = Process(target=_crawl, args=['hahahahha' if path is None else path])
    p.start()
    # join for consistency with the other crawl wrappers in this file and to
    # avoid leaving a finished child unreaped.
    p.join()