Example 1
 def __init__(self,
              fields,
              title='MAVProxy: LiveGraph',
              timespan=20.0,
              tickresolution=0.2,
              colors=[
                  'red', 'green', 'blue', 'orange', 'olive', 'cyan',
                  'magenta', 'brown', 'violet', 'purple', 'grey', 'black'
              ]):
     if platform.system() == 'Darwin':
         import billiard as multiprocessing
     else:
         import multiprocessing
     self.fields = fields
     self.colors = colors
     self.title = title
     self.timespan = timespan
     self.tickresolution = tickresolution
     self.values = [None] * len(self.fields)
     if platform.system() == 'Darwin':
         multiprocessing.forking_enable(False)
     self.parent_pipe, self.child_pipe = multiprocessing.Pipe()
     self.close_graph = multiprocessing.Event()
     self.close_graph.clear()
     self.child = multiprocessing.Process(target=self.child_task)
     self.child.start()
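A minimal, self-contained sketch of the pattern in Example 1: import billiard in place of multiprocessing on macOS (where the stock module's forking support was long broken), disable forking there, and hand the child one end of a Pipe. The names child_task and start_child are illustrative, and forking_enable only exists in the older billiard releases these examples target.

import platform

if platform.system() == 'Darwin':
    import billiard as multiprocessing
else:
    import multiprocessing

def child_task(pipe):
    # Stand-in for the plotting child above: echo one value back.
    pipe.send(pipe.recv())

def start_child():
    if platform.system() == 'Darwin':
        # With forking disabled, billiard spawns a fresh interpreter,
        # so child_task must be importable at module level.
        multiprocessing.forking_enable(False)
    parent_pipe, child_pipe = multiprocessing.Pipe()
    child = multiprocessing.Process(target=child_task, args=(child_pipe,))
    child.start()
    return parent_pipe, child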
Example 2
    def add_export(self):
        # on MacOS we will not use os.fork, elsewhere this does nothing.
        forking_enable(0)

        logger.debug("Adding new export.")
        should_terminate = Value(c_bool, False)
        frames_to_export = Value(c_int, 0)
        current_frame = Value(c_int, 0)

        rec_dir = self.g_pool.rec_dir
        user_dir = self.g_pool.user_dir
        start_frame = self.g_pool.trim_marks.in_mark
        end_frame = self.g_pool.trim_marks.out_mark + 1  # end_frame is exclusive
        frames_to_export.value = end_frame - start_frame

        # Here we make clones of every plugin that supports it.
        # So it runs in the current config when we launch the exporter.
        plugins = self.g_pool.plugins.get_initializers()

        out_file_path = verify_out_file_path(self.rec_name, self.g_pool.rec_dir)
        process = Export_Process(
            target=export,
            args=(
                should_terminate,
                frames_to_export,
                current_frame,
                rec_dir,
                user_dir,
                start_frame,
                end_frame,
                plugins,
                out_file_path,
            ),
        )
        self.new_export = process
Example 3
	def display_graph(self, graphdef):
	    '''display a graph'''
	    if 'mestate' in globals():
	        self.mestate.console.write("Expression: %s\n" % ' '.join(graphdef.expression.split()))
	    else:
	        self.mestate.child_pipe_send_console.send("Expression: %s\n" % ' '.join(graphdef.expression.split()))
	    #mestate.mlog.reduce_by_flightmodes(mestate.flightmode_selections)

	    #setup the graph, then pass to a new process and display
	    self.mg = grapher.MavGraph()
	    self.mg.set_marker(self.mestate.settings.marker)
	    self.mg.set_condition(self.mestate.settings.condition)
	    self.mg.set_xaxis(self.mestate.settings.xaxis)
	    self.mg.set_linestyle(self.mestate.settings.linestyle)
	    self.mg.set_show_flightmode(self.mestate.settings.show_flightmode)
	    self.mg.set_legend(self.mestate.settings.legend)
	    self.mg.add_mav(self.mestate.mlog)
	    for f in graphdef.expression.split():
	        self.mg.add_field(f)
	    self.mg.process(self.mestate.flightmode_selections, self.mestate.mlog._flightmodes)
	    self.lenmavlist = len(self.mg.mav_list)
	    if platform.system() == 'Darwin':
	    	forking_enable(False)
	    #Important - mg.mav_list is the full logfile and can be very large in size
	    #To avoid slowdowns in Windows (which copies the vars to the new process)
	    #We need to empty this var when we're finished with it
	    self.mg.mav_list = []
	    child = Process(target=self.mg.show, args=[self.lenmavlist, ])
	    child.start()
	    self.mestate.mlog.rewind()
Example 4
    def display_graph(self, graphdef):
        '''display a graph'''
        if 'mestate' in globals():
            self.mestate.console.write("Expression: %s\n" %
                                       ' '.join(graphdef.expression.split()))
        else:
            self.mestate.child_pipe_send_console.send(
                "Expression: %s\n" % ' '.join(graphdef.expression.split()))
        #mestate.mlog.reduce_by_flightmodes(mestate.flightmode_selections)

        #setup the graph, then pass to a new process and display
        self.mg = grapher.MavGraph()
        self.mg.set_marker(self.mestate.settings.marker)
        self.mg.set_condition(self.mestate.settings.condition)
        self.mg.set_xaxis(self.mestate.settings.xaxis)
        self.mg.set_linestyle(self.mestate.settings.linestyle)
        self.mg.set_show_flightmode(self.mestate.settings.show_flightmode)
        self.mg.set_legend(self.mestate.settings.legend)
        self.mg.add_mav(self.mestate.mlog)
        for f in graphdef.expression.split():
            self.mg.add_field(f)
        self.mg.process(self.mestate.flightmode_selections,
                        self.mestate.mlog._flightmodes)
        self.lenmavlist = len(self.mg.mav_list)
        if platform.system() == 'Darwin':
            forking_enable(False)
        #Important - mg.mav_list is the full logfile and can be very large in size
        #To avoid slowdowns in Windows (which copies the vars to the new process)
        #We need to empty this var when we're finished with it
        self.mg.mav_list = []
        child = Process(target=self.mg.show, args=[
            self.lenmavlist,
        ])
        child.start()
        self.mestate.mlog.rewind()
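The warning about mav_list is easy to quantify: under a spawn-style start (Windows, or macOS with forking disabled) the Process target and arguments are pickled into the child, so anything reachable from self.mg gets serialized unless it is cleared first. A rough, hypothetical measurement:

import pickle

class Graph(object):
    pass

g = Graph()
g.mav_list = list(range(1000000))  # stand-in for a large parsed logfile
print(len(pickle.dumps(g)))        # megabytes: the whole list is serialized
g.mav_list = []                    # what display_graph does before Process()
print(len(pickle.dumps(g)))        # now only a few dozen bytes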
Example 5
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        if self.options.get('maxtasksperchild'):
            try:
                from billiard.connection import Connection
                Connection.send_offset
            except (ImportError, AttributeError):
                # billiard C extension not installed
                warning(MAXTASKS_NO_BILLIARD)

        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool if self.options.get('threads', True)
                else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              synack=False,
                              **self.options)

        # Create proxy methods
        self.on_apply = P.apply_async
        self.maintain_pool = P.maintain_pool
        self.terminate_job = P.terminate_job
        self.grow = P.grow
        self.shrink = P.shrink
        self.restart = P.restart
        self.maybe_handle_result = P._result_handler.handle_event
        self.handle_result_event = P.handle_result_event
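The "proxy methods" block is plain attribute assignment of bound methods: once it runs, calls like self.on_apply(...) go straight to the pool object with no forwarding wrapper in between. A toy illustration (not Celery code; Pool and TaskPool here are hypothetical):

class Pool(object):
    def apply_async(self, fn):
        return fn()

class TaskPool(object):
    def on_start(self):
        self._pool = Pool()
        # Bound-method assignment: on_apply now *is* pool.apply_async.
        self.on_apply = self._pool.apply_async

tp = TaskPool()
tp.on_start()
assert tp.on_apply(lambda: 42) == 42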
Example 6
    def add_export(self):
        # on MacOS we will not use os.fork, elsewhere this does nothing.
        forking_enable(0)

        logger.debug("Adding new export.")
        should_terminate = Value(c_bool, False)
        frames_to_export = Value(c_int, 0)
        current_frame = Value(c_int, 0)

        rec_dir = self.g_pool.rec_dir
        user_dir = self.g_pool.user_dir
        start_frame = self.g_pool.trim_marks.in_mark
        end_frame = self.g_pool.trim_marks.out_mark + 1  #end_frame is exclusive
        frames_to_export.value = end_frame - start_frame

        # Here we make clones of every plugin that supports it.
        # So it runs in the current config when we launch the exporter.
        plugins = self.g_pool.plugins.get_initializers()

        out_file_path = verify_out_file_path(self.rec_name,
                                             self.g_pool.rec_dir)
        process = Export_Process(
            target=export,
            args=(should_terminate, frames_to_export, current_frame, rec_dir,
                  user_dir, start_frame, end_frame, plugins, out_file_path))
        self.new_export = process
Example 7
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        if self.options.get("maxtasksperchild"):
            try:
                import _billiard  # noqa

                _billiard.Connection.send_offset
            except (ImportError, AttributeError):
                # billiard C extension not installed
                warning(MAXTASKS_NO_BILLIARD)

        forking_enable(self.forking_enable)
        Pool = self.BlockingPool if self.options.get("threads", True) else self.Pool
        P = self._pool = Pool(processes=self.limit, initializer=process_initializer, synack=False, **self.options)
        self.on_apply = P.apply_async
        self.on_soft_timeout = P._timeout_handler.on_soft_timeout
        self.on_hard_timeout = P._timeout_handler.on_hard_timeout
        self.maintain_pool = P.maintain_pool
        self.terminate_job = self._pool.terminate_job
        self.grow = self._pool.grow
        self.shrink = self._pool.shrink
        self.restart = self._pool.restart
        self.maybe_handle_result = P._result_handler.handle_event
        self.outbound_buffer = deque()
        self.handle_result_event = P.handle_result_event
        self._active_writes = set()
        self._active_writers = set()
Example 8
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        if self.options.get('maxtasksperchild') and sys.platform != 'win32':
            try:
                from billiard.connection import Connection
                Connection.send_offset
            except (ImportError, AttributeError):
                # billiard C extension not installed
                warning(MAXTASKS_NO_BILLIARD)

        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool if self.options.get('threads', True)
                else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              on_process_exit=process_destructor,
                              synack=False,
                              **self.options)

        # Create proxy methods
        self.on_apply = P.apply_async
        self.maintain_pool = P.maintain_pool
        self.terminate_job = P.terminate_job
        self.grow = P.grow
        self.shrink = P.shrink
        self.flush = getattr(P, 'flush', None)  # FIXME add to billiard
        self.restart = P.restart
Example 9
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        if self.options.get('maxtasksperchild'):
            try:
                import _billiard  # noqa
                _billiard.Connection.send_offset
            except (ImportError, AttributeError):
                # billiard C extension not installed
                warning(MAXTASKS_NO_BILLIARD)

        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool
                if self.options.get('threads', True) else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              synack=False,
                              **self.options)
        self.on_apply = P.apply_async
        self.on_soft_timeout = P._timeout_handler.on_soft_timeout
        self.on_hard_timeout = P._timeout_handler.on_hard_timeout
        self.maintain_pool = P.maintain_pool
        self.terminate_job = self._pool.terminate_job
        self.grow = self._pool.grow
        self.shrink = self._pool.shrink
        self.restart = self._pool.restart
        self.maybe_handle_result = P._result_handler.handle_event
        self.outbound_buffer = deque()
        self.handle_result_event = P.handle_result_event
        self._active_writes = set()
        self._active_writers = set()
Example 10
def main():

    # To assign camera by name: put string(s) in list
    eye_cam_names = [
        "USB 2.0 Camera", "Microsoft", "6000", "Integrated Camera",
        "HD USB Camera"
    ]
    world_src = [
        "Logitech Camera", "(046d:081d)", "C510", "B525", "C525", "C615",
        "C920", "C930e"
    ]
    # first match for eye0 and second match for eye1
    eye_src = (eye_cam_names, 0), (eye_cam_names, 1)

    # to assign cameras directly, using integers as demonstrated below
    # eye_src =  4 , 5 #second arg will be ignored for monocular eye trackers
    # world_src = 1

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Downloads/000/eye0.mkv' , '/Users/mkassner/Downloads/eye.avi'
    # world_src = "/Users/mkassner/Downloads/000/world.mkv"

    # Camera video size in pixels (width,height)
    eye_size = (640, 480)
    world_size = (1280, 720)

    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    #g_pool holds variables. Only if added here they are shared across processes.
    g_pool = Global_Container()

    # Create and initialize IPC
    g_pool.pupil_queue = Queue()
    g_pool.quit = Value(c_bool, 0)
    g_pool.timebase = Value(c_double, 0)
    g_pool.eye_tx = []
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.version = get_version(version_file)
    g_pool.app = 'capture'
    g_pool.binocular = binocular

    p_eye = []
    for eye_id in range(1 + 1 * binocular):
        rx, tx = Pipe(False)
        p_eye += [
            Process(target=eye,
                    args=(g_pool, eye_src[eye_id], eye_size, rx, eye_id))
        ]
        g_pool.eye_tx += [tx]
        p_eye[-1].start()

    world(g_pool, world_src, world_size)

    # Exit / clean-up
    for p in p_eye:
        p.join()
Example 11
def main():
    billiard.forking_enable(False)
    x = billiard.Pool(2)
    x.apply_async(f, (8, ), callback=cb)

    x.close()
    x.join()
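Example 11 assumes f and cb are defined elsewhere in the source file. A self-contained version might look like the following, with f and cb as stand-ins; as above, forking_enable is only present in older billiard releases, and with forking disabled the workers re-import this module, so the __main__ guard matters.

import billiard

def f(x):
    return x * x

def cb(result):
    print('got %r' % (result,))

def main():
    billiard.forking_enable(False)
    pool = billiard.Pool(2)
    pool.apply_async(f, (8,), callback=cb)
    pool.close()   # no new tasks; the pending one still runs
    pool.join()

if __name__ == '__main__':
    main()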
Example 12
def opensesame():

    set_paths()
    # Support for multiprocessing when packaged
    # In OS X the multiprocessing module is horribly broken, but a fixed
    # version has been released as the 'billiard' module
    if platform.system() == 'Darwin':
        # Use normal multiprocessing module from python 3.4 and on
        if sys.version_info >= (3, 4):
            from multiprocessing import freeze_support, set_start_method
            freeze_support()
            set_start_method('spawn')
        else:
            from billiard import freeze_support, forking_enable
            freeze_support()
            forking_enable(0)
    else:
        from multiprocessing import freeze_support
        freeze_support()
    # Parse the (optional) environment file that contains special paths, etc.
    from libopensesame.misc import parse_environment_file
    parse_environment_file()
    # Force the new-style Qt API
    import sip
    import qtpy
    sip.setapi('QString', 2)
    sip.setapi('QVariant', 2)
    # Do the basic window initialization
    from qtpy.QtWidgets import QApplication
    # From Qt 5.6 on, QtWebEngine is the default way to render web pages
    # QtWebEngineWidgets must be imported before a QCoreApplication instance is
    # created.
    try:
        from qtpy import QtWebEngineWidgets
    except ImportError:
        pass
    app = QApplication(sys.argv)
    # Enable High DPI display with PyQt5
    if hasattr(qtpy.QtCore.Qt, 'AA_UseHighDpiPixmaps'):
        app.setAttribute(qtpy.QtCore.Qt.AA_UseHighDpiPixmaps)
    from libqtopensesame.qtopensesame import qtopensesame
    opensesame = qtopensesame(app)
    opensesame.__script__ = __file__
    app.processEvents()
    # Install the translator. For some reason, the translator needs to be
    # instantiated here and not in the set_locale() function, otherwise the
    # locale is ignored.
    from qtpy.QtCore import QTranslator
    translator = QTranslator()
    opensesame.set_locale(translator)
    # Now that the window is shown, load the remaining modules and resume the
    # GUI initialization.
    opensesame.resume_init()
    opensesame.restore_window_state()
    opensesame.refresh()
    opensesame.show()
    # Added for OS X, otherwise Window will not appear
    opensesame.raise_()
    # Exit using the application exit status
    sys.exit(app.exec_())
Example 13
    def add_export(self):
        # on MacOS we will not use os.fork, elsewhere this does nothing.
        forking_enable(0)

        logger.debug("Adding new export.")
        should_terminate = RawValue(c_bool, False)
        frames_to_export = RawValue(c_int, 0)
        current_frame = RawValue(c_int, 0)

        data_dir = self.data_dir
        start_frame = self.start_frame.value
        end_frame = self.end_frame.value
        plugins = []

        # Here we make clones of every plugin that supports it.
        # So it runs in the current config when we launch the exporter.
        for p in self.g_pool.plugins:
            try:
                p_initializer = p.get_class_name(), p.get_init_dict()
                plugins.append(p_initializer)
            except AttributeError:
                pass

        out_file_path = verify_out_file_path(self.rec_name.value,
                                             self.data_dir)
        process = Process(target=export,
                          args=(should_terminate, frames_to_export,
                                current_frame, data_dir, start_frame,
                                end_frame, plugins, out_file_path))
        process.should_terminate = should_terminate
        process.frames_to_export = frames_to_export
        process.current_frame = current_frame
        process.out_file_path = out_file_path
        self.new_export = process
Example 14
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        if self.options.get('maxtasksperchild') and sys.platform != 'win32':
            try:
                from billiard.connection import Connection
                Connection.send_offset
            except (ImportError, AttributeError):
                # billiard C extension not installed
                warning(MAXTASKS_NO_BILLIARD)

        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool
                if self.options.get('threads', True) else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              on_process_exit=process_destructor,
                              synack=False,
                              **self.options)

        # Create proxy methods
        self.on_apply = P.apply_async
        self.maintain_pool = P.maintain_pool
        self.terminate_job = P.terminate_job
        self.grow = P.grow
        self.shrink = P.shrink
        self.flush = getattr(P, 'flush', None)  # FIXME add to billiard
Example 15
    def add_export(self):
        # on MacOS we will not use os.fork, elsewhere this does nothing.
        forking_enable(0)

        logger.debug("Adding new export.")
        should_terminate = RawValue(c_bool,False)
        frames_to_export  = RawValue(c_int,0)
        current_frame = RawValue(c_int,0)

        data_dir = self.data_dir
        start_frame= self.start_frame.value
        end_frame= self.end_frame.value
        plugins = []

        # Here we make clones of every plugin that supports it.
        # So it runs in the current config when we launch the exporter.
        for p in self.g_pool.plugins:
            try:
                p_initializer = p.get_class_name(),p.get_init_dict()
                plugins.append(p_initializer)
            except AttributeError:
                pass

        out_file_path=verify_out_file_path(self.rec_name.value,self.data_dir)
        process = Process(target=export, args=(should_terminate,frames_to_export,current_frame, data_dir,start_frame,end_frame,plugins,out_file_path))
        process.should_terminate = should_terminate
        process.frames_to_export = frames_to_export
        process.current_frame = current_frame
        process.out_file_path = out_file_path
        self.new_export = process
Example 16
def main():
    # To assign camera by name: put string(s) in list

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='GUI for gaze tracking and pupillometry')
    parser.add_argument('-eye', dest='eye_file', type=str, help="Work with existing video recording, instead of live feed", default='')
    parser.add_argument('-world', dest='world_file', type=str, help="Work with existing video recording, instead of live feed", default='')

    args = parser.parse_args()

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    if args.eye_file == '':
        eye_src = ["UI154xLE-M", "USB Camera-B4.09.24.1", "FaceTime Camera (Built-in)", "Microsoft", "6000","Integrated Camera"]
        # to assign cameras directly, using integers as demonstrated below
        # eye_src = 1
    else:
#        print "Using provided file: %s" % args.filename
        eye_src = args.eye_file

    if args.world_file == '':
        world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]
        # to assign cameras directly, using integers as demonstrated below
        # world_src = 0
    else:
        world_src = args.world_file

    # Camera video size in pixels (width,height)
    eye_size = (260,216) #(1280,1024)
    world_size = (640,480)


    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double,0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool,world_src,world_size)

    # Exit / clean-up
    p_eye.join()
Example 17
	def start(self,windowSize=(200,10),windowPosition=(0,0),windowColor=(255,255,255),doBorder=True):
		import billiard
		billiard.forking_enable(0)
		self.qTo = billiard.Queue()
		self.qFrom = billiard.Queue()
		self.process = billiard.Process( target=stamperLoop , args=(windowSize,windowPosition,windowColor,doBorder,self.qTo,self.qFrom,) )
		self.process.start()
		return None
Example 18
def forking_enable(enabled):
    try:
        from billiard import forking_enable
    except ImportError:
        try:
            from multiprocessing import forking_enable
        except ImportError:
            return
    forking_enable(enabled)
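Because this wrapper degrades to a silent no-op when neither library exposes forking_enable (Python 3.4+ replaced it with start methods), callers can invoke it unconditionally. A hypothetical caller:

from multiprocessing import Process

def work():
    print('child running')

if __name__ == '__main__':
    forking_enable(False)  # no-op on modern Pythons, per the wrapper above
    p = Process(target=work)
    p.start()
    p.join()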
Example 19
def main():
    # To assign camera by name: put string(s) in list
    eye_cam_names = ["USB 2.0 Camera","Microsoft", "6000","Integrated Camera"]
    world_src = ["Logitech Camera","(046d:081d)", "(046d:0991)", "C510","B525", "C525","C615","C920","C930e"]
    # world_src = ["USB 2.0 Camera","Microsoft", "5000","Integrated Camera"]
    eye_src = (eye_cam_names,0),(eye_cam_names,1) #first match for eye0 and second match for eye1

    # to assign cameras directly, using integers as demonstrated below
    # eye_src =  4 , 5 #second arg will be ignored for monocular eye trackers
    # world_src = 1

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Downloads/eye.avi' , '/Users/mkassner/Downloads/eye.avi'
    # world_src = "/Users/mkassner/Desktop/2014_01_21/000/world.avi"

    # Camera video size in pixels (width,height)
    eye_size = (640,480)
    world_size = (1280,720)


    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    #g_pool holds variables. Only if added here they are shared across processes.
    g_pool = Global_Container()

    # Create and initialize IPC
    g_pool.pupil_queue = Queue()
    g_pool.quit = Value(c_bool,0)
    g_pool.timebase = Value(c_double,0)
    g_pool.eye_tx = []
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = get_version(version_file)
    g_pool.app = 'capture'
    g_pool.binocular = binocular


    p_eye = []
    for eye_id in range(1+1*binocular):
        rx,tx = Pipe(False)
        p_eye += [Process(target=eye, args=(g_pool,eye_src[eye_id],eye_size,rx,eye_id))]
        g_pool.eye_tx += [tx]
        p_eye[-1].start()
        if platform.system() == 'Linux':
            # We need to give the camera driver some time before requesting another camera.
            sleep(0.5)

    world(g_pool,world_src,world_size)


    # Exit / clean-up
    for p in p_eye:
        p.join()
Example 20
 def init_marker_cacher(self):
     forking_enable(0) # for MacOS only
     from marker_detector_cacher import fill_cache
     visited_list = [False if x == False else True for x in self.cache]
     video_file_path =  self.g_pool.capture.src
     self.cache_queue = Queue()
     self.cacher_seek_idx = Value('i',0)
     self.cacher_run = Value(c_bool,True)
     self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter))
     self.cacher.start()
Example 21
 def create(self, w):
     forking_enable(w.no_execv or not w.force_execv)
     pool = w.pool = self.instantiate(w.pool_cls, w.min_concurrency,
                             initargs=(w.app, w.hostname),
                             maxtasksperchild=w.max_tasks_per_child,
                             timeout=w.task_time_limit,
                             soft_timeout=w.task_soft_time_limit,
                             putlocks=w.pool_putlocks,
                             lost_worker_timeout=w.worker_lost_wait)
     return pool
Example 22
 def init_marker_cacher(self):
     forking_enable(0) # for MacOS only
     from marker_detector_cacher import fill_cache
     visited_list = [False if x == False else True for x in self.cache]
     video_file_path =  os.path.join(self.g_pool.rec_dir,'world.avi')
     self.cache_queue = Queue()
     self.cacher_seek_idx = Value(c_int,0)
     self.cacher_run = Value(c_bool,True)
     self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run))
     self.cacher.start()
Example 23
File: main.py Project: elmorg/pupil
def main():

    # To assign camera by name: put string(s) in list
    world_src = ["Pupil Cam1 ID2","Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]
    eye0 = ["Pupil Cam1 ID0","HD-6000","Integrated Camera","HD USB Camera","USB 2.0 Camera"]
    eye1 = ["Pupil Cam1 ID1","HD-6000","Integrated Camera"]
    eye_src = eye0, eye1

    # to assign cameras directly, using integers as demonstrated below
    # eye_src =  1 , 1 #second arg will be ignored for monocular eye trackers
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Downloads/000/eye0.mkv' , '/Users/mkassner/Downloads/eye.avi'
    # world_src = "/Users/mkassner/Downloads/000/world.mkv"

    # Default camera video size in pixels (width,height)
    eye_size = (640,480)
    world_size = (1280,720)

    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    #g_pool holds variables. Only if added here they are shared across processes.
    g_pool = Global_Container()

    # Create and initialize IPC
    g_pool.pupil_queue = Queue()
    g_pool.quit = Value(c_bool,0)
    g_pool.timebase = Value(c_double,0)
    g_pool.eye_tx = []
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.version = get_version(version_file)
    g_pool.app = 'capture'
    g_pool.binocular = binocular
    


    p_eye = []
    for eye_id in range(1+1*binocular):
        eye_end,world_end = Pipe(True)
        p_eye += [Process(target=eye, args=(g_pool,eye_src[eye_id],eye_size,eye_end,eye_id))]
        p_eye[-1].start()
        #wait for ready message from eye to sequentialize startup
        logger.debug(world_end.recv())
        g_pool.eye_tx += [world_end]


    world(g_pool,world_src,world_size)

    # Exit / clean-up
    for p in p_eye:
        p.join()
Example 24
 def init_marker_cacher(self):
     forking_enable(0) # for MacOS only
     from marker_detector_cacher import fill_cache
     visited_list = [False if x == False else True for x in self.cache]
     video_file_path =  self.g_pool.capture.src
     timestamps = self.g_pool.capture.timestamps
     self.cache_queue = Queue()
     self.cacher_seek_idx = Value('i',0)
     self.cacher_run = Value(c_bool,True)
     self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,timestamps,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher))
     self.cacher.start()
Example 25
    def run_player(self, graphdef):
        if 'mestate' in globals():
            self.mestate.console.write("Running Player...")
        else:
            self.mestate.child_pipe_send_console.send("Running Player...")
        self.player = player.MavPlay()
        self.player.add_mav(self.mlog)
        if platform.system() == 'Darwin':
            forking_enable(False)

        child = Process(target=self.player.run)
        child.start()
Example 26
 def create(self, w):
     forking_enable(not w.force_execv)
     pool = w.pool = self.instantiate(
         w.pool_cls,
         w.min_concurrency,
         initargs=(w.app, w.hostname),
         maxtasksperchild=w.max_tasks_per_child,
         timeout=w.task_time_limit,
         soft_timeout=w.task_soft_time_limit,
         putlocks=w.pool_putlocks,
         lost_worker_timeout=w.worker_lost_wait)
     return pool
Example 27
 def start(self,file_loc, audio_src):
     # from rec_thread import rec_thread
     try:
         from billiard import forking_enable
         forking_enable(0)
     except ImportError:
         pass
     self.should_close.clear()
     self.process = Process(target=rec_thread, args=(file_loc, audio_src,self.should_close))
     self.process.start()
     try:
         forking_enable(1)
     except:
         pass
Example 28
 def create(self, w):
     threaded = not w.use_eventloop
     forking_enable(w.no_execv or not w.force_execv)
     pool = w.pool = self.instantiate(w.pool_cls, w.min_concurrency,
                         initargs=(w.app, w.hostname),
                         maxtasksperchild=w.max_tasks_per_child,
                         timeout=w.task_time_limit,
                         soft_timeout=w.task_soft_time_limit,
                         putlocks=w.pool_putlocks,
                         lost_worker_timeout=w.worker_lost_wait,
                         with_task_thread=threaded,
                         with_result_thread=threaded,
                         with_supervisor_thread=threaded)
     return pool
Example 29
def main():
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000","Integrated Camera"]
    world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]

    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Pupil/datasets/p1-left/frames/test.avi'
    # world_src = "/Users/mkassner/Desktop/2014_01_21/000/world.avi"

    # Camera video size in pixels (width,height)
    eye_size = (640,360)
    world_size = (1280,720)


    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double,0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool,world_src,world_size)



    # Exit / clean-up
    p_eye.join()
Example 30
def main():
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000", "Integrated Camera"]
    world_src = [
        "Logitech Camera", "(046d:081d)", "C510", "B525", "C525", "C615",
        "C920", "C930e"
    ]

    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Pupil/datasets/p1-left/frames/test.avi'
    # world_src = "/Users/mkassner/Desktop/2014_01_21/000/world.avi"

    # Camera video size in pixels (width,height)
    eye_size = (640, 360)
    world_size = (1280, 720)

    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool, 0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double, 0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool, eye_src, eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool, world_src, world_size)

    # Exit / clean-up
    p_eye.join()
Example 31
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        forking_enable(self.forking_enable)
        P = self._pool = self.Pool(processes=self.limit,
                                   initializer=process_initializer,
                                   **self.options)
        self.on_apply = P.apply_async
        self.on_soft_timeout = P._timeout_handler.on_soft_timeout
        self.on_hard_timeout = P._timeout_handler.on_hard_timeout
        self.maintain_pool = P.maintain_pool
        self.maybe_handle_result = P._result_handler.handle_event
Example 32
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        forking_enable(self.forking_enable)
        P = self._pool = self.Pool(processes=self.limit,
                                   initializer=process_initializer,
                                   **self.options)
        self.on_apply = P.apply_async
        self.on_soft_timeout = P._timeout_handler.on_soft_timeout
        self.on_hard_timeout = P._timeout_handler.on_hard_timeout
        self.maintain_pool = P.maintain_pool
        self.maybe_handle_result = P._result_handler.handle_event
Example 33
 def start(self, file_loc, audio_src):
     # from rec_thread import rec_thread
     try:
         from billiard import forking_enable
         forking_enable(0)
     except ImportError:
         pass
     self.should_close.clear()
     self.process = Process(target=rec_thread,
                            args=(file_loc, audio_src, self.should_close))
     self.process.start()
     try:
         forking_enable(1)
     except:
         pass
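The enable/start/re-enable dance in Examples 27 and 33 could be packaged as a context manager so the flag is always restored after start(). This is a sketch under the same assumption that only older billiard installs expose forking_enable, not code from the source project:

import contextlib

@contextlib.contextmanager
def forking_disabled():
    # Best effort: silently do nothing where forking_enable is unavailable.
    try:
        from billiard import forking_enable
    except ImportError:
        forking_enable = None
    if forking_enable is not None:
        forking_enable(0)
    try:
        yield
    finally:
        if forking_enable is not None:
            forking_enable(1)

# Usage, with `process` any Process object:
# with forking_disabled():
#     process.start()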
Example 34
File: t.py Project: ask/billiard
def main():
    mp.forking_enable(False)
    initfun()
    x = mp.Pool(1, initializer=initfun)
    time.sleep(10)
    x.apply_async(f, ("x" * 1024**2, ), callback=cb)
    time.sleep(3)
    x.apply_async(f, ("x" * (1024**2), ), callback=cb)
    time.sleep(3)
    x.apply_async(f, ("x" * (1024**2), ), callback=cb)

    time.sleep(30)

    x.close()
    x.join()
Example 35
 def __init__(self, title='MAVProxy: console'):
     if platform.system() == 'Darwin':
         forking_enable(False)
     textconsole.SimpleConsole.__init__(self)
     self.title = title
     self.menu_callback = None
     self.parent_pipe_recv, self.child_pipe_send = Pipe(duplex=False)
     self.child_pipe_recv, self.parent_pipe_send = Pipe(duplex=False)
     self.close_event = Event()
     self.close_event.clear()
     self.child = Process(target=self.child_task)
     self.child.start()
     self.child_pipe_send.close()
     self.child_pipe_recv.close()
     t = threading.Thread(target=self.watch_thread)
     t.daemon = True
     t.start()
Example 36
    def add_exports(self):
        # on MacOS we will not use os.fork, elsewhere this does nothing.
        forking_enable(0)

        outfiles = set()
        for d in self.new_exports:
            logger.debug("Adding new export.")
            should_terminate = RawValue(c_bool, False)
            frames_to_export = RawValue(c_int, 0)
            current_frame = RawValue(c_int, 0)
            start_frame = None
            end_frame = None
            data_dir = d
            plugins = []

            # Here we make clones of every plugin that supports it.
            # So it runs in the current config when we launch the exporter.
            for p in self.g_pool.plugins:
                try:
                    p_initializer = p.get_class_name(), p.get_init_dict()
                    plugins.append(p_initializer)
                except AttributeError:
                    pass

            #make a unique name created from rec_session and dir name
            rec_session, rec_dir = data_dir.rsplit(os.path.sep, 2)[1:]
            out_name = rec_session + "_" + rec_dir + ".avi"
            out_file_path = os.path.join(self.destination_dir.value, out_name)
            if out_file_path in outfiles:
                logger.error(
                    "This export setting would try to save %s at least twice; please rename dirs to prevent this. Skipping file."
                    % out_file_path)
            else:
                outfiles.add(out_file_path)
                logger.info("Exporting to: %s" % out_file_path)

                process = Process(target=export,
                                  args=(should_terminate, frames_to_export,
                                        current_frame, data_dir, start_frame,
                                        end_frame, plugins, out_file_path))
                process.should_terminate = should_terminate
                process.frames_to_export = frames_to_export
                process.current_frame = current_frame
                process.out_file_path = out_file_path
                self.exports.append(process)
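The unique-name construction above leans on rsplit keeping exactly the last two path components. A small, hypothetical illustration:

import os

data_dir = os.path.join('recordings', '2014_01_21', '000')
rec_session, rec_dir = data_dir.rsplit(os.path.sep, 2)[1:]
# rec_session == '2014_01_21', rec_dir == '000'
out_name = rec_session + "_" + rec_dir + ".avi"  # '2014_01_21_000.avi'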
Example 37
    def on_start(self):
        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool if self.options.get('threads', True)
                else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              on_process_exit=process_destructor,
                              enable_timeouts=True,
                              synack=False,
                              **self.options)

        # Create proxy methods
        self.on_apply = P.apply_async
        self.maintain_pool = P.maintain_pool
        self.terminate_job = P.terminate_job
        self.grow = P.grow
        self.shrink = P.shrink
        self.flush = getattr(P, 'flush', None)  # FIXME add to billiard
Example 38
    def on_start(self):
        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool
                if self.options.get('threads', True) else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              on_process_exit=process_destructor,
                              enable_timeouts=True,
                              synack=False,
                              **self.options)

        # Create proxy methods
        self.on_apply = P.apply_async
        self.maintain_pool = P.maintain_pool
        self.terminate_job = P.terminate_job
        self.grow = P.grow
        self.shrink = P.shrink
        self.flush = getattr(P, 'flush', None)  # FIXME add to billiard
Example 39
def main():
    # To assign camera by name: put string(s) in list
    eye_src = ["Microsoft", "6000","Integrated Camera"]
    world_src = ["Logitech Camera","B525", "C525","C615","C920","C930e"]

    # to assign cameras directly, using integers as demonstrated below
    # eye_src = 1
    # world_src = 0

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = "/Users/mkassner/Pupil/datasets/eye2_fieldtest/eye 10.avi"
    # world_src = "/Users/mkassner/Downloads/2013_10_22_M25/000/world.avi"

    # Camera video size in pixels (width,height)
    eye_size = (640,360)
    world_size = (1280,720)


    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))

    # Spawn subprocess:
    p_eye.start()
    # On Linux, we need to give the camera driver some time before requesting another camera.
    sleep(0.5)
    # On MacOS cameras using MJPG compression (world camera) need to run in the main process.
    world(g_pool,world_src,world_size)

    # Exit / clean-up
    p_eye.join()
Example 40
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        if self.options.get('maxtasksperchild'):
            try:
                from billiard import Connection
                Connection.send_offset
            except (ImportError, AttributeError):
                # billiard C extension not installed
                warning(MAXTASKS_NO_BILLIARD)

        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool if self.options.get('threads', True)
                else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              synack=False,
                              **self.options)

        # Create proxy methods
        self.on_apply = P.apply_async
        self.on_soft_timeout = P._timeout_handler.on_soft_timeout
        self.on_hard_timeout = P._timeout_handler.on_hard_timeout
        self.maintain_pool = P.maintain_pool
        self.terminate_job = P.terminate_job
        self.grow = P.grow
        self.shrink = P.shrink
        self.restart = P.restart
        self.maybe_handle_result = P._result_handler.handle_event
        self.handle_result_event = P.handle_result_event

        # Holds jobs waiting to be written to child processes.
        self.outbound_buffer = deque()

        # Set of fds being written to (busy)
        self._active_writes = set()

        # Set of active co-routines currently writing jobs.
        self._active_writers = set()
Example 41
 def create(self, w, semaphore=None, max_restarts=None):
     threaded = not w.use_eventloop
     forking_enable(not threaded or (w.no_execv or not w.force_execv))
     procs = w.min_concurrency
     if not threaded:
         semaphore = w.semaphore = BoundedSemaphore(procs)
         max_restarts = 100
     pool = w.pool = self.instantiate(w.pool_cls, w.min_concurrency,
                         initargs=(w.app, w.hostname),
                         maxtasksperchild=w.max_tasks_per_child,
                         timeout=w.task_time_limit,
                         soft_timeout=w.task_soft_time_limit,
                         putlocks=w.pool_putlocks and threaded,
                         lost_worker_timeout=w.worker_lost_wait,
                         with_task_thread=threaded,
                         with_result_thread=threaded,
                         with_supervisor_thread=threaded,
                         max_restarts=max_restarts,
                         semaphore=semaphore)
     return pool
Example 42
    def add_exports(self):
        # on MacOS we will not use os.fork, elsewhere this does nothing.
        forking_enable(0)

        outfiles = set()
        for d in self.new_exports:
            logger.debug("Adding new export.")
            should_terminate = RawValue(c_bool,False)
            frames_to_export  = RawValue(c_int,0)
            current_frame = RawValue(c_int,0)
            start_frame = None
            end_frame = None
            data_dir = d
            plugins = []

            # Here we make clones of every plugin that supports it.
            # So it runs in the current config when we launch the exporter.
            for p in self.g_pool.plugins:
                try:
                    p_initializer = p.get_class_name(),p.get_init_dict()
                    plugins.append(p_initializer)
                except AttributeError:
                    pass

            #make a unique name created from rec_session and dir name
            rec_session, rec_dir = data_dir.rsplit(os.path.sep,2)[1:]
            out_name = rec_session+"_"+rec_dir+".avi"
            out_file_path = os.path.join(self.destination_dir.value,out_name)
            if out_file_path in outfiles:
                logger.error("This export setting would try to save %s at least twice please rename dirs to prevent this. Skipping File"%out_file_path)
            else:
                outfiles.add(out_file_path)
                logger.info("Exporting to: %s"%out_file_path)

                process = Process(target=export, args=(should_terminate,frames_to_export,current_frame, data_dir,start_frame,end_frame,plugins,out_file_path))
                process.should_terminate = should_terminate
                process.frames_to_export = frames_to_export
                process.current_frame = current_frame
                process.out_file_path = out_file_path
                self.exports.append(process)
Example 43
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool
                if self.options.get('threads', True) else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              on_process_exit=process_destructor,
                              synack=False,
                              **self.options)

        # Create proxy methods
        self.on_apply = P.apply_async
        self.maintain_pool = P.maintain_pool
        self.terminate_job = P.terminate_job
        self.grow = P.grow
        self.shrink = P.shrink
        self.flush = getattr(P, 'flush', None)  # FIXME add to billiard
Example 44
 def create(self, w, semaphore=None, max_restarts=None):
     threaded = not w.use_eventloop
     forking_enable(not threaded or (w.no_execv or not w.force_execv))
     procs = w.min_concurrency
     if not threaded:
         semaphore = w.semaphore = BoundedSemaphore(procs)
         max_restarts = 100
     pool = w.pool = self.instantiate(
         w.pool_cls,
         w.min_concurrency,
         initargs=(w.app, w.hostname),
         maxtasksperchild=w.max_tasks_per_child,
         timeout=w.task_time_limit,
         soft_timeout=w.task_soft_time_limit,
         putlocks=w.pool_putlocks and threaded,
         lost_worker_timeout=w.worker_lost_wait,
         threads=threaded,
         max_restarts=max_restarts,
         semaphore=semaphore)
     if w.hub:
         w.hub.on_init.append(partial(self.on_poll_init, pool))
     return pool
Example 45
    def on_start(self):
        """Run the task pool.

        Will pre-fork all workers so they're ready to accept tasks.

        """
        forking_enable(self.forking_enable)
        Pool = (self.BlockingPool if self.options.get('threads', True)
                else self.Pool)
        P = self._pool = Pool(processes=self.limit,
                              initializer=process_initializer,
                              on_process_exit=process_destructor,
                              synack=False,
                              **self.options)

        # Create proxy methods
        self.on_apply = P.apply_async
        self.maintain_pool = P.maintain_pool
        self.terminate_job = P.terminate_job
        self.grow = P.grow
        self.shrink = P.shrink
        self.flush = getattr(P, 'flush', None)  # FIXME add to billiard
Example 46
 def create(self, w, semaphore=None, max_restarts=None):
     threaded = not w.use_eventloop
     forking_enable(not threaded or (w.no_execv or not w.force_execv))
     procs = w.min_concurrency
     if not threaded:
         semaphore = w.semaphore = BoundedSemaphore(procs)
         max_restarts = 100
     allow_restart = self.autoreload_enabled or w.pool_restarts
     pool = w.pool = self.instantiate(w.pool_cls, w.min_concurrency,
                         initargs=(w.app, w.hostname),
                         maxtasksperchild=w.max_tasks_per_child,
                         timeout=w.task_time_limit,
                         soft_timeout=w.task_soft_time_limit,
                         putlocks=w.pool_putlocks and threaded,
                         lost_worker_timeout=w.worker_lost_wait,
                         threads=threaded,
                         max_restarts=max_restarts,
                         allow_restart=allow_restart,
                         semaphore=semaphore)
     if w.hub:
         w.hub.on_init.append(partial(self.on_poll_init, pool))
     return pool
Example 47
import billiard
billiard.forking_enable(0)

from . import trackerLoop
from . import cameraLoop
from . import calibrationLoop

########
# Define a class that spawns a new process to poll the camera and queue images
########
class cameraClass:
	def __init__(self,camIndex,camRes,timestampMethod):
		self.qTo = billiard.Queue()
		self.qFrom = billiard.Queue()
		self.process = billiard.Process( target=cameraLoop.loop , args=(self.qTo,self.qFrom,camIndex,camRes,timestampMethod) )
	def start(self):
		self.process.start()
	def stop(self,):
		self.qTo.put('quit')
		self.process.join(timeout=1)
		if self.process.is_alive():
			self.process.terminate()
		del self.qTo
		del self.qFrom
		return None

########
# Define a class that spawns a new process to manage the camera, do tracking and display a preview window
########
class trackerClass:
	def __init__(self,camIndex,camRes,previewDownsize,previewLoc,faceDetectionScale,eyeDetectionScale,timestampMethod,viewingDistance,stimDisplayWidth,stimDisplayRes,stimDisplayPosition,mirrorDisplayPosition,mirrorDownSize,manualCalibrationOrder,calibrationDotSizeInDegrees,saccadeAlertSizeInDegrees):
Example 48
    def execute(self):
        """See base_runner.execute()."""

        import platform
        # In OS X the multiprocessing module is horribly broken, but a fixed
        # version has been released as the 'billiard' module
        if platform.system() == 'Darwin':
            import billiard as multiprocessing
            multiprocessing.forking_enable(0)
        else:
            import multiprocessing

        from libqtopensesame.misc import process, _
        from libopensesame import misc

        self._workspace_globals = {}
        if os.name == u'nt' or (sys.platform == u'darwin' \
         and not hasattr(sys,"frozen")):
            # Under Windows and OSX, the multiprocess runner assumes that there
            # is a file called `opensesame.py` or `opensesame.pyc`. If this file
            # does not exist, try to copy it from the main script
            # (`opensesame`). If this fails, provide an informative error
            # message.
            os_folder = misc.opensesame_folder()
            # misc.opensesame_folder() doesn't work on OSX and returns None there,
            # so determine OpenSesame's rootdir in another way
            if os_folder is None:
                os_folder = os.path.dirname(
                    os.path.abspath(sys.modules['__main__'].__file__))
            if not os.path.exists(os.path.join(os_folder, u'opensesame.pyc')) \
             and not os.path.exists(os.path.join(os_folder, u'opensesame.py')):
                import shutil
                try:
                    shutil.copyfile(os.path.join(os_folder, u'opensesame'),
                                    os.path.join(os_folder, u'opensesame.py'))
                except Exception as e:
                    return osexception(_(
                        u'Failed to copy `opensesame` to `opensesame.py`, which is required for the multiprocess runner. Please copy the file manually, or select a different runner under Preferences.'
                    ),
                                       exception=e)
        self.channel = multiprocessing.Queue()
        try:
            self.exp_process = process.ExperimentProcess(
                self.experiment, self.channel)
        except Exception as e:
            return osexception(_(u'Failed to initialize experiment process'),
                               exception=e)
        # Start process!
        self.exp_process.start()
        # Wait for experiment to finish.
        # Listen for incoming messages in the meantime.
        while self.exp_process.is_alive() or not self.channel.empty():
            # We need to process the GUI. To make the GUI feel more responsive
            # during pauses, we refresh the GUI more often when paused.
            QtGui.QApplication.processEvents()
            if self.paused:
                for i in range(25):
                    time.sleep(.01)
                    QtGui.QApplication.processEvents()
            # Make sure None is not printed. Ugly hack for a bug in the Queue
            # class?
            self.console.suppress_stdout()
            # Wait for messages. Will throw Exception if no message is received
            # before timeout.
            try:
                msg = self.channel.get(True, 0.05)
            except:
                continue
            # Restore connection to stdout
            self.console.capture_stdout()
            if isinstance(msg, basestring):
                sys.stdout.write(safe_decode(msg, errors=u'ignore'))
                continue
            # Capture exceptions
            if isinstance(msg, Exception):
                return msg
            # The workspace globals are sent as a dict. A special __pause__ key
            # indicates whether the experiment should be paused or resumed.
            if isinstance(msg, dict):
                self._workspace_globals = msg
                if u'__heartbeat__' in msg:
                    self.console.set_workspace_globals(msg)
                    self.main_window.extension_manager.fire(u'heartbeat')
                elif u'__pause__' in msg:
                    if msg[u'__pause__']:
                        self.pause()
                    else:
                        self.resume()
                continue
            # Anything that is not a string, not an Exception, and not None is
            # unexpected
            return osexception(
             u"Illegal message type received from child process: %s (%s)" \
             % (msg, type(msg)))
        # Return None if experiment finished without problems
        return None
Example 49
 def start():
     forking_enable(0)  # Is all you need!
     camProcess = Process(target=cam, args=(0, ))
     camProcess.start()
Example 50
            # Block and listen for relevant messages.
            topic, n = cmd_sub.recv()
            if "notify.eye_process.should_start" in topic:
                eye_id = n['eye_id']
                if not eyes_are_alive[eye_id].value:
                    Process(target=eye,
                            name='eye%s' % eye_id,
                            args=(timebase,
                                  eyes_are_alive[eye_id],
                                  ipc_pub_url,
                                  ipc_sub_url,
                                  ipc_push_url,
                                  user_dir,
                                  app_version,
                                  eye_id)).start()
            elif "notify.launcher_process.should_stop" in topic:
                break
            elif "notify.meta.should_doc" in topic:
                cmd_push.notify({
                    'subject': 'meta.doc',
                    'actor': 'launcher',
                    'doc': launcher.__doc__})

        for p in active_children(): p.join()


if __name__ == '__main__':
    freeze_support()
    forking_enable(0)
    launcher()
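
In the snippet above, cmd_sub and cmd_push are Pupil's ZMQ notification wrappers. A minimal sketch of the same dispatch pattern with a plain multiprocessing.Queue standing in for the notification bus (topic substring matching, launch-if-not-alive, and a stop notification; all names here are illustrative):

import multiprocessing


def eye(eye_id):
    # Illustrative worker; the real launcher starts Pupil's eye process.
    print('eye process %s running' % eye_id)


def launcher(bus):
    eyes = {}
    while True:
        # Block and listen, like cmd_sub.recv() in the snippet above.
        topic, payload = bus.get()
        if 'eye_process.should_start' in topic:
            eye_id = payload['eye_id']
            if eye_id not in eyes or not eyes[eye_id].is_alive():
                eyes[eye_id] = multiprocessing.Process(
                    target=eye, name='eye%s' % eye_id, args=(eye_id,))
                eyes[eye_id].start()
        elif 'launcher_process.should_stop' in topic:
            break
    for p in multiprocessing.active_children():
        p.join()


if __name__ == '__main__':
    bus = multiprocessing.Queue()
    bus.put(('notify.eye_process.should_start', {'eye_id': 0}))
    bus.put(('notify.launcher_process.should_stop', {}))
    launcher(bus)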
Esempio n. 51
0
            self.close()

    def get(self):
        if not self.alive:
            return None
        self.fill()
        if len(self.pending) > 0:
            return self.pending.pop(0)
        return None

    def qsize(self):
        self.fill()
        return len(self.pending)

    def empty(self):
        return self.qsize() == 0

import platform, os, sys

# We use billiard (and forking disable) on macOS, and also if the USE_BILLIARD
# environment variable is set. Setting USE_BILLIARD allows the crazy
# forking-disable approach to be debugged on a saner platform.
# As of Python 3.8 the default start method on macOS is spawn, so billiard is
# not required.
if ((platform.system() == 'Darwin' or os.environ.get('USE_BILLIARD',None) is not None)
    and sys.version_info < (3, 8)):
    from billiard import Process, forking_enable, freeze_support, Pipe, Semaphore, Event, Lock
    forking_enable(False)
    Queue = PipeQueue
else:
    from multiprocessing import Process, freeze_support, Pipe, Semaphore, Event, Lock, Queue
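
As the comment notes, billiard is only needed on Pythons older than 3.8, where macOS still defaulted to fork. A minimal sketch of the modern equivalent, which requests the spawn start method explicitly through a context object so the choice stays local to this module (an assumed port, not the project's actual code):

import multiprocessing


def child(conn):
    conn.send('hello from the spawned child')
    conn.close()


if __name__ == '__main__':
    # 'spawn' starts a fresh interpreter for each child, which is what
    # forking_enable(False) emulated via billiard on macOS. Using
    # get_context() keeps the choice local instead of process-global.
    ctx = multiprocessing.get_context('spawn')
    parent_conn, child_conn = ctx.Pipe()
    p = ctx.Process(target=child, args=(child_conn,))
    p.start()
    print(parent_conn.recv())
    p.join()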
Esempio n. 52
0
def opensesame():

    import os, sys, platform
    # Add the folder that contains the OpenSesame modules to the path. This is
    # generally only necessary if OpenSesame is run directly from source,
    # instead of from an installation.
    if os.path.exists(os.path.join(os.getcwd(), 'libopensesame')):
        sys.path.insert(0, os.getcwd())
    # Support for multiprocessing when packaged.
    # On OS X the multiprocessing module is horribly broken, but a fixed
    # version has been released as the 'billiard' module. (This platform
    # logic is factored into a helper in the sketch after this example.)
    if platform.system() == 'Darwin':
        # Use the normal multiprocessing module from Python 3.4 onwards.
        if sys.version_info >= (3, 4):
            from multiprocessing import freeze_support, set_start_method
            freeze_support()
            set_start_method('forkserver')
        else:
            from billiard import freeze_support, forking_enable
            freeze_support()
            forking_enable(0)
    else:
        from multiprocessing import freeze_support
        freeze_support()
    # Parse the (optional) environment file that contains special paths, etc.
    from libopensesame.misc import resource, filesystem_encoding, \
     parse_environment_file
    parse_environment_file()
    # Force the new-style Qt API
    import sip
    import qtpy
    sip.setapi('QString', 2)
    sip.setapi('QVariant', 2)
    # Load debug package (this must be after the working directory change)
    from libopensesame import debug
    # Do the basic window initialization
    from qtpy.QtWidgets import QApplication

    # From Qt 5.6 on, QtWebEngine is the default way to render web pages
    # QtWebEngineWidgets must be imported before a QCoreApplication instance is created
    try:
        from qtpy import QtWebEngineWidgets
    except ImportError:
        pass

    app = QApplication(sys.argv)
    # Enable High DPI display with PyQt5
    if hasattr(qtpy.QtCore.Qt, 'AA_UseHighDpiPixmaps'):
        app.setAttribute(qtpy.QtCore.Qt.AA_UseHighDpiPixmaps)
    from libqtopensesame.qtopensesame import qtopensesame
    opensesame = qtopensesame(app)
    opensesame.__script__ = __file__
    app.processEvents()
    # Import the remaining modules
    from qtpy.QtCore import QObject, QLocale, QTranslator
    import os.path
    # Load the locale for UI translation. The locale can be specified on the
    # command line using the --locale parameter
    locale = QLocale().system().name()
    for i in range(len(sys.argv) - 1):
        if sys.argv[i] == '--locale':
            locale = sys.argv[i + 1]
    qm = resource(os.path.join(u'locale', locale) + u'.qm')
    # If, say, we're trying to load de_AT and it is not found, then we'll try
    # de_DE as a fallback.
    if qm is None:
        l = locale.split(u'_')
        if len(l):
            _locale = l[0] + u'_' + l[0].upper()
            qm = resource(os.path.join(u'locale', _locale + u'.qm'))
            if qm is not None:
                locale = _locale
    opensesame._locale = locale
    if qm is not None:
        debug.msg(u'installing %s translator' % qm)
        translator = QTranslator()
        translator.load(qm)
        app.installTranslator(translator)
    else:
        debug.msg(u'no translator found for %s' % locale)
    # Now that the window is shown, load the remaining modules and resume the
    # GUI initialization.
    opensesame.resume_init()
    opensesame.restore_window_state()
    opensesame.refresh()
    opensesame.show()
    # Added for OS X; otherwise the window will not appear.
    opensesame.raise_()
    # Exit using the application exit status
    sys.exit(app.exec_())
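
The platform checks at the top of opensesame() could be factored into a single helper, as referenced above. A sketch of that factoring, mirroring the behavior the comments describe (forkserver on macOS with Python >= 3.4, billiard's forking_enable(0) on older macOS Pythons, plain freeze_support() elsewhere); the helper name is illustrative:

def init_multiprocessing():
    # A hypothetical helper: configure multiprocessing for the current
    # platform, following the logic in opensesame() above.
    import platform
    import sys
    if platform.system() == 'Darwin' and sys.version_info < (3, 4):
        # Old macOS Pythons: use billiard and disable os.fork.
        from billiard import freeze_support, forking_enable
        freeze_support()
        forking_enable(0)
        return
    from multiprocessing import freeze_support
    freeze_support()
    if platform.system() == 'Darwin':
        # Python >= 3.4 on macOS: pick a non-fork start method explicitly.
        # Note that set_start_method() may be called at most once.
        from multiprocessing import set_start_method
        set_start_method('forkserver')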
Esempio n. 53
0
#     # print writer.video_stream.time_base
#     # print writer.

#     for x in xrange(300):
#         frame = cap.get_frame()
#         writer.write_video_frame(frame)
#         # writer.write(frame.img)
#         # print writer.video_stream

#     cap.close()
#     writer.close()

if __name__ == '__main__':
    try:
        from billiard import forking_enable
        forking_enable(0)
    except ImportError:
        pass
    logging.basicConfig(level=logging.DEBUG)

    cap = Audio_Capture('test.wav', 1)

    import time
    time.sleep(2)
    cap.close()
    exit()

    # Unreachable scratch code below: open the mic device directly with PyAV.
    container = av.open('hw:0', format="alsa")        # Linux (ALSA)
    container = av.open(':0', format="avfoundation")  # macOS (AVFoundation)
    print('container:', container)
Esempio n. 54
0
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with OpenSesame.  If not, see <http://www.gnu.org/licenses/>.
"""



import platform
# In OS X the multiprocessing module is horribly broken, but a fixed
# version has been released as the 'billiard' module
if platform.system() == 'Darwin':
	import billiard as multiprocessing
	multiprocessing.forking_enable(0)
else:
	import multiprocessing

class OutputChannel:

	"""Passes messages from child process back to main process."""

	def __init__(self, channel, orig=None):

		"""
		Constructor.

		Arguments:
		channel	--	A multiprocessing.JoinableQueue object that is referenced
					from the main process.
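
The example is cut off at the constructor's docstring, but the pattern it describes is a common one: a file-like object that forwards write() calls to a queue shared with the main process, so the child's stdout can be rerouted. A minimal sketch of that pattern (the method bodies below are an assumption based on the docstring, not the original implementation):

class QueueOutputChannel(object):

    """Illustrative stand-in for OutputChannel: forwards writes to a queue
    that the main process drains, optionally echoing to the original
    stream."""

    def __init__(self, channel, orig=None):
        self.channel = channel  # a multiprocessing.JoinableQueue
        self.orig = orig  # e.g. the real sys.stdout, or None

    def write(self, msg):
        self.channel.put(msg)
        if self.orig is not None:
            self.orig.write(msg)

    def flush(self):
        if self.orig is not None:
            self.orig.flush()

# In the child process one would then do, for example:
# sys.stdout = QueueOutputChannel(channel, orig=sys.stdout)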
Esempio n. 55
0
def main():

    # To assign a camera by name, put string(s) in the list.
    world_src = [
        "Pupil Cam1 ID2", "Logitech Camera", "(046d:081d)", "C510", "B525",
        "C525", "C615", "C920", "C930e"
    ]
    eye0 = [
        "Pupil Cam1 ID0", "HD-6000", "Integrated Camera", "HD USB Camera",
        "USB 2.0 Camera"
    ]
    eye1 = ["Pupil Cam1 ID1", "HD-6000", "Integrated Camera"]
    eye_src = eye0, eye1

    # To assign cameras directly, use integers as demonstrated below.
    # eye_src = 1, 1  # second arg is ignored for monocular eye trackers
    # world_src = 0

    # To use a pre-recorded video, specify the path to your video file as a
    # string, as demonstrated below.
    # eye_src = '/Users/mkassner/Downloads/eye0.mkv' , '/Users/mkassner/Downloads/eye.avi'
    # world_src = "/Users/mkassner/Downloads/000/world.mkv"

    # Default camera video size in pixels (width,height)
    eye_size = (640, 480)
    world_size = (1280, 720)

    # on MacOS we will not use os.fork, elsewhere this does nothing.
    forking_enable(0)

    # g_pool holds shared variables. Only variables added here are shared
    # across processes.
    g_pool = Global_Container()

    # Create and initialize IPC
    g_pool.pupil_queue = Queue()
    g_pool.quit = Value(c_bool, 0)
    g_pool.timebase = Value(c_double, 0)
    g_pool.eye_tx = []
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.version = get_version(version_file)
    g_pool.app = 'capture'
    g_pool.binocular = binocular

    p_eye = []
    for eye_id in range(1 + 1 * binocular):
        eye_end, world_end = Pipe(True)
        p_eye += [
            Process(target=eye,
                    args=(g_pool, eye_src[eye_id], eye_size, eye_end, eye_id))
        ]
        p_eye[-1].start()
        # Wait for the ready message from the eye process to serialize startup.
        logger.debug(world_end.recv())
        g_pool.eye_tx += [world_end]

    world(g_pool, world_src, world_size)

    # Exit / clean-up
    for p in p_eye:
        p.join()
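
The startup handshake above, where the parent blocks on world_end.recv() until each eye process reports ready, generalizes to any per-process duplex Pipe. A minimal standalone sketch of the handshake (the eye() body is illustrative):

import multiprocessing


def eye(conn, eye_id):
    # ... heavy initialization would happen here ...
    conn.send('eye %d ready' % eye_id)  # unblocks the waiting parent
    conn.close()


if __name__ == '__main__':
    procs = []
    for eye_id in range(2):
        parent_end, child_end = multiprocessing.Pipe(True)
        p = multiprocessing.Process(target=eye, args=(child_end, eye_id))
        p.start()
        # Block until this child reports ready, so the processes start up
        # one at a time instead of initializing concurrently.
        print(parent_end.recv())
        procs.append(p)
    for p in procs:
        p.join()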