Example #1
def main():
    timePipe, sigPipe = Pipe()
    q = Queue()
    clock = Process(target=tickTock, args=(timePipe, ))
    testSignal = Process(target=signal, args=(q, sigPipe, [1]))
    clock.start()  # the clock process must be started too, or no ticks ever arrive
    testSignal.start()
    while True:
        print(q.get())
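
Note: tickTock and signal are defined elsewhere in the source project and are not shown. A minimal sketch of stubs that would make Example #1 runnable (names and behaviour here are assumptions):

from multiprocessing import Pipe, Process, Queue
import time

def tickTock(pipe):
    # hypothetical stub: emit a timestamp once per second
    while True:
        pipe.send(time.time())
        time.sleep(1)

def signal(q, pipe, ids):
    # hypothetical stub: forward each tick to the queue, tagged per id
    while True:
        t = pipe.recv()
        for i in ids:
            q.put((i, t))
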
Example #2
def main(param):
    KILLING = False

    KILLING2 = False
    original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
    data = setParam(param=param)
    if __name__ == "__main__":
        printParam(data)

    logger.critical(
        f"######################### NEW JOB: {data.folder} #########################"
    )

    # file_handler_job = logging.FileHandler(os.path.join(data.outputDir, 'log'))
    # file_handler_job.setLevel(logging.INFO)
    # file_handler_job.setFormatter(formatter)
    # logger.addHandler(file_handler_job)

    processes = []
    p = Process(target=processMaster, args=(data, ), name="MASTER")
    p.start()
    processes.append(p)
    logger.info('[i] MASTER started')

    for n in range(data.workers):
        p = Process(target=processWorker, args=(data, ), name="WORKER")
        p.start()
        processes.append(p)
        logger.info(f'[i] WORKER {n+1}/{data.workers} started')

    p = Process(target=processSink, args=(data, ), name="SINK")
    p.start()
    processes.append(p)
    logger.info('[i] Sink started')
    ksObj = KillSwitch(data)

    def killswitch(a, b):
        ksObj()
        ksObj()

    signal.signal(signal.SIGINT, killswitch)
    emails, nmbPgsScraped = Producer(data).workYouBastard()

    logger.info("[i] Finalising, \t")
    for p in processes:
        p.terminate()
    logger.info('[i] Done')

    return emails, nmbPgsScraped
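
Note: KillSwitch is defined elsewhere in the source project and is not shown. A minimal sketch consistent with the handler above (data.stop_event is an assumed multiprocessing.Event; calling the switch twice, as killswitch does, is then harmless):

class KillSwitch:
    def __init__(self, data):
        self.data = data

    def __call__(self):
        # idempotent: flip a shared stop flag so MASTER/WORKER/SINK drain and exit
        self.data.stop_event.set()
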
Example #3
    def add_export(self):
        # On macOS we will not use os.fork; elsewhere this does nothing.
        forking_enable(0)

        logger.debug("Adding new export.")
        should_terminate = RawValue(c_bool, False)
        frames_to_export = RawValue(c_int, 0)
        current_frame = RawValue(c_int, 0)

        data_dir = self.data_dir
        start_frame = self.start_frame.value
        end_frame = self.end_frame.value
        plugins = []

        # Here we make clones of every plugin that supports it,
        # so the exporter runs with the current config when we launch it.
        for p in self.g_pool.plugins:
            try:
                p_initializer = p.get_class_name(), p.get_init_dict()
                plugins.append(p_initializer)
            except AttributeError:
                pass

        out_file_path = verify_out_file_path(self.rec_name.value,
                                             self.data_dir)
        process = Process(target=export,
                          args=(should_terminate, frames_to_export,
                                current_frame, data_dir, start_frame,
                                end_frame, plugins, out_file_path))
        process.should_terminate = should_terminate
        process.frames_to_export = frames_to_export
        process.current_frame = current_frame
        process.out_file_path = out_file_path
        self.new_export = process
Example #4
def clone_with_timeout(src: str, dest: str, clone_func: Callable[[], None],
                       timeout: float) -> None:
    """Clone a repository with timeout.

    Args:
        src: clone source
        dest: clone destination
        clone_func: callable that does the actual cloning
        timeout: timeout in seconds
    """
    errors: Queue = Queue()
    process = Process(target=_clone_task, args=(clone_func, errors))
    process.start()
    process.join(timeout)

    if process.is_alive():
        process.terminate()
        # Give it literally a second (in successive steps of 0.1 second),
        # then kill it.
        # Can't use `process.join(1)` here, billiard appears to be bugged
        # https://github.com/celery/billiard/issues/270
        killed = False
        for _ in range(10):
            time.sleep(0.1)
            if not process.is_alive():
                break
        else:
            killed = True
            os.kill(process.pid, signal.SIGKILL)
        raise CloneTimeout(src, timeout, killed)

    if not errors.empty():
        raise CloneFailure(src, dest, errors.get())
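
Note: _clone_task is not shown in the source. A sketch consistent with how Example #4 uses it (run the clone callable in the child, report any failure through the queue):

def _clone_task(clone_func: Callable[[], None], errors: Queue) -> None:
    try:
        clone_func()
    except Exception as e:
        errors.put(str(e))  # the parent raises CloneFailure if this queue is non-empty
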
Example #5
def crawl(*args, **kwargs):
    crawler = CrawlerProcess(get_project_settings())
    crawler.crawl(*args, **kwargs)
    process = Process(target=crawler.start)
    process.start()
    process.join()
    crawler.stop()
Example #6
    def display_graph(self, graphdef):
        '''display a graph'''
        if 'mestate' in globals():
            self.mestate.console.write("Expression: %s\n" %
                                       ' '.join(graphdef.expression.split()))
        else:
            self.mestate.child_pipe_send_console.send(
                "Expression: %s\n" % ' '.join(graphdef.expression.split()))
        #mestate.mlog.reduce_by_flightmodes(mestate.flightmode_selections)

        #setup the graph, then pass to a new process and display
        self.mg = grapher.MavGraph()
        self.mg.set_marker(self.mestate.settings.marker)
        self.mg.set_condition(self.mestate.settings.condition)
        self.mg.set_xaxis(self.mestate.settings.xaxis)
        self.mg.set_linestyle(self.mestate.settings.linestyle)
        self.mg.set_show_flightmode(self.mestate.settings.show_flightmode)
        self.mg.set_legend(self.mestate.settings.legend)
        self.mg.add_mav(self.mestate.mlog)
        for f in graphdef.expression.split():
            self.mg.add_field(f)
        self.mg.process(self.mestate.flightmode_selections,
                        self.mestate.mlog._flightmodes)
        self.lenmavlist = len(self.mg.mav_list)
        if platform.system() == 'Darwin':
            forking_enable(False)
        # Important: mg.mav_list is the full logfile and can be very large.
        # To avoid slowdowns on Windows (which copies these vars to the new
        # process) we empty this var once we are finished with it.
        self.mg.mav_list = []
        child = Process(target=self.mg.show, args=[
            self.lenmavlist,
        ])
        child.start()
        self.mestate.mlog.rewind()
Example #7
 def crawl(self, origin_oj, username):
     p = Process(target=self._crawl, args=[
         origin_oj,
         username,
     ])
     p.start()
     p.join()
Example #8
    def _fork_and_submit_job(self, job):
        parent_pipe, child_pipe = Pipe()
        try:
            p = Process(target=self._submit_job_to_lsf,
                        args=(child_pipe, parent_pipe, job,))
            p.start()

        except:
            parent_pipe.close()
            raise
        finally:
            child_pipe.close()

        try:
            p.join()

            result = parent_pipe.recv()
            if isinstance(result, str):  # 'basestring' in the Python 2 original
                raise SubmitError(result)

        except EOFError:
            raise SubmitError('Unknown exception submitting job')
        finally:
            parent_pipe.close()

        return result
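
Note: _submit_job_to_lsf is not shown. A sketch of the protocol the parent expects (a non-string result means success, a string becomes SubmitError; _run_bsub is a hypothetical submit helper):

def _submit_job_to_lsf(self, child_pipe, parent_pipe, job):
    parent_pipe.close()  # the child only writes to its own end of the pipe
    try:
        child_pipe.send(self._run_bsub(job))  # e.g. an integer LSF job id
    except Exception as e:
        child_pipe.send(str(e))  # a string makes the parent raise SubmitError
    finally:
        child_pipe.close()
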
Example #9
 def _reflash(self, path):
     """
     this will execute the upgrade operation in another process
     because the SSH connection may hang indefinitely while reflashing
     and would block the program; setting a timeout to `exec_command`
     doesn't seem to take effect so at least we can stop the process
     using `subprocess.join(timeout=self.UPGRADE_TIMEOUT)`
     """
     def upgrade(conn, path, timeout):
         conn.connect()
         conn.exec_command('sysupgrade -v -c {0}'.format(path),
                           timeout=timeout)
         conn.close()
     subprocess = Process(
         target=upgrade,
         args=[self, path, self.UPGRADE_TIMEOUT]
     )
     subprocess.start()
     self.log('Upgrade operation in progress...')
     subprocess.join(timeout=self.UPGRADE_TIMEOUT)
     self.log('SSH connection closed, will wait {0} seconds before '
              'attempting to reconnect...'.format(self.SLEEP_TIME))
     sleep(self.SLEEP_TIME)
     # kill the subprocess if it has hung
     if subprocess.is_alive():
         subprocess.terminate()
         subprocess.join()
Example #10
def fetch_models(channel_name, tuple_type, authorized_types, input_models,
                 directory):

    models = []
    exceptions = []

    # Close the Django connections to force each Process to create its own,
    # as the Django ORM connection is not fork-safe: https://code.djangoproject.com/ticket/20562
    from django import db
    db.connections.close_all()

    for input_model in input_models:
        args = (channel_name, tuple_type, authorized_types, input_model,
                directory)
        proc = Process(target=fetch_model, args=args)
        models.append((proc, args))
        proc.start()

    for proc, args in models:
        proc.join()
        if proc.exitcode != 0:
            exceptions.append(Exception(f'fetch model failed for args {args}'))

    # Close django old connections to avoid potential leak
    db.close_old_connections()

    if exceptions:
        raise Exception(exceptions)
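
Note: fetch_model is not shown. Since the parent only inspects exitcode, a sketch of the worker just needs to let exceptions propagate (download_model and save_model are hypothetical helpers):

def fetch_model(channel_name, tuple_type, authorized_types, input_model, directory):
    if tuple_type not in authorized_types:
        raise ValueError(f'unauthorized tuple type: {tuple_type}')
    model = download_model(channel_name, tuple_type, input_model)
    save_model(model, directory)  # an unhandled exception yields a non-zero exitcode
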
Example #11
 def screenshot(
     task,
     url,
     width=settings.BASE_WEBPAGE_PREVIEW_WIDTH,
     height=settings.BASE_WEBPAGE_PREVIEW_HEIGHT,
     lifetime=settings.BASE_WEBPAGE_PREVIEW_LIFETIME,
 ):
     url_id = sha256()
     url_id.update(url.encode("utf-8"))
     url_id.update(bytes(width))
     url_id.update(bytes(height))
     key = url_id.hexdigest()
     logger.info(f"Screenshot for {url} @ {width}x{height}: {key}")
     if key in cache:
         logger.info(f"Found {key} in cache.")
         return key
     logger.info(f"Locking {key}")
     lock = cache.lock(key)
     lock.acquire()
     logger.info("Starting WebEngineScreenshot app")
     parent_conn, child_conn = Pipe()
     p = Process(target=WebpageTasks.worker, args=(url, width, height, child_conn))
     p.start()
     image = parent_conn.recv()
     p.join()
     if not image:
         logger.info("WebEngineScreenshot app returned nothing")
         return None
     logger.info("Writing WebEngineScreenshot app result to cache")
     cache.set(key, image, timeout=lifetime)
     logger.info("Removing WebEngineScreenshot app singleton")
     return key
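
Note that Example #11 acquires cache.lock(key) but never releases it, and returns while still holding the lock. A safer shape (a sketch reusing the same names, assuming a django-redis style lock) wraps the critical section in try/finally:

lock = cache.lock(key)
lock.acquire()
try:
    parent_conn, child_conn = Pipe()
    p = Process(target=WebpageTasks.worker, args=(url, width, height, child_conn))
    p.start()
    image = parent_conn.recv()
    p.join()
    if image:
        cache.set(key, image, timeout=lifetime)
finally:
    lock.release()  # always give the lock back, even on failure
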
Example #12
def main():
    # To assign camera by name: put string(s) in list

    # Parse command line arguments
    parser = argparse.ArgumentParser(description='GUI for gaze tracking and pupillometry')
    parser.add_argument('-eye', dest='eye_file', type=str, help="Work with existing video recording, instead of live feed", default='')
    parser.add_argument('-world', dest='world_file', type=str, help="Work with existing video recording, instead of live feed", default='')

    args = parser.parse_args()

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    if args.eye_file == '':
        eye_src = ["UI154xLE-M", "USB Camera-B4.09.24.1", "FaceTime Camera (Built-in)", "Microsoft", "6000","Integrated Camera"]
        # to assign cameras directly, using integers as demonstrated below
        # eye_src = 1
    else:
#        print "Using provide file: %s" % args.filename
        eye_src = args.eye_file

    if args.world_file == '':
        world_src = ["Logitech Camera","(046d:081d)","C510","B525", "C525","C615","C920","C930e"]
        # to assign cameras directly, using integers as demonstrated below
        # world_src = 0
    else:
        world_src = args.world_file

    # Camera video size in pixels (width,height)
    eye_size = (260,216) #(1280,1024)
    world_size = (640,480)


    # On macOS we will not use os.fork; elsewhere this does nothing.
    forking_enable(0)

    # Create and initialize IPC
    g_pool = Temp()
    g_pool.pupil_queue = Queue()
    g_pool.eye_rx, g_pool.eye_tx = Pipe(False)
    g_pool.quit = RawValue(c_bool,0)
    # this value will be subtracted from the capture timestamp
    g_pool.timebase = RawValue(c_double,0)
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.rec_dir = rec_dir
    g_pool.version = version
    g_pool.app = 'capture'
    # set up subprocesses
    p_eye = Process(target=eye, args=(g_pool,eye_src,eye_size))

    # Spawn subprocess:
    p_eye.start()
    if platform.system() == 'Linux':
        # We need to give the camera driver some time before requesting another camera.
        sleep(0.5)

    world(g_pool,world_src,world_size)

    # Exit / clean-up
    p_eye.join()
Example #13
def main():

    # To assign camera by name: put string(s) in list
    eye_cam_names = [
        "USB 2.0 Camera", "Microsoft", "6000", "Integrated Camera",
        "HD USB Camera"
    ]
    world_src = [
        "Logitech Camera", "(046d:081d)", "C510", "B525", "C525", "C615",
        "C920", "C930e"
    ]
    # first match for eye0 and second match for eye1
    eye_src = (eye_cam_names, 0), (eye_cam_names, 1)

    # to assign cameras directly, using integers as demonstrated below
    # eye_src =  4 , 5 #second arg will be ignored for monocular eye trackers
    # world_src = 1

    # to use a pre-recorded video.
    # Use a string to specify the path to your video file as demonstrated below
    # eye_src = '/Users/mkassner/Downloads/000/eye0.mkv' , '/Users/mkassner/Downloads/eye.avi'
    # world_src = "/Users/mkassner/Downloads/000/world.mkv"

    # Camera video size in pixels (width,height)
    eye_size = (640, 480)
    world_size = (1280, 720)

    # On macOS we will not use os.fork; elsewhere this does nothing.
    forking_enable(0)

    # g_pool holds variables; only if added here are they shared across processes.
    g_pool = Global_Container()

    # Create and initialize IPC
    g_pool.pupil_queue = Queue()
    g_pool.quit = Value(c_bool, 0)
    g_pool.timebase = Value(c_double, 0)
    g_pool.eye_tx = []
    # make some constants available
    g_pool.user_dir = user_dir
    g_pool.version = get_version(version_file)
    g_pool.app = 'capture'
    g_pool.binocular = binocular

    p_eye = []
    for eye_id in range(1 + 1 * binocular):  # one eye process, or two if binocular
        rx, tx = Pipe(False)
        p_eye += [
            Process(target=eye,
                    args=(g_pool, eye_src[eye_id], eye_size, rx, eye_id))
        ]
        g_pool.eye_tx += [tx]
        p_eye[-1].start()

    world(g_pool, world_src, world_size)

    # Exit / clean-up
    for p in p_eye:
        p.join()
Example #14
    def start_process(self, command=None, billiard=False, *args, **kwargs):
        from .models import ExportTask
        from ..tasks.export_tasks import TaskStates

        if billiard:
            proc = Process(daemon=False, *args, **kwargs)
            proc.start()
            self.store_pid(pid=proc.pid)
            proc.join()
            self.exitcode = proc.exitcode
        else:
            proc = subprocess.Popen(command, **kwargs)
            (self.stdout, self.stderr) = proc.communicate()
            self.store_pid(pid=proc.pid)
            self.exitcode = proc.wait()

        # We need to close the existing connection because the logger could be
        # using a forked process, which would be invalid and throw an error.
        connection.close()
        export_task = ExportTask.objects.get(uid=self.task_uid)
        if export_task.status == TaskStates.CANCELED.value:
            from ..tasks.exceptions import CancelException
            raise CancelException(
                task_name=export_task.export_provider_task.name,
                user_name=export_task.cancel_user.username)
Example #15
    def _reflash(self, path):
        """
        this will execute the upgrade operation in another process
        because the SSH connection may hang indefinitely while reflashing
        and would block the program; setting a timeout to `exec_command`
        doesn't seem to take effect on some OpenWRT versions
        so at least we can stop the process using
        `subprocess.join(timeout=self.UPGRADE_TIMEOUT)`
        """
        self.disconnect()
        command = self.get_upgrade_command(path)

        def upgrade(conn, path, timeout):
            conn.connect()
            conn.exec_command(command, timeout=timeout)
            conn.disconnect()

        subprocess = Process(target=upgrade,
                             args=[self, path, self.UPGRADE_TIMEOUT])
        subprocess.start()
        self.log('Upgrade operation in progress...')
        subprocess.join(timeout=self.UPGRADE_TIMEOUT)
        self.log(
            f'SSH connection closed, will wait {self.RECONNECT_DELAY} seconds before '
            'attempting to reconnect...')
        sleep(self.RECONNECT_DELAY)
        # kill the subprocess if it has hung
        if subprocess.is_alive():
            subprocess.terminate()
            subprocess.join()
Example #16
def cmd_save(args):
    '''save a graph'''
    child = Process(target=save_process,
                    args=[
                        mestate.last_graph, mestate.child_pipe_send_console,
                        mestate.child_pipe_send_graph, mestate.status.msgs
                    ])
    child.start()
Example #17
def telnet_client(server_id, port):
    key = 'server-{0}-pid'.format(server_id)
    pid = cache.get(key)
    if pid and pid in psutil.pids():  # get_pid_list() was removed in modern psutil
        os.kill(pid, signal.SIGTERM)
    p = Process(target=TelnetClient, args=(port, 'uptee', server_id))
    p.start()
    cache.set(key, p.pid)
    p.join()
Example #18
 def test_set_pdeathsig(self):
     return_pid = Value('i')
     p = Process(target=parent_task, args=(return_pid, ))
     p.start()
     sleep(3)  # wait for setting pdeathsig
     p.terminate()
     sleep(3)  # wait for process termination
     with pytest.raises(psutil.NoSuchProcess):
         psutil.Process(return_pid.value)
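
Note: parent_task is not shown. On Linux, the behaviour this test checks is usually achieved with prctl(PR_SET_PDEATHSIG, ...) in the child; a ctypes sketch (an assumption, not the original helper):

import ctypes
import signal
from multiprocessing import Process

PR_SET_PDEATHSIG = 1

def child_task():
    # ask the kernel to SIGKILL this process when its parent dies
    ctypes.CDLL('libc.so.6').prctl(PR_SET_PDEATHSIG, signal.SIGKILL)
    while True:
        pass

def parent_task(return_pid):
    child = Process(target=child_task)
    child.start()
    return_pid.value = child.pid
    child.join()
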
Example #19
 def crawl(self, oj, solution_id, problem_id, language, code, username,
           password):
     p = Process(target=self._crawl,
                 args=[
                     oj, solution_id, problem_id, language, code, username,
                     password
                 ])
     p.start()
     p.join()
Example #20
 def crawl(self, spider):
     queue = Queue()
     self.queue = Queue()  # note: _crawl receives the local queue above, not this one
     self.process = Process(target=self._crawl, args=(queue, spider))
     self.process.start()
     write_in_a_file('.crawl 1', {'process': self.process, 'process-pid': self.process and self.process.pid, 'queue': self.queue.qsize()}, "t.txt")
     self.process.join()
     write_in_a_file('.crawl 2', {'process': self.process,
                                  'process-pid': self.process and self.process.pid,
                                  'queue': self.queue.qsize()}, "t.txt")
Example #21
 def screenshot_endpoint(
     self,
     ip_address=None,
     port=None,
     hostname=None,
     use_ssl=False,
     use_sni=False,
     path="/",
     in_separate_process=False,
 ):
     """
     Take a screenshot of the given endpoint, save it to a local temporary file, and return the local
     file path.
     :param ip_address: The IP address where the web service resides.
     :param port: The port where the web service resides.
     :param hostname: The hostname to request.
     :param use_ssl: Whether or not to use SSL to request the endpoint.
     :param use_sni: Whether or not the endpoint uses SNI.
     :param path: The path of the resource to screenshot.
     :param in_separate_process: Whether or not to take the screenshot in a separate process. This is to
     address the incredibly long time that the Selenium webdriver can take when it hangs.
     :return: A tuple containing (1) the local file path where the screenshot was saved and (2) whether or not
     the screenshot was taken successfully.
     """
     logger.debug(
         "Now attempting to take a screenshot of the web service at %s:%s (%s). Hostname is %s, SNI support is %s."
         % (ip_address, port, "using SSL" if use_ssl else "plain HTTP",
            hostname, use_sni))
     self.__set_endpoint(
         ip_address=ip_address,
         port=port,
         hostname=hostname,
         use_ssl=use_ssl,
         use_sni=use_sni,
         path=path,
     )
     self._output_file_path = self.get_temporary_file_path()
     if in_separate_process:
         process = Process(target=self.__take_screenshot)
         try:
             process.start()
             process.join(config.selenium_screenshot_delay +
                          config.inspection_screenshot_join_timeout)
         except IOError as e:
             if e.errno == errno.EINTR:
                 logger.warning("Interrupted system call error received.")
             else:
                 raise e
         finally:
             if process.is_alive():
                 print("PROCESS IS ALIVE - PID IS %s" % (process.pid, ))
                 os.kill(process.pid, signal.SIGTERM)
     else:
         self.__take_screenshot()
     return self.output_file_path, FilesystemHelper.does_file_exist(
         self.output_file_path)
Example #22
 def init_marker_cacher(self):
     forking_enable(0)  # for macOS only
     from marker_detector_cacher import fill_cache
     visited_list = [False if x == False else True for x in self.cache]
     video_file_path = os.path.join(self.g_pool.rec_dir,'world.avi')
     self.cache_queue = Queue()
     self.cacher_seek_idx = Value(c_int,0)
     self.cacher_run = Value(c_bool,True)
     self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,self.cache_queue,self.cacher_seek_idx,self.cacher_run))
     self.cacher.start()
Example #23
def main():
    ProcessList = []
    for i in range(0, 100):
        ProcessList.append(Process(target=f, args=(i, )))
    for p in ProcessList:
        p.start()
    sleep(5)
    for p in ProcessList:
        p.join()
    print("main finished")
Example #24
 def init_marker_cacher(self):
     forking_enable(0)  # for macOS only
     from marker_detector_cacher import fill_cache
     visited_list = [False if x == False else True for x in self.cache]
     video_file_path = self.g_pool.capture.src
     timestamps = self.g_pool.capture.timestamps
     self.cache_queue = Queue()
     self.cacher_seek_idx = Value('i',0)
     self.cacher_run = Value(c_bool,True)
     self.cacher = Process(target=fill_cache, args=(visited_list,video_file_path,timestamps,self.cache_queue,self.cacher_seek_idx,self.cacher_run,self.min_marker_perimeter_cacher))
     self.cacher.start()
Example #25
 def test_set_pdeathsig(self):
     success = "done"
     q = Queue()
     p = Process(target=parent_task, args=(q, success))
     p.start()
     child_proc = psutil.Process(q.get(timeout=3))
     try:
         p.terminate()
         assert q.get(timeout=3) == success
     finally:
         child_proc.terminate()
Example #26
 def __init__(self, fun, args, postprocess, job):
     """ Build multiprocessing queues and start worker. """
     super(LongCalculation, self).__init__(job, "Cancel", 0, 0)
     self.setModal(True)
     self.input = Queue()
     self.output = Queue()
     self.input.put((fun, args, postprocess))
     self.proc = Process(target=worker, args=(self.input, self.output))
     self.proc.start()
     self.timer = QTimer()
     self.timer.timeout.connect(self.update)
     self.timer.start(10)
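
Note: worker is not shown. A minimal sketch consistent with the queue protocol above (take one (fun, args, postprocess) triple, run it, push the result):

def worker(input_queue, output_queue):
    fun, args, postprocess = input_queue.get()
    try:
        result = postprocess(fun(*args))
    except Exception as e:
        result = e  # let the dialog decide how to surface the error
    output_queue.put(result)
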
Example #27
def start_launch_servide_process(ld):
    """Starts a Launch Service process. To be called from subclasses.

    Args:
         ld : LaunchDescription obj.
    """
    # Create the LaunchService and feed the LaunchDescription obj. to it.
    ls = LaunchService()
    ls.include_launch_description(ld)
    p = Process(target=ls.run)
    # The daemon process is terminated automatically before the main program
    # exits, to avoid leaving orphaned processes running.
    p.daemon = True
    p.start()
Example #28
    def run_player(self, graphdef):
        if 'mestate' in globals():
            self.mestate.console.write("Running Player...")
        else:
            self.mestate.child_pipe_send_console.send("Running Player...")
        self.player = player.MavPlay()
        self.player.add_mav(self.mlog)
        if platform.system() == 'Darwin':
            forking_enable(False)

        child = Process(target=self.player.run)
        child.start()
Example #29
def run_crawler_process(params: dict) -> Process:
    """
    Start scrapy spider from a separate process
    :param dict params: scrapy spider parameters
    :return: process instance
    """
    process = Process(
        target=run_crawler,
        args=(params, ),
    )
    process.start()

    return process
Example #30
def run_crawler(params):
    """
    Start a Scrapy crawler from a separate process
    :param dict params: scrapy spider parameters
    :return: process instance
    """
    process = Process(
        target=crawler_process,
        args=(params, ),
    )
    process.start()

    return process
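
A hedged usage sketch for Examples #29 and #30 (the parameter dict is an assumption): the caller owns the returned process and should join or terminate it.

proc = run_crawler_process({'spider': 'example_spider'})
try:
    proc.join(timeout=600)  # allow the crawl up to ten minutes
finally:
    if proc.is_alive():
        proc.terminate()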