def compute_feature_extraction(extractor, data):
    manager = mp.Manager()
    q = manager.Queue(maxsize=8)
    process_list = []

    pre_compute = mp.Process(target=pre_compute_sillhouttes,
                             args=(data, q, extractor))
    pre_compute.start()

    num_processes = 4
    for i in range(num_processes):
        time.sleep(random.random() * 0.1)  # stagger worker start-up
        p = mp.Process(
            target=listener,
            args=(extractor, q, i),
        )
        process_list.append(p)
        p.start()

    pre_compute.join()
    # enqueue sentinels (a few extra) so every listener sees one and exits
    for i in range(num_processes + 5):
        q.put((None, None))

    for p in process_list:
        p.join()

    return True
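The `listener` workers drain the shared queue until they receive the `(None, None)` sentinel enqueued above. A minimal sketch of such a worker, assuming the queue carries `(key, silhouette)` pairs and that the extractor exposes a hypothetical `extract` method:

def listener(extractor, q, worker_id):
    # consume work items until the (None, None) sentinel arrives
    while True:
        key, silhouette = q.get()
        if key is None:
            break
        # hypothetical call: process one precomputed silhouette
        extractor.extract(key, silhouette)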
Example No. 2
    def __init__(self,
                 num_of_process: int,
                 mapper: Callable,
                 reducer: Callable,
                 mapper_queue_size: int = 0,
                 reducer_queue_size: int = 0):
        self._mapper_queue = mp.Queue(maxsize=mapper_queue_size)
        self._reducer_queue = ChunkedQueue(maxsize=reducer_queue_size)
        self._result_queue = ChunkedQueue()
        self._mapper_cmd_queue = [mp.Queue() for _ in range(num_of_process)]
        self._reducer_cmd_queue = [mp.Queue() for _ in range(num_of_process)]
        self._manager_cmd_queue = mp.Queue()

        self._manager_process = mp.Process(target=self._run_manager)
        self._mapper_process = [
            mp.Process(target=self._run_mapper, args=(i, ))
            for i in range(num_of_process)
        ]
        self._reducer_process = [
            mp.Process(target=self._run_reducer, args=(i, ))
            for i in range(num_of_process)
        ]

        self._mapper = mapper
        self._reducer = reducer
        self._num_of_process = num_of_process
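A mapper worker for this pipeline would pull items from `_mapper_queue`, apply `self._mapper`, and push results onto `_reducer_queue`; the real class also coordinates shutdown through `_mapper_cmd_queue`. A simplified sketch using a None sentinel in place of the command queues:

    def _run_mapper(self, idx):
        # simplified sketch: a None item signals shutdown; the actual
        # implementation listens on self._mapper_cmd_queue[idx] instead
        while True:
            item = self._mapper_queue.get()
            if item is None:
                break
            self._reducer_queue.put(self._mapper(item))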
Example No. 3
    def setup(self):
        """
        Set up filesystem in user space for http and https
        so that we can retrieve tiles from remote sources.

        Parameters
        ----------
        tmp_dir: string
            The temporary directory where to create the
            http and https directories
        """
        from simple_httpfs import HttpFs

        if not op.exists(self.http_directory):
            os.makedirs(self.http_directory)
        if not op.exists(self.https_directory):
            os.makedirs(self.https_directory)
        if not op.exists(self.diskcache_directory):
            os.makedirs(self.diskcache_directory)

        self.teardown()

        disk_cache_size = 2**25
        lru_capacity = 400

        def start_fuse(directory, protocol):
            try:
                # this import can cause problems on systems that don't have libfuse
                # installed so let's only try it if absolutely necessary
                from fuse import FUSE

                # This is a bit confusing. I think `fuse` (lowercase) is used
                # above in get_filepath() (lines 50 and 52). If that's not the
                # case, then this assignment is unused and get_filepath() is
                # broken.
                fuse = FUSE(
                    HttpFs(
                        protocol,
                        disk_cache_size=disk_cache_size,
                        disk_cache_dir=self.diskcache_directory,
                        lru_capacity=lru_capacity,
                    ),
                    directory,
                    foreground=False,
                    # allow_other=True
                )
            except RuntimeError as e:
                if str(e) != "1":
                    raise e

        proc1 = mp.Process(target=start_fuse,
                           args=[self.http_directory, "http"])
        proc1.start()
        proc1.join()

        proc2 = mp.Process(target=start_fuse,
                           args=[self.https_directory, "https"])
        proc2.start()
        proc2.join()
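The `self.teardown()` call above unmounts any stale FUSE mounts before remounting. Example No. 22 below does the same inline with `sh.umount`; an equivalent `teardown` might look like this (a sketch, assuming the `sh` package is available):

    def teardown(self):
        import sh
        for directory in (self.http_directory, self.https_directory):
            try:
                sh.umount(directory)
            except Exception:
                # nothing was mounted; ignore
                pass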
Example No. 4
def multicore():
    q = mp.Queue()
    p1 = mp.Process(target=job, args=(q, ))
    p2 = mp.Process(target=job, args=(q, ))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    res1 = q.get()
    res2 = q.get()
    print('multicore:', res1 + res2)
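`job` is not shown; any function that pushes a single result onto the queue will do. A minimal sketch with a hypothetical workload:

def job(q):
    # hypothetical workload: sum of squares
    res = sum(i * i for i in range(1000))
    q.put(res)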
Example No. 5
def main():
    enable = mp.Value('i', 0)
    while True:
        # check whether to start
        sign = input("Enter control command: ")
        print('sign = {}'.format(sign))
        if sign == '1':  #bytes([0x01])
            print('connect')
            enable.value = 1
            mp.Process(target=motionControl, args=(enable, )).start()
            mp.Process(target=displayImage, args=(enable, )).start()

        elif sign == '2':
            print('disconnect')
            enable.value = 0
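Both child processes receive the shared `enable` flag and should exit once it is reset to 0. A sketch of what `motionControl` might look like (the loop body is hypothetical):

def motionControl(enable):
    while enable.value == 1:
        # hypothetical: issue one motion command per iteration
        time.sleep(0.05)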
Example No. 6
def test_qoi_run_server(tmpdir, proxy, RE, hw):
    def delayed_sigint(delay):  # pragma: no cover
        time.sleep(delay)
        print("killing")
        os.kill(os.getpid(), signal.SIGINT)

    def run_exp(delay):  # pragma: no cover
        time.sleep(delay)
        print("running exp")

        p = Publisher(proxy[0], prefix=b"raw")
        RE.subscribe(p)
        det = SynSignal(func=lambda: np.ones(10), name="gr")
        RE(bp.count([det], md=dict(analysis_stage="raw")))
        RE(bp.count([det], md=dict(analysis_stage="pdf")))

    # Run experiment in another process (after delay)
    exp_proc = multiprocess.Process(target=run_exp, args=(2, ), daemon=True)
    exp_proc.start()

    # send the message that will eventually kick us out of the server loop
    threading.Thread(target=delayed_sigint, args=(10, )).start()
    L = []
    try:
        print("running server")
        qoi_run_server(_publisher=lambda *x: L.append(x))

    except KeyboardInterrupt:
        print("finished server")
    exp_proc.terminate()
    exp_proc.join()
    assert L
Example No. 7
def start_record_server():

    tcpserver = RecordServer()
    server_proc = mp.Process(
        target=tcpserver.serve_until_stopped)
    server_proc.start()
    return server_proc
Example No. 8
    def show(self):
        # credit for this code goes to the higlass-python team: https://github.com/higlass/higlass-python
        for p in list(self.processes.keys()):
            if self.port == p:
                print("delete ", self.processes[p])
                self.processes[p].kill()
                del self.processes[p]
                time.sleep(0.5)
        #print(self.processes)
        uuid = slugid.nice()

        target = partial(eventlet.wsgi.server,
                         sock=eventlet.listen(('localhost', self.port)),
                         site=fApp)

        self.processes[self.port] = mp.Process(
            target=target)  #self.startServer, args=(q,))
        self.processes[self.port].start()

        self.connected = False
        while not self.connected:
            try:
                url = "http://{}:{}/".format('localhost', self.port)
                r = requests.head(url)
                if r.ok:
                    self.connected = True
            except requests.ConnectionError:
                time.sleep(0.2)
Example No. 9
def run_parallel_async(graph, nprocs=None, sleep=0.2, raise_errors=False):
    if nprocs == 1:
        return run_async(graph, sleep=sleep, raise_errors=raise_errors)

    nprocs = nprocs or mp.cpu_count() // 2

    with mp.Manager() as manager:
        graph = tgraph.create_parallel_compatible_graph(graph, manager)

        ioq = mp.Queue(len(graph.funcs.keys()))
        cpuq = mp.Queue(len(graph.funcs.keys()))

        procs = [mp.Process(target=run_scheduler,
                            args=(graph, sleep, ioq, cpuq, raise_errors))
                 for _ in range(nprocs)]
        for proc in procs:
            proc.start()

        while not tgraph.all_done(graph):
            for task in tgraph.get_ready_tasks(graph):
                graph = tgraph.mark_as_in_progress(graph, task)
                mlog(graph).info(
                    'pid {}: queueing task {}'.format(os.getpid(), task))
                if task in graph.io_bound:
                    ioq.put(task)
                else:
                    cpuq.put(task)

            time.sleep(sleep)

            if raise_errors and any(p.exitcode for p in procs):
                raise RuntimeError('An async task has failed. Please check your logs')

        return tgraph.recover_values_from_manager(graph)
Example No. 10
def run_parallel_async(graph, nprocs=None, sleep=0.2):
    if nprocs == 1:
        return run_async(graph)

    nprocs = nprocs or mp.cpu_count() // 2

    with mp.Manager() as manager:
        graph = tgraph.create_parallel_compatible_graph(graph, manager)

        ioq = mp.Queue(len(graph.funcs.keys()))
        cpuq = mp.Queue(len(graph.funcs.keys()))

        for _ in range(nprocs):
            proc = mp.Process(target=run_scheduler,
                              args=(graph, sleep, ioq, cpuq))
            proc.start()

        while not tgraph.all_done(graph):
            for task in tgraph.get_ready_tasks(graph):
                graph = tgraph.mark_as_in_progress(graph, task)
                mlog(graph).info('pid {}: queueing task {}'.format(
                    os.getpid(), task))
                if task in graph.io_bound:
                    ioq.put(task)
                else:
                    cpuq.put(task)

            time.sleep(sleep)

        return tgraph.recover_values_from_manager(graph)
Example No. 11
def shodan(self, scope_file, output):
    """
The Shodan module:\n
Look up information on the target IP address(es) using Shodan's API.\n
A Shodan API key is required.
    """
    asciis.print_art()
    print(
        green(
            "[+] Shodan Module Selected: O.D.I.N. will check Shodan for the provided domains \
and IPs."))

    if __name__ == "__main__":
        report = reporter.Reporter(output)
        scope, ip_list, domains_list = report.prepare_scope(scope_file)

        # Create empty job queue
        jobs = []
        shodan_report = multiprocess.Process(name="Shodan Report",
                                             target=report.create_shodan_table,
                                             args=(ip_list, domains_list))
        jobs.append(shodan_report)

        for job in jobs:
            print(green("[+] Starting new process: {}".format(job.name)))
            job.start()
        for job in jobs:
            job.join()

        report.close_out_reporting()
        print(green("[+] Job's done! Your results are in {}.".format(output)))
Example No. 12
def start_train(resume):

    urllib.request.urlretrieve(const.url + '/network_file',
                               'deliverables/network.py')
    urllib.request.urlretrieve(const.url + '/config_file',
                               'deliverables/input_params.py')
    urllib.request.urlretrieve(const.url + '/observation_file',
                               'deliverables/observation.py')
    urllib.request.urlretrieve(const.url + '/curriculum_file',
                               'deliverables/curriculum.py')

    num_workers = mp.cpu_count() - 1
    should_stop = mp.Value(c_bool, False)

    while True:
        worker_processes = []

        # create_worker(0, should_stop)

        # Start process 1 - n, running in other processes
        for w_num in range(0, num_workers):
            process = mp.Process(target=create_worker,
                                 args=(w_num, should_stop))
            process.start()
            sleep(0.5)
            worker_processes.append(process)

        try:
            for p in worker_processes:
                p.join()
        except KeyboardInterrupt:
            should_stop.value = True
            break

    print("Looks like we're done")
Example No. 13
def main():

    initialize()
    game = setup_game()

    with mp.Manager() as manager:

        exc = manager.Queue()

        arg_list = []
        for i in range(0, 3):
            arg_list.append(((i, i + 3), game, False, exc))
        arg_list.append(((5, 5), game, True, exc))

        proc_list = []

        for arg in arg_list:
            proc_list.append(mp.Process(target=function, args=arg))
            proc_list[-1].start()

        print("Number of active children post start: %d" %
              len(mp.active_children()))
        for p in proc_list:
            p.join()
        if not exc.empty():
            e = exc.get()
            print(e)

    print("Number active children post join: %d " % len(mp.active_children()))
    print(mp.active_children())
    print(mp.current_process())
Example No. 14
def test_pipespeed():
    c, d = processing.Pipe()
    cond = processing.Condition()
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        p = processing.Process(target=pipe_func, args=(d, cond, iterations))
        cond.acquire()
        p.start()
        cond.wait()
        cond.release()

        result = None
        t = _timer()

        while result != 'STOP':
            result = c.recv()

        elapsed = _timer() - t
        p.join()

    print(iterations, 'objects passed through connection in', elapsed, 'seconds')
    print('average number/sec:', iterations/elapsed)
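`pipe_func` is the sending side: it notifies the waiting parent through the condition, then streams `iterations` objects followed by the 'STOP' sentinel. A sketch (the payload object is hypothetical):

def pipe_func(conn, cond, iterations):
    with cond:
        cond.notify()  # release the parent from cond.wait()
    for _ in range(iterations):
        conn.send('payload')  # hypothetical object being timed
    conn.send('STOP')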
Example No. 15
    def start(self, log_file='/tmp/hgserver.log', log_level=logging.INFO):

        for puid in list(self.processes.keys()):
            print("terminating:", puid)
            self.processes[puid].terminate()
            del self.processes[puid]

        self.app = create_app(
            self.tilesets,
            __name__,
            log_file=log_file,
            log_level=log_level)

        # we're going to assign a uuid to each server process so that if anything
        # goes wrong, the variable referencing the process doesn't get lost
        uuid = slugid.nice().decode('utf8')
        if self.port is None:
            self.port = get_open_port()
        target = partial(self.app.run,
                         threaded=True,
                         debug=True,
                         host='0.0.0.0',
                         port=self.port,
                         use_reloader=False)
        self.processes[uuid] = mp.Process(target=target)
        self.processes[uuid].start()
        self.connected = False
        while not self.connected:
            try:
                url = 'http://{}:{}/api/v1'.format(self.host, self.port)
                r = requests.head(url)
                if r.ok:
                    self.connected = True
            except requests.ConnectionError:
                time.sleep(0.2)
Example No. 16
def run(params: List[Parameter], bt_log: bool, pe: bool) -> pandas.DataFrame:

    njobs = len(params)
    results = []
    jobs = []
    receivers = []
    for i in range(njobs):
        initializer = Initializer(params[i].population_size, params[i].landscape)
        init_pop = initializer.init_with_const(params[i].nucleotides, params[i].base_paires)
        receiver, sender = mp.Pipe(False)
        p = mp.Process(target=ea_without_crossover,
                       args=(init_pop, params[i], bt_log, pe, sender))
        jobs.append(p)
        receivers.append(receiver)
    
    for i in range(njobs):
        jobs[i].start()
        results.append(receivers[i].recv())
        jobs[i].join()
    
    
    best_solutions = []
    for i, rst in enumerate(results):
        for ind in rst[1]:
            if ind.fitness == 1.0:
                ed = params[i].landscape.ens_defect(
                    params[i].landscape.target_structure, ind.rna_sequence)
                best_solutions.append(
                    [ind.rna_sequence, ind.fitness, ind.mfe, 1 / ed])
    df = pandas.DataFrame(best_solutions,
                          columns=["Sequence", "Fitness", "MFE", "ED"])

    return df
Example No. 17
def test_tomo_run_server_3d_pencil(tmpdir, proxy, RE, hw):
    def delayed_sigint(delay):  # pragma: no cover
        time.sleep(delay)
        print("killing")
        os.kill(os.getpid(), signal.SIGINT)

    def run_exp(delay):  # pragma: no cover
        time.sleep(delay)
        print("running exp")

        p = Publisher(proxy[0], prefix=b"an")
        RE.subscribe(p)

        RE(
            bp.grid_scan(
                [hw.noisy_det],
                hw.motor3,
                0,
                2,
                2,
                hw.motor1,
                0,
                2,
                2,
                True,
                hw.motor2,
                0,
                2,
                2,
                True,
                md={
                    "tomo": {
                        "type": "pencil",
                        "rotation": "motor1",
                        "translation": "motor2",
                        "stack": "motor3",
                        "center": 1,
                    }
                },
            ))

    # Run experiment in another process (after delay)
    exp_proc = multiprocess.Process(target=run_exp, args=(2, ), daemon=True)
    exp_proc.start()

    # send the message that will eventually kick us out of the server loop
    threading.Thread(target=delayed_sigint, args=(10, )).start()
    L = []
    try:
        print("running server")
        tomo_run_server(_publisher=lambda *x: L.append(x), algorithm="fbp")

    except KeyboardInterrupt:
        print("finished server")
    exp_proc.terminate()
    exp_proc.join()
    assert L
Example No. 18
def main():

    enable = mp.Value('i', 0)
    imgQueue = mp.Queue(0)
    imgQueueBin = mp.Queue(0)

    while True:
        sign = receive()
        if sign == 1:
            print('connect')
            enable.value = 1
            mp.Process(target=motionControl,
                       args=(enable, imgQueue, imgQueueBin)).start()
            mp.Process(target=displayImage,
                       args=(enable, imgQueue, imgQueueBin)).start()
        elif sign == 2:
            print('disconnect')
            enable.value = 0
Example No. 19
def people(self, client, domain):
    """
Uses TheHarvester and EmailHunter to locate email addresses and social media profiles. Profiles
are cross-referenced with HaveIBeenPwned, Twitter's API, and search engines to try to find security
breaches, pastes, and social media accounts.\n
Several API keys are required for all of the look-ups: EmailHunter and Twitter.
    """
    asciis.print_art()
    print(
        green(
            "[+] People Module Selected: O.D.I.N. will run only modules for email addresses \
and social media."))

    # Perform prep work for reporting
    setup_reports(client)
    output_report = "reports/{}/OSINT_DB.db".format(client)

    if __name__ == "__main__":
        report = reporter.Reporter(output_report)

        # Create empty job queue
        jobs = []
        company_info = multiprocess.Process(
            name="Company Info Report",
            target=report.create_company_info_table,
            args=(domain, ))
        jobs.append(company_info)
        employee_report = multiprocess.Process(
            name="Employee Report",
            target=report.create_people_table,
            args=(domain, client))
        jobs.append(employee_report)

        for job in jobs:
            print(green("[+] Starting new process: {}".format(job.name)))
            job.start()
        for job in jobs:
            job.join()

        report.close_out_reporting()
        print(
            green("[+] Job's done! Your results are in {}.".format(
                output_report)))
Example No. 20
    def run(self):
        with mb.Manager() as m:
            results = m.dict()
            p = mb.Process(target=QLWinSingleTest._run_test,
                           args=(self, results))
            p.start()
            p.join()
            if "exception" not in results:
                return results['result']
            else:
                raise RuntimeError(f"\n\nGot an exception during subprocess:\n\n{results['exception']}")
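`_run_test` runs in the child process and reports back through the manager dict under a 'result' or 'exception' key. A sketch, assuming a hypothetical `_test` entry point:

    @staticmethod
    def _run_test(test, results):
        try:
            results['result'] = test._test()  # hypothetical entry point
        except Exception as e:
            results['exception'] = e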
Example No. 21
def start_servers(target1: Callable, target2: Callable, target3: Callable,
                  metrics_target: Callable) -> None:
    """
    Start servers

    Parameters
    ----------
    target1
       Main flask process
    target2
       Auxiliary flask process
    target3
       Additional auxiliary process, run as a daemon
    metrics_target
       Metrics server process, run as a daemon

    """
    if USE_MULTIPROCESS:
        logger.info("Using alternative multiprocessing library")
    else:
        logger.info("Using standard multiprocessing library")

    p2 = None
    if target2:
        p2 = mp.Process(target=target2, daemon=False)
        p2.start()

    p3 = None
    if target3:
        p3 = mp.Process(target=target3, daemon=True)
        p3.start()

    p4 = None
    if metrics_target:
        p4 = mp.Process(target=metrics_target, daemon=True)
        p4.start()

    target1()

    if p2:
        p2.join()

    if p3:
        p3.join()

    if p4:
        p4.join()
Example No. 22
    def setup(self):
        """
        Set up filesystem in user space for http and https
        so that we can retrieve tiles from remote sources.

        Parameters
        ----------
        tmp_dir: string
            The temporary directory where to create the
            http and https directories
        """
        from simple_httpfs import HttpFs

        if not op.exists(self.http_directory):
            os.makedirs(self.http_directory)
        if not op.exists(self.https_directory):
            os.makedirs(self.https_directory)
        if not op.exists(self.diskcache_directory):
            os.makedirs(self.diskcache_directory)

        try:
            sh.umount(self.http_directory)
        except Exception:
            pass

        try:
            sh.umount(self.https_directory)
        except Exception:
            pass

        disk_cache_size = 2**25
        disk_cache_dir = self.diskcache_directory
        lru_capacity = 400
        print(
            "self.diskcache_directory",
            self.diskcache_directory,
            op.exists(self.diskcache_directory),
        )

        def start_fuse(directory):
            print("starting fuse")
            # FUSE is imported lazily here, as in Example No. 3
            from fuse import FUSE

            fuse = FUSE(
                HttpFs(
                    "http",
                    disk_cache_size=disk_cache_size,
                    disk_cache_dir=self.diskcache_directory,
                    lru_capacity=lru_capacity,
                ),
                directory,
                foreground=False,
            )

        proc = mp.Process(target=start_fuse, args=[self.http_directory])
        proc.start()
        proc.join()
Example No. 23
    def run(self):
        with mb.Manager() as m:
            results = m.dict()
            p = mb.Process(target=QLWinSingleTest._run_test,
                           args=(self, results))
            p.start()
            p.join()
            if "exception" not in results:
                return results['result']
            else:
                raise results['exception']
Example No. 24
    def start_break_timer(self, length):
        # Basically the same implementation as the timer for the pomodoro,
        # but this one cannot be paused
        print('\nBreak started\n')
        current_length = multiprocess.Queue()
        break_process = multiprocess.Process(target=countdown,
                                             args=(length, current_length,
                                                   self.sound_file))
        break_process.start()
        break_process.join()
        break_process.terminate()
        input('Press ENTER to start another pomodoro\r')
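`countdown` receives the break length, a queue for reporting remaining time, and the sound file to play when time is up. A sketch (the sound playback is left as a comment since the mechanism is not shown):

def countdown(length, current_length, sound_file):
    remaining = length
    while remaining > 0:
        time.sleep(1)
        remaining -= 1
        current_length.put(remaining)  # report progress to the parent
    # hypothetical: play sound_file here to signal the end of the break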
Example No. 25
    def generate(self: object) -> None:
        print("Beginning data generation...")
        start_seconds = time.time()
        queue = multiprocess.Queue()
        w = multiprocess.Process(target=self.write, args=(queue, "STOP"))
        jobs = []

        for i in range(0, 5):
            p = multiprocess.Process(target=self.gen, args=(queue, ))
            jobs.append(p)
            p.start()

        w.start()
        for i, item in enumerate(jobs):
            item.join()
        queue.put("STOP")
        w.join()
        elapsed_time = (time.time() - start_seconds) / 60
        print("Generation completed. Elapsed time: ",
              "{0:.2f}".format(elapsed_time), " minutes")
Example No. 26
def startProcess(source, threads, midFolder):
    #PARAMETERS
    vilDirec = returnDirectoryCSV(source)
    totalLines = len(vilDirec)
    numLines = int(totalLines / threads) + 1

    processes = []
    arguments = []
    finished = []
    allDone = False
    count = 0

    for s in range(0, totalLines, numLines):
        if (s + numLines > totalLines):
            end = totalLines
        else:
            end = s + numLines
        arg = [midFolder, vilDirec[s:end], count]
        process = mp.Process(target=scrape, args=arg)
        processes.append(process)
        arguments.append(arg)
        finished.append(False)
        count += 1

    for p in processes:
        p.start()

    while not allDone:
        for i in range(len(processes)):
            curProcess = processes[i]
            if not curProcess.is_alive() and not finished[i]:
                processes[i] = mp.Process(target=scrape, args=arguments[i])
                processes[i].start()
                sleep(15)
                if not processes[i].is_alive():
                    finished[i] = True
        allDone = True
        for b in finished:
            if not b:
                allDone = False
                break
Example No. 27
def test_join_timeout():
    p = processing.Process(target=join_timeout_func)
    p.start()

    print('waiting for process to finish')

    while 1:
        p.join(timeout=1)
        if not p.is_alive():
            break
        print('.', end=' ')
        sys.stdout.flush()
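`join_timeout_func` only needs to run longer than a few one-second join timeouts so that the parent prints some dots. A sketch:

def join_timeout_func():
    time.sleep(5)  # outlive a few p.join(timeout=1) calls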
Example No. 28
def server():
    started_event = multiprocessing.Event()
    exit_event = multiprocessing.Event()
    process = multiprocessing.Process(
        target=lambda: MPServer(started_event, exit_event).run(rate=1000))
    process.start()
    started_event.wait()  # Starting processes is really slow on Windows
    time.sleep(0.1)  # Give the server a head start at stdout
    yield
    exit_event.set()
    process.join(timeout=1)
    process.terminate()

def server_with_on_sub_handler(sub_mgr, schema, on_sub_handler):

    app = create_app(sub_mgr, schema, on_sub_handler)

    process = multiprocess.Process(target=app_worker,
                                   kwargs={
                                       'app': app,
                                       'port': TEST_PORT
                                   })
    process.start()
    yield
    process.terminate()

def server_with_keep_alive(sub_mgr, schema):

    app = create_app(sub_mgr, schema, {'keep_alive': .250})

    process = multiprocess.Process(target=app_worker,
                                   kwargs={
                                       'app': app,
                                       'port': TEST_PORT
                                   })
    process.start()
    yield
    process.terminate()