Example #1
def main():
    size = os.path.getsize("dict.txt")
    half_size = size // 2

    p1 = Process(target=read_part1, args=("dict.txt", half_size))
    p2 = Process(target=read_part2, args=("dict.txt", half_size))
    p1.start()
    p2.start()

    p1.join()
    p2.join()
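The two reader targets are not shown; a minimal sketch of what they might look like, with the imports the example relies on (the byte-oriented split and the bodies are assumptions):

import os
from multiprocessing import Process

def read_part1(path, half_size):
    # Hypothetical: read the first half of the file.
    with open(path, "rb") as f:
        print(len(f.read(half_size)))

def read_part2(path, half_size):
    # Hypothetical: skip the first half and read the rest.
    with open(path, "rb") as f:
        f.seek(half_size)
        print(len(f.read()))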
Example #2
def test_agent() -> None:
    stop_ev = Event()
    conn, agent_conn = Pipe()

    waypoints = [(-80.0, -65.0, 3.0), (-45.0, -66.0, 3.0), (-33.0, -65.0, 3.0),
                 (-33.0, -65.0, 0.3)]
    aut = Agent(uid=1, motion=MotionHectorQuad("/drone1"), waypoints=waypoints)

    p = Process(target=run_as_process,
                kwargs={
                    "aut": aut,
                    "conn": agent_conn,
                    "stop_ev": stop_ev
                })
    p.start()

    try:
        for i in range(10):
            if conn.poll(1.0):
                act = conn.recv()
                print(act)
                if act[0] == "request":
                    reply = input("> ")
                    conn.send(("reply", {
                        "uid": aut.uid,
                        "acquired": Contract()
                    }))
            else:
                print("Response timeout")
    finally:
        stop_ev.set()  # Stop all automatons
        p.join()
        conn.close()
        agent_conn.close()
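run_as_process is shared with Example #4 but not shown; a rough sketch of the contract it appears to satisfy (the Agent methods used here are assumptions):

def run_as_process(aut, conn, stop_ev):
    # Hypothetical: drive the automaton until the stop event fires, exchanging
    # ("request", ...) / ("reply", ...) tuples over the pipe.
    while not stop_ev.is_set():
        if conn.poll(0.1):
            aut.handle(conn.recv())  # assumed Agent API
        for act in aut.step():       # assumed Agent API
            conn.send(act)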
Example #3
def test_disk_locking_processes(block_maker, storage_factory, redis_hostname):
    def visit(storage, root):
        ds = block_maker.first_ds(first_constant=2, ids_arg=3)
        cached = ds >> Apply(image=sleeper(0.1)) >> CacheToDisk(
            root, storage, JsonSerializer(), 'image')

        for i in ds.ids:
            # the first call computes and fills the cache; the repeated call
            # must return the identical value from the cache
            assert ds.image(i) == cached.image(i)
            assert ds.image(i) == cached.image(i)

    for _ in range(5):
        with tempfile.TemporaryDirectory() as temp, storage_factory() as temp_storage:
            temp = Path(temp) / 'cache'
            init_storage(temp,
                         algorithm={
                             'name': 'blake2b',
                             'digest_size': 64
                         },
                         levels=[1, 31, 32],
                         locker={
                             'name': 'RedisLocker',
                             'args': [redis_hostname],
                             'kwargs': {
                                 'prefix': 'connectome.tests',
                                 'expire': 10
                             }
                         })

            proc = Process(target=visit, args=(temp_storage, temp))
            proc.start()
            visit(temp_storage, temp)
            proc.join()
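sleeper is not defined in the excerpt; it plausibly wraps an identity transform with a delay so the two processes race on the disk cache (body is an assumption):

import time

def sleeper(duration):
    # Hypothetical: identity function with a delay, to widen the race window.
    def f(x):
        time.sleep(duration)
        return x
    return f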
Example #4
def test_contract_manager() -> None:
    aut = AirspaceManager()
    stop_ev = Event()
    conn, manager_conn = Pipe()

    p = Process(target=run_as_process, kwargs={"aut": aut,
                                               "conn": manager_conn,
                                               "stop_ev": stop_ev})
    p.start()
    try:
        uid = 0
        for i in range(10):
            if conn.poll(1.0):
                act = conn.recv()
                print(act)
            elif i % 3 != 2:
                uid = i % 5
                target = Contract.from_stamped_rectangles([
                    (0.0, Rectangle(mins=[0, 0, 0], maxes=[1, 1, 0.5])),
                    (0.5, Rectangle(mins=[0, 0.5, 0], maxes=[2, 3, 0.5])),
                    (1.0, Rectangle(mins=[0.5, 0.5, 1.0], maxes=[1.5, 1.5, 1.5]))
                    ])
                conn.send(("request", {"uid": uid, "target": target}))
            else:
                releasable = Contract.from_stamped_rectangles([
                    (0.0, Rectangle(mins=[0, 0, 0], maxes=[1, 1, 0.5])),
                    (0.5, Rectangle(mins=[0, 0.5, 0], maxes=[2, 2, 0.5]))
                    ])
                print("Agent " + str(uid) + " release > " + str(releasable))
                conn.send(("release", {"uid": uid, "releasable": releasable}))
    finally:
        stop_ev.set()  # Stop all automatons
        p.join()
        conn.close()
        manager_conn.close()
def main():
    # num_workers = int(mp.cpu_count() / 3)
    num_workers = 1
    for i in range(num_workers):
        file_ids = get_files_for_worker(i, num_workers)
        process = Process(target=worker, args=(file_ids, i))
        process.start()
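get_files_for_worker is not shown; a plausible round-robin split of an input directory across workers (the directory name and the body are assumptions):

import os

def get_files_for_worker(worker_index, num_workers):
    # Hypothetical: hand each worker every num_workers-th file.
    all_files = sorted(os.listdir("input"))
    return all_files[worker_index::num_workers]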
Example #6
    def start(self, context: ExecutionContext):
        """
        Start multiple processes or threads with the worker function as a target.

        :param context: execution context
        :type context: ExecutionContext
        """
        target_function = worker_function
        if self.use_savers:
            # Save a full dump first so that worker processes can reload the
            # pipeline from disk instead of receiving it via pickling.
            self.save(context, full_dump=True)
            target_function = worker_function

        self.workers = []
        for _, worker_arguments in zip(range(self.n_workers),
                                       self.additional_worker_arguments):
            if self.use_threading:
                p = Thread(target=target_function,
                           args=(self, context, self.use_savers,
                                 worker_arguments))
            else:
                p = Process(target=target_function,
                            args=(self, context, self.use_savers,
                                  worker_arguments))

            p.daemon = True
            p.start()
            self.workers.append(p)
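start() leaves the workers running; a companion join, not shown in the original, could look like this (an assumption about the class):

    def join(self):
        # Hypothetical: wait for every worker thread or process to finish.
        for w in self.workers:
            w.join()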
Example #7
def setup_mock_web_api_server(test: TestCase):
    if get_mock_server_mode() == "threading":
        test.server_started = threading.Event()
        test.thread = MockServerThread(test)
        test.thread.start()
        test.server_started.wait()
    else:
        # start a mock server as another process
        target = MockServerProcessTarget()
        test.server_url = "http://localhost:8888"
        test.host, test.port = "localhost", 8888
        test.process = Process(target=target.run, daemon=True)
        test.process.start()

        time.sleep(0.1)

        # start a thread in the current process
        # this thread fetches mock_received_requests from the remote process
        test.monitor_thread = MonitorThread(test)
        test.monitor_thread.start()
        count = 0
        # wait until the first successful data retrieval
        while test.mock_received_requests is None:
            time.sleep(0.01)
            count += 1
            if count >= 100:
                raise Exception("The mock server is not yet running!")
Example #8
def spider_process(name,
                   keyword=None,
                   item_num=None,
                   url=None,
                   spider=None,
                   result=None):
    custom_settings = get_config(name)
    if not spider:
        spider = custom_settings.get('spider', 'aCrawler')
    project_settings = get_project_settings()
    settings = dict(project_settings.copy())
    settings.update(custom_settings.get('settings'))
    print(item_num)
    if item_num is not None:
        # In keyword mode, CONCURRENT_REQUESTS caps the number of simultaneous
        # requests so no other request keeps running asynchronously after the
        # spider is stopped, at the cost of slower crawling.
        settings["CONCURRENT_REQUESTS"] = 1
    print(settings)
    process = Process(target=crawl,
                      kwargs={
                          'settings': settings,
                          'spider': spider,
                          'name': name,
                          'custom_settings': custom_settings,
                          'keyword': keyword,
                          'item_num': item_num,
                          'url': url,
                          'result': result
                      })
    return process
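Note that spider_process only constructs the Process; the caller starts and joins it. Hypothetical usage:

p = spider_process('jd', keyword='laptop', item_num=10)
p.start()
p.join()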
Example #9
def initiate_new_training(workspace_id: ObjectId,
                          training_config_in_train: TrainingConfigInTrain):
    config = parse_config_for_training(training_config_in_train)
    data_set_manager = DataSetManager(workspace_id, WorkspaceDataSource())
    trainer = Trainer(training_config=config,
                      data_set_manager=data_set_manager,
                      create_db=create_sync_db)
    Process(target=trainer.train).start()
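Because the Process handle is discarded, the caller cannot join or monitor training; a variant that returns the handle (an assumption, not the original API) would end with:

    process = Process(target=trainer.train)
    process.start()
    return process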
Example #10
def main():
    sys.stdout = open('log_print.txt', 'a')
    # num_workers = int(mp.cpu_count() / 3)
    num_workers = 1
    for i in range(num_workers):
        file_ids = get_files_for_worker(i, num_workers)
        process = Process(target=worker, args=(file_ids, i))
        process.start()
Example #11
def do_scraping_from_sitemaps(country: str):
    """ Launch scrapping for each of the hotels """
    sitemaps_paths = get_all_sitemaps_paths()
    l = len(sitemaps_paths)
    for i, sitemaps_path in enumerate(sitemaps_paths):
        sitemap_urls = get_sitemap_urls(sitemaps_path=sitemaps_path,
                                        country=country)
        if sitemap_urls:
            urls_array = np.array(sitemap_urls)
            urls_splitted = np.array_split(urls_array, 2)
            p = Process(target=init_scrap, args=(urls_splitted[0],))
            p.start()
            p2 = Process(target=init_scrap, args=(urls_splitted[1],))
            p2.start()
            p2.start()
            p.join()
            p2.join()
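Note the single-argument args idiom used above: args must be a tuple, and the trailing comma is what makes a 1-tuple, e.g.:

p = Process(target=init_scrap, args=(urls_splitted[0],))  # (x,) is a tuple; (x) is not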
Example #12
def start():
    builder = cluster_crawler.ClusterCrawler()
    start_block_id = int(sys.argv[1])
    block_id = start_block_id
    process = None
    while builder.crawl_block(block_id):
        if settings.debug or block_id % 100 == 0:
            print("Block %d crawled" % block_id)

        if (block_id - start_block_id > 0 and
                (block_id - start_block_id) % settings.block_crawling_limit == 0):
            builder.network_graph.check_integrity()
            while process is not None and process.is_alive():
                print("Waiting for insertion thread to complete...")
                process.join()

            if process is not None and process.exitcode > 0:  # error
                raise Exception("Errorcode %d in DB Sync Thread, aborting" %
                                process.exitcode)
            process = Process(
                target=builder.network_graph.synchronize_mongo_db)
            process.start()
            builder.network_graph = cluster_network.ClusterNetwork(
                settings.db_server, settings.db_port
            )  # start a new graph while the other graph's data is being inserted
            builder.connect_to_bitcoind_rpc()

        if (process is not None and not process.is_alive()
                and process.exitcode > 0):  # error
            raise Exception("Errorcode %d in DB Sync Thread, aborting" %
                            process.exitcode)
        block_id += 1

    # Finished crawling; flush the remainder to the DB.
    # Wait for any previous DB sync to finish.
    while process is not None and process.is_alive():
        print("Waiting for insertion thread to complete...")
        process.join()

    # Sync the rest
    print("Inserting into the DB")
    process = Process(target=builder.network_graph.synchronize_mongo_db)
    process.start()
    process.join()
Example #13
    def __init__(self, GameClass, starting_position=None, time_limit=3):
        super(AsyncIterativeDeepening, self).__init__(GameClass, starting_position)
        self.root = DeepeningNode(GameClass, starting_position)
        self.time_limit = time_limit

        self.parent_pipe, worker_pipe = Pipe()
        self.worker_process = Process(target=self.loop_func,
                                      args=(GameClass, starting_position, worker_pipe))
        self.receipt_backlog = 0
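The constructor only wires up the pipe and the worker; starting the search presumably happens elsewhere in the class, along the lines of (an assumption):

    def start(self):
        # Hypothetical: launch the background iterative-deepening worker.
        self.worker_process.start()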
Example #14
def do_cfg_multiprocess(processes=1):
    print('processes:: ', str(processes))
    procs = []
    for i in range(processes):
        print('_')
        print('_process #:: ', str(i))
        proc = Process(target=do_lite_function, args=(i, ))
        procs.append(proc)
        proc.start()
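The procs list here (and in Examples #16 and #27 below) is collected but never joined; callers that must wait for completion would need a follow-up such as (an addition, not in the original):

    for proc in procs:
        proc.join()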
Example #15
    def __init__(self):
        m = Manager()
        self.ports = m.dict()  # used ports
        self.data = m.dict()  # all the data (in a synced dict)
        self.lastUpdate = SyncDeltaVal()

        self.finished = Value('b', False)
        self.p = Process(target=self._ui)
        self.p.start()
Example #16
def do_main_driver_multiprocess(processes=1, reps=1):
    print('processes:: ', str(processes))
    procs = []
    for i in range(processes):
        print('_')
        print('_process #:: ', str(i))
        proc = Process(target=main_driver, args=(reps, ))
        procs.append(proc)
        proc.start()
Example #17
def run_app_as_process(command,
                       daemon=False,
                       shell=False,
                       state_queue=None) -> Process:
    p = Process(target=run_app,
                args=(command, shell, state_queue),
                daemon=daemon)
    p.start()
    return p
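Hypothetical usage of the helper, stopping the app once the caller is done with it:

p = run_app_as_process('python -m http.server', daemon=True, shell=True)
# ... interact with the running app ...
p.terminate()
p.join()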
Example #18
def manager_thread():

  Process(name="shutdownd", target=launcher, args=("selfdrive.shutdownd",)).start()
  system("am startservice com.neokii.optool/.MainService")
  system("am startservice com.neokii.openpilot/.MainService")

  cloudlog.info("manager start")
  cloudlog.info({"environ": os.environ})

  # save boot log
  #subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))

  ignore = []
  if os.getenv("NOBOARD") is not None:
    ignore.append("pandad")
  if os.getenv("BLOCK") is not None:
    ignore += os.getenv("BLOCK").split(",")

  ensure_running(managed_processes.values(), started=False, not_run=ignore)

  started_prev = False
  params = Params()
  sm = messaging.SubMaster(['deviceState'])
  pm = messaging.PubMaster(['managerState'])

  while True:
    sm.update()
    not_run = ignore[:]

    if sm['deviceState'].freeSpacePercent < 5:
      not_run.append("loggerd")

    started = sm['deviceState'].started
    driverview = params.get_bool("IsDriverViewEnabled")
    ensure_running(managed_processes.values(), started, driverview, not_run)

    # trigger an update after going offroad
    if started_prev and not started and 'updated' in managed_processes:
      os.sync()
      managed_processes['updated'].signal(signal.SIGHUP)

    started_prev = started

    running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                    for p in managed_processes.values() if p.proc]
    cloudlog.debug(' '.join(running_list))

    # send managerState
    msg = messaging.new_message('managerState')
    msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
    pm.send('managerState', msg)

    # TODO: let UI handle this
    # Exit main loop when uninstall is needed
    if params.get_bool("DoUninstall"):
      break
Example #19
def compute_hub_SALSA(inlinks, inlinks_count, outlinks, outlinks_count,
                      rootSet):
    print("Size of rootset is " + str(len(rootSet)))
    # computing hub scores
    salsa_hub_scores = dict()

    countBaseSetWithOutlinks = 0
    for page_id in rootSet:
        if page_id in outlinks:
            if len(outlinks[page_id]) > 0:
                countBaseSetWithOutlinks += 1
    print(countBaseSetWithOutlinks)

    for page_id in rootSet:
        if page_id in outlinks:
            if len(outlinks[page_id]) > 0:
                salsa_hub_scores[page_id] = 1 / countBaseSetWithOutlinks
            else:
                salsa_hub_scores[page_id] = 0
        else:
            salsa_hub_scores[page_id] = 0

    for iteration in range(0, 3):
        print(iteration)
        global temp_salsa_hub_scores
        count = 0

        page_list = list()
        procs = list()
        for page_id in rootSet:
            page_list.append(page_id)
            count += 1
            # note: pages remaining after the last full batch of 100 are never
            # dispatched to a worker process
            if count % 100 == 0:
                print(count)
                proc = Process(target=multi_scoring,
                               args=(
                                   page_list,
                                   inlinks,
                                   inlinks_count,
                                   outlinks,
                                   outlinks_count,
                                   temp_salsa_hub_scores,
                                   salsa_hub_scores,
                               ))
                page_list = list()
                procs.append(proc)
                proc.start()

        print("Total processes  " + str(len(procs)))
        for proc in procs:
            proc.join()

        for page_id in temp_salsa_hub_scores:
            salsa_hub_scores[page_id] = temp_salsa_hub_scores[page_id]
        temp_salsa_hub_scores = dict()
    return salsa_hub_scores
Example #20
def worker(queues):
    processes = [
        Process(target=_queue_worker, args=(queue, )) for queue in queues
    ]
    for p in processes:
        p.start()

    for p in processes:
        p.join()
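_queue_worker is not shown; a typical consumer loop with a None sentinel (the task protocol is an assumption):

def _queue_worker(queue):
    # Hypothetical: drain callables from the queue until a None sentinel arrives.
    while True:
        task = queue.get()
        if task is None:
            break
        task()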
Example #21
def dashboard():
    # This fixture guarantees the proper termination of all spawned subprocesses
    # after the tests.
    dashboard = Process(target=run_detection)
    dashboard.start()
    yield
    for child in psutil.Process(dashboard.pid).children(recursive=True):
        child.kill()
    dashboard.terminate()
    dashboard.join()
Example #22
def process_file(file_path_input, file_path_output, i):
    if os.path.exists(file_path_output):
        return print('{}    {}  Already done'.format(now(), file_path_output))

    input_file = open(file_path_input, 'r')
    output_file = open(file_path_output, 'a')

    # every worker creates a separate output file for its input file
    writer = csv.writer(output_file, delimiter='\t')

    # for every file we create a separate driver (100 URLs)
    driver = webdriver.PhantomJS(executable_path=path_to_phantomjs)

    for line in input_file:
        fields = line.split('\t')
        property_type = fields[0]
        url = fields[1]
        print('{}   Process={}  Current url: {}'.format(now(), i, url))

        # start process for getting microformat properties
        temp_queue = Queue()
        p = Process(target=get_microformat_properties_by_type,
                    args=(url, property_type, temp_queue, i))
        start_with_timeout(p, TIME_OUT_LOAD, "loading", url, i)

        event_properties = temp_queue.get() if not temp_queue.empty() else None
        if p.is_alive():
            p.terminate()
        if event_properties is not None:
            print("{}   Process={}  Got properties for  {}".format(
                now(), i, url))

            # start process for feature extraction and writing to separate file
            p_event_features = Process(target=get_event_features_and_write,
                                       args=(event_properties, driver, writer,
                                             i, output_file))
            start_with_timeout(p_event_features, TIME_OUT_FEATURE,
                               "feature extraction", url, i)
            if p_event_features.is_alive():
                p_event_features.terminate()  # stop the feature-extraction process on timeout

    return 'done'
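start_with_timeout is undefined in the excerpt; it plausibly starts the process and gives up waiting after a deadline (body is an assumption):

def start_with_timeout(p, timeout, stage, url, i):
    # Hypothetical: start the process and wait at most `timeout` seconds.
    p.start()
    p.join(timeout)
    if p.is_alive():
        print('{}   Process={}  Timeout during {} for {}'.format(now(), i, stage, url))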
Example #23
def start_socket_mode_server(test, port: int):
    if get_mock_server_mode() == "threading":
        test.sm_thread = threading.Thread(
            # note: target receives the *return value* of this call, so the
            # helper is presumably a factory that returns the server loop
            target=start_thread_socket_mode_server(test, port))
        test.sm_thread.daemon = True
        test.sm_thread.start()
        time.sleep(2)  # wait for the server
    else:
        test.sm_process = Process(target=start_process_socket_mode_server,
                                  kwargs={"port": port})
        test.sm_process.start()
Example #24
    def start_up_app(self):
        self.show_splash_screen()
        try:
            # load db in separate process
            process_startup = Process(target=LucteriosRefreshAll)
            process_startup.start()

            while process_startup.is_alive():
                # print('updating')
                self.splash.update()
        finally:
            self.remove_splash_screen()
Example #25
def test_sigterm(manager_queue: "queue.Queue[LogRecord]") -> None:
    """ProcessTaskPoolExecutor should shut down gracefully on SIGTERM."""
    process = Process(target=example_use_case_cancel_repost_process_id, kwargs={"queue_main": manager_queue})
    process.start()
    LocalSocket.receive()
    time.sleep(SECOND_SLEEP_FOR_TEST_SHORT)
    psutil_process = psutil.Process(process.pid)
    psutil_process.send_signal(SIGTERM)
    psutil_process.wait()
    # Reason: Requires to enhance types-psutil
    assert not psutil_process.is_running()  # type: ignore
    assert_graceful_shutdown(manager_queue)
Example #26
def run():
    # while True:
        for name in ['jd', 'sn']:  # names of the JSON config files stored under config
            custom_settings = get_config(name)
            spider = custom_settings.get('spider', 'aCrawler')  # Spider name to use for crawling
            project_settings = get_project_settings()
            settings = dict(project_settings.copy())
            settings.update(custom_settings.get('settings'))  # merge per-site settings into project settings
            process = Process(target=crawl, kwargs={'settings': settings, 'spider': spider, 'name': name,
                                                    'custom_settings': custom_settings})
            process.start()
            process.join()
Example #27
def do_sanity_multiprocess():
    names = ['A', 'B', 'C']
    procs = []
    # proc = Process(target=do_lite_function)
    # procs.append(proc)
    # proc.start()

    for count, name in enumerate(names, 1):
        print(count, name)
        proc = Process(target=do_lite_function, args=(name, ))
        procs.append(proc)
        proc.start()
Example #28
def __enter__(self):
    self.listener_ = Listener(self.address_, family='AF_INET')
    for _ in range(self.nb_children_):
        process = Process(target=creator,
                          args=(self.data_, self.models_, self.address_))
        process.start()
        self.children_.append(process)
    return self
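A matching __exit__ would close the listener and reap the children; a sketch (an assumption about the class):

def __exit__(self, exc_type, exc_value, traceback):
    # Hypothetical: shut down the listener and wait for all child processes.
    self.listener_.close()
    for child in self.children_:
        child.join()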
Example #29
def serve(routing: dict, open_browser=False, timeout=None, filename=''):
    p = Process(target=WebServer.serve_and_browse,
                args=(routing, open_browser, filename))
    p.start()
    # note: time.sleep(None) raises, so callers must pass a numeric timeout
    wait_for_server_seconds = timeout
    logging.info("Waiting for server %d seconds" % wait_for_server_seconds)
    time.sleep(wait_for_server_seconds)
    p.terminate()
Example #30
def keyboard_interrupt(cls, path_file_input: Path, path_file_output: Path) -> None:
    """Test handling of a keyboard interrupt (Ctrl+C)."""
    process = Process(target=cls.report_raises_keyboard_interrupt, args=(path_file_input, path_file_output))
    process.start()
    assert LocalSocket.receive() == "Ready"
    time.sleep(SECOND_SLEEP_FOR_TEST_KEYBOARD_INTERRUPT_CTRL_C_POSIX)
    cls.simulate_ctrl_c_in_posix(process)
    assert LocalSocket.receive() == "Test succeed"
    psutil_process = psutil.Process(process.pid)
    assert psutil_process.wait() == 0
    # Reason: Requires to enhance types-psutil
    assert not psutil_process.is_running()  # type: ignore