Example #1
class MultiProcessRunner(BaseRunner):
    JOIN_TIMEOUT = 60

    def __init__(self, target, **kwargs):
        super(MultiProcessRunner, self).__init__(target, **kwargs)
        self.process = None  # type: Process

    @capture_monitor_exception
    def start(self):
        self.process = Process(target=self.target, kwargs=self.kwargs)
        self.process.start()

    @capture_monitor_exception
    def stop(self):
        if self.process and self.is_alive():
            self.process.terminate()
            self.process.join(MultiProcessRunner.JOIN_TIMEOUT)
            if self.process.is_alive():
                self.process.kill()

    @capture_monitor_exception
    def heartbeat(self):
        # do we want to do something here?
        pass

    @capture_monitor_exception
    def is_alive(self):
        return self.process is not None and self.process.is_alive()

    def __str__(self):
        s = super(MultiProcessRunner, self).__str__()
        return f"{s}({self.process})"
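Note: `BaseRunner` and `capture_monitor_exception` are not shown in this example. A minimal sketch of what that scaffolding might look like (an assumption, not the real implementation):

import functools
import logging

def capture_monitor_exception(func):
    # Assumed behavior: log and swallow exceptions raised by runner methods.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            logging.exception("Error in %s", func.__name__)
    return wrapper

class BaseRunner:
    def __init__(self, target, **kwargs):
        self.target = target
        self.kwargs = kwargs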
Example #2
def start():
    builder = cluster_crawler.ClusterCrawler()
    start_block_id = int(sys.argv[1])
    block_id = start_block_id
    process = None
    while builder.crawl_block(block_id):
        print("Block %d crawled" % block_id)

        if block_id - start_block_id > 0 and (block_id - start_block_id) % settings.block_crawling_limit == 0:
            builder.network_graph.check_integrity()
            while process is not None and process.is_alive():
                print("Waiting for insertion thread to complete...")
                process.join()

            if process is not None and process.exitcode > 0:  # error
                raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
            process = Process(target=builder.network_graph.synchronize_mongo_db)
            process.start()
            builder.network_graph = cluster_network.ClusterNetwork(settings.db_server, settings.db_port)  # Starting a new graph while other graph data is inserted.
            builder.connect_to_bitcoind_rpc()

        if process is not None and not process.is_alive() and process.exitcode > 0:  # error
            raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
        block_id += 1

    # Finished crawling, flushing to DB.
    # Waiting for any previous DB sync.
    while process is not None and process.is_alive():
        print("Waiting for insertion thread to complete...")
        process.join()

    # Sync the rest
    process = Process(target=builder.network_graph.synchronize_mongo_db)
    process.start()
    process.join()
Example #3
def test_disk_locking_processes(block_maker, storage_factory, redis_hostname):
    def visit(storage, root):
        ds = block_maker.first_ds(first_constant=2, ids_arg=3)
        cached = ds >> Apply(image=sleeper(0.1)) >> CacheToDisk(
            root, storage, JsonSerializer(), 'image')

        for i in ds.ids:
            assert ds.image(i) == cached.image(i)
            assert ds.image(i) == cached.image(i)

    for _ in range(5):
        with tempfile.TemporaryDirectory() as temp, storage_factory() as temp_storage:
            temp = Path(temp) / 'cache'
            init_storage(temp,
                         algorithm={
                             'name': 'blake2b',
                             'digest_size': 64
                         },
                         levels=[1, 31, 32],
                         locker={
                             'name': 'RedisLocker',
                             'args': [redis_hostname],
                             'kwargs': {
                                 'prefix': 'connectome.tests',
                                 'expire': 10
                             }
                         })

            th = Process(target=visit, args=(temp_storage, temp))
            th.start()
            visit(temp_storage, temp)
            th.join()
Example #4
def test_agent() -> None:
    stop_ev = Event()
    conn, agent_conn = Pipe()

    waypoints = [(-80.0, -65.0, 3.0), (-45.0, -66.0, 3.0), (-33.0, -65.0, 3.0),
                 (-33.0, -65.0, 0.3)]
    aut = Agent(uid=1, motion=MotionHectorQuad("/drone1"), waypoints=waypoints)

    p = Process(target=run_as_process,
                kwargs={
                    "aut": aut,
                    "conn": agent_conn,
                    "stop_ev": stop_ev
                })
    p.start()

    try:
        for i in range(10):
            if conn.poll(1.0):
                act = conn.recv()
                print(act)
                if act[0] == "request":
                    reply = input("> ")
                    conn.send(("reply", {
                        "uid": aut.uid,
                        "acquired": Contract()
                    }))
            else:
                print("Response timeout")
    finally:
        stop_ev.set()  # Stop all automatons
        p.join()
        conn.close()
        agent_conn.close()
Example #5
def test_contract_manager() -> None:
    aut = AirspaceManager()
    stop_ev = Event()
    conn, manager_conn = Pipe()

    p = Process(target=run_as_process, kwargs={"aut": aut,
                                               "conn": manager_conn,
                                               "stop_ev": stop_ev})
    p.start()
    try:
        uid = 0
        for i in range(10):
            if conn.poll(1.0):
                act = conn.recv()
                print(act)
            elif i % 3 != 2:
                uid = i % 5
                target = Contract.from_stamped_rectangles([
                    (0.0, Rectangle(mins=[0, 0, 0], maxes=[1, 1, 0.5])),
                    (0.5, Rectangle(mins=[0, 0.5, 0], maxes=[2, 3, 0.5])),
                    (1.0, Rectangle(mins=[0.5, 0.5, 1.0], maxes=[1.5, 1.5, 1.5]))
                    ])
                conn.send(("request", {"uid": uid, "target": target}))
            else:
                releasable = Contract.from_stamped_rectangles([
                    (0.0, Rectangle(mins=[0, 0, 0], maxes=[1, 1, 0.5])),
                    (0.5, Rectangle(mins=[0, 0.5, 0], maxes=[2, 2, 0.5]))
                    ])
                print("Agent " + str(uid) + " release > " + str(releasable))
                conn.send(("release", {"uid": uid, "releasable": releasable}))
    finally:
        stop_ev.set()  # Stop all automatons
        p.join()
        conn.close()
        manager_conn.close()
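Note: both agent tests above hand `run_as_process` an automaton, a pipe end, and a stop event, but the entry point itself is not shown. A minimal sketch under those assumptions (the `handle` method is hypothetical):

def run_as_process(aut, conn, stop_ev) -> None:
    # Loop until the parent sets the stop event.
    while not stop_ev.is_set():
        if conn.poll(0.1):       # react to messages from the test process
            act = conn.recv()
            aut.handle(act)      # hypothetical dispatch on the automaton
        # ... advance the automaton by one step here ...
    conn.close()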
Example #6
    def proof_of_work(self, last_block):
        """
        Simple Proof of Work algorithm:
        - Find a number p' such that hash(pp') contains 4 leading zeroes,
          where p is the previous proof and p' is the new proof.

        :param last_block: <dict> last Block
        :return: <int>
        """

        last_proof = last_block['proof']
        last_hash = self.hash(last_block)

        # Flags
        flags = Value(c_bool, False)
        proof_result = Value(c_int, 0)

        # Each worker searches a disjoint range, starting at its lower bound.
        bounds = [0, 75001, 150001, 225001, 300001, sys.maxsize]

        start_time = time.time()
        workers = []
        for i in range(5):
            p = Process(target=self.find_proof,
                        args=(flags, i, last_proof, bounds[i], last_hash,
                              proof_result, bounds[i], bounds[i + 1]))
            workers.append(p)
            p.start()
        for p in workers:
            p.join()

        end_time = time.time()
        print('Elapsed time: ', end_time - start_time)

        print('proof_result : ', proof_result.value)

        return proof_result.value
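Note: `find_proof` is not shown; judging from its arguments, each worker scans its own sub-range and publishes the first valid proof through the shared `Value` objects. A sketch under those assumptions (the exact guess format is an assumption):

import hashlib

def find_proof(self, flags, index, last_proof, proof, last_hash,
               proof_result, start, end):
    # Scan [start, end) until a proof is found here or in another worker.
    while proof < end and not flags.value:
        guess = f'{last_proof}{proof}{last_hash}'.encode()
        if hashlib.sha256(guess).hexdigest()[:4] == '0000':
            flags.value = True           # signal the other workers to stop
            proof_result.value = proof   # publish the winning proof
            return
        proof += 1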
Example #7
def compute_hub_SALSA(inlinks, inlinks_count, outlinks, outlinks_count,
                      rootSet):
    print("Size of rootset is " + str(len(rootSet)))
    # computing hub scores
    salsa_hub_scores = dict()

    countBaseSetWithOutlinks = 0
    for page_id in rootSet:
        if page_id in outlinks and len(outlinks[page_id]) > 0:
            countBaseSetWithOutlinks += 1
    print(countBaseSetWithOutlinks)

    for page_id in rootSet:
        if page_id in outlinks and len(outlinks[page_id]) > 0:
            salsa_hub_scores[page_id] = 1 / countBaseSetWithOutlinks
        else:
            salsa_hub_scores[page_id] = 0

    for iteration in range(3):
        print(iteration)
        # NOTE: temp_salsa_hub_scores must be a shared mapping (e.g. a
        # multiprocessing.Manager().dict()); writes made by the child
        # processes are lost with a plain dict.
        global temp_salsa_hub_scores
        count = 0

        page_list = list()
        procs = list()
        for page_id in rootSet:
            page_list.append(page_id)
            count += 1
            if count % 100 == 0:
                print(count)
                proc = Process(target=multi_scoring,
                               args=(page_list, inlinks, inlinks_count,
                                     outlinks, outlinks_count,
                                     temp_salsa_hub_scores, salsa_hub_scores))
                page_list = list()
                procs.append(proc)
                proc.start()

        # Score the remaining pages that did not fill a batch of 100.
        if page_list:
            proc = Process(target=multi_scoring,
                           args=(page_list, inlinks, inlinks_count,
                                 outlinks, outlinks_count,
                                 temp_salsa_hub_scores, salsa_hub_scores))
            procs.append(proc)
            proc.start()

        print("Total processes  " + str(len(procs)))
        for proc in procs:
            proc.join()

        for page_id in temp_salsa_hub_scores:
            salsa_hub_scores[page_id] = temp_salsa_hub_scores[page_id]
        temp_salsa_hub_scores.clear()  # clear in place so a shared mapping stays shared
    return salsa_hub_scores
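Note: the child processes above can only report scores back if `temp_salsa_hub_scores` is a shared mapping; writes to a plain module-level dict vanish when the child exits. A minimal illustration of the shared variant:

from multiprocessing import Manager, Process

def score_demo(pages, shared_scores):
    for page_id in pages:
        shared_scores[page_id] = 1.0   # visible to the parent process

if __name__ == '__main__':
    manager = Manager()
    shared_scores = manager.dict()     # proxied dict, shared across processes
    proc = Process(target=score_demo, args=(['a', 'b'], shared_scores))
    proc.start()
    proc.join()
    print(dict(shared_scores))         # {'a': 1.0, 'b': 1.0}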
Example #8
def dashboard():
    # This fixture guarantees the proper termination of all spawned subprocesses
    # after the tests.
    dashboard = Process(target=run_detection)
    dashboard.start()
    yield
    for child in psutil.Process(dashboard.pid).children(recursive=True):
        child.kill()
    dashboard.terminate()
    dashboard.join()
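Note: the comment calls this a fixture, but the decorator is not shown in the snippet. If it is a pytest fixture, the registration would presumably be:

import pytest

@pytest.fixture   # assumed; the decorator appears to have been stripped
def dashboard():
    ...           # body as in Example #8 above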
Example #9
def main():
    size = os.path.getsize("dict.txt")
    half_size = size // 2

    p1 = Process(target=read_part1, args=("dict.txt", half_size))
    p2 = Process(target=read_part2, args=("dict.txt", half_size))
    p1.start()
    p2.start()

    p1.join()
    p2.join()
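Note: `read_part1` and `read_part2` are not shown; presumably each reads one half of the file, split at the byte offset `half_size`. A sketch under that assumption:

def read_part1(path, half_size):
    with open(path, 'rb') as f:
        data = f.read(half_size)   # first half: bytes [0, half_size)
    print('part1 read %d bytes' % len(data))

def read_part2(path, half_size):
    with open(path, 'rb') as f:
        f.seek(half_size)          # skip the first half
        data = f.read()            # second half: bytes [half_size, end)
    print('part2 read %d bytes' % len(data))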
Example #10
def run():
    # while True:
    for name in ['jd', 'sn']:  # corresponds to the JSON config files stored in config
        custom_settings = get_config(name)  # name of the Spider used for crawling
        spider = custom_settings.get('spider', 'aCrawler')
        project_settings = get_project_settings()
        settings = dict(project_settings.copy())  # merge the settings
        settings.update(custom_settings.get('settings'))
        process = Process(target=crawl, kwargs={'settings': settings, 'spider': spider, 'name': name,
                                                'custom_settings': custom_settings})
        process.start()
        process.join()
Example #11
def run():
    while True:
        for name in ['sina', 'xinhua']:  # corresponds to the JSON config files stored in config
            custom_settings = get_config(name)  # name of the Spider used for crawling
            spider = custom_settings.get('spider', 'sina')
            project_settings = get_project_settings()
            settings = dict(project_settings.copy())  # merge the settings
            settings.update(custom_settings.get('settings'))
            process = Process(target=crawl, kwargs={'settings': settings, 'spider': spider, 'name': name,
                                                    'custom_settings': custom_settings})
            process.start()
            process.join()
        time.sleep(60 * 60 * 24)  # run once a day
Example #12
    def test_keyboard_interrupt_on_linux(self) -> None:
        """
        - A keyboard interrupt should reach all descendant processes.
        - A keyboard interrupt should shut down ProcessTaskPoolExecutor gracefully.
        """
        process = Process(target=self.report_raises_keyboard_interrupt)
        process.start()
        LocalSocket.receive()
        time.sleep(SECOND_SLEEP_FOR_TEST_SHORT)
        self.simulate_ctrl_c_in_posix(process)
        assert LocalSocket.receive() == "Test succeed"
        process.join()
        assert process.exitcode == 0
        assert not process.is_alive()
Example #13
def main():
    print('Main process started')
    p = Process(target=test, args=(1, ))
    p.start()
    print(p.is_alive())
    print('Main process ending')
    print(p.is_alive())
    print(p.is_alive())
    print(p.is_alive())
    print(p.is_alive())
    print(p.is_alive())
    print('Child process name:', p.name)
    print('Child process pid:', p.pid)
    p.terminate()
    p.join(20)
Example #14
def main():
    logging.info("Main thread started")
    maxValue = 3
    worker = Process(target=work,
                     args=("working", maxValue),
                     daemon=True,
                     name="Worker")
    worker.start()
    time.sleep(5)
    # if the process is running , stop it
    if worker.is_alive():
        worker.terminate()  # pretty dangerous
    worker.join()

    logging.info(f"Main thread finished {worker.exitcode}")
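Note: `work` is not shown; given the arguments and the terminate after five seconds, it is presumably a long-running loop, e.g.:

import time

def work(label, max_value):
    # Hypothetical target: count in a loop until the parent terminates us.
    value = 0
    while True:
        value = (value + 1) % (max_value + 1)
        print(f'{label}: {value}')
        time.sleep(1)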
Example #15
def test_parallel_read_processes(storage_factory, redis_hostname):
    def job():
        storage.load(lambda x: time.sleep(1), key)

    with storage_factory({'name': 'RedisLocker', 'args': [redis_hostname],
                          'kwargs': {'prefix': 'connectome.tests', 'expire': 10}}) as storage:
        key = storage.store(__file__)

        start = time.time()
        th = Process(target=job)
        th.start()
        job()
        th.join()
        stop = time.time()

        assert stop - start < 1.5
Example #16
@contextlib.contextmanager  # required for the `with` usage shown below
def setup_and_teardown_flask_app(app: Flask, host: str, port: int):
    """
    Manages setup of the provided flask app on the given `host` and `port`, and its teardown.

    The setup does the following:
        * a `/health` endpoint is added to the provided flask app,
        * the app is launched in a separate process,
        * the function waits for the flask app to fully launch by repeatedly polling
            the `/health` endpoint until it returns status code 200.

    Example use of this function in a fixture:

    >>> with setup_and_teardown_flask_app(Flask(__name__), "localhost", 10000):
    >>>     yield

    :param app: app to launch
    :param host: host on which to launch app
    :param port: port on which to launch app
    """
    def wait_for_flask_app_to_be_accessible():
        timeout = 1
        end_time = datetime.now() + timedelta(seconds=timeout)
        response = requests.Response()
        response.status_code = HTTP_404_NOT_FOUND

        while response.status_code != HTTP_200_OK and datetime.now() < end_time:
            with contextlib.suppress(requests.exceptions.ConnectionError):
                response = requests.request(
                    "POST", "http://{}:{}/health".format(host, port))
            time.sleep(0.01)

        fail_message = "Timeout expired: failed to start mock REST API in {} seconds".format(
            timeout)
        assert response.status_code == HTTP_200_OK, fail_message

    app.route("/health", methods=["POST"])(lambda: "OK")

    process = Process(target=app.run, args=(host, port))
    process.start()

    wait_for_flask_app_to_be_accessible()
    yield

    process.terminate()
    process.join()
Example #17
def do_scraping_from_sitemaps(country: str):
    """Launch scraping for each of the hotels."""
    sitemaps_paths = get_all_sitemaps_paths()
    for sitemaps_path in sitemaps_paths:
        sitemap_urls = get_sitemap_urls(sitemaps_path=sitemaps_path,
                                        country=country)
        if sitemap_urls:
            urls = np.array(sitemap_urls)
            urls_splitted = np.array_split(urls, 2)
            p = Process(target=init_scrap, args=(urls_splitted[0],))
            p.start()
            p2 = Process(target=init_scrap, args=(urls_splitted[1],))
            p2.start()
            p.join()
            p2.join()
Example #18
def main():
    logging.info(f'App started')

    max = 2
    worker = Process(target=work, args=['Working', max], daemon=True, name='Worker')
    worker.start()

    time.sleep(5)

    # if the process is running, stop it
    if worker.is_alive():
        worker.terminate()   # kill the process with SIGTERM
    worker.join()

    # exitcode == 0 is good
    # anything else is an error
    logging.info(f'App finished: {worker.exitcode}')
Example #19
def start():
    builder = cluster_crawler.ClusterCrawler()
    start_block_id = int(sys.argv[1])
    block_id = start_block_id
    process = None
    while builder.crawl_block(block_id):
        if settings.debug or block_id % 100 == 0:
            print("Block %d crawled" % block_id)

        if (block_id - start_block_id > 0 and
                (block_id - start_block_id) % settings.block_crawling_limit == 0):
            builder.network_graph.check_integrity()
            while process is not None and process.is_alive():
                print("Waiting for insertion thread to complete...")
                process.join()

            if process is not None and process.exitcode > 0:  #error
                raise Exception("Errorcode %d in DB Sync Thread, aborting" %
                                process.exitcode)
            process = Process(
                target=builder.network_graph.synchronize_mongo_db)
            process.start()
            builder.network_graph = cluster_network.ClusterNetwork(
                settings.db_server, settings.db_port
            )  #Starting a new graph while other graph data is inserted.
            builder.connect_to_bitcoind_rpc()

        if process is not None and not process.is_alive() and process.exitcode > 0:  # error
            raise Exception("Errorcode %d in DB Sync Thread, aborting" %
                            process.exitcode)
        block_id += 1

    #Finished Crawling, Flushing to DB.
    #Waiting for any previous DB Sync
    while process is not None and process.is_alive():
        print("Waiting for insertion thread to complete...")
        process.join()

    #Sync the rest
    print("Inserting into the DB")
    process = Process(target=builder.network_graph.synchronize_mongo_db)
    process.start()
    process.join()
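One caveat in the crawler pattern above: `process.exitcode > 0` misses workers killed by a signal, which report a negative exitcode. A stricter check could look like:

def check_sync_process(process):
    # Treat any nonzero exitcode, including negative (signal-killed) ones,
    # as a failure of the DB sync process.
    if process is not None and not process.is_alive() \
            and process.exitcode not in (None, 0):
        raise Exception("Errorcode %d in DB Sync Thread, aborting"
                        % process.exitcode)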
Example #20
    def get_data_from_sn(self, search_list, database_name):

        if database_name == 'neo':
            sn_node = graph.find_one("SN", "name", "SOCIALNETWORKS")
            twitter_node = graph.merge_one("Twitter", "name", "TWITTER")
            sn_has_twitter = Relationship(sn_node, "HAS", twitter_node)
            graph.create_unique(sn_has_twitter)

        twitter_searcher = TwitterSearcher()
        twitter_searcher_process = Process(target=twitter_searcher.run_twitter_search, args=(search_list, database_name,))

        twitter_streamer = TwitterStreamer()
        twitter_streamer_process = Process(target=twitter_streamer.run_twitter_stream, args=(search_list, database_name,))

        twitter_searcher_process.start()
        twitter_streamer_process.start()
        twitter_streamer_process.join()
        twitter_searcher_process.join()
Example #21
def parser(final_link, file_name):
    def crawl():
        crawler = CrawlerProcess(settings={
            "FEEDS": {
                file_name: {
                    "format": "jl",
                    "encoding": "utf-8"
                },
            },
        })
        crawler.crawl(WildberriesSpider, start_urls=[final_link])
        crawler.start()

    process = Process(target=crawl)
    process.start()
    process.join()

    return file_name
Example #22
def generate_settings(base_requirement: Requirements,
                      heterogeneity_score: Callable[
                          [Requirements, Requirements], float],
                      steps: int = 5,
                      arch_steps: int = 5,
                      percentage: float = 1,
                      folder: str = './data',
                      cores: int = None) -> None:
    """Saves the settings in the given folder"""
    probs_for_archs = generate_arch_probs(arch_steps)
    old_probs_per_arch = generate_probabilities(steps)
    if cores is None:
        cores = multiprocessing.cpu_count()

    now = datetime.datetime.now()
    now = now.strftime('%Y_%m_%d_%H_%M_%S')
    folder = f'{folder}/{now}_archsteps-{arch_steps}_steps-{steps}_percentage-{percentage}'
    Path(folder).mkdir(parents=True, exist_ok=True)
    old_probs_per_arch = filter_invalid_settings(old_probs_per_arch)
    probs_per_arch = defaultdict(list)
    for arch, old_probs in old_probs_per_arch.items():
        for values in old_probs.values():
            probs_per_arch[arch].append(
                choose_attribute_settings(values, percentage))

    split = np.array_split(probs_for_archs, cores)
    ps = []
    for i in range(cores):
        if len(split[i]) > 0:
            # process_arches(split[i], probs_per_arch, base_requirement, heterogeneity_score, folder,)
            p = Process(target=process_arches,
                        args=(
                            split[i],
                            probs_per_arch,
                            base_requirement,
                            heterogeneity_score,
                            folder,
                        ))
            p.start()
            ps.append(p)

    for p in ps:
        p.join()
Example #23
def start():
    builder = GraphBuilder()
    start_block_id = int(sys.argv[1])
    block_id = start_block_id
    process = None
    try:
        while builder.crawl_block(block_id):
            print("Block %d crawled" % block_id)

            if block_id - start_block_id > 0 and (block_id - start_block_id) % Settings.block_crawling_limit == 0:
                builder.network_graph.check_integrity()
                while process is not None and process.is_alive():
                    print("Waiting for insertion thread to complete...")
                    process.join()

                if process is not None and process.exitcode > 0:  # error
                    raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
                process = Process(target=builder.network_graph.synchronize_mongo_db)
                process.start()
                builder.network_graph = NetworkGraph.Network(Settings.db_server, Settings.db_port)  # Starting a new graph while other graph data is inserted.

            if process is not None and not process.is_alive() and process.exitcode > 0:  # error
                raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
            block_id += 1

        # Finished crawling, flushing to DB.
        # Waiting for any previous DB sync.
        while process is not None and process.is_alive():
            print("Waiting for insertion thread to complete...")
            process.join()

        # Sync the rest
        process = Process(target=builder.network_graph.synchronize_mongo_db)
        process.start()
        process.join()

        # DONE!

    # For debugging purposes
    except:
        input("An exception is about to be raised ")
        raise
Example #24
def main():
    logging.info('Started')

    max = 2
    worker = Process(target=work,
                     args=['Working', max],
                     daemon=True,
                     name='Super Mario')
    worker.start()

    time.sleep(5)

    #if the process is running, stop it
    if worker.is_alive():
        worker.terminate()
    worker.join()

    #exitcode == 0
    #Anything else is an error
    logging.info(f'Finished: {worker.exitcode}')
Example #25
    def get_data_from_sn(self, search_list, database_name):

        if database_name == 'neo':
            sn_node = graph.find_one("SN", "name", "SOCIALNETWORKS")
            facebook_node = graph.merge_one("Facebook", "name", "FACEBOOK")
            sn_has_facebook = Relationship(sn_node, "HAS", facebook_node)
            graph.create_unique(sn_has_facebook)


        event_searcher = EventSearcher()
        event_process = Process(target=event_searcher.run_facebook_events, args=(search_list, database_name,))

        group_searcher = GroupSearcher()
        group_process = Process(target=group_searcher.run_facebook_groups, args=(search_list, database_name,))

        page_searcher = PageSearcher()
        page_process = Process(target=page_searcher.run_facebook_pages, args=(search_list, database_name,))

        event_process.start()
        group_process.start()
        page_process.start()

        event_process.join()
        group_process.join()
        page_process.join()
Example #26
class Temp_API_Server:
    def __init__(self):
        self.port = random_port()
        self.proc = None

    def __enter__(self):
        self.start_server()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop_server()

    def http_GET(self, path=''):
        try:
            full_url = urljoin(self.server_url(), path)
            return GET_json(full_url)
        except Exception:  # e.g. connection refused while the server is still starting
            return None

    def server_running(self):
        return self.proc.is_alive()

    def start_server(self):
        self.proc = Process(target=run_server, args=[self.port])
        self.proc.start()
        return self.wait_for_server_ok()

    def stop_server(self):
        self.proc.kill()
        self.proc.join()

    def server_url(self):
        return f"http://127.0.0.1:{self.port}"

    def wait_for_server_ok(self, max_attempts=20, wait_interval=0.1):
        for _ in range(max_attempts):
            status = self.http_GET('/health')
            if status:
                return status
            wait_for(wait_interval)
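Note: `run_server`, `random_port`, `GET_json`, and `wait_for` come from elsewhere in that codebase; hypothetical stand-ins, just to make the sketch self-contained, might be:

import json
import random
import time
from urllib.request import urlopen

def random_port():
    return random.randint(20000, 40000)

def wait_for(seconds):
    time.sleep(seconds)

def GET_json(url):
    with urlopen(url) as response:
        return json.load(response)

def run_server(port):
    ...  # start the API server under test on the given port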
Example #27
    def run_all(self, nof_processes: int):
        procs = []
        if len(self.scenarios) == 0:
            return
        if len(self.scenarios) < nof_processes:
            nof_processes = len(self.scenarios)
        width = math.ceil(len(self.scenarios) / float(nof_processes))
        for i in range(0, nof_processes):
            log_file = os.path.join(self.output_dir, "experiment_log_{}{}.log".format(self.prefix, i))
            data_file = os.path.join(self.output_dir, "experiment_data_{}{}.log".format(self.prefix, i))
            l = i * width
            u = min((i + 1) * width, len(self.scenarios))
            if nof_processes == 1:
                ExperimentRunner._run_scenarios(self.scenarios, data_file, log_file)
            else:
                part = self.scenarios[l:u]
                p = Process(target=ExperimentRunner._run_scenarios, args=(part, data_file, log_file))
                procs.append(p)
                p.start()

        for p in procs:
            p.join()
Example #28
def main():
    name = process.current_process().name
    logging.info(f'{name} started')

    #Setup the process
    address = 'localhost'  #127.0.0.1
    port = 2823  # above 1024
    password = b'password'

    p = Process(target=proc,
                args=[address, port, password],
                daemon=True,
                name="Worker")
    p.start()

    logging.info(f'{name} waiting on the worker...')
    time.sleep(1)

    #Connect to the process
    dest = (address, port)
    conn = Client(dest, authkey=password)

    #Command loop
    while True:
        command = input('\r\nEnter a command or type quit:\r\n').strip()
        logging.info(f'{name} command: {command}')
        conn.send(command)
        if command == 'quit':
            break

    #Cleanup and shutdown
    if p.is_alive():
        logging.info(f'{name} terminating worker')
        conn.close()
        time.sleep(1)
        p.terminate()
    p.join()

    logging.info(f'{name} finished')
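Note: `proc` is the server side of the connection. Since the parent connects with `Client`, the worker presumably runs a matching `multiprocessing.connection.Listener`; a sketch under that assumption:

from multiprocessing.connection import Listener

def proc(address, port, password):
    # Accept one connection and echo commands until 'quit' arrives.
    with Listener((address, port), authkey=password) as listener:
        with listener.accept() as conn:
            while True:
                command = conn.recv()
                print(f'worker received: {command}')
                if command == 'quit':
                    break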
Example #29
def automatizar_descargas():
    book = open_workbook("seed.xlsx")
    sheet = book.sheet_by_index(0)  # If your data is on sheet 1
    columna_nombres = []
    for row in range(1, sheet.nrows):  # start from 1, to leave out row 0
        columna_nombres.append(" ".join(
            str(sheet.row_values(row)[0]).upper().strip().split()))  # extract from column zero

    lista_personas_no_encontradas = manager.list()  # <-- can be shared between processes.
    lista_personas_encontradas = manager.list()  # <-- can be shared between processes.

    columnas_de_nombres = chunkIt(columna_nombres, 1)
    processes = []

    for nombres in columnas_de_nombres:
        p = Process(target=descargar_archivos_persona,
                    args=(nombres, lista_personas_no_encontradas,
                          lista_personas_encontradas))  # Passing the list
        p.start()
        processes.append(p)
    for p in processes:
        p.join()

    with xlsxwriter.Workbook('NO_ENCONTRADAS.xlsx') as workbook:
        worksheet = workbook.add_worksheet()
        worksheet.write_column('A1', lista_personas_no_encontradas)

    with xlsxwriter.Workbook('ENCONTRADAS.xlsx') as workbook:
        worksheet = workbook.add_worksheet()
        worksheet.write_column('A1', lista_personas_encontradas)

    # done
    print("+++++++++++++++++ Downloads finished!!!!!")
Example #30
def _run_repo_main(_args):
    p = Process(target=repo.main, args=(_args,))
    p.start()
    p.join()
Example #31
def build_in_subprocess(exec_file_path: Path, output_dir: Path):
    process = Process(target=build, args=(exec_file_path, output_dir), kwargs=dict(reload=True))
    process.start()
    process.join()
Example #32
class SubprocEnvWorker(EnvWorker):
    """Subprocess worker used in SubprocVectorEnv and ShmemVectorEnv."""

    def __init__(
        self, env_fn: Callable[[], gym.Env], share_memory: bool = False
    ) -> None:
        super().__init__(env_fn)
        self.parent_remote, self.child_remote = Pipe()
        self.share_memory = share_memory
        self.buffer: Optional[Union[dict, tuple, ShArray]] = None
        if self.share_memory:
            dummy = env_fn()
            obs_space = dummy.observation_space
            dummy.close()
            del dummy
            self.buffer = _setup_buf(obs_space)
        args = (
            self.parent_remote,
            self.child_remote,
            CloudpickleWrapper(env_fn),
            self.buffer,
        )
        self.process = Process(target=_worker, args=args, daemon=True)
        self.process.start()
        self.child_remote.close()

    def __getattr__(self, key: str) -> Any:
        self.parent_remote.send(["getattr", key])
        return self.parent_remote.recv()

    def _decode_obs(self) -> Union[dict, tuple, np.ndarray]:
        def decode_obs(
            buffer: Optional[Union[dict, tuple, ShArray]]
        ) -> Union[dict, tuple, np.ndarray]:
            if isinstance(buffer, ShArray):
                return buffer.get()
            elif isinstance(buffer, tuple):
                return tuple([decode_obs(b) for b in buffer])
            elif isinstance(buffer, dict):
                return {k: decode_obs(v) for k, v in buffer.items()}
            else:
                raise NotImplementedError

        return decode_obs(self.buffer)

    def reset(self) -> Any:
        self.parent_remote.send(["reset", None])
        obs = self.parent_remote.recv()
        if self.share_memory:
            obs = self._decode_obs()
        return obs

    @staticmethod
    def wait(  # type: ignore
        workers: List["SubprocEnvWorker"],
        wait_num: int,
        timeout: Optional[float] = None,
    ) -> List["SubprocEnvWorker"]:
        remain_conns = conns = [x.parent_remote for x in workers]
        ready_conns: List[connection.Connection] = []
        remain_time, t1 = timeout, time.time()
        while len(remain_conns) > 0 and len(ready_conns) < wait_num:
            if timeout:
                remain_time = timeout - (time.time() - t1)
                if remain_time <= 0:
                    break
            # connection.wait hangs if the list is empty
            new_ready_conns = connection.wait(
                remain_conns, timeout=remain_time)
            ready_conns.extend(new_ready_conns)  # type: ignore
            remain_conns = [
                conn for conn in remain_conns if conn not in ready_conns]
        return [workers[conns.index(con)] for con in ready_conns]

    def send_action(self, action: np.ndarray) -> None:
        self.parent_remote.send(["step", action])

    def get_result(
        self,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        obs, rew, done, info = self.parent_remote.recv()
        if self.share_memory:
            obs = self._decode_obs()
        return obs, rew, done, info

    def seed(self, seed: Optional[int] = None) -> Optional[List[int]]:
        self.parent_remote.send(["seed", seed])
        return self.parent_remote.recv()

    def render(self, **kwargs: Any) -> Any:
        self.parent_remote.send(["render", kwargs])
        return self.parent_remote.recv()

    def close_env(self) -> None:
        try:
            self.parent_remote.send(["close", None])
            # mp may be deleted so it may raise AttributeError
            self.parent_remote.recv()
            self.process.join()
        except (BrokenPipeError, EOFError, AttributeError):
            pass
        # ensure the subproc is terminated
        self.process.terminate()
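Note: `_worker` is the loop running inside the subprocess; the class above defines its message protocol (`getattr`, `reset`, `step`, `seed`, `render`, `close`). A simplified sketch of that loop, ignoring the shared-memory buffer:

def _worker(parent_remote, child_remote, env_fn_wrapper, buffer=None):
    parent_remote.close()            # this end belongs to the parent
    env = env_fn_wrapper.data()      # assumed: the wrapper carries the env factory
    while True:
        cmd, data = child_remote.recv()
        if cmd == "step":
            child_remote.send(env.step(data))
        elif cmd == "reset":
            child_remote.send(env.reset())
        elif cmd == "seed":
            child_remote.send(env.seed(data) if hasattr(env, "seed") else None)
        elif cmd == "render":
            child_remote.send(env.render(**data))
        elif cmd == "getattr":
            child_remote.send(getattr(env, data, None))
        elif cmd == "close":
            child_remote.send(env.close())
            break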
Example #33
        if l == ord('q'):
            break
        maxInd = np.argmax(predictions)
        NNState = IndexToState[maxInd]
        m['NeuralNetworkState'] = NNState


if __name__ == '__main__':
    print("starting")
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
    GPIO.setup(GPIO_ECHO, GPIO.IN)

    m = mp.Manager().dict()
    t = mp.Value('d', 0)
    m['NeuralNetworkState'] = NeuralNetWorkRead.Stop
    m['distance'] = 0
    m['confidence'] = 1
    m['Start'] = 0
    m['NetworkDone'] = 0
    picar.setup()
    p1 = Process(target=RobotProccess, args=(m, ))
    p2 = Process(target=DistanceProccess, args=(m, ))
    p3 = Process(target=GetNeuralNetworkResponseProccess, args=(m, ))
    p1.start()
    p2.start()
    p3.start()
    p1.join()
    p2.join()
    p3.join()
Example #34
class AsyncMCTS(MoveChooser):
    """
    Implementation of Monte Carlo Tree Search that uses the other player's time to continue thinking.
    This is achieved using multiprocessing, and a Pipe for transferring data to and from the worker process.
    """
    def __init__(self,
                 GameClass,
                 starting_position,
                 time_limit=3,
                 network=None,
                 c=np.sqrt(2),
                 d=1,
                 threads=1):
        """
        Either:
        If network is provided, threads must be 1.
        If network is not provided, then threads will be used for leaf parallelization
        """
        super().__init__(GameClass, starting_position)
        if network is not None and threads != 1:
            raise Exception('Threads != 1 with Network != None')

        self.parent_pipe, worker_pipe = Pipe()
        self.worker_process = Process(target=self.loop_func,
                                      args=(GameClass, starting_position,
                                            time_limit, network, c, d, threads,
                                            worker_pipe))

    def start(self):
        self.worker_process.start()

    def report_user_move(self, user_chosen_move):
        """
        Reports the given user chosen move to the worker thread.
        This allows the search tree to be narrowed.

        :param user_chosen_move:
        """
        self.parent_pipe.send(user_chosen_move)
        self.position = user_chosen_move

    def choose_move(self, return_distribution=False):
        """
        Instructs the worker thread to decide on an optimal move.
        The worker thread will then continue thinking for time_limit, and then return a list of its chosen moves.
        If multiple states are passed through before the ai's turn is completed,
        then they will be the contents of the list. Otherwise the list will have a single state.

        :return: The moves chosen by monte carlo tree search.
        """
        self.parent_pipe.send(None)
        chosen_positions = self.parent_pipe.recv()
        self.position = chosen_positions[-1][0]
        return chosen_positions if return_distribution else [
            position for position, _ in chosen_positions
        ]

    def terminate(self):
        self.worker_process.terminate()
        self.worker_process.join()

    @staticmethod
    def loop_func(GameClass, position, time_limit, network, c, d, threads,
                  worker_pipe):
        if network is None:
            pool = Pool(threads) if threads > 1 else None
            root = RolloutNode(position,
                               parent=None,
                               GameClass=GameClass,
                               c=c,
                               rollout_batch_size=threads,
                               pool=pool,
                               verbose=True)
        else:
            network.initialize()
            root = HeuristicNode(position,
                                 None,
                                 GameClass,
                                 network,
                                 c,
                                 d,
                                 verbose=True)

        while True:
            best_node = root.choose_expansion_node()

            if best_node is not None:
                best_node.expand()

            if root.children is not None and worker_pipe.poll():
                user_chosen_position = worker_pipe.recv()

                if user_chosen_position is not None:
                    # an updated position has been received so we can truncate the tree
                    for child in root.children:
                        if np.all(child.position == user_chosen_position):
                            root = child
                            root.parent = None
                            break
                    else:
                        print(user_chosen_position)
                        raise Exception('Invalid user chosen move!')

                    if GameClass.is_over(root.position):
                        print('Game Over in Async MCTS: ',
                              GameClass.get_winner(root.position))
                        return
                else:
                    # this move chooser has been requested to decide on a move via the choose_move function
                    start_time = time()
                    while time() - start_time < time_limit:
                        best_node = root.choose_expansion_node()

                        # best_node will be None if the tree is fully expanded
                        if best_node is None:
                            break

                        best_node.expand()

                    is_ai_player_1 = GameClass.is_player_1_turn(root.position)
                    chosen_positions = []
                    print(
                        f'MCTS choosing move based on {root.count_expansions()} expansions!'
                    )

                    # choose moves as long as it is still the ai's turn
                    while GameClass.is_player_1_turn(
                            root.position) == is_ai_player_1:
                        if root.children is None:
                            best_node = root.choose_expansion_node()
                            if best_node is not None:
                                best_node.expand()
                        root, distribution = root.choose_best_node(
                            return_probability_distribution=True, optimal=True)
                        chosen_positions.append((root.position, distribution))

                    print('Expected outcome: ', root.get_evaluation())
                    root.parent = None  # delete references to the parent and siblings
                    worker_pipe.send(chosen_positions)
                    if GameClass.is_over(root.position):
                        print('Game Over in Async MCTS: ',
                              GameClass.get_winner(root.position))
                        return

    def reset(self):
        raise NotImplementedError('')
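A hypothetical driver for this class, based only on the methods above (`GameClass` and `starting_position` are assumed to be defined elsewhere):

if __name__ == '__main__':
    chooser = AsyncMCTS(GameClass, starting_position, time_limit=3)
    chooser.start()                    # spawn the background search process
    positions = chooser.choose_move()  # worker thinks, then returns positions
    opponent_position = ...            # obtain the opponent's reply here
    chooser.report_user_move(opponent_position)  # narrow the search tree
    chooser.terminate()                # stop the worker process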
Example #35
def show_info_on(a_topic):
    ''' Postconditions:
    1. a_topic is on the monitor
    2. The name of the module executing this is on the monitor
    3. If available, the ID of the process parent to this is on the monitor
    4. The ID of the process executing this is on the monitor
    '''
    print('Information on ' + a_topic)  # 1.
    print('Name of module executing this: ', __name__)  # 2.
    if hasattr(os, 'getppid'):  # 3: if available on this OS
        print('Parent process: ', os.getppid())
    print('ID of process executing this: ', os.getpid(), '\n')  # 4.

def say_hello(name):
    ''' Postconditions:
    1. = Postconditions of show_info_on('Say-hello process')
    2. "Hello <name>" is on the monitor
    '''
    show_info_on('Say-hello process')
    print('Hello ', name)

if __name__ == '__main__':
    ''' Postconditions:
    1. = Postconditions of show_info_on applied to (this) main process
    2. = Postconditions of show_info_on for a new process executing say_hello('Hugh Person')
    '''
    show_info_on('main line')
    p = Process(target=say_hello, args=('Hugh Person',))
    p.start()
    p.join()
Example #36
from multiprocessing import Process, Queue  # not multiprocessing.queues.Queue, which requires a ctx argument
import os, time, random

# Code executed by the writer process:
def write(q):
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())

# Code executed by the reader process:
def read(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue.' % value)

if __name__ == '__main__':
    # The parent process creates the Queue and passes it to each child process:
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # Start child process pw, which writes:
    pw.start()
    # Start child process pr, which reads:
    pr.start()
    # Wait for pw to finish:
    pw.join()
    # pr runs an infinite loop, so it cannot be joined; terminate it instead:
    pr.terminate()
Example #37
    def process_paid_order(self, order):
        self.logger.info(order.order_number + " - process")
        self.ioc.new_order_service().process_paid_order(order)
        p = Process(target=self.inform_customer, args=(self.ioc, order,))
        p.start()
        p.join()
Example #38
    # get break down of how many rows per process
    size = int(n / 4)
    # take time
    start_time = time.time()
    # create and start one process per quarter of the matrix
    procs = []
    for i in range(4):
        p = Process(target=subMatrixSmallestCount,
                    args=(matrix, i, size, n, Smallest, TotalCount, i + 1))
        procs.append(p)
        p.start()
    # wait for processes to finish
    for p in procs:
        p.join()

    print('Smallest value: {}'.format(Smallest.value))
    print('Count of that value: {}'.format(TotalCount.value))
    # print time
    print('Total time: {}s'.format(time.time() - start_time))
Example #39
class TestRemoteSession(TestCase):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.server_process = None
        """:type: Process"""

    def setUp(self):
        self.test_file = TestFile(settings.test_file_size)

        self.original_data = self.test_file.get_content()

        self.ftp_root = self.test_file.path
        self.remote_filename = uuid.uuid4().hex

        tuned_server, self.port = server.get_tuned_server(self.ftp_root)

        def server_func():
            tuned_server.serve_forever(handle_exit=True)

        self.server_process = Process(target=server_func)
        """:type: Process"""
        self.server_process.start()

    def get_connected_client_in_binary_mode(self):
        client = Client()
        client.connect('localhost', self.port)
        client.login(settings.ftp_user, settings.ftp_pass)
        client.type('I')  # Binary mode ('I'mage)
        return client

    def tearDown(self):
        while self.server_process.is_alive():
            # Send SIGINT to the process; pyftpd knows how to handle it and shuts down cleanly.
            os.kill(self.server_process.pid, signal.SIGINT)
            self.server_process.join(timeout=1)

        unlink(self.test_file.full_filename)

    def test_receive_file(self):
        """
        Test that the client receives data, pre-generated and saved on disk, through
        a connection to the local server.
        """
        client = self.get_connected_client_in_binary_mode()
        code, rest, data = client.retr(self.test_file.filename)
        q_code, q_rest = client.quit()

        self.assertEqual(code, 226)
        self.assertTrue(self.original_data == data)

    def test_send_file(self):
        """
        Test that the client uploads a file to the local server and that the file
        contents on disk match the data that was sent.
        """
        client = self.get_connected_client_in_binary_mode()
        client.stor(self.remote_filename, self.original_data)

        with open(os.path.join(self.ftp_root, self.remote_filename), 'rb') as f:
            stored_file_content = f.read()

        code, rest = client.quit()

        self.assertTrue(self.original_data == stored_file_content)