class MultiProcessRunner(BaseRunner):
    JOIN_TIMEOUT = 60

    def __init__(self, target, **kwargs):
        super(MultiProcessRunner, self).__init__(target, **kwargs)
        self.process = None  # type: Process

    @capture_monitor_exception
    def start(self):
        self.process = Process(target=self.target, kwargs=self.kwargs)
        self.process.start()

    @capture_monitor_exception
    def stop(self):
        if self.process and self.is_alive():
            self.process.terminate()
            self.process.join(MultiProcessRunner.JOIN_TIMEOUT)
            if self.process.is_alive():
                self.process.kill()

    @capture_monitor_exception
    def heartbeat(self):
        # do we want to do something here?
        pass

    @capture_monitor_exception
    def is_alive(self):
        return self.process.is_alive()

    def __str__(self):
        s = super(MultiProcessRunner, self).__str__()
        return f"{s}({self.process})"
Example #2
def test_contract_manager() -> None:
    aut = AirspaceManager()
    stop_ev = Event()
    conn, manager_conn = Pipe()

    p = Process(target=run_as_process, kwargs={"aut": aut,
                                               "conn": manager_conn,
                                               "stop_ev": stop_ev})
    p.start()
    try:
        uid = 0
        for i in range(10):
            if conn.poll(1.0):
                act = conn.recv()
                print(act)
            elif i % 3 != 2:
                uid = i % 5
                target = Contract.from_stamped_rectangles([
                    (0.0, Rectangle(mins=[0, 0, 0], maxes=[1, 1, 0.5])),
                    (0.5, Rectangle(mins=[0, 0.5, 0], maxes=[2, 3, 0.5])),
                    (1.0, Rectangle(mins=[0.5, 0.5, 1.0], maxes=[1.5, 1.5, 1.5]))
                    ])
                conn.send(("request", {"uid": uid, "target": target}))
            else:
                releasable = Contract.from_stamped_rectangles([
                    (0.0, Rectangle(mins=[0, 0, 0], maxes=[1, 1, 0.5])),
                    (0.5, Rectangle(mins=[0, 0.5, 0], maxes=[2, 2, 0.5]))
                    ])
                print("Agent " + str(uid) + " release > " + str(releasable))
                conn.send(("release", {"uid": uid, "releasable": releasable}))
    finally:
        stop_ev.set()  # Stop all automatons
        p.join()
        conn.close()
        manager_conn.close()
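run_as_process itself is not shown in this example; a plausible minimal sketch of such a child loop, with the ("kind", payload) message format and the aut.handle dispatch method assumed purely for illustration:

def run_as_process(aut, conn, stop_ev):
    # Poll the pipe until the parent sets the stop event.
    while not stop_ev.is_set():
        if conn.poll(0.1):
            kind, payload = conn.recv()
            reply = aut.handle(kind, **payload)  # hypothetical dispatch on the automaton
            if reply is not None:
                conn.send(reply)
    conn.close()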
Example #3
def test_disk_locking_processes(block_maker, storage_factory, redis_hostname):
    def visit(storage, root):
        ds = block_maker.first_ds(first_constant=2, ids_arg=3)
        cached = ds >> Apply(image=sleeper(0.1)) >> CacheToDisk(
            root, storage, JsonSerializer(), 'image')

        for i in ds.ids:
            assert ds.image(i) == cached.image(i)
            assert ds.image(i) == cached.image(i)

    for _ in range(5):
        with tempfile.TemporaryDirectory() as temp, storage_factory() as temp_storage:
            temp = Path(temp) / 'cache'
            init_storage(temp,
                         algorithm={
                             'name': 'blake2b',
                             'digest_size': 64
                         },
                         levels=[1, 31, 32],
                         locker={
                             'name': 'RedisLocker',
                             'args': [redis_hostname],
                             'kwargs': {
                                 'prefix': 'connectome.tests',
                                 'expire': 10
                             }
                         })

            th = Process(target=visit, args=(temp_storage, temp))
            th.start()
            visit(temp_storage, temp)
            th.join()
def test_agent() -> None:
    stop_ev = Event()
    conn, agent_conn = Pipe()

    waypoints = [(-80.0, -65.0, 3.0), (-45.0, -66.0, 3.0), (-33.0, -65.0, 3.0),
                 (-33.0, -65.0, 0.3)]
    aut = Agent(uid=1, motion=MotionHectorQuad("/drone1"), waypoints=waypoints)

    p = Process(target=run_as_process,
                kwargs={
                    "aut": aut,
                    "conn": agent_conn,
                    "stop_ev": stop_ev
                })
    p.start()

    try:
        for i in range(10):
            if conn.poll(1.0):
                act = conn.recv()
                print(act)
                if act[0] == "request":
                    reply = input("> ")
                    conn.send(("reply", {
                        "uid": aut.uid,
                        "acquired": Contract()
                    }))
            else:
                print("Response timeout")
    finally:
        stop_ev.set()  # Stop all automatons
        p.join()
        conn.close()
        agent_conn.close()
def main():
    # num_workers = int(mp.cpu_count() / 3)
    num_workers = 1
    for i in range(num_workers):
        file_ids = get_files_for_worker(i, num_workers)
        process = Process(target=worker, args=(file_ids, i))
        process.start()
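get_files_for_worker and worker are not defined here; a plausible sketch of a round-robin partition over a directory listing (the "data" directory and the per-file print are illustrative assumptions):

import os

def get_files_for_worker(worker_id, num_workers, data_dir="data"):
    # Each worker takes the files whose index modulo num_workers equals its id.
    files = sorted(os.listdir(data_dir))
    return [f for idx, f in enumerate(files) if idx % num_workers == worker_id]

def worker(file_ids, worker_id):
    for file_id in file_ids:
        print("worker %d handling %s" % (worker_id, file_id))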
Example #6
def start():
        builder = cluster_crawler.ClusterCrawler()
        start_block_id  = int(sys.argv[1])
        block_id = start_block_id
        process = None
        while builder.crawl_block(block_id):
            print("Block %d crawled" % block_id)

            if block_id - start_block_id > 0 and (block_id - start_block_id) % settings.block_crawling_limit == 0:
                builder.network_graph.check_integrity()
                while process is not None and process.is_alive():
                    print("Waiting for insertion thread to complete...")
                    process.join()

                if process is not None and process.exitcode > 0:  # error
                    raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
                process = Process(target=builder.network_graph.synchronize_mongo_db)
                process.start()
                builder.network_graph = cluster_network.ClusterNetwork(settings.db_server, settings.db_port) #Starting a new graph while other graph data is inserted.
                builder.connect_to_bitcoind_rpc()

            if process is not None and not process.is_alive() and process.exitcode > 0:  # error
                raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
            block_id += 1

        #Finished Crawling, Flushing to DB.
        #Waiting for any previous DB Sync
        while process is not None and process.is_alive():
            print("Waiting for insertion thread to complete...")
            process.join()

        #Sync the rest
        process = Process(target=builder.network_graph.synchronize_mongo_db)
        process.start()
        process.join()
Example #7
 def func(*args, **kwargs):
     deltas, before, after = [], 0, 0
     sys.stdout.write(" " * 147)
     for item in iterator:
         if isinstance(item, tuple):
             call_args = list(item) + list(args)
             item = item[0]
         else:
             call_args = [item] + list(args)
         delta = after - before
         eta = 0
         if delta > 0:
             deltas.append(delta)
             eta = (length - item) * (sum(deltas) / len(deltas))
         percentage = 100 * item / length
         p = Process(target=draw_progress_bar,
                     args=(eta, percentage,
                           f"{item + 1} of {length}..."))
         before = time.time()
         p.start()
         function(*call_args, **kwargs)
         p.terminate()
         after = time.time()
     p = Process(target=draw_progress_bar, args=(0, 100, "Ready!"))
     p.start()
     time.sleep(0.1)
     p.terminate()
     print()
Example #8
class AIOProcess:
    """ Execute a coroutine on a separate process """
    def __init__(self,
                 coroutine: Callable = None,
                 *args,
                 daemon: bool = False,
                 target_override: Callable = None,
                 **kwargs):
        if not asyncio.iscoroutinefunction(coroutine):
            raise ValueError("target must be a coroutine function")

        self.aio_process = Process(target=target_override
                                   or partial(AIOProcess.run_async, coroutine),
                                   args=args,
                                   kwargs=kwargs,
                                   daemon=daemon)

    @staticmethod
    def run_async(coroutine: Callable, *args, **kwargs):
        try:
            loop = uvloop.new_event_loop()
            asyncio.set_event_loop(loop)
            result = loop.run_until_complete(coroutine(*args, **kwargs))

            return result
        except BaseException:
            log.exception(f"aio process {os.getpid()} failed")
            raise

    def start(self):
        self.aio_process.start()

    async def join(self, timeout=None):
        if not self.is_alive() and self.exit_code is None:
            raise ValueError("must start process before joining")

        if timeout is not None:
            return await asyncio.wait_for(self.join(), timeout)

        while self.exit_code is None:
            await asyncio.sleep(0.005)

    @property
    def pid(self):
        return self.aio_process.pid

    @property
    def daemon(self):
        return self.aio_process.daemon

    @property
    def exit_code(self):
        return self.aio_process.exitcode

    def is_alive(self):
        return self.aio_process.is_alive()

    def terminate(self):
        self.aio_process.terminate()
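AIOProcess.run_async builds a fresh uvloop event loop inside the child process and runs the coroutine to completion there. A minimal standalone sketch of the same pattern using only the standard library (asyncio.run instead of uvloop; the work coroutine is illustrative):

import asyncio
from multiprocessing import Process

async def work(name):
    await asyncio.sleep(0.1)
    print(f"coroutine {name} finished in the child process")

def run_coroutine(name):
    # Each child process owns its own event loop.
    asyncio.run(work(name))

if __name__ == "__main__":
    p = Process(target=run_coroutine, args=("demo",))
    p.start()
    p.join()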
def start_logging_process():
    """
    Starts the logging Process that does the database Logging
    """
    tcpserver = LoggingSocketReceiver()
    process = Process(target=tcpserver.serve_until_stopped)
    process.start()
    return process
Example #10
def main():
    sys.stdout = open('log_print.txt', 'a')
    # num_workers = int(mp.cpu_count() / 3)
    num_workers = 1
    for i in range(num_workers):
        file_ids = get_files_for_worker(i, num_workers)
        process = Process(target=worker, args=(file_ids, i))
        process.start()
Example #11
    def proof_of_work(self, last_block):
        """
            Simple Proof of Work Algorithm:
            - Find a number p' such that hash(pp') contains leading 4 zeroes
            - Where p is the previous proof, and p' is the new proof

            :param last_block: <dict> last Block
            :return: <int>
            """

        last_proof = last_block['proof']
        last_hash = self.hash(last_block)

        # Flags
        flags = Value(c_bool, False)
        proof_result = Value(c_int, 0)

        # Proof
        proof1 = 0
        proof2 = 75001
        proof3 = 150001
        proof4 = 225001
        proof5 = 300001

        start_time = time.time()
        p1 = Process(target=self.find_proof,
                     args=(flags, 0, last_proof, proof1, last_hash,
                           proof_result, 0, 75001))
        p2 = Process(target=self.find_proof,
                     args=(flags, 1, last_proof, proof2, last_hash,
                           proof_result, 75001, 150001))
        p3 = Process(target=self.find_proof,
                     args=(flags, 2, last_proof, proof3, last_hash,
                           proof_result, 150001, 225001))
        p4 = Process(target=self.find_proof,
                     args=(flags, 3, last_proof, proof4, last_hash,
                           proof_result, 225001, 300001))
        p5 = Process(target=self.find_proof,
                     args=(flags, 4, last_proof, proof5, last_hash,
                           proof_result, 300001, sys.maxsize))

        p1.start()
        p2.start()
        p3.start()
        p4.start()
        p5.start()
        p1.join()
        p2.join()
        p3.join()
        p4.join()
        p5.join()

        end_time = time.time()
        print('TIMETIMETIMETIME : ', end_time - start_time)

        print('proof_result : ', proof_result.value)

        return proof_result.value
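find_proof is not shown above; a plausible sketch of the worker method it spawns (hashlib is assumed to be imported at module level; the exact hash layout and the "four leading zeroes" check are taken from the docstring as assumptions):

    def find_proof(self, flags, worker_id, last_proof, proof, last_hash,
                   proof_result, start, end):
        # Scan [start, end); stop as soon as any worker has set the shared flag.
        for candidate in range(start, end):
            if flags.value:
                return
            guess = f'{last_proof}{candidate}{last_hash}'.encode()
            if hashlib.sha256(guess).hexdigest()[:4] == '0000':
                flags.value = True              # tell the other workers to stop
                proof_result.value = candidate  # publish the winning proof
                return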
Example #12
def do_cfg_multiprocess(processes=1):
    print('processes:: ', str(processes))
    procs = []
    for i in range(processes):
        print('_')
        print('_process #:: ', str(i))
        proc = Process(target=do_lite_function, args=(i, ))
        procs.append(proc)
        proc.start()
Example #13
def run_app_as_process(command,
                       daemon=False,
                       shell=False,
                       state_queue=None) -> Process:
    p = Process(target=run_app,
                args=(command, shell, state_queue),
                daemon=daemon)
    p.start()
    return p
def process_file(file_path_input, file_path_output, i):
    if os.path.exists(file_path_output):
        return print('{}    {}  Already done'.format(now(), file_path_output))

    input_file = open(file_path_input, 'r')
    output_file = open(file_path_output, 'a')

    # every worker creates a separate file for an input file
    writer = csv.writer(output_file, delimiter='\t')

    # for every file we create a separate driver (100 URLs)
    driver = webdriver.PhantomJS(executable_path=path_to_phantomjs)

    for line in input_file:
        splited = line.split('\t')
        # property_type = splited[0]
        url = splited[1]
        print('{}   Process={}  Current url: {}'.format(now(), i, url))

        # start process for getting microformat properties
        temp_queue = Queue()
        # p = Process(target=get_microformat_properties_by_type, args=(url, property_type, temp_queue, i))
        p = Process(target=get_element_features,
                    args=(url, driver, temp_queue, i))
        print("{}   {}  Process={}  {}  {}".format(now(), i, "Started: ",
                                                   "feature extraction", url))
        p.start()
        event_features = temp_queue.get(timeout=TIME_OUT_FEATURE)
        # try:
        #     pass
        # except Empty:
        #     print("{}   {}  Process={}  {}  {}".format(now(), i, "Timed out on: ", "feature extraction", url))

        if p.is_alive():
            p.terminate()

        print("Event features:" + str(event_features))

        if event_features is not None:
            print("{}   Process={}  Got properties for  {}".format(
                now(), i, url))

            # start process for feature extraction and writing to separate file
            # p_event_features = Process(target=get_event_features_and_write,
            #                            args=(event_features, driver, writer, i, output_file))
            #
            p_event_features = Process(target=write_element_features,
                                       args=(event_features, writer, i,
                                             output_file))
            p_event_features.start()
            # start_with_timeout(p_event_features, TIME_OUT_LOAD, "feature writing", url, i)
            if p_event_features.is_alive():
                p_event_features.terminate()

    driver.service.process.send_signal(signal.SIGTERM)
    driver.quit()
    return 'done'
Example #15
def do_main_driver_multiprocess(processes=1, reps=1):
    print('processes:: ', str(processes))
    procs = []
    for i in range(processes):
        print('_')
        print('_process #:: ', str(i))
        proc = Process(target=main_driver, args=(reps, ))
        procs.append(proc)
        proc.start()
Example #16
def compute_hub_SALSA(inlinks, inlinks_count, outlinks, outlinks_count,
                      rootSet):
    print("Size of rootset is " + str(len(rootSet)))
    # computing hub scores
    salsa_hub_scores = dict()

    countBaseSetWithOutlinks = 0
    for page_id in rootSet:
        if page_id in outlinks:
            if len(outlinks[page_id]) > 0:
                countBaseSetWithOutlinks += 1
    print(countBaseSetWithOutlinks)

    for page_id in rootSet:
        if page_id in outlinks:
            if len(outlinks[page_id]) > 0:
                salsa_hub_scores[page_id] = 1 / countBaseSetWithOutlinks
            else:
                salsa_hub_scores[page_id] = 0
        else:
            salsa_hub_scores[page_id] = 0

    for iter in range(0, 3):
        print(iter)
        global temp_salsa_hub_scores
        count = 0

        page_list = list()
        procs = list()
        for page_id in rootSet:
            page_list.append(page_id)
            count += 1
            if count % 100 == 0:
                print(count)
                proc = Process(target=multi_scoring,
                               args=(
                                   page_list,
                                   inlinks,
                                   inlinks_count,
                                   outlinks,
                                   outlinks_count,
                                   temp_salsa_hub_scores,
                                   salsa_hub_scores,
                               ))
                page_list = list()
                procs.append(proc)
                proc.start()

        print("Total processes  " + str(len(procs)))
        for proc in procs:
            proc.join()

        for page_id in temp_salsa_hub_scores:
            salsa_hub_scores[page_id] = temp_salsa_hub_scores[page_id]
        temp_salsa_hub_scores = dict()
    return salsa_hub_scores
Example #17
def dashboard():
    # This fixture guarantees the proper termination of all spawned subprocesses
    # after the tests.
    dashboard = Process(target=run_detection)
    dashboard.start()
    yield
    for child in psutil.Process(dashboard.pid).children(recursive=True):
        child.kill()
    dashboard.terminate()
    dashboard.join()
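The fixture above reaps the dashboard's whole process tree with psutil before terminating it. The same cleanup as a standalone helper (the function name is illustrative; psutil is assumed to be installed):

import psutil

def kill_process_tree(pid):
    parent = psutil.Process(pid)
    # Kill grandchildren and children first, then stop the parent itself.
    for child in parent.children(recursive=True):
        child.kill()
    parent.terminate()
    parent.wait()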
Example #18
def run(configfile):
    """
    This method is called after initializing the framework and starts the configured servers.
    @param configfile: Path referring to the xml configfile.
    """
    logger.debug('run() method started!')
    for role in config.get_roles(configfile):
        process = Process(target=role)
        print('Starting Process!')
        process.start()
    logger.debug('run() is done!')
Example #19
def main():
    size = os.path.getsize("dict.txt")
    half_size = size // 2

    p1 = Process(target=read_part1, args=("dict.txt", half_size))
    p2 = Process(target=read_part2, args=("dict.txt", half_size))
    p1.start()
    p2.start()

    p1.join()
    p2.join()
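read_part1 and read_part2 are not shown; a plausible sketch that splits "dict.txt" at the byte offset computed above (exact boundary handling, such as lines straddling the midpoint, is left aside as an assumption):

def read_part1(path, half_size):
    with open(path, 'rb') as f:
        data = f.read(half_size)   # first half of the file
    print('part1 read %d bytes' % len(data))

def read_part2(path, half_size):
    with open(path, 'rb') as f:
        f.seek(half_size)          # jump to the second half
        data = f.read()
    print('part2 read %d bytes' % len(data))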
Example #20
    def start_up_app(self):
        self.show_splash_screen()
        try:
            # load db in separate process
            process_startup = Process(target=LucteriosRefreshAll)
            process_startup.start()

            while process_startup.is_alive():
                # print('updating')
                self.splash.update()
        finally:
            self.remove_splash_screen()
Example #21
 def keyboard_interrupt(cls, path_file_input: Path, path_file_output: Path) -> None:
     """Test process of keyboard interrupt."""
     process = Process(target=cls.report_raises_keyboard_interrupt, args=(path_file_input, path_file_output))
     process.start()
     assert LocalSocket.receive() == "Ready"
     time.sleep(SECOND_SLEEP_FOR_TEST_KEYBOARD_INTERRUPT_CTRL_C_POSIX)
     cls.simulate_ctrl_c_in_posix(process)
     assert LocalSocket.receive() == "Test succeed"
     psutil_process = psutil.Process(process.pid)
     assert psutil_process.wait() == 0
     # Reason: Requires to enhance types-psutil
     assert not psutil_process.is_running()  # type: ignore
Example #22
def do_sanity_multiprocess():
    names = ['A', 'B', 'C']
    procs = []
    # proc = Process(target=do_lite_function)
    # procs.append(proc)
    # proc.start()

    for count, name in enumerate(names, 1):
        print(count, name)
        proc = Process(target=do_lite_function, args=(name, ))
        procs.append(proc)
        proc.start()
Example #23
 def __enter__(self):
     self.listener_ = Listener(self.address_, family='AF_INET')
     for child in range(self.nb_children_):
         process = Process(target=creator,
                           args=(
                               self.data_,
                               self.models_,
                               self.address_,
                           ))
         process.start()
         self.children_.append(process)
     return self
Example #24
 def serve(routing: dict, open_browser=False, timeout=None, filename=''):
     p = Process(target=WebServer.serve_and_browse,
                 args=(
                     routing,
                     open_browser,
                     filename,
                 ))
     p.start()
     if timeout is not None:
         logging.info("Waiting for server %d seconds" % timeout)
         time.sleep(timeout)
         p.terminate()
Example #25
 def test_sigterm(manager_queue: "queue.Queue[LogRecord]") -> None:
     """ProcessTaskPoolExecutor should raise keyboard interrupt."""
     process = Process(target=example_use_case_cancel_repost_process_id, kwargs={"queue_main": manager_queue})
     process.start()
     LocalSocket.receive()
     time.sleep(SECOND_SLEEP_FOR_TEST_SHORT)
     psutil_process = psutil.Process(process.pid)
     psutil_process.send_signal(SIGTERM)
     psutil_process.wait()
     # Reason: Requires to enhance types-psutil
     assert not psutil_process.is_running()  # type: ignore
     assert_graceful_shutdown(manager_queue)
Example #26
def run():
    # while True:
        for name in ['jd', 'sn']:  # corresponds to the JSON config files stored in config
            custom_settings = get_config(name)  # name of the Spider used for crawling
            spider = custom_settings.get('spider', 'aCrawler')
            project_settings = get_project_settings()
            settings = dict(project_settings.copy())  # merge the settings
            settings.update(custom_settings.get('settings'))
            process = Process(target=crawl, kwargs={'settings': settings, 'spider': spider, 'name': name,
                                                    'custom_settings': custom_settings})
            process.start()
            process.join()
Example #27
    def start_up_app(self):
        self.show_splash_screen()

        # load db in separate process
        process_startup = Process(target=LucteriosRefreshAll)
        process_startup.start()

        while process_startup.is_alive():
            # print('updating')
            self.splash.update()

        self.remove_splash_screen()
Example #28
def test(args):
    model = Rockfish.load_from_checkpoint(checkpoint_path=args.checkpoint)
    model.freeze()

    test_ds = Fast5Data(args.test_path, args.recursive, args.reseg_path,
                    args.norm_method, args.motif, args.sample_size, args.window)

    if args.n_workers > 0:
        test_dl = DataLoader(test_ds, batch_size=args.batch_size,
                        num_workers=args.n_workers, pin_memory=True,
                        worker_init_fn=worker_init_fn,
                        prefetch_factor=args.prefetch_factor)
    else:
        test_dl = DataLoader(test_ds, batch_size=args.batch_size,
                        num_workers=args.n_workers, pin_memory=True,
                        worker_init_fn=worker_init_fn)

    n_gpus = torch.cuda.device_count()
    if n_gpus > 0:
        model = DataParallel(model, device_ids=list(range(n_gpus)))
        model.to(f'cuda:{model.device_ids[0]}')

    model.eval()

    output_queue = mp.Queue()
    consumers = []
    abs_out_path = str(args.out_path.absolute())
    for i in range(args.output_workers):
        worker_path = TMP_PATH.format(final=abs_out_path, id=i)
        process = Process(target=output_worker, args=(worker_path, output_queue))
        process.start()

        consumers.append(process)

    with torch.no_grad():
        for info, sig, k_mer in tqdm(test_dl):
            pred = model(sig, k_mer).squeeze(-1)
            pred = pred.cpu().numpy()

            output_queue.put((info, pred))

    for _ in range(len(consumers)):
        output_queue.put(None)
    for c in consumers:
        c.join()

    with args.out_path.open('w') as out:
        for i in range(len(consumers)):
            worker_path = TMP_PATH.format(final=abs_out_path, id=i)
            with open(worker_path, 'r') as tmp_f:
                out.write(tmp_f.read())
            os.remove(worker_path)
Example #29
def run():
    while True:
        for name in ['sina', 'xinhua']:  # corresponds to the JSON config files stored in config
            custom_settings = get_config(name)  # name of the Spider used for crawling
            spider = custom_settings.get('spider', 'sina')
            project_settings = get_project_settings()
            settings = dict(project_settings.copy())  # merge the settings
            settings.update(custom_settings.get('settings'))
            process = Process(target=crawl, kwargs={'settings': settings, 'spider': spider, 'name': name,
                                                    'custom_settings': custom_settings})
            process.start()
            process.join()
        time.sleep(60 * 60 * 24)  # run once a day
Example #30
 def test_keyboard_interrupt_on_linux(self) -> None:
     """
     - Keyboard interrupt should reach to all descendant processes.
     - Keyboard interrupt should shutdown ProcessTaskPoolExecutor gracefully.
     """
     process = Process(target=self.report_raises_keyboard_interrupt)
     process.start()
     LocalSocket.receive()
     time.sleep(SECOND_SLEEP_FOR_TEST_SHORT)
     self.simulate_ctrl_c_in_posix(process)
     assert LocalSocket.receive() == "Test succeed"
     process.join()
     assert process.exitcode == 0
     assert not process.is_alive()
def create_app():
    app = Flask(__name__)

    logger.info("Server starting...")

    from app.routes import bp
    app.register_blueprint(bp)

    starting_port = 50052
    for port in range(starting_port, starting_port + 8):
        process = Process(target=serve_runner, args=(port, ))
        process.start()

    return app
Example #32
 def terminate(cls, path_file_input: Path, path_file_output: Path) -> None:
     """Test process of keyboard interrupt."""
     process = Process(target=cls.report_raises_cencelled_error, args=(path_file_input, path_file_output))
     process.start()
     assert LocalSocket.receive() == "Ready"
     time.sleep(SECOND_SLEEP_FOR_TEST_KEYBOARD_INTERRUPT_CTRL_C_POSIX)
     psutil_process = psutil.Process(process.pid)
     psutil_process.terminate()
     # Return code seems change when:
     # run this test only: -15
     # run all tests: 1
     assert psutil_process.wait() in [-15, 1]
     # Reason: Requires to enhance types-psutil
     assert not psutil_process.is_running()  # type: ignore
Example #33
class ActorRunner:
    def __init__(self, actors: Iterable[Actor]):
        self._process = Process(target=self._run, args=(actors, ))
        self._process.start()

    def _run(self, actors: Iterable[Actor]):
        asyncio.run(self._loop(actors))

    async def _loop(self, actors: Iterable[Actor]):
        loop = get_running_loop()
        for actor in actors:
            loop.create_task(actor.runner())
        while loop.is_running():
            await asyncio.sleep(1)
Example #34
class SubprocessEnv(Env):
    def __init__(self, factory: Callable[[], Env], blocking: bool = True):
        self._blocking = blocking
        self._parent_conn, child_conn = Pipe()
        self._process = Process(target=self._start, args=(factory, child_conn))
        self._process.start()
        self.observation_space, self.action_space = self._parent_conn.recv()

    def _start(self, factory: Callable[[], Env], connection: Connection):
        env = factory()
        _ = env.reset()
        connection.send((env.observation_space, env.action_space))
        terminate = False
        while not terminate:
            command, kwargs = connection.recv()
            if command == 'render':
                rendering = env.render(**kwargs)
                connection.send(rendering)
            elif command == 'step':
                step = env.step(**kwargs)
                connection.send(step)
            elif command == 'reset':
                obs = env.reset(**kwargs)
                connection.send(obs)
            elif command == 'close':
                terminate = True
                connection.close()

    def step(self, action):
        self._parent_conn.send(('step', dict(action=action)))
        return self._return()

    def reset(self, **kwargs):
        self._parent_conn.send(('reset', kwargs))
        return self._return()

    def render(self, mode: str = 'human', **kwargs):
        self._parent_conn.send(('render', {'mode': mode, **kwargs}))
        return self._return()

    def close(self):
        self._parent_conn.send(('close', False))
        self._parent_conn.close()

    def _return(self) -> Any:
        if self._blocking:
            return self._parent_conn.recv()
        else:
            return lambda: self._parent_conn.recv()
    def get_data_from_sn(self, search_list, database_name):

        if database_name == 'neo':
            sn_node = graph.find_one("SN", "name", "SOCIALNETWORKS")
            twitter_node = graph.merge_one("Twitter", "name", "TWITTER")
            sn_has_twitter = Relationship(sn_node, "HAS", twitter_node)
            graph.create_unique(sn_has_twitter)

        twitter_searcher = TwitterSearcher()
        twitter_searcher_process = Process(target=twitter_searcher.run_twitter_search, args=(search_list, database_name,))

        twitter_streamer = TwitterStreamer()
        twitter_streamer_process = Process(target=twitter_streamer.run_twitter_stream, args=(search_list, database_name,))

        twitter_searcher_process.start()
        twitter_streamer_process.start()
        twitter_streamer_process.join()
        twitter_searcher_process.join()
def start():
        builder = GraphBuilder()
        start_block_id  = int(sys.argv[1])
        block_id = start_block_id
        process = None
        try:
            while builder.crawl_block(block_id):
                print("Block %d crawled" % block_id)

                if block_id - start_block_id > 0 and (block_id - start_block_id) % Settings.block_crawling_limit == 0:
                    builder.network_graph.check_integrity()
                    while process is not None and process.is_alive():
                        print("Waiting for insertion thread to complete...")
                        process.join()

                    if process is not None and process.exitcode > 0:  # error
                        raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
                    process = Process(target=builder.network_graph.synchronize_mongo_db)
                    process.start()
                    builder.network_graph = NetworkGraph.Network(Settings.db_server,Settings.db_port) #Starting a new graph while other graph data is inserted.

                if process is not None and not process.is_alive() and process.exitcode > 0:  # error
                    raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
                block_id += 1

            #Finished Crawling, Flushing to DB.
            #Waiting for any previous DB Sync
            while process is not None and process.is_alive():
                print("Waiting for insertion thread to complete...")
                process.join()

            #Sync the rest
            process = Process(target=builder.network_graph.synchronize_mongo_db)
            process.start()
            process.join()

            #DONE!

        #For Debugging purpose
        except:
            input("An exception will rise ")
            raise
    def get_data_from_sn(self, search_list, database_name):

        if database_name == 'neo':
            sn_node = graph.find_one("SN", "name", "SOCIALNETWORKS")
            facebook_node = graph.merge_one("Facebook", "name", "FACEBOOK")
            sn_has_facebook = Relationship(sn_node, "HAS", facebook_node)
            graph.create_unique(sn_has_facebook)


        event_searcher = EventSearcher()
        event_process = Process(target=event_searcher.run_facebook_events, args=(search_list, database_name,))

        group_searcher = GroupSearcher()
        group_process = Process(target=group_searcher.run_facebook_groups, args=(search_list, database_name,))

        page_searcher = PageSearcher()
        page_process = Process(target=page_searcher.run_facebook_pages, args=(search_list, database_name,))

        event_process.start()
        group_process.start()
        page_process.start()

        event_process.join()
        group_process.join()
        page_process.join()
class TestOrchestrator(BaseTestCase):

    def setUp(self):
        self.test_directory = os.path.dirname(os.path.realpath(__file__))
        self.test_config = os.path.join(self.test_directory,
                                        "packager_config.cfg")
        self.packager_config = PackagerConfig(self.test_config)
        """Spin up a server in a seperate process"""
        def start_test_server():
            app = Orchestrator(self.packager_config)
            from werkzeug.serving import run_simple
            run_simple('127.0.0.1', 5000, app)

        self.p = Process(target=start_test_server, daemon=True)
        self.p.start()

    def tearDown(self):
        """Make sure we tear down the process properly at the end"""
        self.p.terminate()

    def testOrchestratorAcceptingInputCorrectly(self):
        @exponential_back_off
        def wait_for_process():
            return requests.get("http://127.0.0.1:5000/health")

        wait_seconds = 1
        print("await process spin up for %d seconds" % wait_seconds)
        time.sleep(wait_seconds)
        wait_for_process()

        with open(self.get_file('code'), 'r') as code, \
                open(self.get_file('tests'), 'r') as test:
            language = 'python'
            question_name = 'foo'
            code_payload = code.read()
            test_payload = test.read()
            payload = {
                'question_name': question_name,
                'language': language,
                'code': code_payload,
                'test': test_payload
            }

            # http:// required,
            # requests library needs protocol scheme to connect
            got_resp_raw = requests.post('http://127.0.0.1:5000/submit',
                                         data=payload)
            got_resp = json.loads(got_resp_raw.text)
            exp_data = {
                'language': language,
                'code': code_payload,
                'test': test_payload,
                'question_name': question_name
            }
            exp_response_message = 'not tested for in this testcase'
            exp_resp = {
                'execution_result': exp_response_message,
                'got_data': exp_data
            }

            self.assertEqual(exp_resp['got_data'], got_resp['got_data'])

            # we just need to check if the result contains success
            for testcase, test_result in got_resp['execution_result'].items():
                self.assertTrue(test_result['success'],
                                'testcase {} failure with message {}'
                                .format(testcase, test_result['message']))
Example #39
def show_info_on(a_topic):
    ''' Postconditions:
    1. a_topic is on the monitor
    2. The name of the module executing this is on the monitor
    3. If available, the ID of the process parent to this is on the monitor
    4. The ID of the process executing this is on the monitor
    '''
    print('Information on ' + a_topic)  # 1.
    print('Name of module executing this: ', __name__)  # 2.
    if hasattr(os, 'getppid'):  # 3: if available on this OS
        print('Parent process: ', os.getppid())
    print('ID of process executing this: ', os.getpid(), '\n')  # 4.

def say_hello(name):
    ''' Postconditions:
    1. = Postconditions of get_info_on('Say-hello process')
    2. "Hello <name>" is on the monitor
    '''
    show_info_on('Say-hello process')
    print('Hello ', name)

if __name__ == '__main__':
    ''' Postconditions:
    1. = Postconditions of get_info_on (this) main process
    2. = Postconditions of get_info_on a new process executing say_hello('Hugh Person')
    '''
    show_info_on('main line')
    p = Process(target=say_hello, args=('Hugh Person',))
    p.start()
    p.join()
Example #40
 def process_paid_order(self, order):
     self.logger.info(order.order_number+" - process")
     self.ioc.new_order_service().process_paid_order(order)
     p = Process(target=self.inform_customer, args=(self.ioc, order,))
     p.start()
     p.join()
Example #41
from multiprocessing import Process, Queue
import os, time, random

# Code executed by the writer process:
def write(q):
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())

# Code executed by the reader process:
def read(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue.' % value)

if __name__=='__main__':
    # The parent process creates the Queue and passes it to each child process:
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # Start the writer process pw:
    pw.start()
    # Start the reader process pr:
    pr.start()
    # Wait for pw to finish:
    pw.join()
    # pr runs an infinite loop, so it cannot be joined; terminate it forcibly:
    pr.terminate()
    print('end matrix')
    '''
    # declare variables in shared memory
    Smallest = Value('i', n)
    TotalCount = Value('i', 0)
    # get break down of how many rows per process
    size = int(n / 4)
    # take time
    start_time = time.time()
    # create each process
    p0 = Process(target=subMatrixSmallestCount, args=(matrix, 0, size, n, Smallest, TotalCount, 1))
    p1 = Process(target=subMatrixSmallestCount, args=(matrix, 1, size, n, Smallest, TotalCount, 2))
    p2 = Process(target=subMatrixSmallestCount, args=(matrix, 2, size, n, Smallest, TotalCount, 3))
    p3 = Process(target=subMatrixSmallestCount, args=(matrix, 3, size, n, Smallest, TotalCount, 4))
    # start processes
    p0.start()
    p1.start()
    p2.start()
    p3.start()
    # wait for processes to finish
    p0.join()
    p1.join()
    p2.join()
    p3.join()

    print('Smallest value: {}'.format(Smallest.value))
    print('Count of that value: {}'.format(TotalCount.value))
    # print time
    print('Total time: {}s'.format(time.time() - start_time))
    '''

class TestRemoteSession(TestCase):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.server_process = None
        """:type: Process"""

    def setUp(self):
        self.test_file = TestFile(settings.test_file_size)

        self.original_data = self.test_file.get_content()

        self.ftp_root = self.test_file.path
        self.remote_filename = uuid.uuid4().hex

        tuned_server, self.port = server.get_tuned_server(self.ftp_root)

        def server_func():
            tuned_server.serve_forever(handle_exit=True)

        self.server_process = Process(target=server_func)
        """:type: Process"""
        self.server_process.start()

    def get_connected_client_in_binary_mode(self):
        client = Client()
        client.connect('localhost', self.port)
        client.login(settings.ftp_user, settings.ftp_pass)
        client.type('I')  # Binary mode ('I'mage)
        return client

    def tearDown(self):
        while self.server_process.is_alive():
            # send SIGINT to the process; pyftpd knows how to handle it and shuts down cleanly
            os.kill(self.server_process.pid, signal.SIGINT)
            self.server_process.join(timeout=1)

        unlink(self.test_file.full_filename)

    def test_receive_file(self):
        """
        Test that the client receives data that was generated in advance and stored on
        disk, by connecting to the local server.
        """
        client = self.get_connected_client_in_binary_mode()
        code, rest, data = client.retr(self.test_file.filename)
        q_code, q_rest = client.quit()

        self.assertEqual(code, 226)
        self.assertTrue(self.original_data == data)

    def test_send_file(self):
        """
        Test that the client uploads a file to the local server and that the file
        contents on disk match the data that was sent.
        """
        client = self.get_connected_client_in_binary_mode()
        client.stor(self.remote_filename, self.original_data)

        with open(os.path.join(self.ftp_root, self.remote_filename), 'rb') as f:
            stored_file_content = f.read()

        code, rest = client.quit()

        self.assertTrue(self.original_data == stored_file_content)