Example #1
def daemon_lynx(zmq_client_port: int = 2409, zmq_worker_port: int = 2410):
    print("Start ZMQ Broker...")
    Process(target=default_broker,
            args=(zmq_client_port, zmq_worker_port)).start()
    for w in range(1, default_zmq_worker_runner + 1):
        print(f"Start LipidLynxX ZMQ Worker#{w}...")
        Process(target=general_worker, args=(w, zmq_worker_port)).start()
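A note on launching: on platforms that use the spawn start method (Windows, and macOS on recent Pythons), Process targets are re-imported in the child, so an entry point like this must sit behind a main guard. A minimal sketch of invoking the function above:

if __name__ == '__main__':
    daemon_lynx()  # starts the broker process plus the configured ZMQ workers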
Example #2
    def __init__(self, dsn: str, publication_name: str, slot_name: str,
                 pipe_conn: Connection) -> None:
        Process.__init__(self)
        self.dsn = dsn
        self.publication_name = publication_name
        self.slot_name = slot_name
        self.pipe_conn = pipe_conn
Example #3
class MultiProcessRunner(BaseRunner):
    JOIN_TIMEOUT = 60

    def __init__(self, target, **kwargs):
        super(MultiProcessRunner, self).__init__(target, **kwargs)
        self.process = None  # type: Process

    @capture_monitor_exception
    def start(self):
        self.process = Process(target=self.target, kwargs=self.kwargs)
        self.process.start()

    @capture_monitor_exception
    def stop(self):
        if self.process and self.is_alive():
            self.process.terminate()
            self.process.join(MultiProcessRunner.JOIN_TIMEOUT)
            if self.process.is_alive():
                self.process.kill()

    @capture_monitor_exception
    def heartbeat(self):
        # do we want to do something here?
        pass

    @capture_monitor_exception
    def is_alive(self):
        return self.process.is_alive()

    def __str__(self):
        s = super(MultiProcessRunner, self).__str__()
        return f"{s}({self.process})"
Example #4
def main():
    # num_workers = int(mp.cpu_count() / 3)
    num_workers = 1
    for i in range(num_workers):
        file_ids = get_files_for_worker(i, num_workers)
        process = Process(target=worker, args=(file_ids, i))
        process.start()
Example #5
class AIOProcess:
    """ Execute a coroutine on a separate process """
    def __init__(self,
                 coroutine: Callable = None,
                 *args,
                 daemon: bool = False,
                 target_override: Callable = None,
                 **kwargs):
        if not asyncio.iscoroutinefunction(coroutine):
            raise ValueError("target must be a coroutine function")

        self.aio_process = Process(target=target_override
                                   or partial(AIOProcess.run_async, coroutine),
                                   args=args,
                                   kwargs=kwargs,
                                   daemon=daemon)

    @staticmethod
    def run_async(coroutine: Callable, *args, **kwargs):
        try:
            loop = uvloop.new_event_loop()
            asyncio.set_event_loop(loop)
            result = loop.run_until_complete(coroutine(*args, **kwargs))

            return result
        except BaseException:
            log.exception(f"aio process {os.getpid()} failed")
            raise

    def start(self):
        self.aio_process.start()

    async def join(self, timeout=None):
        if not self.is_alive() and self.exit_code is None:
            raise ValueError("must start process before joining")

        if timeout is not None:
            return await asyncio.wait_for(self.join(), timeout)

        while self.exit_code is None:
            await asyncio.sleep(0.005)

    @property
    def pid(self):
        return self.aio_process.pid

    @property
    def daemon(self):
        return self.aio_process.daemon

    @property
    def exit_code(self):
        return self.aio_process.exitcode

    def is_alive(self):
        return self.aio_process.is_alive()

    def terminate(self):
        self.aio_process.terminate()
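A sketch of how AIOProcess might be used, assuming uvloop is installed and the coroutine is defined at module level so it can be pickled for the child process (fetch is hypothetical):

import asyncio

async def fetch(delay: float) -> str:
    await asyncio.sleep(delay)
    return "done"

async def main():
    proc = AIOProcess(fetch, 0.25)
    proc.start()
    await proc.join(timeout=5)  # polls exit_code without blocking the event loop
    print(proc.exit_code)       # 0 on success

if __name__ == '__main__':
    asyncio.run(main())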
Example #6
def main():
    sys.stdout = open('log_print.txt', 'a')
    # num_workers = int(mp.cpu_count() / 3)
    num_workers = 1
    for i in range(num_workers):
        file_ids = get_files_for_worker(i, num_workers)
        process = Process(target=worker, args=(file_ids, i))
        process.start()
Example #7
def start_logging_process():
    """
    Starts the logging Process that does the database Logging
    """
    tcpserver = LoggingSocketReceiver()
    process = Process(target=tcpserver.serve_until_stopped)
    process.start()
    return process
Example #8
def do_cfg_multiprocess(processes=1):
    print('processes:: ', str(processes))
    procs = []
    for i in range(processes):
        print('_')
        print('_process #:: ', str(i))
        proc = Process(target=do_lite_function, args=(i, ))
        procs.append(proc)
        proc.start()
Example #9
def run_app_as_process(command,
                       daemon=False,
                       shell=False,
                       state_queue=None) -> Process:
    p = Process(target=run_app,
                args=(command, shell, state_queue),
                daemon=daemon)
    p.start()
    return p
Example #10
    def __init__(self):
        m = Manager()
        self.ports = m.dict()  # used ports
        self.data = m.dict()  # all the data (in a synced dict)
        self.lastUpdate = SyncDeltaVal()

        self.finished = Value('b', False)
        self.p = Process(target=self._ui)
        self.p.start()
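Manager().dict() returns a proxy whose reads and writes are marshalled through a manager server process, which is what lets the _ui child and the parent share self.ports and self.data. A minimal sketch of that mechanism, independent of the class above:

from multiprocessing import Manager, Process

def record(shared):
    shared['child_done'] = True  # the write goes through the manager proxy

if __name__ == '__main__':
    with Manager() as m:
        d = m.dict()
        p = Process(target=record, args=(d,))
        p.start()
        p.join()
        print(dict(d))  # {'child_done': True}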
Example #11
def do_main_driver_multiprocess(processes=1, reps=1):
    print('processes:: ', str(processes))
    procs = []
    for i in range(processes):
        print('_')
        print('_process #:: ', str(i))
        proc = Process(target=main_driver, args=(reps, ))
        procs.append(proc)
        proc.start()
Example #12
    def __init__(self, GameClass, starting_position=None, time_limit=3):
        super(AsyncIterativeDeepening, self).__init__(GameClass, starting_position)
        self.root = DeepeningNode(GameClass, starting_position)
        self.time_limit = time_limit

        self.parent_pipe, worker_pipe = Pipe()
        self.worker_process = Process(target=self.loop_func,
                                      args=(GameClass, starting_position, worker_pipe))
        self.receipt_backlog = 0
Example #13
    def __init__(self, main, id):
        Process.__init__(self)
        self.convertQueue = main.ConvertQueue
        self.name = "Converter " + str(id)
        self.isRunning = True
        self.dataTransport = main.dataTransport
        self.main = main
        self.params = main.params
        self.filter = main.params['filter']
Example #14
def run(configfile):
    """
    This method is called after initializing the framework and starts the configured servers.
    @param configfile: Path referring to the xml configfile.
    """
    logger.debug('run() method started!')
    for role in config.get_roles(configfile):
        process = Process(target=role)
        print('Starting Process!')
        process.start()
    logger.debug('run() is done!')
Example #15
    def start_up_app(self):
        self.show_splash_screen()
        try:
            # load db in separate process
            process_startup = Process(target=LucteriosRefreshAll)
            process_startup.start()

            while process_startup.is_alive():
                # print('updating')
                self.splash.update()
        finally:
            self.remove_splash_screen()
Example #16
    def keyboard_interrupt(cls, path_file_input: Path, path_file_output: Path) -> None:
        """Test process handling of keyboard interrupt."""
        process = Process(target=cls.report_raises_keyboard_interrupt, args=(path_file_input, path_file_output))
        process.start()
        assert LocalSocket.receive() == "Ready"
        time.sleep(SECOND_SLEEP_FOR_TEST_KEYBOARD_INTERRUPT_CTRL_C_POSIX)
        cls.simulate_ctrl_c_in_posix(process)
        assert LocalSocket.receive() == "Test succeed"
        psutil_process = psutil.Process(process.pid)
        assert psutil_process.wait() == 0
        # Reason: requires enhancing types-psutil
        assert not psutil_process.is_running()  # type: ignore
Example #17
    def test_sigterm(manager_queue: "queue.Queue[LogRecord]") -> None:
        """ProcessTaskPoolExecutor should shut down gracefully on SIGTERM."""
        process = Process(target=example_use_case_cancel_repost_process_id, kwargs={"queue_main": manager_queue})
        process.start()
        LocalSocket.receive()
        time.sleep(SECOND_SLEEP_FOR_TEST_SHORT)
        psutil_process = psutil.Process(process.pid)
        psutil_process.send_signal(SIGTERM)
        psutil_process.wait()
        # Reason: requires enhancing types-psutil
        assert not psutil_process.is_running()  # type: ignore
        assert_graceful_shutdown(manager_queue)
Example #18
    def __enter__(self):
        self.listener_ = Listener(self.address_, family='AF_INET')
        for child in range(self.nb_children_):
            process = Process(target=creator,
                              args=(
                                  self.data_,
                                  self.models_,
                                  self.address_,
                              ))
            process.start()
            self.children_.append(process)
        return self
Example #19
    def start_up_app(self):
        self.show_splash_screen()

        # load db in separate process
        process_startup = Process(target=LucteriosRefreshAll)
        process_startup.start()

        while process_startup.is_alive():
            # print('updating')
            self.splash.update()

        self.remove_splash_screen()
Example #20
def do_sanity_multiprocess():
    names = ['A', 'B', 'C']
    procs = []
    # proc = Process(target=do_lite_function)
    # procs.append(proc)
    # proc.start()

    for count, name in enumerate(names, 1):
        print(count, name)
        proc = Process(target=do_lite_function, args=(name, ))
        procs.append(proc)
        proc.start()
Example #21
def test(args):
    model = Rockfish.load_from_checkpoint(checkpoint_path=args.checkpoint)
    model.freeze()

    test_ds = Fast5Data(args.test_path, args.recursive, args.reseg_path,
                        args.norm_method, args.motif, args.sample_size,
                        args.window)

    if args.n_workers > 0:
        test_dl = DataLoader(test_ds, batch_size=args.batch_size,
                             num_workers=args.n_workers, pin_memory=True,
                             worker_init_fn=worker_init_fn,
                             prefetch_factor=args.prefetch_factor)
    else:
        test_dl = DataLoader(test_ds, batch_size=args.batch_size,
                             num_workers=args.n_workers, pin_memory=True,
                             worker_init_fn=worker_init_fn)

    n_gpus = torch.cuda.device_count()
    if n_gpus > 0:
        model = DataParallel(model, device_ids=list(range(n_gpus)))
        model.to(f'cuda:{model.device_ids[0]}')

    model.eval()

    output_queue = mp.Queue()
    consumers = []
    abs_out_path = str(args.out_path.absolute())
    for i in range(args.output_workers):
        worker_path = TMP_PATH.format(final=abs_out_path, id=i)
        process = Process(target=output_worker, args=(worker_path, output_queue))
        process.start()

        consumers.append(process)

    with torch.no_grad():
        for info, sig, k_mer in tqdm(test_dl):
            pred = model(sig, k_mer).squeeze(-1)
            pred = pred.cpu().numpy()

            output_queue.put((info, pred))

    for _ in range(len(consumers)):
        output_queue.put(None)
    for c in consumers:
        c.join()

    with args.out_path.open('w') as out:
        for i in range(len(consumers)):
            worker_path = TMP_PATH.format(final=abs_out_path, id=i)
            with open(worker_path, 'r') as tmp_f:
                out.write(tmp_f.read())
            os.remove(worker_path)
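The None values put on output_queue above are sentinels: each consumer exits when it receives one, so one sentinel is queued per worker. A standalone sketch of that shutdown pattern (consumer is a stand-in, not the original output_worker):

from multiprocessing import Process, Queue

def consumer(q):
    while True:
        item = q.get()
        if item is None:  # sentinel: no more work is coming
            break
        print('processing', item)

if __name__ == '__main__':
    q = Queue()
    workers = [Process(target=consumer, args=(q,)) for _ in range(2)]
    for w in workers:
        w.start()
    for i in range(10):
        q.put(i)
    for _ in workers:
        q.put(None)  # one sentinel per consumer
    for w in workers:
        w.join()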
Example #22
    def __init__(self,
                 coroutine: Callable = None,
                 *args,
                 daemon: bool = False,
                 target_override: Callable = None,
                 **kwargs):
        if not asyncio.iscoroutinefunction(coroutine):
            raise ValueError("target must be a coroutine function")

        self.aio_process = Process(target=target_override
                                   or partial(AIOProcess.run_async, coroutine),
                                   args=args,
                                   kwargs=kwargs,
                                   daemon=daemon)
Example #23
    def terminate(cls, path_file_input: Path, path_file_output: Path) -> None:
        """Test process termination."""
        process = Process(target=cls.report_raises_cencelled_error, args=(path_file_input, path_file_output))
        process.start()
        assert LocalSocket.receive() == "Ready"
        time.sleep(SECOND_SLEEP_FOR_TEST_KEYBOARD_INTERRUPT_CTRL_C_POSIX)
        psutil_process = psutil.Process(process.pid)
        psutil_process.terminate()
        # The return code seems to change depending on how the tests are run:
        # run this test only: -15
        # run all tests: 1
        assert psutil_process.wait() in [-15, 1]
        # Reason: requires enhancing types-psutil
        assert not psutil_process.is_running()  # type: ignore
Example #24
def create_app():
    app = Flask(__name__)

    logger.info("Server starting...")

    from app.routes import bp
    app.register_blueprint(bp)

    starting_port = 50052
    for port in range(starting_port, starting_port + 8):
        process = Process(target=serve_runner, args=(port, ))
        process.start()

    return app
Example #25
class ActorRunner:
    def __init__(self, actors: Iterable[Actor]):
        self._process = Process(target=self._run, args=(actors, ))
        self._process.start()

    def _run(self, actors: Iterable[Actor]):
        asyncio.run(self._loop(actors))

    async def _loop(self, actors: Iterable[Actor]):
        loop = get_running_loop()
        for actor in actors:
            loop.create_task(actor.runner())
        while loop.is_running():
            await asyncio.sleep(1)
Example #26
class SubprocessEnv(Env):
    def __init__(self, factory: Callable[[], Env], blocking: bool = True):
        self._blocking = blocking
        self._parent_conn, child_conn = Pipe()
        self._process = Process(target=self._start, args=(factory, child_conn))
        self._process.start()
        self.observation_space, self.action_space = self._parent_conn.recv()

    def _start(self, factory: Callable[[], Env], connection: Connection):
        env = factory()
        _ = env.reset()
        connection.send((env.observation_space, env.action_space))
        terminate = False
        while not terminate:
            command, kwargs = connection.recv()
            if command == 'render':
                rendering = env.render(**kwargs)
                connection.send(rendering)
            elif command == 'step':
                step = env.step(**kwargs)
                connection.send(step)
            elif command == 'reset':
                obs = env.reset(**kwargs)
                connection.send(obs)
            elif command == 'close':
                terminate = True
                connection.close()

    def step(self, action):
        self._parent_conn.send(('step', dict(action=action)))
        return self._return()

    def reset(self, **kwargs):
        self._parent_conn.send(('reset', kwargs))
        return self._return()

    def render(self, mode: str = 'human', **kwargs):
        self._parent_conn.send(('render', {'mode': mode, **kwargs}))
        return self._return()

    def close(self):
        self._parent_conn.send(('close', False))
        self._parent_conn.close()

    def _return(self) -> Any:
        if self._blocking:
            return self._parent_conn.recv()
        else:
            return lambda: self._parent_conn.recv()
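A sketch of driving the command protocol above, assuming a hypothetical make_env factory; each method call sends a (command, kwargs) tuple over the pipe, and the worker loop replies:

import gym

def make_env() -> gym.Env:
    return gym.make('CartPole-v1')  # hypothetical environment id

if __name__ == '__main__':
    env = SubprocessEnv(make_env)  # spawns the worker, receives the spaces
    obs = env.reset()
    step_result = env.step(env.action_space.sample())
    env.close()                    # sends 'close' and closes the pipe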
Example #27
def setup_mock_web_api_server(test: TestCase):
    if get_mock_server_mode() == "threading":
        test.server_started = threading.Event()
        test.thread = MockServerThread(test)
        test.thread.start()
        test.server_started.wait()
    else:
        # start a mock server as another process
        target = MockServerProcessTarget()
        test.server_url = "http://localhost:8888"
        test.host, test.port = "localhost", 8888
        test.process = Process(target=target.run, daemon=True)
        test.process.start()

        time.sleep(0.1)

        # start a thread in the current process
        # this thread fetches mock_received_requests from the remote process
        test.monitor_thread = MonitorThread(test)
        test.monitor_thread.start()
        count = 0
        # wait until the first successful data retrieval
        while test.mock_received_requests is None:
            time.sleep(0.01)
            count += 1
            if count >= 100:
                raise Exception("The mock server is not yet running!")
Example #28
def start_validation(request):
    print(request.data)
    ser = NewValidationSerializer(data=request.data)
    ser.is_valid(raise_exception=True)
    new_val_run = ser.save(user=request.user)
    new_val_run.user = request.user
    new_val_run.save()

    # need to close all db connections before forking, see
    # https://stackoverflow.com/questions/8242837/django-multiprocessing-and-database-connections/10684672#10684672
    connections.close_all()

    p = Process(target=run_validation, kwargs={"validation_id": new_val_run.id})
    p.start()
    serializer = ValidationRunSerializer(new_val_run)
    return JsonResponse(serializer.data, status=status.HTTP_200_OK, safe=False)
Example #29
def spider_process(name,
                   keyword=None,
                   item_num=None,
                   url=None,
                   spider=None,
                   result=None):
    custom_settings = get_config(name)
    if not spider:
        spider = custom_settings.get('spider', 'aCrawler')
    project_settings = get_project_settings()
    settings = dict(project_settings.copy())
    settings.update(custom_settings.get('settings'))
    print(item_num)
    if item_num is not None:
        # In keyword mode, CONCURRENT_REQUESTS caps the number of simultaneous
        # requests, so no other requests are still running asynchronously when
        # the spider is stopped, though this may make crawling slower.
        settings["CONCURRENT_REQUESTS"] = 1
    print(settings)
    process = Process(target=crawl,
                      kwargs={
                          'settings': settings,
                          'spider': spider,
                          'name': name,
                          'custom_settings': custom_settings,
                          'keyword': keyword,
                          'item_num': item_num,
                          'url': url,
                          'result': result
                      })
    return process
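Unlike most examples here, spider_process returns the Process without starting it, so the caller controls the lifecycle. A sketch of the expected call site ('news' is a hypothetical config name):

p = spider_process('news', keyword='python')
p.start()
p.join()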
Example #30
    def start(self, context: ExecutionContext):
        """
        Start multiple processes or threads with the worker function as a target.

        :param context: execution context
        :type context: ExecutionContext
        :return:
        """
        target_function = worker_function
        if self.use_savers:
            self.save(context, full_dump=True)
            target_function = worker_function

        self.workers = []
        for _, worker_arguments in zip(range(self.n_workers),
                                       self.additional_worker_arguments):
            if self.use_threading:
                p = Thread(target=target_function,
                           args=(self, context, self.use_savers,
                                 worker_arguments))
            else:
                p = Process(target=target_function,
                            args=(self, context, self.use_savers,
                                  worker_arguments))

            p.daemon = True
            p.start()
            self.workers.append(p)
Example #31
def initiate_new_training(workspace_id: ObjectId,
                          training_config_in_train: TrainingConfigInTrain):
    config = parse_config_for_training(training_config_in_train)
    data_set_manager = DataSetManager(workspace_id, WorkspaceDataSource())
    trainer = Trainer(training_config=config,
                      data_set_manager=data_set_manager,
                      create_db=create_sync_db)
    Process(target=trainer.train).start()
Example #32
    def __init__(self, env_fn: Callable[[], gym.Env],
                 share_memory=False) -> None:
        super().__init__(env_fn)
        self.parent_remote, self.child_remote = Pipe()
        self.share_memory = share_memory
        self.buffer = None
        if self.share_memory:
            dummy = env_fn()
            obs_space = dummy.observation_space
            dummy.close()
            del dummy
            self.buffer = _setup_buf(obs_space)
        args = (self.parent_remote, self.child_remote,
                CloudpickleWrapper(env_fn), self.buffer)
        self.process = Process(target=_worker, args=args, daemon=True)
        self.process.start()
        self.child_remote.close()
Example #33
@pytest.fixture
def dashboard():
    # This fixture guarantees the proper termination of all spawned subprocesses
    # after the tests.
    dashboard = Process(target=run_detection)
    dashboard.start()
    yield
    for child in psutil.Process(dashboard.pid).children(recursive=True):
        child.kill()
    dashboard.terminate()
    dashboard.join()
Example #34
    def get_data_from_sn(self, search_list, database_name):

        if database_name == 'neo':
            sn_node = graph.find_one("SN", "name", "SOCIALNETWORKS")
            twitter_node = graph.merge_one("Twitter", "name", "TWITTER")
            sn_has_twitter = Relationship(sn_node, "HAS", twitter_node)
            graph.create_unique(sn_has_twitter)

        twitter_searcher = TwitterSearcher()
        twitter_searcher_process = Process(target=twitter_searcher.run_twitter_search, args=(search_list, database_name,))

        twitter_streamer = TwitterStreamer()
        twitter_streamer_process = Process(target=twitter_streamer.run_twitter_stream, args=(search_list, database_name,))

        twitter_searcher_process.start()
        twitter_streamer_process.start()
        twitter_streamer_process.join()
        twitter_searcher_process.join()
Example #35
    def setUp(self):
        self.test_directory = os.path.dirname(os.path.realpath(__file__))
        self.test_config = os.path.join(self.test_directory,
                                        "packager_config.cfg")
        self.packager_config = PackagerConfig(self.test_config)
        """Spin up a server in a seperate process"""
        def start_test_server():
            app = Orchestrator(self.packager_config)
            from werkzeug.serving import run_simple
            run_simple('127.0.0.1', 5000, app)

        self.p = Process(target=start_test_server, daemon=True)
        self.p.start()
Example #36
    def setUp(self):
        self.test_file = TestFile(settings.test_file_size)

        self.original_data = self.test_file.get_content()

        self.ftp_root = self.test_file.path
        self.remote_filename = uuid.uuid4().hex

        tuned_server, self.port = server.get_tuned_server(self.ftp_root)

        def server_func():
            tuned_server.serve_forever(handle_exit=True)

        self.server_process = Process(target=server_func)
        """:type: Process"""
        self.server_process.start()
Example #37
class TestOrchestrator(BaseTestCase):

    def setUp(self):
        self.test_directory = os.path.dirname(os.path.realpath(__file__))
        self.test_config = os.path.join(self.test_directory,
                                        "packager_config.cfg")
        self.packager_config = PackagerConfig(self.test_config)
        """Spin up a server in a seperate process"""
        def start_test_server():
            app = Orchestrator(self.packager_config)
            from werkzeug.serving import run_simple
            run_simple('127.0.0.1', 5000, app)

        self.p = Process(target=start_test_server, daemon=True)
        self.p.start()

    def tearDown(self):
        """Make sure we tear down the process properly at the end"""
        self.p.terminate()

    def testOrchestratorAcceptingInputCorrectly(self):
        @exponential_back_off
        def wait_for_process():
            return requests.get("http://127.0.0.1:5000/health")

        wait_seconds = 1
        print("await process spin up for %d seconds" % wait_seconds)
        time.sleep(wait_seconds)
        wait_for_process()

        with open(self.get_file('code'), 'r') as code, \
                open(self.get_file('tests'), 'r') as test:
            language = 'python'
            question_name = 'foo'
            code_payload = code.read()
            test_payload = test.read()
            payload = {
                'question_name': question_name,
                'language': language,
                'code': code_payload,
                'test': test_payload
            }

            # http:// required,
            # requests library needs protocol scheme to connect
            got_resp_raw = requests.post('http://127.0.0.1:5000/submit',
                                         data=payload)
            got_resp = json.loads(got_resp_raw.text)
            exp_data = {
                'language': language,
                'code': code_payload,
                'test': test_payload,
                'question_name': question_name
            }
            exp_response_message = 'not tested for in this testcase'
            exp_resp = {
                'execution_result': exp_response_message,
                'got_data': exp_data
            }

            self.assertEqual(exp_resp['got_data'], got_resp['got_data'])

            # we just need to check if the result contains success
            for testcase, test_result in got_resp['execution_result'].items():
                self.assertTrue(test_result['success'],
                                'testcase {} failure with message {}'
                                .format(testcase, test_result['message']))
Example #38
def show_info_on(a_topic):
    ''' Postconditions:
    1. a_topic is on the monitor
    2. The name of the module executing this is on the monitor
    3. If available, the ID of the process parent to this is on the monitor
    4. The ID of the process executing this is on the monitor
    '''
    print('Information on ' + a_topic)  # 1.
    print('Name of module executing this: ', __name__)  # 2.
    if hasattr(os, 'getppid'):  # 3: if available on this OS
        print('Parent process: ', os.getppid())
    print('ID of process executing this: ', os.getpid(), '\n')  # 4.

def say_hello(name):
    ''' Postconditions:
    1. = Postconditions of show_info_on('Say-hello process')
    2. "Hello <name>" is on the monitor
    '''
    show_info_on('Say-hello process')
    print('Hello ', name)

if __name__ == '__main__':
    ''' Postconditions:
    1. = Postconditions of show_info_on in (this) main process
    2. = Postconditions of show_info_on in a new process executing say_hello('Hugh Person')
    '''
    show_info_on('main line')
    p = Process(target=say_hello, args=('Hugh Person',))
    p.start()
    p.join()
Example #39
from multiprocessing import Process, Queue
import os, time, random

# Code executed by the writer process:
def write(q):
    print('Process to write: %s' % os.getpid())
    for value in ['A', 'B', 'C']:
        print('Put %s to queue...' % value)
        q.put(value)
        time.sleep(random.random())

# Code executed by the reader process:
def read(q):
    print('Process to read: %s' % os.getpid())
    while True:
        value = q.get(True)
        print('Get %s from queue.' % value)

if __name__=='__main__':
    # The parent process creates the Queue and passes it to each child:
    q = Queue()
    pw = Process(target=write, args=(q,))
    pr = Process(target=read, args=(q,))
    # Start the writer child pw:
    pw.start()
    # Start the reader child pr:
    pr.start()
    # Wait for pw to finish:
    pw.join()
    # pr runs an infinite loop and can never finish on its own; terminate it:
    pr.terminate()
Example #40
    def process_paid_order(self, order):
        self.logger.info(order.order_number + " - process")
        self.ioc.new_order_service().process_paid_order(order)
        p = Process(target=self.inform_customer, args=(self.ioc, order,))
        p.start()
        p.join()
Example #41
class TestRemoteSession(TestCase):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.server_process = None
        """:type: Process"""

    def setUp(self):
        self.test_file = TestFile(settings.test_file_size)

        self.original_data = self.test_file.get_content()

        self.ftp_root = self.test_file.path
        self.remote_filename = uuid.uuid4().hex

        tuned_server, self.port = server.get_tuned_server(self.ftp_root)

        def server_func():
            tuned_server.serve_forever(handle_exit=True)

        self.server_process = Process(target=server_func)
        """:type: Process"""
        self.server_process.start()

    def get_connected_client_in_binary_mode(self):
        client = Client()
        client.connect('localhost', self.port)
        client.login(settings.ftp_user, settings.ftp_pass)
        client.type('I')  # Binary mode ('I'mage)
        return client

    def tearDown(self):
        while self.server_process.is_alive():
            # Send SIGINT to the process; pyftpd knows how to handle it and shuts down cleanly
            os.kill(self.server_process.pid, signal.SIGINT)
            self.server_process.join(timeout=1)

        unlink(self.test_file.full_filename)

    def test_receive_file(self):
        """
        Test that the client receives data, pre-generated and saved to disk,
        over a connection to the local server.
        """
        client = self.get_connected_client_in_binary_mode()
        code, rest, data = client.retr(self.test_file.filename)
        q_code, q_rest = client.quit()

        self.assertEqual(code, 226)
        self.assertTrue(self.original_data == data)

    def test_send_file(self):
        """
        Test sending a file from the client to the local server and comparing
        the contents of the file on disk with the data that was sent.
        """
        client = self.get_connected_client_in_binary_mode()
        client.stor(self.remote_filename, self.original_data)

        with open(os.path.join(self.ftp_root, self.remote_filename), 'rb') as f:
            stored_file_content = f.read()

        code, rest = client.quit()

        self.assertTrue(self.original_data == stored_file_content)
Example #42
    def get_data_from_sn(self, search_list, database_name):

        if database_name == 'neo':
            sn_node = graph.find_one("SN", "name", "SOCIALNETWORKS")
            facebook_node = graph.merge_one("Facebook", "name", "FACEBOOK")
            sn_has_facebook = Relationship(sn_node, "HAS", facebook_node)
            graph.create_unique(sn_has_facebook)


        event_searcher = EventSearcher()
        event_process = Process(target=event_searcher.run_facebook_events, args=(search_list, database_name,))

        group_searcher = GroupSearcher()
        group_process = Process(target=group_searcher.run_facebook_groups, args=(search_list, database_name,))

        page_searcher = PageSearcher()
        page_process = Process(target=page_searcher.run_facebook_pages, args=(search_list, database_name,))

        event_process.start()
        group_process.start()
        page_process.start()

        event_process.join()
        group_process.join()
        page_process.join()
Example #43
def start():
    builder = cluster_crawler.ClusterCrawler()
    start_block_id = int(sys.argv[1])
    block_id = start_block_id
    process = None
    try:
        while builder.crawl_block(block_id):
            print("Block %d crawled" % block_id)

            if block_id - start_block_id > 0 and (block_id - start_block_id) % settings.block_crawling_limit == 0:
                builder.network_graph.check_integrity()
                while process is not None and process.is_alive():
                    print("Waiting for insertion thread to complete...")
                    process.join()

                if process is not None and process.exitcode > 0:  # error
                    raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
                process = Process(target=builder.network_graph.synchronize_mongo_db)
                process.start()
                # Start a new graph while the other graph's data is being inserted.
                builder.network_graph = cluster_network.ClusterNetwork(settings.db_server, settings.db_port)

            if process is not None and not process.is_alive() and process.exitcode > 0:  # error
                raise Exception("Errorcode %d in DB Sync Thread, aborting" % process.exitcode)
            block_id += 1

        # Finished crawling; flush the rest to the DB.
        # Wait for any previous DB sync to finish.
        while process is not None and process.is_alive():
            print("Waiting for insertion thread to complete...")
            process.join()

        # Sync the rest.
        process = Process(target=builder.network_graph.synchronize_mongo_db)
        process.start()
        process.join()

        # DONE!

    # For debugging purposes.
    except:
        input("An exception will be raised ")
        raise
Example #44
    '''
    for i in range(0, n):
        for j in range(0, n):
            # end="" makes it so it does not print a newline
            print(matrix[i][j], end="")
        print()
    print('end matrix')
    '''
    # declare variables in shared memory
    Smallest = Value('i', n)
    TotalCount = Value('i', 0)
    # get break down of how many rows per process
    size = int(n / 4)
    # take time
    start_time = time.time()
    # create each process
    p0 = Process(target=subMatrixSmallestCount, args=(matrix, 0, size, n, Smallest, TotalCount, 1))
    p1 = Process(target=subMatrixSmallestCount, args=(matrix, 1, size, n, Smallest, TotalCount, 2))
    p2 = Process(target=subMatrixSmallestCount, args=(matrix, 2, size, n, Smallest, TotalCount, 3))
    p3 = Process(target=subMatrixSmallestCount, args=(matrix, 3, size, n, Smallest, TotalCount, 4))
    # start processes
    p0.start()
    p1.start()
    p2.start()
    p3.start()
    # wait for processes to finish
    p0.join()
    p1.join()
    p2.join()
    p3.join()

    print('Smallest value: {}'.format(Smallest.value))
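Value('i', ...) above gives the processes shared integers, but a read-modify-write like TotalCount.value += 1 is not atomic across processes; the usual guard (a sketch, not part of the original) is the value's built-in lock:

from multiprocessing import Process, Value

def bump(counter):
    for _ in range(1000):
        with counter.get_lock():  # serialize the read-modify-write
            counter.value += 1

if __name__ == '__main__':
    total = Value('i', 0)
    workers = [Process(target=bump, args=(total,)) for _ in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(total.value)  # 4000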