Example #1
    def test_process_queue_consumer_timeout_join_if_task_not_done(self):
        with ProcessPoolExecutor(max_workers=1) as executor:
            manager = Manager()
            exit_event = manager.Event()
            work_queue = manager.Queue()
            # make sure we take time, but don't timeout on our own
            # the join should cause the timeout
            consumer = ProcessQueueConsumer(
                lambda item: time.sleep(0.5),
                work_queue,
                exit_event,
                2,
                1
            )
            work_queue.put("sleep")
            future = executor.submit(consumer.run)
            time.sleep(0.1)
            self.assertFalse(future.done())
            # have the worker finish
            work_queue.put(None)
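ProcessQueueConsumer itself is not part of this example. A minimal sketch of the consumer loop the test implies, assuming the two trailing constructor arguments are a queue-get timeout and a join timeout in seconds (both assumptions):

import queue


class ProcessQueueConsumer:
    """Hypothetical consumer: drain work_queue until a None sentinel or exit_event."""

    def __init__(self, handler, work_queue, exit_event, queue_timeout, join_timeout):
        self.handler = handler
        self.work_queue = work_queue
        self.exit_event = exit_event
        self.queue_timeout = queue_timeout   # assumed meaning of the 4th argument
        self.join_timeout = join_timeout     # assumed meaning of the 5th argument

    def run(self):
        while not self.exit_event.is_set():
            try:
                item = self.work_queue.get(timeout=self.queue_timeout)
            except queue.Empty:
                continue
            if item is None:                 # sentinel: producer is done
                return
            self.handler(item)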
Example #2
    def get_degrees(self, start_uri=GOOD_MEN, target_uri=KEVIN_BACON):
        validation = self.validate_urls([start_uri, target_uri])

        worker_pool = Pool(cpu_count())
        worker_manager = Manager()
        event = worker_manager.Event()

        self.q.put([start_uri])
        self.qsize.put(1)

        # Start up all the workers in the pool
        for i in range(cpu_count()):
            worker_pool.apply_async(self.explore, (target_uri, event))

        event.wait()
        worker_pool.terminate()

        try:
            path = self.deg.get(timeout=self.TIMEOUT)
        except Empty:
            return [], -1
        degrees = len(path) - 1
        return path, degrees
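The explore worker submitted to the pool is not shown. A rough sketch of the search step it presumably performs; get_linked_uris is a hypothetical helper for extracting outgoing links:

    def explore(self, target_uri, event):
        """Hypothetical worker: expand candidate paths until target_uri is reached."""
        while not event.is_set():
            path = self.q.get()                        # next candidate path, e.g. [start_uri]
            for uri in get_linked_uris(path[-1]):      # hypothetical link extraction
                if uri == target_uri:
                    self.deg.put(path + [uri])         # publish the winning path
                    event.set()                        # unblocks event.wait() in get_degrees()
                    return
                self.q.put(path + [uri])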
Example #3
def run_prepare(file_paths, num_processes):
    """
    Run PrePARE on each file in the submission. Any failures are reported
    as an error with the logging and an exception is raised at the end of
    processing if one or more files has failed.

    :param list file_paths: The paths of the files in the submission's
        directory.
    :param int num_processes: The number of processes to use in parallel.
    :raises SubmissionError: at the end of checking if one or more files has
    failed PrePARE's checks.
    """
    logger.debug('Starting PrePARE on {} files'.format(len(file_paths)))
    jobs = []
    manager = Manager()
    params = manager.Queue()
    file_failed = manager.Event()
    if num_processes != 1:
        for i in range(num_processes):
            p = Process(target=_run_prepare, args=(params, file_failed))
            jobs.append(p)
            p.start()

    for item in itertools.chain(file_paths, (None, ) * num_processes):
        params.put(item)

    if num_processes == 1:
        _run_prepare(params, file_failed)
    else:
        for j in jobs:
            j.join()

    if file_failed.is_set():
        logger.error('Not all files passed PrePARE')
        raise SubmissionError()

    logger.debug('All files successfully checked by PrePARE')
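The worker function _run_prepare is not shown here. Based on the queue/sentinel protocol above (one None per worker, a shared failure event), it presumably looks roughly like this; check_file stands in for the actual PrePARE invocation:

def _run_prepare(params, file_failed):
    """Hypothetical worker: consume file paths until the None sentinel appears."""
    while True:
        file_path = params.get()
        if file_path is None:          # one sentinel per worker is queued by run_prepare()
            break
        try:
            if not check_file(file_path):      # hypothetical wrapper around PrePARE
                file_failed.set()
        except Exception:
            logger.exception('PrePARE failed on {}'.format(file_path))
            file_failed.set()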
Example #4
class CControlCenter(CProcess):
    const_command_queue_process_result_empty = 1
    const_command_queue_process_result_notify_terminal = 2

    __shared_control_center_info_locker__: Lock = None
    __shared_control_center_info__: dict = None
    __command_queue__: Queue = None

    __control_center_manager__: Manager = None
    __control_center_params: dict = None
    __control_center_objects__: dict = None
    __control_center_objects_locker__: Lock = None

    __sentinel_manager__: Manager = None
    __sentinel_process__: Process = None
    __sentinel_queue__: Queue = None
    __sentinel_stop_event__: Event = None

    def __init__(self, cmd_queue, shared_control_center_info_locker,
                 shared_control_center_info):
        super().__init__()
        self.__command_queue__ = cmd_queue
        self.__shared_control_center_info_locker__ = shared_control_center_info_locker
        self.__shared_control_center_info__ = shared_control_center_info

    def wait_child(self, signum, frame):
        CLogger().info('Received SIGCHLD signal')
        try:
            while True:
                # -1 means wait for any child process
                # os.WNOHANG means return immediately without blocking if no
                # child has an exit status waiting to be collected
                cpid, status = os.waitpid(-1, os.WNOHANG)
                if cpid == 0:
                    CLogger().info('No more child processes to reap.')
                    break
                exitcode = status >> 8
                CLogger().info('Child process {0} has exited with code {1}'.format(cpid, exitcode))
        except OSError as e:
            if e.errno == errno.ECHILD:
                CLogger().info('The current process has no child processes left to wait for.')
            else:
                raise
        CLogger().info('Finished handling SIGCHLD...')

    def run(self):
        if not CUtils.equal_ignore_case(CSys.get_platform_system_name(),
                                        self.OS_Windows):
            base_signal.signal(base_signal.SIGCHLD, self.wait_child)

        CLogger().info('Control center process [{0}] starting...'.format(self.pid))
        # only at this point are we running inside the control center's own process
        host_settings_dict = CUtils.dict_value_by_name(
            self.__shared_control_center_info__, self.NAME_CMD_SETTINGS, None)
        settings.application.load_obj(host_settings_dict)
        settings.application.init_sys_path()

        CLogger().info('Control center process [{0}] starting the sentinel watchdog process...'.format(self.pid))
        self.__control_center_manager__ = Manager()
        self.__control_center_params = self.__control_center_manager__.dict()
        self.__control_center_objects__ = self.__control_center_manager__.dict()
        self.__control_center_objects_locker__ = self.__control_center_manager__.Lock()

        self.__sentinel_manager__ = Manager()
        self.__sentinel_queue__ = self.__sentinel_manager__.Queue()
        self.__sentinel_stop_event__ = self.__sentinel_manager__.Event()

        self.__control_center_params[
            self.NAME_CMD_SETTINGS] = host_settings_dict

        self.__sentinel_process__ = CSentinel(
            self.__control_center_objects_locker__,
            self.__control_center_objects__, self.__sentinel_stop_event__,
            self.__sentinel_queue__, self.__control_center_params)
        self.__sentinel_process__.daemon = True
        self.__sentinel_process__.start()

        while True:
            CLogger().info('Control center process [{0}] checking the incoming command queue...'.format(self.pid))

            if self.process_queue_command() == self.const_command_queue_process_result_notify_terminal:
                break

            CLogger().info('Control center process [{0}] checking messages reported by the sentinel...'.format(self.pid))
            self.process_queue_sentinel()

            CLogger().info('Control center process [{0}] synchronizing its published data...'.format(self.pid))
            # synchronize the data the control center publishes externally
            self.sync_shared_control_center_info()
            # take a short rest
            time.sleep(3)

        CLogger().info('Control center process [{0}] starting its shutdown preparation; this can take a long time...'.format(self.pid))
        # start the shutdown work
        self.before_stop()

    def process_queue_command(self) -> int:
        """
        Process every task in the command queue
        :return:
        const_command_queue_process_result_empty: the queue has been fully processed and is now empty
        const_command_queue_process_result_notify_terminal: a shutdown command was found while processing the queue and has been executed
        """
        while True:
            try:
                command = self.__command_queue__.get(False)
            except Exception:
                # queue.Empty: nothing left to process
                CLogger().info('Control center process [{0}] has processed the whole command queue and will return...'.format(self.pid))
                return self.const_command_queue_process_result_empty
            else:
                # exit the loop once a shutdown message is received
                if command is None:
                    CLogger().info('Control center process [{0}] received a shutdown notification and will exit...'.format(self.pid))
                    return self.const_command_queue_process_result_notify_terminal
                else:
                    # a normal message was received, start processing it
                    CLogger().info('Control center process [{0}] received a normal notification and will start processing it...'.format(self.pid))
                    self.process_command(command)

    def process_queue_sentinel(self):
        """
        Process the tasks in the message queue sent by the sentinel
        :return:
        """
        while True:
            try:
                sentinel_message = self.__sentinel_queue__.get(False)
            except Exception:
                # queue.Empty: nothing left to process
                CLogger().info('Control center process [{0}] has processed all sentinel messages and will return...'.format(self.pid))
                return
            else:
                # exit the loop once a stop message is received
                if sentinel_message is not None:
                    # a normal message was received, start processing it
                    CLogger().info('Control center process [{0}] received a sentinel message and will start processing it...'.format(self.pid))
                    self.process_sentinel_message(sentinel_message)
                else:
                    CLogger().info('Control center process [{0}] received a stop message from the sentinel and will return...'.format(self.pid))
                    return

    def process_command(self, command):
        CLogger().info('Control center process [{0}] starts processing command {1}...'.format(self.pid, command))
        cmd_type = command.get(self.NAME_CMD_COMMAND, self.CMD_START)
        cmd_title = command.get(self.NAME_CMD_TITLE, '')

        if CUtils.equal_ignore_case(cmd_type, self.CMD_START):
            CLogger().info('Control center process [{0}] received a command to start scheduling...'.format(self.pid))
            self.command_start(command)
        elif CUtils.equal_ignore_case(cmd_type, self.CMD_STOP):
            CLogger().info('Control center process [{0}] received a command to stop scheduling...'.format(self.pid))
            self.command_stop(command)
        elif CUtils.equal_ignore_case(cmd_type, self.CMD_FORCE_STOP):
            CLogger().info('Control center process [{0}] received a command to force-stop scheduling...'.format(self.pid))
            self.command_force_stop(command)

    def process_sentinel_message(self, sentinel_message):
        """
        Process a message reported by the child sentinel process
        :param sentinel_message:
        :return:
        """
        cmd_id = sentinel_message.get(self.NAME_CMD_ID)
        cmd_title = sentinel_message.get(self.NAME_CMD_TITLE)
        if cmd_id is None:
            return

        CLogger().info('The sentinel process found that a process of schedule {0}.{1} died unexpectedly!'.format(cmd_id, cmd_title))

    def sync_shared_control_center_info(self):
        pass

    def command_start(self, command):
        cmd_id = command.get(self.NAME_CMD_ID, '')
        cmd_title = command.get(self.NAME_CMD_TITLE, '')
        cmd_algorithm = command.get(self.NAME_CMD_ALGORITHM, '')
        cmd_trigger = command.get(self.NAME_CMD_TRIGGER, '')
        cmd_params = command.get(self.NAME_CMD_PARAMS, '')
        CLogger().info('Scheduling parameters of schedule {0}.{1}.{2} are [{3}]...'.format(
            cmd_id, cmd_title, cmd_algorithm, cmd_params))

        cmd_parallel_count = super().params_value_by_name(
            cmd_params, self.Name_Parallel_Count, 1)
        CLogger().info('Parallel worker count of schedule {0}.{1}.{2} is {3}...'.format(
            cmd_id, cmd_title, cmd_algorithm, cmd_parallel_count))

        if cmd_parallel_count <= 0:
            CLogger().info('Target parallel count of schedule {0}.{1}.{2} is 0, the system will stop the schedule instead...'.format(
                cmd_id, cmd_title, cmd_algorithm))
            self.command_stop(command)
            return

        control_center_object = self.__control_center_objects__.get(cmd_id)
        if control_center_object is not None:
            CLogger().info('The process pool of schedule {0}.{1}.{2} has already been started, nothing more to do...'.format(
                cmd_id, cmd_title, cmd_algorithm))
            return

        CLogger().info('Schedule {0}.{1}.{2} will be switched to {3} parallel processes...'.format(
            cmd_id, cmd_title, cmd_algorithm, cmd_parallel_count))

        params = self.__control_center_manager__.dict()

        params[self.NAME_CMD_ID] = cmd_id
        params[self.NAME_CMD_TITLE] = cmd_title
        params[self.NAME_CMD_ALGORITHM] = cmd_algorithm
        params[self.NAME_CMD_TRIGGER] = cmd_trigger
        params[self.NAME_CMD_PARAMS] = cmd_params
        params[self.NAME_CMD_SETTINGS] = settings.application.json_obj

        stop_event = self.__control_center_manager__.Event()
        subprocess_list = self.__control_center_manager__.list()
        control_center_object = dict()
        control_center_object[self.NAME_PARAMS] = params
        control_center_object[self.NAME_STOP_EVENT] = stop_event
        control_center_object[self.NAME_SUBPROCESS_LIST] = subprocess_list

        for i in range(cmd_parallel_count):
            proc = CWorker(stop_event, params)
            proc.start()
            subprocess_list.append(proc.pid)
            CLogger().info('Schedule {0}.{1}.{2} successfully started child process {3}...'.format(
                cmd_id, cmd_title, cmd_algorithm, proc.pid))

        # record it in the object dict!
        self.__control_center_objects__[cmd_id] = control_center_object

        CLogger().info('Schedule {0}.{1}.{2} successfully started {3} parallel processes...'.format(
            cmd_id, cmd_title, cmd_algorithm, cmd_parallel_count))

    def command_stop(self, command):
        """
        Handle an incoming stop command

        :param command:
        :return:
        """
        cmd_id = command.get(self.NAME_CMD_ID, '')
        cmd_title = command.get(self.NAME_CMD_TITLE, '')

        CLogger().info('Control center starts stopping schedule [{0}.{1}]...'.format(cmd_id, cmd_title))

        if self.__control_center_objects__ is None:
            CLogger().info('The control center object dict is None...')
            return

        if len(self.__control_center_objects__) == 0:
            CLogger().info('The control center object dict has no pool records...')
            return
        try:
            self.__control_center_objects_locker__.acquire()

            control_center_object = self.__control_center_objects__.get(cmd_id)
            if control_center_object is None:
                CLogger().warning('The process pool of schedule {0} no longer exists, nothing to stop...'.format(cmd_id))
                return

            # send the shutdown signal to every process in the pool!
            stop_event = control_center_object.get(self.NAME_STOP_EVENT, None)
            if stop_event is not None:
                stop_event.set()
                CLogger().info(
                    'The exit signal has been sent to every process in the pool of schedule {0}.{1}; checking and closing the pool is handled by the sentinel process...'.format(
                        cmd_id, cmd_title))
            else:
                CLogger().info(
                    'The exit event of the pool of schedule {0}.{1} is no longer valid; checking and closing the pool is handled by the sentinel process...'.format(
                        cmd_id, cmd_title))
        finally:
            self.__control_center_objects_locker__.release()
            CLogger().info('Control center has stopped schedule [{0}.{1}]'.format(cmd_id, cmd_title))

    def before_stop(self):
        CLogger().info('Control center starts cleaning up its schedules...')

        CLogger().info('First, stop the sentinel watchdog process...')
        self.__sentinel_stop_event__.set()

        CLogger().info('Waiting for the sentinel watchdog process to exit...')
        self.__sentinel_process__.join()
        CLogger().info('The sentinel watchdog process has stopped...')

        if self.__control_center_objects__ is None:
            CLogger().info('The control center object dict is None...')
            return

        if len(self.__control_center_objects__) == 0:
            CLogger().info('The control center object dict has no pool records...')
            return

        CLogger().info('Starting to process the process pools of {0} schedule objects...'.format(
            len(self.__control_center_objects__)))

        try:
            self.__control_center_objects_locker__.acquire()

            control_center_object_key_list = self.__control_center_objects__.keys()
            for control_center_object_key in control_center_object_key_list:
                CLogger().info(
                    'Extracted process pool {0}...'.format(control_center_object_key))
                control_center_object = self.__control_center_objects__.get(
                    control_center_object_key)
                if control_center_object is None:
                    CLogger().info('The pool object is None...')
                    self.__control_center_objects__.pop(
                        control_center_object_key)
                    continue

                command = control_center_object.get(self.NAME_PARAMS, None)
                CLogger().info('Extracted the parameter object...')

                if command is None:
                    CLogger().info('The parameter object is None...')
                    self.__control_center_objects__.pop(
                        control_center_object_key)
                    continue

                cmd_id = command.get(self.NAME_CMD_ID, '')
                cmd_title = command.get(self.NAME_CMD_TITLE, '')

                CLogger().info('Control center starts cleaning up all processes of schedule [{0}.{1}]...'.format(
                    cmd_id, cmd_title))

                if cmd_id == '':
                    continue

                # send the shutdown signal to every process in the pool!
                stop_event = control_center_object.get(self.NAME_STOP_EVENT,
                                                       None)
                if stop_event is None:
                    continue

                stop_event.set()
                CLogger().info('The exit signal has been sent to every process in the pool of schedule {0}.{1}...'.format(
                    cmd_id, cmd_title))

                # wait until the whole pool has exited
                while True:
                    CLogger().info('Checking whether every process in the pool of schedule {0}.{1} has exited...'.format(
                        cmd_id, cmd_title))
                    all_subprocess_closed = True
                    subprocess_list = control_center_object.get(
                        self.NAME_SUBPROCESS_LIST)
                    if subprocess_list is None:
                        break

                    for subproc_id in subprocess_list:
                        if CProcessUtils.process_id_exist(subproc_id):
                            CLogger().info(
                                'Process {2} in the pool of schedule {0}.{1} is still running...'.format(
                                    cmd_id, cmd_title, subproc_id))
                            all_subprocess_closed = False

                    if all_subprocess_closed:
                        CLogger().info('Every process in the pool of schedule {0}.{1} is gone...'.format(
                            cmd_id, cmd_title))
                        break
                    else:
                        time.sleep(1)
                        CLogger().info(
                            'The pool of schedule {0}.{1} still has running processes, waiting...'.format(
                                cmd_id, cmd_title))

                CLogger().info('Every process in the pool of schedule {0}.{1} has exited...'.format(
                    cmd_id, cmd_title))
                self.__control_center_objects__.pop(control_center_object_key)
        finally:
            self.__control_center_objects_locker__.release()
            CLogger().info('All schedule cleanup work is finished; the control center process will shut down...')

    def command_force_stop(self, command):
        """
        Handle an incoming force-stop command

        :param command:
        :return:
        """
        cmd_id = command.get(self.NAME_CMD_ID, '')
        cmd_title = command.get(self.NAME_CMD_TITLE, '')

        CLogger().info('Control center starts stopping schedule [{0}.{1}]...'.format(cmd_id, cmd_title))

        if self.__control_center_objects__ is None:
            CLogger().info('The control center object dict is None...')
            return

        if len(self.__control_center_objects__) == 0:
            CLogger().info('The control center object dict has no pool records...')
            return
        try:
            self.__control_center_objects_locker__.acquire()

            control_center_object = self.__control_center_objects__.get(cmd_id)
            if control_center_object is None:
                CLogger().warning('The process pool of schedule {0} no longer exists, nothing to stop...'.format(cmd_id))
                return

            # send the shutdown signal to every process in the pool!
            stop_event = control_center_object.get(self.NAME_STOP_EVENT, None)
            if stop_event is not None:
                stop_event.set()
                CLogger().info('The exit signal has been sent to every process in the pool of schedule {0}.{1}...'.format(
                    cmd_id, cmd_title))

            # kill every child process in the pool directly
            subprocess_list = control_center_object.get(
                self.NAME_SUBPROCESS_LIST)
            if subprocess_list is not None:
                for subproc_id in subprocess_list:
                    CProcessUtils.process_kill(subproc_id)

            CLogger().info('Every process in the pool of schedule {0}.{1} has exited...'.format(
                cmd_id, cmd_title))
            self.__control_center_objects__.pop(cmd_id)
        finally:
            self.__control_center_objects_locker__.release()
            CLogger().info('Control center has stopped schedule [{0}.{1}]'.format(cmd_id, cmd_title))
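CWorker is not included in this example. The stop-event protocol used by command_start() and command_stop() implies a worker loop of roughly this shape; process_one_task is a hypothetical placeholder for the scheduled algorithm:

import time
from multiprocessing import Process


class CWorker(Process):
    """Hypothetical worker: run scheduled work until the shared stop_event is set."""

    def __init__(self, stop_event, params):
        super().__init__()
        self.stop_event = stop_event
        self.params = params

    def run(self):
        while not self.stop_event.is_set():
            process_one_task(self.params)   # hypothetical: one unit of scheduled work
            time.sleep(1)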
Example #5
    def __init__(self,
                 client,
                 group,
                 topic,
                 auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 num_procs=1,
                 partitions_per_proc=0,
                 **simple_consumer_options):

        # Initiate the base consumer class
        super(MultiProcessConsumer,
              self).__init__(client,
                             group,
                             topic,
                             partitions=None,
                             auto_commit=auto_commit,
                             auto_commit_every_n=auto_commit_every_n,
                             auto_commit_every_t=auto_commit_every_t)

        # Variables for managing and controlling the data flow from
        # consumer child process to master
        manager = MPManager()
        self.queue = manager.Queue(
            1024)  # Child consumers dump messages into this
        self.events = Events(
            start=manager.Event(),  # Indicates the consumers to start fetch
            exit=manager.Event(),  # Requests the consumers to shutdown
            pause=manager.Event())  # Requests the consumers to pause fetch
        self.size = manager.Value(
            'i', 0)  # Indicator of number of messages to fetch

        # dict.keys() returns a view in py3 + it's not a thread-safe operation
        # http://blog.labix.org/2008/06/27/watch-out-for-listdictkeys-in-python-3
        # It's safer to copy dict as it only runs during the init.
        partitions = list(self.offsets.copy().keys())

        # By default, start one consumer process for all partitions
        # The logic below ensures that
        # * we do not cross the num_procs limit
        # * we have an even distribution of partitions among processes

        if partitions_per_proc:
            num_procs = len(partitions) // partitions_per_proc
            if num_procs * partitions_per_proc < len(partitions):
                num_procs += 1

        # The final set of chunks
        chunks = [partitions[proc::num_procs] for proc in range(num_procs)]

        self.procs = []
        for chunk in chunks:
            options = {'partitions': list(chunk)}
            if simple_consumer_options:
                simple_consumer_options.pop('partitions', None)
                options.update(simple_consumer_options)

            args = (client.copy(), self.group, self.topic, self.queue,
                    self.size, self.events)
            proc = Process(target=_mp_consume, args=args, kwargs=options)
            proc.daemon = True
            proc.start()
            self.procs.append(proc)
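The child-process target _mp_consume is not shown. Judging from the comments above, its loop looks roughly like the following sketch; the SimpleConsumer/get_messages calls stand in for the legacy kafka-python consumer API and should be read as assumptions:

def _mp_consume(client, group, topic, queue, size, events, **consumer_options):
    # Hypothetical child loop: fetch while 'start' is set, honour 'pause' and 'exit'.
    consumer = SimpleConsumer(client, group, topic, **consumer_options)
    while not events.exit.is_set():
        if not events.start.wait(timeout=1):   # the parent has not asked for data yet
            continue
        if events.pause.is_set():
            continue
        wanted = size.value or 1               # number of messages the parent asked for
        for message in consumer.get_messages(count=wanted, block=False):
            queue.put(message)                 # hand fetched messages back to the parent
    consumer.stop()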
Example #6
class ReplicatorManager(Replicator):
    def __init__(self,
                 name,
                 ip,
                 port,
                 log_server_path,
                 log_server_lock,
                 n_replicators=3):
        super().__init__(name=name,
                         ip=ip,
                         port=port,
                         log_server_path=log_server_path,
                         log_server_lock=log_server_lock,
                         server=False)
        # Replicators
        self.replicators = []
        for i in range(1, n_replicators + 1):
            r = Replicator(name=f'Replicator_{i}',
                           ip=self.ip,
                           port=self.port + i,
                           log_server_path=self.log_server_path,
                           log_server_lock=self.log_server_lock)
            self.replicators.append(r)
            r.start()

        # Variables
        #self.request_queue = Queue()
        self.manager = Manager()
        self.request_queue = self.manager.list()
        self.request_answer = self.manager.dict()

        # URL
        self.add_endpoint(endpoint='/',
                          handler=self.get_request,
                          methods=['POST'])

        # Process to solve requests
        self.solver_process = Process(target=self.solve_request)
        self.solver_process.start()
        return

    def run(self):
        super().run()
        # Kill child processes
        self.solver_process.terminate()
        for r in self.replicators:
            r.terminate()
        return

    def get_request(self):
        data, status = self.get_data(keys=['request', 'timestamp', 'send_id'])
        if status != 200:
            self.log(data)
            return data, status
        event_wait = self.manager.Event()
        self.log(
            f"[RECEIVE][REQUEST][{data['send_id']}][{data['request']['type'].upper()}]: {data['request']['data']}"
        )
        #self.request_queue.put(
        self.request_queue.append(
            (data['timestamp'], data['send_id'], data['request'], event_wait))
        request_answer_key = (data['send_id'], data['timestamp'])
        event_wait.wait()
        answer = self.request_answer[request_answer_key]
        return answer[0], answer[1]

    def make_request(self, replicator, client_name, data, rq_type,
                     request_answer_key):
        data['send_id'] = self.name
        self.log(
            f'[SEND][REQUEST][{replicator.name}]: from {client_name} > {data}')
        answer = requests.post(
            f'http://{replicator.ip}:{replicator.port}/{rq_type}_file',
            json=data)
        self.log(
            f'[RECEIVE][RESPONSE][{replicator.name}][{answer.status_code}]: {answer.text}'
        )
        if answer.status_code != 200:
            self.request_answer[request_answer_key] = (answer.text,
                                                       answer.status_code)
        return

    def solve_request(self):
        while True:
            #if not self.request_queue.empty():
            if self.request_queue:
                # Sort requests by timestamp
                self.request_queue.sort()

                # Picking request
                client_timestamp, client_name, client_request, event_wait = self.request_queue.pop(
                    0)
                self.log(
                    f'[EXECUTE][({client_timestamp},{client_name})]: {client_request}'
                )
                client_request['data']['send_id'] = client_name

                if client_request['type'] == 'create':
                    answer = self.create_file(data=client_request['data'])
                elif client_request['type'] == 'update':
                    answer = self.update_file(data=client_request['data'])
                elif client_request['type'] == 'append':
                    answer = self.append_file(data=client_request['data'])
                elif client_request['type'] == 'delete':
                    answer = self.delete_file(data=client_request['data'])
                elif client_request['type'] == 'get':
                    answer = self.get_file(data=client_request['data'])
                else:
                    # unknown request type; keep the (body, status) convention used above
                    answer = (f"Unknown request type: {client_request['type']}", 400)
                request_answer_key = (client_name, client_timestamp)
                self.request_answer[request_answer_key] = answer

                # If an Error occurs
                if answer[1] != 200:
                    event_wait.set()
                    continue

                # Send request to replicators
                replicators_request = []
                for replicator in self.replicators:
                    replicators_request.append(
                        Process(target=self.make_request,
                                args=(
                                    replicator,
                                    client_name,
                                    client_request['data'],
                                    client_request['type'],
                                    request_answer_key,
                                )))
                for rq in replicators_request:
                    rq.start()

                # Waiting for replicators
                for rq in replicators_request:
                    rq.join()

                event_wait.set()
                continue
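For completeness, a hypothetical client call against the manager's '/' endpoint defined in get_request() above; the host, port, and the contents of the 'data' field are assumptions:

import time

import requests

payload = {
    'send_id': 'client_1',
    'timestamp': time.time(),
    'request': {'type': 'get', 'data': {'filename': 'example.txt'}},   # assumed payload shape
}
answer = requests.post('http://127.0.0.1:5000/', json=payload)
print(answer.status_code, answer.text)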
Example #7
class PSProfile(Plugin):

    name = 'psprofile'
    score = 1

    def options(self, parser, env=os.environ):
        logger.debug("options(self, parser, env = os.environ):...")

        parser.add_option(
            "--psprofile-file",
            action="store",
            default=env.get("NOSE_PSP_FILE", "psprofile.json"),
            dest="psp_file",
            metavar="FILE",
            help="By default, a psprofile.json is generated in the "
                 "current working directory")

        Plugin.options(self, parser, env)

    def configure(self, options, conf):
        logger.debug("configure(self, options, conf):...")
        super(PSProfile, self).configure(options, conf)

        if not self.enabled:
            logger.debug("plugin not enabled")
            return

        self.__profile_data = {}
        self.__psp_report = options.psp_file

    def prepareTestCase(self, test):

        pid = os.getpid()

        self.testname = test.test._testMethodName
        logger.debug("prepareTestCase(self, test):... %s [%s]" %
                     (self.testname, pid))

        # setup the multiprocessing proxy objects
        self.__process_manager = Manager()
        self.__process_event = self.__process_manager.Event()
        self.__process_data = self.__process_manager.dict()

        # LBYL ("look before you leap"): pre-populate the keys the profiler will fill
        self.__process_data['ioc'] = []
        self.__process_data['fds'] = []
        self.__process_data['mem'] = []
        self.__process_data['time'] = []
        self.__process_data['cpu'] = []

        self.__process = Process(target=PSProfile.profile,
                                 args=(pid, self.__process_data,
                                       self.__process_event))

    def startTest(self, test):
        self.__process_event.clear()
        self.__process.start()
        logger.debug("startTest(self, test):... %s" % self.testname)

    def stopTest(self, test):
        logger.debug("stopTest(self, test):... %s" % self.testname)
        self.__process_event.set()
        self.__process.join()
        self.__profile_data[self.testname] = dict(self.__process_data)

    def report(self, stream):
        logger.debug("report(self, stream):...")
        json_report = json.dumps(self.__profile_data)

        self.write_report(json_report)

        if self.conf.verbosity > 4:
            stream.writeln(str(json_report))

    def write_report(self, json_report):
        """Write the report: a .json file containing the collected profile data."""
        report_file_path = self.__psp_report
        with open(report_file_path, 'w') as report_file:
            report_file.write(json_report)

        print "-" * 70
        print "PROFILE: %s" % self.__psp_report
        print "-" * 70

    def finalize(self, result):
        logger.debug("finalize(self, result):...")

    @staticmethod
    def profile(pid, data, event, interval=1):
        """
		PSProfile.profile is executed in a multiprocessing context, the method
		can be executed or not within a nose environment.
		
		data -- is a DictProxy type object instance
		event -- is a flag to stop or not the process
		
		The data is written by proxy using objects once the process is about
		to finish.

		If the process to profile isn't alive, it will finish the process
		"""

        # the method code can be improved, but DictProxy isn't very flexible

        ps = psutil.Process(pid)

        ioc = []
        fds = []
        mem = []
        stamp = []
        cpu = []

        while not event.is_set():
            if ps.status():
                ioc.append(ps.io_counters())
                fds.append(ps.num_fds())
                mem.append(ps.memory_info())
                stamp.append(time.time())
                cpu.append(ps.cpu_percent(interval=interval))
            else:
                event.set()
                logger.debug(
                    "process to profile is not alive, about to finish this process [%s]"
                    % os.getpid())

        data['ioc'] = ioc
        data['fds'] = fds
        data['mem'] = mem
        data['time'] = stamp
        data['cpu'] = cpu
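As the docstring notes, PSProfile.profile can also run outside of nose. A sketch of standalone use, profiling the current process; do_some_work is a hypothetical workload:

import os
from multiprocessing import Manager, Process

manager = Manager()
samples = manager.dict()
stop = manager.Event()

profiler = Process(target=PSProfile.profile, args=(os.getpid(), samples, stop))
profiler.start()

do_some_work()        # hypothetical workload to be profiled
stop.set()            # ask the sampling loop to stop
profiler.join()
print(dict(samples)['cpu'])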
Example #8
    # return the dictionary shared between the parallel processes
    return rdict


# ------------------------------------------------------------------------------
# main
# ------------------------------------------------------------------------------
if __name__ == '__main__':

    # event handling (using .is_set and set)
    # event = Event()
    # shared data between processes
    manager = Manager()
    # just a dictionary
    return_dict = manager.dict()
    event = manager.Event()

    letters = string.ascii_lowercase

    # number of runs = number_groups * number_cores
    #                = 3 * 100
    # loop around groups (number of times to run the set of parallel jobs)
    for it in range(GROUPS):
        params_per_process = []

        jobs = []
        # these are the cores (set ridiculously high)
        #    - these all start in parallel
        for jt in range(CORES):
            for kit in range(SUBGROUPS):
                if SUBGROUPS < len(letters):
Example #9
    app.register_blueprint(main)
    app.register_blueprint(stats)
    app.register_blueprint(files)

    app.config.update(RECORD_EVENT=record_event,
                      SHARED_DICT=shared_dict,
                      CAMERA_DICT=camera_dict)

    return app


if __name__ == "__main__":
    manager = Manager()

    record_event = manager.Event()
    running_event = manager.Event()
    shared_dict = manager.dict()
    camera_dict = manager.dict()
    running_event.set()

    motion = MotionSensor(
        shared_data_dict=shared_dict,
        running_event=running_event)  # create a sensor process
    motion.setup()
    motion.start()

    gpsp = GpsPoller(shared_data_dict=shared_dict, running_event=running_event)
    gpsp.setup()
    gpsp.start()  # start it up
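MotionSensor and GpsPoller are not shown. A rough sketch of what such a sensor process might look like, publishing into the managed dict while running_event stays set; read_motion_pin and the 'motion' key are assumptions:

import time
from multiprocessing import Process


class MotionSensor(Process):
    """Hypothetical sensor: write readings into the shared dict while running_event is set."""

    def __init__(self, shared_data_dict, running_event):
        super().__init__()
        self.shared_data_dict = shared_data_dict
        self.running_event = running_event

    def setup(self):
        self.shared_data_dict['motion'] = False                      # assumed key name

    def run(self):
        while self.running_event.is_set():
            self.shared_data_dict['motion'] = read_motion_pin()      # hypothetical GPIO read
            time.sleep(0.1)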
Example #10
class Pipeline:
    def __init__(
        self,
        max_init_workers: Optional[int] = None,
        max_queues_size: int = MAX_QUEUES_SIZE,
    ):
        """
        :param max_init_workers: Number of workers used for concurrent initialization of stages; defaults to the number of CPUs
        :param max_queues_size: Maximum size of any queue created for the pipeline (stage input and output queues)
        """
        self._containers = LastOrderedDict()
        self._error_manager = ErrorManager()
        self._max_init_workers = max_init_workers
        self._init_executor = None
        self._wait_previous_executor = None
        self._pipeline_executor = None
        self._max_queues_size = max_queues_size
        self._out_queue = None
        self._enqueue_source = False
        self._sync_manager = None
        # an empty source, on which we can only occasionally send items
        self._source_container = SourceContainer()
        self._count = 0
        self._executors_ready = False

    def _new_mp_queue(self) -> ItemsQueue:
        """
        Construct queue for multiprocessing communication
        """
        if self._sync_manager is None:
            self._sync_manager = Manager()
        return self._sync_manager.Queue(maxsize=self._max_queues_size)

    def _new_queue(self) -> ItemsQueue:
        """
        Construct queue for communication
        """
        return Queue(maxsize=self._max_queues_size)

    def _new_mp_event(self) -> Event:
        """
        Construct synchronization event for multiprocessing
        """
        if self._sync_manager is None:
            self._sync_manager = Manager()
        return self._sync_manager.Event()

    @staticmethod
    def _new_event() -> Event:
        """
        Construct synchronization event
        """
        return Event()

    def _new_mp_counter(self) -> ProcessCounter:
        """
        Construct a safe counter for multiprocessing
        """
        if self._sync_manager is None:
            self._sync_manager = Manager()
        return ProcessCounter(self._sync_manager)

    @staticmethod
    def _new_counter() -> ThreadCounter:
        """
        Construct a safe counter for threads
        """
        return ThreadCounter()

    def _wait_executors(self, wait_seconds: float = CONCURRENCY_WAIT):
        """
        Wait for all containers to start

        :param wait_seconds: Interval, in seconds, at which to poll until all stage initializers have finished
        """
        if self._executors_ready:
            return
        if self._init_executor is not None:
            self._init_executor.shutdown(wait=True)
            self._init_executor = None
        while not all(self._containers.values()):
            time.sleep(wait_seconds)
        self._wait_previous_executor.shutdown(wait=True)
        for name, container in self._containers.items():
            if isinstance(
                    container,
                (ConcurrentStageContainer, BatchConcurrentStageContainer)):
                container.run()
        # finalize initialization of the error manager shared by this and other stage threads
        self._error_manager.on_start()
        self._executors_ready = True
        _logger.debug("Pipeline ready to run")

    def shutdown(self):
        if self._out_queue is not None:
            self._out_queue.join()
        # if self._init_executor is not None:
        #     self._init_executor.shutdown()
        # FIXME stage shutdown may raise exception, the executor gets stuck
        # for name, stage in self._containers.items():
        #     if isinstance(stage, (ConcurrentStageContainer, BatchConcurrentStageContainer)):
        #         stage.shutdown()
        if self._sync_manager is not None:
            self._sync_manager.shutdown()

    def __del__(self):
        self.shutdown()

    def build(self) -> Pipeline:
        """
        Pipeline builder method
        """
        if not any(self._containers):
            raise ValueError("Must append at least a stage")
        _logger.debug(f"Building the pipeline on stages: {self._log_stages()}")
        self._wait_executors()
        return self

    def run(self) -> Generator[DataItem, None, None]:
        """
        Run the pipeline given a source and a concatenation of stages.
        Get the sequence of items through iteration

        :return: Iterator over processed items
        :raises ValueError: When a source has not been set for the pipeline
        """
        if not self._source_container.is_set():
            raise ValueError("Set the data source for this pipeline")
        _logger.debug(f"Running the pipeline on stages: {self._log_stages()}")
        counter = 0
        last_stage_name = self._last_stage_name()
        terminator_thread = None
        source_thread = None
        # in case the first stage is concurrent
        if self._enqueue_source:
            source_thread = Thread(
                target=self._source_container.pop_into_queue)
            source_thread.start()
        while True:
            for name, container in self._containers.items():
                try:
                    # concurrent stages run by themselves in threads/processes
                    if not isinstance(
                            container,
                        (ConcurrentStageContainer,
                         BatchConcurrentStageContainer),
                    ):
                        container.process()
                    # but we must periodically check them for errors
                    else:
                        container.check_errors()
                except Exception as e:
                    self.stop()
                    # TODO in case of errors we lose pending items!
                    self._terminate_all(force=True)
                    self.shutdown()
                    self._count += 1
                    raise e
                # retrieve finally processed items from the last stage
                if name == last_stage_name:
                    for _ in range(container.size if isinstance(
                            container, BatchStageContainer) else 1):
                        item = container.get_processed()
                        if item is not None:
                            if not isinstance(item, Stop):
                                yield item
                                counter += 1
                                self._count += 1
                            # if a stop is finally signaled, start termination of all containers
                            elif (not self._all_terminated()
                                  and terminator_thread is None):
                                terminator_thread = Thread(
                                    target=self._terminate_all)
                                terminator_thread.start()
                        # an item is None if the final output queue is empty
                        else:
                            break
            # exit the loop only when all items have been returned
            if self._all_empty() and counter >= self._source_container.count():
                if source_thread is not None:
                    source_thread.join()
                if terminator_thread is not None:
                    terminator_thread.join()
                    self.shutdown()
                return

    @property
    def count(self) -> int:
        """
        Get the number of items processed across all executed runs, including items that failed

        :return: Count of processed items
        """
        return self._count

    def _terminate_all(self,
                       force: bool = False,
                       wait_seconds: float = CONCURRENCY_WAIT):
        """
        Terminate all running containers

        :param force: If True do not wait for a container to process all items produced by the source
        :param wait_seconds: Seconds to wait between checks on a container's termination
        """
        _logger.debug("Terminating the pipeline")
        # scroll the pipeline by its order and terminate stages after the relative queues are empty
        for container in self._containers.values():
            if not force:
                # ensure the stage has processed all source items
                while container.count() < self._source_container.count():
                    time.sleep(wait_seconds)
            container.terminate()
            if isinstance(container, ConcurrentStageContainer):
                if force:
                    # empty the queues, losing pending items
                    container.empty_queues()
                while not container.is_terminated():
                    time.sleep(wait_seconds)
                container.queues_join()
                while not container.queues_empty():
                    time.sleep(wait_seconds)
        _logger.debug("Termination done")

    def _all_terminated(self) -> bool:
        """
        Check whether all containers have been signaled to terminate and have exited
        """
        return all(container.is_terminated()
                   for container in self._containers.values())

    def _all_empty(self) -> bool:
        """
        Check whether all containers are terminated and no items are left in the queues
        """
        return self._all_terminated() and all(
            container.queues_empty()
            for container in self._containers.values()
            if isinstance(container, (ConcurrentStageContainer,
                                      BatchConcurrentStageContainer)))

    def process(self, item: DataItem) -> DataItem:
        """
        Process a single item synchronously (no concurrency) through the pipeline
        """
        _logger.debug(f"Processing {item} on stages: {self._log_stages()}")
        last_stage_name = self._containers.last_key()
        self._source_container.prepend_item(item)
        for name, container in self._containers.items():
            container.process()
            if name == last_stage_name:
                return container.get_processed(block=True)

    def process_async(self,
                      item: DataItem,
                      callback: Optional[Callable[[DataItem], Any]] = None):
        """
        Process a single item asynchronously through the pipeline; stages may run concurrently.
        The call returns immediately; processed items are retrieved with :meth:`.Pipeline.get_item`

        :param callback: A function to call after a successful process of the item
        """
        _logger.debug(
            f"Processing asynchronously {item} on stages: {self._log_stages()}"
        )
        if callback is not None:
            item.set_callback(callback)
        self._source_container.prepend_item(item)
        self._start_pipeline_executor()

    def stop(self):
        """
        Tell the source to stop generating items, which in turn stops the pipeline
        """
        self._source_container.stop()

    def get_item(self, block: bool = True) -> DataItem:
        """
        Get a single item from the asynchronous execution of the pipeline on single items from :meth:`.Pipeline.process_async`

        :param block: If True wait indefinitely for the next processed item
        :raises ValueError: When no output queue is set, i.e. the pipeline is not running asynchronously
        :raises queue.Empty: When we do not block and the queue is empty
        """
        if self._out_queue is not None:
            item = self._out_queue.get(block)
            self._out_queue.task_done()
            return item
        else:
            raise ValueError(
                "No pipeline is running asynchronously, no item can be retrieved from the output queue"
            )

    def set_source(self, source: Source) -> Pipeline:
        """
        Set the source of the pipeline: a subclass of :class:`.stage.Source`
        """
        self._source_container.set(source)
        return self

    def set_error_manager(self, error_manager: ErrorManager) -> Pipeline:
        """
        Set the error manager for handling errors from each stage item processing
        """
        self._error_manager = error_manager
        for container in self._containers.values():
            container.set_error_manager(self._error_manager)
        return self

    def _last_stage_name(self) -> str:
        if self._containers:
            return self._containers.last_key()

    def _last_container(self) -> BaseContainer:
        if self._containers:
            return self._containers[self._last_stage_name()]
        else:
            return self._source_container

    def _wait_for_previous(
        self,
        container: ConnectedStageMixin,
        last_stage_name: str,
        wait_seconds: float = CONCURRENCY_WAIT,
    ):
        """
        Given a container we want to append to the pipeline, wait for the last container added to the pipeline to be created

        :param container: A container to add to the pipeline
        :param last_stage_name: Name of the last stage currently in the pipeline
        :param wait_seconds: Interval, in seconds, at which to poll for the construction of the last stage's container
        """
        def _waiter():
            if last_stage_name is not None:
                while self._containers[last_stage_name] is None:
                    time.sleep(wait_seconds)
                container.set_previous(self._containers[last_stage_name])
            else:
                container.set_previous(self._source_container)

        executor = self._get_wait_previous_executor()
        executor.submit(_waiter)

    def _build_container(self, name: str, stage: StageType, concurrency: int,
                         parallel: bool) -> BaseContainer:
        """
        Get a new container instance according to the pipeline configuration

        :param name: Stage name
        :param stage: A stage instance
        :param concurrency: Number of concurrent stage executions, if 0 then just create the non-concurrent containers
        :param parallel: If True use multiprocessing, otherwise threads
        """
        if concurrency <= 0:
            constructor = (BatchStageContainer if isinstance(
                stage, BatchStage) else StageContainer)
            # if not concurrent we must explicitly finalize initialization of this single stage object
            stage.on_start()
            return constructor(name, stage, self._error_manager)
        else:
            constructor = (BatchConcurrentStageContainer if isinstance(
                stage, BatchStage) else ConcurrentStageContainer)
            if parallel:
                return constructor(
                    name,
                    stage,
                    self._error_manager,
                    self._new_mp_queue,
                    self._new_mp_counter,
                    self._new_mp_event,
                    concurrency,
                    parallel,
                )
            else:
                # if the stage is executed on multiple threads we must finalize initialization once,
                # while on multiprocessing each process executor calls it for its own copy of the stage
                stage.on_start()
                return constructor(
                    name,
                    stage,
                    self._error_manager,
                    self._new_queue,
                    self._new_counter,
                    self._new_event,
                    concurrency,
                    parallel,
                )

    def get_stage(self, name: str) -> StageType:
        """
        Get a stage instance by its name
        """
        return self._containers.get(name).stage

    def append_stage(
        self,
        name: str,
        stage: StageType,
        concurrency: int = 0,
        parallel: bool = False,
    ) -> Pipeline:
        """
        Append a stage to the pipeline just after the last one appended, or after the source if it is the first stage

        :param name: Name identifying the stage in the pipeline; it is also set on the stage and must be unique within the pipeline
        :param stage: Instance of a stage
        :param concurrency: Number of concurrent stage executions, if 0 then threads/processes won't be involved for this stage
        :param parallel: If True use multiprocessing, otherwise threads
        """
        self._executors_ready = False
        # FIXME here we force a BatchStage to run on a thread, but we would leave it on the main thread
        if concurrency < 1 and isinstance(stage, BatchStage):
            parallel = False
            concurrency = 1
        self._check_stage_name(name)
        container = self._build_container(name, stage, concurrency, parallel)
        if concurrency > 0:
            # if it is concurrent and it is the first stage, make the source work on an output queue
            if not self._containers:
                self._enqueue_source = True
        self._wait_for_previous(
            container,
            self._last_stage_name())  # wait until the previous stage is initialized
        self._containers[name] = container
        return self

    def append_stage_concurrently(
        self,
        name: str,
        stage_class: Callable,
        args: Sequence = None,
        kwargs: Mapping = None,
        concurrency: int = 0,
        parallel: bool = False,
    ) -> Pipeline:
        """
        Append a stage class to the pipeline just after the last one appended, or after the source if it is the first stage.
        The stage construction will be executed concurrently with respect to the overall pipeline construction

        :param name: Name identifying the stage in the pipeline; it is also set on the stage and must be unique within the pipeline
        :param stage_class: Class of a stage
        :param args: List of arguments for the stage constructor
        :param kwargs: Dictionary of keyed arguments for the stage constructor
        :param concurrency: Number of concurrent stage executions, if 0 then threads/processes won't be involved for this stage
        :param parallel: If True use multiprocessing, otherwise threads
        """
        self._executors_ready = False
        # FIXME here we force a BatchStage to run on a thread, but we would leave it on the main thread
        if concurrency < 1 and issubclass(stage_class, BatchStage):
            parallel = False
            concurrency = 1
        if kwargs is None:
            kwargs = {}
        if args is None:
            args = []
        self._check_stage_name(name)
        # if it is concurrent and it is the first stage, make the source work on an output queue
        if concurrency > 0 and not self._containers:
            self._enqueue_source = True
        last_stage_name = self._last_stage_name()
        # set it immediately so the order of the calls of this method is followed in `_containers`
        self._containers[name] = None
        future = self._get_init_executor(parallel).submit(
            stage_class, *args, **kwargs)

        def append_stage(stage_future: Future):
            stage = stage_future.result()
            container = self._build_container(name, stage, concurrency,
                                              parallel)
            self._wait_for_previous(container, last_stage_name)
            self._containers[name] = container

        future.add_done_callback(append_stage)
        return self

    def _get_init_executor(self, parallel: bool = False) -> Executor:
        """
        Get a pool executor for concurrent stage initialization

        :param parallel: True if the executor uses multiprocessing, otherwise threads
        """
        if self._init_executor is None:
            executor = ThreadPoolExecutor if not parallel else ProcessPoolExecutor
            self._init_executor = executor(max_workers=self._max_init_workers)
        return self._init_executor

    def _get_wait_previous_executor(self) -> Executor:
        """
        Get a pool executor for the function that repeatedly waits for a container to be ready
        """
        if self._wait_previous_executor is None:
            self._wait_previous_executor = ThreadPoolExecutor()
        return self._wait_previous_executor

    def _start_pipeline_executor(self) -> Thread:
        """
        Get a thread in which to run a pipeline that accepts asynchronous processing of single items
        """
        if self._pipeline_executor is None:
            self._init_out_queue()

            def pipeline_runner():
                for item in self.run():
                    item.callback()
                    self._out_queue.put(item)

            self._pipeline_executor = Thread(target=pipeline_runner,
                                             daemon=True)
            self._pipeline_executor.start()
        return self._pipeline_executor

    def _check_stage_name(self, name: str):
        """
        Check if a stage name is not already defined in the pipeline
        :raises ValueError: Stage name is already defined in the pipeline
        """
        if name in self._containers:
            raise ValueError(
                f"The stage name {name} is already used in this pipeline")

    def _init_out_queue(self):
        """
        Create the internal output queue for asynchronous processing of single items
        """
        self._out_queue = self._new_queue()

    def _log_stages(self):
        return ", ".join(self._containers.keys())
Example #11
    def main(self):

        t0 = timeit.default_timer()

        # shared resources manager
        m = Manager()
        # original screenshots of the game window
        raw_frames = m.Queue(maxsize=3)
        # environment data
        env_data = m.Queue(maxsize=2)
        # output images
        output_frames = m.Queue(maxsize=3)

        # end event
        end = m.Event()

        # proc's pool
        pool = self.__init_mp_pool(pool_size=16)

        # we need the external game sub-process pid for a graceful shutdown
        game_pid = m.Value('pid', None)

        game = Game(rom_path=ROM_PATH,
                    emulator_path=EMULATOR_PATH,
                    rom_name=ROM_NAME,
                    pid=game_pid)

        processes = [pool.apply_async(game.run, args=(end,))] + \
                     self.init_environment(pool, end, q_source=raw_frames, q_target=env_data) + \
                     self.init_agent(pool, end, q_source=env_data, q_target=output_frames) + \
                     self.init_grabber(pool, end, q_target=raw_frames) + \
                     self.init_gui(pool, end, q_source=output_frames)

        fin_processes = []

        try:
            while True:
                for proc in processes:
                    if (proc.ready() and proc not in fin_processes):
                        fin_processes.append(proc)
                if len(fin_processes) == len(processes):
                    break
        except KeyboardInterrupt:
            self.log.info('\nCaught Ctrl+C, terminating workers.')
            game.stop(game_pid.value)
            pool.terminate()
            pool.join()
        except Exception as err:
            self.log.error('\nMain process err: %s ' % err)
            pool.terminate()
            pool.join()
        else:
            pool.close()
            pool.join()
        finally:
            m.shutdown()

        self.log.info('Finished processing.\n' +
                      'Main process worked for %.2f seconds' %
                      (timeit.default_timer() - t0))
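Example #11 wires its stages together with Manager queues, a shared end Event, and pool.apply_async, then polls each AsyncResult until every worker has finished. A stripped-down sketch of that coordination pattern follows; the producer and consumer bodies are illustrative, not the original game/agent code.

import time
from multiprocessing import Manager, Pool
from queue import Empty


def producer(q, stop):
    for i in range(5):
        q.put(i)
        time.sleep(0.01)
    stop.set()                       # signal the end of the stream


def consumer(q, stop):
    while not (stop.is_set() and q.empty()):
        try:
            print("got", q.get(timeout=0.1))
        except Empty:
            continue


if __name__ == "__main__":
    m = Manager()
    q = m.Queue(maxsize=3)
    stop = m.Event()
    with Pool(2) as pool:
        results = [pool.apply_async(producer, (q, stop)),
                   pool.apply_async(consumer, (q, stop))]
        # poll until every AsyncResult is ready, as the example above does
        while not all(r.ready() for r in results):
            time.sleep(0.05)
    m.shutdown()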
Example #12
0
class IntegrationManager:
    def __init__(self, db_path, nmergers, batch_size):
        # Establish database connection
        self.db_path = db_path
        self.engine = create_engine(self.db_path)
        self.session = sessionmaker(bind=self.engine)()
        self.batch_size = batch_size

        # Establish process management
        self.nmergers = nmergers
        self.mergers = []
        self.manager = Manager()

        self.sequenceq = self.manager.Queue(batch_size * 2)
        self.mergedq = self.manager.Queue(batch_size * 10)

        self.term_sig = self.manager.Event()
        self.hold_sigs = [self.manager.Event() for m in range(nmergers)]

    def gather_sequences(self):
        print("-- Gathering sequences to parse --")
        return [
            q[0]
            for q in self.session.query(PSM.base_sequence).distinct().all()
        ]

    def start_mergers(self):
        print("-- Creating and starting {} mergers --".format(self.nmergers))
        for i in range(self.nmergers):
            m = PSMMerger(self.db_path, self.sequenceq, self.mergedq,
                          self.term_sig, self.hold_sigs[i])
            self.mergers.append(Process(target=m.run))

        [m.start() for m in self.mergers]

    def flush_to_database(self):
        results = []
        while True:
            try:
                results.append(self.mergedq.get(True, 1))
            except Empty:
                break

        try:
            self.session.add_all(results)
            self.session.commit()
        except OperationalError:
            self.session.rollback()
            raise
        except IntegrityError:
            self.session.rollback()
            for pep in results:
                print(pep)
                for mod in pep.modifications:
                    print(mod)

        print("-- Got {} results --".format(len(results)))

    def shutdown(self):
        print("-- Attempting shutdown -- ")
        self.term_sig.set()
        for m in self.mergers:
            m.join()

        self.manager.shutdown()

    def run(self):
        sequences = self.gather_sequences()
        self.start_mergers()

        for batchn, ind in enumerate(range(0, len(sequences),
                                           self.batch_size)):
            print("-- Processing batch {} --".format(batchn))
            for seq in sequences[ind:min(len(sequences), ind +
                                         self.batch_size)]:
                self.sequenceq.put(seq)

            processes_held = [False]
            while not all(processes_held) and not self.sequenceq.empty():
                processes_held = [h.is_set() for h in self.hold_sigs]

            print("-- Flushing batch {} --".format(batchn))
            self.flush_to_database()

        self.shutdown()
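IntegrationManager above uses one term_sig Event to stop all mergers and one hold Event per merger so the coordinator knows when a batch has been drained before flushing. A reduced sketch of that handshake with a single toy worker follows; PSMMerger and the database layer are replaced by a doubling function.

from multiprocessing import Manager, Process
from queue import Empty


def merger(inq, outq, term_sig, hold_sig):
    while not term_sig.is_set():
        try:
            item = inq.get(timeout=0.2)
        except Empty:
            hold_sig.set()            # signal "I have drained the queue"
            continue
        hold_sig.clear()
        outq.put(item * 2)


if __name__ == "__main__":
    m = Manager()
    inq, outq = m.Queue(), m.Queue()
    term_sig, hold_sig = m.Event(), m.Event()
    p = Process(target=merger, args=(inq, outq, term_sig, hold_sig))
    p.start()
    for i in range(4):                # one "batch"
        inq.put(i)
    hold_sig.wait()                   # worker has caught up with the batch
    term_sig.set()
    p.join()
    while not outq.empty():
        print(outq.get())
    m.shutdown()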
Example #13
0
class Server:
    def __init__(self):
        self.static_routes: dict = {}
        self.dynamic_routes = []
        self.pool: Optional[ProcessPoolExecutor] = None
        self.proc_env = ProcEnv()

    template_404 = SimpleTemplate("<h1>Path or file not found: {}</h1>")
    template_500 = SimpleTemplate("<h1>Server error in {}</h1><p>{}")
    template_503 = SimpleTemplate(
        "<h1>Server timed out after {} seconds in {}</h1>")

    def error_404(self, request: Request) -> Response:
        """
        Built-in 404: Not Found error handler.
        """
        return Response(self.template_404.render(request.path), code=404)

    def error_500(self, request: Request, error: Exception) -> Response:
        """
        Built-in 500: Server Error handler.
        """
        return Response(
            self.template_500.render(request.path, str(error)),
            code=500,
        )

    def error_503(self, request: Request) -> Response:
        """
        Built-in 503: Server Timeout handler.
        """
        return Response(
            self.template_503.render(DEFAULT_TIMEOUT, request.path),
            code=503,
        )

    def route(
        self,
        path: str,
        route_type: RouteType = RouteType.pool,
        action: Union[Iterable, str] = "GET",
        before=None,
        after=None,
    ):
        """
        Route decorator, used to assign a route to a function handler by wrapping the function. Accepts a `path`, an optional `route_type`, and an optional list of HTTP verbs (or a single verb string, default "GET") as arguments.
        """
        parameters = []
        route_regex = None

        path_match = re.finditer(path_re, path)

        for n in path_match:
            parameters.append(n.group(0)[1:-1])

        if parameters:
            route_regex = re.compile(re.sub(path_re_str, "(.*?)", path))

        if isinstance(action, str):
            action = [action]

        def decorator(callback):

            if route_regex:
                for _ in action:
                    self.add_dynamic_route(route_regex, _, callback,
                                           route_type, parameters)
            else:
                for _ in action:
                    self.add_route(path, _, callback, route_type)
            return callback

        return decorator

    def add_route(
        self,
        path: str,
        action: str,
        callback: Callable,
        route_type: RouteType = RouteType.pool,
    ):
        """
        Assign a static route to a function handler.
        """
        route = (callback, route_type)
        if not self.static_routes.get(path):
            self.static_routes[path] = {action: route}
        else:
            self.static_routes[path][action] = route

    def add_dynamic_route(
        self,
        regex_pattern,
        action: str,
        callback: Callable,
        route_type: RouteType = RouteType.pool,
        parameters: list = None,
    ):
        """
        Assign a dynamic route (with wildcards) to a function handler.
        """
        self.dynamic_routes.append(
            (regex_pattern, action, callback, route_type, parameters))

    @classmethod
    def run_route_pool(cls, raw_env: bytes, func: Callable, *a, **ka):
        """
        Execute a function synchronously in the local environment. A copy of the HTTP request data is passed automatically to the handler as its first argument.
        """
        local_env = Request(raw_env)
        result = func(local_env, *a, **ka)
        if isinstance(result, Response):
            return result.as_bytes()
        return result

    @classmethod
    def run_route_pool_stream(cls, remote_queue: Queue, signal, raw_env: bytes,
                              func: Callable, *a, **ka):
        """
        Execute a function synchronously in the process pool, and return results from it incrementally.
        """
        local_env = Request(raw_env)
        for _ in func(local_env, *a, **ka):
            if signal.is_set():
                raise ParentProcessConnectionAborted
            remote_queue.put(_)
        remote_queue.put(None)

    async def start_server(self, host: str, port: int):
        """
        Launch the asyncio server with the master connection handler.
        """
        self.srv = await asyncio.start_server(self.connection_handler, host,
                                              port)
        async with self.srv:  # type: ignore
            _e(f"Listening on {host}:{port}")
            await self.srv.serve_forever()

    def run(
        self,
        host: str = "localhost",
        port: int = 8000,
        workers: Union[bool, int, None] = True,
    ):
        """
        Run pixie_web on the stated hostname and port.
        """
        _e("Pixie-web 0.1")

        if workers is not None:
            if workers is True:
                self.use_process_pool()
            elif workers is False:
                pass
            else:
                self.use_process_pool(int(workers))

        try:
            asyncio.run(self.start_server(host, port))
        except KeyboardInterrupt:
            _e("Closing server with ctrl-C")
        except asyncio.CancelledError:
            _e("Closing due to internal loop shutdown")

    @classmethod
    def pool_start(cls):
        """
        Launched at the start of each pooled process. This modifies the environment data in the process to let any routes running in the process know that it's in a pool, not in the main process.
        """

        proc_env.proc_type = ProcessType.pool

    def use_process_pool(self, workers: Optional[int] = None):
        """
        Set up the process pool and ensure it's running correctly.
        """
        self.mgr = Manager()

        self.pool = ProcessPoolExecutor(max_workers=workers,
                                        initializer=Server.pool_start)

        from concurrent.futures.process import BrokenProcessPool

        try:
            self.pool.submit(dummy).result()
        except (OSError, RuntimeError, BrokenProcessPool):
            _e("'run()' function must be invoked from within 'if __name__ == \"__main__\"' block to invoke multiprocessing. Defaulting to single-process pool."
               )
            self.pool = None
        else:
            _e(f"Using {self.pool._max_workers} processes")  # type: ignore

        self.proc_env.pool = self.pool

    def close_server(self):
        if self.srv is None:
            raise Exception(
                "No server to close on this instance. Use `ProcessType.main_async` to route the close operation to the main server."
            )
        self.srv.close()
        self.srv = None

    def application(self, environ, start_response):
        path = environ["REQUEST_URI"]
        verb = environ["REQUEST_METHOD"]
        try:
            handler, route_type = self.static_routes[path][verb]
        except KeyError:
            route_match = None
            for route in self.dynamic_routes:
                if verb != route[1]:
                    continue
                route_match = route[0].fullmatch(path)
                if route_match:
                    handler, route_type = route[2:4]
                    parameters = route_match.groups()
            if (not self.dynamic_routes) or (not route_match):
                response = Response(f"Not found: {path}", code=404)
                start_response(*response.start_response())
                return [response.body]
        else:
            parameters = []

        if route_type is RouteType.sync:
            result: Union[bytes, Response,
                          str] = handler(Request(environ), *parameters)

        if result is None:
            start_response("200 OK", [("Content-Type", "text/plain")])
            return [b""]
        elif isinstance(result, Response):
            start_response(*result.start_response())
            return [result.body]
        elif isinstance(result, SimpleResponse):
            head, body = result.split(b"\r\n\r\n", 1)
            response_type, content_type = head.split(b"\r\n", 2)
            protocol, code = response_type.split(b" ", 1)
            content_type = content_type.split(b": ", 1)
            start_response(code, [("Content-Type", content_type[1])])
            return [body]
        elif isinstance(result, bytes):
            raise NotImplementedError(
                "Raw bytestream not supported for WSGI; emit Header() first or use Response or SimpleResponse"
            )
        else:
            # iterable, check for first item as header
            # for now, not allowed
            raise NotImplementedError(
                "Iterable not yet supported for WSGI; use Response or SimpleResponse"
            )

    # have Response object:
    # start_response()
    # result_iter()

    async def connection_handler(self, reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter):
        """
        Reads the data from the network connection and attempts to find an appropriate route for it.
        """

        readline = reader.readline
        get_loop = asyncio.get_event_loop
        write = writer.write
        drain = writer.drain
        at_eof = reader.at_eof
        wait_for = asyncio.wait_for
        close = writer.close
        AsyncTimeout = asyncio.TimeoutError
        run_in_executor = get_loop().run_in_executor

        while True:

            action = raw_data = signal = content_length = None

            while True:
                _ = await readline()

                if at_eof():
                    close()
                    return

                if raw_data is None:
                    raw_data = bytearray(_)
                    action = _.decode("utf-8").split(" ")
                    continue
                else:
                    raw_data.extend(_)

                if _ in (b"\r\n", b"\n"):
                    break

                if _.startswith(b"Content-Length:"):
                    content_length = int(_.decode("utf-8").split(":")[1])

            if content_length:
                raw_data.extend(await reader.read(content_length))

            path = action[1].split("?", 1)[0]
            verb = action[0]

            try:
                handler, route_type = self.static_routes[path][verb]
            except KeyError:
                route_match = None
                for route in self.dynamic_routes:
                    if verb != route[1]:
                        continue
                    route_match = route[0].fullmatch(path)
                    if route_match:
                        handler, route_type = route[2:4]
                        parameters = route_match.groups()
                if (not self.dynamic_routes) or (not route_match):
                    write(self.error_404(Request(raw_data)).as_bytes())
                    await drain()
                    continue
            else:
                parameters = []

            try:

                # Run with no pooling or async, in default process.
                # Single-threaded, potentially blocking.

                if route_type is RouteType.sync:
                    result = handler(Request(raw_data), *parameters)

                # Run a sync function in an async thread (cooperative multitasking)

                elif route_type is RouteType.sync_thread:
                    result = await run_in_executor(None, handler,
                                                   Request(raw_data),
                                                   *parameters)

                # Run async function in default process.
                # Single-threaded, nonblocking.

                elif route_type is RouteType.asnc:
                    result = await handler(Request(raw_data), *parameters)

                # Run non-async code in process pool.
                # Multi-processing, not blocking.

                # Note that we pass `Server.run_route_pool`, not `self.run_route_pool`, because otherwise we can't correctly pickle the object. So we just use the class method that exists in the pool instance, since it doesn't need `self` anyway. If we DID need `self` over there, we could always get the server instance from the module-local server obj.

                elif route_type is RouteType.pool:
                    result = await wait_for(
                        run_in_executor(
                            self.pool,
                            Server.run_route_pool,
                            raw_data,
                            handler,
                            *parameters,
                        ),
                        DEFAULT_TIMEOUT,
                    )

                # Run incremental stream, potentially blocking, in process pool

                elif route_type is RouteType.stream:

                    job_queue = self.mgr.Queue()
                    signal = self.mgr.Event()

                    job = self.pool.submit(
                        Server.run_route_pool_stream,
                        job_queue,
                        signal,
                        raw_data,
                        handler,
                        *parameters,
                    )

                    writer.transport.set_write_buffer_limits(0)

                    # We can't send an async queue object to the subprocess,
                    # so we use a manager queue and poll it every .1 sec

                    while True:

                        while True:
                            try:
                                _ = job_queue.get_nowait()
                            except EmptyQueue:
                                await asyncio.sleep(0.1)
                                continue
                            else:
                                break

                        if _ is None:
                            break

                        write(_)
                        await drain()

                    writer.close()
                    return

            except FileNotFoundError:
                result = self.error_404(Request(raw_data))
            except AsyncTimeout:
                result = self.error_503(Request(raw_data))
            except Exception as err:
                result = self.error_500(Request(raw_data), err)

            try:
                if result is None:
                    write(simple_response(b""))
                elif isinstance(result, Response):
                    write(result.as_bytes())
                elif isinstance(result, SimpleResponse):
                    write(result)
                elif isinstance(result, bytes):
                    write(result)
                else:
                    for _ in result:
                        write(_)
                        await drain()
                    writer.close()
                    return

                await drain()

            except ConnectionAbortedError:
                if signal:
                    signal.set()
                writer.close()
                return
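The stream branch of connection_handler above cannot hand an asyncio queue to the pooled worker, so it polls a Manager queue and uses a Manager Event to abort the worker if the client disconnects. A compact sketch of that round trip follows; produce_chunks is an illustrative stand-in for run_route_pool_stream.

import asyncio
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Manager
from queue import Empty


def produce_chunks(out_q, abort):
    for i in range(5):
        if abort.is_set():
            return                    # parent asked us to stop
        out_q.put(f"chunk {i}".encode())
    out_q.put(None)                   # sentinel: stream finished


async def consume(pool, mgr):
    q, abort = mgr.Queue(), mgr.Event()
    loop = asyncio.get_running_loop()
    loop.run_in_executor(pool, produce_chunks, q, abort)
    while True:
        try:
            chunk = q.get_nowait()
        except Empty:
            await asyncio.sleep(0.1)  # poll the manager queue, as above
            continue
        if chunk is None:
            break
        print(chunk)


if __name__ == "__main__":
    with ProcessPoolExecutor() as pool, Manager() as mgr:
        asyncio.run(consume(pool, mgr))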
Example #14
0
def start_crawler(master_browser=initialise_remote_browser,
                  child_browser=initialise_remote_browser):
    global ARGS
    ######################################################
    # Attaining arguments and configurations for crawler #
    ######################################################
    LOGGER.info('Reading arguments and configurations for crawl session...')
    ARGS = read_arguments()
    config = get_config()
    downloader_count = int(config['MULTIPROCESSING']['Downloaders'])
    extractor_count = int(config['MULTIPROCESSING']['Extractors'])
    crawl_id = init_crawl_id()
    crawlers = init_enabled_crawlers()

    # Validate configurations
    if downloader_count <= 0 or extractor_count <= 0:
        LOGGER.error('Number of processes should be more than 0!')
        raise ValueError('Number of processes should be more than 0!')
    if not crawlers:
        LOGGER.error('No crawlers enabled, ending crawl...')
        return
    LOGGER.info('Arguments and configurations initialised!')

    notification = {'crawl_id': crawl_id.hex, 'type': ARGS.type}
    oneway_publish('crawler', notification, 'crawler.event.start')
    #######################################################
    # Initialise sessions needed for authenticated crawls #
    #######################################################
    LOGGER.info('Preparing sessions for authenticated crawls...')
    browser = master_browser()
    # Get cookies for crawlers
    cookies = {}
    try:
        for crawler in crawlers:
            if crawler.signin(browser):
                sleep(
                    3
                )  # Quick hack: Wait for all activities on browser to complete
                auth_cookies = crawler.extract_auth_cookies(browser)
                for key in auth_cookies.keys():
                    cookies[key] = auth_cookies[key]
        browser.close()
        browser.quit()
    except Exception as e:
        raise e
    LOGGER.info('Sessions for authenticated crawls prepared!')

    #############################
    # Preparing crawl processes #
    #############################
    LOGGER.info('Initialising processes and variables for crawl session...')
    try:
        patch_autoproxy()

        ## MANAGER START ##
        manager = Manager()

        fqdn_metadata = manager.dict()
        rate_limiters = manager.dict()
        db_conn_init()
        for crawler in crawlers:
            # Add to queued links
            crawler.crawl_id = crawl_id.hex
            add_queued_links(crawler)

            for fqdn in crawler.fqdns_in_scope:
                # Initialise variables
                fqdn_metadata[fqdn] = manager.dict({
                    'class': crawler.__class__,
                    'robots_txt': None
                })

                rate_limiters[fqdn] = manager.Event()

                if crawler.fqdn == fqdn:
                    if crawler.robots_url:
                        robots = RobotFileParser(crawler.robots_url)
                        robots.read()
                        fqdn_metadata[fqdn]['robots_txt'] = robots

                # Initialise rate limiters
                rl = RateLimiterThread(crawler.requests_per_sec,
                                       rate_limiters[fqdn])
                rl.daemon = True
                rl.start()
        db_conn_kill()

        # Synchronisation events
        start_event = manager.Event()
        terminate_time = datetime.now() + timedelta(seconds=ARGS.duration * 60)

        # Processes
        downloader_procs = []
        for _ in range(downloader_count):
            downloader = Downloader(crawl_id, child_browser, terminate_time,
                                    cookies, fqdn_metadata, rate_limiters)
            downloader.start()
            downloader_procs.append(downloader)

        extractor_procs = []
        for _ in range(extractor_count):
            extractor = Extractor(crawl_id, start_event, terminate_time,
                                  fqdn_metadata)
            extractor.start()
            extractor_procs.append(extractor)

        for downloader in downloader_procs:
            downloader.browser_started.wait()

        processes = downloader_procs + extractor_procs
    except Exception as e:
        raise e
    LOGGER.info('Processes and variables initialised for crawl session!')

    ##########################################
    # Start crawl and sleep until time is up #
    ##########################################
    LOGGER.info(
        f'Crawl {crawl_id.hex} starting, come back in {ARGS.duration} minute(s)!'
    )
    start_event.set()
    sleep((terminate_time - datetime.now()).total_seconds() + 5)
    LOGGER.info(f'Crawl {crawl_id.hex} completed! Cleaning up...')

    ##########################################
    # Clean up and wait for processes to end #
    ##########################################
    # Give processes time to finish
    sleep(15)

    # Clean up database
    db_conn_init()
    DownloadedDocument.objects(crawl_id=crawl_id.hex).delete()
    QueuedLink.objects(crawl_id=crawl_id.hex).delete()
    db_conn_kill()

    # Forcefully terminate if required
    forceful_terminations = 0
    for process in processes:
        try:
            process.browser.quit()
            process.browser.close()
        except:
            pass
        if process.is_alive():
            forceful_terminations += 1
            process.terminate()
        process.join()
        process.close()
    LOGGER.info(
        f'{forceful_terminations} process(es) had to be forcefully terminated.'
    )

    oneway_publish('crawler', notification, 'crawler.event.end')
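Example #14 builds all downloader and extractor processes first and only then sets start_event so they begin crawling together. A minimal sketch of that start-gate use of Manager().Event() follows; the worker body is illustrative.

import os
import time
from multiprocessing import Manager, Process


def worker(start_event):
    # Expensive per-process setup would happen here (e.g. launching a browser).
    start_event.wait()               # block until the coordinator says "go"
    print(f"worker {os.getpid()} started at {time.time():.2f}")


if __name__ == "__main__":
    manager = Manager()
    start_event = manager.Event()
    procs = [Process(target=worker, args=(start_event,)) for _ in range(3)]
    for p in procs:
        p.start()
    time.sleep(0.5)                  # simulate the remaining setup work
    start_event.set()                # release all workers together
    for p in procs:
        p.join()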
Example #15
0
    def _init_camera_instance(self):
        manager = Manager()
        self.info_queue = manager.Queue()
        self.event_cap_save = manager.Event()
        self.event_cap_show = manager.Event()
        self.event_cap_close = manager.Event()
        self.shm_show_lists = []
        self._cur_save_dict = manager.dict()
        self.cap_cfgs = self.camera_cfg.values()

        self.event_cap_close.clear()
        self.event_cap_save.clear()
        self.event_cap_show.clear()

        pool = Pool(self._camera_cnt)
        for idx, cap_cfg in enumerate(self.cap_cfgs):
            manager_dict = {
                "info_queue": self.info_queue,
                'save_cfg': self.video_cfg,
                'event_cap_save': self.event_cap_save,
                'event_cap_show': self.event_cap_show,
                'event_cap_close': self.event_cap_close,
                'shm_list': None,
                'save_dict': self._cur_save_dict,
            }

            if cap_cfg.type == 'USBRSYNC':
                assert isinstance(cap_cfg.instance_idx, list)
                instance_cnt = len(cap_cfg.instance_idx)
                for _ in range(instance_cnt):
                    self.shm_show_lists.append(manager.list())
                manager_dict['shm_list'] = self.shm_show_lists[-instance_cnt:]

            elif cap_cfg.type in ['USB', 'RTSP']:
                # assert isinstance(cap_cfg.instance_idx, int)
                self.shm_show_lists.append(manager.list())
                manager_dict['shm_list'] = self.shm_show_lists[-1]

            else:
                raise ValueError(cap_cfg.type)

            pool.apply_async(video_cap_thread, args=(idx, cap_cfg, manager_dict))

        pool.close()
        self.pool = pool

        # create thread to display
        self.th_list = []
        # for i in range(self._camera_cnt):
        qpid_cnt = 0
        for _, cap_cfg in enumerate(self.cap_cfgs):
            flip = cap_cfg['flip']
            scale_ratio = cap_cfg['scale_ratio']
            if cap_cfg.type == 'USBRSYNC':
                instance_cnt = len(cap_cfg.instance_idx)
            elif cap_cfg.type in ['USB', 'RTSP']:
                instance_cnt = 1
            else:
                instance_cnt = 0
            for _ in range(instance_cnt):
                th = QThreadDisplay(qpid_cnt, self.shm_show_lists[qpid_cnt], self.event_cap_close, self.info_queue, scale_ratio, flip)
                th.dis_signal.connect(eval(f"self.label_img{qpid_cnt}").setPixmap)
                self.th_list.append(th)
                qpid_cnt += 1
        for th in self.th_list:
            th.start()
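The camera setup above shares save/show/close Events between every capture worker in the Pool and the display threads. A trimmed sketch of the shared close Event follows; capture_worker is a toy stand-in for video_cap_thread.

import time
from multiprocessing import Manager, Pool


def capture_worker(idx, event_close):
    frames = 0
    while not event_close.is_set():
        frames += 1                   # stand-in for grabbing a frame
        time.sleep(0.01)
    return idx, frames


if __name__ == "__main__":
    manager = Manager()
    event_close = manager.Event()
    with Pool(2) as pool:
        results = [pool.apply_async(capture_worker, (i, event_close))
                   for i in range(2)]
        time.sleep(0.2)
        event_close.set()             # ask all capture workers to exit their loops
        for r in results:
            print(r.get())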
Example #16
0
    def run(self):
        workers = []

        if self.agent.id_supporters:
            manager = Manager()
            if self.agent.id_leader == None:
                self.energy_list = manager.list(range(self.config.num_agents))
                for i in range(0, self.config.num_agents):
                    self.energy_list[i] = 0
                self.stop_event = manager.Event()
            stop_event = manager.Event()

            for i in range(0, self.config.num_sup):
                if self.agent.id_leader == None:
                    workers.append(
                        WorkerProcess(self.agent.id_supporters[i],
                                      self.support_recv, self.support_send[i],
                                      self.agent_div_recv,
                                      self.support_div_recv[i],
                                      self.support_reset_recv[i],
                                      self.support_reset_send[i], self.config,
                                      self.sequence, self.hist_obj,
                                      self.energy_list, stop_event,
                                      self.results_path))
                else:
                    workers.append(
                        WorkerProcess(self.agent.id_supporters[i],
                                      self.support_recv, self.support_send[i],
                                      self.root_div_send,
                                      self.support_div_recv[i],
                                      self.support_reset_recv[i],
                                      self.support_reset_send[i], self.config,
                                      self.sequence, self.hist_obj,
                                      self.energy_list, stop_event,
                                      self.results_path))

            for worker in workers:
                worker.start()

        else:
            if self.agent.id_leader == None:
                self.energy_list = [0]
                self.stop_event = Event()

        jump_radius_aux = self.config.test_jump_dist
        self.agent.current.init_solution(self.hist_obj)
        self.agent.update()

        print('WorkerProcess %d: \n%s' % (self.id, self.agent))

        start_process_time = datetime.datetime.now()
        self.agent.generation = 1

        best_energy = self.agent.pockets[0].energy_value
        gens_without_improve = 0
        gens_convergence = self.config.test_noimprove
        gens_start = 0
        restart_successed = True
        restarts_failed = 0
        energy_calls = sum(self.energy_list)
        self.agent.status_log_append(
            datetime.datetime.now() - start_process_time, energy_calls)

        while not self.stop_event.is_set():

            # Crossover it isn't allowed to execute on agent 0
            if self.agent.id_leader != None:
                if self.agent.leader_pockets[0] != None:
                    index_pocket_leader_agent = self.fitness_roulette_selection(
                        self.agent.leader_pockets)
                    index_pocket_self_agent = self.select_rand_solution(
                        self.agent.pockets)
                    self.agent.crossover(
                        self.agent.leader_pockets[index_pocket_leader_agent],
                        self.agent.pockets[index_pocket_self_agent],
                        self.config.crossover_prob)
            else:
                index_pocket_self_agent = self.select_rand_solution(
                    self.agent.pockets)
                self.agent.current = copy.deepcopy(
                    self.agent.pockets[index_pocket_self_agent])

            # Local search
            time_ls_start = datetime.datetime.now()
            self.agent.simulated_annealing(self.config.ls_prob_ss,
                                           self.config.test_ls_fact,
                                           self.config.test_jump_prob,
                                           jump_radius_aux,
                                           self.config.test_temp_init,
                                           self.hist_obj)
            self.agent.time_ls += datetime.datetime.now() - time_ls_start
            jump_radius_aux = jump_radius_aux * self.config.test_jump_fact

            updated = self.agent.update()

            # Update pockets with supporter data
            if self.agent.id_supporters:
                while not self.support_recv.empty():
                    self.receive_solution_pickle(self.support_recv, True)
                    print('>> WorkerProcess %d receive a pocket from supporters, pocket list: %s' % (
                        self.id, self.agent.pockets))

            # Update pocket_leader with leader data
            if self.agent.id_leader != None:
                if not self.leader_recv.empty():
                    self.receive_solution_pickle(self.leader_recv, False)
                    print('>> WorkerProcess %d receive a list of pockets from leader %d' % (
                        self.id, self.agent.id_leader))

            if updated or self.agent.update():
                # Send pocket_leader with leader data
                if self.agent.id_supporters:
                    for i in range(0, self.config.num_sup):
                        if not self.support_send[i].full():
                            print('> WorkerProcess %d send a list of pockets to supporter %d' % (
                                self.id, self.agent.id_supporters[i]))
                            self.send_solution_pickle(self.agent.pockets,
                                                      self.support_send[i])

                # Send pockets with supporter data
                if self.agent.id_leader != None:
                    if self.agent.pockets[0].energy_value < best_energy:
                        if not self.leader_send.full():
                            print('> WorkerProcess %d send a pocket to leader %d with energy: %d' % (
                                self.id, self.agent.id_leader,
                                self.agent.pockets[0].energy_value))
                            self.send_solution_pickle(self.agent.pockets[0],
                                                      self.leader_send)

            if self.config.calculate_div_density:
                # Diversity density calculations
                time_div_start = datetime.datetime.now()

                if self.agent.id_leader == None:
                    for i in range(0, self.config.num_agents - 1):
                        if not self.agent_div_recv[i].empty():
                            buff = self.agent_div_recv[i].get()
                            agent_pockets = pickle.loads(buff)
                            j = 0
                            for p in agent_pockets:
                                if p != None:
                                    self.agent.population_pockets[i][
                                        j] = copy.deepcopy(p)
                                else:
                                    break
                                j += 1

                    if self.agent.id_supporters:
                        for i in range(0, self.config.num_sup):
                            if not self.support_div_recv[i].empty():
                                buff = self.support_div_recv[i].get()
                                supporter_pockets = pickle.loads(buff)
                                j = 0
                                for p in supporter_pockets:
                                    if p != None:
                                        self.agent.supporter_pockets[i][
                                            j] = copy.deepcopy(p)
                                    else:
                                        break
                                    j += 1
                else:
                    if not self.root_div_send[self.id - 1].full():
                        buff = pickle.dumps(self.agent.pockets, 2)
                        self.root_div_send[self.id - 1].put(buff)

                    if not self.leader_div_send.full():
                        buff = pickle.dumps(self.agent.pockets, 2)
                        self.leader_div_send.put(buff)

                    if self.agent.id_supporters:
                        for i in range(0, self.config.num_sup):
                            if not self.support_div_recv[i].empty():
                                buff = self.support_div_recv[i].get()
                                supporter_pockets = pickle.loads(buff)
                                j = 0
                                for p in supporter_pockets:
                                    if p != None:
                                        self.agent.supporter_pockets[i][
                                            j] = copy.deepcopy(p)
                                    else:
                                        break
                                    j += 1

                self.agent.calculate_densities()
                self.agent.time_div += datetime.datetime.now() - time_div_start

            self.agent.generation += 1

            # Reset control
            if self.config.if_reset:
                if self.agent.id_leader == None:
                    if self.agent.pockets[0].energy_value == best_energy:
                        gens_without_improve += 1
                    else:
                        gens_without_improve = 0

                    if gens_without_improve == gens_convergence:
                        if self.agent.id_supporters:
                            for i in range(0, self.config.num_sup):
                                self.support_reset_send[i].put(0)
                            for i in range(0, self.config.num_sup):
                                last_solution = pickle.loads(
                                    self.support_reset_recv[i].get())
                                if last_solution.energy_value < best_energy:
                                    restarts_failed += 1
                                    restart_successed = False
                                    self.agent.update(last_solution)
                                    best_energy = self.agent.pockets[
                                        0].energy_value
                                    gens_without_improve = 0
                        if restart_successed:
                            print('\n***Restart succeeded***\n')
                            self.event_restart.set()
                            if self.agent.id_supporters:
                                for i in range(0, self.config.num_sup):
                                    self.support_reset_send[i].put(True)
                        else:
                            print('\n***Restart failed: %d***\n' % restarts_failed)
                            if self.agent.id_supporters:
                                for i in range(0, self.config.num_sup):
                                    self.support_reset_send[i].put(False)
                        restart_successed = True
                else:
                    if not self.leader_reset_recv.empty():
                        self.leader_reset_recv.get()
                        if self.agent.id_supporters:
                            for i in range(0, self.config.num_sup):
                                self.support_reset_send[i].put(0)
                            for i in range(0, self.config.num_sup):
                                self.receive_solution_pickle(
                                    self.support_reset_recv[i], True)
                        self.send_solution_pickle(self.agent.pockets[0],
                                                  self.leader_reset_send)
                        restart_successed = self.leader_reset_recv.get()
                        if restart_successed:
                            self.event_restart.set()
                            if self.agent.id_supporters:
                                for i in range(0, self.config.num_sup):
                                    self.support_reset_send[i].put(True)
                        else:
                            if self.agent.id_supporters:
                                for i in range(0, self.config.num_sup):
                                    self.support_reset_send[i].put(False)
                        restart_successed = True

                # Is event restart set?
                if self.event_restart.is_set():
                    if self.agent.id_leader == None:
                        # Only the root leader can keep the best solution
                        self.agent.pockets = [self.agent.pockets[0]] + [
                            None for i in range(1, self.config.num_pockets)
                        ]
                        self.agent.population_pockets = [[
                            None for i in range(0, self.config.num_pockets)
                        ] for i in range(1, self.config.num_agents)]
                        for i in range(0, self.config.num_agents - 1):
                            if not self.agent_div_recv[i].empty():
                                self.agent_div_recv[i].get()
                    else:
                        self.agent.pockets = [
                            None for i in range(0, self.config.num_pockets)
                        ]
                        self.agent.leader_pockets = [
                            None for i in range(0, self.config.num_pockets)
                        ]
                        if not self.leader_recv.empty():
                            self.leader_recv.get()

                    if self.agent.id_supporters:
                        while not self.support_recv.empty():
                            self.support_recv.get()

                        self.agent.supporter_pockets = [[
                            None for i in range(0, self.config.num_pockets)
                        ] for i in range(1, self.config.num_sup + 1)]
                        for i in range(0, self.config.num_sup):
                            if not self.support_div_recv[i].empty():
                                self.support_div_recv[i].get()

                    self.agent.restarts += 1

                    print('RESTARTING %3d - WorkerProcess %2d - %s' % (
                        self.agent.restarts, self.id, self.agent))
                    self.agent.current.init_solution(self.hist_obj)
                    self.agent.update()
                    jump_radius_aux = self.config.test_jump_dist
                    gens_convergence = self.config.test_noimprove + self.agent.generation - gens_convergence - gens_start
                    gens_start = self.agent.generation
                    gens_without_improve = 0
                    self.event_restart.clear()
                    print('RESTARTED %3d - WorkerProcess %2d - %s' % (
                        self.agent.restarts, self.id, self.agent))

            self.energy_list[self.id] = self.agent.current.energy_calls
            energy_calls = sum(self.energy_list)
            self.agent.status_log_append(
                datetime.datetime.now() - start_process_time, energy_calls)

            if self.agent.id_leader == None:
                if energy_calls > self.config.energy_limit:
                    self.stop_event.set()

            best_energy = self.agent.pockets[0].energy_value

        if self.agent.id_supporters:
            stop_event.set()
        if self.agent.id_leader != None:
            self.leader_send.cancel_join_thread()
            self.root_div_send[self.id - 1].cancel_join_thread()
            self.leader_div_send.cancel_join_thread()

        self.save_results()

        if self.agent.id_supporters:
            for worker in workers:
                worker.join()

        print('\n************ WorkerProcess %d done ************\n' % (self.id))
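Example #16 lets the root agent decide when to stop: every process writes its energy-call count into a shared Manager list, and the root sets the stop Event once the summed total crosses the energy limit. A much smaller sketch of that arrangement follows; the agent body is illustrative.

import random
import time
from multiprocessing import Manager, Process


def agent(idx, energy_list, stop_event):
    calls = 0
    while not stop_event.is_set():
        calls += random.randint(1, 5)     # stand-in for energy evaluations
        energy_list[idx] = calls          # publish this agent's counter
        time.sleep(0.01)


if __name__ == "__main__":
    limit = 200
    manager = Manager()
    energy_list = manager.list([0, 0, 0])
    stop_event = manager.Event()
    procs = [Process(target=agent, args=(i, energy_list, stop_event))
             for i in range(3)]
    for p in procs:
        p.start()
    while sum(energy_list) <= limit:      # root watches the shared counters
        time.sleep(0.05)
    stop_event.set()
    for p in procs:
        p.join()
    print("total energy calls:", sum(energy_list))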
Example #17
0
    def __run_merge(self):
        """
        Runs the merge.
        :return:
        """

        self.__set_status(self.RUNNING)

        self.__master = None

        selected_scans = self.selected_ids
        matched_scans = self.__matched_scans

        output_files = self.summary()
        master_f = output_files['master']
        del output_files['master']

        scans = {
            scan_id: {
                'image': matched_scans[scan_id]['image'],
                'output': output_files[scan_id]
            }
            for scan_id in selected_scans
        }

        print('Merging scan IDs: {}.'.format(', '.join(self.selected_ids)))

        try:
            manager = Manager()
            self.__term_evt = term_evt = manager.Event()

            self.__shared_progress = mp_sharedctypes.RawArray(
                ctypes.c_int32, len(scans))

            master_f = os.path.join(self.__output_dir, master_f)

            if not self.__overwrite:
                mode = 'w-'
            else:
                mode = 'w'

            # trying to access the file (erasing it if necessary)
            with XsocsH5.XsocsH5MasterWriter(master_f, mode=mode):
                pass

            if self.__n_proc is None:
                n_proc = cpu_count()
            else:
                n_proc = self.__n_proc

            def init(term_evt_, shared_progress_):
                global g_term_evt
                global g_shared_progress
                g_term_evt = term_evt_
                g_shared_progress = shared_progress_

            # setting progress to 0
            np.frombuffer(self.__shared_progress, dtype='int32')[:] = 0

            pool = Pool(n_proc,
                        initializer=init,
                        initargs=(term_evt, self.__shared_progress),
                        maxtasksperchild=2)

            def callback(result_):
                scan, finished, info = result_
                print('{0} finished.'.format(scan))
                if not finished:
                    term_evt.set()

            results = {}
            self.__proc_indices = proc_indices = {}
            for proc_idx, (scan_id, infos) in enumerate(scans.items()):
                args = (scan_id, proc_idx, self.__spec_h5, self.__output_dir,
                        infos['output'], infos['image'], self.beam_energy,
                        self.chan_per_deg, self.center_chan, self.compression)
                results[scan_id] = pool.apply_async(_add_edf_data,
                                                    args,
                                                    callback=callback)
                proc_indices[scan_id] = proc_idx

            pool.close()

            self.__proc_indices = proc_indices
            pool.join()

            proc_results = [result.get() for result in results.values()]
            proc_codes = np.array(
                [proc_result[1] for proc_result in proc_results])

            rc = self.DONE
            if not np.all(proc_codes == self.DONE):
                if self.ERROR in proc_codes:
                    rc = self.ERROR
                elif self.CANCELED in proc_codes:
                    rc = self.CANCELED
                else:
                    raise ValueError('Unknown return code.')

            if rc == self.DONE:
                with XsocsH5.XsocsH5MasterWriter(master_f, mode='a') as m_h5f:
                    items = scans.items()
                    for proc_idx, (scan_id, infos) in enumerate(items):
                        entry_fn = infos['output']
                        entry = entry_fn.rpartition('.')[0]
                        m_h5f.add_entry_file(entry, entry_fn)

            self.__set_status(rc)

        except Exception as ex:
            self.__set_status(self.ERROR, str(ex))
        else:
            self.__results = master_f

        self.prefix = None
        self.__master = master_f

        # TODO : catch exception?
        if self.__callback:
            self.__callback()

        return self.__results
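Example #17 hands the termination Event and a RawArray of progress counters to each pool worker once, via the Pool initializer, storing them in module globals rather than passing them with every task. A self-contained sketch of that pattern follows; the work function is illustrative, and as in the example the shared objects are only passed at pool creation time.

import ctypes
from multiprocessing import Manager, Pool, sharedctypes

g_term_evt = None
g_progress = None


def init(term_evt, progress):
    # Runs once in every worker process; stash the shared objects in globals.
    global g_term_evt, g_progress
    g_term_evt = term_evt
    g_progress = progress


def work(idx):
    for pct in (50, 100):
        if g_term_evt.is_set():
            return idx, False             # cancelled
        g_progress[idx] = pct             # report progress through the shared array
    return idx, True


if __name__ == "__main__":
    n_tasks = 4
    manager = Manager()
    term_evt = manager.Event()
    shared_progress = sharedctypes.RawArray(ctypes.c_int32, n_tasks)
    with Pool(2, initializer=init, initargs=(term_evt, shared_progress)) as pool:
        results = pool.map(work, range(n_tasks))
    print(results, list(shared_progress))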
Example #18
0
def add_routes(app):
    app.router.add_static('/static', path=PROJECT_ROOT)
    app.router.add_get('/remote', store_handlers.get_remote_manifest)
    app.router.add_get('/local', store_handlers.get_local_manifest)
    app.router.add_get('/modules/{module}/{version}/readme',
                       store_handlers.get_module_readme)
    app.router.add_post('/install', install_module)
    app.router.add_post('/uninstall', store_handlers.uninstall_module)
    app.router.add_get('/installstream', get_install_stream)


if __name__ == '__main__':
    manager = Manager()
    INSTALL_STATE = manager.dict()
    SSE_UPDATE_CONDITION = manager.Condition()
    SSE_UPDATE_EVENT = manager.Event()
    INSTALL_STATE['stage'] = ''
    INSTALL_STATE['message'] = ''
    INSTALL_STATE['module_name'] = ''
    INSTALL_STATE['module_version'] = ''
    INSTALL_STATE['cur_chunk'] = 0
    INSTALL_STATE['total_chunks'] = 0
    INSTALL_STATE['cur_size'] = 0
    INSTALL_STATE['total_size'] = 0
    INSTALL_STATE['update_time'] = time.time()
    install_worker = Process(target=install_from_queue,
                             args=(INSTALL_QUEUE, INSTALL_STATE,
                                   SSE_UPDATE_EVENT))
    install_worker.start()

    app = web.Application()
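Example #18 shares INSTALL_STATE (a Manager dict) and SSE_UPDATE_EVENT between the web handlers and the install worker process. A minimal sketch of that shared-progress idea follows; the names and the chunked loop are illustrative.

import time
from multiprocessing import Manager, Process


def install_worker(state, update_event):
    for pct in (25, 50, 75, 100):
        time.sleep(0.1)               # stand-in for downloading a chunk
        state["progress"] = pct
        update_event.set()            # wake up anyone waiting for an update


if __name__ == "__main__":
    manager = Manager()
    state = manager.dict({"progress": 0})
    update_event = manager.Event()
    worker = Process(target=install_worker, args=(state, update_event))
    worker.start()
    while state["progress"] < 100:
        update_event.wait()
        update_event.clear()
        print("progress:", state["progress"])
    worker.join()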
Example #19
0
class WorkProcessor:
    """
    Work processor responsible for starting PoW generation on different threads
    and processing the results. Runs on its own thread.
    """
    def __init__(self, process_count, work_lock, work_units):
        self.process_count = int(process_count)
        self.work_lock = work_lock
        self.work_units = work_units

        self.work_to_generate = OrderedDict()
        self.work_block_hash_pools = OrderedDict()

        self.manager = None
        self.pool = None

    def update(self):
        """
        Perform a single round of updates:

        * Check which work units need to be solved
        * Check which work units have been solved
        * Start generating PoW for pending work units
        """
        self.update_pending_blocks()
        self.update_completed_blocks()
        self.start_work()

    def shutdown(self):
        self.shutdown_pool()

    def create_pool(self):
        self.manager = Manager()
        self.pool = concurrent.futures.ThreadPoolExecutor(
            max_workers=self.process_count,
            thread_name_prefix="work_local_worker")

    def shutdown_pool(self):
        self.stop_all_workers()

        if not self.pool:
            # Pool might already be inactive because no PoW was being worked on
            return

        self.pool.shutdown()
        self.pool = None

        self.manager.shutdown()
        self.manager = None

    def update_pending_blocks(self):
        """
        Check the work units for any unfinished blocks
        """
        # Collect pending blocks
        with self.work_lock:
            for work_unit in self.work_units.values():
                if not work_unit.solved:
                    work_block_hash = work_unit.work_block_hash
                    self.work_to_generate[work_block_hash] = work_unit

    def get_all_active_workers(self):
        workers = []
        for work_block_hash in self.work_block_hash_pools.keys():
            workers += self.get_active_workers(work_block_hash)

        return workers

    def get_active_workers(self, work_block_hash):
        if not self.work_block_hash_pools.get(work_block_hash, None):
            return []

        return [
            worker for worker in self.work_block_hash_pools[work_block_hash]
            ["workers"] if not worker.done()
        ]

    def get_block_hash_workers(self, work_block_hash):
        if not self.work_block_hash_pools.get(work_block_hash, None):
            return []

        return [
            worker for worker in self.work_block_hash_pools[work_block_hash]
            ["workers"]
        ]

    def start_worker(self, work_unit):
        """
        Start a single worker for a work unit

        :param work_unit: Pending work unit
        :type work_unit: siliqua.work.WorkUnit
        """
        work_block_hash = work_unit.work_block_hash

        if not self.work_block_hash_pools.get(work_block_hash, None):
            self.work_block_hash_pools[work_block_hash] = {
                "work_unit": work_unit,
                "shutdown_flag": self.manager.Event(),
                "workers": []
            }

        work_info = self.work_block_hash_pools[work_block_hash]

        if not self.pool:
            self.create_pool()

        work_info["workers"].append(
            self.pool.submit(process_work,
                             work_unit=work_info["work_unit"],
                             shutdown_flag=work_info["shutdown_flag"]))

    def stop_all_workers(self):
        """
        Stop all running workers for all work units
        """
        # Handle all block hash workers simultaneously, since calling
        # 'stop_workers' individually could cause this method to block
        # for a long time before all workers finally exit
        active_workers = self.get_all_active_workers()
        for work_info in self.work_block_hash_pools.values():
            work_info["shutdown_flag"].set()

        for worker in active_workers:
            # Wait until all threads complete
            worker.result()

        return True

    def stop_workers(self, work_block_hash):
        """
        Stop all running workers for the given work block hash

        :param str work_block_hash: Work block hash
        """
        if not self.work_block_hash_pools.get(work_block_hash, None):
            return

        work_info = self.work_block_hash_pools[work_block_hash]
        work_info["shutdown_flag"].set()

        # Shutdown flag has been set; wait until all workers exit
        for worker in work_info["workers"]:
            worker.result()

        del self.work_block_hash_pools[work_block_hash]

    def update_completed_blocks(self):
        """
        Check if work has been completed. Push any completed work into the
        queue.
        """
        try:
            with self.work_lock:
                remaining_block_hashes = list(
                    self.work_block_hash_pools.keys())
                for work_block_hash in remaining_block_hashes:
                    workers = self.get_block_hash_workers(work_block_hash)

                    found_work = False

                    done_workers, _ = concurrent.futures.wait(
                        workers,
                        timeout=0,
                        return_when=concurrent.futures.FIRST_COMPLETED)

                    for worker in done_workers:
                        workers.remove(worker)

                        exception = worker.exception(timeout=0)
                        if exception:
                            logger.warning("Work thread died unexpectedly.")
                        else:
                            completed_work_unit = worker.result(timeout=0)
                            try:
                                work_unit = self.work_units[work_block_hash]
                            except KeyError:
                                # If the work unit no longer exists, the main
                                # thread already picked up the finished work
                                # If so, clear the workers as normal
                                found_work = True
                                break

                            work_unit.work = completed_work_unit.work
                            logger.info(
                                "Generated PoW for block %s in account %s",
                                completed_work_unit.block_hash,
                                completed_work_unit.account_id)
                            found_work = True

                    if found_work:
                        # If we found work, shutdown all workers for this block hash
                        del self.work_to_generate[work_block_hash]
                        self.stop_workers(work_block_hash)

                        logger.debug("%s PoW(s) left to generate",
                                     len(self.work_to_generate))
        except Exception as exc:
            logger.info("Error: {} {}".format(str(exc), type(exc)))

    def start_work(self):
        """
        Check the pending work units and either start worker threads if
        work is available, or shutdown the pool entirely if no
        work is available
        """
        if not self.work_to_generate:
            if self.pool:
                logger.info("No PoW to generate at this time, "
                            "shutting down worker pool.")
                self.shutdown_pool()
            return

        if not self.pool:
            logger.info("Received %s PoW(s) to generate, starting worker pool",
                        len(self.work_to_generate))
            self.create_pool()

        active_worker_count = len(self.get_all_active_workers())
        available_worker_count = self.process_count - active_worker_count

        work_iter = cycle(self.work_to_generate.values())
        for _ in range(0, available_worker_count):
            work_unit = next(work_iter)
            self.start_worker(work_unit=work_unit)
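WorkProcessor above gives each work block hash its own shutdown_flag Event so the remaining workers can be told to stop as soon as one of them finds valid work. A condensed sketch of that race-and-cancel pattern follows; the proof-of-work search is a toy, and a Manager Event is kept as in the example even though plain threads could use threading.Event.

import concurrent.futures
import random
from multiprocessing import Manager


def search(shutdown_flag):
    while not shutdown_flag.is_set():
        candidate = random.randrange(1_000_000)
        if candidate % 99_999 == 0:       # stand-in for a valid proof-of-work
            return candidate
    return None                           # another worker already won


if __name__ == "__main__":
    manager = Manager()
    shutdown_flag = manager.Event()
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
        workers = [pool.submit(search, shutdown_flag) for _ in range(4)]
        done, _ = concurrent.futures.wait(
            workers, return_when=concurrent.futures.FIRST_COMPLETED)
        result = next(iter(done)).result()
        shutdown_flag.set()               # ask the remaining workers to exit
    print("found:", result)
    manager.shutdown()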
Example #20
0
def main():
    parser = argparse.ArgumentParser(
        description="Cosmosvanity - Create custom cosmos addresses.")
    parser.add_argument(
        "--startswith",
        type=str,
        help="Find an address starting with the provided argument.",
    )
    parser.add_argument(
        "--endswith",
        type=str,
        help="Find an address ending with the provided argument.",
    )
    parser.add_argument(
        "--contains",
        type=str,
        help="Find an address containing the provided argument.")
    parser.add_argument(
        "--letters",
        type=int,
        help="Find an address containing the provided number of letters.",
    )
    parser.add_argument(
        "--digits",
        type=int,
        help="Find an address containing the provided number of digits.",
    )
    parser.add_argument("-n",
                        type=int,
                        help="Number of addresses to search for.",
                        default=1)
    args = parser.parse_args()

    vanity_args = {}
    if args.startswith and _is_valid_bech32(args.startswith):
        vanity_args[starts_with] = str(args.startswith)
    if args.endswith and _is_valid_bech32(args.endswith):
        vanity_args[ends_with] = str(args.endswith)
    if args.contains and _is_valid_bech32(args.contains):
        vanity_args[contains] = args.contains
    if args.letters:
        vanity_args[letters] = args.letters
    if args.digits:
        vanity_args[digits] = args.digits

    with Pool(processes=mp.cpu_count()) as pool:
        manager = Manager()
        event = manager.Event()
        queue = manager.Queue()
        for i in range(args.n):
            for _ in range(mp.cpu_count()):
                pool.apply_async(find_vanity_addr,
                                 args=(vanity_args, event, queue))

        vanity_addresses = []
        while len(vanity_addresses) < args.n:
            vanity_addresses.append(queue.get())
        event.set()
        for address in vanity_addresses:
            print(address)
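
The worker function find_vanity_addr is not shown in this example. Below is a minimal, hypothetical sketch of such a worker, included only to illustrate the event/queue protocol assumed by main(): keep generating candidates until the shared event is set, and put matches on the shared queue. The candidate generation and constraint check are placeholders, not the real bech32 logic.

import random
import string

def find_vanity_addr(vanity_args, event, queue):
    # Keep generating candidates until the main process signals completion.
    while not event.is_set():
        candidate = "cosmos1" + "".join(
            random.choices(string.ascii_lowercase + string.digits, k=38))
        # Placeholder constraint check: only the first requested value is
        # applied here; a real worker would honour every constraint.
        wanted = next(iter(vanity_args.values()), "")
        if str(wanted) in candidate:
            queue.put(candidate)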
Example #21
0
class MultiProcessBatcher(Iterator):
    def __init__(
        self,
        base_iterable: Iterable,
        batch_size: int,
        stack_fn: Callable,
        num_workers: int,
        max_queue_size: Optional[int] = None,
        decode_fn: Callable = lambda x: x,
    ):
        assert num_workers >= 1
        assert max_queue_size is None or max_queue_size >= num_workers

        self.base_iterable = base_iterable
        self.batch_size = batch_size
        self.stack_fn = stack_fn
        self.decode_fn = decode_fn
        self.num_workers = num_workers
        self.max_queue_size = (max_queue_size if max_queue_size is not None
                               else 5 * num_workers)

        self.manager = Manager()
        self.batch_queue = self.manager.Queue(maxsize=self.max_queue_size)
        self.terminate_event = self.manager.Event()
        self.exhausted_events = [
            self.manager.Event() for _ in range(self.num_workers)
        ]
        self.processes = []

        for worker_id, event in enumerate(self.exhausted_events):
            p = Process(
                target=self.worker_fn,
                args=(
                    worker_id,
                    self.num_workers,
                    self.base_iterable,
                    self.batch_size,
                    self.stack_fn,
                    self.batch_queue,
                    self.terminate_event,
                    event,
                ),
            )
            p.start()
            self.processes.append(p)

        self.count = 0

    @staticmethod
    def worker_fn(
        worker_id: int,
        num_workers: int,
        iterable: Iterable,
        batch_size: int,
        stack_fn: Callable,
        batch_queue: Queue,
        terminate_event,
        exhausted_event,
    ):
        MPWorkerInfo.worker_process = True
        MPWorkerInfo.worker_id = worker_id
        MPWorkerInfo.num_workers = num_workers

        for batch in batcher(iterable, batch_size):
            stacked_batch = stack_fn(batch)
            try:
                if terminate_event.is_set():
                    return
                buf = io.BytesIO()
                ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(
                    (worker_id, stacked_batch))
                batch_queue.put(buf.getvalue())
            except (EOFError, BrokenPipeError):
                return

        exhausted_event.set()

    def __iter__(self):
        return self

    def __next__(self):
        if (all(event.is_set() for event in self.exhausted_events)
                and self.batch_queue.empty()):
            self._halt_processes()
            raise StopIteration

        try:
            # TODO make timeout configurable
            got = self.batch_queue.get(timeout=120)
            worker_id, batch = pickle.loads(got)
            batch = self.decode_fn(batch)
        except Empty:
            raise StopIteration()

        return batch

    def _empty_queue(self):
        try:
            # Drain any remaining batches until the queue raises Empty
            while True:
                self.batch_queue.get(block=False)
        except (Empty, FileNotFoundError):
            pass

    def _halt_processes(self):
        # Send termination message to workers
        self.terminate_event.set()
        # Empty queue to make sure workers get the message
        self._empty_queue()
        for p in self.processes:
            p.join()
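
A minimal usage sketch follows, assuming the batcher and MPWorkerInfo helpers referenced above are importable from the same module; the iterable, batch size, and stack function are illustrative only.

if __name__ == "__main__":
    batched = MultiProcessBatcher(
        base_iterable=range(1000),
        batch_size=32,
        stack_fn=list,      # illustrative: keep each batch as a plain list
        num_workers=2,
    )
    for batch in batched:
        pass  # consume batches until all workers are exhausted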
Example #22
0
            if self.waitingEvent.is_set():
                self.waitingEvent.clear()
            self.waitingEvent.wait()

def f(queueLock, idleSemaphore, waitingEvent, queue, time, i):
    s = ShiftQueue(queueLock, idleSemaphore, waitingEvent, queue, time, i)
    s.setShift(3)
    s.setIdle()
    assert(s.getTime() == 3)
    s.setShift(9)
    s.setShift(4)
    s.setIdle()
    assert(s.getTime() == 7)
    s.setIdle()
    assert(s.getTime() == 12)
    return

if __name__ == '__main__':
    manager = Manager()

    queueLock = manager.Lock()
    idleSemaphore = manager.Semaphore(NUM_MASTER - 1)
    waitingEvent = manager.Event()
    queue = manager.list()
    time = manager.list([0])
    processes = [Process(target=f, args=(queueLock, idleSemaphore, waitingEvent, queue, time, i)) for i in range(0, NUM_MASTER)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
Example #23
0
class BrokerSimulator(Process):
    def __init__(self, grid_info, timeout=5, debug_tags="log|debug|verbose", enable_caching=True):
        Process.__init__(self)
        self.debug_tags = debug_tags
        self.id = "B-{}".format(ShortId().generate())
        
        # Store the grid info for this broker
        self.grid = grid_info
        
        # Instantiate MQTT client
        self.verbose("Instantiating MQTT client")
        self.client = mqtt.Client()
        self.client.on_connect = self.onConnectHandler
        self.client.on_message = self.onMessageHandler

        # Initialize MQTT variables
        self.verbose("Initializing MQTT variables")
        self.broker_sub = "broker_{}".format(self.id)
        self.cache_sub = "cache_{}".format(self.id)
        self.subs_list = [ "all", "broker_general", "cache_general", "cache_index",
                           self.broker_sub, self.cache_sub ]
        
        # Instantiate a Manager object for variables that need to be multiprocessed
        self.verbose("Instantiating Manager object")
        self.broker_manager = Manager()
        #self.broker_manager.start()

        self.active_queries = self.broker_manager.dict()
        self.finished_queries = self.broker_manager.dict()

        # Instantiate Task Queue objects
        self.verbose("Instantiating Task Queue objects")
        self.tq_info = AttrDict()
        self.tq_info.request = AttrDict()
        self.tq_info.response = AttrDict()
        self.tq_info.request.queue = self.broker_manager.Queue()
        self.tq_info.request.event = self.broker_manager.Event()
        self.tq_info.response.queue = self.broker_manager.Queue()
        self.tq_info.response.event = self.broker_manager.Event()

        self.cache = Cache()
        
        self.proc_nodes = []
        self.agg_nodes = []
        
        self.timeout = timeout
        self.status  = "INITIALIZED"
        self.caching_enabled = enable_caching

        self.verbose("Started.")
        return
    
    def run(self):
        self.verbose("Connecting...")
        self.client.connect("localhost", 1883, 60)
        self.client.loop_forever()
        # self.broker_manager.shutdown()
        self.verbose("Shutdown.")
        return
    
    def onConnectHandler(self, client, userdata, flags, rc):
        self.verbose("Connected.")
        
        def launchNodes():
            self.verbose("Starting processing node/s")
            # Start the task processing nodes
            self.tq_info.request.event.clear()
            for i in range(0, self.grid.node_count):
                self.proc_nodes.append(ProcNodeSimulator(self.tq_info, self.broker_sub))
                self.proc_nodes[i].start()

            self.verbose("Processing nodes started.")

            # Start the aggregation node/s
            self.tq_info.response.event.clear()
            agg_info = AttrDict()
            agg_info.request = AttrDict()
            agg_info.request.queue = self.tq_info.response.queue
            agg_info.request.event = self.tq_info.response.event
            self.agg_nodes.append( AggNodeSimulator( agg_info, 
                                                     self.active_queries, 
                                                     self.broker_sub) )
            self.verbose("Starting aggregation node/s")
            self.agg_nodes[0].start()

            self.verbose("Aggregation nodes started.")
            return

        self.verbose("Launching Nodes in a separate process")
        Process(target=launchNodes).start()
        
        # Subscribe to general MQTT topics and own topics
        client.subscribe([ (sub, 0) for sub in self.subs_list ])
        self.verbose("Subscribed to: {}".format(self.subs_list))
        
        # Announce connection to channel
        payload_intro = {
            'type' : 'announce',
            'class': 'broker',
            'id'   : self.id, # TODO include other capabilities here too
            'grid' : { 'x' : self.grid.x, 'y' : self.grid.y, 'cache_limit' : self.cache.cache_limit, 'latency_map' : self.grid.latency_map.latency_map }
        }
        self.verbose("Announcement payload: {}".format(json.dumps(payload_intro)))
        client.publish("all", json.dumps(payload_intro))
        self.verbose("Announcement made.")

        return
    
    def onMessageHandler(self, client, userdata, msg):
        topic = str(msg.topic)
        request = json.loads(msg.payload)
        
        #self.verbose("Received from {}: {}".format(topic, request))
        if (request['type'] == 'shutdown') and (topic in ['all', 'broker_general', self.broker_sub]):
            self.verbose("Shutting down...")

            shutdown_request = { 'type' : 'shutdown' }

            self.verbose("Shutting down processing nodes...")
            for i in range(0, len(self.proc_nodes)):
                self.tq_info.request.queue.put(shutdown_request)

            self.tq_info.request.event.set()
        
            self.verbose("Waiting for processing nodes to shutdown...")
            for pn in self.proc_nodes:
                pn.join()

            self.verbose("Shutting down aggregation nodes...")
            self.tq_info.response.queue.put(shutdown_request)
            self.tq_info.response.event.set()

            self.verbose("Waiting for aggregation nodes to shutdown...")
            for an in self.agg_nodes:
                an.join()

            client.disconnect()

        elif (request['type'] == 'status') and (topic in ['all', 'broker_general', self.broker_sub]):
            rc = self.handleStatusRequest(request, client, topic)
            if rc != True:
                self.debug("Error Occurred during handling of status request!")
                client.disconnect()
            
        elif (request['type'] == 'query') and (topic == self.broker_sub):
            rc = self.handleQueryRequest(request, client)
            if rc != True:
                self.debug("Error Occurred during handling of query request!")
                client.disconnect()
                
        elif (request['type'] == 'get_cached_item') and (topic == self.cache_sub):
            rc = self.handleGetCachedItemRequest(request, client)
            if rc != True:
                self.debug("Error Occurred during handling of query request!")
                client.disconnect()
                
        elif (request['type'] == 'cache_index_response') and (topic == self.broker_sub):
            rc = self.handleQueryCacheIndexResponse(request, client)
            if rc != True:
                self.debug("Error Occurred during handling of cache index response!")
                client.disconnect()
                
        elif (request['type'] == 'get_cached_item_response') and (topic == self.broker_sub):
            rc = self.handleQueryCachedItemResponse(request, client)
            if rc != True:
                self.debug("Error Occurred during handling of cache response!")
                client.disconnect()
        
        elif (request['type'] == 'aggregation_result') and (topic == self.broker_sub):
            rc = self.handleAggregationResult(request, client)
            if rc != True:
                self.debug("Error Occurred during handling of aggregation result!")
                client.disconnect()

        elif (request['type'] == 'cache_reassign') and (topic in ['all', 'broker_general', self.broker_sub]):
            rc = self.handleCacheReassign(request, client, topic)
            if rc != True:
                self.debug("Error Occurred during handling of cache reassign request!")
                client.disconnect()
        
        
        return
    
    def handleQueryRequest(self, query, client):
        self.verbose("Handling Query Request")
        # Save information about the pending query
        self.active_queries[query['id']] = self.broker_manager.dict({
                                                'inputs'  : query['inputs'],
                                                'grid'    : query['grid'],
                                                'tasks'   : query['tasks'],
                                                'load'    : query['load'],
                                                'count'   : query['tasks']['processing'], 
                                                'started' : time.time(),
                                                'ended'   : None, 
                                            })

        if not self.caching_enabled:
            self.verbose("Caching disabled. Switching over to full processing...")
            query['query_id'] = query['id']
            return self.handleQueryProcessing(query, client)
        
        # Check if this query's result can be retrieved from some other cache
        request = {
            'query_id' : query['id'],
            'type' : 'get_cache_list',
            'input_key' : query['inputs'],
            'broker_sub_id' : self.broker_sub,
        }
        self.verbose("Sending Cache Index Request: {}".format(request))
        client.publish("cache_index_requests", json.dumps(request))
        
        return True
    
    def handleQueryCacheIndexResponse(self, query, client):
        self.verbose("Handling Query Cache Index Response")
        # Check if this does not have an active query -- if so, disregard it
        if not query['query_id'] in self.active_queries.keys():
            return False
        
#         [ Expected CacheIndex Response ]
#
#             response = {
#                 'query_id' : query['id'],
#                 'type' : 'cache_index_response',
#                 'target_id' : broker id of requestor,
#                 'input_key' : key requested,
#                 'cache_list' : list of known holders of this info.
#             }
#       
        # If the returned list is empty, then we have to process this on our own
        if not query['cache_list']:
            self.verbose("Not in cache. Switching over to full processing...")
            return self.handleQueryProcessing(query, client)
        
        # Otherwise, attempt to retrieve the result from another cache
        target_id = random.choice(query['cache_list'])
        target_sub = "cache_{}".format(target_id)
        
        # Reload the input key from the list of known active queries
        input_key = self.active_queries[query['query_id']]['inputs']
        
        # Request the result from another cache
        request = {
            'query_id' : query['query_id'],
            'type' : 'get_cached_item',
            'resp_sub' : self.broker_sub,
            'input_key' : input_key,
        }
        self.verbose("Requesting cached item from {}...".format(target_sub))
        client.publish(target_sub, json.dumps(request))
        
        return True
    
    def handleGetCachedItemRequest(self, query, client):
        self.verbose("Handling Get Cached Item Request")
        # Context:  This is a standalone request for the value of an item
        #    that is currently cache in this broker. Usually, this is done 
        #    after getting confirmation from the cache index of the cached
        #    result's location.
        
#         [ Expected Broker Request ]
#
#         request = {
#             'query_id' : query['id'],
#             'type' : 'get_cached_item',
#             'resp_sub' : subscription topic of the response,
#             'input_key' : key of result to be retrieved from the cache,
#         }
#
        # Load the result from the cache
        cached_result = self.cache.getItem(query['input_key'])
        
        # Send the result
        target_sub = query['resp_sub']
        response = {
            'query_id'  : query['query_id'],
            'type'      : 'get_cached_item_response',
            'cache_sub' : self.cache_sub,
            'grid'      : {'x' : self.grid.x, 'y' : self.grid.y},
            'input_key' : query['input_key'],
            'result'    : cached_result,
        }
        client.publish(target_sub, json.dumps(response))
        return True
    
    def handleQueryCachedItemResponse(self, query, client):
        self.verbose("Handling Get Cached Item Response")
        # Context:  A cached item was previously requested from a target broker.
        #    In this function, we process the response of that broker and try
        #    to retrieve the value of the cached item from it
        
        # Check if this does not have an active query -- if so, disregard it
        q_id = query['query_id']
        if not q_id in self.active_queries.keys():
            return self.handleQueryProcessing(query, client)
        
#         [ Expected Broker Response ]
#
#         response = {
#             'query_id' : query['id'],
#             'type' : 'get_cached_item_response',
#             'cache_sub' : self.cache_sub,
#             'grid'      : {'x' : self.grid.x, 'y' : self.grid.y},
#             'input_key' : input_key,
#             'result'    : cached_result,
#         }
#
        # If nothing was found, then we handover to normal processing
        if query['result'] is None:
            # TODO Should this incur any cache retrieval penalties?
            return False
        
        # Otherwise, get the latency between the current grid and the cached item source
        targ_x = query['grid']['x']
        targ_y = query['grid']['y']
        
        cache_retrieval_delay = self.grid.getLatency(targ_x, targ_y)
        
        def delayed_finish_query():
            # Simulate the delay with sleep
            if cache_retrieval_delay > 0.0:
                time.sleep(cache_retrieval_delay)

            # Reload the ongoing query's information
            query_info = self.active_queries[q_id]
             
            # Add the aggregation result to the finished results
            self.verbose("Publishing finished result")
            finished_result = {
                    'type'    : 'completion_result',
                    'assigned': { 'x' : targ_x, 'y' : targ_y },
                    'origin'  : query_info['grid'],
                    'query_id': q_id,
                    'inputs'  : query_info['inputs'],
                    'tasks'   : query_info['tasks'],
                    'load'    : query_info['load'],
                    'count'   : query_info['count'], 
                    'started' : query_info['started'],
                    'ended'   : time.time(), 
                    'result'  : query['result'],
                    'from_cache' : True,
                    'cache_retrieval_delay' : cache_retrieval_delay,
            }
            query_client = mqtt.Client()
            query_client.connect("localhost", 1883, 60)
            query_client.publish(QUERY_COMPLETION_SUB, json.dumps(finished_result))

            # Remove query id from active queries
            del self.active_queries[q_id]

            self.verbose("Result found after {} secs: {}".format(cache_retrieval_delay, query['result']))
            query_client.disconnect()
            return

        Process(target=delayed_finish_query).start()

        return True
    
        
        # If not, then process it as normal
        #    Make a note of the active tasks
        #    Push the tasks to the task queue and set the event flag for it
        
    def handleQueryProcessing(self, query, client):
        self.verbose("Handling Query Processing")
        # Check if this does not have an active query -- if so, disregard it
        if not query['query_id'] in self.active_queries.keys():
            return False
            
        # Reload the ongoing query's information
        query_info = self.active_queries[query['query_id']]
        
        # Start a number of tasks by putting them on the task queue
        self.verbose("Starting tasks for query processing")
        self.verbose("    Query: {}".format(query))
        self.verbose("    Query Info: {}".format(query_info))

        task_count = query_info['count']
        for i in range(0, task_count):
            task_inputs = {
                'type'      : 'task',
                'query_id'  : query['query_id'],
                'exec_time' : query_info['load'],
                'inputs'    : query_info['inputs'],
            }
            self.tq_info.request.queue.put(task_inputs)
        
        # Tell the processing tasks to start
        self.tq_info.request.event.set()
        self.verbose("Tasks started")

        return True

    def handleAggregationResult(self, query, client):
        self.verbose("Handling Aggregation Result")
        # Check if this does not have an active query -- if so, disregard it
        q_id = query['query_id']
        if not q_id in self.active_queries.keys():
            return False

        # Add the aggregation result to the finished results
        self.verbose("Publishing finished result")
        finished_result = {
                'type'    : 'completion_result',
                'assigned': { 'x' : self.grid.x, 'y' : self.grid.y },
                'origin'  : { 'x' : self.grid.x, 'y' : self.grid.y },
                'query_id': q_id,
                'inputs'  : query['inputs'],
                'grid'    : query['grid'],
                'tasks'   : query['tasks'],
                'load'    : query['load'],
                'count'   : query['tasks']['processing'], 
                'started' : query['started'],
                'ended'   : query['ended'], 
                'result'  : query['result'],
                'from_cache' : False,
                'cache_retrieval_delay' : 0.0,
        }

        client.publish(QUERY_COMPLETION_SUB, json.dumps(finished_result))

        # Remove query id from active queries
        del self.active_queries[q_id]

        # Add the *result* to the cache
        self.cache.add( query['inputs'],  query['result'])

        # Tell the cache index to add a new entry as well
        request = {
            'type' : 'add',
            'input_key' : query['inputs'],
            'broker_id' : self.id,
        }
        self.verbose("Sending Cache Index Request: {}".format(request))
        client.publish("cache_index_requests", json.dumps(request))

        return True

    def handleCacheReassign(self, request, client, topic):
        # TODO
        nqa_map = request['map']

        # Cycle through each input key in the reassigned query map
        # for k in self.cache.getKeys():
        #     # Check if the input key is in our cache, if so 
        #     cached_item  = self.cache.getItem(k)
            

        return True
    
    def handleStatusRequest(self, request, client, topic):
        response = {'id' : self.id,
                    'status' : self.status}
        # Send the result
        client.publish(topic, json.dumps(response))

        return True
    
    def log(self, message):
        print("[{}] {}".format(self.id, message))
        return
    
    def verbose(self, message):
        if "verbose" in self.debug_tags:
            self.log(message)
        return
    
    def debug(self, message):
        if "debug" in self.debug_tags:
            self.log(message)
        return
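
The broker above talks to a cache index service over the cache_index_requests topic ('get_cache_list' when a query arrives, 'add' after an aggregation result is cached), but that service is not part of this example. The following is a rough, hypothetical sketch of a minimal in-memory cache index responder built on the same paho-mqtt client API; the field names follow the requests and the expected 'cache_index_response' shown above.

import json
from collections import defaultdict

import paho.mqtt.client as mqtt

# Maps a stringified input key to the ids of brokers known to cache its result.
cache_index = defaultdict(list)

def on_index_message(client, userdata, msg):
    request = json.loads(msg.payload)
    if request['type'] == 'add':
        cache_index[str(request['input_key'])].append(request['broker_id'])
    elif request['type'] == 'get_cache_list':
        response = {
            'query_id'   : request['query_id'],
            'type'       : 'cache_index_response',
            'input_key'  : request['input_key'],
            'cache_list' : cache_index.get(str(request['input_key']), []),
        }
        client.publish(request['broker_sub_id'], json.dumps(response))

index_client = mqtt.Client()
index_client.on_message = on_index_message
index_client.connect("localhost", 1883, 60)
index_client.subscribe("cache_index_requests")
index_client.loop_forever()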
Example #24
0
if __name__ == '__main__':
    import sys, os
    from time import sleep
    from multiprocessing import Manager
    from DegenPrimer.Primer import Primer
    from DegenPrimer.SeqUtils import load_sequence
    from DegenPrimer.WorkCounter import WorkCounterManager
    from DegenPrimer.WaitingThread import WaitingThread
    from DegenPrimer.TD_Functions import PCR_P
    from DegenPrimer.BlastPrimers import BlastPrimers
    from threading import Lock

    os.chdir('../')

    mgr = Manager()
    abort_event = mgr.Event()

    PCR_P.PCR_T = 53
    PCR_P.Mg = 3e-3
    PCR_P.dNTP = 300e-6
    PCR_P.DNA = 1e-10

    fwd_primer = Primer(
        load_sequence('ATATTCTACRACGGCTATCC', 'F-TGAM_0057-268_d1',
                      'F-TGAM_0057-268_d1'), 0.43e-6, True)
    rev_primer = Primer(
        load_sequence('GAASGCRAAKATYGGGAAC', 'R-TGAM_0055-624-d4',
                      'R-TGAM_0055-624-d4'), 0.43e-6, True)

    blastp = BlastPrimers(abort_event,
                          job_id='F-TGAM_0057-268_d1-R-TGAM_0055-624-d4',
Example #25
0
class TestOwnerPrintingInterProcessFileLock(unittest.TestCase):
    def setUp(self):
        self.lock_dir = tempfile.mkdtemp()
        self.lock_path = os.path.join(self.lock_dir, 'lock')
        self.lock = OwnerPrintingInterProcessFileLock(self.lock_path)
        self.manager = Manager()
        self.lock_held = self.manager.Event()
        self.terminate = self.manager.Event()
        self.lock_process = Process(
            target=hold_lock_until_terminate,
            args=(self.lock_path, self.lock_held, self.terminate),
        )

    def tearDown(self):
        self.terminate.set()
        try:
            shutil.rmtree(self.lock_dir)
        except OSError:
            pass

    def test_non_blocking_attempt(self):
        self.lock_process.start()
        self.lock_held.wait()
        self.assertFalse(self.lock.acquire(blocking=False))

    def test_message(self):
        self.lock_process.start()
        self.lock_held.wait()
        self.assertTrue(os.path.exists(self.lock.message_path))
        with open(self.lock.message_path, 'rb') as f:
            message_content = f.read()
        self.assertIn(str(self.lock_process.pid), message_content)

        os.unlink(self.lock.message_path)

        def message_fn(message):
            self.assertIn(self.lock.missing_message_output, message)

        self.lock.acquire(blocking=False, message_fn=message_fn)

    def test_blocking(self):
        self.lock_process.start()
        self.lock_held.wait()
        self.assertFalse(self.lock.acquire(timeout=.1))

        acquire_is_blocking = self.manager.Event()

        def terminate_subproc(terminate, acquire_is_blocking):
            acquire_is_blocking.wait()
            terminate.set()

        Thread(target=terminate_subproc,
               args=(self.terminate, acquire_is_blocking)).start()

        def message_fn(message):
            self.assertIn(str(self.lock_process.pid), message)
            acquire_is_blocking.set()

        # NOTE: We shouldn't ever wait this long (locally this runs in ~milliseconds)
        # but sometimes CI containers are extremely slow, so we choose a very large
        # value just in case.
        self.assertTrue(self.lock.acquire(timeout=30, message_fn=message_fn))

    def test_reentrant(self):
        self.assertTrue(self.lock.acquire())
        self.assertTrue(self.lock.acquire())

    def test_release(self):
        self.assertTrue(self.lock.acquire())
        self.assertTrue(self.lock.acquired)
        self.lock.release()
        self.assertFalse(self.lock.acquired)
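
The module-level helper hold_lock_until_terminate is used by the fixtures above but not shown; judging from that usage, it is presumably something like the hypothetical reconstruction below.

def hold_lock_until_terminate(lock_path, lock_held, terminate):
    # Acquire the lock in the child process, tell the test it is held,
    # then keep holding it until the test sets the terminate event.
    lock = OwnerPrintingInterProcessFileLock(lock_path)
    lock.acquire()
    try:
        lock_held.set()
        terminate.wait()
    finally:
        lock.release()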
Example #26
0
class Control(inLib.Device):
    '''
    The device control of pco.edge sCMOS camera.
    '''
    def __init__(self, settings):
        inLib.Device.__init__(self, 'cooke.pcoedge.camera_api', settings)

        #Camera properties
        self._props = {}

        #Loaded yaml settings
        self.loadedSettings = settings
        self.saved_settings = []
        self.newSettings('no_file', settings_dict=settings)

        #Sensor settings
        self._bpp = 14
        status, (x0, y0, x1, y1) = self._api.getROI()
        self._props['dimensions'] = x1 - x0 + 1, y1 - y0 + 1
        self._props['x_start'] = x0
        self._props['y_start'] = y0

        self._initializedBuffers = 0
        self._lastBuffer = 0

        self.buffer0_allocd = False
        self.buffer1_allocd = False

        self._buffersAllocated = np.zeros(16, dtype=bool)
        self._buffersQueued = np.zeros(16, dtype=bool)
        self._buffers = []
        self._bufferEvents = []

        self._images = np.array([])
        self._fillBottom = True

        self._alternatingBuffer = 0

        self.daxfile = 0

        self._api.setRecorderSubmode(1)

        self.fastImages = None

        #Multiprocessing tests
        self.grabFrames = 100
        self.emitSignalFrame = 4000

        self.initFramesMP = 100
        self.subProcessStart = False
        self.subProcess2Start = False
        self.parent_conn, self.child_conn = Pipe()
        self.mgr = Manager()
        self.q = self.mgr.Queue()
        self.qIms = self.mgr.Queue()
        self.resultsQueue = self.mgr.Queue()
        self.resultsEvent = self.mgr.Event()
        self.p = Process(target=imageAnalysisTest3,
                         args=(
                             self.grabFrames,
                             self.q,
                             self.resultsQueue,
                             self.qIms,
                         ))

        self.furtherAnalysis = Process(target=useImageAnalysisResults,
                                       args=(
                                           self.emitSignalFrame,
                                           self.resultsQueue,
                                           self.resultsEvent,
                                       ))

        self.image1 = 0
        self.image2 = 100

        self.point1 = 0
        self.point2 = 0

    def newSettings(self, filename, settings_dict=None):
        if settings_dict is None:
            settings_dict = inLib.load_settings(filename)
            if settings_dict.has_key('devices'):
                settings_dict = settings_dict['devices']['camera']
            settings_dict['settings_filename'] = filename
        self.saved_settings = [settings_dict] + self.saved_settings

    def updateSettings(self, settings_dict):
        try:
            roi = settings_dict['roi']
            self._setROI(roi)
        except:
            print "Unable to set new ROI"
        try:
            exposure_time = settings_dict['exposure_time']
            delay_time = settings_dict['delay_time']
            self._setDelayExposureTime(delay_time, exposure_time, 'ms', 'ms')
        except:
            print "Unable to set new timings"

    def unSetEvent(self):
        self.resultsEvent.clear()

    def whenToEmitAndCapture(self):
        return self.emitSignalFrame, self.grabFrames

    def changeWhenToEmitAndCapture(self, sig, capt):
        self.emitSignalFrame = sig
        self.grabFrames = capt

    def getRecordingStruct(self):
        prec = self._api.getRecordingStruct()
        return prec

    def _setROI(self, ROI):
        x0, x1, y0, y1 = ROI
        status = self._api.setROI(x0, y0, x1, y1)

    def _getROI(self):
        status, (x0, y0, x1, y1) = self._api.getROI()
        return x0, x1, y0, y1

    def _getResolution(self):
        status, (x0, y0, x1, y1) = self._api.getROI()
        return x1 - x0 + 1, y1 - y0 + 1

    def _getDelayExposureTime(self):
        err, times = self._api.getDelayExposureTime()
        self._props['exposure_time'] = times[1]
        return times[0], times[1]

    def _setDelayExposureTime(self, delay, exposure, delay_units, exp_units):
        if delay_units == 'ns':
            delay_base = 0
        elif delay_units == 'us':
            delay_base = 1
        elif delay_units == 'ms':
            delay_base = 2
        else:
            delay_base = 2

        if exp_units == 'ns':
            exp_base = 0
        elif exp_units == 'us':
            exp_base = 1
        elif exp_units == 'ms':
            exp_base = 2
        else:
            exp_base = 2

        status = self._api.setDelayExposureTime(delay, exposure, delay_base,
                                                exp_base)

    def _initBuffers(self, num_to_init=16):
        xres, yres = self._props['dimensions']
        self._buffers = []
        for i in range(0, num_to_init):
            self._buffers.append(np.zeros((xres * yres), dtype=np.uint16))
        for i in range(0, num_to_init):
            if self._buffersAllocated[i]:
                status, eHandler, self._buffers[i] = self._api.allocateBuffer(
                    xres, yres, i)
                #self._bufferEvents[i] = eHandler
                print "Buffer size: ", (i, self._buffers[i].shape)
            else:
                status, eHandler, self._buffers[i] = self._api.allocateBuffer(
                    xres, yres, -1)
                self._bufferEvents.append(eHandler)
                print "Buffer size: ", (i, self._buffers[i].shape)
                print "Init buff status: ", (i, status)
                print "Event: ", self._bufferEvents[i].value
            if status == 0:
                self._buffersAllocated[i] = True

    def _initImages(self):
        self._fillBottom = True
        self._images = np.array([])

    def _addBuffers(self, start=0, end=16):
        xres, yres = self._props['dimensions']
        for i in range(start, end):
            if self._buffersAllocated[i]:
                self._api.addBuffer(0, 0, i, xres, yres, self._bpp)
                self._buffersQueued[i] = True

    def addManyBuffers(self, total):
        self._initBuffers()
        i = 0
        j = 0
        xres, yres = self._props['dimensions']
        while i < total:
            err = self._api.addBuffer(0, 0, j, xres, yres, self._bpp)
            if err != 0:
                print "error on ", i
                print "j=", j
                print "addBuffer error: ", err
            i += 1
            j += 1
            if j == 16:
                j = 0
        print "Pending buffers: ", self._buffersPending()

    def beginDAXRecording(self, filename):
        self.daxfile = writer.DaxFile(filename, self._props)
        self.recordedFrames = 0

    def endDAX(self):
        if self.daxfile != 0:
            self.daxfile.closeFile([0, 0, 0], 0)
            print "Closing DAX file " + str(self.daxfile.filename)

    def writeMemoryToDAX(self, filename):
        self.beginDAXRecording(filename)
        print "Length of self._images: ", len(self._images)
        for i in range(0, len(self._images)):
            temp = self._images[i].astype(np.dtype('>H'))
            self.daxfile.saveFrames(temp.tostring(), 1)
            self.recordedFrames += 1
        self.endDAX()

    def captureFast(self, numImages):

        #Stop camera if it is recording
        if self._api.getRecordingState() > 0:
            self.stopRecording()
        else:
            self._api.removeBuffer()
        xres, yres = self._getResolution()

        #First need to create valid events
        err, event1, buf1 = self._api.allocateBuffer(xres, yres, -1)
        err, event2, buf2 = self._api.allocateBuffer(xres, yres, -1)

        #Add buffers to queue
        self._api.addBuffer(0, 0, 0, xres, yres, 14)
        self._api.addBuffer(0, 0, 1, xres, yres, 14)

        #Set recording to on to fill those two buffers:
        self._api.camlinkSetParams(xres, yres)
        self._api.armCamera()
        self._api.setRecordingState(0x0001)

        #Wait for 2nd buffer to fill, then close
        win32event.WaitForSingleObject(
            event1.value, 5000)  #Waits for event1 or for 5 seconds

        #Stop recording
        self.stopRecording()

        #Fill with numImages:
        self._api.camlinkSetParams(xres, yres)
        self._api.armCamera()
        self._api.setRecordingState(0x0001)
        self.fastImages = self._api.largeBufferFill(xres,
                                                    yres,
                                                    numImages,
                                                    events=[event1, event2])
        '''
        self.fastImages will be an array of np.uint16
        Dimensions of xres,yres,15,variable  (variable = totalnumberofframes/15)
        '''

        #Wait for event2:
        win32event.WaitForSingleObject(event2.value, 5000)

        self.stopRecording()

    def getFastImageFrame(self, frameNumber):
        yres = self.fastImages[0][0][0]._length_
        xres = self.fastImages[0][0]._length_
        print "Dimensions: ", [xres, yres]
        num1 = frameNumber / 15
        num2 = frameNumber % 15
        image = np.frombuffer(self.fastImages[num1][num2],
                              dtype=np.uint16).reshape(yres, xres)
        print "max: ", image.max()
        print "min: ", image.min()
        return image

    def setInitFramesMP(self, frames):
        self.initFramesMP = frames

    def _transferOutOfBuffer(self, start=0, end=8, toDAX=False, verbose=False):
        global mpArray
        if verbose:
            print "Size of image buffer: ", len(self._images)
        #self._images = copy.deepcopy(self._buffers[start:end])
        if toDAX:
            for i in range(start, end):
                temp = self._buffers[i].astype(np.dtype('>H')).copy()
                self.daxfile.saveFrames(temp.tostring(), 1)
                self.recordedFrames += 1
            self._images = np.array(self._buffers[start:end]).copy()
            #print "Shape of _images: ", self._images.shape
            return self.recordedFrames
        numImages = len(self._images)
        '''
        if self.image1 == 0:
            self.image2 = self.initFramesMP
        if numImages == 0:
            self._images = copy.deepcopy(self._buffers[start:end])
            globalImageBuffer = copy.deepcopy(self._buffers[start:end])
            self.p.start()
            self.image1 = 0
            self.image2 = self.initFramesMP
        else:
            self._images.extend(copy.deepcopy(self._buffers[start:end]))
            globalImageBuffer = copy.deepcopy(self._images)
            print "Length of globImageBuffer from inside: ", len(globalImageBuffer)
            if numImages > self.initFramesMP:
                if self.p.is_alive():  
                    self.q.put([self.image1,self.image2])
                    self.image1 = self.image2
                    self.image2 = numImages
        '''
        if len(self._images) == 0:
            self._images = copy.deepcopy(self._buffers[start:end])
        else:
            self._images.extend(copy.deepcopy(self._buffers[start:end]))

        #arrData = self.mgr.Array('H',list(chain.from_iterable(self._buffers[start:end])),lock=False)

        if not self.subProcessStart and not self.p.is_alive():
            self.p.start()
            self.subProcessStart = True

        self.q.put(self._buffers[start:end])

        if not self.subProcess2Start:
            self.furtherAnalysis.start()
            self.subProcess2Start = True

        return numImages

    def flushImQ(self):
        while not self.qIms.empty():
            self.qIms.get()

    def endMP(self):
        print "in EndMP: ", mpArray
        self.q.put(None)
        #if not self.subProcess2Start:
        #    self.furtherAnalysis.start()
        self.subProcess2Start = True
        print "Ending q...\n"
        self.q.put(None)
        #self.q.close()
        time.sleep(3)
        #self.p.terminate()
        if self.furtherAnalysis.is_alive():
            self.furtherAnalysis.join()
        if self.p.is_alive():
            self.p.join()

        self.subProcessStart = False
        self.subProcess2Start = False
        #self.furtherAnalysis.terminate()
        #self.q.close()
        del self.mgr
        self.mgr = Manager()
        del self.q
        del self.resultsQueue
        #del self.resultsEvent
        self.q = self.mgr.Queue()
        self.resultsQueue = self.mgr.Queue()
        #self.resultsEvent = self.mgr.Event()
        del self.p
        self.p = Process(target=imageAnalysisTest3,
                         args=(
                             self.grabFrames,
                             self.q,
                             self.resultsQueue,
                             self.qIms,
                         ))
        del self.furtherAnalysis
        self.furtherAnalysis = Process(target=useImageAnalysisResults,
                                       args=(
                                           self.emitSignalFrame,
                                           self.resultsQueue,
                                           self.resultsEvent,
                                       ))

        #self.furtherAnalysis.start()
        #self.furtherAnalysis.join()

    def stateOfProcesses(self):
        print "imageAnalysisTesting process: ", self.p.is_alive()
        print "Further analysis proces: ", self.furtherAnalysis.is_alive()

    def imageAnalysisTest(self, conn):
        print "New process starting..."
        #images = conn.recv()
        #print "Got images. Length = ", len(images)

    def getImageBuffer(self):
        return self._images

    def _fillImages(self, toDAX=False):

        num_pending = self._buffersPending()
        nIms = -1
        if num_pending <= 8:
            if self._fillBottom:
                nIms = self._transferOutOfBuffer(0, 8, toDAX=toDAX)
                self._addBuffers(0, 8)
            else:
                nIms = self._transferOutOfBuffer(8, 16, toDAX=toDAX)
                self._addBuffers(8, 16)
            self._fillBottom = not self._fillBottom
        return nIms, num_pending
        '''
        #print self._bufferEvents
        numIms = 0
        while numIms<400:
            if self._fillBottom:
                win32event.WaitForSingleObject(self._bufferEvents[8].value, 5000)
                nIms = self._transferOutOfBuffer(0,8,toDAX=toDAX)
                #self._addBuffers(0,8)
            else:
                win32event.WaitForSingleObject(self._bufferEvents[0].value, 5000)
                nIms = self._transferOutOfBuffer(8,16,toDAX=toDAX)
                #self._addBuffers(8,16)
            self._fillBottom = not self._fillBottom
            numIms = self._images.shape[0]
        return self._images.shape[0]
        '''

    def _saveImages(self):
        if self._images.shape[0] > 0:
            np.save("D:\\Data\\test_images_out.npy", self._images)

    def getNumberInImages(self):
        return self._images.shape[0]

    def _buffersPending(self):
        return self._api.getPendingBuffer()[1]

    def getImageForPreview(self):
        xres, yres = self._props['dimensions']
        pending = self._buffersPending()
        #print "Pending buffers: ", pending
        if pending < 2:
            im = self._buffers[self._alternatingBuffer].reshape(xres, yres)
            self._addBuffers(self._alternatingBuffer,
                             self._alternatingBuffer + 1)
            self._alternatingBuffer = 1 * (not self._alternatingBuffer)
            #print "Image returned for preview."
            return im
        else:
            return None

    def _freeBuffers(self):
        for i in range(0, 16):
            if self._buffersAllocated[i]:
                self._api.freeBuffer(i)

    def beginPreview(self):
        if self._api.getRecordingState() > 0:
            self.stopRecording()
        xres, yres = self._getResolution()
        self._props['dimensions'] = xres, yres
        print "Resolution ", (xres, yres)
        self._api.camlinkSetParams(xres, yres)
        self._api.armCamera()
        self._initBuffers(2)
        self._addBuffers(0, 2)
        self._api.setRecordingState(0x0001)
        print "Preview started..."

    def stopPreview(self):
        if self._api.getRecordingState() > 0:
            self.stopRecording()
        self._api.freeBuffer(0)
        self._api.freeBuffer(1)
        self._buffersAllocated[0] = False
        self._buffersAllocated[1] = False

    def beginBufferFill(self):
        self.stopRecording()
        self._api.armCamera()
        self._initBuffers(16)
        self._addBuffers(0, 16)
        #self.addManyBuffers(20)
        self._api.setRecordingState(0x0001)
        print "Recording started..."

    def stopRecording(self):
        self._api.setRecordingState(0x0000)
        self._api.removeBuffer()

    def shutDown(self):
        print 'Shutting down pco.edge camera...'
        self.stopRecording()
        self._freeBuffers()
        self._api.closeCamera()
Example #27
0
    lock = RLock()
    #Data can be stored in a shared memory map using Value or Array
    num = Value('d', 0.0)  #typecode from array module, e.g. int(i), double(d), long(l)
    arr = Array('i', range(10))
    p1 = Process(target=sharedf, args=(num, arr, event))
    p1.start()
    event.wait()  #or call event.is_set()
    with lock:
        print(num.value)
        print(arr[:])

    #Manager
    #controls a server process which holds Python objects
    #and allows other processes to manipulate them using proxies
    #supports list, dict, Namespace,
    #Lock, RLock, Semaphore, BoundedSemaphore, Condition, Event, Queue,
    #Value and Array
    manager = Manager()
    event2 = manager.Event()
    d = manager.dict()
    l = manager.list(range(10))
    p2 = Process(target=managerf, args=(d, l, event2))
    p2.start()
    event2.wait()  #or call event.is_set()
    with lock:
        print(d)
        print(l)
    p1.join()
    p2.join()
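
The worker functions sharedf and managerf are not shown in this fragment. The definitions below are hypothetical reconstructions, written only to be consistent with how the parent reads the shared objects after waiting on the events.

def sharedf(num, arr, event):
    # Mutate the shared Value and Array, then signal the parent.
    num.value = 3.14
    for i in range(len(arr)):
        arr[i] = -arr[i]
    event.set()

def managerf(d, l, event2):
    # Mutate the managed dict and list via their proxies, then signal the parent.
    d['key'] = 'value'
    l.reverse()
    event2.set()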
Example #28
0
class DAI(Process):
    daemon = True

    def __init__(self,
                 api_url,
                 device_model,
                 device_addr=None,
                 device_name=None,
                 persistent_binding=False,
                 username=None,
                 extra_setup_webpage='',
                 device_webpage='',
                 register_callback=None,
                 on_register=None,
                 on_deregister=None,
                 on_connect=None,
                 on_disconnect=None,
                 push_interval=1,
                 interval=None,
                 device_features=None):
        super(DAI, self).__init__()

        self._manager = Manager()
        self._event = self._manager.Event()  # create Event proxy object in the main process

        self.api_url = api_url
        self.device_model = device_model
        self.device_addr = device_addr
        self.device_name = device_name
        self.persistent_binding = persistent_binding
        self.username = username
        self.extra_setup_webpage = extra_setup_webpage
        self.device_webpage = device_webpage

        self.register_callback = register_callback
        self.on_register = on_register
        self.on_deregister = on_deregister
        self.on_connect = on_connect
        self.on_disconnect = on_disconnect

        self.push_interval = push_interval
        self.interval = interval if interval else {}

        self.device_features = device_features if device_features else {}

        self.flags = self._manager.dict()
        self.queues = self._manager.dict()
        self.globals = self._manager.dict()
        if not self.globals.get('index'):
            self.globals['index'] = 1

    def push_data(self, df_name):
        if not self.device_features[df_name].push_data:
            return
        log.debug('%s:%s', df_name, self.flags[df_name])
        while self.flags[df_name]:
            _data = self.device_features[df_name].push_data(
                self.queues[df_name])
            if not isinstance(_data, NoData) and _data is not NoData:
                self.dan.push(df_name, _data)
            time.sleep(self.interval.get(df_name, self.push_interval))

    def on_signal(self, signal, df_list):
        log.info('Receive signal: \033[1;33m%s\033[0m, %s', signal, df_list)
        if 'CONNECT' == signal:
            for df_name in df_list:
                # race condition
                if not self.flags.get(df_name):
                    self.flags[df_name] = True
                    # https://stackoverflow.com/questions/56716470/
                    self.queues[df_name] = self._manager.Queue()
                    t = Thread(target=self.push_data, args=(df_name, ))
                    t.daemon = True
                    t.start()
        elif 'DISCONNECT' == signal:
            for df_name in df_list:
                self.flags[df_name] = False
        elif 'SUSPEND' == signal:
            # Not use
            pass
        elif 'RESUME' == signal:
            # Not use
            pass
        return True

    def on_data(self, df_name, data):
        self.device_features[df_name].on_data(data, self.queues, self.globals)
        return True

    @staticmethod
    def df_func_name(df_name):
        return re.sub(r'-(I|O)$', r'_\1', df_name)

    def _check_parameter(self):
        if self.api_url is None:
            raise RegistrationError('api_url is required')

        if self.device_model is None:
            raise RegistrationError('device_model not given.')

        if isinstance(self.device_addr, UUID):
            self.device_addr = str(self.device_addr)
        elif self.device_addr:
            try:
                UUID(self.device_addr)
            except ValueError:
                try:
                    self.device_addr = str(UUID(int=int(self.device_addr, 16)))
                except ValueError:
                    log.warning(
                        'Invalid device_addr. Change device_addr to None.')
                    self.device_addr = None

        if self.persistent_binding and self.device_addr is None:
            msg = ('In case of `persistent_binding` set to `True`, '
                   'the `device_addr` should be set and fixed.')
            raise ValueError(msg)

        if not self.device_features.keys():
            raise RegistrationError('Neither idf_list nor odf_list is empty.')

        return True

    def finalizer(self):
        try:
            if not self.persistent_binding:
                self.dan.deregister()
        except Exception as e:
            log.warning('dai process cleanup exception: %s', e)

    def start(self, *args, **kwargs):
        ret = super(DAI, self).start(*args, **kwargs)
        # conduct deregistration properly,
        # if one doesn't stop process before main process ends
        atexit.register(self.terminate)
        return ret

    def run(self):  # this function will be executed in child process
        self._check_parameter()

        self.dan = Client()

        idf_list = []
        odf_list = []
        for df in self.device_features.values():
            if df.df_type == 'idf':
                idf_list.append(df.profile())
            else:
                odf_list.append(df.profile())

        def f():
            for key in self.flags:
                self.flags[key] = False
            log.debug('on_disconnect: _flag = %s', str(self.flags))
            if self.on_disconnect:
                return self.on_disconnect()

        self.dan.register(self.api_url,
                          on_signal=self.on_signal,
                          on_data=self.on_data,
                          accept_protos=['mqtt'],
                          id_=self.device_addr,
                          idf_list=idf_list,
                          odf_list=odf_list,
                          name=self.device_name,
                          profile={
                              'model': self.device_model,
                              'u_name': self.username,
                              'extra_setup_webpage': self.extra_setup_webpage,
                              'device_webpage': self.device_webpage,
                          },
                          register_callback=self.register_callback,
                          on_register=self.on_register,
                          on_deregister=self.on_deregister,
                          on_connect=self.on_connect,
                          on_disconnect=f)

        log.info('Press Ctrl+C to exit DAI.')
        try:
            self._event.wait()
        except KeyboardInterrupt:
            pass
        finally:
            self.finalizer()

    def wait(self):
        try:
            if platform.system() == 'Windows' or sys.version_info.major == 2:
                # workaround for https://bugs.python.org/issue35935
                while True:
                    time.sleep(86400)
            else:
                Event().wait()
        except KeyboardInterrupt:
            self.join()  # wait for deregistration

    def terminate(self, *args, **kwargs):
        '''
        Terminate DAI.
        This is a blocking call.
        '''
        try:
            self._event.set()
        except Exception:
            # this is triggered if the ``run`` function ended already.
            pass

        self.join()
        return super(DAI, self).terminate(*args, **kwargs)
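
A rough usage sketch follows. The DummyFeature class, the server URL, and all other argument values are assumptions made for illustration; they are not part of the SDK code above.

class DummyFeature:
    # Hypothetical stand-in exposing the attributes DAI expects:
    # df_type, push_data, on_data and profile.
    def __init__(self, name, df_type):
        self.name = name
        self.df_type = df_type

    def push_data(self, queue):
        return 42  # value to push for an input device feature

    def on_data(self, data, queues, globals_):
        print('received', data)

    def profile(self):
        return [self.name, ['int']]


if __name__ == '__main__':
    dai = DAI(
        api_url='http://localhost:9992',  # assumed IoTtalk server URL
        device_model='ExampleDevice',
        device_name='example-01',
        device_features={'Sensor-I': DummyFeature('Sensor-I', 'idf')},
    )
    dai.start()  # registration and the signal handling run in a child process
    dai.wait()   # block until Ctrl+C, then join for deregistration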