Example #1
    def __init__(self,
                 image_file=None,
                 screen_polling_time=60,
                 sleep_time=1,
                 data_polling_time=1,
                 data_limit=60,
                 data_timeout=None):

        manager = Manager()

        self.temperature_data = manager.list()
        self.pressure_data = manager.list()
        self.humidity_data = manager.list()

        self.temperature_statistics = manager.dict()
        self.pressure_statistics = manager.dict()
        self.humidity_statistics = manager.dict()

        self.calculate_condition = manager.Condition()

        self.image_file = image_file
        self.data_polling = data_polling_time
        self.data_timeout = data_timeout
        self.data_limit = data_limit

        if screen_polling_time < 20:
            raise ValueError(
                "Polling time cannot be less than 20s, the refresh rate of the screen."
            )
        if data_polling_time > screen_polling_time:
            raise ValueError(
                "Data must be polled at least once per screen refresh.")
        if screen_polling_time / data_polling_time > 60:
            warnings.warn(
                "Data will show the last {} seconds, but the screen only refreshes every {} seconds."
                .format(data_polling_time * 60, screen_polling_time))
        if screen_polling_time / data_polling_time > 180:
            raise ValueError(
                "Too much data will be lost in between screen refreshes (120+ data points)."
            )
        self.polling_time = screen_polling_time

        if sleep_time > 60:
            warnings.warn(
                "Sleeping longer than 60s will mean that the screen updates less than once per minute."
            )
        self.sleep_time = sleep_time
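
The excerpt never shows calculate_condition in use; below is a minimal sketch of the handoff it could coordinate between a polling process and a statistics process. The _poll_worker/_statistics_worker methods and read_sensor() are assumed placeholders, not part of the original class.

    # Hypothetical sketch: poller appends a reading and wakes the statistics
    # worker; the statistics worker waits on the shared Condition.
    def _poll_worker(self):
        while True:
            with self.calculate_condition:
                self.temperature_data.append(read_sensor())  # assumed placeholder
                self.calculate_condition.notify()  # wake the statistics worker
            time.sleep(self.data_polling)

    def _statistics_worker(self):
        while True:
            with self.calculate_condition:
                self.calculate_condition.wait(timeout=self.data_timeout)
                data = list(self.temperature_data)
                if data:
                    self.temperature_statistics['mean'] = sum(data) / len(data)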
Example #2
import signal
import subprocess
from multiprocessing import Manager, Process


class DgmProcess:
    """Spawn the DemoDGM in another process, to test the run_command C++
    client and its Python bindings.
    """

    def __init__(self):
        self.manager = Manager()
        self.condition = self.manager.Condition()
        self.another_process = Process(
            target=DgmProcess._run_demo_dynamic_graph_manager,
            args=(self.condition,))

    def start_dgm(self):
        self.condition.acquire()
        self.another_process.start()
        self.condition.wait()

    def stop_dgm(self):
        self.condition.notify()
        self.condition.release()
        self.another_process.join()

    @staticmethod
    def _run_demo_dynamic_graph_manager(condition):

        # start the demo dgm
        bash_command = "rosrun dynamic_graph_manager demo_dynamic_graph_manager"
        dgm_subprocess = subprocess.Popen(
            bash_command.split(), stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

        condition.acquire()
        condition.notify()
        condition.wait()
        # kill the demo dgm
        dgm_subprocess.send_signal(signal.SIGINT)
        dgm_subprocess.wait()
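
Typical usage, given the class above (the test code that exercises run_command goes in the middle):

if __name__ == "__main__":
    dgm = DgmProcess()
    dgm.start_dgm()   # blocks until the demo DGM subprocess has been launched
    # ... exercise the run_command client against the running DGM here ...
    dgm.stop_dgm()    # SIGINT the subprocess and join the helper process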
Example #3
def ngram_jurisdictions(slug=None, max_n=3):
    """
        Add the jurisdiction specified by slug to rocksdb, or all jurisdictions if slug is not provided.

        This is the primary ngrams entrypoint. It spawns NGRAM_THREAD_COUNT worker processes to
        ngram each jurisdiction-year, plus a rocksdb worker process that pulls their work off of
        the queue and writes it to the database.
    """
    # process pool of workers to ngram each jurisdiction-year and return keys
    ngram_workers = Pool(settings.NGRAM_THREAD_COUNT, maxtasksperchild=1)

    # inter-process queue of returned keys
    m = Manager()
    queue = m.Queue(settings.NGRAM_THREAD_COUNT)
    ngram_worker_offsets = m.dict()
    ngram_worker_lock = m.Lock()

    # process to write keys to rocksdb
    rocksdb_loaded = m.Condition()
    rocksdb_worker = Process(target=rocksdb_writer,
                             args=(queue, rocksdb_loaded))
    rocksdb_worker.start()
    with rocksdb_loaded:
        rocksdb_loaded.wait()

    # queue each jurisdiction-year for processing
    jurisdictions = Jurisdiction.objects.all()
    if slug:
        jurisdictions = jurisdictions.filter(slug=slug)
    ngram_worker_results = []
    for jurisdiction in jurisdictions:

        # skip empty jurisdictions
        if not jurisdiction.case_metadatas.exists():
            continue

        # get year range
        case_query = CaseMetadata.objects.in_scope().filter(
            jurisdiction__slug=jurisdiction.slug)
        first_year = case_query.order_by('decision_date',
                                         'id').first().decision_date.year
        last_year = case_query.order_by('-decision_date',
                                        '-id').first().decision_date.year

        # ngram each year
        for year in range(first_year, last_year + 1):
            # ngram_worker(queue, jurisdiction_id, year, max_n)
            ngram_worker_results.append(
                (jurisdiction.slug, year,
                 ngram_workers.apply_async(
                     ngram_worker,
                     (ngram_worker_offsets, ngram_worker_lock, queue,
                      jurisdiction.id, jurisdiction.slug, year, max_n))))

    # wait for all ngram workers to finish
    ngram_workers.close()
    ngram_workers.join()

    # report failures
    for jurisdiction_slug, year, result in ngram_worker_results:
        if not result._success:
            exc = result._value
            print("%s-%s failed:" % (jurisdiction_slug, year))
            traceback.print_exception(etype=type(exc),
                                      value=exc,
                                      tb=exc.__traceback__)

    # tell rocksdb worker to exit, and wait for it to finish
    queue.put('STOP')
    rocksdb_worker.join()
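
rocksdb_writer is defined elsewhere; here is a sketch of the Condition handshake it has to implement to satisfy the caller above, with load_database() and write_key() as assumed placeholders:

def rocksdb_writer(queue, rocksdb_loaded):
    load_database()                      # open rocksdb before signalling readiness
    with rocksdb_loaded:
        rocksdb_loaded.notify()          # unblocks ngram_jurisdictions()
    for key in iter(queue.get, 'STOP'):  # drain keys until the sentinel arrives
        write_key(key)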
Example #4
                    LOGGER.info("End {order} {git_path} {git_remote_name} "
                                "{git_branch} {git_url}".format(
                        git_path=git_path, git_branch=git_branch, order=order,
                        git_url=git_url, git_remote_name=git_remote_name,
                    ))
                except Exception as e:
                    LOGGER.error(e)

if __name__ == "__main__":
    manager = Manager()
    queue_map = {}
    with Pool(len(git_map) + 1) as p:
        for index, git in enumerate(git_map):
            queue = manager.Queue()
            cd = manager.Condition()
            git_url = git.get("git_url", False)
            git_remote_name = git.get("git_remote_name", False)
            git_branch = git.get("git_branch", False)
            if not git_url or not git_remote_name or not git_branch:
                raise Exception("Please make sure git url, path and "
                                "remote name had been typed")

            h = hashlib.md5()
            h.update((git_url + git_remote_name + git_branch).encode(
                encoding="utf-8")
            )
            route_key = h.hexdigest()
            queue_map[route_key] = (queue, cd)
            p.apply_async(worker, (cd, queue, index))
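
worker is not shown in this excerpt; the following is a hypothetical sketch of a worker matching the apply_async call above, parking on its Condition until the dispatcher notifies it:

# Hypothetical worker matching the apply_async signature above:
def worker(cd, queue, index):
    while True:
        with cd:
            cd.wait()                    # sleep until the dispatcher notifies
        while not queue.empty():
            task = queue.get()
            LOGGER.info("worker %s handling %s", index, task)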
Example #5
    def __init__(self, manager: Manager, max_size=0):
        self.lock = manager.Lock()
        self.not_full = manager.Condition(self.lock)
        self.not_empty = manager.Condition(self.lock)
        self.closed = manager.Value(c_bool, False)
        self.queue = manager.Queue(max_size)
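
Sharing one lock between the two Conditions is the classic bounded-buffer setup; here is a sketch of the put/get pair this __init__ presumably supports (assumed, not part of the excerpt):

    # Assumed companions to the __init__ above (not in the original excerpt):
    def put(self, item):
        with self.not_full:                # acquires the shared lock
            while self.queue.full() and not self.closed.value:
                self.not_full.wait()       # block until a get() frees a slot
            self.queue.put(item)
            self.not_empty.notify()        # wake one blocked get()

    def get(self):
        with self.not_empty:
            while self.queue.empty() and not self.closed.value:
                self.not_empty.wait()      # block until a put() adds an item
            item = self.queue.get()
            self.not_full.notify()         # wake one blocked put()
            return item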
Example #6
                    self.lock.notify_all()
                    # pause this process until another process notifies
                    self.lock.wait()

                else:
                    item = self.q.get()
                    content = ("[Consumer] << process {} (pid {}) consumed "
                               "item {}; {} items left in the basket".format(
                                   self.name, self.pid, item, self.q.qsize()))
                    time.sleep(1)
                    print(content)


if __name__ == '__main__':
    manager = Manager()
    # create the shared condition lock
    lock = manager.Condition()
    counter = 0
    item_basket = manager.Queue(maxsize=10)
    """
    producer_process = producer(item_basket, lock, counter)
    consumer_process = consumer(item_basket, lock)
    producer_process.start()
    consumer_process.start()
    producer_process.join()
    consumer_process.join()
    """

    p_pool = Pool(5)
    for i in range(5):
        p_pool.apply_async(producer, args=(
            item_basket,
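
The producer class is cut off in this excerpt; below is a hypothetical sketch of the producer half of the same Condition handshake, following the commented-out usage above:

# Hypothetical producer matching the commented-out usage above:
class producer(Process):
    def __init__(self, q, lock, counter):
        super().__init__()
        self.q = q
        self.lock = lock
        self.counter = counter

    def run(self):
        with self.lock:
            while True:
                if self.q.full():
                    self.lock.notify_all()  # wake the consumers
                    self.lock.wait()        # pause until space frees up
                else:
                    self.counter += 1
                    self.q.put("item-{}".format(self.counter))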
Example #7
def add_routes(app):
    app.router.add_static('/static', path=PROJECT_ROOT)
    app.router.add_get('/remote', store_handlers.get_remote_manifest)
    app.router.add_get('/local', store_handlers.get_local_manifest)
    app.router.add_get('/modules/{module}/{version}/readme',
                       store_handlers.get_module_readme)
    app.router.add_post('/install', install_module)
    app.router.add_post('/uninstall', store_handlers.uninstall_module)
    app.router.add_get('/installstream', get_install_stream)


if __name__ == '__main__':
    manager = Manager()
    INSTALL_STATE = manager.dict()
    SSE_UPDATE_CONDITION = manager.Condition()
    SSE_UPDATE_EVENT = manager.Event()
    INSTALL_STATE['stage'] = ''
    INSTALL_STATE['message'] = ''
    INSTALL_STATE['module_name'] = ''
    INSTALL_STATE['module_version'] = ''
    INSTALL_STATE['cur_chunk'] = 0
    INSTALL_STATE['total_chunks'] = 0
    INSTALL_STATE['cur_size'] = 0
    INSTALL_STATE['total_size'] = 0
    INSTALL_STATE['update_time'] = time.time()
    install_worker = Process(target=install_from_queue,
                             args=(INSTALL_QUEUE, INSTALL_STATE,
                                   SSE_UPDATE_EVENT))
    install_worker.start()
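
Neither install_from_queue nor the SSE handlers appear in this excerpt; here is a sketch assuming the worker notifies SSE_UPDATE_CONDITION after each INSTALL_STATE change so that get_install_stream can wait on it:

# Assumed helper: how a worker could publish a state change to SSE listeners.
def publish_state(state, condition, **updates):
    for key, value in updates.items():
        state[key] = value
    state['update_time'] = time.time()
    with condition:
        condition.notify_all()  # wake any handlers blocked in condition.wait()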