Code Example #1
def download_photos(meta_path):
    data_dir = os.path.dirname(meta_path)
    photo_dir = os.path.join(data_dir, 'photos')
    os.makedirs(photo_dir, exist_ok=True)

    try:
        print(f'## Read {meta_path}')
        df = pd.read_json(os.path.join(data_dir, 'photos.json'), orient='records', lines=True)
    except Exception:
        print('## Please run "data_process.py" first to generate "photos.json"!')
        return

    print(f'## Start to download pictures and save them into {photo_dir}')
    pool = ThreadPoolExecutor()
    tasks = []
    for name, url in zip(df['photo_id'], df['imUrl']):
        path = os.path.join(photo_dir, name + '.jpg')
        if not os.path.exists(path) or not is_valid_jpg(path):
            task = pool.submit(download_photo, url, path)
            tasks.append(task)

    failed = []
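    # as_completed yields futures in the order they finish, not the order they
    # were submitted, so this loop doubles as a live progress counter.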
    for i, task in enumerate(as_completed(tasks), start=1):
        res, url, path = task.result()
        if not res:
            failed.append((url, path))
        print(f'## Tried {i}/{len(tasks)} photos!', end='\r', flush=True)
    pool.shutdown()

    for url, path in failed:
        print(f'## Failed to download {url} to {path}')
    print(f'## {len(tasks) - len(failed)} images were downloaded successfully to {photo_dir}!')
Code Example #2
 def add_services_status(self):
     res = {}
     path = config_path
     if path.startswith("\\\\?\\"):
         path = path.replace("\\\\?\\", "")
     connection_checker = ConnectionCheck(logger)
     pool = ThreadPoolExecutor(4)
     jobs = {}
     for root, dirs, files in os.walk(path):
         for file in files:
             if file.endswith(".ini"):
                 try:
                     config = configparser.ConfigParser()
                     config.read(os.path.join(root, file))
                     defaults = config['tunnel']
                     remote_host = defaults['remote_host']
                     remote_port = int(defaults.get('remote_port'))
                     tunnel_name = defaults.get(
                         'tunnel_name', realpath(file))
                     res[tunnel_name] = {
                         'remote_host': remote_host,
                         'remote_port': remote_port
                     }
                     jobs[tunnel_name] = pool.submit(
                         connection_checker.test_connection,
                         tunnel_name, remote_host, remote_port)
                 except Exception:
                     logger.exception(
                         "Error getting status for %s", file)
                     continue
     # Collect results once all .ini files have been scanned, not once per
     # directory visited by os.walk.
     for name, job_future in jobs.items():
         res[name]['status'] = job_future.result()
     pool.shutdown()
     return res
Code Example #3
def execute_test_unsafe(poll_config: NewPollConfiguration, count: int):
    init_store()
    # startup the pool
    workers = round(max(math.log(count), 1))
    logger.debug(f'Creating pool with {workers} workers')
    executor = ThreadPoolExecutor(workers)

    # TODO maybe add some sleep between each send?
    fs = [executor.submit(send_new_poll, poll_config) for _ in range(count)]

    logger.debug('Waiting on tasks to finish')
    # wait for all sends to finish (or time out)
    futures.wait(fs, timeout=count * 1.5)

    logger.debug('Waiting on polls to be received')
    # wait for all remaining polls to be delivered
    sleep_threshold = min(count, 60)

    logger.info(
        f'New Execute Execution stopped - waiting max threshold {sleep_threshold}.'
    )
    time.sleep(sleep_threshold)

    logger.info('Finalizing test.')
    release_store()
    # maybe send some more meaningful results
    RomanClient(poll_config.roman_url).send_text(token=poll_config.token,
                                                 text='Execution finished.')

    # delete pool
    executor.shutdown()
Code Example #4
File: s3_upload.py Project: davidsvaughn/yolov5
def upload_files(files, s3_path):
    executor = ThreadPoolExecutor(max_workers=32)
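    # The returned futures are discarded; shutdown(wait=True) below blocks
    # until every queued upload has run, and upload exceptions pass silently.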
    for i, file_path in enumerate(files):
        executor.submit(upload_s3_file, file_path, s3_path)
        if i % 100 == 0:
            print(f"{i}\t{file_path}")
    executor.shutdown(wait=True)
Code Example #5
    def preprocess(data_type,
                   img_dir,
                   geojson_dir,
                   rgb_tg,
                   mask_tg,
                   num_workers=8):
        pool = ThreadPoolExecutor(max_workers=num_workers)
        re_img_index = re.compile(r"img\d+")
        re_pat = re.compile(r"(.*?)img\d+")
        # building_pat = re_pat.search(os.listdir(Path(geojson_dir) / "buildings")[0]).group(1)
        pat = re_pat.search(os.listdir(Path(geojson_dir))[0]).group(1)

        for f in os.listdir(img_dir):
            img_index = re_img_index.search(f).group(0)
            geojson = Path(geojson_dir) / (pat + img_index + ".geojson")

            def pool_wrapper(p1, p2, p3, p4):
                if data_type == "building":
                    thread = GeoLabelUtil.BuildingRenderThread(p1, p2, p3, p4)
                elif data_type == "road":
                    thread = GeoLabelUtil.RoadRenderThread(p1, p2, p3, p4)
                else:
                    raise NotImplementedError("Not Implemented Data Type: " +
                                              data_type)
                thread.run()
                # thread.start()

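            # Note: any exception raised inside pool_wrapper lands in the
            # discarded Future, so a failed render would pass silently here.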
            pool.submit(pool_wrapper,
                        Path(img_dir) / f,
                        Path(rgb_tg) / (img_index + ".png"), geojson,
                        Path(mask_tg) / (img_index + ".png"))
        pool.shutdown(wait=True)
Code Example #6
def get_content(path):
    global CHUNK_SZ, dd
    dic = greet_server(path)
    dd = dic
    other = ulta_dic(dic)
    offset_mgr.cache[path] = (dic, other)
    sww = time.perf_counter()  # time.clock() was removed in Python 3.8
    initialize(dic, 0)
    print("To initialize:", time.perf_counter() - sww)

    total_sz = other.iloc[-1]
    total_ch = math.ceil(total_sz / CHUNK_SZ)

    li = list(range(total_ch))
    random.shuffle(li)

    pool = ThreadPoolExecutor(max_workers=8)
    fu = {}
    for i in li:
        fu[pool.submit(get_chunk, path, i, dic)] = i
    # for f in fu:
    #     data = f.result()
    #     place_chunk(path, fu[f], data, dic)
    pool.shutdown(wait=True)
    print("Total time:", time.perf_counter() - sww)
Code Example #7
File: target.py Project: zhangyiiZ/saltshaker_api
 def post(self):
     logger.info("PingList")
     args = parser.parse_args()
     db = DB()
     host_id = args['host_id']
     cipher = args['cipher']
     state, result = db.select('host',
                               "where data -> '$.id'='%s'" % host_id)
     minion_id = result[0]['minion_id']
     logger.info('minion_id:' + minion_id)
     product_id = result[0]['product_id']
     salt_api = salt_api_for_product(product_id)
     state, targets = db.select('target',
                                "where data -> '$.host_id'='%s'" % host_id)
     targets_not = []
     thread_pool = ThreadPoolExecutor(max_workers=10,
                                      thread_name_prefix="target_")
     futures = []
     for target in targets:
         future = thread_pool.submit(pingTarget, target, minion_id,
                                     salt_api, cipher)
         futures.append(future)
     thread_pool.shutdown(wait=True)
     for future in futures:
         result = future.result()
         logger.info(str(result['status']))
         status = str(result['status'])
         if "Timeout" in status or "Unknown" in status:
             targets_not.append(result["target"])
     return {"status": True, "message": '配置发送成功', "data": targets_not}, 200
Code Example #8
    def _upload_files_in_parallel(self, file_metas):
        """Uploads files in parallel.

        Args:
            file_metas: List of metadata for files to be uploaded.
        """
        idx = 0
        len_file_metas = len(file_metas)
        while idx < len_file_metas:
            end_of_idx = min(idx + self._parallel, len_file_metas)

            logger.debug('uploading files idx: {}/{}'.format(
                idx + 1, end_of_idx))

            target_meta = file_metas[idx:end_of_idx]
            while True:
                pool = ThreadPoolExecutor(len(target_meta))
                results = list(
                    pool.map(SnowflakeFileTransferAgent.upload_one_file,
                             target_meta))
                pool.shutdown()

                # need renew AWS token?
                retry_meta = []
                for result_meta in results:
                    if result_meta['result_status'] in [
                            ResultStatus.RENEW_TOKEN,
                            ResultStatus.RENEW_PRESIGNED_URL
                    ]:
                        retry_meta.append(result_meta)
                    else:
                        self._results.append(result_meta)

                if len(retry_meta) == 0:
                    # no new AWS token is required
                    break
                if any([
                        result_meta['result_status'] ==
                        ResultStatus.RENEW_TOKEN for result_meta in results
                ]):
                    client = self.renew_expired_client()
                    for result_meta in retry_meta:
                        result_meta['client'] = client
                    if end_of_idx < len_file_metas:
                        for idx0 in range(idx + self._parallel,
                                          len_file_metas):
                            file_metas[idx0]['client'] = client
                if any([
                        result_meta['result_status'] ==
                        ResultStatus.RENEW_PRESIGNED_URL
                        for result_meta in results
                ]):
                    self._update_file_metas_with_presigned_url()
                target_meta = retry_meta

            if end_of_idx == len_file_metas:
                break
            idx += self._parallel
Code Example #9
    def testWithStores(self):
        # store_opts2 = {"store_type": StoreTypes.ROLLPAIR_QUEUE, "capacity": 100}
        store_opts2 = {"store_type": StoreTypes.ROLLPAIR_QUEUE}
        rp = self.ctx.load("ns1", "n1")
        rp_q = self.ctx.load("ns1", "n2", store_opts2)
        rp.put_all([("k1", "v1"), ("k2", "v2")])

        def func_asyn(partitions):
            import time
            part1 = partitions[0]
            serder1 = create_serdes(part1._store_locator._serdes)
            with create_adapter(part1) as db1:
                for i in range(100):
                    db1.put(serder1.serialize("a" + str(i)))
                    time.sleep(0.1)

        def func_syn(partitions):
            part1, part2 = partitions
            serder1 = create_serdes(part1._store_locator._serdes)
            serder2 = create_serdes(part2._store_locator._serdes)
            with create_adapter(part1) as db1, create_adapter(part2) as db2:
                for i in range(100):
                    db1.put(serder1.serialize("q" + str(i)), db2.get())

        pool = ThreadPoolExecutor()
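        # The Future returned by submit() is discarded, so an exception
        # raised inside func_asyn would go unnoticed.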
        pool.submit(rp_q.with_stores, func_asyn)

        rp.with_stores(func_syn, [rp_q])
        print(list(rp.get_all()))
        pool.shutdown()
Code Example #10
    def multiplyKaratsubaParallel(args) -> Polynomial:
        depth = args[0]
        A = args[1]
        B = args[2]
        if depth > 4:
            return PolynomialOperations.multiplySequencially(A, B)
        if A.n < 2 or B.n < 2:
            return PolynomialOperations.multiplySequencially(A, B)

        m = max(A.n, B.n) // 2
        lowA = Polynomial(len(A.coefficients[:m]), A.coefficients[:m])
        highA = Polynomial(len(A.coefficients[m:]), A.coefficients[m:])
        lowB = Polynomial(len(B.coefficients[:m]), B.coefficients[:m])
        highB = Polynomial(len(B.coefficients[m:]), B.coefficients[m:])
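        # A fresh pool (one thread per CPU) is created at every recursion
        # level; the depth guard above is what keeps thread growth bounded.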
        karaPool = ThreadPoolExecutor(mp.cpu_count())
        futureResult1 = karaPool.submit(PolynomialOperations.multiplyKaratsubaParallel, ([depth+1, lowA, lowB]))
        futureResult2 = karaPool.submit(PolynomialOperations.multiplyKaratsubaParallel, ([depth+1, PolynomialOperations.add(lowA, highA), PolynomialOperations.add(lowB, highB)]))
        futureResult3 = karaPool.submit(PolynomialOperations.multiplyKaratsubaParallel, ([depth+1, highA, highB]))
        karaPool.shutdown(wait=True)
        result1 = futureResult1.result()
        result2 = futureResult2.result()
        result3 = futureResult3.result()
        r1 = PolynomialOperations.shift(result3, 2 * m)
        r2 = PolynomialOperations.shift(
            PolynomialOperations.subtract(PolynomialOperations.subtract(result2, result3), result1), m)
        return PolynomialOperations.add(PolynomialOperations.add(r1, r2), result1)
Code Example #11
File: server_production.py Project: AtomLaw/Ally-Py
def prepareServer(RequestHandlerClass, pipe, threads, timeout):
    '''
    Prepare in a process the request handling.
    '''
    def process(request, address):
        RequestHandlerClass(request, address, None)
        try:
            request.shutdown(socket.SHUT_WR)
        except socket.error:
            pass  # some platforms may raise ENOTCONN here
        request.close()

    pool = ThreadPoolExecutor(threads)
    while True:
        if not pipe.poll(timeout): break
        else:
            data = pipe.recv()
            if data is None: break
            elif data is True: continue

            requestfd, address = data
            request = socket.fromfd(rebuild_handle(requestfd), socket.AF_INET,
                                    socket.SOCK_STREAM)

            pool.submit(process, request, address)

    pool.shutdown(wait=False)
Code Example #12
File: python.py Project: inovizz/aws-syndicate
def assemble_python_lambdas(bundle_name, project_path):
    project_base_folder = os.path.basename(os.path.normpath(project_path))
    project_abs_path = build_path(CONFIG.project_path, project_path)
    _LOG.info('Going to process python project by path: {0}'.format(
        project_abs_path))
    target_folder = build_path(CONFIG.project_path, ARTIFACTS_FOLDER,
                               bundle_name)
    _LOG.debug('Target directory: {0}'.format(target_folder))
    executor = ThreadPoolExecutor(max_workers=5)
    futures = []
    for root, sub_dirs, files in os.walk(project_abs_path):
        for item in files:
            if item.endswith(LAMBDA_CONFIG_FILE_NAME):
                _LOG.info('Going to build artifact in: {0}'.format(root))
                arg = {
                    'item': item,
                    'project_base_folder': project_base_folder,
                    'project_path': project_path,
                    'root': root,
                    'target_folder': target_folder
                }
                futures.append(executor.submit(_build_python_artifact, arg))
    concurrent.futures.wait(futures, return_when=ALL_COMPLETED)
    executor.shutdown()
    _LOG.info('Python project was processed successfully')
Code Example #13
def run_generators(task_generators: List[BaseSpiderTaskGenerator],
                   item_pool_workers: int = 32,
                   retries: int = 5):
    g_pool = ThreadPoolExecutor(max_workers=len(task_generators))
    i_pool = ThreadPoolExecutor(max_workers=item_pool_workers)

    def retry_and_handle_exception(func: Callable):
        def wrapper():
            for i in range(retries):
                try:
                    func()
                    break
                except Exception as ex:
                    log.error(f"Error while executing task (retries={i}).",
                              exc_info=ex)

        return wrapper

    def submit_all_items(stg: BaseSpiderTaskGenerator):
        def wrapper():
            try:
                for sub_task in stg.generate():
                    i_pool.submit(retry_and_handle_exception(sub_task))
            except Exception as ex:
                log.error("Error while generating tasks.", exc_info=ex)

        return wrapper

    for g in task_generators:
        g_pool.submit(submit_all_items(g))
    log.info("All generators started.")
    g_pool.shutdown(wait=True)
    log.info("All generators terminated.")
    i_pool.shutdown(wait=True)
    log.info("All tasks finished.")
Code Example #14
class ProcessingTimesCollector:
    def __init__(self):
        self._executor = ThreadPoolExecutor(max_workers=1)
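        # A single worker serializes every access to _times_map, so the
        # collector needs no explicit locking.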
        self._times_map = {}

    def _add_times(self, times):
        for k, v in times.items():
            try:
                agg = self._times_map[k]
            except KeyError:
                agg = TimerStatsAggregator()
                self._times_map[k] = agg
            agg.add_time(v)

    def add_times(self, times):
        self._executor.submit(self._add_times, times)

    def _get_aggregates(self, prefix):
        return {
            identifier: stats.finalize()
            for identifier, stats in self._times_map.items()
            if identifier.startswith(prefix)
        }

    def get_aggregates(self, identifier=None) -> Dict[str, TimerStats]:
        future = self._executor.submit(self._get_aggregates, identifier or '')
        return future.result()

    def close(self):
        self._executor.shutdown(wait=True)
Code Example #15
class SourcesMatcher:
    def __init__(self,
                 source_a: DataLoader,
                 source_b: DataLoader,
                 matcher: CompanyMatcher = None,
                 worker_amount: int = 10) -> None:
        """
        Create matches between two companies data sources asynchronously.

        :param source_a: A generator of companies from the first data source
        :param source_b: A generator of companies from the second data source
        """
        super().__init__()
        self.source_a = source_a
        self.source_b = source_b
        self.pool = ThreadPoolExecutor(max_workers=worker_amount)
        self.matcher = CompanyMatcher() if matcher is None else matcher

    def compare(self) -> Generator[Future, None, None]:
        """Compare all data sources and returns the result as a list of
        futures CompanyMatch.

        :return: A Generator containing Futures of CompanyMatch.
        """
        for company_a in self.source_a.load():
            for company_b in self.source_b.load():
                yield self.pool.submit(self.matcher.match, company_a,
                                       company_b)

    def stop(self):
        """Stop the matcher and all associated operations."""
        self.pool.shutdown()
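A minimal consumption sketch for the class above; loader_a and loader_b stand in for real DataLoader instances (illustrative names, not part of the original):

matcher = SourcesMatcher(loader_a, loader_b)
for future in matcher.compare():
    match = future.result()  # blocks until this pair has been compared
    print(match)
matcher.stop()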
Code Example #16
    def crawl_single_kind(self, kind='basketball'):
        print("GET: KIND = ", kind)
        # Crawl the listing page and find the page for each shoe category
        url = 'http://www.shihuo.cn/' + kind + '/list?page_size=60&page=1'
        response = requests.get(url, headers=self.header)
        html = response.text

        # First, find the total number of pages
        start = html.find('var totalPage = parseInt(') + len('var totalPage = parseInt(')
        end = html.find(')', start)
        page_nums = math.ceil(float(html[start:end]))
        # print(page_nums)

        # Loop over every page to collect all product links
        links = []
        for page_num in range(1, page_nums + 1):  # pages are 1-based
            url = 'http://www.shihuo.cn/' + kind + '/list?page_size=60&page=' + str(page_num)
            response = requests.get(url, headers=self.header)
            html = response.text
            soup = BeautifulSoup(html, "lxml")
            # print(soup.prettify())
            link_class = soup.select('#js_hover li .imgs-area .link ')
            for item in link_class:
                links.append('http:' + item['href'])

        thread_pool = ThreadPoolExecutor(8)
        for link in links:
            thread_pool.submit(self.crawl_all_color_for_shoes, link)
        thread_pool.shutdown(wait=True)
Code Example #17
File: thread_pool.py Project: lxjian01/django_demo
class ThreadPool(object):
    """
    线程池
    """
    def __init__(self):
        # 线程池
        thread_num = settings.THREAD_POOL_EXECUTOR_NUM
        self.executor = ThreadPoolExecutor(thread_num)
        # 用于存储每个项目批量任务的期程
        self.future_dict = {}
        # 全局锁
        self.lock = threading.Lock()
        logger.info("Init thread pool ok.")

    # 检查某个项目是否有正在运行的批量任务
    def is_project_thread_running(self, project_id):
        future = self.future_dict.get(project_id, None)
        if future and future.running():
            # a batch task is currently running
            return True
        return False

    # List all asynchronous tasks
    def check_future(self):
        data = {}
        for project_id, future in self.future_dict.items():
            data[project_id] = future.running()
        return data

    def __del__(self):
        self.executor.shutdown()
        logger.info("Thread pool closed.")
Code Example #18
class DummyAsyncReader(PluginAsyncMessageReader):
    def __init__(self, params):
        self._params = params
        self._executor = None
        self._reader_executor = None
        self._on_message = None
        self._on_failure = None
        self._closed = True

    def open(self):
        self._closed = False
        self._reader_executor = ThreadPoolExecutor(max_workers=1)
        self._reader_executor.submit(self._read_messages)
        self._executor = ThreadPoolExecutor()

    def close(self):
        self._closed = True
        if self._reader_executor is not None:
            self._reader_executor.shutdown()
        self._reader_executor = None
        if self._executor is not None:
            self._executor.shutdown()
        self._executor = None

    def metrics(self):
        return "this is a dummy metrics"

    def reset_metrics(self):
        pass

    def _read_messages(self):
        topics = self._params.get("topics")
        if not isinstance(topics, list):
            topics = [topics]
        while not self._closed:
            for topic in topics:
                try:
                    value = qread(topic, timeout=0.1)
                    raw = {"topic": topic, "value": value}
                    self._executor.submit(self._on_message, value, topic, raw)
                except Empty:
                    continue
                except Exception as e:
                    self._executor.submit(self._on_failure, e)

    @property
    def on_message(self):
        return self._on_message

    @on_message.setter
    def on_message(self, on_message):
        self._on_message = on_message

    @property
    def on_failure(self):
        return self._on_failure

    @on_failure.setter
    def on_failure(self, on_failure):
        self._on_failure = on_failure
Code Example #19
    def ctreate_ThreadPool(self):
        user_phone = request.form.get('userPhone')
        user_name = request.form.get('userName')
        # t = request.form.get('uploadTime')
        t = '2021'
        desc = request.form.get('imageDesc')
        theme = request.form.get('imageTitle')
        number = request.form.get('imageCount')

        user = wx_user.query.filter(
            wx_user.phone == user_phone).first()  # look up the user
        user_id = user.id  # get the user ID
        print(user_id)
        number = int(number)
        d = Data_deal(user_id, number, desc, theme)
        d.update_info()
        threadPool = ThreadPoolExecutor(max_workers=number,
                                        thread_name_prefix="pro")  # sized to the user's upload count
        # Each visiting user gets a pool; one task per uploaded image reads the files.
        for i in range(number):
            # submit the task to the thread pool: pass the callable and its
            # arguments (the original called self.re(...) here, which ran it
            # synchronously and submitted its return value instead)
            future = threadPool.submit(self.re, user_name, user_id, t)
        threadPool.shutdown()
        # notify the client that all images have been received
        for j in r.scan_iter('imgs*'):
            r.delete(j)
        for k in r.scan_iter('upload_img*'):
            r.delete(k)
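The fix above hinges on a subtlety worth spelling out: submit() takes the callable and its arguments separately, and calling the function at the submit site runs it on the caller's thread. A two-line illustration with a hypothetical work function:

future = pool.submit(work, 1, 2)   # runs work(1, 2) on a pool thread
future = pool.submit(work(1, 2))   # runs work(1, 2) immediately, then tries
                                   # to submit its return value as a callable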
Code Example #20
 def multi_test(self):
     # using thread pool to improve testing speed
     t = ThreadPoolExecutor(10)
     print('testing proxy, it will take several minutes......')
     for proxy in self.proxy_lis:
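         # add_done_callback passes the finished Future, so
         # save_valid_proxy_lis must call .result() on its argument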
         t.submit(self.tes_proxy, proxy).add_done_callback(self.save_valid_proxy_lis)
     t.shutdown()
     print(f'fetch {self.p_count} valid proxies!')
Code Example #21
def download_articles_from(titles_list):
    crawler = Crawler()
    print("Starting download")
    pool = ThreadPoolExecutor(max_workers=5)
    for title in titles_list:
        pool.submit(crawler.search, title)

    pool.shutdown(wait=True)
    crawler.write_fails()
Code Example #22
File: handler.py Project: ZSAIm/Nbdler
class AIOReaderWriter(Handler):
    """ Async file I/O worker thread.

    To keep blocking file reads and writes from stalling the download worker
    threads, this handler provides asynchronous file I/O methods.

    Responsibilities:
        1. Manage the I/O read/write thread
    """
    name = 'aio'

    def __init__(self):
        self._executor = None
        self._writers = set()

    async def prepare(self):
        self._executor = ThreadPoolExecutor(
            max_workers=1,
            thread_name_prefix=f'BufferWriter {self.parent.file.name}')

    @asynccontextmanager
    async def open(self, file, mode='r', *args, **kwargs):
        """ 异步打开文件。

        Args:
            file: 参见io.open()方法参数file
            mode: 参见io.open()方法参数mode
            args: 参见io.open()方法参数的列表参数
            kwargs: 参见io.open()方法参数字典参数

        Returns:
            异步文件对象AsyncIOFile,对耗时IO文件操作进行异步定义。
        """
        def async_open():
            return open(file, mode, *args, **kwargs)

        executor = self._executor
        assert executor
        loop = asyncio.get_running_loop()
        fd = await loop.run_in_executor(executor, async_open)
        aiofile = AIOFile(executor, fd, loop=loop)
        self._writers.add(aiofile)
        try:
            yield aiofile
        finally:
            # close the file even if the caller's block raised
            await loop.run_in_executor(executor, fd.close)
            self._writers.remove(aiofile)

    async def run(self):
        pass

    async def close(self):
        for handler in h.iter_all():
            if handler != self:
                await handler.join()
        self._executor.shutdown(False)

    async def pause(self):
        pass
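The pattern above, pushing blocking open/close calls through loop.run_in_executor on a single-thread pool, also works standalone; a minimal sketch (the function name and path are illustrative, not from the project):

import asyncio
from concurrent.futures import ThreadPoolExecutor

async def read_blocking(path):
    loop = asyncio.get_running_loop()
    pool = ThreadPoolExecutor(max_workers=1)
    fd = await loop.run_in_executor(pool, open, path)
    try:
        # the blocking read runs on the pool thread, not the event loop
        return await loop.run_in_executor(pool, fd.read)
    finally:
        await loop.run_in_executor(pool, fd.close)
        pool.shutdown()

print(asyncio.run(read_blocking('/etc/hostname')))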
Code Example #23
def test_concurrent_insert(conn_cnx, db_parameters):
    """Concurrent insert tests. Inserts block on the one that's running."""
    number_of_threads = 22  # change this to increase the concurrency
    expected_success_runs = number_of_threads - 1
    cnx_array = []

    try:
        with conn_cnx() as cnx:
            cnx.cursor().execute("""
create or replace warehouse {}
warehouse_type=standard
warehouse_size=small
""".format(db_parameters['name_wh']))
            sql = """
create or replace table {name} (c1 integer, c2 string)
""".format(name=db_parameters['name'])
            cnx.cursor().execute(sql)
            for i in range(number_of_threads):
                cnx_array.append({
                    'host': db_parameters['host'],
                    'port': db_parameters['port'],
                    'user': db_parameters['user'],
                    'password': db_parameters['password'],
                    'account': db_parameters['account'],
                    'database': db_parameters['database'],
                    'schema': db_parameters['schema'],
                    'table': db_parameters['name'],
                    'idx': str(i),
                    'warehouse': db_parameters['name_wh']
                })

            pool = ThreadPoolExecutor(number_of_threads)
            results = list(pool.map(
                _concurrent_insert,
                cnx_array))
            pool.shutdown()
            success = 0
            for record in results:
                success += 1 if record['success'] else 0

            # 21 threads or more
            assert success >= expected_success_runs, "Number of successful runs"

            c = cnx.cursor()
            sql = "select * from {name} order by 1".format(
                name=db_parameters['name'])
            c.execute(sql)
            for rec in c:
                logger.debug(rec)
            c.close()

    finally:
        with conn_cnx() as cnx:
            cnx.cursor().execute(
                "drop table if exists {}".format(db_parameters['name']))
            cnx.cursor().execute(
                "drop warehouse if exists {}".format(db_parameters['name_wh']))
Code Example #24
def startup():
    while True:
        # start the thread pool
        pool = ThreadPoolExecutor(max_workers=20)
        for i in range(20):
            # run the check function on multiple threads
            pool.submit(GetProxyIP)
        # wait for the batch and reclaim the pool before the next round
        pool.shutdown()
Code Example #25
File: normal.py Project: tonirucks/LID-DS
class ScenarioNormal(ScenarioContainerBase):
    def __init__(self, image: ChainImage, behaviour_type, user_count):
        super().__init__(image)
        self.behaviour_type = behaviour_type  # TODO: make enum
        self.containers: Dict[str, Optional[Container]] = dict(
            (secrets.token_hex(8), None) for _ in range(user_count))
        self.logger = {}
        self.wait_times = []
        self.thread_pool = ThreadPoolExecutor(max_workers=user_count + 1)
        # always defined: teardown() iterates log_threads unconditionally
        self.log_threads = []

    def generate_behaviours(self, recording_time):
        self.wait_times = get_sampling_method(self.behaviour_type).generate_wait_times(len(self.containers),
                                                                                       recording_time)

    def start_containers(self):
        for k in self.containers.keys():
            args = format_command(self.image.init_args)
            self.containers[k] = run_image(self.image.name, network=self.network, name=k, command=args)
            self.logger[k] = log.get_logger(f"[NORMAL] {k}", self.queue)
            Collector().add_container(k, "normal", get_ip_address(self.containers[k]))

    def start_simulation(self):
        for i, name in enumerate(self.containers):
            if self.to_stdin:
                t = Thread(target=show_logs, args=(self.containers[name], self.logger[name]))
                t.start()
                self.log_threads.append(t)
            self.thread_pool.submit(self._simulate_container, self.wait_times[i], name)
        return dict(zip(self.containers.keys(), self.wait_times))

    def teardown(self):
        for _, container in self.containers.items():
            container.remove(force=True)
        for t in self.log_threads:
            t.join()
        self.thread_pool.shutdown(wait=True)

    def _simulate_container(self, wait_times, name):
        socket = None
        if self.to_stdin:
            socket = self.containers[name].attach_socket(params={'stdin': 1, 'stream': 1})
            socket._writing = True
        for wt in wait_times:
            time.sleep(wt)
            for command in self.image.commands:
                cmd = format_command(command.command)
                if command.stdin:
                    try:
                        socket.write(cmd.encode() + b"\n")
                    except Exception:
                        pass  # ignore write failures on a detached stdin socket
                else:
                    _, out = self.containers[name].exec_run(cmd)
                    for line in out.decode("utf-8").split("\n")[:-1]:
                        self.logger[name].info("%s" % line)
Code Example #26
File: BaseGUI.py Project: chwba/concur_gui_test
    def threadify(self, _feature, information_dict: dict):
        # `-1` signifies that the nice_thread status is returned. This is quite ugly. Maybe use two separate queues?
        feature = _feature(information_dict)
        self.status_queue.put((-1, "Running..."))

        executor = ThreadPoolExecutor(self.n_threads)
        for i in range(self.n_tasks):
            executor.submit(self.append_to_queue, wid=i, feature=feature)
        executor.shutdown(wait=True)
        self.status_queue.put((-1, "Work done."))
Code Example #27
def test_concurrent_ocsp_requests(tmpdir):
    """Run OCSP revocation checks in parallel. The memory and file caches are deleted randomly."""
    cache_file_name = path.join(str(tmpdir), 'cache_file.txt')
    SnowflakeOCSP.clear_cache()  # reset the memory cache

    target_hosts = TARGET_HOSTS * 5
    pool = ThreadPoolExecutor(len(target_hosts))
    for hostname in target_hosts:
        pool.submit(_validate_certs_using_ocsp, hostname, cache_file_name)
    pool.shutdown()
Code Example #28
def mat_vec(A: List[List[int]], x: List[int]) -> List[int]:
    N = len(A)
    y = [0] * N
    f = partial(calc, A=A, x=x, y=y)

    pool = ThreadPoolExecutor(max_workers=2)
    pool.map(f, range(N))
    pool.shutdown(wait=True)

    return y
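calc is not shown above; for the sketch to stand alone, one plausible definition (an assumption, not the original code) computes a single row of the product. Each task writes to a distinct y[i], which keeps the shared list safe:

def calc(i, A, x, y):
    # y[i] = dot(A[i], x); one row per task, no index shared between threads
    y[i] = sum(a * b for a, b in zip(A[i], x))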
Code Example #29
class AppTestCase(TestCase):
    def set_up_app(self, games: List[Game] = None, languages: List[Language] = None, check_sys: CheckingSystem = None,
                   rating_system: RatingSystem = None, notification_service: NotificationService = None) -> None:
        self.dth = DatabaseTestHelper()
        self.dth.set_up()
        self.thread_pool = ThreadPoolExecutor(2)

        self.components = Components(
            database=self.dth.db,
            solutions_dao=SolutionsDao(self.dth.db),
            games_registry=GamesRegistry.from_games(games or []),
            languages_registry=LanguageRegistry.from_languages(languages or []),
            checking_system=(SyncCheckingSystem(check_sys) if check_sys else None),
            rating_system=rating_system,
            notification_service=notification_service
        )
        self.rh = AppRequestHandler(self.components)

    def tear_down_app(self) -> None:
        self.dth.tear_down()
        self.thread_pool.shutdown()

    def assert_has_answer(self, message_content: Union[FileHandle, str], template: Template, user_id: int = 42) -> dict:
        request = RequestFaker.message(message_content, user_id=user_id)
        container = RequestContainer(request)

        self.rh.handle(container)

        self.assertEqual(1, len(container.responses))
        response = container.responses[0]
        self.assertIsInstance(response, ResponseReplyTemplate)
        self.assertEqual(template, response.template)

        # Test that message can be constructed without errors
        response.get_content()

        return response.args

    def assert_has_answers(self, message_content: Union[FileHandle, str], template_1: Template, template_2: Template) -> dict:
        request = RequestFaker.message(message_content)
        container = RequestContainer(request)

        self.rh.handle(container)

        self.assertEqual(2, len(container.responses))
        response1 = container.responses[0]
        self.assertIsInstance(response1, ResponseReplyTemplate)
        self.assertEqual(template_1, response1.template)
        response1.get_content()
        response2 = container.responses[1]
        self.assertIsInstance(response2, ResponseReplyTemplate)
        self.assertEqual(template_2, response2.template)
        response2.get_content()

        return container.responses[1].args
Code Example #30
class ProcessWorker(AbstractWorker):
    def __init__(self, worker_num):
        if worker_num <= 0:
            worker_num = max(os.cpu_count() // 2, 3)
            logging.warning(
                '[ProcessWorker] automatically set worker_num = %d because the requested value was not positive.',
                worker_num)
        if WIN32:
            from concurrent.futures.thread import ThreadPoolExecutor
            self.pool = ThreadPoolExecutor(worker_num)
        else:
            self.pool = _ProcessPoolExecutor(worker_num)

        super().__init__(worker_num)

    def _parallel_execute(self, func, iterable):
        futures = []

        for params in iterable:
            if isinstance(params, dict):
                args = list()
                kwargs = params
            else:
                args = list(params)
                kwargs = dict()
            args.insert(0, func)
            futures.append(self.pool.submit(function_starter, *args, **kwargs))

        wait(futures)
        results = []
        for future in futures:
            try:
                results.append(future.result())
            except concurrent.futures.process.BrokenProcessPool:
                # killed by parent process
                results.append(None)
            except Exception as e:
                results.append(None)
                logging.exception(e)
        return results

    def _submit(self, func, synchronized, args):
        args = list(args)
        args.insert(0, func)
        if synchronized:
            return self.pool.submit(function_starter, *args).result()
        else:
            return self.pool.submit(function_starter, *args)

    def as_completed(self, funcs):
        return as_completed(funcs)

    def terminate(self, cancel_futures):
        super().terminate(cancel_futures)
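        # cancel_futures is only accepted by Executor.shutdown() on Python 3.9+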
        self.pool.shutdown(True, cancel_futures=cancel_futures)
Code Example #31
File: __init__.py Project: ifrpl/toddler
class RabbitManager(BaseManager):
    """Base for managers that connects to rabbit

    """
    def __init__(self, rabbitmq_url=None, queue=None, routing_key=None,
                 exchange="message", exchange_type="direct", log=None,
                 max_tasks=5, logging=None):
        """

        == Config dict structure (case adjusted to json configuration):
        {
            "rabbit": {
                "url": "apmq://rabbit",
                "queue": "test",
                "routingKey": "example.json"
                "exchange": "message", // optional, default: message
                "exchangeType:" "topic" // optional, default: topic
            }
        }

        :param str rabbitmq_url: optional url to rabbitmq
        :param str queue: name of the queue
        :param str routing_key: routing key for queue
        :param str exchange: name of the exchange
        :param str exchange_type: type of the exchange
        :param dict config: Manager configuration from parsed json config all
                            the above options can be configured from it
        :param logging.Logger log: optional logger that will replace new one
        :raises exceptions.NotConfigured:
        :return:
        """

        if queue is None:
            raise exceptions.NotConfigured("Missing queue")

        self._connection = None
        self._channel = None
        self._closing = False
        self._consumer_tag = None
        self._max_tasks = max_tasks  # 2 cores + 1
        self._tasks_number = 0
        self._executor = ThreadPoolExecutor(max_workers=self._max_tasks)
        self._max_tasks_warning_counter = 0

        self._rabbitmq_url = rabbitmq_url
        self._queue = queue
        self._routing_key = routing_key
        self._exchange = exchange
        self._exchange_type = exchange_type

        if log is None:
            from toddler.logging import setup_logging
            if logging is not None:
                self.log = setup_logging(config=logging)
            else:
                self.log = setup_logging()
        else:
            self.log = log

    def reconnect(self):
        """Will be run by IOLoop.time if the connection is closed.
        See on_connection_closed method.
        """
        self._connection.ioloop.stop()

        if not self._closing:
            self._connection = self.connect()
            self._connection.ioloop.start()

    @property
    def queue(self):
        return self._queue

    def on_connection_closed(self, connection, reply_code, reply_text):
        """

        :param pika.connection.Connection connection: closed connection ob
        :param int reply_code: reply code if given
        :param str reply_text: reply text if given
        :return:
        """
        self._channel = None
        if self._closing:
            self._connection.ioloop.stop()
        else:
            self.log.warning(
                "Connection closed, will reopen in 5 seconds: (%s) %s",
                reply_code,
                reply_text
            )

            self._connection.add_timeout(5, self.reconnect)

    def on_channel_closed(self, channel, reply_code, reply_text):
        """Invoked when channel has been closed

        :param pika.channel.Channel channel:
        :param int reply_code:
        :param str reply_text:
        :return:
        """
        self.log.info("Channel to rabbit closed.")
        self._connection.close()

    def on_channel_open(self, channel):
        """Invoked when channel has been opened

        :param pika.channel.Channel channel:
        """
        self.log.info("Channel opened")
        self._channel = channel
        self._channel.add_on_close_callback(self.on_channel_closed)
        self.start_consuming()

    def close_channel(self):
        self.log.info("Closing channel")
        self._channel.close()

    def open_channel(self):
        self.log.info("Opening channel")
        self._connection.channel(on_open_callback=self.on_channel_open)

    def on_connection_open(self, connection):

        self.log.info("Connected")
        self._connection = connection
        self._connection.add_on_close_callback(self.on_connection_closed)
        self.open_channel()

    def connect(self):
        """Connects to rabbitmq server, according to config
        :return pika.SelectConnection:
        """
        self.log.info("Connecting to RabbitMQ")
        return pika.BlockingConnection(
            pika.URLParameters(self._rabbitmq_url + "?heartbeat_interval=5"),
            # self.on_connection_open,
            # stop_ioloop_on_close=False

        )
    
    def on_cancel_ok(self, frame):
        """Invoked when locale Basic.Cancel is acknowledged by RabbitMQ

        :param pika.frame.Method frame:
        :return:
        """

        self.log.info("Rabbit acknowledged the cancel of the consumer")
        self.close_channel()

    def on_consumer_cancelled(self, method_frame):
        """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
        receiving messages.

        :param pika.frame.Method method_frame: The Basic.Cancel frame
        :return:
        """
        self.log.info("Consumer was cancelled remotely, shutting down: %r",
                      method_frame)
        if self._channel:
            self._channel.close()

    def acknowledge_message(self, delivery_tag):
        """

        :param delivery_tag:
        :return:
        """
        self.log.info("Acknowledging message %s", delivery_tag)
        self._channel.basic_ack(delivery_tag)
        
    def requeue_message(self, delivery_tag):
        """
        
        :param delivery_tag: 
        :return:
        """
        self.log.info("Requeuing message %s", delivery_tag)
        self._channel.basic_nack(delivery_tag, requeue=True)

    def on_message(self, channel, basic_deliver, properties, body):
        """Invoked when message received from rabbit

        :param pika.channel.Channel channel:
        :param pika.spec.Basic.Deliver basic_deliver:
        :param pika.spec.BasicProperties properties:
        :param str body:
        :return:
        """

        self.log.info("Received messages # %s from %s",
                      basic_deliver.delivery_tag,
                      properties.app_id)
        
        try:
            if self._tasks_number >= self._max_tasks:
                raise RuntimeError("Max tasks limit reached")
            
            self._tasks_number += 1
            
            ftr = self._executor.submit(self.process_task, body)

            def process_done(future: Future):
                nonlocal self
                self._tasks_number -= 1
                if future.cancelled():
                    # process_task ended by cancel
                    self.requeue_message(basic_deliver.delivery_tag)
                else:
                    if future.exception():
                        exception = future.exception()
                        if not isinstance(exception, RequeueMessage):
                            self.log.exception(exception)
                        
                        self.requeue_message(
                            basic_deliver.delivery_tag
                        )
                    else:
                        self.acknowledge_message(basic_deliver.delivery_tag)

            ftr.add_done_callback(process_done)

            return ftr

        except RuntimeError:
            self.requeue_message(basic_deliver.delivery_tag)
            time.sleep(0.5)

        except Exception as e:
            self.log.exception(e)
            self.requeue_message(basic_deliver.delivery_tag)
            time.sleep(10)

    def stop_consuming(self):
        """Send Basic.Cancel to rabbit

        :return:
        """

        if self._channel:
            self.log.info("Stop consuming")
            self._channel.basic_cancel(self.on_cancel_ok, self._consumer_tag)

    def start_consuming(self):
        """Begins to consume messages

        :return:
        """

        self.log.info("Start consuming")

        self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
        self._consumer_tag = self._channel.basic_consume(self.on_message,
                                                         self.queue)

        self.run()

    def run(self):
        """Run consumer"""

        self.log.info("Running consumer")
        connection = self.connect()
        """:type: pika.SelectConnection"""

        channel = connection.channel()
        self._channel = channel
        self._connection = connection

        for method_frame, properties, body in channel.consume(self.queue):
            while self._tasks_number >= self._max_tasks:
                time.sleep(0.1)

            self.on_message(channel, method_frame, properties, body)


    def stop(self):
        """Stops consuming service
        :return:
        """

        self.log.info("Stopping")
        self._closing = True
        self.stop_consuming()
        self._executor.shutdown(True)
        # if self._connection is not None:
        #     self._connection.ioloop.start()
        self.log.info("Stopped")

    def __exit__(self, *args, **kwargs):

        self.stop()
        super(RabbitManager, self).__exit__(*args, **kwargs)
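For orientation, a hypothetical subclass sketch; process_task is the hook that on_message submits to the pool, and the class name, URL, and queue below are illustrative only:

class EchoManager(RabbitManager):
    def process_task(self, body):
        # runs on a ThreadPoolExecutor worker, one call per delivery
        print("got", body)

manager = EchoManager(rabbitmq_url="amqp://localhost", queue="test")
manager.run()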
Code Example #32
class AlignedSEMStream(SEMStream):
    """
    This is a special SEM stream which automatically first aligns with the
    CCD (using spot alignment) every time the stage position changes.
    Alignment correction can either be done via beam shift (=shift), or
    by just updating the image position.
    """
    def __init__(self, name, detector, dataflow, emitter,
                 ccd, stage, focus, shiftebeam=MTD_MD_UPD, **kwargs):
        """
        shiftebeam (MTD_*): if MTD_EBEAM_SHIFT, will correct the SEM position using beam shift
         (iow, using emitter.shift). If MTD_MD_UPD, it will just update the
         position correction metadata on the SEM images.
        ccd (Optical detector)
        stage (actuator): the sample stage, just to know when re-alignment is needed
        focus (actuator): the _optical_ focuser, just to know when re-alignment is needed
        focuser (actuator): the _e-beam_ focuser, to allow focusing the image
        """
        super(AlignedSEMStream, self).__init__(name, detector, dataflow, emitter, **kwargs)
        self._ccd = ccd
        self._stage = stage
        self._focus = focus
        self._shiftebeam = shiftebeam
        self.calibrated = model.BooleanVA(False)  # whether the calibration has been already done
        self._last_pos = stage.position.value.copy()
        self._last_pos.update(focus.position.value)  # last known position of the stage
        self._shift = (0, 0)  # (float, float): shift to apply in meters
        self._last_shift = (0, 0)  # (float, float): last ebeam shift applied
        # In case initialization takes place in unload position the
        # calibration values are not obtained yet. Thus we avoid to initialize
        # cur_trans before spot alignment takes place.
        self._cur_trans = None
        stage.position.subscribe(self._onMove)
        focus.position.subscribe(self._onMove)
        self._executor = ThreadPoolExecutor(max_workers=1)
        self._beamshift = None

    def _onMove(self, pos):
        """
        Called when the stage moves (changes position)
        pos (dict): new position
        """
        # Check if the position has really changed, as some stage tend to
        # report "new" position even when no actual move has happened
        logging.debug("Stage location is %s m,m,m", pos)
        if self._last_pos == pos:
            return
        self._last_pos.update(pos)

        # if self.is_active.value:
        self.calibrated.value = False

        # just reset status
        self._setStatus(None)

    # need to override it to support beam shift
    def _applyROI(self):
        """
        Update the scanning area of the SEM according to the roi
        """
        res, shift = self._computeROISettings(self.roi.value)

        if (self._shiftebeam == MTD_EBEAM_SHIFT) and (self._beamshift is not None):
            shift = tuple(s + c for s, c in zip(shift, self._beamshift))

        # always in this order
        self._emitter.resolution.value = res
        self._emitter.shift.value = shift

    def _compensateShift(self):
        """
        Compensate the SEM shift, using either beam shift or metadata update
        """
        # update the correction metadata
        logging.debug("Update metadata for SEM image shift")
        self._detector.updateMetadata({MD_POS_COR: self._shift})

    def _prepare(self):
        """
        Perform calibration if needed
        """
        logging.debug("Preparing stream %s ...", self)
        # actually indicate that preparation has been triggered, don't wait for
        # it to be completed
        self._prepared = True
        f = self._executor.submit(self._DoPrepare)

        # Note that there is no need to call super(). This would only check
        # for an optical path manager which in this case has no effect.

        return f

    def __del__(self):
        self._executor.shutdown(wait=False)

    def _DoPrepare(self):
        # Need to calibrate ?
        if not self.calibrated.value:
            self._setStatus(logging.INFO, u"Automatic SEM alignment in progress…")
            # store current settings
            no_spot_settings = (self._emitter.dwellTime.value,
                                self._emitter.resolution.value)
            # Don't mess up with un/subscribing while doing the calibration
            self._getEmitterVA("dwellTime").unsubscribe(self._onDwellTime)
            self._getEmitterVA("resolution").unsubscribe(self._onResolution)

            shift = (0, 0)
            self._beamshift = None
            try:
                logging.info("Determining the Ebeam center position")
                # TODO Handle cases where current beam shift is larger than
                # current limit. Happens when accel. voltage is changed
                self._emitter.shift.value = (0, 0)
                shift = FindEbeamCenter(self._ccd, self._detector, self._emitter)
                logging.debug("Spot shift is %s m,m", shift)
                self._beamshift = shift
                # Also update the last beam shift in order to be used for stage
                # offset correction in the next stage moves
                self._last_shift = (0.75 * self._last_shift[0] - 0.25 * shift[0],
                                    0.75 * self._last_shift[1] - 0.25 * shift[1])
                cur_trans = self._stage.getMetadata().get(model.MD_POS_COR, (0, 0))
                self._cur_trans = (cur_trans[0] - self._last_shift[0],
                                   cur_trans[1] - self._last_shift[1])
                self._stage.updateMetadata({
                    model.MD_POS_COR: self._cur_trans
                })
                logging.debug("Compensated stage translation %s m,m", self._cur_trans)
                if self._shiftebeam == MTD_EBEAM_SHIFT:
                    # First align using shift
                    self._applyROI()
                    # Then by updating the metadata
                    shift = (0, 0)  # just in case of failure
                    shift = FindEbeamCenter(self._ccd, self._detector, self._emitter)
                elif self._shiftebeam == MTD_MD_UPD:
                    pass
                else:
                    raise NotImplementedError("Unknown shiftbeam method %s" % (self._shiftebeam,))
            except LookupError:
                self._setStatus(logging.WARNING, (u"Automatic SEM alignment unsuccessful", u"Need to focus all streams"))
                # logging.warning("Failed to locate the ebeam center, SEM image will not be aligned")
            except Exception:
                self._setStatus(logging.WARNING, (u"Automatic SEM alignment unsuccessful", u"Need to focus all streams"))
                logging.exception("Failure while looking for the ebeam center")
            else:
                self._setStatus(None)
                logging.info("Aligning SEM image using shift of %s", shift)
                self.calibrated.value = True
            finally:
                # restore hw settings
                (self._emitter.dwellTime.value,
                 self._emitter.resolution.value) = no_spot_settings
                self._getEmitterVA("dwellTime").subscribe(self._onDwellTime)
                self._getEmitterVA("resolution").subscribe(self._onResolution)

            self._shift = shift
            self._compensateShift()
Code Example #33
class Server(metaclass=ABCMeta):
    """An abstract class meant to be extended, which represents a generic
    Remote Adapter object capable to run Remote Data or Metadata Adapter and
    connect it to the Proxy Adapter running on Lightstreamer Server.

    An instance of a Server's subclass should be provided with a suitable
    Adapter instance and with suitable initialization parameters and
    established connections, then activated through its own :meth:`start` and
    finally disposed through its own :meth:`close`. Further reuse of the same
    instance is not supported.
    """

    _DEFAULT_POOL_SIZE = 4

    # Number of current instances of Server' subclasses.
    _number = 0

    def __init__(self, address, name, keep_alive, thread_pool_size):
        Server._number += 1

        # Logger actually overridden by subclasses.
        self._log = logging.getLogger("lightstreamer-adapter.server")
        self._exception_handler = None
        self._config = {}
        self._config['address'] = address
        self._config['name'] = "#{}".format(Server._number) if (name is
                                                                None) else name
        self._config['keep_alive'] = max(0, keep_alive) if (keep_alive is not
                                                            None) else 0
        pool = max(0, thread_pool_size) if thread_pool_size is not None else 0
        if pool == 0:
            try:
                self._config['thread_pool_size'] = cpu_count()
            except NotImplementedError:
                self._config['thread_pool_size'] = Server._DEFAULT_POOL_SIZE
        else:
            self._config['thread_pool_size'] = pool

        self._executor = ThreadPoolExecutor(self._config['thread_pool_size'])
        self._server_sock = None
        self._request_receiver = None

    @property
    def name(self):
        """The name, used for logging purposes, associated to the Server
        instance.

        :type: str
        """
        return self._config['name']

    @property
    def keep_alive(self):
        """The keepalive interval expressed in seconds (or fractions)

        :type: float
        """
        return self._config['keep_alive']

    @property
    def thread_pool_size(self):
        """The thread pool size

        :type: int
        """
        return self._config['thread_pool_size']

    def set_exception_handler(self, handler):
        """Sets the handler for error conditions occurring on the Remote
        Server. By setting the handler, it's possible to override the default
        exception handling.

        :param lightstreamer_adapter.server.ExceptionHandler handler: the
         handler for error conditions occurring on the Remote Server.
        """
        self._exception_handler = handler

    @abstractmethod
    def start(self):
        """Starts the Remote Adapter. A connection to the Proxy Adapter is
        performed (as soon as one is available). Then, requests issued by
        the Proxy Adapter are received and forwarded to the Remote Adapter.
        """
        if self.keep_alive > 0:
            self._log.info("Keepalive time for %s set to %f milliseconds",
                           self.name, self.keep_alive)
        else:
            self._log.info("Keepalive for %s disabled", self.name)

        self._server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._server_sock.connect(self._config['address'])

        # Creates and starts the Request Receiver.
        self._request_receiver = _RequestReceiver(sock=self._server_sock,
                                                  server=self)
        self._request_receiver.start()

        # Invokes hook to notify subclass that the Request Receiver
        # has been started.
        self._on_request_receiver_started()

    def close(self):
        """Stops the management of the Remote Adapter and destroys the threads
        used by this Server. This instance can no longer be used.

        Note that this does not stop the supplied Remote Adapter, as no close
        method is available in the Remote Adapter interface. If the process is
        not terminating, then the Remote Adapter cleanup should be performed by
        accessing the supplied Adapter instance directly and calling custom
        methods.
        """
        self._request_receiver.quit()
        self._executor.shutdown()
        self._server_sock.close()

    def on_received_request(self, request):
        """Invoked when the RequestReciver gets a new request coming from the
        Proxy Adapter.

        This method performs a first, coarse-grained parsing to identify the
        three main components of the packet structure, as follows:

        <ID>|<method>|<data>
         |      |       |
         |      |     The arguments to be passed to the method
         |      |
         |   The method to invoke on the Remote Adapter
         |
        The Request Id

        Once parsed, the request is then dispatched to the subclass for later
        management.
        """
        try:
            parsed_request = protocol.parse_request(request)
            if parsed_request is None:
                self._log.warning("Discarding malformed request: %s", request)
                return

            request_id = parsed_request["id"]
            method_name = parsed_request["method"]
            data = parsed_request["data"]
            self._handle_request(request_id, data, method_name)
        except RemotingException as err:
            self.on_exception(err)

    def _send_reply(self, request_id, response):
        self._log.debug("Sending reply for request: %s", request_id)
        self._request_receiver.send_reply(request_id, response)

    def on_ioexception(self, ioexception):
        """Called by the Remote Server upon a read or write operation failure.

        See the documentation of the ExceptionHandler.handle_ioexception method
        for further details.
        """
        if self._exception_handler is not None:
            self._log.info(("Caught exception: %s, notifying the "
                            "application..."), str(ioexception))
            # Enable default handling in case the exception handler
            # returns False.
            if not self._exception_handler.handle_ioexception(ioexception):
                return

        self._handle_ioexception(ioexception)

    def on_exception(self, exception):
        """Called by the Remote Server upon an unexpected error.

        See the documentation of the ExceptionHandler.handle_exception method
        for further details.
        """
        if self._exception_handler is not None:
            self._log.info(("Caught exception: %s, notifying the "
                            "application..."), str(exception))
            # Enable default handling in case the exception handler
            # returns False.
            if not self._exception_handler.handle_exception(exception):
                return

        self._handle_exception(exception)

    @abstractmethod
    def _handle_ioexception(self, ioexception):
        # Default handling: terminate the process immediately.
        os._exit(1)
        return False  # unreachable after os._exit; kept for the bool contract

    def _handle_exception(self, exception):
        pass

    @abstractmethod
    def _on_request_receiver_started(self):
        """Hook method to notify the subclass that the Request Receiver has
        been started.

        This method is intended to be overridden by subclasses.
        """
        pass

    @abstractmethod
    def _handle_request(self, request_id, data, method_name):
        """Intended to be overridden by subclasses, invoked for handling the
        received request, already splitted into the supplied parameters.
        """
        pass
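
The lifecycle described in the class docstring (construct, start, close, no reuse) can be sketched as follows. DataProviderServer and MyDataAdapter are assumed names for a concrete subclass and a Remote Adapter, not confirmed by the code above:

# A minimal lifecycle sketch, under the assumptions stated above.
adapter = MyDataAdapter()                    # hypothetical Remote Adapter
server = DataProviderServer(adapter,         # hypothetical Server subclass
                            address=("localhost", 6663),
                            name=None, keep_alive=1.0,
                            thread_pool_size=None)
server.start()   # connects to the Proxy Adapter, starts the Request Receiver
# ... serve requests ...
server.close()   # stops the receiver and the pool; the instance cannot be reused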
Code example #35
File: path.py Project: delmic/odemis
class OpticalPathManager(object):
    """
    The purpose of this class is to set the physical components contained in
    the optical path of a SPARC system to the right position/configuration
    with respect to the given mode.
    """
    def __init__(self, microscope):
        """
        microscope (Microscope): the whole microscope component, so that all
            the needed components can be accessed
        """
        self.microscope = microscope
        self._graph = affectsGraph(self.microscope)
        self._chamber_view_own_focus = False

        # Use subset for modes guessed
        if microscope.role == "sparc2":
            self._modes = copy.deepcopy(SPARC2_MODES)
        elif microscope.role in ("sparc-simplex", "sparc"):
            self._modes = copy.deepcopy(SPARC_MODES)
        elif microscope.role in ("secom", "delphi"):
            self._modes = copy.deepcopy(SECOM_MODES)
        else:
            raise NotImplementedError("Microscope role '%s' unsupported" % (microscope.role,))

        # Currently only used with the SECOM/DELPHI
        self.quality = ACQ_QUALITY_FAST

        # keep list of all components, to avoid creating new proxies
        # every time the mode changes
        self._cached_components = model.getComponents()

        # All the actuators in the microscope, to cache proxy's to them
        self._actuators = []
        for comp in self._cached_components:
            if hasattr(comp, 'axes') and isinstance(comp.axes, dict):
                self._actuators.append(comp)

        # last known axes position (before going to an alignment mode)
        self._stored = {}  # (str, str) -> pos: (comp role, axis name) -> position
        self._last_mode = None  # previous mode that was set

        # Removes modes which are not supported by the current microscope
        for m, (det, conf) in list(self._modes.items()):  # copy: entries are deleted while iterating
            try:
                comp = self._getComponent(det)
            except LookupError:
                logging.debug("Removing mode %s, which is not supported", m)
                del self._modes[m]

        # Create the guess information out of the mode
        # TODO: just make it a dict comprole -> mode
        self.guessed = self._modes.copy()
        # No stream should ever imply alignment mode
        for m in ALIGN_MODES:
            try:
                del self.guessed[m]
            except KeyError:
                pass  # Mode to delete is just not there

        if self.microscope.role in ("secom", "delphi"):
            # To record the fan settings when in "fast" acq quality
            try:
                ccd = self._getComponent("ccd")
            except LookupError:
                ccd = None
                # Check that at least it's a confocal microscope
                try:
                    lm = self._getComponent("laser-mirror")
                except LookupError:
                    logging.warning("Couldn't find a CCD on a SECOM/DELPHI")

            self._has_fan_speed = model.hasVA(ccd, "fanSpeed")
            self._has_fan_temp = (model.hasVA(ccd, "targetTemperature") and
                                  not ccd.targetTemperature.readonly)
            # Consider that by default we are in "fast" acquisition, with the fan
            # active (if it ought to be active)
            self._fan_enabled = True
            # Settings of the fan when the fan is in "active cooling" mode
            self._enabled_fan_speed = None
            self._enabled_fan_temp = None

        # Handle different focus for chamber-view (in SPARCv2)
        if "chamber-view" in self._modes:
            self._focus_in_chamber_view = None
            self._focus_out_chamber_view = None
            # Check whether the focus affects the chamber view
            try:
                chamb_det = self._getComponent(self._modes["chamber-view"][0])
                focus = self._getComponent("focus")
                if self.affects(focus.name, chamb_det.name):
                    self._chamber_view_own_focus = True
            except LookupError:
                pass
            if not self._chamber_view_own_focus:
                logging.debug("No focus component affecting chamber")

        # will take care of executing setPath asynchronously
        self._executor = ThreadPoolExecutor(max_workers=1)

    def __del__(self):
        logging.debug("Ending path manager")

        # Restore the spectrometer focus, so that on next start, this value will
        # be used again as "out of chamber view".
        if self._chamber_view_own_focus and self._last_mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            if self._focus_out_chamber_view is not None:
                logging.debug("Restoring focus from before coming to chamber view to %s",
                              self._focus_out_chamber_view)
                try:
                    focus_comp.moveAbsSync(self._focus_out_chamber_view)
                except IOError as e:
                    logging.info("Actuator move failed giving the error %s", e)

        try:
            self._executor.shutdown(wait=False)
        except AttributeError:
            pass  # Not created

    def _getComponent(self, role):
        """
        same as model.getComponent, but optimised by caching the result.
        Uses a regex to match the given role against the cached components.

        return Component
        raise LookupError: if no matching component is found
        """
        # search the cached components for a matching role
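        # Note: "role" is treated as a regular expression anchored at both
        # ends (re.match anchors the start, the appended "$" the end), so a
        # mode table may use a pattern such as "ccd.*" as a component role.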
        for comp in self._cached_components:
            if comp.role is not None and re.match(role + "$", comp.role):
                return comp
        # if not found...
        raise LookupError("No component with the role %s" % (role,))

    def setAcqQuality(self, quality):
        """
        Update the acquisition quality expected. Depending on the quality,
        some hardware settings will be adjusted.
        quality (ACQ_QUALITY): the acquisition quality
        """
        assert quality in (ACQ_QUALITY_FAST, ACQ_QUALITY_BEST)

        if quality == self.quality:
            return
        self.quality = quality

        if self.microscope.role in ("secom", "delphi"):
            if quality == ACQ_QUALITY_FAST:
                # Restore the fan (if it was active before)
                self._setCCDFan(True)
            # Don't turn off the fan if BEST: first wait for setPath()

    def setPath(self, mode, detector=None):
        """
        Given a particular mode it sets all the necessary components of the
        optical path (found through the microscope component) to the
        corresponding positions.
        mode (stream.Stream or str): The stream or the optical path mode
        detector (Component or None): The detector which will be targeted on this
          path. This can only be set if mode is a str (optical mode). That
          is useful in case the mode can be used with multiple detectors (eg,
          fiber-align on a SPARC with multiple spectrometers). When mode is a
          Stream, the Stream.detector is always used.
        return (Future): a Future allowing to follow the status of the path
          update.
        raises (via the future):
            ValueError if the given mode does not exist
            IOError if a detector is missing
        """
        f = self._executor.submit(self._doSetPath, mode, detector)

        return f

    def _doSetPath(self, path, detector):
        """
        Actual implementation of setPath()
        """
        if isinstance(path, stream.Stream):
            if detector is not None:
                raise ValueError("Not possible to specify both a stream, and a detector")
            try:
                mode = self.guessMode(path)
            except LookupError:
                logging.debug("%s doesn't require optical path change", path)
                return
            target = self.getStreamDetector(path)  # target detector
        else:
            mode = path
            if mode not in self._modes:
                raise ValueError("Mode '%s' does not exist" % (mode,))
            comp_role = self._modes[mode][0]
            if detector is None:
                target = self._getComponent(comp_role)
            else:
                target = detector

        logging.debug("Going to optical path '%s', with target detector %s.", mode, target.name)

        # Special SECOM mode: just look at the fan and be done
        if self.microscope.role in ("secom", "delphi"):
            if self.quality == ACQ_QUALITY_FAST:
                self._setCCDFan(True)
            elif self.quality == ACQ_QUALITY_BEST:
                self._setCCDFan(target.role == "ccd")

        fmoves = []  # moves in progress, list of (future, Component, dict(axis->pos)) tuples

        # Restore the spectrometer focus before any other move, as (on the SR193),
        # the value is grating/output dependent
        if self._chamber_view_own_focus and self._last_mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            self._focus_in_chamber_view = focus_comp.position.value.copy()
            if self._focus_out_chamber_view is not None:
                logging.debug("Restoring focus from before coming to chamber view to %s",
                              self._focus_out_chamber_view)
                fmoves.append((focus_comp.moveAbs(self._focus_out_chamber_view), focus_comp, self._focus_out_chamber_view))

        modeconf = self._modes[mode][1]
        for comp_role, conf in modeconf.items():
            # Try to access the component needed
            try:
                comp = self._getComponent(comp_role)
            except LookupError:
                logging.debug("Failed to find component %s, skipping it", comp_role)
                continue

            # Check whether that actuator affects the target
            targets = {target.name} | set(target.affects.value)
            if not any(self.affects(comp.name, n) for n in targets):
                logging.debug("Actuator %s doesn't affect %s, so not moving it",
                              comp.name, target.name)
                continue

            mv = {}
            for axis, pos in conf.items():
                if axis == "power":
                    if model.hasVA(comp, "power"):
                        try:
                            if pos == 'on':
                                comp.power.value = comp.power.range[1]
                            else:
                                comp.power.value = comp.power.range[0]
                            logging.debug("Updating power of comp %s to %f", comp.name, comp.power.value)
                        except AttributeError:
                            logging.debug("Could not retrieve power range of %s component", comp_role)
                    continue
                if not hasattr(comp, "axes") or not isinstance(comp.axes, dict):
                    continue
                if isinstance(pos, str) and pos.startswith("MD:"):
                    pos = self.mdToValue(comp, pos[3:])[axis]
                if axis in comp.axes:
                    if axis == "band":
                        # Handle the filter wheel in a special way. Search
                        # for the key that corresponds to the value, most probably
                        # to the 'pass-through'
                        choices = comp.axes[axis].choices
                        for key, value in choices.items():
                            if value == pos:
                                pos = key
                                # Just to store current band in order to restore
                                # it once we leave this mode
                                if self._last_mode not in ALIGN_MODES:
                                    self._stored[comp_role, axis] = comp.position.value[axis]
                                break
                        else:
                            logging.debug("Choice %s is not present in %s axis", pos, axis)
                            continue
                    elif axis == "grating":
                        # If mirror is to be used but not found in grating
                        # choices, then we use zero order. In case of
                        # GRATING_NOT_MIRROR we either use the last known
                        # grating or the first grating that is not mirror.
                        choices = comp.axes[axis].choices
                        if pos == "mirror":
                            # Store current grating (if we use one at the moment)
                            # to restore it once we use a normal grating again
                            if choices[comp.position.value[axis]] != "mirror":
                                self._stored[comp_role, axis] = comp.position.value[axis]
                                self._stored[comp_role, 'wavelength'] = comp.position.value['wavelength']
                            # Use the special "mirror" grating, if it exists
                            for key, value in choices.items():
                                if value == "mirror":
                                    pos = key
                                    break
                            else:
                                # Fallback to zero order (aka "low-quality mirror")
                                axis = 'wavelength'
                                pos = 0
                        elif pos == GRATING_NOT_MIRROR:
                            if choices[comp.position.value[axis]] == "mirror":
                                # if there is a grating stored use this one
                                # otherwise find the non-mirror grating
                                if (comp_role, axis) in self._stored:
                                    pos = self._stored[comp_role, axis]
                                else:
                                    pos = self.findNonMirror(choices)
                                if (comp_role, 'wavelength') in self._stored:
                                    mv['wavelength'] = self._stored[comp_role, 'wavelength']
                            else:
                                pos = comp.position.value[axis]  # no change
                            try:
                                del self._stored[comp_role, axis]
                            except KeyError:
                                pass
                            try:
                                del self._stored[comp_role, 'wavelength']
                            except KeyError:
                                pass
                        else:
                            logging.debug("Using grating position as-is: '%s'", pos)
                    elif axis == "slit-in":
                        if mode in ALIGN_MODES and (comp_role, axis) not in self._stored:
                            self._stored[comp_role, axis] = comp.position.value[axis]
                    elif hasattr(comp.axes[axis], "choices") and isinstance(comp.axes[axis].choices, dict):
                        choices = comp.axes[axis].choices
                        for key, value in choices.items():
                            if value == pos:
                                pos = key
                                break
                    # write actuator axis and position in dict
                    mv[axis] = pos
                else:
                    logging.debug("Not moving axis %s.%s as it is not present", comp_role, axis)

            try:
                # move actuator
                fmoves.append((comp.moveAbs(mv), comp, mv))
            except AttributeError:
                logging.warning("%s not an actuator", comp_role)

        # Now take care of the selectors based on the target detector
        fmoves.extend(self.selectorsToPath(target.name))

        # If we are about to leave alignment modes, restore values
        if self._last_mode in ALIGN_MODES and mode not in ALIGN_MODES:
            logging.debug("Leaving align mode %s for %s, will restore positions: %s",
                          self._last_mode, mode, self._stored)
            for (cr, an), pos in self._stored.copy().items(): # copy for deleting entries
                if an == "grating":
                    continue  # handled separately via GRATING_NOT_MIRROR
                comp = self._getComponent(cr)
                fmoves.append((comp.moveAbs({an: pos}), comp, {an: pos}))
                del self._stored[cr, an]

        # Save last mode
        self._last_mode = mode

        # wait for all the moves to be completed
        for f, comp, mv in fmoves:
            try:
                # Can be long: any single move should finish within 3 min.
                f.result(timeout=180)

                # To do an absolute move, an axis should be referenced (if it
                # supports referencing). If not, that's an error (but for now we
                # still try, just in case it might work anyway).
                for a in mv:
                    try:
                        if (model.hasVA(comp, "referenced") and
                            not comp.referenced.value.get(a, True)):
                            logging.error("%s.%s is not referenced, it might be a sign of a hardware issue",
                                          comp.name, a)
                    except Exception:
                        logging.exception("Failed to check %s.%s is referenced", comp.name, a)

            except IOError as e:
                logging.warning("Actuator move failed with error %s", e)
            except Exception:
                logging.exception("Actuator move failed!")
                raise

        # When going to chamber view, store the current focus position, and
        # restore the special focus position for chamber, after _really_ all
        # the other moves have finished, because the grating/output selector
        # moves affects the current position of the focus.
        if self._chamber_view_own_focus and mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            self._focus_out_chamber_view = focus_comp.position.value.copy()
            if self._focus_in_chamber_view is not None:
                logging.debug("Restoring focus from previous chamber view to %s",
                              self._focus_in_chamber_view)
                try:
                    focus_comp.moveAbsSync(self._focus_in_chamber_view)
                except IOError as e:
                    logging.warning("Actuator move failed giving the error %s", e)

    def selectorsToPath(self, target):
        """
        Sets the selectors so the optical path leads to the target component
        (usually a detector).
        target (str): component name
        return (list of (future, Component, move dict) tuples)
        """
        fmoves = []
        for comp in self._actuators:
            # TODO: pre-cache this as comp/target -> axis/pos
            # TODO: don't do moves already done

            # TODO: extend the path computation to "for every actuator which _affects_
            # the target, move if position known, and update path to that actuator"?
            # Eg, this would improve path computation on SPARCv2 with fiber aligner
            mv = {}
            for an, ad in comp.axes.items():
                if hasattr(ad, "choices") and isinstance(ad.choices, dict):
                    for pos, value in ad.choices.items():
                        if target in value:
                            # set the position so it points to the target
                            mv[an] = pos

            comp_md = comp.getMetadata()
            if target in comp_md.get(model.MD_FAV_POS_ACTIVE_DEST, {}):
                mv.update(comp_md[model.MD_FAV_POS_ACTIVE])
            elif target in comp_md.get(model.MD_FAV_POS_DEACTIVE_DEST, {}):
                mv.update(comp_md[model.MD_FAV_POS_DEACTIVE])

            if mv:
                logging.debug("Move %s added so %s targets to %s", mv, comp.name, target)
                fmoves.append((comp.moveAbs(mv), comp, mv))
                # make sure this component is also on the optical path
                fmoves.extend(self.selectorsToPath(comp.name))

        return fmoves

    def guessMode(self, guess_stream):
        """
        Given a stream and by checking its components (e.g. role of detector)
        guesses and returns the corresponding optical path mode.
        guess_stream (object): The given optical stream
        returns (str): Mode estimated
        raises:
                LookupError if no mode can be inferred for the given stream
                IOError if given object is not a stream
        """
        if not isinstance(guess_stream, stream.Stream):
            raise IOError("Given object is not a stream")

        # Handle multiple detector streams
        if isinstance(guess_stream, stream.MultipleDetectorStream):
            for st in guess_stream.streams:
                try:
                    return self.guessMode(st)
                except LookupError:
                    pass
        elif isinstance(guess_stream, stream.OverlayStream):
            return "overlay"
        else:
            for mode, conf in self.guessed.items():
                # match the name using regex
                if re.match(conf[0] + '$', guess_stream.detector.role):
                    return mode
        # In case no mode was found yet
        raise LookupError("No mode can be inferred for the given stream")

    def getStreamDetector(self, path_stream):
        """
        Given a stream find the optical detector.
        path_stream (Stream): The given stream
        returns (HwComponent): detector
        raises:
                ValueError if given object is not a stream
                LookupError: if stream has no detector
        """
        if not isinstance(path_stream, stream.Stream):
            raise ValueError("Given object is not a stream")

        # Handle multiple detector streams
        if isinstance(path_stream, stream.MultipleDetectorStream):
            dets = []
            for st in path_stream.streams:
                try:
                    # Prefer the detectors which have a role in the mode, as it's much
                    # more likely to be the optical detector
                    # TODO: handle setting multiple optical paths? => return all the detectors
                    role = st.detector.role
                    for conf in self.guessed.values():
                        if re.match(conf[0] + '$', role):
                            return st.detector
                    dets.append(st.detector)
                except AttributeError:
                    pass
            if dets:
                logging.warning("No detector on stream %s has a known optical role", path_stream.name.value)
                return dets[0]
        elif isinstance(path_stream, stream.OverlayStream):
            return path_stream._ccd
        else:
            try:
                return path_stream.detector
            except AttributeError:
                pass  # will raise error just after

        raise LookupError("Failed to find a detector on stream %s" % (path_stream.name.value,))

    def findNonMirror(self, choices):
        """
        Given a dict of choices, finds the key whose value differs from "mirror"
        """
        for key, value in choices.items():
            if value != "mirror":
                return key
        raise ValueError("Cannot find a non-mirror grating in the given choices")

    def mdToValue(self, comp, md_name):
        """
        Just retrieves the "md_name" metadata from component "comp"
        """
        md = comp.getMetadata()
        try:
            return md[md_name]
        except KeyError:
            raise KeyError("Metadata %s does not exist in component %s" % (md_name, comp.name))

    def affects(self, affecting, affected):
        """
        Returns True if the "affecting" component affects, directly or
        indirectly, the "affected" component
        affecting (str): component name
        affected (str): component name
        return bool
        """
        path = self.findPath(affecting, affected)
        if path is None:
            return False
        else:
            return True

    def findPath(self, node1, node2, path=None):
        """
        Find any path between node1 and node2 (may not be shortest)
        """
        if path is None:
            path = []
        path = path + [node1]
        if node1 == node2:
            return path
        if node1 not in self._graph:
            return None
        for node in self._graph[node1]:
            if node not in path:
                new_path = self.findPath(node, node2, path)
                if new_path:
                    return new_path
        return None

    def _setCCDFan(self, enable):
        """
        Turn on/off the fan of the CCD
        enable (boolean): True to turn on/restore the fan, False to turn it off
        """
        if not self._has_fan_speed:
            return

        if self._fan_enabled == enable:
            return
        self._fan_enabled = enable

        comp = self._getComponent("ccd")

        if enable:
            if self._enabled_fan_speed is not None:
                logging.debug("Turning fan on of %s", comp.name)
                comp.fanSpeed.value = max(comp.fanSpeed.value, self._enabled_fan_speed)
        else:
            if comp.fanSpeed.value == 0:
                # Already off => don't touch it
                self._enabled_fan_speed = None
                self._enabled_fan_temp = None
            else:
                logging.debug("Turning fan off of %s", comp.name)
                self._enabled_fan_speed = comp.fanSpeed.value
                comp.fanSpeed.value = 0

        # Raise targetTemperature to max/ambient to prevent the fan from
        # automatically starting again. (Some hardware has this behaviour built
        # in when the current temperature is too high compared to the target.)
        if self._has_fan_temp:
            temp = comp.targetTemperature
            if enable:
                if self._enabled_fan_temp is not None:
                    temp.value = min(comp.targetTemperature.value, self._enabled_fan_temp)
                    try:
                        self._waitTemperatureReached(comp, timeout=60)
                    except Exception as ex:
                        logging.warning("Failed to reach target temperature of CCD: %s",
                                        ex)
            else:
                # Set ~25°C == ambient temperature
                self._enabled_fan_temp = temp.value
                try:
                    try:
                        temp.value = min(comp.targetTemperature.range[1], 25)
                    except (AttributeError, NotApplicableError):
                        temp.value = util.find_closest(25, comp.targetTemperature.choices)
                except Exception:
                    logging.warning("Failed to change targetTemperature when disabling fan",
                                    exc_info=True)

    def _waitTemperatureReached(self, comp, timeout=None):
        """
        Wait until the current temperature of the component has reached the
          target temperature (within some margin).
        comp (Component)
        timeout (0<float or None): maximum time to wait (in s)
        raises:
            TimeoutError: if time-out reached
        """
        tstart = time.time()
        while timeout is None or time.time() < tstart + timeout:
            # TODO: adjust the timeout depending on whether the temperature
            # gets closer to the target over time or not.
            ttemp = comp.targetTemperature.value
            atemp = comp.temperature.value
            if atemp < ttemp + TEMP_EPSILON:
                return
            else:
                logging.debug(u"Waiting for temperature to reach %g °C (currently at %g °C)",
                              ttemp, atemp)
                time.sleep(1)

        raise TimeoutError("Target temperature (%g C) not reached after %g s" %
                           (comp.targetTemperature.value, timeout))
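
Since setPath() just submits _doSetPath() to a single-worker executor, callers receive a Future and decide when to block. A minimal usage sketch, assuming a running Odemis backend from which model.getMicroscope() returns the microscope component:

# A minimal usage sketch, under the assumptions stated above.
manager = OpticalPathManager(model.getMicroscope())
f = manager.setPath("chamber-view")  # returns a Future immediately
f.result(timeout=180)                # block until all the moves are done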
Code example #36
File: init.py Project: bobiwembley/sen
class UI(urwid.MainLoop):
    def __init__(self):
        self.d = DockerBackend()

        # root widget
        self.mainframe = urwid.Frame(urwid.SolidFill())
        self.buffers = []
        self.footer = Footer(self)

        self.executor = ThreadPoolExecutor(max_workers=4)

        root_widget = urwid.AttrMap(self.mainframe, "root")
        self.main_list_buffer = None  # singleton

        screen = urwid.raw_display.Screen()
        screen.set_terminal_properties(256)
        screen.register_palette(PALLETE)

        super().__init__(root_widget, screen=screen)
        self.handle_mouse = False
        self.current_buffer = None

    def run_in_background(self, task, *args, **kwargs):
        logger.info("running task %r(%s, %s) in background", task, args, kwargs)
        self.executor.submit(task, *args, **kwargs)

    def refresh(self):
        try:
            self.draw_screen()
        except AssertionError:
            logger.warning("application is not running")

    def _set_main_widget(self, widget, redraw):
        """
        set provided widget as the main frame body and display it

        :param widget: widget to display
        :param redraw: bool, whether to redraw the screen immediately
        :return:
        """
        self.mainframe.set_body(widget)
        self.reload_footer()
        if redraw:
            logger.debug("redraw main widget")
            self.refresh()

    def display_buffer(self, buffer, redraw=True):
        """
        display provided buffer

        :param buffer: Buffer
        :return:
        """
        self.current_buffer = buffer
        self._set_main_widget(buffer.widget, redraw=redraw)

    def add_and_display_buffer(self, buffer, redraw=True):
        """
        add provided buffer to buffer list and display it

        :param buffer:
        :return:
        """
        if buffer not in self.buffers:
            logger.debug("adding new buffer {!r}".format(buffer))
            self.buffers.append(buffer)
        self.display_buffer(buffer, redraw=redraw)

    def pick_and_display_buffer(self, i):
        """
        pick i-th buffer from list and display it

        :param i: int
        :return: None
        """
        if len(self.buffers) == 1:
            # we don't need to display anything
            # listing is already displayed
            return
        else:
            try:
                self.display_buffer(self.buffers[i])
            except IndexError:
                # index out of range: wrap around to the first buffer
                self.display_buffer(self.buffers[0])

    @property
    def current_buffer_index(self):
        return self.buffers.index(self.current_buffer)

    def remove_current_buffer(self):
        # don't allow removing main_list
        if isinstance(self.current_buffer, MainListBuffer):
            logger.warning("you can't remove main list widget")
            return
        self.buffers.remove(self.current_buffer)
        self.current_buffer.destroy()
        # FIXME: we should display last displayed widget here
        self.display_buffer(self.buffers[0], True)

    def unhandled_input(self, key):
        logger.debug("unhandled input: %r", key)
        try:
            if key in ("q", "Q"):
                self.executor.shutdown(wait=False)
                raise urwid.ExitMainLoop()
            elif key == "ctrl o":
                self.pick_and_display_buffer(self.current_buffer_index - 1)
            elif key == "ctrl i":
                self.pick_and_display_buffer(self.current_buffer_index + 1)
            elif key == "x":
                self.remove_current_buffer()
            elif key == "/":
                self.prompt("/", search)
            elif key == "f4":
                self.footer.prompt("filter ", filter)
            elif key == "n":
                self.current_buffer.find_next()
            elif key == "N":
                self.current_buffer.find_previous()
            elif key in ["h", "?"]:
                self.display_help()
            elif key == "f5":
                self.display_tree()
        except NotifyError as ex:
            self.notify_message(str(ex), level="error")
            logger.error(repr(ex))

    def run(self):
        self.main_list_buffer = MainListBuffer(self.d, self)

        @log_traceback
        def chain_fcs():
            self.main_list_buffer.refresh(focus_on_top=True)
            self.add_and_display_buffer(self.main_list_buffer, redraw=True)

        self.run_in_background(chain_fcs)
        super().run()

    def display_logs(self, docker_container):
        self.add_and_display_buffer(LogsBuffer(docker_container, self))

    def display_and_follow_logs(self, docker_container):
        self.add_and_display_buffer(LogsBuffer(docker_container, self, follow=True))

    def inspect(self, docker_object):
        self.add_and_display_buffer(InspectBuffer(docker_object))

    def display_image_info(self, docker_image):
        try:
            self.add_and_display_buffer(ImageInfoBuffer(docker_image, self))
        except NotifyError as ex:
            self.notify_message(str(ex), level="error")
            logger.error(repr(ex))

    def refresh_main_buffer(self, refresh_buffer=True):
        assert self.main_list_buffer is not None
        if refresh_buffer:
            self.main_list_buffer.refresh()
        self.display_buffer(self.main_list_buffer)

    def display_help(self):
        self.add_and_display_buffer(HelpBuffer())

    def display_tree(self):
        self.add_and_display_buffer(TreeBuffer(self.d, self))

    # FOOTER

    def set_footer(self, widget):
        self.mainframe.set_footer(widget)

    def reload_footer(self):
        self.footer.reload_footer()

    def remove_notification_message(self, message):
        self.footer.remove_notification_message(message)

    def notify_widget(self, *args, **kwargs):
        self.footer.notify_widget(*args, **kwargs)

    def notify_message(self, *args, **kwargs):
        self.footer.notify_message(*args, **kwargs)

    def prompt(self, *args, **kwargs):
        self.footer.prompt(*args, **kwargs)
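
For completeness, a minimal launch sketch (assuming the sen package is importable and a Docker daemon is reachable); run() blocks until the user quits with "q", which also shuts the executor down:

# A minimal launch sketch, under the assumptions stated above.
ui = UI()
ui.run()  # blocks; background refreshes go through ui.executor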
Code example #37
File: _bender.py Project: bender-bot/bender
class Bender(object):

    def __init__(self, backbone, brain=None):
        self._backbone = backbone
        self._brain = brain if brain is not None else Brain()
        self._brain_lock = threading.Lock()
        self._regex_to_response = OrderedDict()
        self._scripts = OrderedDict()

        self._pool = ThreadPoolExecutor(max_workers=4)
        self._futures = []  # list of futures submitted to the pool
        self._stop_loop = threading.Event()

    def register_script(self, name, script):
        self._scripts[name] = script

    def register_builtin_scripts(self):
        for name, script in scripts.get_builtin_scripts():
            self.register_script(name, script)

    def register_setuptools_scripts(self):
        for p in pkg_resources.iter_entry_points('bender_script'):
            obj = p.load()
            if inspect.isclass(obj):
                obj = obj()
            self.register_script(p.name, obj)

    def get_script(self, name):
        return self._scripts[name]

    def iter_scripts(self):
        return iter(self._scripts.items())

    def start(self):
        self._brain.load()
        self._backbone.on_message_received = self.on_message_received

        self.register_builtin_scripts()
        self.register_setuptools_scripts()

        for script in self._scripts.values():
            hooks.call_unique_hook(script, 'script_initialize_hook',
                                   brain=self._brain)

        hooks.call_unique_hook(self._backbone, 'backbone_start_hook')

    def shutdown(self):
        self._pool.shutdown(wait=True)
        for name, script in list(self._scripts.items()):
            self._scripts.pop(name)
            hooks.call_unique_hook(script, 'script_shutdown_hook',
                                   brain=self._brain)

        hooks.call_unique_hook(self._backbone, 'backbone_shutdown_hook',
                               brain=self._brain)
        self._brain.dump()
        self._stop_loop.set()

    def request_shutdown(self):
        self._stop_loop.set()

    def loop(self):
        self.start()
        self._stop_loop.wait()
        self.shutdown()

    def on_message_received(self, msg):

        def thread_exec(hook, brain, msg, match):
            try:
                hooks.call(hook, brain=self._brain, msg=msg, match=match,
                           bender=self)
            except Exception as e:
                msg.reply('*BZZT* %s' % e)
            else:
                with self._brain_lock:
                    brain.dump()

        handled = False
        for script in self._scripts.values():
            for hook in hooks.find_hooks(script, 'respond_hook'):
                match = re.match(hook.inputs['regex'], msg.get_body(),
                                 re.IGNORECASE | re.DOTALL)
                if match:
                    f = self._pool.submit(thread_exec, hook, self._brain, msg,
                                          match)
                    self._futures.append(f)
                    handled = True

        if not handled:
            msg.reply('Command not recognized')

    def wait_all_messages(self):
        while self._futures:
            f = self._futures.pop()
            f.result()  # wait until future returns
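
The dispatch in on_message_received() (match the message body against each registered regex, submit the matching hook to the pool, and remember the future) can be reduced to a self-contained sketch; the handler table below is made up for illustration:

# A self-contained sketch of the same dispatch pattern (hypothetical handlers).
import re
from concurrent.futures import ThreadPoolExecutor

HANDLERS = {r'ping\b': lambda match: print('pong')}

pool = ThreadPoolExecutor(max_workers=4)
futures = []  # futures submitted to the pool, as in Bender._futures

def on_message(body):
    handled = False
    for regex, handler in HANDLERS.items():
        match = re.match(regex, body, re.IGNORECASE | re.DOTALL)
        if match:
            futures.append(pool.submit(handler, match))
            handled = True
    if not handled:
        print('Command not recognized')

on_message('ping everyone')
while futures:             # same draining loop as wait_all_messages()
    futures.pop().result()
pool.shutdown()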
Code example #38
class OpticalPathManager(object):
    """
    The purpose of this class is to set the physical components contained in
    the optical path of a SPARC system to the right position/configuration
    with respect to the given mode.
    """
    def __init__(self, microscope):
        """
        microscope (Microscope): the whole microscope component, so that all
            the needed components can be accessed
        """
        self.microscope = microscope
        self._graph = affectsGraph(self.microscope)

        # Use subset for modes guessed
        if microscope.role == "sparc2":
            self._modes = copy.deepcopy(SPARC2_MODES)
        elif microscope.role in ("sparc-simplex", "sparc"):
            self._modes = copy.deepcopy(SPARC_MODES)
        else:
            raise NotImplementedError("Microscope role '%s' unsupported" % (microscope.role,))

        # keep list of already accessed components, to avoid creating new proxies
        # every time the mode changes
        self._known_comps = dict()  # str (role) -> component

        # All the actuators in the microscope, to cache proxy's to them
        self._actuators = []
        for comp in model.getComponents():
            if hasattr(comp, 'axes') and isinstance(comp.axes, dict):
                self._actuators.append(comp)

        # last known axes position
        self._stored = {}
        self._last_mode = None  # previous mode that was set
        # Removes modes which are not supported by the current microscope
        for m, (det, conf) in list(self._modes.items()):  # copy: entries are deleted while iterating
            try:
                comp = self._getComponent(det)
            except LookupError:
                logging.debug("Removing mode %s, which is not supported", m)
                del self._modes[m]

        # Create the guess information out of the mode
        # TODO: just make it a dict comprole -> mode
        self.guessed = self._modes.copy()
        # No stream should ever imply alignment mode
        for m in ALIGN_MODES:
            try:
                del self.guessed[m]
            except KeyError:
                pass  # Mode to delete is just not there

        # Handle different focus for chamber-view (in SPARCv2)
        if "chamber-view" in self._modes:
            self._focus_in_chamber_view = None
            self._focus_out_chamber_view = None
            # Check whether the focus affects the chamber view
            self._chamber_view_own_focus = False
            try:
                chamb_det = self._getComponent(self._modes["chamber-view"][0])
                focus = self._getComponent("focus")
                if self.affects(focus.name, chamb_det.name):
                    self._chamber_view_own_focus = True
            except LookupError:
                pass
            if not self._chamber_view_own_focus:
                logging.debug("No focus component affecting chamber")

        try:
            spec = self._getComponent("spectrometer")
        except LookupError:
            spec = None
        if self.microscope.role == "sparc2" and spec:
            # Remove the moves that don't affect the detector
            # TODO: do this for _all_ modes
            for mode in ('spectral', 'monochromator'):
                if mode in self._modes:
                    det_role = self._modes[mode][0]
                    det = self._getComponent(det_role)
                    modeconf = self._modes[mode][1]
                    for act_role in list(modeconf.keys()):  # copy: entries are deleted while iterating
                        try:
                            act = self._getComponent(act_role)
                        except LookupError:
                            # TODO: just remove that move too?
                            logging.debug("Failed to find component %s, skipping it", act_role)
                            continue
                        if not self.affects(act.name, det.name):
                            logging.debug("Actuator %s doesn't affect %s, so removing it from mode %s",
                                          act_role, det_role, mode)
                            del modeconf[act_role]

        # will take care of executing setPath asynchronously
        self._executor = ThreadPoolExecutor(max_workers=1)

    def __del__(self):
        logging.debug("Ending path manager")

        # Restore the spectrometer focus, so that on next start, this value will
        # be used again as "out of chamber view".
        if self._chamber_view_own_focus and self._last_mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            if self._focus_out_chamber_view is not None:
                logging.debug("Restoring focus from before coming to chamber view to %s",
                              self._focus_out_chamber_view)
                try:
                    focus_comp.moveAbsSync(self._focus_out_chamber_view)
                except IOError as e:
                    logging.info("Actuator move failed giving the error %s", e)

        self._executor.shutdown(wait=False)

    def _getComponent(self, role):
        """
        same as model.getComponent, but optimised by caching the result
        return Component
        raise LookupError: if no component found
        """
        try:
            comp = self._known_comps[role]
        except LookupError:
            comp = model.getComponent(role=role)
            self._known_comps[role] = comp

        return comp

    @isasync
    def setPath(self, mode):
        """
        Just a wrapper around _doSetPath; returns a Future for the path change
        """
        f = self._executor.submit(self._doSetPath, mode)

        return f

    def _doSetPath(self, path):
        """
        Given a particular mode it sets all the necessary components of the
        optical path (found through the microscope component) to the
        corresponding positions.
        path (stream.Stream or str): The stream or the optical path mode
        raises:
                ValueError if the given mode does not exist
                IOError if a detector is missing
        """
        if isinstance(path, stream.Stream):
            mode = self.guessMode(path)
            if mode not in self._modes:
                raise ValueError("Mode '%s' does not exist" % (mode,))
            target = self.getStreamDetector(path)  # target detector
        else:
            mode = path
            if mode not in self._modes:
                raise ValueError("Mode '%s' does not exist" % (mode,))
            comp_role = self._modes[mode][0]
            comp = self._getComponent(comp_role)
            target = comp.name

        logging.debug("Going to optical path '%s', with target detector %s.", mode, target)

        fmoves = []  # moves in progress

        # Restore the spectrometer focus before any other move, as (on the SR193),
        # the value is grating/output dependent
        if self._chamber_view_own_focus and self._last_mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            self._focus_in_chamber_view = focus_comp.position.value.copy()
            if self._focus_out_chamber_view is not None:
                logging.debug("Restoring focus from before coming to chamber view to %s",
                              self._focus_out_chamber_view)
                fmoves.append(focus_comp.moveAbs(self._focus_out_chamber_view))

        modeconf = self._modes[mode][1]
        for comp_role, conf in modeconf.items():
            # Try to access the component needed
            try:
                comp = self._getComponent(comp_role)
            except LookupError:
                logging.debug("Failed to find component %s, skipping it", comp_role)
                continue

            mv = {}
            for axis, pos in conf.items():
                if axis == "power":
                    if model.hasVA(comp, "power"):
                        try:
                            if pos == 'on':
                                comp.power.value = comp.power.range[1]
                            else:
                                comp.power.value = comp.power.range[0]
                            logging.debug("Updating power of comp %s to %f", comp.name, comp.power.value)
                        except AttributeError:
                            logging.debug("Could not retrieve power range of %s component", comp_role)
                    continue
                if isinstance(pos, str) and pos.startswith("MD:"):
                    pos = self.mdToValue(comp, pos[3:])[axis]
                if axis in comp.axes:
                    if axis == "band":
                        # Handle the filter wheel in a special way. Search
                        # for the key that corresponds to the value, most probably
                        # to the 'pass-through'
                        choices = comp.axes[axis].choices
                        for key, value in choices.items():
                            if value == pos:
                                pos = key
                                # Just to store current band in order to restore
                                # it once we leave this mode
                                if self._last_mode not in ALIGN_MODES:
                                    self._stored[axis] = comp.position.value[axis]
                                break
                        else:
                            logging.debug("Choice %s is not present in %s axis", pos, axis)
                            continue
                    elif axis == "grating":
                        # If mirror is to be used but not found in grating
                        # choices, then we use zero order. In case of
                        # GRATING_NOT_MIRROR we either use the last known
                        # grating or the first grating that is not mirror.
                        choices = comp.axes[axis].choices
                        if pos == "mirror":
                            # Store current grating (if we use one at the moment)
                            # to restore it once we use a normal grating again
                            if choices[comp.position.value[axis]] != "mirror":
                                self._stored[axis] = comp.position.value[axis]
                                self._stored['wavelength'] = comp.position.value['wavelength']
                            # Use the special "mirror" grating, if it exists
                            for key, value in choices.items():
                                if value == "mirror":
                                    pos = key
                                    break
                            else:
                                # Fallback to zero order (aka "low-quality mirror")
                                axis = 'wavelength'
                                pos = 0
                        elif pos == GRATING_NOT_MIRROR:
                            if choices[comp.position.value[axis]] == "mirror":
                                # if there is a grating stored use this one
                                # otherwise find the non-mirror grating
                                if axis in self._stored:
                                    pos = self._stored[axis]
                                else:
                                    pos = self.findNonMirror(choices)
                                if 'wavelength' in self._stored:
                                    mv['wavelength'] = self._stored['wavelength']
                            else:
                                pos = comp.position.value[axis]  # no change
                            try:
                                del self._stored[axis]
                            except KeyError:
                                pass
                            try:
                                del self._stored['wavelength']
                            except KeyError:
                                pass
                        else:
                            logging.debug("Using grating position as-is: '%s'", pos)
                    elif axis == "slit-in":
                        if self._last_mode not in ALIGN_MODES:
                            # TODO: save also the component
                            self._stored[axis] = comp.position.value[axis]
                    elif hasattr(comp.axes[axis], "choices") and isinstance(comp.axes[axis].choices, dict):
                        choices = comp.axes[axis].choices
                        for key, value in choices.items():
                            if value == pos:
                                pos = key
                                break
                    mv[axis] = pos
                else:
                    logging.debug("Not moving axis %s.%s as it is not present", comp_role, axis)

            try:
                fmoves.append(comp.moveAbs(mv))
            except AttributeError:
                logging.debug("%s not an actuator", comp_role)

        # Now take care of the selectors based on the target detector
        fmoves.extend(self.selectorsToPath(target))

        # If we are about to leave alignment modes, restore values
        if self._last_mode in ALIGN_MODES and mode not in ALIGN_MODES:
            if 'band' in self._stored:
                try:
                    flter = self._getComponent("filter")
                    fmoves.append(flter.moveAbs({"band": self._stored['band']}))
                except LookupError:
                    logging.debug("No filter component available")
            if 'slit-in' in self._stored:
                try:
                    spectrograph = self._getComponent("spectrograph")
                    fmoves.append(spectrograph.moveAbs({"slit-in": self._stored['slit-in']}))
                except LookupError:
                    logging.debug("No spectrograph component available")

        # Save last mode
        self._last_mode = mode

        # wait for all the moves to be completed
        for f in fmoves:
            try:
                f.result()
            except IOError as e:
                logging.warning("Actuator move failed giving the error %s", e)

        # When going to chamber view, store the current focus position, and
        # restore the special focus position for chamber, after _really_ all
        # the other moves have finished, because the grating/output selector
        # moves affect the current position of the focus.
        if self._chamber_view_own_focus and mode == "chamber-view":
            focus_comp = self._getComponent("focus")
            self._focus_out_chamber_view = focus_comp.position.value.copy()
            if self._focus_in_chamber_view is not None:
                logging.debug("Restoring focus from previous chamber view to %s",
                              self._focus_in_chamber_view)
                try:
                    focus_comp.moveAbsSync(self._focus_in_chamber_view)
                except IOError as e:
                    logging.warning("Actuator move failed giving the error %s", e)

    def selectorsToPath(self, target):
        """
        Sets the selectors so the optical path leads to the target component
        (usually a detector).
        target (str): component name
        return (list of futures)
        """
        fmoves = []
        for comp in self._actuators:
            # TODO: pre-cache this as comp/target -> axis/pos

            # TODO: extend the path computation to "for every actuator which _affects_
            # the target, move it if the position is known, and update the path to that actuator"?
            # Eg, this would improve path computation on SPARCv2 with fiber aligner
            mv = {}
            for an, ad in comp.axes.items():
                if hasattr(ad, "choices") and isinstance(ad.choices, dict):
                    for pos, value in ad.choices.items():
                        if target in value:
                            # set the position so it points to the target
                            mv[an] = pos

            comp_md = comp.getMetadata()
            if target in comp_md.get(model.MD_FAV_POS_ACTIVE_DEST, {}):
                mv.update(comp_md[model.MD_FAV_POS_ACTIVE])
            elif target in comp_md.get(model.MD_FAV_POS_DEACTIVE_DEST, {}):
                mv.update(comp_md[model.MD_FAV_POS_DEACTIVE])

            if mv:
                logging.debug("Move %s added so %s targets to %s", mv, comp.name, target)
                fmoves.append(comp.moveAbs(mv))
                # make sure this component is also on the optical path
                fmoves.extend(self.selectorsToPath(comp.name))

        return fmoves

    def guessMode(self, guess_stream):
        """
        Guesses and returns the optical path mode corresponding to the given
        stream, based on its components (e.g. the role of its detector).
        guess_stream (Stream): the optical stream to guess the mode for
        returns (str): the estimated mode
        raises:
                LookupError if no mode can be inferred for the given stream
                IOError if given object is not a stream
        """
        if not isinstance(guess_stream, stream.Stream):
            raise IOError("Given object is not a stream")

        # Handle multiple detector streams
        if isinstance(guess_stream, stream.MultipleDetectorStream):
            for st in guess_stream.streams:
                try:
                    return self.guessMode(st)
                except LookupError:
                    pass
        else:
            for mode, conf in self.guessed.items():
                if conf[0] == guess_stream.detector.role:
                    return mode
        # In case no mode was found yet
        raise LookupError("No mode can be inferred for the given stream")

    def getStreamDetector(self, path_stream):
        """
        Given a stream, find its detector.
        path_stream (Stream): the given stream
        returns (str): the detector name
        raises:
                IOError if given object is not a stream
                LookupError: if stream has no detector
        """
        if not isinstance(path_stream, stream.Stream):
            raise IOError("Given object is not a stream")

        # Handle multiple detector streams
        if isinstance(path_stream, stream.MultipleDetectorStream):
            dets = []
            for st in path_stream.streams:
                try:
                    # Prefer the detectors which have a role in the mode, as it's much
                    # more likely to be the optical detector
                    # TODO: handle setting multiple optical paths? => return all the detectors
                    role = st.detector.role
                    name = st.detector.name
                    for conf in self.guessed.values():
                        if conf[0] == role:
                            return name
                    dets.append(name)
                except AttributeError:
                    pass
            if dets:
                logging.warning("No detector on stream %s has a known optical role", path_stream.name.value)
                return dets[0]
        else:
            try:
                return path_stream.detector.name
            except AttributeError:
                pass  # will raise error just after

        raise LookupError("Failed to find a detector on stream %s" % (path_stream.name.value))

    def findNonMirror(self, choices):
        """
        Given a dict of choices, returns the key whose value is not "mirror"
        """
        for key, value in choices.items():
            if value != "mirror":
                return key
        raise ValueError("Cannot find a non-mirror grating in the given choices")

    def mdToValue(self, comp, md_name):
        """
        Retrieves the "md_name" metadata from component "comp"
        """
        md = comp.getMetadata()
        try:
            # md.get() would silently return None, so index to actually get a KeyError
            return md[md_name]
        except KeyError:
            raise KeyError("Metadata %s does not exist in component %s" % (md_name, comp.name))

    def affects(self, affecting, affected):
        """
        Returns True if the "affecting" component affects - directly or
        indirectly - the "affected" component
        """
        return self.findPath(affecting, affected) is not None

    def findPath(self, node1, node2, path=None):
        """
        Find any path between node1 and node2 (it may not be the shortest)
        """
        # Avoid the mutable-default-argument pitfall; build a fresh list per call.
        path = (path or []) + [node1]
        if node1 == node2:
            return path
        if node1 not in self._graph:
            return None
        for node in self._graph[node1]:
            if node not in path:
                new_path = self.findPath(node, node2, path)
                if new_path:
                    return new_path
        return None
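
A note on the pattern above: setPath() starts every hardware move as a future (comp.moveAbs(mv)), collects the futures in fmoves, and only blocks at the end by calling f.result() on each one, so independent axes move concurrently. Below is a minimal, self-contained sketch of the same collect-then-wait pattern with a plain ThreadPoolExecutor; move_axis and the axis names are hypothetical stand-ins, not the Odemis API.

import logging
import time
from concurrent.futures import ThreadPoolExecutor

def move_axis(axis, pos):
    # Hypothetical stand-in for a hardware move; the sleep simulates motion time.
    time.sleep(0.1)
    return axis, pos

pool = ThreadPoolExecutor(max_workers=4)
# Start all the moves first, without blocking...
fmoves = [pool.submit(move_axis, ax, p)
          for ax, p in [("wavelength", 500e-9), ("grating", 1), ("slit-in", 10e-6)]]
# ...then wait once at the end, as setPath() does.
for f in fmoves:
    try:
        f.result()
    except IOError as e:
        logging.warning("Actuator move failed with error %s", e)
pool.shutdown()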
Code example #39
0
File: test_utils.py    Project: leeopop/coexecutor
def do_test3(workers):
    param = {"max_workers": workers}
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    pre_input1 = input_generator(workers, 0)
    pre_input2 = input_generator(workers, max(pre_input1))
    pre_input3 = input_generator(workers, max(pre_input2))

    def result_checker(results, lock, fut):
        # Append the future's result (or the exception it raised) under the lock.
        with lock:
            try:
                results.append(fut.result())
            except Exception as e:
                results.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    tstart = round(time.time() + 1)
    input1 = [tstart + i for i in pre_input1]
    input2 = [tstart + i for i in pre_input2]
    input3 = [tstart + i for i in pre_input3]

    for x in input1:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    result_iter = texec.map(wake_at, input2)
    for x in input3:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    for x in result_iter:
        with lock:
            tresult.append(x)

    texec.shutdown(True)

    pstart = round(time.time() + _start_warm_up)
    input1 = [pstart + i for i in pre_input1]
    input2 = [pstart + i for i in pre_input2]
    input3 = [pstart + i for i in pre_input3]

    for x in input1:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    result_iter = pexec.map(wake_at, input2)
    for x in input3:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    for x in result_iter:
        with lock:
            presult.append(x)

    pexec.shutdown(True)

    cstart = round(time.time() + _start_warm_up)
    input1 = [cstart + i for i in pre_input1]
    input2 = [cstart + i for i in pre_input2]
    input3 = [cstart + i for i in pre_input3]

    async def async_main():
        for x in input1:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        result_iter = cexec.map(async_wake_at, input2)
        for x in input3:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        async for x in result_iter:
            with lock:
                cresult.append(x)
        await cexec.shutdown(False)

    loop.run_until_complete(async_main())

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - tstart) / _precision) for x in tresult]
    presult = [round((x - pstart) / _precision) for x in presult]
    cresult = [round((x - cstart) / _precision) for x in cresult]

    for (t, p, c) in zip(tresult, presult, cresult):
        # All three executors must wake at identical (bucketed) times.
        if not (t == p == c):
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return True
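
do_test3 (and do_test1 below) depend on helpers defined elsewhere in test_utils.py: wake_at, async_wake_at, input_generator, _start_warm_up and _precision. The originals are not shown here; the following is a plausible reconstruction consistent with how they are used, and the constant values and exact offsets are assumptions.

import asyncio
import time

_start_warm_up = 2    # seconds of headroom before the first wake-up (assumed value)
_precision = 0.5      # bucket size used when comparing wake-up times (assumed value)

def input_generator(workers, start):
    # One staggered wake-up offset per worker, starting after `start` (assumed shape).
    return [start + (i + 1) * _precision for i in range(workers)]

def wake_at(deadline):
    # Sleep until the absolute time `deadline`, then report the actual wake-up time.
    time.sleep(max(0.0, deadline - time.time()))
    return time.time()

async def async_wake_at(deadline):
    # Coroutine variant of wake_at, for the CoroutinePoolExecutor.
    await asyncio.sleep(max(0.0, deadline - time.time()))
    return time.time()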
Code example #40
0
File: test_utils.py    Project: leeopop/coexecutor
def do_test1(workers):
    param = {"max_workers": workers}
    start = round(time.time() + _start_warm_up)
    input = input_generator(workers, start)
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    def result_checker(results, lock, fut):
        # Append the future's result (or the exception it raised) under the lock.
        with lock:
            try:
                results.append(fut.result())
            except Exception as e:
                results.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    for x in input:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))

        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))

        future = cexec.submit(async_wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, cresult, lock))

    # Non-blocking shutdown first; the blocking shutdown below waits for completion.
    texec.shutdown(False)
    pexec.shutdown(False)
    loop.run_until_complete(cexec.shutdown(False))

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - start) / _precision) for x in tresult]
    presult = [round((x - start) / _precision) for x in presult]
    cresult = [round((x - start) / _precision) for x in cresult]

    for (t, p, c) in zip(tresult, presult, cresult):
        # All three executors must wake at identical (bucketed) times.
        if not (t == p == c):
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return True
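
Both tests return True on success and assert on the first mismatch. A hypothetical driver, not part of the original file, that exercises a few pool sizes could look like this (it assumes the helpers above and CoroutinePoolExecutor are importable):

if __name__ == "__main__":
    for n in (1, 2, 4):
        assert do_test1(n)
        assert do_test3(n)
    print("thread, process and coroutine executors produced identical schedules")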