Example #1
def _is_event_loop():  # pragma: no cover
    import asyncio
    import sys

    try:
        if sys.version_info >= (3, 7):
            asyncio.get_running_loop()
        else:
            asyncio.get_event_loop()
    except RuntimeError:
        return False
    else:
        return True
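A quick demonstration of how this helper might behave (hypothetical driver code; assumes _is_event_loop is importable):

import asyncio

async def demo():
    print(_is_event_loop())  # True: a loop is running in this coroutine

print(_is_event_loop())      # False on Python 3.7+: no loop is running yet
asyncio.run(demo())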
Example #2
        async def main():
            loop = asyncio.get_running_loop()
            loop.call_exception_handler = mock.Mock()

            nonlocal lazyboy
            lazyboy = asyncio.create_task(spin())
            raise FancyExit
Example #3
File: coro.py Project: vivisect/synapse
def executor(func, *args, **kwargs):
    '''
    Execute a non-coroutine function in the ioloop executor pool.

    Args:
        func: Function to execute.
        *args: Args for the function.
        **kwargs: Kwargs for the function.

    Examples:

        Execute a blocking API call in the executor pool::

            import requests

            def block(url, params=None):
                return requests.get(url, params=params).json()

            fut = s_coro.executor(block, 'http://some.tld/thign')
            resp = await fut

    Returns:
        asyncio.Future: An asyncio future.
    '''

    def real():
        return func(*args, **kwargs)

    return asyncio.get_running_loop().run_in_executor(None, real)
Example #4
File: glob.py Project: vivisect/synapse
def initloop():

    global _glob_loop
    global _glob_thrd

    # if there's no global loop....
    if _glob_loop is None:

        # check if it's us....
        try:
            _glob_loop = asyncio.get_running_loop()
            # if we get here, it's us!
            _glob_thrd = threading.current_thread()

        except RuntimeError:

            # otherwise, lets fire one...
            _glob_loop = asyncio.new_event_loop()
            greedy_threshold = os.environ.get('SYN_GREEDY_CORO')
            if greedy_threshold is not None:
                _glob_loop.slow_callback_duration = float(greedy_threshold)

            _glob_thrd = threading.Thread(target=_glob_loop.run_forever, name='SynLoop')
            _glob_thrd.daemon = True
            _glob_thrd.start()

    return _glob_loop
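A sketch of how plain synchronous code might hand work to the global loop started by initloop (hypothetical caller; asyncio.run_coroutine_threadsafe is the standard bridge from foreign threads):

import asyncio

async def hello():
    return 'hi'

loop = initloop()
fut = asyncio.run_coroutine_threadsafe(hello(), loop)
print(fut.result(timeout=5))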
Example #5
File: base.py Project: vivisect/synapse
    async def __anit__(self):

        self.loop = asyncio.get_running_loop()
        if __debug__:
            import synapse.lib.threads as s_threads  # avoid import cycle
            self.tid = s_threads.iden()
            self.call_stack = traceback.format_stack()  # For cleanup debugging

        self.isfini = False
        self.anitted = True  # For assertion purposes
        self.finievt = None
        self.entered = False
        self.exitinfo = None
        self.exitok = None

        # hold a weak ref to other bases we should fini if they
        # are still around when we go down...
        self.tofini = weakref.WeakSet()

        self._syn_funcs = collections.defaultdict(list)

        self._syn_refs = 1  # one ref for the ctor
        self._syn_links = []
        self._fini_funcs = []
        self._fini_atexit = False
        self._active_tasks = set()  # the free running tasks associated with me
Example #6
        async def main():
            loop = asyncio.get_running_loop()
            loop.call_exception_handler = call_exc_handler_mock

            nonlocal lo_task
            lo_task = asyncio.create_task(leftover())
            return 123
Example #7
async def display_date():
    loop = asyncio.get_running_loop()
    end_time = loop.time() + 5.0
    while True:
        print(datetime.datetime.now())
        if (loop.time() + 1.0) >= end_time:
            break
        await asyncio.sleep(1)
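A minimal driver for this coroutine (assuming the imports the snippet relies on):

import asyncio
import datetime

asyncio.run(display_date())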
Example #8
def get_running_loop():
    try:
        import asyncio
        return asyncio.get_running_loop()
    except AttributeError:  # 3.5 / 3.6
        loop = asyncio._get_running_loop()  # pylint: disable=protected-access
        if loop is None:
            raise RuntimeError('No running event loop')
        return loop
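A small usage sketch for this shim (hedged; the run_until_complete driver keeps it working on 3.5/3.6, where asyncio.run does not exist):

import asyncio

async def demo():
    loop = get_running_loop()
    print(loop.time())

asyncio.get_event_loop().run_until_complete(demo())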
Example #9
    async def get_password(self, prompt):
        if prompt in self.passwords:
            return await self.passwords[prompt]

        loop = asyncio.get_running_loop()
        fut = loop.create_future()
        self.passwords[prompt] = fut
        await loop.run_in_executor(None, self.__get_password, fut, prompt)
        return await fut
Example #10
    def __init__(self, fd):
        self.fd = fd
        self.queue = asyncio.Queue()

        def done():
            while not self.queue.empty():
                self.queue.get_nowait().set_result(None)
                self.queue.task_done()

        self.loop = asyncio.get_running_loop()
        self.loop.add_reader(self.fd, done)
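The class never shows how futures get into the queue; a plausible companion method (an assumption, not part of the original snippet) would park one future per waiter for the done() callback to resolve:

    async def wait_readable(self):
        # hypothetical consumer: enqueue a future, resolved by done() above
        fut = self.loop.create_future()
        await self.queue.put(fut)
        await fut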
Example #11
File: base.py Project: vivisect/synapse
    async def __aexit__(self, exc, cls, tb):
        # Either there should be no running loop or we shall be on the right one
        try:
            assert asyncio.get_running_loop() == self.loop
        except RuntimeError:
            pass

        self.exitok = cls is None
        self.exitinfo = (exc, cls, tb)
        await self.fini()
Example #12
async def main():
    loop = asyncio.get_running_loop()
    server = await loop.create_server(
        lambda: UpcaseServerProtocol(),
        port=44444,
        reuse_port=True,
        reuse_address=True,
    )
    async with server:
        await server.serve_forever()
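UpcaseServerProtocol is not shown here; a minimal stand-in (an assumed implementation for illustration) that echoes each chunk upper-cased could look like:

import asyncio

class UpcaseServerProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        # echo the received bytes back, upper-cased
        self.transport.write(data.upper())

asyncio.run(main())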
Example #13
File: task.py Project: vivisect/synapse
async def executor(func, *args, **kwargs):
    '''
    Execute a function in an executor thread.

    Args:
        func: The function to execute.
        *args: Args for the function.
        **kwargs: Kwargs for the function.
    '''
    def syncfunc():
        return func(*args, **kwargs)

    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, syncfunc)
Example #14
async def run(task, timeout=2):
    loop = asyncio.get_running_loop()
    result = None
    ev = asyncio.Event()
    loop.add_signal_handler(signal.SIGINT, ev.set)
    done, pending = await asyncio.wait(
        [asyncio.create_task(task()), asyncio.create_task(ev.wait())],
        timeout=timeout, return_when=asyncio.FIRST_COMPLETED
    )
    if done:
        result = "done"
    else:
        result = "timeout"
    print(done, pending)
    print(f"@@result {result} is_cancel={ev.is_set()}")
Example #15
File: base.py Project: vivisect/synapse
    async def addSignalHandlers(self):
        '''
        Register SIGTERM/SIGINT signal handlers with the ioloop to fini this object.
        '''

        def sigterm():
            print('Caught SIGTERM, shutting down.')
            asyncio.create_task(self.fini())

        def sigint():
            print('Caught SIGINT, shutting down.')
            asyncio.create_task(self.fini())

        loop = asyncio.get_running_loop()
        loop.add_signal_handler(signal.SIGINT, sigint)
        loop.add_signal_handler(signal.SIGTERM, sigterm)
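A sketch of wiring this into an entry point, using the Base.anit()/waitfini() calls seen elsewhere on this page (note that loop.add_signal_handler is Unix-only):

import asyncio

async def main():
    base = await Base.anit()          # constructor pattern from this project
    await base.addSignalHandlers()
    await base.waitfini()             # block until fini() runs from a signal

asyncio.run(main())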
Example #16
    async def test_base_waitfini(self):
        loop = asyncio.get_running_loop()

        base = await s_base.Base.anit()

        self.false(await base.waitfini(timeout=0.1))

        async def callfini():
            await asyncio.sleep(0.1)
            await base.fini()

        loop.create_task(callfini())
        # actually wait...
        self.true(await base.waitfini(timeout=0.3))
        self.true(base.isfini)

        # bounce off the isfini block
        self.true(await base.waitfini(timeout=0.3))
Example #17
    async def test_pullfile(self):

        async with self.getTestAxon() as axon:

            axonurl = axon.getLocalUrl()

            testhash = hashlib.sha256(b'test').hexdigest()
            visihash = hashlib.sha256(b'visi').hexdigest()
            nonehash = hashlib.sha256(b'none').hexdigest()

            testbash = hashlib.sha256(b'test').digest()
            visibash = hashlib.sha256(b'visi').digest()

            self.eq(((4, visibash), (4, testbash)), await axon.puts([b'visi', b'test']))

            def pullfile():

                with self.getTestDir() as wdir:

                    outp = self.getTestOutp()
                    self.eq(0, s_pullfile.main(['-a', axonurl,
                                                '-o', wdir,
                                                '-l', testhash,
                                                '-l', nonehash], outp))
                    oldcwd = os.getcwd()
                    os.chdir(wdir)
                    self.eq(0, s_pullfile.main(['-a', axonurl,
                                                '-l', visihash], outp))

                    os.chdir(oldcwd)

                    with open(pathlib.Path(wdir, testhash), 'rb') as fd:
                        self.eq(b'test', fd.read())

                    with open(pathlib.Path(wdir, visihash), 'rb') as fd:
                        self.eq(b'visi', fd.read())

                    self.true(outp.expect(f'b\'{nonehash}\' not in axon store'))
                    self.true(outp.expect(f'Fetching {testhash} to file'))
                    self.true(outp.expect(f'Fetching {visihash} to file'))

            loop = asyncio.get_running_loop()
            await loop.run_in_executor(None, pullfile)
Example #18
def sendto(sock, data, addr, ttl, fut=None, registered=False):
    loop = asyncio.get_running_loop()
    fd = sock.fileno()
    if fut is None:
        fut = loop.create_future()
    if registered:
        loop.remove_writer(fd)
    if not data:
        return
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, ttl)

    try:
        n = sock.sendto(data, addr)
        logging.error(f"Sent {n} bytes with TTL {ttl}.")
    except (BlockingIOError, InterruptedError):
        loop.add_writer(fd, sendto, sock, data, addr, ttl, fut, True)
    else:
        fut.set_result(n)
    return fut
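A hedged usage sketch (hypothetical address and payload; assumes a non-blocking UDP socket):

import asyncio
import socket

async def demo():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setblocking(False)
    n = await sendto(sock, b'ping', ('127.0.0.1', 9999), ttl=4)
    print(f'sent {n} bytes')

asyncio.run(demo())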
Example #19
async def icmp_recv(expected=3):
    # Get a reference to the current event loop because
    # we want to access low-level APIs.
    loop = asyncio.get_running_loop()

    # Listen for ICMP packets
    sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.setblocking(False)
    sock.bind(('', 1))

    for i in range(expected):
        # Wait for data
        data = await loop.sock_recv(sock, 1024)

        # ip header is the first 20 bytes
        ip_header = data[:20]

        type_ = struct.unpack("!B", data[20:21])[0]
        code = struct.unpack("!B", data[21:22])[0]
        checksum = data[22:24]
        nh_mtu = None
        # TODO: Check if type 3, 11 MUST be 56 bytes
        if type_ == 3:
            nh_mtu = struct.unpack("!H", data[26:28])[0]
            logging.error(f"Next-hop MTU: {nh_mtu}")
        if type_ == 11:
            icmp_ip_recv_header = data[28:48]
            icmp_udp_src_port = data[48:50]
            icmp_udp_dst_port = data[50:52]
            icmp_udp_len = struct.unpack("!H", data[52:54])[0]
            icmp_udp_checksum = data[54:56]
            icmp_udp_data_len = icmp_udp_len - 8
            # icmp_recv_data = struct.unpack("!Q", data[48:56])[0]
            icmp_udp_data = data[56:56 + icmp_udp_data_len]
            logging.error(f"ICMP payload: {icmp_udp_data}")
        ip_src = socket.inet_ntop(socket.AF_INET, ip_header[12:16])
        logging.error(f"Reply from: {ip_src}, ICMP type: {type_}")
Example #20
async def main(args=None):
    if args is None:
        args = sys.argv[1:]

    args, rest = parse_args(args)
    Filter = create_filter_class(args)

    loop = aio.get_running_loop()
    stop_event = aio.Event()
    loop.add_signal_handler(signal.SIGINT, lambda: stop_event.set())

    p = await spawn(rest)

    async with Watcher() as watcher:
        async for changes in watcher(args.path, stop_event=stop_event,
                                     filter_class=Filter):
            print(changes)

            await kill(p)
            p = await spawn(rest)

    await kill(p)

    return 0
Example #21
    async def go(self):
        headers = {"Content-Type": "application/octet-stream"}
        async with aiohttp.ClientSession(headers=headers) as session:
            self.session = session

            proc = None
            self.code_analysis_port = None
            if self.remove_comments_enabled:
                ready = False

                for _ in range(7):
                    try:
                        self.code_analysis_port = utils.get_free_tcp_port()
                        proc = subprocess.Popen(
                            [
                                "rust-code-analysis-cli",
                                "--serve",
                                "--port",
                                str(self.code_analysis_port),
                            ]
                        )
                    except FileNotFoundError:
                        raise Exception(
                            "rust-code-analysis is required for comment removal"
                        )

                    for _ in range(7):
                        try:
                            await self.session.get(
                                f"http://localhost:{self.code_analysis_port}/ping",
                                raise_for_status=True,
                            )
                            ready = True
                            break
                        except Exception:
                            if proc.poll() is not None:
                                break

                            await asyncio.sleep(1)  # don't block the running event loop while polling

                    if ready:
                        break

                assert ready, "rust-code-analysis should be able to start"

            if os.path.exists(self.repo_out_dir):
                self.repo = pygit2.Repository(self.repo_out_dir)
                try:
                    last_commit_hash = utils.get_original_hash(self.repo, "HEAD")
                    self.rev_start = f"children({last_commit_hash})"
                except KeyError:
                    pass
            else:
                os.makedirs(self.repo_out_dir)
                self.repo = pygit2.init_repository(self.repo_out_dir)

            with hglib.open(self.repo_dir) as hg:
                revs = get_revs(hg, self.rev_start, self.rev_end)

            all_commits_done = True
            if self.limit is not None:
                if len(revs) > self.limit:
                    all_commits_done = False

                revs = revs[: self.limit]

            logger.info(f"Mining {len(revs)} commits...")

            cwd = os.getcwd()
            os.chdir(self.repo_dir)

            CHUNK_SIZE = 256
            revs_groups = [
                revs[i : (i + CHUNK_SIZE)] for i in range(0, len(revs), CHUNK_SIZE)
            ]

            with concurrent.futures.ThreadPoolExecutor(
                initializer=_init_thread, max_workers=os.cpu_count() + 1
            ) as executor:
                commits = executor.map(_hg_log, revs_groups)
                commits = tqdm(commits, total=len(revs_groups))
                commits = list(itertools.chain.from_iterable(commits))

                commits_num = len(commits)

                logger.info(f"Converting {commits_num} commits...")

                loop = asyncio.get_running_loop()
                loop.set_default_executor(executor)

                with hglib.open(".") as hg:
                    with open("errors.txt", "a", buffering=1) as f:
                        for commit in tqdm(commits):
                            try:
                                await self.convert(hg, commit)
                            except Exception as e:
                                logger.error(f"Error during transformation: {e}")
                                traceback.print_exc()
                                f.write(f"{commit.node} - {commit.parents}\n")

            os.chdir(cwd)

            while len(hg_servers) > 0:
                hg_server = hg_servers.pop()
                hg_server.close()

            if proc is not None:
                proc.terminate()

            return all_commits_done
Example #22
    def __init__(self, fd, reg_num):
        self.fd = fd
        self.reg_num = reg_num
        self.db = None
        self.loop = asyncio.get_running_loop()
Example #23
    def closeEvent(self, event: QCloseEvent):
        for task in asyncio.all_tasks():
            task.cancel()

        asyncio.get_running_loop().stop()
Example #24
    def __init__(self):
        loop = asyncio.get_running_loop()
        self.timeout_handle = loop.call_later(
            TIMEOUT,
            self._timeout,
        )
Example #25
async def work():
    loop = asyncio.get_running_loop()
    threadPool = ThreadPoolExecutor(max_workers=10)
    fileReadingSem = asyncio.Semaphore(8)

    def LoadPetCard(cardPath):
        with open(cardPath, mode='r') as f:
            contents = f.read()
            parsed = json.loads(contents)
            return parsed

    async def LoadPetCardAsync(cardPath):
        try:
            await fileReadingSem.acquire()
            return await loop.run_in_executor(threadPool, LoadPetCard, cardPath)
        finally:
            fileReadingSem.release()

    async def GetPetCardImageInfo(petDirPath):
        cardPath = os.path.join(petDirPath, "card.json")
        # LoadPetCardAsync acquires fileReadingSem itself; acquiring it here
        # as well can deadlock once every permit is held by a task waiting to
        # re-acquire it, so the card is loaded without double-locking.
        card = await LoadPetCardAsync(cardPath)
        #print("card is {0}. type {1}".format(card,type(card)))
        petStr = "unknown"
        typeStr = "unknown"
        sexStr = "unknown"
        pet = card['pet']
        if pet['animal'] == "2":
            petStr = "cat"
        elif pet['animal'] == "1":
            petStr = "dog"
        if pet['sex'] == "2":
            sexStr = "male"
        elif pet['sex'] == "3":
            sexStr = "female"

        if pet['art'][:2] == "rl":
            typeStr = "lost"
        else:
            typeStr = "found"
        imageFiles = [
            x for x in os.listdir(petDirPath)
            if x.endswith(".png") or x.endswith(".jpg")
        ]

        return {
            "cardId": pet['art'],
            "cardType": typeStr,
            "species": petStr,
            "sex": sexStr,
            "photoCount": len(imageFiles)
        }

    petDirs = [
        os.path.join(dbPath, x) for x in os.listdir(dbPath)
        if os.path.isdir(os.path.join(dbPath, x))
    ]
    #petDirs = petDirs[0:4096]
    print("Found {0} pet directories".format(len(petDirs)))

    tasks = [
        asyncio.create_task(GetPetCardImageInfo(petDir)) for petDir in petDirs
    ]
    gatheredResults = []
    for coro in tqdm(asyncio.as_completed(tasks),
                     desc="Pets processed",
                     total=len(petDirs),
                     ascii=True):
        taskRes = await coro
        gatheredResults.append(taskRes)
    print("Analyzed {0} images. Dumping to CSV summary".format(
        len(gatheredResults)))
    df1 = pd.DataFrame.from_records(gatheredResults)
    df1.to_csv(outFile, index=False)
    print("Done")
Example #26
    async def _wrapper(*args: Any, **kwargs: Any) -> Any:
        loop = asyncio.get_running_loop()
        pfunc = partial(func, *args, **kwargs)
        result = await loop.run_in_executor(None, pfunc)
        return result
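This _wrapper is only the inner half of a decorator; a sketch of the enclosing function (the name force_async is an assumption, not from the original source):

import asyncio
from functools import partial, wraps
from typing import Any, Callable

def force_async(func: Callable[..., Any]) -> Callable[..., Any]:
    """Run a blocking callable in the default executor of the running loop."""
    @wraps(func)
    async def _wrapper(*args: Any, **kwargs: Any) -> Any:
        loop = asyncio.get_running_loop()
        pfunc = partial(func, *args, **kwargs)
        return await loop.run_in_executor(None, pfunc)
    return _wrapper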
Example #27
    def __init__(  # pylint: disable=super-init-not-called
            self, *args: Any, **kwargs: Any) -> None:
        """Wrap AsyncZeroconf."""
        self.zeroconf = HaZeroconf(*args, **kwargs)
        self.loop = asyncio.get_running_loop()
Example #28
    async def __aenter__(self):
        assert asyncio.get_running_loop() == self.loop
        self.entered = True
        return self
Example #29
async def run_blocking(func):
    "run blocking funcion in async executor"
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, func)
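Because run_blocking takes a zero-argument callable, arguments are usually bound first (a usage sketch; block_io is a hypothetical blocking function):

import functools

async def demo():
    result = await run_blocking(functools.partial(block_io, '/tmp/data'))
    print(result)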
Example #30
    async def print_events(device):
        async for event in device.async_read_loop():
            cat_event = evdev.categorize(event)
            if isinstance(cat_event, evdev.KeyEvent):
                asyncio.get_running_loop().stop()
                return cat_event
Example #31
async def __tts(count):
    # TTS text: "There seem to be {count} people in front of me"
    await PlayTTS(text=f'在我面前好像有{count}个人').execute()
    asyncio.get_running_loop().run_in_executor(None,
                                               asyncio.get_running_loop().stop)
Example #32
async def _unittest_slow_presentation_pub_sub_anon(
        generated_packages: typing.List[pyuavcan.dsdl.GeneratedPackageInfo],
        transport_factory: TransportFactory) -> None:
    assert generated_packages
    import uavcan.node
    from pyuavcan.transport import Priority

    asyncio.get_running_loop().slow_callback_duration = 5.0

    tran_a, tran_b, transmits_anon = transport_factory(None, None)
    assert tran_a.local_node_id is None
    assert tran_b.local_node_id is None

    pres_a = pyuavcan.presentation.Presentation(tran_a)
    pres_b = pyuavcan.presentation.Presentation(tran_b)

    assert pres_a.transport is tran_a

    sub_heart = pres_b.make_subscriber_with_fixed_subject_id(
        uavcan.node.Heartbeat_1_0)

    with pytest.raises(TypeError):
        # noinspection PyTypeChecker
        pres_a.make_client_with_fixed_service_id(uavcan.node.Heartbeat_1_0,
                                                 123)  # type: ignore
    with pytest.raises(TypeError):
        # noinspection PyTypeChecker
        pres_a.get_server_with_fixed_service_id(
            uavcan.node.Heartbeat_1_0)  # type: ignore

    if transmits_anon:
        pub_heart = pres_a.make_publisher_with_fixed_subject_id(
            uavcan.node.Heartbeat_1_0)
    else:
        with pytest.raises(
                pyuavcan.transport.OperationNotDefinedForAnonymousNodeError):
            pres_a.make_publisher_with_fixed_subject_id(
                uavcan.node.Heartbeat_1_0)
        pres_a.close()
        pres_b.close()
        return  # The test ends here.

    assert pub_heart._maybe_impl is not None  # pylint: disable=protected-access
    assert pub_heart._maybe_impl.proxy_count == 1  # pylint: disable=protected-access
    pub_heart_new = pres_a.make_publisher_with_fixed_subject_id(
        uavcan.node.Heartbeat_1_0)
    assert pub_heart_new._maybe_impl is not None  # pylint: disable=protected-access
    assert pub_heart is not pub_heart_new
    assert pub_heart._maybe_impl is pub_heart_new._maybe_impl  # pylint: disable=protected-access
    assert pub_heart._maybe_impl.proxy_count == 2  # pylint: disable=protected-access
    pub_heart_new.close()
    del pub_heart_new
    assert pub_heart._maybe_impl.proxy_count == 1  # pylint: disable=protected-access

    pub_heart_impl_old = pub_heart._maybe_impl  # pylint: disable=protected-access
    pub_heart.close()
    assert pub_heart_impl_old.proxy_count == 0

    pub_heart = pres_a.make_publisher_with_fixed_subject_id(
        uavcan.node.Heartbeat_1_0)
    assert pub_heart._maybe_impl is not pub_heart_impl_old  # pylint: disable=protected-access

    assert pub_heart.transport_session.destination_node_id is None
    assert sub_heart.transport_session.specifier.data_specifier == pub_heart.transport_session.specifier.data_specifier
    assert pub_heart.port_id == pyuavcan.dsdl.get_fixed_port_id(
        uavcan.node.Heartbeat_1_0)
    assert sub_heart.dtype is uavcan.node.Heartbeat_1_0

    heart = uavcan.node.Heartbeat_1_0(
        uptime=123456,
        health=uavcan.node.Health_1_0(uavcan.node.Health_1_0.CAUTION),
        mode=uavcan.node.Mode_1_0(uavcan.node.Mode_1_0.OPERATIONAL),
        vendor_specific_status_code=0xC0,
    )
    assert pub_heart.priority == pyuavcan.presentation.DEFAULT_PRIORITY
    pub_heart.priority = Priority.SLOW
    assert pub_heart.priority == Priority.SLOW
    await pub_heart.publish(heart)

    item = await sub_heart.receive_for(1)
    assert item
    rx, transfer = item  # type: typing.Any, pyuavcan.transport.TransferFrom
    assert repr(rx) == repr(heart)
    assert transfer.source_node_id is None
    assert transfer.priority == Priority.SLOW
    assert transfer.transfer_id == 0

    stat = sub_heart.sample_statistics()
    # Remember that anonymous transfers over redundant transports are NOT deduplicated.
    # Hence, to support the case of redundant transports, we use 'greater or equal' here.
    assert stat.transport_session.transfers >= 1
    assert stat.transport_session.frames >= 1
    assert stat.transport_session.drops == 0
    assert stat.deserialization_failures == 0
    assert stat.messages >= 1

    pres_a.close()
    pres_a.close()  # Double-close has no effect
    pres_b.close()
    pres_b.close()  # Double-close has no effect

    # Make sure the transport sessions have been closed properly, this is supremely important.
    assert list(pres_a.transport.input_sessions) == []
    assert list(pres_b.transport.input_sessions) == []
    assert list(pres_a.transport.output_sessions) == []
    assert list(pres_b.transport.output_sessions) == []

    await asyncio.sleep(
        1
    )  # Let all pending tasks finalize properly to avoid stack traces in the output.
Example #33
async def _unittest_slow_presentation_pub_sub(
        generated_packages: typing.List[pyuavcan.dsdl.GeneratedPackageInfo],
        transport_factory: TransportFactory) -> None:
    assert generated_packages
    import uavcan.node
    from test_dsdl_namespace.numpy import Complex_254_255
    from pyuavcan.transport import Priority

    asyncio.get_running_loop().slow_callback_duration = 5.0

    tran_a, tran_b, _ = transport_factory(123, 42)
    assert tran_a.local_node_id == 123
    assert tran_b.local_node_id == 42

    pres_a = pyuavcan.presentation.Presentation(tran_a)
    pres_b = pyuavcan.presentation.Presentation(tran_b)

    assert pres_a.transport is tran_a

    pub_heart = pres_a.make_publisher_with_fixed_subject_id(
        uavcan.node.Heartbeat_1_0)
    sub_heart = pres_b.make_subscriber_with_fixed_subject_id(
        uavcan.node.Heartbeat_1_0)

    pub_record = pres_b.make_publisher(Complex_254_255, 2222)
    sub_record = pres_a.make_subscriber(Complex_254_255, 2222)
    sub_record2 = pres_a.make_subscriber(Complex_254_255, 2222)

    heart = uavcan.node.Heartbeat_1_0(
        uptime=123456,
        health=uavcan.node.Health_1_0(uavcan.node.Health_1_0.CAUTION),
        mode=uavcan.node.Mode_1_0(uavcan.node.Mode_1_0.OPERATIONAL),
        vendor_specific_status_code=0xC0,
    )

    pub_heart.transfer_id_counter.override(23)
    await pub_heart.publish(heart)
    item = await sub_heart.receive(asyncio.get_running_loop().time() + 1)
    assert item
    rx, transfer = item  # type: typing.Any, pyuavcan.transport.TransferFrom
    assert repr(rx) == repr(heart)
    assert transfer.source_node_id == 123
    assert transfer.priority == Priority.NOMINAL
    assert transfer.transfer_id == 23

    stat = sub_heart.sample_statistics()
    assert stat.transport_session.transfers == 1
    assert stat.transport_session.frames >= 1  # 'greater' is needed to accommodate redundant transports.
    assert stat.transport_session.drops == 0
    assert stat.deserialization_failures == 0
    assert stat.messages == 1

    await pub_heart.publish(heart)
    item = await sub_heart.receive(asyncio.get_running_loop().time() + 1)
    assert item
    rx, _ = item
    assert repr(rx) == repr(heart)

    await pub_heart.publish(heart)
    rx = (await sub_heart.receive(asyncio.get_running_loop().time() +
                                  _RX_TIMEOUT))[0]  # type: ignore
    assert repr(rx) == repr(heart)
    rx = await sub_heart.receive_for(_RX_TIMEOUT)
    assert rx is None

    sub_heart.close()
    sub_heart.close()  # Shall not raise.

    handler_output: typing.List[typing.Tuple[
        Complex_254_255, pyuavcan.transport.TransferFrom]] = []

    async def handler(message: Complex_254_255,
                      cb_transfer: pyuavcan.transport.TransferFrom) -> None:
        print("HANDLER:", message, cb_transfer)
        handler_output.append((message, cb_transfer))

    sub_record2.receive_in_background(handler)

    record = Complex_254_255(bytes_=[1, 2, 3, 1])
    assert pub_record.priority == pyuavcan.presentation.DEFAULT_PRIORITY
    pub_record.priority = Priority.NOMINAL
    assert pub_record.priority == Priority.NOMINAL
    with pytest.raises(TypeError, match=".*Heartbeat.*"):
        # noinspection PyTypeChecker
        await pub_heart.publish(record)  # type: ignore

    pub_record.publish_soon(record)
    await asyncio.sleep(
        0.1)  # Needed to make the deferred publication get the message out
    item2 = await sub_record.receive(asyncio.get_running_loop().time() + 1)
    assert item2
    rx, transfer = item2
    assert repr(rx) == repr(record)
    assert transfer.source_node_id == 42
    assert transfer.priority == Priority.NOMINAL
    assert transfer.transfer_id == 0

    # Broken transfer
    stat = sub_record.sample_statistics()
    assert stat.transport_session.transfers == 1
    assert stat.transport_session.frames >= 1  # 'greater' is needed to accommodate redundant transports.
    assert stat.transport_session.drops == 0
    assert stat.deserialization_failures == 0
    assert stat.messages == 1

    await pub_record.transport_session.send(
        pyuavcan.transport.Transfer(
            timestamp=pyuavcan.transport.Timestamp.now(),
            priority=Priority.NOMINAL,
            transfer_id=12,
            fragmented_payload=[memoryview(b"\xFF" * 15)
                                ],  # Invalid union tag.
        ),
        tran_a.loop.time() + 1.0,
    )
    assert (await sub_record.receive(asyncio.get_running_loop().time() +
                                     _RX_TIMEOUT)) is None

    stat = sub_record.sample_statistics()
    assert stat.transport_session.transfers == 2
    assert stat.transport_session.frames >= 2  # 'greater' is needed to accommodate redundant transports.
    assert stat.transport_session.drops == 0
    assert stat.deserialization_failures == 1
    assert stat.messages == 1

    # Close the objects explicitly and ensure that they are finalized. This also removes the warnings that some tasks
    # have been removed while pending.
    pub_heart.close()
    sub_record.close()
    sub_record2.close()
    pub_record.close()
    await asyncio.sleep(1.1)

    pres_a.close()
    pres_a.close()  # Double-close has no effect
    pres_b.close()
    pres_b.close()  # Double-close has no effect

    # Make sure the transport sessions have been closed properly, this is supremely important.
    assert list(pres_a.transport.input_sessions) == []
    assert list(pres_b.transport.input_sessions) == []
    assert list(pres_a.transport.output_sessions) == []
    assert list(pres_b.transport.output_sessions) == []

    assert len(handler_output) == 1
    assert repr(handler_output[0][0]) == repr(record)
    assert handler_output[0][1].source_node_id == 42
    assert handler_output[0][1].transfer_id == 0
    assert handler_output[0][1].priority == Priority.NOMINAL

    await asyncio.sleep(
        1
    )  # Let all pending tasks finalize properly to avoid stack traces in the output.
Example #34
File: sblocks2.py Project: xitop/edzed
    async def __call__(self, *args):
        loop = asyncio.get_running_loop()
        with self._executor() as pool:
            return await loop.run_in_executor(pool, self._func, *args)
Example #35
async def _unittest_slow_node(
    generated_packages: typing.List[pyuavcan.dsdl.GeneratedPackageInfo]
) -> None:
    from pyuavcan.application import Node
    from uavcan.node import Version_1_0, Heartbeat_1_0, GetInfo_1_0, Mode_1_0, Health_1_0

    asyncio.get_running_loop().slow_callback_duration = 3.0

    assert generated_packages
    remote_pres = Presentation(UDPTransport("127.1.1.1"))
    remote_hb_sub = remote_pres.make_subscriber_with_fixed_subject_id(
        Heartbeat_1_0)
    remote_info_cln = remote_pres.make_client_with_fixed_service_id(
        GetInfo_1_0, 258)

    trans = RedundantTransport()
    pres = Presentation(trans)
    try:
        info = GetInfo_1_0.Response(
            protocol_version=Version_1_0(
                *pyuavcan.UAVCAN_SPECIFICATION_VERSION),
            software_version=Version_1_0(*pyuavcan.__version_info__[:2]),
            name="org.uavcan.pyuavcan.test.node",
        )
        node = Node(pres, info, with_diagnostic_subscriber=True)
        print("node:", node)
        assert node.presentation is pres
        node.start()
        node.start()  # Idempotency

        node.heartbeat_publisher.priority = pyuavcan.transport.Priority.FAST
        node.heartbeat_publisher.period = 0.5
        node.heartbeat_publisher.mode = Mode_1_0.MAINTENANCE  # type: ignore
        node.heartbeat_publisher.health = Health_1_0.ADVISORY  # type: ignore
        node.heartbeat_publisher.vendor_specific_status_code = 93
        with pytest.raises(ValueError):
            node.heartbeat_publisher.period = 99.0
        with pytest.raises(ValueError):
            node.heartbeat_publisher.vendor_specific_status_code = -299

        assert node.heartbeat_publisher.priority == pyuavcan.transport.Priority.FAST
        assert node.heartbeat_publisher.period == pytest.approx(0.5)
        assert node.heartbeat_publisher.mode == Mode_1_0.MAINTENANCE
        assert node.heartbeat_publisher.health == Health_1_0.ADVISORY
        assert node.heartbeat_publisher.vendor_specific_status_code == 93

        assert None is await remote_hb_sub.receive_for(2.0)

        assert trans.local_node_id is None
        trans.attach_inferior(UDPTransport("127.1.1.2"))
        assert trans.local_node_id == 258

        for _ in range(2):
            hb_transfer = await remote_hb_sub.receive_for(2.0)
            assert hb_transfer is not None
            hb, transfer = hb_transfer
            assert transfer.source_node_id == 258
            assert transfer.priority == pyuavcan.transport.Priority.FAST
            assert 1 <= hb.uptime <= 9
            assert hb.mode.value == Mode_1_0.MAINTENANCE
            assert hb.health.value == Health_1_0.ADVISORY
            assert hb.vendor_specific_status_code == 93

        info_transfer = await remote_info_cln.call(GetInfo_1_0.Request())
        assert info_transfer is not None
        resp, transfer = info_transfer
        assert transfer.source_node_id == 258
        assert isinstance(resp, GetInfo_1_0.Response)
        assert resp.name.tobytes().decode() == "org.uavcan.pyuavcan.test.node"
        assert resp.protocol_version.major == pyuavcan.UAVCAN_SPECIFICATION_VERSION[
            0]
        assert resp.software_version.major == pyuavcan.__version_info__[0]

        trans.detach_inferior(trans.inferiors[0])
        assert trans.local_node_id is None

        assert None is await remote_hb_sub.receive_for(2.0)

        node.close()
        node.close()  # Idempotency
    finally:
        pres.close()
        remote_pres.close()
        await asyncio.sleep(1.0)  # Let the background tasks terminate.
Example #36
    def loop(self) -> Optional[asyncio.AbstractEventLoop]:
        return self._loop if self._loop else asyncio.get_running_loop() \
                if sys.version_info >= (3, 7) else asyncio.get_event_loop()

if __name__ == '__main__':
    call_period = 10  # Seconds

    logger.debug("Starting Main")

    # Begin the recurring task
    logger.info("Starting main loop. The loop will run " +
                "continuously every {} seconds.".format(call_period))
    recurring_timer = AsyncRecurringTimer(call_period, main, recurring=True)

    try:
        # There is an existing event loop
        # AKA working in ipython
        loop = asyncio.get_running_loop()
        client_coroutine = recurring_timer.start()
        client_task = asyncio.create_task(client_coroutine)

    except RuntimeError:
        # There is no running event loop
        logger.info("Starting new Async Event Loop")
        loop = asyncio.get_event_loop()
        # Now.. I need to set the coroutine to be executed in the loop
        # once started
        client_coroutine = recurring_timer.start()
        client_task = loop.create_task(client_coroutine)

    try:
        loop.run_forever()
    finally:
        loop.close()  # assumption: typical cleanup; the original snippet ends at the bare finally
Example #38
    async def test_cron(self):
        MONO_DELT = 1543827303.0
        unixtime = datetime.datetime(year=2018, month=12, day=5, hour=7, minute=0, tzinfo=tz.utc).timestamp()
        sync = asyncio.Event()
        lastquery = None
        s_provenance.reset()

        def timetime():
            return unixtime

        def looptime():
            return unixtime - MONO_DELT

        async def myeval(query, user=None):
            nonlocal lastquery
            lastquery = query
            sync.set()
            return
            yield None  # never reached; its presence makes myeval an async generator

        loop = asyncio.get_running_loop()

        with mock.patch.object(loop, 'time', looptime), mock.patch('time.time', timetime):
            async with self.getTestCoreAndProxy() as (realcore, core):

                outp = self.getTestOutp()
                async with await s_cmdr.getItemCmdr(core, outp=outp) as cmdr:

                    # Various silliness

                    await cmdr.runCmdLine('cron')
                    self.true(outp.expect('Manages cron jobs in a cortex'))
                    await cmdr.runCmdLine('cron timemachine')
                    self.true(outp.expect('invalid choice'))

                    await cmdr.runCmdLine('cron list')
                    self.true(outp.expect('No cron jobs found'))

                    outp.clear()

                    await cmdr.runCmdLine("cron add -M+1,beeroclock {[graph:node='*' :type=m1]}")
                    self.true(outp.expect('failed to parse parameter'))

                    await cmdr.runCmdLine("cron add -m nosuchmonth -d=-2 {#foo}")
                    self.true(outp.expect('failed to parse fixed parameter'))

                    outp.clear()
                    await cmdr.runCmdLine("cron add -m 8nosuchmonth -d=-2 {#foo}")
                    self.true(outp.expect('failed to parse fixed parameter'))

                    await cmdr.runCmdLine("cron add -d=, {#foo}")
                    self.true(outp.expect('failed to parse day value'))

                    await cmdr.runCmdLine("cron add -dMon -m +3 {#foo}")
                    self.true(outp.expect('provide a recurrence value with day of week'))

                    await cmdr.runCmdLine("cron add -dMon -m June {#foo}")
                    self.true(outp.expect('fix month or year with day of week'))

                    await cmdr.runCmdLine("cron add -dMon -m +3 -y +2 {#foo}")
                    self.true(outp.expect('more than 1 recurrence'))

                    await cmdr.runCmdLine("cron add --year=2019 {#foo}")
                    self.true(outp.expect('year may not be a fixed value'))

                    await cmdr.runCmdLine("cron add {#foo}")
                    self.true(outp.expect('must provide at least one optional'))

                    await cmdr.runCmdLine("cron add -H3 -M +4 {#foo}")
                    self.true(outp.expect('fixed unit may not be larger'))

                    outp.clear()
                    await cmdr.runCmdLine('cron add -d Tuesday,1 {#foo}')
                    self.true(outp.expect('failed to parse day value'))

                    outp.clear()
                    await cmdr.runCmdLine('cron add -d Fri,3 {#foo}')
                    self.true(outp.expect('failed to parse day value'))

                    outp.clear()
                    await cmdr.runCmdLine('cron add }')
                    self.true(outp.expect('query parameter must start with {'))

                    ##################
                    oldsplices = len(await alist(core.splices(0, 1000)))

                    # Start simple: add a cron job that creates a node every minute
                    outp.clear()
                    await cmdr.runCmdLine("cron add -M +1 {[graph:node='*' :type=m1]}")
                    self.true(outp.expect('Created cron job'))
                    guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]

                    unixtime += 60
                    await cmdr.runCmdLine('cron list')
                    self.true(outp.expect(':type=m1'))

                    # Make sure it ran
                    await self.agenlen(1, core.eval('graph:node:type=m1'))

                    # Make sure the provenance of the new splices looks right
                    splices = await alist(core.splices(oldsplices, 1000))
                    self.gt(len(splices), 1)
                    aliases = [splice[1]['prov'] for splice in splices]
                    self.true(all(a == aliases[0] for a in aliases))
                    prov = await core.getProvStack(aliases[0])
                    rootiden = prov[1][1][1]['user']
                    correct = ({}, (
                               ('cron', {'iden': guid}),
                               ('storm', {'q': "[graph:node='*' :type=m1]", 'user': rootiden})))
                    self.eq(prov, correct)

                    await cmdr.runCmdLine(f"cron mod {guid[:6]} {{[graph:node='*' :type=m2]}}")
                    self.true(outp.expect('Modified cron job'))
                    await cmdr.runCmdLine(f"cron mod xxx {{[graph:node='*' :type=m2]}}")
                    self.true(outp.expect('does not match'))
                    await cmdr.runCmdLine(f"cron mod xxx yyy")
                    self.true(outp.expect('expected second argument to start with {'))

                    # Make sure the old one didn't run and the new query ran
                    unixtime += 60
                    await self.agenlen(1, core.eval('graph:node:type=m1'))
                    await self.agenlen(1, core.eval('graph:node:type=m2'))

                    outp.clear()

                    # Delete the job
                    await cmdr.runCmdLine(f"cron del {guid}")
                    self.true(outp.expect('Deleted cron job'))
                    await cmdr.runCmdLine(f"cron del xxx")
                    self.true(outp.expect('does not match'))

                    # Make sure deleted job didn't run
                    unixtime += 60
                    await self.agenlen(1, core.eval('graph:node:type=m1'))
                    await self.agenlen(1, core.eval('graph:node:type=m2'))

                    # Test fixed minute, i.e. every hour at 17 past
                    unixtime = datetime.datetime(year=2018, month=12, day=5, hour=7, minute=10,
                                                 tzinfo=tz.utc).timestamp()
                    await cmdr.runCmdLine("cron add -M 17 {[graph:node='*' :type=m3]}")
                    guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]

                    unixtime += 7 * MINSECS

                    # Make sure it runs.  We add the cron list to give the cron scheduler a chance to run
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=m3'))
                    await cmdr.runCmdLine(f"cron del {guid}")

                    ##################

                    # Test day increment
                    await cmdr.runCmdLine("cron add -d +2 {[graph:node='*' :type=d1]}")
                    self.true(outp.expect('Created cron job'))
                    guid1 = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]

                    unixtime += DAYSECS

                    # Make sure it *didn't* run
                    await self.agenlen(0, core.eval('graph:node:type=d1'))

                    unixtime += DAYSECS

                    # Make sure it runs.  We add the cron list to give the cron scheduler a chance to run

                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=d1'))

                    unixtime += DAYSECS * 2
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(2, core.eval('graph:node:type=d1'))

                    ##################

                    # Test fixed day of week: every Monday and Thursday at 3am
                    unixtime = datetime.datetime(year=2018, month=12, day=11, hour=7, minute=10,
                                                 tzinfo=tz.utc).timestamp()  # A Tuesday

                    await cmdr.runCmdLine("cron add -H 3 -d Mon,Thursday {[graph:node='*' :type=d2]}")
                    guid2 = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
                    unixtime = datetime.datetime(year=2018, month=12, day=13, hour=3, minute=10,
                                                 tzinfo=tz.utc).timestamp()  # Now Thursday
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=d2'))

                    await cmdr.runCmdLine(f"cron del {guid1}")
                    await cmdr.runCmdLine(f"cron del {guid2}")

                    await cmdr.runCmdLine("cron add -H 3 -d Noday {[graph:node='*' :type=d2]}")
                    self.true(outp.expect('failed to parse day value "Noday"'))

                    ##################

                    # Test fixed day of month: second-to-last day of month
                    await cmdr.runCmdLine("cron add -d-2 -mDec {[graph:node='*' :type=d3]}")
                    guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]

                    unixtime = datetime.datetime(year=2018, month=12, day=29, hour=0, minute=0,
                                                 tzinfo=tz.utc).timestamp()  # Now Thursday
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(0, core.eval('graph:node:type=d3'))  # Not yet
                    unixtime += DAYSECS
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=d3'))
                    await cmdr.runCmdLine(f"cron del {guid}")

                    ##################

                    # Test month increment

                    await cmdr.runCmdLine("cron add -m +2 -d=4 {[graph:node='*' :type=month1]}")

                    unixtime = datetime.datetime(year=2019, month=2, day=4, hour=0, minute=0,
                                                 tzinfo=tz.utc).timestamp()  # Now Thursday
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=month1'))

                    ##################

                    # Test year increment

                    await cmdr.runCmdLine("cron add -y +2 {[graph:node='*' :type=year1]}")
                    guid2 = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
                    unixtime = datetime.datetime(year=2021, month=1, day=1, hour=0, minute=0,
                                                 tzinfo=tz.utc).timestamp()  # Now Thursday
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=year1'))

                    # Make sure second-to-last day works for February
                    await cmdr.runCmdLine("cron add -m February -d=-2 {[graph:node='*' :type=year2]}")
                    unixtime = datetime.datetime(year=2021, month=2, day=27, hour=0, minute=0,
                                                 tzinfo=tz.utc).timestamp()  # Now Thursday
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=year2'))

                    ##################

                    # Test 'at' command
                    outp.clear()
                    await cmdr.runCmdLine('at')
                    self.true(outp.expect('Adds a non-recurring'))

                    await cmdr.runCmdLine('at --not-a-real-flag')
                    self.true(outp.expect('the following arguments'))

                    await cmdr.runCmdLine('at {#foo} {#bar}')
                    self.true(outp.expect('only a single query'))

                    await cmdr.runCmdLine('at {#foo}')
                    self.true(outp.expect('at least'))

                    await cmdr.runCmdLine('at +1')
                    self.true(outp.expect('missing unit'))

                    await cmdr.runCmdLine('at +1parsec')
                    self.true(outp.expect('Trouble parsing'))

                    await cmdr.runCmdLine('at +1day')
                    self.true(outp.expect('Missing query'))

                    await cmdr.runCmdLine("at +5 minutes {[graph:node='*' :type=at1]}")
                    unixtime += 5 * MINSECS
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=at1'))

                    await cmdr.runCmdLine("at +1 day +7 days {[graph:node='*' :type=at2]}")
                    guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
                    unixtime += DAYSECS
                    await self.agenlen(1, core.eval('graph:node:type=at2'))
                    unixtime += 6 * DAYSECS + 1
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(2, core.eval('graph:node:type=at2'))

                    await cmdr.runCmdLine("at 202104170415 {[graph:node='*' :type=at3]}")

                    unixtime = datetime.datetime(year=2021, month=4, day=17, hour=4, minute=15,
                                                 tzinfo=tz.utc).timestamp()  # Now Thursday
                    await cmdr.runCmdLine('cron list')
                    await self.agenlen(1, core.eval('graph:node:type=at3'))
                    ##################

                    # Test 'stat' command

                    await cmdr.runCmdLine(f'cron stat xxx')
                    self.true(outp.expect('provided iden does not match any'))

                    await cmdr.runCmdLine(f'cron stat {guid[:6]}')
                    self.true(outp.expect('last result:     finished successfully with 1 nodes'))
                    self.true(outp.expect('entries:         <None>'))
                    await cmdr.runCmdLine(f'cron stat {guid2[:6]}')
                    self.true(outp.expect("{'month': 1, 'hour': 0, 'minute': 0, 'dayofmonth': 1}"))

                    ##################

                    # Delete an expired at job
                    outp.clear()
                    await cmdr.runCmdLine(f"cron del {guid}")
                    self.true(outp.expect('Deleted cron job'))

                    ##################

                    # Test the aliases
                    outp.clear()
                    await cmdr.runCmdLine('cron add --hourly 15 {#bar}')
                    guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
                    await cmdr.runCmdLine(f'cron stat {guid[:6]}')
                    self.true(outp.expect("{'minute': 15}"))

                    outp.clear()
                    await cmdr.runCmdLine('cron add --daily 05:47 {#bar}')
                    guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
                    await cmdr.runCmdLine(f'cron stat {guid[:6]}')
                    self.true(outp.expect("{'hour': 5, 'minute': 47"))

                    outp.clear()
                    await cmdr.runCmdLine('cron add --monthly=-1:12:30 {#bar}')
                    guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
                    await cmdr.runCmdLine(f'cron stat {guid[:6]}')
                    self.true(outp.expect("{'hour': 12, 'minute': 30, 'dayofmonth': -1}"))

                    outp.clear()
                    await cmdr.runCmdLine('cron add --yearly 04:17:12:30 {#bar}')
                    guid = outp.mesgs[-1].strip().rsplit(' ', 1)[-1]
                    await cmdr.runCmdLine(f'cron stat {guid[:6]}')
                    self.true(outp.expect("{'month': 4, 'hour': 12, 'minute': 30, 'dayofmonth': 17}"))

                    outp.clear()
                    await cmdr.runCmdLine('cron add --yearly 04:17:12 {#bar}')
                    self.true(outp.expect('Failed to parse parameter'))

                    outp.clear()
                    await cmdr.runCmdLine('cron add --daily xx:xx {#bar}')
                    self.true(outp.expect('Failed to parse ..ly parameter'))

                    outp.clear()
                    await cmdr.runCmdLine('cron add --hourly 1 -M 17 {#bar}')
                    self.true(outp.expect('may not use both'))
Example #39
async def inject_recover_scenario_aio(log_dir, config, cluster,
                                      workload_factory, failure_factory):
    cmd_log = path.join(log_dir, config["cmd_log"])
    latency_log = path.join(log_dir, config["latency_log"])
    availability_log = path.join(log_dir, config["availability_log"])

    init_logs(cmd_log, latency_log, availability_log, config["ss_metrics"])
    if not (config["verbose"]):
        gobekli_stdout = logging.getLogger("gobekli-stdout")
        gobekli_stdout.handlers = []

    workload = workload_factory()
    task = asyncio.create_task(workload.start())

    try:
        loop = asyncio.get_running_loop()

        end_time = loop.time() + config["warmup"]
        while workload.is_active:
            if (loop.time() + 1) >= end_time:
                break
            await asyncio.sleep(1)

        # inject
        fault = failure_factory()

        inject_side_thread = ThreadAsyncWaiter(
            lambda: fault.inject(cluster, workload))
        await inject_side_thread.wait(period_ms=500)

        end_time = loop.time() + config["exploitation"]
        while workload.is_active:
            if (loop.time() + 1) >= end_time:
                break
            await asyncio.sleep(1)

        # recover
        await ThreadAsyncWaiter(lambda: fault.recover()).wait(period_ms=500)

        end_time = loop.time() + config["cooldown"]
        while workload.is_active:
            if (loop.time() + 1) >= end_time:
                break
            await asyncio.sleep(1)
    except:
        workload.stop()

        try:
            await task
        except:
            e, v = sys.exc_info()[:2]
            stacktrace = traceback.format_exc()
            chaos_event_log.info(
                m("error on waiting for workflow's tast on handling error",
                  error_type=str(e),
                  error_value=str(v),
                  stacktrace=stacktrace).with_time())

        raise

    workload.stop()
    validation_result = await task
    await workload.dispose()

    scenario = "inject-recover"
    workload = config["workload"]["name"]

    result = ExperimentResult()
    result.is_valid = validation_result.is_valid
    result.error = validation_result.error
    result.title = f"{workload} with {scenario} using {fault.title}"
    result.availability_log = config["availability_log"]
    result.latency_log = config["latency_log"]
    result.analysis = analyze_inject_recover_availability(
        log_dir, config["availability_log"], config["latency_log"])
    return result
Example #40
async def async_save_json(filename: str, data: dict):
    """Save JSON data to a file."""
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, save_json, filename, data)
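save_json is assumed to be an ordinary blocking helper along these lines (a sketch, not the project's actual implementation):

import json

def save_json(filename: str, data: dict) -> None:
    # plain blocking write, suitable for offloading to the executor
    with open(filename, 'w', encoding='utf-8') as fp:
        json.dump(data, fp)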
Example #41
async def send_bird(ctx,
                    bird: str,
                    media_type: str,
                    filters: Filter,
                    on_error=None,
                    message=None):
    """Gets bird media and sends it to the user.

    `ctx` - Discord context object\n
    `bird` (str) - bird to send\n
    `media_type` (str) - type of media (images/songs)\n
    `filters` (bot.filters Filter)\n
    `on_error` (function) - async function to run when an error occurs, passes error as argument\n
    `message` (str) - text message to send before bird\n
    """
    if bird == "":
        logger.error("error - bird is blank")
        await ctx.send("**There was an error fetching birds.**")
        if on_error is not None:
            await on_error(GenericError("bird is blank", code=100))
        else:
            await ctx.send("*Please try again.*")
        return

    # add special condition for screech owls
    # since screech owl is a genus and SciOly
    # doesn't specify a species
    if bird == "Screech Owl":
        logger.info("choosing specific Screech Owl")
        bird = random.choice(screech_owls)

    delete = await ctx.send("**Fetching.** This may take a while.")
    # trigger "typing" discord message
    await ctx.trigger_typing()

    try:
        filename, extension = await get_media(ctx, bird, media_type, filters)
    except GenericError as e:
        await delete.delete()
        if e.code == 100:
            await ctx.send(
                f"**This combination of filters has no valid {media_type} for the current bird.**"
            )
        elif e.code == 201:
            capture_exception(e)
            logger.exception(e)
            await ctx.send(
                "**A network error has occurred.**\n*Please try again later.*")
            database.incrby("cooldown:global", amount=1)
            database.expire("cooldown:global", 300)
        else:
            capture_exception(e)
            logger.exception(e)
            await ctx.send(
                f"**An error has occurred while fetching {media_type}.**\n**Reason:** {e}"
            )
        if on_error is not None:
            await on_error(e)
        else:
            await ctx.send("*Please try again.*")
        return

    if os.stat(
            filename).st_size > MAX_FILESIZE:  # another filesize check (4mb)
        await delete.delete()
        await ctx.send("**Oops! File too large :(**\n*Please try again.*")
        return

    if media_type == "images":
        if filters.bw:
            # prevent the black and white conversion from blocking
            loop = asyncio.get_running_loop()
            fn = functools.partial(_black_and_white, filename)
            filename = await loop.run_in_executor(None, fn)

    elif media_type == "songs" and not filters.vc:
        # remove spoilers in tag metadata
        audioFile = eyed3.load(filename)
        if audioFile is not None and audioFile.tag is not None:
            audioFile.tag.remove(filename)

    if message is not None:
        await ctx.send(message)

    if media_type == "songs" and filters.vc:
        await voice_functions.play(ctx, filename)
    else:
        # change filename to avoid spoilers
        file_obj = discord.File(filename, filename=f"bird.{extension}")
        await ctx.send(file=file_obj)
    await delete.delete()
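The `_black_and_white` helper is not shown in this snippet; a hypothetical Pillow-based version, illustrating why the call is pushed into the executor (image conversion is CPU-bound and would otherwise stall the event loop), might be:

from PIL import Image

def _black_and_white(input_path: str) -> str:
    # Hypothetical sketch: write a grayscale copy next to the original and return its path.
    out_path = f"{input_path}.bw.png"
    with Image.open(input_path) as img:
        img.convert("L").save(out_path)
    return out_path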
Example #42
    async def test_agenda(self):
        MONO_DELT = 1543827303.0
        unixtime = datetime.datetime(year=2018, month=12, day=5, hour=7, minute=0, tzinfo=tz.utc).timestamp()
        sync = asyncio.Event()
        lastquery = None

        def timetime():
            return unixtime

        def looptime():
            return unixtime - MONO_DELT

        async def myeval(query, user=None):
            nonlocal lastquery
            lastquery = query
            sync.set()
            if 'sleep' in query:
                await asyncio.sleep(60)

            if query == 'badquery':
                raise Exception('test exception')
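            # the unreachable yield below turns myeval into an async generator,
            # matching the async-generator interface the mocked core.eval must provide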
            return
            yield None

        loop = asyncio.get_running_loop()
        with mock.patch.object(loop, 'time', looptime), mock.patch('time.time', timetime), self.getTestDir() as dirn:
            core = mock.Mock()
            core.eval = myeval
            core.slab = await s_lmdbslab.Slab.anit(dirn, map_size=s_t_utils.TEST_MAP_SIZE, readonly=False)
            db = core.slab.initdb('hive')
            core.hive = await s_hive.SlabHive.anit(core.slab, db=db)
            core.boss = await s_boss.Boss.anit()
            async with await s_agenda.Agenda.anit(core) as agenda:
                agenda.onfini(core.hive)
                agenda.onfini(core.slab)
                agenda.onfini(core.boss)

                await agenda.enable()
                await agenda.enable()  # make sure it doesn't blow up
                self.eq([], agenda.list())

                rootiden = 'aaaaa'

                await self.asyncraises(ValueError, agenda.add(rootiden, '', {s_agenda.TimeUnit.MINUTE: 1}))

                # Schedule a one-shot 1 minute from now
                await agenda.add(rootiden, '[test:str=foo]', {s_agenda.TimeUnit.MINUTE: 1})
                await asyncio.sleep(0)  # give the scheduler a shot to wait
                unixtime += 61
                await sync.wait()  # wait for the query to run
                sync.clear()
                self.eq(lastquery, '[test:str=foo]')
                core.reset_mock()
                lastquery = None

                appts = agenda.list()
                self.len(1, appts)
                self.eq(appts[0][1]['startcount'], 1)
                self.eq(appts[0][1]['nexttime'], None)

                # Schedule a query to run every Wednesday and Friday at 10:15am
                guid = await agenda.add(rootiden, '[test:str=bar]', {s_tu.HOUR: 10, s_tu.MINUTE: 15},
                                        incunit=s_agenda.TimeUnit.DAYOFWEEK, incvals=(2, 4))

                # every 6th of the month at 7am and 8am (the 6th is a Thursday)
                guid2 = await agenda.add(rootiden, '[test:str=baz]',
                                         {s_tu.HOUR: (7, 8), s_tu.MINUTE: 0, s_tu.DAYOFMONTH: 6},
                                         incunit=s_agenda.TimeUnit.MONTH, incvals=1)

                xmas = {s_tu.DAYOFMONTH: 25, s_tu.MONTH: 12, s_tu.YEAR: 2018}
                lasthanu = {s_tu.DAYOFMONTH: 10, s_tu.MONTH: 12, s_tu.YEAR: 2018}

                # And one-shots for Christmas and last day of Hanukkah of 2018
                await agenda.add(rootiden, '#happyholidays', (xmas, lasthanu))

                await asyncio.sleep(0)
                unixtime += 1
                # Nothing should happen
                self.none(lastquery)

                # Advance to the first event on Wednesday the 5th
                unixtime = datetime.datetime(year=2018, month=12, day=5, hour=10, minute=16, tzinfo=tz.utc).timestamp()
                await sync.wait()
                sync.clear()
                self.eq(lastquery, '[test:str=bar]')

                # Then two on the 6th
                unixtime = datetime.datetime(year=2018, month=12, day=6, hour=7, minute=15, tzinfo=tz.utc).timestamp()
                await sync.wait()
                sync.clear()
                self.eq(lastquery, '[test:str=baz]')
                lastquery = None
                unixtime = datetime.datetime(year=2018, month=12, day=6, hour=8, minute=15, tzinfo=tz.utc).timestamp()
                await sync.wait()
                sync.clear()
                self.eq(lastquery, '[test:str=baz]')

                # Then back to the 10:15 on Friday
                unixtime = datetime.datetime(year=2018, month=12, day=7, hour=10, minute=16, tzinfo=tz.utc).timestamp()
                await sync.wait()
                sync.clear()
                self.eq(lastquery, '[test:str=bar]')

                # Then Dec 10
                unixtime = datetime.datetime(year=2018, month=12, day=10, hour=10, minute=16, tzinfo=tz.utc).timestamp()
                await sync.wait()
                sync.clear()
                self.eq(lastquery, '#happyholidays')

                # Then the Wednesday again
                unixtime = datetime.datetime(year=2018, month=12, day=12, hour=10, minute=16, tzinfo=tz.utc).timestamp()
                await sync.wait()
                sync.clear()
                self.eq(lastquery, '[test:str=bar]')

                # Cancel the Wednesday/Friday appt
                await agenda.delete(guid)
                await self.asyncraises(s_exc.NoSuchIden, agenda.delete(b'1234'))

                # Then Dec 25
                unixtime = datetime.datetime(year=2018, month=12, day=25, hour=10, minute=16, tzinfo=tz.utc).timestamp()
                await sync.wait()
                sync.clear()
                self.eq(lastquery, '#happyholidays')

                # Then Jan 6
                unixtime = datetime.datetime(year=2019, month=1, day=6, hour=10, minute=16, tzinfo=tz.utc).timestamp()
                await sync.wait()
                sync.clear()
                self.eq(lastquery, '[test:str=baz]')

                # Modify the last appointment
                await self.asyncraises(ValueError, agenda.mod(guid2, ''))
                await agenda.mod(guid2, '#baz')
                self.eq(agenda.appts[guid2].query, '#baz')

                # Delete the other recurring appointment
                await agenda.delete(guid2)

                # Then nothing left scheduled
                self.len(0, agenda.apptheap)

                # Test that isrunning updated, cancelling works
                guid = await agenda.add(rootiden, 'inet:ipv4=1 | sleep 120', {},
                                        incunit=s_agenda.TimeUnit.MINUTE, incvals=1)
                unixtime += 60
                await sync.wait()
                sync.clear()
                self.len(1, core.boss.tasks)
                task = next(iter(core.boss.tasks.values()))
                appt_info = [info for g, info in agenda.list() if g == guid][0]
                self.eq(appt_info['isrunning'], True)
                await task.kill()
                appt_info = [info for g, info in agenda.list() if g == guid][0]
                self.eq(appt_info['isrunning'], False)
                self.eq(appt_info['lastresult'], 'cancelled')
                await agenda.delete(guid)

                # Test bad queries record exception
                guid = await agenda.add(rootiden, '#foo', {},
                                        incunit=s_agenda.TimeUnit.MINUTE, incvals=1)
                # bypass the API because it would actually syntax check
                agenda.appts[guid].query = 'badquery'
                unixtime += 60
                await sync.wait()
                sync.clear()
                appt_info = [info for g, info in agenda.list() if g == guid][0]
                self.eq(appt_info['isrunning'], False)
                self.eq(appt_info['lastresult'], 'raised exception test exception')
Example #43
async def main(host, port):
    loop = asyncio.get_running_loop()
    server = await loop.create_server(TimeoutServer, host, port)
    await server.serve_forever()
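`TimeoutServer` is not defined in this snippet; a minimal stand-in protocol (the name and timeout behaviour are assumptions based on the call above) could look like:

import asyncio

class TimeoutServer(asyncio.Protocol):
    # Hypothetical sketch: close idle connections after 30 seconds of silence.
    TIMEOUT = 30.0

    def connection_made(self, transport):
        self.transport = transport
        loop = asyncio.get_running_loop()
        self.timeout_handle = loop.call_later(self.TIMEOUT, transport.close)

    def data_received(self, data):
        # Any received data resets the idle timer.
        self.timeout_handle.cancel()
        loop = asyncio.get_running_loop()
        self.timeout_handle = loop.call_later(self.TIMEOUT, self.transport.close)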
Example #44
    def __await__(self):
        yield from asyncio.get_running_loop().run_in_executor(
            None, self._await_terminal_state, None).__await__()
        if self._exception:
            raise self._exception
        return self._result
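The pattern here makes a thread-blocking wait awaitable: run_in_executor returns an asyncio.Future, and delegating to its __await__ suspends the caller until the blocking _await_terminal_state call finishes in a worker thread. A self-contained sketch of the same idea, with hypothetical names:

import asyncio
import threading

class BlockingResult:
    # Hypothetical sketch: a result produced by non-async code on another thread.
    def __init__(self):
        self._done = threading.Event()
        self._result = None

    def set_result(self, value):
        self._result = value
        self._done.set()

    def _wait(self, timeout):
        self._done.wait(timeout)

    def __await__(self):
        # Block in a worker thread, never on the event loop itself.
        yield from asyncio.get_running_loop().run_in_executor(
            None, self._wait, None).__await__()
        return self._result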
Example #45
    async def test_pushfile(self):

        async with self.getTestAxon() as axon:

            async with self.getTestCore() as core:

                coreurl = core.getLocalUrl()
                axonurl = axon.getLocalUrl()

                async with axon.getLocalProxy() as axonprox:

                    async with core.getLocalProxy() as coreprox:

                        def pushfile():

                            with self.getTestDir() as dirn:

                                nullpath = os.path.join(dirn, 'null.txt')
                                visipath = os.path.join(dirn, 'visi.txt')

                                with s_common.genfile(visipath) as fd:
                                    fd.write(b'visi')

                                self.len(1, axonprox.wants([visihash]))

                                outp = self.getTestOutp()
                                args = ['-a', axonurl,
                                        '-c', coreurl,
                                        '-t', 'foo.bar,baz.faz',
                                        visipath]

                                self.eq(0, s_pushfile.main(args, outp))
                                self.true(outp.expect('Uploaded [visi.txt] to axon'))
                                self.true(outp.expect('file: visi.txt (4) added to core'))

                                self.len(0, axonprox.wants([visihash]))
                                self.eq(b'visi', b''.join([buf for buf in axonprox.get(visihash)]))

                                outp = self.getTestOutp()
                                self.eq(0, s_pushfile.main(args, outp))
                                self.true(outp.expect('Axon already had [visi.txt]'))

                                self.len(1, coreprox.eval(f'file:bytes={s_common.ehex(visihash)}'))
                                self.len(1, coreprox.eval('file:bytes:size=4'))
                                self.len(1, coreprox.eval('#foo.bar'))
                                self.len(1, coreprox.eval('#baz.faz'))

                                # Ensure user can't push a non-existent file and that it won't exist
                                args = ['-a', axonurl, nullpath]
                                self.raises(s_exc.NoSuchFile, s_pushfile.main, args, outp=outp)

                                self.len(1, axonprox.wants([nullhash]))

                                with s_common.genfile(nullpath) as fd:
                                    fd.write(b'')

                                outp = self.getTestOutp()
                                args = ['-a', axonurl,
                                        '-c', coreurl,
                                        '-t', 'empty',
                                        nullpath]

                                self.eq(0, s_pushfile.main(args, outp))

                                self.len(0, axonprox.wants([nullhash]))
                                self.eq(b'', b''.join([buf for buf in axonprox.get(nullhash)]))
                            return 1

                        loop = asyncio.get_running_loop()
                        ret = await loop.run_in_executor(None, pushfile)
                        self.eq(1, ret)
Example #46
async def watch_and_shrink_cache(
    *,
    flowdb_connection: "Connection",
    pool: Executor,
    sleep_time: int = 86400,
    timeout: Optional[int] = 600,
    loop: bool = True,
    size_threshold: Optional[int] = None,
    dry_run: bool = False,
    protected_period: Optional[int] = None,
) -> None:
    """
    Background task to periodically trigger a shrink of the cache.

    Parameters
    ----------
    flowdb_connection : Connection
        Flowdb connection to check dates on
    pool : Executor
        Executor to run the date check with
    sleep_time : int, default 86400
        Number of seconds to sleep for between checks
    timeout : int or None, default 600
        Seconds to wait for a cache shrink to complete before cancelling it
    loop : bool, default True
        Set to false to return after the first check
    size_threshold : int, default None
        Optionally override the maximum cache size set in flowdb.
    dry_run : bool, default False
        Set to true to just report the objects that would be removed and not remove them
    protected_period : int, default None
        Optionally specify a number of seconds within which cache entries are excluded. If None,
        the value stored in cache.cache_config will be used. Set to a negative number to ignore
        cache protection completely.

    Returns
    -------
    None

    """
    shrink_func = partial(
        shrink_below_size,
        connection=flowdb_connection,
        size_threshold=size_threshold,
        dry_run=dry_run,
        protected_period=protected_period,
    )
    while True:
        logger.debug("Checking if cache should be shrunk.")

        try:  # Set the shrink function running with a copy of the current execution context (db conn etc) in background thread
            await asyncio.wait_for(
                asyncio.get_running_loop().run_in_executor(
                    pool,
                    copy_context().run, shrink_func),
                timeout=timeout,
            )
        except asyncio.TimeoutError:  # the builtin TimeoutError only matches on Python 3.11+
            logger.error(
                f"Failed to complete cache shrink within {timeout}s. Trying again in {sleep_time}s."
            )
        if not loop:
            break
        await asyncio.sleep(sleep_time)
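A sketch of launching this watcher from an async entrypoint; the connection object is whatever `Connection` the caller already holds, and the argument values are illustrative:

from concurrent.futures import ThreadPoolExecutor

async def start_cache_watcher(flowdb_connection):
    # A single worker thread is enough: shrink runs are serialized anyway.
    with ThreadPoolExecutor(max_workers=1) as pool:
        await watch_and_shrink_cache(
            flowdb_connection=flowdb_connection,
            pool=pool,
            sleep_time=3600,
            dry_run=True,
        )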
Example #47
    async def main():
        self.assertIsInstance(
            asyncio.get_running_loop(),
            asyncio.ProactorEventLoop)
Example #48
async def foo():
    loop = asyncio.get_running_loop()
    loop.create_task(nested())  # schedule a coroutine as a task without awaiting it

    loop.run_in_executor(None, nested)  # run a sync callable in the default thread pool
Example #49
async def main():
    clientloop = asyncio.get_running_loop()
    on_con_lost = clientloop.create_future()
    transport, protocol = await clientloop.create_connection(
        lambda: ChatbotClient(input(), on_con_lost, clientloop), 'localhost', 10000)
Example #50
async def spawn_tasks(
    *,
    lifecycle: Optional[lifecycles.LifeCycleFn] = None,
    registry: Optional[registries.OperatorRegistry] = None,
    settings: Optional[configuration.OperatorSettings] = None,
    memories: Optional[containers.ResourceMemories] = None,
    standalone: bool = False,
    priority: int = 0,
    peering_name: Optional[str] = None,
    liveness_endpoint: Optional[str] = None,
    namespace: Optional[str] = None,
    stop_flag: Optional[primitives.Flag] = None,
    ready_flag: Optional[primitives.Flag] = None,
    vault: Optional[credentials.Vault] = None,
) -> Tasks:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    settings = settings if settings is not None else configuration.OperatorSettings()
    memories = memories if memories is not None else containers.ResourceMemories()
    vault = vault if vault is not None else global_vault
    vault = vault if vault is not None else credentials.Vault()
    event_queue: posting.K8sEventQueue = asyncio.Queue()
    freeze_mode: primitives.Toggle = primitives.Toggle()
    signal_flag: asyncio_Future = asyncio.Future()
    ready_flag = ready_flag if ready_flag is not None else asyncio.Event()
    tasks: MutableSequence[asyncio_Task] = []

    # Global credentials store for this operator, also for CRD-reading & peering mode detection.
    auth.vault_var.set(vault)

    # Special case: pass the settings container through the user-side handlers (no explicit args).
    # Toolkits have to keep the original operator context somehow, and the only way is contextvars.
    posting.settings_var.set(settings)

    # Few common background forever-running infrastructural tasks (irregular root tasks).
    tasks.extend([
        loop.create_task(
            _stop_flag_checker(
                signal_flag=signal_flag,
                stop_flag=stop_flag,
            )),
        loop.create_task(
            _startup_cleanup_activities(
                root_tasks=tasks,  # used as a "live" view, populated later.
                ready_flag=ready_flag,
                registry=registry,
                settings=settings,
                vault=vault,  # to purge & finalize the caches in the end.
            )),
    ])

    # Kill all the daemons gracefully when the operator exits (so that they are not "hung").
    tasks.extend([
        loop.create_task(
            _root_task_checker(name="daemon killer",
                               ready_flag=ready_flag,
                               coro=daemons.daemon_killer(settings=settings,
                                                          memories=memories))),
    ])

    # Keeping the credentials fresh and valid via the authentication handlers on demand.
    tasks.extend([
        loop.create_task(
            _root_task_checker(name="credentials retriever",
                               ready_flag=ready_flag,
                               coro=activities.authenticator(registry=registry,
                                                             settings=settings,
                                                             vault=vault))),
    ])

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.extend([
        loop.create_task(
            _root_task_checker(name="poster of events",
                               ready_flag=ready_flag,
                               coro=posting.poster(event_queue=event_queue))),
    ])

    # Liveness probing -- so that Kubernetes would know that the operator is alive.
    if liveness_endpoint:
        tasks.extend([
            loop.create_task(
                _root_task_checker(name="health reporter",
                                   ready_flag=ready_flag,
                                   coro=probing.health_reporter(
                                       registry=registry,
                                       settings=settings,
                                       endpoint=liveness_endpoint))),
        ])

    # Monitor the peers, unless explicitly disabled.
    ourselves: Optional[peering.Peer] = await peering.Peer.detect(
        id=peering.detect_own_id(),
        priority=priority,
        standalone=standalone,
        namespace=namespace,
        name=peering_name,
    )
    if ourselves:
        tasks.extend([
            loop.create_task(peering.peers_keepalive(ourselves=ourselves)),
            loop.create_task(
                _root_task_checker(name="watcher of peering",
                                   ready_flag=ready_flag,
                                   coro=queueing.watcher(
                                       namespace=namespace,
                                       settings=settings,
                                       resource=ourselves.resource,
                                       processor=functools.partial(
                                           peering.process_peering_event,
                                           ourselves=ourselves,
                                           freeze_mode=freeze_mode)))),
        ])

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.extend([
            loop.create_task(
                _root_task_checker(name=f"watcher of {resource.name}",
                                   ready_flag=ready_flag,
                                   coro=queueing.watcher(
                                       namespace=namespace,
                                       settings=settings,
                                       resource=resource,
                                       freeze_mode=freeze_mode,
                                       processor=functools.partial(
                                           processing.process_resource_event,
                                           lifecycle=lifecycle,
                                           registry=registry,
                                           settings=settings,
                                           memories=memories,
                                           resource=resource,
                                           event_queue=event_queue)))),
        ])

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Handle NotImplementedError when run on Windows, since asyncio only supports Unix signals
        try:
            loop.add_signal_handler(signal.SIGINT, signal_flag.set_result,
                                    signal.SIGINT)
            loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result,
                                    signal.SIGTERM)
        except NotImplementedError:
            logger.warning(
                "OS signals are ignored: can't add signal handlers on Windows.")

    else:
        logger.warning(
            "OS signals are ignored: running not in the main thread.")

    return tasks
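A sketch of driving the spawned tasks from a plain entrypoint (the keyword values are illustrative):

import asyncio

async def run_operator():
    tasks = await spawn_tasks(standalone=True, priority=100)
    # Runs until the root tasks exit, normally via a stop flag or an OS signal.
    await asyncio.gather(*tasks)

asyncio.run(run_operator())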
Example #51
File: task.py Project: vivisect/synapse
def loop():
    try:
        return asyncio.get_running_loop()
    except Exception:
        return None
Example #52
    async def block_to_async(self, partial: functools.partial):
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, partial)
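A typical call site binds arguments with functools.partial before handing the callable over; `expensive_lookup` here is a hypothetical blocking function:

    async def fetch(self, key):
        # Offload the blocking helper to the default thread pool.
        return await self.block_to_async(
            functools.partial(expensive_lookup, key, retries=3))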
Example #53
async def amain():
    if WINDOWS:
        loop: asyncio.AbstractEventLoop = asyncio.get_running_loop()

        def noop_callback():
            loop.call_later(0.2, noop_callback)

        noop_callback()

    parser = argparse.ArgumentParser(
        description='''Run a Pluggable Transport as a standalone tunnel server
        or client.''',
        epilog='''Note: the logging output of this script may log client and
        server IP addresses. Do not use this script if such addresses may be
        sensitive.'''
    )

    role_group = parser.add_mutually_exclusive_group(required=True)
    role_group.add_argument(
        '-S', '--server', action='store_true',
        help='''Run as server end of tunnel. Since the PT directly forwards
        unobfuscated traffic upstream, no client information will be logged
        even if verbosity is turned up.'''
    )
    role_group.add_argument(
        '-E', '--ext-server', action='store_true',
        help='''Run as server end of tunnel using ExtOrPort. Compared with -S, 
        running ptadapter with this option allows client addresses and 
        transport names to be logged, but also increases connection
        overhead.'''
    )
    role_group.add_argument(
        '-C', '--client', action='store_true',
        help='''Run as client end of tunnel.'''
    )

    parser.add_argument(
        'configfile', type=argparse.FileType('rt'),
        help='''Configuration file.'''
    )
    parser.add_argument(
        '-v', '--verbose', action='count',
        help='''Increase verbosity level. Specify once to see INFO logs, twice
        to see DEBUG.'''
    )
    parser.add_argument(
        '-t', '--log-no-time', action='store_true',
        help='''Suppress timestamps in logging output.'''
    )

    args = parser.parse_args()

    if not args.verbose:
        loglevel = logging.WARNING
    elif args.verbose == 1:
        loglevel = logging.INFO
    else:
        loglevel = logging.DEBUG

    rootlogger.setLevel(loglevel)

    if args.log_no_time:
        formatter = logging.Formatter('%(levelname)-8s %(name)s %(message)s')
    else:
        formatter = logging.Formatter(
            '[%(asctime)s] %(levelname)-8s %(name)s %(message)s')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    rootlogger.addHandler(stream_handler)

    rootlogger.debug('Command line arguments: %r', args)

    conf = configparser.ConfigParser(empty_lines_in_values=False)
    conf.read_file(args.configfile)
    args.configfile.close()
    rootlogger.debug('Read config file')

    if args.client:
        await run_client(conf)
    elif args.server:
        await run_server(conf, False)
    else:
        await run_server(conf, True)
Example #54
async def spawn_tasks(
    lifecycle: Optional[lifecycles.LifeCycleFn] = None,
    registry: Optional[registries.OperatorRegistry] = None,
    standalone: bool = False,
    priority: int = 0,
    peering_name: Optional[str] = None,
    namespace: Optional[str] = None,
    stop_flag: Optional[Flag] = None,
    ready_flag: Optional[Flag] = None,
) -> Tasks:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
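    # NB: the loop= arguments below were accepted when this snippet was written; asyncio.Queue
    # and asyncio.Event stopped accepting loop= in Python 3.10, so this targets older versions.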
    event_queue: posting.K8sEventQueue = asyncio.Queue(loop=loop)
    freeze_flag: asyncio.Event = asyncio.Event(loop=loop)
    signal_flag: asyncio_Future = asyncio.Future(loop=loop)
    tasks = []

    # A top-level task for external stopping by setting a stop-flag. Once set,
    # this task will exit, and thus all other top-level tasks will be cancelled.
    tasks.extend([
        loop.create_task(
            _stop_flag_checker(
                signal_flag=signal_flag,
                ready_flag=ready_flag,
                stop_flag=stop_flag,
            )),
    ])

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.extend([
        loop.create_task(
            _root_task_checker("poster of events",
                               posting.poster(event_queue=event_queue))),
    ])

    # Monitor the peers, unless explicitly disabled.
    ourselves: Optional[peering.Peer] = peering.Peer.detect(
        id=peering.detect_own_id(),
        priority=priority,
        standalone=standalone,
        namespace=namespace,
        name=peering_name,
    )
    if ourselves:
        tasks.extend([
            loop.create_task(peering.peers_keepalive(ourselves=ourselves)),
            loop.create_task(
                _root_task_checker(
                    "watcher of peering",
                    queueing.watcher(
                        namespace=namespace,
                        resource=ourselves.resource,
                        handler=functools.partial(
                            peering.peers_handler,
                            ourselves=ourselves,
                            freeze=freeze_flag)))),  # freeze is set/cleared
        ])

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.extend([
            loop.create_task(
                _root_task_checker(
                    f"watcher of {resource.name}",
                    queueing.watcher(
                        namespace=namespace,
                        resource=resource,
                        handler=functools.partial(
                            handling.resource_handler,
                            lifecycle=lifecycle,
                            registry=registry,
                            resource=resource,
                            event_queue=event_queue,
                            freeze=freeze_flag)))),  # freeze is only checked
        ])

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        loop.add_signal_handler(signal.SIGINT, signal_flag.set_result,
                                signal.SIGINT)
        loop.add_signal_handler(signal.SIGTERM, signal_flag.set_result,
                                signal.SIGTERM)
    else:
        logger.warning(
            "OS signals are ignored: running not in the main thread.")

    return tasks
Example #55
    async def console(self):
        self.send_buffer = asyncio.Queue(loop=asyncio.get_running_loop())
        asyncio.ensure_future(self.display_incomming_data())
        asyncio.ensure_future(self.send_outgoing_data())
        await self.execute_hooks()
Example #56
    async def abcpow_on(self, ctx):
        self.game = True
        loop = asyncio.get_running_loop()
        if self.game:
            # every round, reset this flag, eliminate players that failed to type the word
            # self.trackedPlayersPrevious = everyone who survives from previous round
            # make a shallow copy

            self.round += 1
            self.trackedPlayersPrevious = self.trackedPlayers.copy()
            for player, submitted in list(self.trackedPlayers.items()):
                if not submitted:
                    del self.trackedPlayers[player]
                else:
                    self.trackedPlayers[player] = False

            if self.round >= 2 and len(self.trackedPlayers) == 0:
                res = discord.Embed(
                    title="Winners",
                    description="\n".join(
                        [x for x in self.trackedPlayersPrevious.keys()]),
                    color=util.generate_random_color())
                await ctx.send(embed=res)
                await self.stop_abcpow(ctx)
                return
            # if self.round == self.maxRound:
            #     sortedPlayers = sorted(self.trackedPlayers.items(), key=lambda x: x[1], reverse=True)
            #     res = discord.Embed(title="Leaderboards", description="\n".join([(str(i[0]) + ": " + str(i[1])) for i in sortedPlayers]), color=util.generate_random_color())
            #     await ctx.send(embed=res)
            #     await self.stop_chainage(ctx)
            #     return

            spaceInsertedWord, self.currentWord = powutil.generate_random_word_alphabetized(
                self.minL, self.maxL)
            res = discord.Embed(
                title="ABCPow! Round " + str(self.round),
                description="You have **" +
                (str(self.roundTimer[self.round])
                 if self.round < len(self.roundTimer) else str(self.minTime)) +
                "** seconds to enter the letters in ABC order: **" +
                spaceInsertedWord + "**\n**Remaining players: **\n" +
                ", ".join([x for x in self.trackedPlayers.keys()]),
                color=util.generate_random_color())
            await ctx.send(embed=res)

            # start the timer ONLY when all of the above are complete
            self.timer = loop.time() + (self.roundTimer[self.round]
                                        if self.round < len(self.roundTimer)
                                        else self.minTime)
            """
            Logic for timer that recursively calls this function.
            Important for advancing rounds and resetting timer! 
            """
            while self.game:
                if (loop.time()) >= self.timer:
                    res = discord.Embed(title="Round over!",
                                        color=util.generate_random_color())
                    self.game = False
                    await ctx.send(embed=res)
                    await asyncio.sleep(1)
                    await self.abcpow_on(ctx)
                    # self.timer = 10e22
                    break
                await asyncio.sleep(1)
            await asyncio.sleep(3)
        else:
            self.timer = 10e22
Example #57
def on(
    event_type: T.Union[T.Type[K], T.Type[object]],
    namespace: object,
    listener: T.Optional[ListenerCb[K]] = None,
    *,
    once: bool = False,
    loop: T.Optional[AbstractEventLoop] = None,
    scope: T.Union[str, T.Tuple[str, ...]] = "",
    raise_on_exc: bool = False,
) -> T.Union[ListenerCb[K], T.ContextManager[None], T.Callable[[ListenerCb[K]],
                                                               ListenerCb[K]]]:
    """Add a listener to event type.

    Context can't be specified when using this function in decorator mode.
    Context can't be specified when passing once=True.

    Args:

        event_type: Specify which event type or scope namespace will trigger this listener execution.

        namespace: Specify the namespace in which the listener will be attached.

        listener: Callable to be executed when there is an emission of the given event.

        once: Define whether the given listener is to be removed after its first execution.

        loop: Specify a loop to bound to the given listener and ensure it is always executed in the
              correct context. (Default: Current running loop for coroutines functions, None for
              any other callable)

        scope: Specify a scope for specializing this listener registration.

        raise_on_exc: Whether an untreated exception raised by this listener will make an event
                      emission to fail.

    Raises:

        TypeError: Failed to bound loop to listener.

        ValueError: event_type is not a type instance, or it is a builtin type, or it is a
                    BaseException, or listener is not callable.

    Returns:

        If listener isn't provided, this method returns a function that takes a Callable as a \
        single argument. As such it can be used as a decorator. In both the decorated and \
        undecorated forms this function returns the given event listener.

    """

    if listener is None:
        return lambda cb: on(
            event_type,
            namespace,
            cb,
            once=once,
            loop=loop,
            scope=scope,
            raise_on_exc=raise_on_exc,
        )

    if not callable(listener):
        raise ValueError("Listener must be callable")

    scope = parse_scope(scope)

    # Define listeners options
    opts = ListenerOpts.NOP
    if once:
        opts |= ListenerOpts.ONCE
    if raise_on_exc:
        opts |= ListenerOpts.RAISE

    if loop is None and iscoroutinefunction(listener):
        # Automatically set loop for Coroutines to avoid problems with emission from another thread
        with suppress(RuntimeError):
            loop = get_running_loop()

    if loop:
        listener = BoundLoopListenerWrapper(loop, listener)

    # Retrieve listeners
    listeners = retrieve_listeners_from_namespace(namespace)

    # Group listener's opts and context
    with (nullcontext(listeners.context) if listeners.context is None
          or listeners.context.active else listeners.context):
        listener_info = (opts, copy_context())

    # Add the given listener to the correct queue
    if event_type is None:
        raise ValueError("Event type can't be NoneType")
    elif issubclass(event_type, type):
        # Event type must be a concrete class; reject metaclasses and the like.
        raise ValueError("Event type must be a concrete type")
    elif issubclass(event_type,
                    BaseException) and not issubclass(event_type, Exception):
        raise ValueError("Event type can't be a BaseException")
    else:
        listeners.scope[scope][event_type][listener] = listener_info

    if event_type is not NewListener:
        emit(NewListener(event_type), namespace, sync=True, scope=scope)

    return listener
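Decorator-style registration might look like this; the event class and namespace object are illustrative, not from the original library:

class DataReady:
    def __init__(self, payload):
        self.payload = payload

namespace = object()

@on(DataReady, namespace, once=True)
async def handle(event: DataReady):
    print("got", event.payload)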
Example #58
File: base.py Project: vivisect/synapse
    async def __aenter__(self):
        assert asyncio.get_running_loop() == self.loop
        self.entered = True
        return self
Example #59
File: utils.py Project: vivisect/synapse
    def __init__(self, *args, **kwargs):
        io.StringIO.__init__(self, *args, **kwargs)
        asyncio.Event.__init__(self, loop=asyncio.get_running_loop())
        self.mesg = ''
Example #60
async def downloader(url):
    loop = asyncio.get_running_loop()
    r = await loop.run_in_executor(None, requests.get, url)
    return r
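A minimal driver; the URL is illustrative, and any keyword arguments to requests.get would need functools.partial, since run_in_executor only forwards positional arguments:

import asyncio

resp = asyncio.run(downloader("https://example.com"))
print(resp.status_code)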