示例#1
0
文件: server.py 项目: nrryuya/trinity
def _test() -> None:
    """Ad-hoc manual test: run a ``FullServer`` syncing the Ropsten chain.

    Command-line flags:
        -db        path to the LevelDB directory (required)
        -debug     enable DEBUG level on the server logger
        -bootnodes comma-separated enode URIs (defaults to ROPSTEN_BOOTNODES)
        -nodekey   path to a file holding this node's private key

    Runs until SIGINT/SIGTERM is received, then cancels the server and
    stops the event loop.

    NOTE(review): relies on module-level names (``logging``, ``asyncio``,
    ``Node``, ``FullServer``) being imported at file scope — confirm.
    """
    import argparse
    from pathlib import Path
    import signal

    from eth.chains.ropsten import ROPSTEN_GENESIS_HEADER

    from p2p import ecies
    from p2p.constants import ROPSTEN_BOOTNODES

    from trinity.constants import ROPSTEN_NETWORK_ID
    from trinity._utils.chains import load_nodekey

    from tests.core.integration_test_helpers import (
        FakeAsyncLevelDB, FakeAsyncHeaderDB, FakeAsyncChainDB, FakeAsyncRopstenChain)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    parser.add_argument('-bootnodes', type=str, default=[])
    parser.add_argument('-nodekey', type=str)

    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()
    db = FakeAsyncLevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(db)
    chaindb = FakeAsyncChainDB(db)
    # Ensure the genesis header exists so the server can sync from scratch.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    chain = FakeAsyncRopstenChain(db)

    # NOTE: Since we may create a different priv/pub key pair every time we run this, remote nodes
    # may try to establish a connection using the pubkey from one of our previous runs, which will
    # result in lots of DecryptionErrors in receive_handshake().
    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    port = 30303
    if args.bootnodes:
        bootstrap_nodes = args.bootnodes.split(',')
    else:
        bootstrap_nodes = ROPSTEN_BOOTNODES
    bootstrap_nodes = [Node.from_uri(enode) for enode in bootstrap_nodes]

    server = FullServer(
        privkey,
        port,
        chain,
        chaindb,
        headerdb,
        db,
        ROPSTEN_NETWORK_ID,
        bootstrap_nodes=bootstrap_nodes,
    )
    server.logger.setLevel(log_level)

    # Translate SIGINT/SIGTERM into an asyncio.Event so shutdown can be
    # awaited from a coroutine instead of a raw signal handler.
    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        # Wait for the signal flag, then tear everything down.
        await sigint_received.wait()
        await server.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(server.run())
    loop.run_forever()
    loop.close()
示例#2
0
 def __init__(self, *args: Any, **kwargs: Any) -> None:
     """Initialise subscription/message buffers and their readiness flags.

     All positional and keyword arguments are forwarded to the superclass.
     """
     super().__init__(*args, **kwargs)
     self._subscription = set()
     # Per-topic FIFO buffers of received messages.
     self._messages = defaultdict(deque)
     # FIX: the explicit ``loop=`` argument to asyncio.Event was deprecated
     # in Python 3.8 and removed in 3.10 (it raised TypeError there).  The
     # events now bind to the running loop lazily on first use, which is
     # equivalent when they are only touched from ``self.loop``.
     self._messages_ready = asyncio.Event()
     self._subscription_ready = asyncio.Event()
 def __init__(self, worker_pool):
     """Track submission/completion counts for *worker_pool*."""
     # Pool whose work we are counting.
     self._worker_pool = worker_pool
     # Nothing submitted or finished yet.
     self._n_submitted = self._n_complete = 0
     self._waiting = False  # no caller is blocked on completion yet
     self._done = asyncio.Event()  # signalled when outstanding work drains
示例#4
0
    async def typeracer(self, ctx, difficulty: str = "medium", length=5):
        """Run a typeracing game in the invoking channel.

        Args:
            ctx: Command invocation context.
            difficulty: One of ``"easy"``, ``"medium"``, ``"hard"``; selects
                the word-list file ``etc/words_<difficulty>.txt``.
            length: Number of words in the race text (must be 1-20).
        """
        if length not in range(
                1, 21) or difficulty.lower() not in ("easy", "medium", "hard"):
            embed = discord.Embed(
                title="🏁 " + self._("Typeracer"),
                description=self.
                _("Invalid data entered! Check `{prefix}help typeracer` for more information."
                  ).format(prefix=ctx.prefix),
                color=self.bot.errorColor)
            await ctx.send(embed=embed)
            return

        embed = discord.Embed(
            title="🏁 " + self._("Typeracing begins in 10 seconds!"),
            description=self._("Prepare your keyboard of choice!"),
            color=self.bot.embedBlue)
        await ctx.send(embed=embed)
        await asyncio.sleep(10)
        # Pick `length` random words from the difficulty-specific word list.
        words_path = Path(self.bot.BASE_DIR, 'etc',
                          f'words_{difficulty.lower()}.txt')
        with open(words_path, 'r') as fp:
            words = fp.readlines()
        text = []
        words = [x.strip() for x in words]
        for i in range(0, length):
            text.append(random.choice(words))
        text = " ".join(text)
        typeracer_text = text  #The raw text that needs to be typed
        text = fill(text, 60)  #Limit a line to 60 chars, then \n
        tempimg_path = Path(self.bot.BASE_DIR, 'temp', 'typeracer.png')

        # Render the text as an image so it cannot be copy-pasted.
        async def create_image():
            img = Image.new("RGBA", (1, 1),
                            color=0)  #img of size 1x1 full transparent
            draw = ImageDraw.Draw(img)
            font = ImageFont.truetype('arial.ttf', 40)  #Font
            textwidth, textheight = draw.textsize(
                text, font)  #Size text will take up on image
            margin = 20
            img = img.resize(
                (textwidth + margin,
                 textheight + margin))  #Resize image to size of text
            draw = ImageDraw.Draw(
                img)  #This needs to be redefined after resizing image
            draw.text((margin / 2, margin / 2), text, font=font,
                      fill="white")  #Draw the text in between the two margins
            img.save(tempimg_path)
            with open(tempimg_path, 'rb') as fp:
                embed = discord.Embed(
                    description="🏁 " +
                    self._("Type in text from above as fast as you can!"),
                    color=self.bot.embedBlue)
                await ctx.send(embed=embed,
                               file=discord.File(fp, 'snedtyperace.png'))
            os.remove(tempimg_path)

        self.bot.loop.create_task(create_image())

        # Maps finisher -> elapsed seconds; dict insertion order preserves
        # finishing order, so the first key is the winner.
        winners = {}
        ending = asyncio.Event()
        start_time = datetime.datetime.utcnow().timestamp()

        #on_message, but not really
        def tr_check(message):
            if ctx.channel.id == message.channel.id and message.channel == ctx.channel:
                if typeracer_text.lower() == message.content.lower():
                    winners[message.author] = datetime.datetime.utcnow(
                    ).timestamp() - start_time  #Add winner to list
                    self.bot.loop.create_task(message.add_reaction("✅"))
                    ending.set(
                    )  #Set the event ending, which starts the ending code
                #If it is close enough, we will add a marker to show that it is incorrect
                elif lev.distance(typeracer_text.lower(),
                                  message.content.lower()) < 3:
                    self.bot.loop.create_task(message.add_reaction("❌"))

        #This is basically an on_message created temporarily, since the check will never return True
        listen_for_msg = ctx.bot.loop.create_task(
            self.bot.wait_for('message', check=tr_check))

        #Wait for ending to be set, which happens on the first message that meets check
        try:
            await asyncio.wait_for(ending.wait(), timeout=60)
        except asyncio.TimeoutError:
            embed = discord.Embed(
                title="🏁 " + self._("Typeracing results"),
                description=self.
                _("Nobody was able to complete the typerace within **60** seconds. Typerace cancelled."
                  ),
                color=self.bot.errorColor)
            await ctx.send(embed=embed)
        else:
            # Someone won: give everyone else a 10-second grace period, during
            # which tr_check keeps collecting additional finishers.
            embed = discord.Embed(
                title="🏁 " + self._("First Place"),
                description=self.
                _("{winner} finished first, everyone else has **10 seconds** to submit their reply!"
                  ).format(winner=list(winners.keys())[0].mention),
                color=self.bot.embedGreen)
            await ctx.send(embed=embed)
            await asyncio.sleep(10)
            desc = self._("**Participants:**\n")
            for winner in winners:
                desc = (
                    f"{desc}**#{list(winners.keys()).index(winner)+1}** {winner.mention} **{round(winners[winner], 1)}** seconds - **{round((len(typeracer_text)/5) / (winners[winner] / 60))}**WPM\n"
                )
            embed = discord.Embed(title="🏁 " + self._("Typeracing results"),
                                  description=desc,
                                  color=self.bot.embedGreen)
            await ctx.send(embed=embed)
        finally:
            listen_for_msg.cancel()  #Stop listening for messages
示例#5
0
 def __init__(self):
     """Set up cross-thread receive signalling state."""
     super().__init__()
     # One flag for thread-land, one for coroutine-land.
     self.receive_event = threading.Event()
     self.async_receive_event = asyncio.Event()
     # Event loop captured at construction time.
     self.loop = asyncio.get_event_loop()
     # No payload received yet.
     self.message = None
示例#6
0
 def run_proxy(self, *args):
     """Run the proxy app synchronously until shutdown is signalled.

     Extra positional arguments are accepted and ignored.
     """
     event_loop = ensure_event_loop()
     # Expose the shutdown flag on the instance so other code can set it.
     self.shutdown_event = asyncio.Event()
     run_app_sync(loop=event_loop, shutdown_event=self.shutdown_event)
示例#7
0
 def __init__(self, configuration_prefix):
     """Load env config under *configuration_prefix*; start disconnected."""
     # Environment-driven configuration, namespaced by the given prefix.
     self.config = config.from_env(configuration_prefix)
     # Not connected yet: protocol/channel get assigned later.
     self.protocol = None
     self.channel = None
     self.closed_event = asyncio.Event()  # signalled once the connection closes
     self.loop = asyncio.get_event_loop()
示例#8
0
async def handle_info(hass: HomeAssistant,
                      connection: websocket_api.ActiveConnection, msg: dict):
    """Handle an info request via a subscription.

    Gathers per-domain system-health info, immediately sends what is already
    available, and then streams "update" events for the slow (task-backed)
    entries as they complete, finishing with a "finish" event.  The
    subscription's unsubscribe callback sets ``stop_event``, which cancels
    the remaining work.
    """
    registrations: dict[str, SystemHealthRegistration] = hass.data[DOMAIN]
    data = {}
    pending_info = {}

    # zip() pairs each domain name with its gathered info dict; gather()
    # preserves the order of registrations.values().
    for domain, domain_data in zip(
            registrations,
            await
            asyncio.gather(*(get_integration_info(hass, registration)
                             for registration in registrations.values())),
    ):
        for key, value in domain_data["info"].items():
            # Slow values may be coroutines or tasks: schedule them and mark
            # the entry as pending; fast values are formatted immediately.
            if asyncio.iscoroutine(value):
                value = asyncio.create_task(value)
            if isinstance(value, asyncio.Task):
                pending_info[(domain, key)] = value
                domain_data["info"][key] = {"type": "pending"}
            else:
                domain_data["info"][key] = _format_value(value)

        data[domain] = domain_data

    # Confirm subscription
    connection.send_result(msg["id"])

    # Unsubscribing sets stop_event, which is observed by the wait loop below.
    stop_event = asyncio.Event()
    connection.subscriptions[msg["id"]] = stop_event.set

    # Send initial data
    connection.send_message(
        websocket_api.messages.event_message(msg["id"], {
            "type": "initial",
            "data": data
        }))

    # If nothing pending, wrap it up.
    if not pending_info:
        connection.send_message(
            websocket_api.messages.event_message(msg["id"],
                                                 {"type": "finish"}))
        return

    tasks = [asyncio.create_task(stop_event.wait()), *pending_info.values()]
    # Reverse map: task -> (domain, key), for labelling finished results.
    pending_lookup = {val: key for key, val in pending_info.items()}

    # One task is the stop_event.wait() and is always there
    while len(tasks) > 1 and not stop_event.is_set():
        # Wait for first completed task
        done, tasks = await asyncio.wait(tasks,
                                         return_when=asyncio.FIRST_COMPLETED)

        if stop_event.is_set():
            # Client unsubscribed: cancel everything still pending and bail.
            for task in tasks:
                task.cancel()
            return

        # Update subscription of all finished tasks
        for result in done:
            domain, key = pending_lookup[result]
            event_msg = {
                "type": "update",
                "domain": domain,
                "key": key,
            }

            if result.exception():
                exception = result.exception()
                _LOGGER.error(
                    "Error fetching system info for %s - %s",
                    domain,
                    key,
                    exc_info=(type(exception), exception,
                              exception.__traceback__),
                )
                event_msg["success"] = False
                event_msg["error"] = {"type": "failed", "error": "unknown"}
            else:
                event_msg["success"] = True
                event_msg["data"] = _format_value(result.result())

            connection.send_message(
                websocket_api.messages.event_message(msg["id"], event_msg))

    # NOTE(review): the remaining stop_event.wait() task is not cancelled on
    # this normal-finish path; it completes later when the subscription's
    # unsubscribe callback sets the event — confirm this is intended.
    connection.send_message(
        websocket_api.messages.event_message(msg["id"], {"type": "finish"}))
示例#9
0
import asyncio
import logging
import itertools
import sys
import time
from functools import partial

logger = logging.getLogger(__name__)
# NOTE(review): ``debug`` is assigned but never read in the visible code.
debug = True
# Prefix every record with milliseconds elapsed since program start.
logging.basicConfig(level=logging.INFO,
                    format="%(relativeCreated)-10d" + logging.BASIC_FORMAT)

# Shared work queue, event loop, and start-signal used by the worker coroutines.
q = asyncio.Queue()
loop = asyncio.get_event_loop()
ev = asyncio.Event()


async def worker(client):
    logger.info("init")
    await ev.wait()
    logger.info("start")

    r = []
    END = None

    while True:
        futs = []
        items = []

        async def bulk_get():
            nonlocal items
示例#10
0
 def __init__(self, rpc_method: str, params: Dict[str, Any]) -> None:
     """Prepare a pending RPC request for *rpc_method* with *params*."""
     # The object's identity doubles as a unique request id.
     self.id = id(self)
     self.rpc_method = rpc_method
     self.params = params
     # Signalled once the matching response has been stored in .response.
     self._event = asyncio.Event()
     self.response: Any = None
示例#11
0
文件: running.py 项目: s-soroosh/kopf
async def spawn_tasks(
        lifecycle: Optional[Callable] = None,
        registry: Optional[registries.GlobalRegistry] = None,
        standalone: bool = False,
        priority: int = 0,
        peering_name: str = peering.PEERING_DEFAULT_NAME,
        namespace: Optional[str] = None,
) -> Collection[asyncio.Task]:
    """
    Spawn all the tasks needed to run the operator.

    The tasks are properly inter-connected with the synchronisation primitives.

    Args:
        lifecycle: Handler lifecycle; defaults to
            ``lifecycles.get_default_lifecycle()``.
        registry: Handler registry; defaults to
            ``registries.get_default_registry()``.
        standalone: Passed to ``peering.Peer.detect``.
        priority: This operator's peering priority.
        peering_name: Name of the peering object to coordinate through.
        namespace: Namespace to watch (``None`` presumably means cluster-wide
            — confirm against ``queueing.watcher``).

    Returns:
        The collection of created top-level tasks.
    """
    loop = asyncio.get_running_loop()

    # The freezer and the registry are scoped to this whole task-set, to sync them all.
    lifecycle = lifecycle if lifecycle is not None else lifecycles.get_default_lifecycle()
    registry = registry if registry is not None else registries.get_default_registry()
    # NOTE(review): the explicit ``loop=`` argument was deprecated in
    # Python 3.8 and removed in 3.10 — this code targets an older asyncio.
    event_queue = asyncio.Queue(loop=loop)
    freeze_flag = asyncio.Event(loop=loop)
    should_stop = asyncio.Event(loop=loop)
    tasks = []

    # A top-level task for external stopping by setting a stop-flag. Once set,
    # this task will exit, and thus all other top-level tasks will be cancelled.
    tasks.extend([
        loop.create_task(_stop_flag_checker(should_stop)),
    ])

    # K8s-event posting. Events are queued in-memory and posted in the background.
    # NB: currently, it is a global task, but can be made per-resource or per-object.
    tasks.extend([
        loop.create_task(posting.poster(
            event_queue=event_queue)),
    ])

    # Monitor the peers, unless explicitly disabled.
    ourselves: Optional[peering.Peer] = peering.Peer.detect(
        id=peering.detect_own_id(), priority=priority,
        standalone=standalone, namespace=namespace, name=peering_name,
    )
    if ourselves:
        tasks.extend([
            loop.create_task(peering.peers_keepalive(
                ourselves=ourselves)),
            loop.create_task(queueing.watcher(
                namespace=namespace,
                resource=ourselves.resource,
                handler=functools.partial(peering.peers_handler,
                                          ourselves=ourselves,
                                          freeze=freeze_flag))),  # freeze is set/cleared
        ])

    # Resource event handling, only once for every known resource (de-duplicated).
    for resource in registry.resources:
        tasks.extend([
            loop.create_task(queueing.watcher(
                namespace=namespace,
                resource=resource,
                handler=functools.partial(handling.custom_object_handler,
                                          lifecycle=lifecycle,
                                          registry=registry,
                                          resource=resource,
                                          event_queue=event_queue,
                                          freeze=freeze_flag))),  # freeze is only checked
        ])

    # On Ctrl+C or pod termination, cancel all tasks gracefully.
    if threading.current_thread() is threading.main_thread():
        # Signal handlers can only be installed from the main thread.
        loop.add_signal_handler(signal.SIGINT, should_stop.set)
        loop.add_signal_handler(signal.SIGTERM, should_stop.set)
    else:
        logger.warning("OS signals are ignored: running not in the main thread.")

    return tasks
示例#12
0
 def __init__(self, teletask):
     """Initialise the telegram queue for *teletask*."""
     # Parent connection object this queue belongs to.
     self.teletask = teletask
     # Callbacks invoked for received telegrams.
     self.telegram_received_cbs = []
     self.queue_stopped = asyncio.Event()  # signalled when the queue stops
示例#13
0
    def __init__(
        self,
        http_port: int = 37077,
        interval: float = 0.5,
        video_path: str = "/webcam/video.mjpg",
        filenames:
        str = "/webcam/pyobs-{DAY-OBS|date:}-{FRAMENUM|string:04d}.fits",
        fits_namespaces: Optional[List[str]] = None,
        fits_headers: Optional[Dict[str, Any]] = None,
        centre: Optional[Tuple[float, float]] = None,
        rotation: float = 0.0,
        cache_size: int = 5,
        live_view: bool = True,
        flip: bool = False,
        sleep_time: int = 600,
        **kwargs: Any,
    ):
        """Creates a new BaseWebcam.

        On the receiving end, a VFS root with a HTTPFile must exist with the same name as in image_path and video_path,
        i.e. "webcam" in the default settings.

        Args:
            http_port: HTTP port for webserver.
            interval: Min interval for grabbing images.
            video_path: VFS path to video.
            filenames: Filename pattern for FITS images.
            fits_namespaces: List of namespaces for FITS headers that this camera should request.
            fits_headers: Additional FITS headers.
            centre: (x, y) tuple of camera centre.
            rotation: Rotation east of north.
            cache_size: Size of cache for previous images.
            live_view: If True, live view is served via web server.
            flip: Whether to flip around Y axis.
            sleep_time: Time in s with inactivity after which the camera should go to sleep.
        """
        Module.__init__(self, **kwargs)
        ImageFitsHeaderMixin.__init__(
            self,
            fits_namespaces=fits_namespaces,
            fits_headers=fits_headers,
            centre=centre,
            rotation=rotation,
            filenames=filenames,
        )

        # store
        self._is_listening = False
        self._port = http_port
        self._interval = interval
        # Signalled each time a freshly grabbed frame becomes available.
        self._new_image_event = asyncio.Event()
        self._video_path = video_path
        self._frame_num = 0
        self._live_view = live_view
        self._image_type = ImageType.OBJECT
        # Serialises access to the image-request list below.
        self._image_request_lock = asyncio.Lock()
        self._image_requests: List[ImageRequest] = []
        self._next_image: Optional[NextImage] = None
        self._last_image: Optional[LastImage] = None
        self._last_time = 0.0
        self._flip = flip
        self._sleep_time = sleep_time

        # active
        self._active = False
        self._active_time = 0.0
        self.add_background_task(self._active_update)

        # image cache
        self._cache = DataCache(cache_size)

        # define web server
        # NOTE(review): the "/(unknown)" route literal looks like a mangled
        # placeholder (likely a filename pattern) — confirm against upstream.
        self._app = web.Application()
        self._app.add_routes([
            web.get("/", self.web_handler),
            web.get("/video.mjpg", self.video_handler),
            web.get("/(unknown)", self.image_handler),
        ])
        self._runner = web.AppRunner(self._app)
        self._site: Optional[web.TCPSite] = None
示例#14
0
    def __init__(
        self,
        *,
        compression: typing.Optional[str] = shard.GatewayCompression.
        PAYLOAD_ZLIB_STREAM,
        initial_activity: typing.Optional[presences.Activity] = None,
        initial_idle_since: typing.Optional[datetime.datetime] = None,
        initial_is_afk: bool = False,
        initial_status: presences.Status = presences.Status.ONLINE,
        intents: typing.Optional[intents_.Intents] = None,
        large_threshold: int = 250,
        shard_id: int = 0,
        shard_count: int = 1,
        event_consumer: typing.Callable[
            [shard.GatewayShard, str, data_binding.JSONObject], None],
        http_settings: config.HTTPSettings,
        proxy_settings: config.ProxySettings,
        data_format: str = shard.GatewayDataFormat.JSON,
        token: str,
        url: str,
    ) -> None:
        """Initialise the shard's connection parameters and internal state.

        Raises:
            NotImplementedError: If *data_format* is not JSON, or if
                *compression* is anything other than payload zlib-stream.
        """

        if data_format != shard.GatewayDataFormat.JSON:
            raise NotImplementedError(
                f"Unsupported gateway data format: {data_format}")

        # Base query parameters appended to the gateway URL.
        query = {"v": _VERSION, "encoding": str(data_format)}

        if compression is not None:
            if compression == shard.GatewayCompression.PAYLOAD_ZLIB_STREAM:
                query["compress"] = "zlib-stream"
            else:
                raise NotImplementedError(
                    f"Unsupported compression format {compression}")

        # Split the given URL so its query string can be replaced below.
        scheme, netloc, path, params, _, _ = urllib.parse.urlparse(
            url, allow_fragments=True)
        new_query = urllib.parse.urlencode(query)

        self._activity = initial_activity
        # Lifecycle flags: closing is requested, closed is final.
        self._closing = asyncio.Event()
        self._closed = asyncio.Event()
        self._chunking_rate_limit = rate_limits.WindowedBurstRateLimiter(
            f"shard {shard_id} chunking rate limit",
            *_CHUNKING_RATELIMIT,
        )
        self._event_consumer = event_consumer
        self._handshake_completed = asyncio.Event()
        # NaN marks "no heartbeat measured yet".
        self._heartbeat_latency = float("nan")
        self._http_settings = http_settings
        self._idle_since = initial_idle_since
        self._intents = intents
        self._is_afk = initial_is_afk
        self._large_threshold = large_threshold
        self._last_heartbeat_ack_received = float("nan")
        self._last_heartbeat_sent = float("nan")
        self._logger = logging.getLogger(f"hikari.gateway.{shard_id}")
        self._proxy_settings = proxy_settings
        self._run_task: typing.Optional[asyncio.Task[None]] = None
        self._seq: typing.Optional[int] = None
        self._session_id: typing.Optional[str] = None
        self._shard_count = shard_count
        self._shard_id = shard_id
        self._status = initial_status
        self._token = token
        self._total_rate_limit = rate_limits.WindowedBurstRateLimiter(
            f"shard {shard_id} total rate limit",
            *_TOTAL_RATELIMIT,
        )
        # Rebuild the URL with our query string; the fragment is dropped.
        self._url = urllib.parse.urlunparse(
            (scheme, netloc, path, params, new_query, ""))
        self._user_id: typing.Optional[snowflakes.Snowflake] = None
        self._ws: typing.Optional[_V6GatewayTransport] = None
示例#15
0
async def _get_directly_linked_peers_without_handshake(
        peer1_class=LESPeer, peer1_chaindb=None,
        peer2_class=LESPeer, peer2_chaindb=None):
    """See get_directly_linked_peers().

    Neither the P2P handshake nor the sub-protocol handshake will be performed here.

    Returns a ``(peer1, peer2)`` pair whose readers/writers are cross-wired
    in memory, sharing the secrets derived from a real auth exchange.
    """
    if peer1_chaindb is None:
        peer1_chaindb = get_fresh_mainnet_chaindb()
    if peer2_chaindb is None:
        peer2_chaindb = get_fresh_mainnet_chaindb()
    peer1_private_key = ecies.generate_privkey()
    peer2_private_key = ecies.generate_privkey()
    # Each peer's "remote" is the other peer's public identity.
    peer1_remote = kademlia.Node(
        peer2_private_key.public_key, kademlia.Address('0.0.0.0', 0, 0))
    peer2_remote = kademlia.Node(
        peer1_private_key.public_key, kademlia.Address('0.0.0.0', 0, 0))
    initiator = auth.HandshakeInitiator(peer1_remote, peer1_private_key)
    peer2_reader = asyncio.StreamReader()
    peer1_reader = asyncio.StreamReader()
    # Link the peer1's writer to the peer2's reader, and the peer2's writer to the
    # peer1's reader.
    peer2_writer = type(
        "mock-streamwriter",
        (object,),
        {"write": peer1_reader.feed_data,
         "close": lambda: None}
    )
    peer1_writer = type(
        "mock-streamwriter",
        (object,),
        {"write": peer2_reader.feed_data,
         "close": lambda: None}
    )

    peer1, peer2 = None, None
    handshake_finished = asyncio.Event()

    async def do_handshake():
        nonlocal peer1, peer2
        aes_secret, mac_secret, egress_mac, ingress_mac = await auth._handshake(
            initiator, peer1_reader, peer1_writer)

        # Need to copy those before we pass them on to the Peer constructor because they're
        # mutable. Also, the 2nd peer's ingress/egress MACs are reversed from the first peer's.
        peer2_ingress = egress_mac.copy()
        peer2_egress = ingress_mac.copy()

        peer1 = peer1_class(
            remote=peer1_remote, privkey=peer1_private_key, reader=peer1_reader,
            writer=peer1_writer, aes_secret=aes_secret, mac_secret=mac_secret,
            egress_mac=egress_mac, ingress_mac=ingress_mac, chaindb=peer1_chaindb,
            network_id=1)

        peer2 = peer2_class(
            remote=peer2_remote, privkey=peer2_private_key, reader=peer2_reader,
            writer=peer2_writer, aes_secret=aes_secret, mac_secret=mac_secret,
            egress_mac=peer2_egress, ingress_mac=peer2_ingress, chaindb=peer2_chaindb,
            network_id=1)

        handshake_finished.set()

    # Drive the initiator side in the background while we play the
    # responder side inline below.
    asyncio.ensure_future(do_handshake())

    responder = auth.HandshakeResponder(peer2_remote, peer2_private_key)
    auth_msg = await peer2_reader.read(constants.ENCRYPTED_AUTH_MSG_LEN)

    # Can't assert return values, but checking that the decoder doesn't raise
    # any exceptions at least.
    _, _ = responder.decode_authentication(auth_msg)

    peer2_nonce = keccak(os.urandom(constants.HASH_LEN))
    auth_ack_msg = responder.create_auth_ack_message(peer2_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)
    peer2_writer.write(auth_ack_ciphertext)

    # Wait for do_handshake() to derive secrets and build both peers.
    await handshake_finished.wait()

    return peer1, peer2
示例#16
0
 def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
     """Store *loop* and set up completion signalling state.

     Args:
         loop: The event loop this object's futures/waiters belong to.
     """
     self._loop = loop
     self._exc = None  # type: Optional[Exception]
     # FIX: ``asyncio.Event(loop=...)`` was deprecated in Python 3.8 and
     # removed in 3.10; the event now binds to the running loop lazily,
     # which is equivalent as long as it is only used from ``self._loop``.
     self._event = asyncio.Event()
     self._waiters = collections.deque()  # type: Deque[asyncio.Future[Any]]
示例#17
0
async def get_directly_linked_peers_without_handshake(peer1_class=LESPeer,
                                                      peer1_headerdb=None,
                                                      peer2_class=LESPeer,
                                                      peer2_headerdb=None):
    """See get_directly_linked_peers().

    Neither the P2P handshake nor the sub-protocol handshake will be performed here.

    Returns a ``(peer1, peer2)`` pair whose readers/writers are cross-wired
    in memory and whose MAC states are verified to mirror each other.
    """
    cancel_token = CancelToken("get_directly_linked_peers_without_handshake")
    if peer1_headerdb is None:
        peer1_headerdb = get_fresh_mainnet_headerdb()
    if peer2_headerdb is None:
        peer2_headerdb = get_fresh_mainnet_headerdb()
    peer1_private_key = ecies.generate_privkey()
    peer2_private_key = ecies.generate_privkey()
    # Each peer's "remote" is the other peer's public identity.
    peer1_remote = kademlia.Node(peer2_private_key.public_key,
                                 kademlia.Address('0.0.0.0', 0, 0))
    peer2_remote = kademlia.Node(peer1_private_key.public_key,
                                 kademlia.Address('0.0.0.0', 0, 0))
    use_eip8 = False
    initiator = auth.HandshakeInitiator(peer1_remote, peer1_private_key,
                                        use_eip8, cancel_token)
    peer2_reader = asyncio.StreamReader()
    peer1_reader = asyncio.StreamReader()
    # Link the peer1's writer to the peer2's reader, and the peer2's writer to the
    # peer1's reader.
    peer2_writer = MockStreamWriter(peer1_reader.feed_data)
    peer1_writer = MockStreamWriter(peer2_reader.feed_data)

    peer1, peer2 = None, None
    handshake_finished = asyncio.Event()

    async def do_handshake():
        nonlocal peer1
        aes_secret, mac_secret, egress_mac, ingress_mac = await auth._handshake(
            initiator, peer1_reader, peer1_writer, cancel_token)

        peer1 = peer1_class(remote=peer1_remote,
                            privkey=peer1_private_key,
                            reader=peer1_reader,
                            writer=peer1_writer,
                            aes_secret=aes_secret,
                            mac_secret=mac_secret,
                            egress_mac=egress_mac,
                            ingress_mac=ingress_mac,
                            headerdb=peer1_headerdb,
                            network_id=1)

        handshake_finished.set()

    # Drive the initiator side in the background while we play the
    # responder side inline below.
    asyncio.ensure_future(do_handshake())

    use_eip8 = False
    responder = auth.HandshakeResponder(peer2_remote, peer2_private_key,
                                        use_eip8, cancel_token)
    auth_cipher = await peer2_reader.read(constants.ENCRYPTED_AUTH_MSG_LEN)

    initiator_ephemeral_pubkey, initiator_nonce, _ = decode_authentication(
        auth_cipher, peer2_private_key)
    responder_nonce = keccak(os.urandom(constants.HASH_LEN))
    auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)
    peer2_writer.write(auth_ack_ciphertext)

    # Wait for do_handshake() to derive peer1's secrets before deriving
    # peer2's and comparing the two.
    await handshake_finished.wait()

    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce, responder_nonce, initiator_ephemeral_pubkey,
        auth_cipher, auth_ack_ciphertext)
    # The two sides' MAC states must mirror each other (egress <-> ingress).
    assert egress_mac.digest() == peer1.ingress_mac.digest()
    assert ingress_mac.digest() == peer1.egress_mac.digest()
    peer2 = peer2_class(remote=peer2_remote,
                        privkey=peer2_private_key,
                        reader=peer2_reader,
                        writer=peer2_writer,
                        aes_secret=aes_secret,
                        mac_secret=mac_secret,
                        egress_mac=egress_mac,
                        ingress_mac=ingress_mac,
                        headerdb=peer2_headerdb,
                        network_id=1)

    return peer1, peer2
示例#18
0
 def __init__(self,
              config: MemoryOperationImplementationNetworkConfig) -> None:
     """Cache configured operations and create the completion flag."""
     super().__init__(config)
     # Shortcut to the operations declared in the network config.
     self.operations = self.config.operations
     # Signalled once the scheduled operations have completed.
     self.completed_event = asyncio.Event()
示例#19
0
import plugins
client = plugins.client  # type: discord.Client

# markovify is optional; its absence is only logged, not fatal.
try:
    import markovify
except ImportError:
    logging.warning(
        "Markovify could not be imported and as such !summary +strict will not work."
    )

# The messages stored per session, where every key is a channel id
stored_messages = defaultdict(partial(deque, maxlen=10000))
logs_from_limit = 5000  # presumably caps history fetches — confirm at call site
max_summaries = 5
max_admin_summaries = 15
# NOTE(review): despite the name, ``update_task`` is an asyncio.Event,
# created in the set state — confirm intended usage.
update_task = asyncio.Event()
update_task.set()

# Define some regexes for option checking in "summary" command
valid_num = re.compile(r"\*(?P<num>\d+)")
valid_member = utils.member_mention_pattern
valid_member_silent = re.compile(r"@\((?P<name>.+)\)")
valid_role = re.compile(r"<@&(?P<id>\d+)>")
valid_channel = utils.channel_mention_pattern
valid_options = ("+re", "+regex", "+case", "+tts", "+nobot", "+bot",
                 "+coherent", "+strict")

# User-facing error templates, formatted with the triggering message.
on_no_messages = "**There were no messages to generate a summary from, {0.author.name}.**"
on_fail = "**I was unable to construct a summary, {0.author.name}.**"

summary_options = Config("summary_options",
示例#20
0
 def __init__(self, probername, influxclient, dests):
     """Record probe identity, metrics client, and destination lookup."""
     self.probername = probername
     # Index destinations by their ``dest`` attribute for fast lookup.
     dest_index = {}
     for dest in dests:
         dest_index[dest.dest] = dest
     self.dests = dest_index
     self.process = None
     self.influxclient = influxclient
     self.stop_event = asyncio.Event()  # set to request shutdown
示例#21
0
 def __init__(self, network: Network, timeout: float):
     """Track client sessions for *network* with a shared *timeout*."""
     self.network = network
     # Maps each session to its watchdog task (or None).
     self.sessions: Dict[ClientSession, Optional[asyncio.Task]] = {}
     self.timeout = timeout
     # Signalled whenever a new connection gets registered.
     self.new_connection_event = asyncio.Event()
示例#22
0
 def __init__(self):
     """Create the readiness flag, initially unset."""
     self.ready_event = asyncio.Event()
示例#23
0
 def __post_init__(self):
     """Start the background reader/writer; writing is allowed immediately."""
     write_gate = asyncio.Event()
     write_gate.set()  # writes enabled from the start
     self.should_write = write_gate
     # Kick off the long-running background I/O tasks.
     self.reader = create_task(self.background_read(), name="bg-read")
     self.writer = create_task(self.background_write(), name="bg-write")
示例#24
0
文件: service.py 项目: veox/trinity
 def __init__(self) -> None:
     """Create one independent flag per service lifecycle stage."""
     # Attribute creation order matches the original explicit assignments.
     for flag_name in ("started", "stopped", "cleaned_up", "cancelled", "finished"):
         setattr(self, flag_name, asyncio.Event())
示例#25
0
 def __init__(self, runner, callback, commands):
     """Store the runner wiring and mark this worker as idle."""
     self.runner, self.callback, self.commands = runner, callback, commands
     self.ready = asyncio.Event()  # signalled once startup completes
     self.running = False
示例#26
0
def _test() -> None:
    """Ad-hoc driver: open a chain DB, connect to peers, and sync headers/blocks.

    Command-line flags:
      -db PATH       LevelDB directory (required)
      -fast / -light pick the fast or light syncer (default: regular)
      -nodekey PATH  load a persistent node key instead of generating one
      -enode URI     connect only to the given enode
      -debug         DEBUG-level logging on the syncer's own logger

    Runs the event loop until syncing finishes or SIGINT/SIGTERM arrives.
    """
    import argparse
    from pathlib import Path
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.chains.mainnet import MainnetChain, MAINNET_GENESIS_HEADER, MAINNET_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncMainnetChain, FakeAsyncRopstenChain,
        FakeAsyncHeaderDB, connect_to_peers_loop)
    from trinity.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from trinity.utils.chains import load_nodekey

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    # Root logging stays at INFO; -debug only raises the syncer's own logger.
    log_level = logging.DEBUG if args.debug else logging.INFO

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(base_db)
    chaindb = FakeAsyncChainDB(base_db)
    try:
        genesis = chaindb.get_canonical_block_header_by_number(0)
    except HeaderNotFound:
        # Fresh database: bootstrap it with the ropsten genesis header.
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_pool_class: Type[Union[ETHPeerPool, LESPeerPool]] = ETHPeerPool
    if args.light:
        peer_pool_class = LESPeerPool

    # Identify the network from whichever genesis header is in the DB.
    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        network_id = RopstenChain.network_id
        vm_config = ROPSTEN_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        network_id = MainnetChain.network_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncMainnetChain
    else:
        # Fix: the message was previously passed as two exception args
        # ("Unknown genesis: %s", genesis) and never interpolated.
        raise RuntimeError(f"Unknown genesis: {genesis}")

    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=headerdb,
        network_id=network_id,
        vm_configuration=vm_config,
    )

    peer_pool = peer_pool_class(privkey=privkey, context=context)

    if args.enode:
        nodes = (Node.from_uri(args.enode),)
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))
    chain = chain_class(base_db)

    # Pick the syncer implementation requested on the command line.
    # (Annotation-only declaration: every branch below assigns it, and
    # ``None`` is not a valid value of this type.)
    syncer: BaseHeaderChainSyncer
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, cast(ETHPeerPool, peer_pool))
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb,
                                  cast(LESPeerPool, peer_pool))
    else:
        syncer = RegularChainSyncer(chain, chaindb,
                                    cast(ETHPeerPool, peer_pool))
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    # Shut everything down cleanly on SIGINT/SIGTERM or when syncing finishes.
    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
示例#27
0
 def create_event(self):
     """Return a brand-new :class:`asyncio.Event` (initially unset)."""
     event = asyncio.Event()
     return event
示例#28
0
 def __init__(self, url):
     """Store the target URL; no websocket connection is made here."""
     self.url = url
     self._ws_connected = asyncio.Event()
     self.ws = None  # presumably filled in once a connection is opened — confirm
示例#29
0
    async def __anit__(self, path, **kwargs):
        """Async init: open (or create) the LMDB environment at *path*.

        Keyword arguments are treated as LMDB open options, merged with any
        previously saved options from the ``.opts.yaml`` sidecar file next to
        *path*. Raises ``s_exc.BadArg`` when no ``map_size`` is available.
        """

        await s_base.Base.__anit__(self)

        kwargs.setdefault('map_size', s_const.gibibyte)
        kwargs.setdefault('lockmemory', False)

        opts = kwargs

        self.path = pathlib.Path(path)

        # Sidecar file that persists the options used for this slab.
        self.optspath = self.path.with_suffix('.opts.yaml')

        # Previously saved options override whatever the caller passed in.
        if self.optspath.exists():
            opts.update(s_common.yamlload(self.optspath))

        initial_mapsize = opts.get('map_size')
        if initial_mapsize is None:
            raise s_exc.BadArg('Slab requires map_size')

        # Never map smaller than the existing database file on disk.
        mdbpath = self.path / 'data.mdb'
        if mdbpath.exists():
            mapsize = max(initial_mapsize, os.path.getsize(mdbpath))
        else:
            mapsize = initial_mapsize

        # save the transaction deltas in case of error...
        self.xactops = []
        self.recovering = False

        opts.setdefault('max_dbs', 128)
        opts.setdefault('writemap', True)

        # maxsize/growsize/lockmemory are this class's own knobs, not LMDB
        # options, so they are popped before opts reaches lmdb.open().
        self.maxsize = opts.pop('maxsize', None)
        self.growsize = opts.pop('growsize', None)

        self.readonly = opts.get('readonly', False)
        self.lockmemory = opts.pop('lockmemory', False)

        # Round the size via _mapsizeround, but never exceed the configured
        # maximum.  NOTE(review): self.mapsize is recorded here while
        # lmdb.open() below still receives opts['map_size'] — confirm the
        # rounded value is applied elsewhere (e.g. by the resize logic).
        self.mapsize = _mapsizeround(mapsize)
        if self.maxsize is not None:
            self.mapsize = min(self.mapsize, self.maxsize)

        self._saveOptsFile()

        self.lenv = lmdb.open(path, **opts)

        # NOTE(review): presumably the set of active scans that must be
        # tracked across map resizes — confirm against the scan code.
        self.scans = set()

        self.dirty = False
        if self.readonly:
            # Read-only slabs never hold a write transaction open.
            self.xact = None
            self.txnrefcount = 0
        else:
            self._initCoXact()

        self.resizeevent = threading.Event(
        )  # triggered when a resize event occurred
        self.lockdoneevent = asyncio.Event(
        )  # triggered when a memory locking finished

        # LMDB layer uses these for status reporting
        self.locking_memory = False
        self.prefaulting = False
        self.max_could_lock = 0
        self.lock_progress = 0
        self.lock_goal = 0

        if self.lockmemory:

            async def memlockfini():
                # Wake the memory-lock loop so it can observe shutdown, then
                # wait for the background task to finish.
                self.resizeevent.set()
                await self.memlocktask

            self.memlocktask = s_coro.executor(self._memorylockloop)
            self.onfini(memlockfini)

        self.onfini(self._onCoFini)
        self.schedCoro(self._runSyncLoop())
示例#30
0
 def __init__(self, real_syncer):
     """Wrap *real_syncer*; both coordination flags start unset."""
     self._ready = asyncio.Event()
     self._headers_requested = asyncio.Event()
     self._real_syncer = real_syncer