예제 #1
0
    def stopall(self, loop):
        '''Cancel all running tasks and put unretrieved input items back into the queue.

        Finished tasks have their results collected (a pending
        KeyboardInterrupt from the interrupted task is logged and swallowed);
        still-running tasks are cancelled and then awaited so their
        results/cancellation are observed.  Returns a tuple
        ``(results, toget_new)`` where ``toget_new`` holds input items that
        were never processed.
        '''
        lg.warning('In stopall')

        results = []
        cancelled = []  # Tasks cancelled, we'll wait for their return
        # TODO: Need to cancel only our threads!
        for i in Task.all_tasks():
            lg.debug('TASK {:04x}: {}'.format(id(i) % (1 << 16), i))
            if i.done():  # Finished
                try:
                    res = i.result()
                    lg.debug('RESULT: {}'.format(res))
                    results.append(res)
                except KeyboardInterrupt:  # The task that got interrupt
                    lg.debug('INTERRUPTED: {}'.format(self.args.get(i)))
                continue

            #res.append(self.args[i])
            i.cancel()
            cancelled.append(i)

        print('CANCELLED: {} {}'.format(len(cancelled), len(Task.all_tasks())))
        # Wait for each cancelled task to actually terminate; each one may end
        # with a result, a KeyboardInterrupt, or a CancelledError.
        for i in cancelled:
            #for i in Task.all_tasks():
            lg.debug('TASK {:04x}: {}'.format(id(i) % (1 << 16), i))
            try:
                res = loop.run_until_complete(i)
            except KeyboardInterrupt:
                lg.debug('after ruc: interrupt: {}'.format(self.args.get(i)))
                continue
            except CancelledError:
                lg.debug('after ruc: cancel')
                continue
            lg.info('AFTER RUC: {}'.format(res))
            results.append(res)

        # Results have been through the done_task already, so actually there's no need in them
        if 0:
            lg.info('results: {}, args: {}'.format(len(results),
                                                   len(self.args)))
        else:
            lg.debug('results: {}, args: {}'.format(results, self.args))
        lg.info('toget: {}'.format(self.toget.qsize()))

        # Re-queue the argument of every task except the grimreaper/extender
        # helper tasks.
        toget_new = [
            v for k, v in self.args.items()
            if k not in (self.grimreaper_task, self.extender_task)
        ]

        lg.info('Retrieving queue')
        # Also drain whatever is still sitting in the input queue itself.
        task = ensure_future(self.deplete_queue(loop))
        res = loop.run_until_complete(task)
        toget_new.extend(res)

        return results, toget_new
예제 #2
0
File: overseer.py  Project: Kiltres/Monocle
 def update_coroutines_count(self, simple=True, loop=LOOP):
     """Refresh ``self.coroutines_count`` from the loop's task set.

     With ``simple`` the raw task count is used; otherwise only tasks that
     have not finished are counted.
     """
     try:
         tasks = Task.all_tasks(loop)
         if simple:
             self.coroutines_count = len(tasks)
         else:
             self.coroutines_count = sum(not t.done() for t in tasks)
     except RuntimeError:
         # Set changed size during iteration
         # NOTE(review): the sentinel is the *string* '-1', not an int —
         # presumably only ever displayed; confirm before changing its type.
         self.coroutines_count = '-1'
예제 #3
0
 def update_coroutines_count(self, simple=True, loop=LOOP):
     """Recompute and store the number of coroutines on *loop*.

     ``simple=True`` counts every task; otherwise only unfinished ones.
     """
     try:
         known = Task.all_tasks(loop)
         count = len(known) if simple else sum(1 for t in known if not t.done())
     except RuntimeError:
         # Set changed size during iteration
         count = '-1'
     self.coroutines_count = count
예제 #4
0
async def print_performance_metrics() -> None:
    """
    Periodically log some performance metrics.
    """
    loop = asyncio.get_event_loop()
    while True:
        # Measure elapsed time after a sleep.
        # If the event loop is clogged, sleep will take more time to execute.
        # For instance "sleep(1)" might take 1.5s to execute.
        started_at = loop.time()
        await asyncio.sleep(INTERVAL)
        delta = (loop.time() - started_at) - INTERVAL

        # Number of tasks scheduled on the event loop.
        active_tasks = sum(1 for t in Task.all_tasks(loop) if not t.done())

        vmem = psutil.virtual_memory()
        LOGGER.get().info(
            "performance_stats",
            event_loop_error=f"{delta/INTERVAL:.2%}",
            event_loop_delta=f"{delta*1000:.3f}ms",
            active_tasks=active_tasks,
            cpu=f'{psutil.cpu_percent()/100:.2%}',
            memory=f'{vmem.used / vmem.total:.2%}',
        )
예제 #5
0
File: test_event.py  Project: vdt/asphalt
 async def test_dispatch_event_cancel(self, source):
     """Test that dispatching an event when there are no listeners will still work."""
     source.event_a.connect(lambda event: None)
     future = source.event_a.dispatch()
     future.cancel()
     # dispatch() ran the notification as a background task; pick it out
     # (the only task besides the one running this test) and await it so
     # the cancellation is fully processed before the test ends.
     task = next(t for t in Task.all_tasks()
                 if t is not Task.current_task())
     await task
예제 #6
0
    def all_tasks(loop: Optional[asyncio.AbstractEventLoop] = None) -> Set[asyncio.Task]:
        """Return a set of all tasks for the loop."""
        from asyncio import Task

        # Default to the currently running loop when none is supplied.
        target_loop = loop if loop is not None else get_running_loop()
        return {task for task in Task.all_tasks(target_loop) if not task.done()}
예제 #7
0
def progress_bar(requests, loop, freq=0.01):
    """Render a single-line terminal progress bar for *requests* tasks.

    Counts the finished tasks on the event loop, redraws the bar in place
    (leading carriage return), and reschedules itself every *freq* seconds
    until every request is done.

    :param requests: total number of request tasks being tracked
    :param loop: event loop used to enumerate tasks and to reschedule
    :param freq: redraw interval in seconds
    """
    width, _ = get_terminal_size()
    done_count = len(tuple(filter(lambda t: t.done(), Task.all_tasks())))
    tasks_left_count = requests - done_count
    progress = int(done_count / requests * width)
    print("\r" + "*" * progress + "." * (width - progress), end="")
    if tasks_left_count > 0:
        # BUG FIX: forward freq so a caller-supplied interval survives the
        # reschedule (previously the default 0.01 was silently restored).
        loop.call_later(freq, progress_bar, requests, loop, freq)
예제 #8
0
    def close(self):
        """Cancel pending tasks, disconnect cleanly, and shut down the pool."""
        loop = self.eventloop
        # A string eventloop acts as a "not really running" placeholder here —
        # skip the async teardown in that case.
        if loop and not isinstance(loop, str):
            for task in Task.all_tasks(loop):
                task.cancel()

            # Disconnect from the loop's own thread and block until done.
            run_coroutine_threadsafe(self.disconnect(expected=True),
                                     loop).result()

        self.__pool.shutdown()
예제 #9
0
	def signal_handler(self):
		"""Signal handler for asynchronous event loop

		Stops it gracefully: shuts down the thread executor without
		waiting, cancels every task except the current one, and stops
		the loop.

		NOTE(review): ``open("/dev/null")`` opens the file read-only and
		rebinds ``sys.stderr`` permanently (the name is not restored when
		the ``with`` block exits, and the file object is closed) — any
		later write to ``sys.stderr`` will fail.  Presumably this is meant
		to silence cancellation noise; confirm before reusing stderr.
		"""
		with open("/dev/null") as sys.stderr:
			self._thread_executor.shutdown(wait=False)
			[task.cancel() for task in Task.all_tasks() if task is not Task.current_task()]
			self._event_loop.stop()
async def test_workflow(
    client,
    fake_project_data,
    logged_user,
    computational_system_mock,
    storage_subsystem_mock,
):
    """End-to-end project workflow: list/create/update/get/delete via the API."""
    # empty list
    projects = await _request_list(client)
    assert not projects

    # creation
    await _request_create(client, fake_project_data)

    # list not empty
    projects = await _request_list(client)
    assert len(projects) == 1
    for key in projects[0].keys():
        if key not in ("uuid", "prjOwner", "creationDate", "lastChangeDate"):
            assert projects[0][key] == fake_project_data[key]

    modified_project = deepcopy(projects[0])
    modified_project["name"] = "some other name"
    modified_project["description"] = "John Raynor killed Kerrigan"
    # Rename the first workbench node to exercise key changes on update.
    modified_project["workbench"]["ReNamed"] = modified_project[
        "workbench"].pop(list(modified_project["workbench"].keys())[0])
    modified_project["workbench"]["ReNamed"]["position"]["x"] = 0
    # modify
    pid = modified_project["uuid"]
    await _request_update(client, modified_project, pid)

    # list not empty
    projects = await _request_list(client)
    assert len(projects) == 1

    for key in projects[0].keys():
        if key not in ("lastChangeDate", ):
            assert projects[0][key] == modified_project[key]

    # get
    project = await _request_get(client, pid)
    for key in project.keys():
        if key not in ("lastChangeDate", ):
            assert project[key] == modified_project[key]

    # delete
    await _request_delete(client, pid)

    # wait for delete tasks to finish
    # NOTE: reaches into the private _coro attribute to find the
    # "delete_project" background coroutine by name.
    tasks = Task.all_tasks()
    for task in tasks:
        if "delete_project" in task._coro.__name__:
            await wait_for(task, timeout=60.0)

    # list empty
    projects = await _request_list(client)
    assert not projects
예제 #11
0
File: common.py  Project: SAPikachu/nyapass
    def _cleanup(disable_pending_task_warnings=False):
        """Shut down every server instance, then stop and close the loop."""
        shutdowns = [instance.shutdown() for instance in instances]
        loop.run_until_complete(asyncio.gather(*shutdowns))
        loop.stop()
        if disable_pending_task_warnings:
            # Retrieving each result suppresses asyncio's
            # "task exception was never retrieved" warnings.
            for pending in Task.all_tasks():
                pending.result()

        loop.close()
예제 #12
0
        async def cancel_tasks():
            """Cancel every task on the loop except this one, then await them."""
            this_task = Task.current_task(self._aio_loop)
            others = [
                t for t in Task.all_tasks(self._aio_loop)
                if t is not this_task
            ]

            for t in others:
                t.cancel()

            # Wait for the cancellations to be observed.
            await asyncio.gather(*others)
예제 #13
0
async def monitor(request):
    """Debug endpoint: return top memory allocations and task stats as JSON."""
    snapshot = tracemalloc.take_snapshot()
    top_stats = snapshot.statistics('lineno')

    tasks = Task.all_tasks()

    return json({
        "top_20_trace": [str(x) for x in top_stats[:20]],
        "tasks": {
            "number": len(tasks),
            # BUG FIX: materialize the mapping — a bare map() object is not
            # JSON-serializable, so encoding the response would fail.
            "array": [show_coro(t) for t in tasks],
        }
    })
예제 #14
0
    def test_broken_line_doesnt_interrupt(self):
        """A malformed line ("PRIVMSG" with no params) must not stop parsing of later lines."""
        conn, out, proto = self.make_proto()
        # Three lines in one payload: a valid COMMAND, a broken PRIVMSG,
        # and a valid PRIVMSG — only the two valid ones should produce events.
        proto.data_received(
            b":server.host COMMAND this is :a command\r\nPRIVMSG\r\n:server.host PRIVMSG me :hi\r\n"
        )

        # Drain all parse tasks scheduled by data_received.
        conn.loop.run_until_complete(
            asyncio.gather(*Task.all_tasks(conn.loop)))

        assert out == [
            {
                "irc_tags": None,
                "chan": "server.host",
                "content": None,
                "content_raw": None,
                "db": None,
                "db_executor": None,
                "hook": None,
                "host": "",
                "irc_command": "COMMAND",
                "irc_ctcp_text": None,
                "irc_paramlist": ["this", "is", "a command"],
                "irc_prefix": "server.host",
                "irc_raw": ":server.host COMMAND this is :a command",
                "mask": "server.host",
                "nick": "server.host",
                "target": None,
                "type": EventType.other,
                "user": "",
            },
            {
                "irc_tags": None,
                "chan": "server.host",
                "content": "hi",
                "content_raw": "hi",
                "db": None,
                "db_executor": None,
                "hook": None,
                "host": "",
                "irc_command": "PRIVMSG",
                "irc_ctcp_text": None,
                "irc_paramlist": ["me", "hi"],
                "irc_prefix": "server.host",
                "irc_raw": ":server.host PRIVMSG me :hi",
                "mask": "server.host",
                "nick": "server.host",
                "target": None,
                "type": EventType.message,
                "user": "",
            },
        ]
예제 #15
0
def cleanup(overseer, manager):
    """Graceful shutdown: stop workers, drain tasks and the DB queue, persist
    caches, then close pipes/sessions and the event loop.

    The finally block runs even if draining fails, so pipes and the loop are
    always closed.
    """
    try:
        # Stop the periodic/worker handles if they were ever created.
        if hasattr(overseer, 'print_handle'):
            overseer.print_handle.cancel()
        if hasattr(overseer, 'worker30'):
            overseer.worker30.cancel()
        if hasattr(overseer, 'worker_raider'):
            overseer.worker_raider.cancel()
        overseer.running = False
        print('Exiting, please wait until all tasks finish')

        log = get_logger('cleanup')
        print('Finishing tasks...')

        LOOP.create_task(overseer.exit_progress())
        # Give every remaining task up to 40s to finish; exceptions are
        # returned, not raised, so one bad task can't abort the gather.
        pending = gather(*Task.all_tasks(loop=LOOP), return_exceptions=True)
        try:
            LOOP.run_until_complete(wait_for(pending, 40))
        except TimeoutError as e:
            print('Coroutine completion timed out, moving on.')
        except Exception as e:
            log = get_logger('cleanup')
            log.exception('A wild {} appeared during exit!',
                          e.__class__.__name__)

        db_proc.stop()
        overseer.refresh_dict()

        print('Dumping pickles...')
        dump_pickle('accounts', get_accounts())
        dump_pickle('accounts30', get_accounts30())
        FORT_CACHE.pickle()
        altitudes.pickle()
        if conf.CACHE_CELLS:
            dump_pickle('cells', Worker.cells)

        spawns.pickle()
        # Busy-wait until the DB worker drains its queue.
        while not db_proc.queue.empty():
            pending = db_proc.queue.qsize()
            # Spaces at the end are important, as they clear previously printed
            # output - \r doesn't clean whole line
            print('{} DB items pending     '.format(pending), end='\r')
            sleep(.5)
    finally:
        print('Closing pipes, sessions, and event loop...')
        manager.shutdown()
        SessionManager.close()
        close_sessions()
        LOOP.close()
        print('Done.')
예제 #16
0
async def stop() -> dict:
    """Stop the active streaming task, if any, and report the outcome."""
    logger.info("stopping")

    if not state.streaming:
        return {"success": True, "message": "Not currently streaming"}

    # Cancel only the first task whose name matches the streaming task.
    streaming_task = next(
        (t for t in Task.all_tasks() if t.get_name() == STREAMING_TASK_NAME),
        None,
    )
    if streaming_task is not None:
        streaming_task.cancel()

    state.streaming = False

    return {"success": True, "message": "Stopped streaming"}
예제 #17
0
File: aiorun.py  Project: wallies/aiorun
    def sep():
        """Cancel all pending tasks except the registered do-not-cancel ones.

        Returns ``(tasks, do_not_cancel)`` — the tasks that were cancelled
        and the ones that were spared.
        """
        tasks = Task.all_tasks(loop=loop)
        # TODO: we don't need access to the coro. We could simply
        # TODO: store the task itself in the weakset.
        do_not_cancel = {t for t in tasks if t._coro in _DO_NOT_CANCEL_COROS}

        tasks -= do_not_cancel

        logger.critical('Cancelling pending tasks.')
        for task in tasks:
            logger.debug('Cancelling task: %s', task)
            task.cancel()
        return tasks, do_not_cancel
예제 #18
0
    async def _monitor_lag(self, loop: AbstractEventLoop):
        """Measure event-loop lag and active task count until the loop stops."""
        log.info("Monitoring async lag started")
        while loop.is_running():
            started_at = loop.time()
            await sleep(self._interval)
            # The closer this gap is to our intended sleep time
            # the less load the system is under. Large gaps mean
            # the running loop is dealing with a lot of work
            self.lag = (loop.time() - started_at) - self._interval
            log.debug(f"Current async lag (ms): {self.lag * 1000}")

            pending = [t for t in Task.all_tasks(loop) if not t.done()]
            self.active_tasks = len(pending)
            log.debug(f"Active tasks: {self.active_tasks}")

            self._warn(pending)
        log.info("Monitoring async lag stopped")
예제 #19
0
파일: scan.py 프로젝트: Kiltres/Monocle
def cleanup(overseer, manager):
    """Graceful shutdown: stop the overseer, drain tasks and the DB queue,
    persist caches, then close pipes/sessions and the event loop.

    The finally block runs regardless of drain failures, so pipes and the
    loop are always closed.
    """
    try:
        overseer.print_handle.cancel()
        overseer.running = False
        print('Exiting, please wait until all tasks finish')

        log = get_logger('cleanup')
        print('Finishing tasks...')

        LOOP.create_task(overseer.exit_progress())
        # Give remaining tasks up to 40s; exceptions are returned rather
        # than raised so one bad task can't abort the gather.
        pending = gather(*Task.all_tasks(loop=LOOP), return_exceptions=True)
        try:
            LOOP.run_until_complete(wait_for(pending, 40))
        except TimeoutError as e:
            print('Coroutine completion timed out, moving on.')
        except Exception as e:
            log = get_logger('cleanup')
            log.exception('A wild {} appeared during exit!', e.__class__.__name__)

        db_proc.stop()
        overseer.refresh_dict()

        print('Dumping pickles...')
        dump_pickle('accounts', ACCOUNTS)
        FORT_CACHE.pickle()
        altitudes.pickle()
        if conf.CACHE_CELLS:
            dump_pickle('cells', Worker.cells)

        spawns.pickle()
        # Busy-wait until the DB worker drains its queue.
        while not db_proc.queue.empty():
            pending = db_proc.queue.qsize()
            # Spaces at the end are important, as they clear previously printed
            # output - \r doesn't clean whole line
            print('{} DB items pending     '.format(pending), end='\r')
            sleep(.5)
    finally:
        print('Closing pipes, sessions, and event loop...')
        manager.shutdown()
        SessionManager.close()
        close_sessions()
        LOOP.close()
        print('Done.')
예제 #20
0
def _cancel_all_tasks(loop):
    """Cancel every task on *loop*, await them, and report stray exceptions.

    Mirrors the shutdown sequence of ``asyncio.run``: exceptions raised by
    tasks during cancellation are routed to the loop's exception handler.
    """
    pending = Task.all_tasks(loop)
    if not pending:
        return

    for task in pending:
        task.cancel()

    loop.run_until_complete(
        gather(*pending, loop=loop, return_exceptions=True))

    for task in pending:
        if task.cancelled():
            continue
        exc = task.exception()
        if exc is not None:
            loop.call_exception_handler({
                "message": "unhandled exception during asyncio.run() shutdown",
                "exception": exc,
                "task": task,
            })
예제 #21
0
    def _cancel_all_tasks(loop):
        """Cancel and drain all tasks on *loop*, surfacing any exceptions
        through the loop's exception handler (as asyncio.run does)."""
        from asyncio import Task, gather

        tasks = Task.all_tasks(loop)
        if not tasks:
            return

        for t in tasks:
            t.cancel()

        loop.run_until_complete(
            gather(*tasks, loop=loop, return_exceptions=True))

        for t in tasks:
            if not t.cancelled() and t.exception() is not None:
                loop.call_exception_handler({
                    'message': 'unhandled exception during asyncio.run() shutdown',
                    'exception': t.exception(),
                    'task': t,
                })
예제 #22
0
async def test_workflow(
    client,
    postgres_db: sa.engine.Engine,
    docker_registry: str,
    simcore_services,
    fake_project_data,
    catalog_subsystem_mock,
    logged_user,
    primary_group: Dict[str, str],
    standard_groups: List[Dict[str, str]],
    computational_system_mock,
    storage_subsystem_mock,
):
    """End-to-end project workflow with access rights:
    list/create/update/share/get/delete through the API."""
    # empty list
    projects = await _request_list(client)
    assert not projects

    # creation
    await _request_create(client, fake_project_data)
    catalog_subsystem_mock([fake_project_data])
    # list not empty
    projects = await _request_list(client)
    assert len(projects) == 1

    assert not ProjectState(**projects[0].pop("state")).locked.value
    for key in projects[0].keys():
        if key not in (
            "uuid",
            "prjOwner",
            "creationDate",
            "lastChangeDate",
            "accessRights",
        ):
            assert projects[0][key] == fake_project_data[key]
    assert projects[0]["prjOwner"] == logged_user["email"]
    assert projects[0]["accessRights"] == {
        str(primary_group["gid"]): {"read": True, "write": True, "delete": True}
    }

    modified_project = deepcopy(projects[0])
    modified_project["name"] = "some other name"
    modified_project["description"] = "John Raynor killed Kerrigan"
    # Rename the first workbench node to exercise key changes on update.
    modified_project["workbench"]["ReNamed"] = modified_project["workbench"].pop(
        list(modified_project["workbench"].keys())[0]
    )
    modified_project["workbench"]["ReNamed"]["position"]["x"] = 0
    # share with some group
    modified_project["accessRights"].update(
        {str(standard_groups[0]["gid"]): {"read": True, "write": True, "delete": False}}
    )
    # modify
    pid = modified_project["uuid"]
    await _request_update(client, modified_project, pid)

    # list not empty
    projects = await _request_list(client)
    assert len(projects) == 1

    for key in projects[0].keys():
        if key not in ("lastChangeDate", "state"):
            assert projects[0][key] == modified_project[key]

    # get
    project = await _request_get(client, pid)
    for key in project.keys():
        if key not in ("lastChangeDate", "state"):
            assert project[key] == modified_project[key]

    # delete
    await _request_delete(client, pid)

    # wait for delete tasks to finish
    tasks = Task.all_tasks()
    for task in tasks:
        # TODO: 'async_generator_asend' has no __name__ attr. Python 3.8 gets coros names
        # Expects "delete_project" coros to have __name__ attrs
        # pylint: disable=protected-access
        if "delete_project" in getattr(task._coro, "__name__", ""):
            await wait_for(task, timeout=60.0)

    # list empty
    projects = await _request_list(client)
    assert not projects
예제 #23
0
 def info(self):
     """Return the set of all tasks known to the event loop."""
     tasks = Task.all_tasks()
     return tasks
예제 #24
0
 def all_tasks(loop=None):
     """Return the set of unfinished tasks for *loop*."""
     from asyncio import Task
     pending = set()
     for task in Task.all_tasks(loop):
         if not task.done():
             pending.add(task)
     return pending
예제 #25
0
File: main.py  Project: alon/emolog
def cancel_outstanding_tasks():
    """Cancel every task currently known to the event loop, logging each one."""
    outstanding = Task.all_tasks()
    for task in outstanding:
        logger.warning('canceling task {}'.format(task))
        task.cancel()
예제 #26
0
            elif raw[1] == "storio":
                change_game_id(author.id, 3)
                await author.send("You switch the game cartridge to Storio.")

        else:
            await author.send(games[game_id[str(author.id)]](author.id, raw))

        log_message(author.id, message.content)


# I took this from Zote, and it works so yay
# Crash-restart loop: run the bot until it dies, then cancel whatever is
# still pending on the loop and retry after a fixed delay.
fail_delay = 25
loop = get_event_loop()
while True:
    try:
        print("Initializing...")
        loop.run_until_complete(bot.start(inf.TOKEN))
    except Exception as exc:
        # Generic exception because I'm bad
        start = time()
        # Cancel and drain every leftover task so the next run starts from
        # a clean loop; gathered.exception() retrieves (and discards) any
        # pending exception to silence "never retrieved" warnings.
        pending = Task.all_tasks(loop=bot.loop)
        gathered = gather(*pending, loop=bot.loop)
        try:
            gathered.cancel()
            bot.loop.run_until_complete(gathered)
            gathered.exception()
        except Exception:  # This could be better too
            pass
    print(f"Attempting restart in {fail_delay} seconds...")
    sleep(fail_delay)
예제 #27
0
 def done_task(self, future):
     '''Default one, we don't care about results'''
     # If only the current task (finished btw) has left
     remaining = len(Task.all_tasks())
     if remaining == 1:
         self.loop.stop()
예제 #28
0
 group2 = gather(*group2)
 # Cancel execution of one group:
 # group1.cancel()
 # Once wrapped by gather(), no * unpacking is needed any more
 from concurrent.futures import CancelledError, BrokenExecutor, TimeoutError
 try:
     loop_instance.run_until_complete(gather(group1, group2))
 except CancelledError as e:
     logging.error("some tasks stoped!")
 except BrokenExecutor as e:
     raise Exception("Task start failed!")
 except TimeoutError as e:
     raise Exception("Too many times to exec,Timeout!")
 except KeyboardInterrupt as e:
     # Fetch all tasks on the loop
     all_tasks = Task.all_tasks()
     for task in all_tasks:
         # Stop every task
         task.cancel()
         logging.warning(f"{task.print_stack()} cancelled by user!")
     # loop_instance.stop()
     # loop_instance.run_forever()
 finally:
     # Inspect the results returned by the async calls
     try:
         logging.info(group1.result())
     except Exception:
         pass
     # Check whether the task group was cancelled
     logging.warning("group1 cancelled!") if group1.cancelled(
     ) else logging.info("group1 executed!")
예제 #29
0
async def get_urls(site_url, depth=0, loop=None):
    """Crawl *site_url*: fetch (with a Redis cache), extract anchors, and
    schedule child crawls on *loop* up to MAX_DEPTH; stops the loop when it
    is the last remaining task.
    """
    async def retrieve_site(url, timeout=1):
        # Fetch the page body, throttled by the global connection semaphore.
        logging.debug("%s: retrieving", url)
        await connection_sem.acquire()
        try:
            response = await wait_for(get(url), timeout)
        # Potential Errors:
        # ValueError: Host could not be detected
        # ...
            logging.debug("%s: Connected, retrieving text", url)
        except (ValueError, TimeoutError, ClientOSError):
            logging.debug("%s: Connection error", url)

        # NOTE(review): if the request above failed, `response` is unbound —
        # the UnboundLocalError caught below is what papers over that.
        try:
            text = await wait_for(response.text(), timeout)
            logging.debug("%s: Retrieved", url)
        # Potential Errors:
        # ...
        except (UnicodeDecodeError, UnboundLocalError, TimeoutError):
            logging.debug("%s: Could not retrieve text", url)
            text = ''

        connection_sem.release()
        return text

    async def process_anchors(text):
        # Collect absolute, query-stripped hrefs and enqueue unseen ones.
        hrefs = URL_RE.findall(text)
        href_targets = set()
        for href in hrefs:
            if not href.startswith('http'):
                href = urljoin(site_url, href)
            # Remove potential query parameter from URL
            href, *_ = href.split('?')
            if not URL_IGNORE_RE.match(href) and depth < MAX_DEPTH:
                href_targets.add(href)
        for href_target in href_targets:
            if r.sismember('seen', href_target):
                continue
            logging.debug("Enqueueing %s", site_url)
            loop.create_task(get_urls(href_target, depth + 1, loop=loop))

    r.sadd('seen', site_url)
    logging.info("Crawling %s", site_url)

    if not r.exists(site_url):
        logging.debug("%s not in cache", site_url)
        text = await retrieve_site(site_url, timeout=5)

        # Only cache if text has been retrieved successfully
        if text:
            r.set(site_url, text)
            r.expire(site_url, 60 * 60 * 24)
    else:
        logging.debug("Retrieved %s from cache", site_url)
        text = r.get(site_url)

    await process_anchors(str(text))
    # If this is the only task left (len(tasks) counts ourselves), the crawl
    # is finished — stop the loop.
    tasks = Task.all_tasks(loop)
    logging.info("Semaphore locked? %s", connection_sem.locked())
    logging.info("Remaining tasks: %d", len(tasks) - 1)
    if len(tasks) == 1:
        loop.stop()
예제 #30
0
                        help="ssb-drive's uuid (default is youngest drive)")

    args = parser.parse_args()
    sess = ssb.peer.session.SSB_SESSION(args.username)

    # --sync: run a peer-sync session, then flush, cancel leftover tasks,
    # close the loop and exit.  (--port is incompatible with syncing.)
    if args.sync:
        if args.port:
            print("** cannot be server for syncing, aborting")
        else:
            logger.addHandler(logging.StreamHandler())
            theLoop = get_event_loop()
            try:
                theLoop.run_until_complete(ssb.peer.session.main(args, sess))
            finally:
                sess.worm.flush()
                for t in Task.all_tasks():
                    t.cancel()
                theLoop.close()
        sys.exit(0)

    # --uuid: operate on a specific drive.
    if args.uuid:
        ref = ssb.adt.lfs.get_lfs_by_uuid(sess.worm, args.uuid)
        if not ref:
            print("** no such drive")
            sys.exit(0)
        fs = ssb.adt.lfs.SSB_LFS(sess.worm, ref)
        if args.udel:
            print("** not implemented")
            sys.exit(0)
        if args.delete:
            fs.close()
예제 #31
0
def listen(**kwargs):
    """Configure and run the kinesyslog listeners described by *kwargs*.

    Sets up logging, instantiates stats/TLS/TCP/UDP servers (optionally
    wrapped with PROXY-protocol support), wires each one to a message sink
    backed by the event spool, runs until a termination signal, then shuts
    everything down and drains the loop.  Exits the process with a posix
    EX_* code on configuration or permission errors.
    """
    if kwargs.get('debug', False):
        logging.getLogger('kinesyslog').setLevel('DEBUG')
        logging.getLogger('asyncio').setLevel('INFO')
        if kwargs.get('debug_asyncio', False):
            logging.getLogger('asyncio').setLevel('DEBUG')
            os.environ['PYTHONASYNCIODEBUG'] = '1'
    else:
        logging.getLogger('botocore').setLevel('ERROR')

    prometheus_loaded = False
    loop = get_event_loop()
    loop.set_exception_handler(shutdown_exception_handler)

    from . import proxy, util
    from .message import GelfMessage, SyslogMessage
    from .protocol import BaseLoggingProtocol
    from .server import (DatagramGelfServer, DatagramSyslogServer, GelfServer,
                         SecureGelfServer, SecureSyslogServer, SyslogServer)
    from .sink import MessageSink
    from .spool import EventSpoolReader, EventSpoolWriter
    from .util import create_registry

    # Prefer the real prometheus stats backend when any prometheus port is
    # configured and the optional dependency imports; otherwise fall back
    # to the no-op stats implementation.
    if 0 not in kwargs.get('prometheus_port', [0]):
        try:
            from .prometheus import StatsServer, StatsSink, StatsRegistry
            prometheus_loaded = True
        except ImportError:
            pass
    if not prometheus_loaded:
        from .stats import StatsServer, StatsSink, StatsRegistry  # noqa F811

    # Select the message/server family: GELF or classic syslog.
    if kwargs.get('gelf', False):
        message_class = GelfMessage
        TLS = SecureGelfServer
        TCP = GelfServer
        UDP = DatagramGelfServer
    else:
        message_class = SyslogMessage
        TLS = SecureSyslogServer
        TCP = SyslogServer
        UDP = DatagramSyslogServer

    registry = create_registry(StatsRegistry)
    servers = []
    try:
        # Build one server object per configured port; ports listed in
        # proxy_protocol get the PROXY-protocol wrapper.
        for port in kwargs['prometheus_port']:
            if port:
                server = proxy.wrap(
                    StatsServer
                ) if port in kwargs['proxy_protocol'] else StatsServer
                servers.append(
                    server(host=kwargs['address'],
                           port=port,
                           registry=registry))
        for port in kwargs['tls_port']:
            if port:
                server = proxy.wrap(
                    TLS) if port in kwargs['proxy_protocol'] else TLS
                servers.append(
                    server(host=kwargs['address'],
                           port=port,
                           registry=registry,
                           certfile=kwargs['cert'],
                           keyfile=kwargs['key']))
        for port in kwargs['tcp_port']:
            if port:
                server = proxy.wrap(
                    TCP) if port in kwargs['proxy_protocol'] else TCP
                servers.append(
                    server(host=kwargs['address'],
                           port=port,
                           registry=registry))
        for port in kwargs['udp_port']:
            if port:
                servers.append(
                    UDP(host=kwargs['address'], port=port, registry=registry))
    except Exception as e:
        logger.error('Failed to validate {0} configuration: {1}'.format(
            e.__traceback__.tb_next.tb_frame.f_code.co_names[1], e))

    if servers:
        if registry.active:
            registry.get('kinesyslog_listener_count').set(labels={},
                                                          value=len(servers) -
                                                          1)
    else:
        logger.error(
            'No valid servers configured -  you must enable at least one UDP, TCP, or TLS port'
        )
        sys.exit(posix.EX_CONFIG)

    try:
        with ExitStack() as stack:
            spool_writer = EventSpoolWriter(spool_dir=kwargs['spool_dir'])
            spool_reader = EventSpoolReader(delivery_stream=kwargs['stream'],
                                            spool_dir=kwargs['spool_dir'],
                                            registry=registry,
                                            region_name=kwargs['region'],
                                            profile_name=kwargs['profile'])
            account = spool_reader.get_account()
            stack.enter_context(spool_reader)
            sinks = []

            # One sink per server: logging protocols feed the message sink,
            # everything else feeds the stats sink.
            for server in servers:
                sink = MessageSink if issubclass(
                    server.PROTOCOL, BaseLoggingProtocol) else StatsSink
                sink = sink(spool=spool_writer,
                            server=server,
                            message_class=message_class,
                            group_prefix=kwargs['group_prefix'],
                            account=account)
                context = stack.enter_context(sink)
                sinks.append(context)

            for i, server in enumerate(servers):
                try:
                    loop.run_until_complete(
                        server.start(sink=sinks[i], loop=loop))
                    util.setproctitle('{0} (master:{1})'.format(
                        __name__, i + 1))
                except Exception as e:
                    logger.error('Failed to start {}: {}'.format(
                        server.__class__.__name__, e),
                                 exc_info=True)
                    servers.remove(server)

            if servers:
                # Everything started successfully, set up signal handlers and wait until termination
                try:
                    logger.info('Successfully started {} servers'.format(
                        len(servers)))
                    signal.signal(signal.SIGTERM, util.interrupt)
                    signal.signal(signal.SIGCHLD, util.interrupt)
                    loop.run_forever()
                except (KeyboardInterrupt, ChildProcessError, SystemExit) as e:
                    signal.signal(signal.SIGTERM, signal.SIG_DFL)
                    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
                    logger.info('Shutting down servers: {0}({1})'.format(
                        e.__class__.__name__, e))

                # Time passes...

                for server in servers:
                    loop.run_until_complete(server.stop())
            else:
                raise Exception('All servers failed')
    except Exception as e:
        logger.error('Unhandled exception: {0}'.format(e))
        if isinstance(e, PermissionError):
            sys.exit(posix.EX_NOPERM)
        else:
            sys.exit(posix.EX_CONFIG)
    finally:
        # Cancel whatever is left on the loop and spin until the gather's
        # done-callback stops the loop.
        tasks = gather(*Task.all_tasks(loop=loop),
                       loop=loop,
                       return_exceptions=True)
        tasks.add_done_callback(lambda f: loop.stop())
        tasks.cancel()
        while not tasks.done() and not loop.is_closed():
            loop.run_forever()
예제 #32
0
async def number_of_tasks():
    """Print how many tasks the event loop currently knows about."""
    task_count = len(Task.all_tasks())
    print("Number of tasks: %s" % task_count)
예제 #33
0
파일: __main__.py 프로젝트: cxr00/Zote
# Bot construction data and crash-restart loop.
_dat = {
    "img": zdn,
    "blacklist": blacklist,
    "text": text,
    "start": start,
    "cache": che,
    "cog": cog
}

inst = create_bot_instance(cfg, _dat)

# Run the bot until the connection drops, then cancel leftover tasks and
# retry after a fixed delay.
fail_delay = 25
loop = get_event_loop()
while True:
    try:
        print("Initializing...")
        loop.run_until_complete(inst.start(token()))
    except aiohttp.errors.ClientOSError as exc:
        start = time()
        log_error_message("Event Loop", exc)
        # Cancel and drain every pending task so the next run starts from a
        # clean loop; gathered.exception() discards any pending exception to
        # silence "never retrieved" warnings.
        pending = Task.all_tasks(loop=inst.loop)
        gathered = gather(*pending, loop=inst.loop)
        try:
            gathered.cancel()
            inst.loop.run_until_complete(gathered)
            gathered.exception()
        except Exception:  # This could be better
            pass
    print(f"Attempting restart in {fail_delay} seconds...")
    sleep(fail_delay)
예제 #34
0
파일: core.py 프로젝트: glomex/kinesyslog
def listen(**args):
    """Configure and run syslog/GELF listeners described by *args*.

    Builds TLS/TCP/UDP servers (optionally PROXY-protocol wrapped), wires
    them to a message sink backed by the event spool, and runs the loop
    until SIGINT/SIGTERM; on KeyboardInterrupt the remaining tasks are
    cancelled and drained before the loop is closed.
    """
    logging.basicConfig(
        level='INFO',
        format='%(asctime)-15s %(levelname)s:%(name)s %(message)s')
    loop = get_event_loop()
    loop.set_exception_handler(shutdown_exception_handler)
    # Select the message/server family: GELF or classic syslog.
    if args.get('gelf', False):
        message_class = GelfMessage
        TLS = SecureGelfServer
        TCP = GelfServer
        UDP = DatagramGelfServer
    else:
        message_class = SyslogMessage
        TLS = SecureSyslogServer
        TCP = SyslogServer
        UDP = DatagramSyslogServer

    if args.get('debug', False):
        logging.getLogger('kinesyslog').setLevel('DEBUG')
        logging.getLogger('asyncio').setLevel('INFO')
        loop.set_debug(True)
    else:
        logging.getLogger('botocore').setLevel('ERROR')

    servers = []
    try:
        # One server object per configured port; ports listed in
        # proxy_protocol get the PROXY-protocol wrapper.
        for port in args['tls_port']:
            if port:
                server = proxy.wrap(
                    TLS) if port in args['proxy_protocol'] else TLS
                servers.append(
                    server(host=args['address'],
                           port=port,
                           certfile=args['cert'],
                           keyfile=args['key']))
        for port in args['tcp_port']:
            if port:
                server = proxy.wrap(
                    TCP) if port in args['proxy_protocol'] else TCP
                servers.append(server(host=args['address'], port=port))
        for port in args['udp_port']:
            if port:
                servers.append(UDP(host=args['address'], port=port))
    except Exception:
        logging.error('Failed to start server', exc_info=True)

    if not servers:
        logging.error(
            'No servers configured! You must enable at least one UDP, TCP, or TLS port.'
        )
        return

    for signame in ('SIGINT', 'SIGTERM'):
        loop.add_signal_handler(getattr(signal, signame), partial(loop.stop))

    try:
        with EventSpool(delivery_stream=args['stream'],
                        spool_dir=args['spool_dir']) as spool:
            with MessageSink(spool=spool,
                             message_class=message_class,
                             raw=args.get('raw', False)) as sink:
                for server in servers:
                    loop.run_until_complete(server.start_server(sink=sink))
                loop.run_forever()
    except KeyboardInterrupt:
        # Cancel whatever is left on the loop and spin until the gather's
        # done-callback stops the loop.
        tasks = gather(*Task.all_tasks(loop=loop),
                       loop=loop,
                       return_exceptions=True)
        tasks.add_done_callback(partial(loop.stop))
        tasks.cancel()
        while not tasks.done() and not loop.is_closed():
            loop.run_forever()
    finally:
        loop.close()