Example #1
0
    def run(self):
        """Run the service, optionally daemonizing via a double fork first.

        When ``self.daemon`` is true, the launching process forks and exits,
        leaving a detached child to become the session leader. A second fork
        then separates the master (``start_master``) from the worker, which
        installs signal handlers, starts the sync thread, and serves HTTP
        on a uvloop-backed tornado IOLoop.
        """
        if self.daemon:
            # First fork: detach from the launching process.
            pid = os.fork()
            if pid < 0:
                # Fork failed: exit with the software-error status.
                os._exit(os.EX_SOFTWARE)
            # main process quit
            if pid > 0:
                os._exit(os.EX_OK)
        # start child process:
        # Clear the umask so created files get exactly the requested modes.
        os.umask(0)
        try:
            # Become session leader, detaching from any controlling terminal.
            os.setsid()
        except OSError:
            # Already a session leader (e.g. not daemonized); ignore.
            pass
        # secondly forking
        pid2 = os.fork()
        if pid2 < 0:
            os._exit(os.EX_SOFTWARE)
        # master:
        if pid2 > 0:
            # NOTE(review): the parent calls start_master() and, if that
            # returns, falls through to the worker setup below -- confirm
            # start_master() never returns (or exits) as intended.
            self.start_master()

        # Start : start httpmq worker process
        # Ignore SIGPIPE so writes to closed sockets do not kill the worker.
        signal.signal(signal.SIGPIPE, signal.SIG_IGN)
        for s in self.exit_signal:
            signal.signal(s, self.worker_signal_handler)
        # create sync thread
        sync_thread = threading.Thread(target=self.sync_handler, args=())
        sync_thread.start()

        # start handle http request:
        # Use uvloop for the tornado/asyncio event loop, then serve.
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        self.app.listen(self.port)
        tornado.ioloop.IOLoop.current().start()
Example #2
0
def pytest_configure(config):
    """Record redis-server binaries/versions and honour --uvloop."""
    servers = list(config.getoption('--redis-server'))
    if not servers:
        # Fall back to the conventional install location.
        servers = ['/usr/bin/redis-server']
    REDIS_SERVERS[:] = servers
    VERSIONS.update({binary: _read_server_version(binary)
                     for binary in REDIS_SERVERS})
    assert VERSIONS, ("Expected to detect redis versions", REDIS_SERVERS)

    class DynamicFixturePlugin:
        @pytest.fixture(scope='session',
                        params=REDIS_SERVERS,
                        ids=format_version)
        def server_bin(self, request):
            """Server binary path parameter shared by start_server and
            start_sentinel.
            """
            return request.param
    config.pluginmanager.register(DynamicFixturePlugin(), 'server-bin-fixture')

    if config.getoption('--uvloop'):
        try:
            import uvloop
        except ImportError:
            raise RuntimeError(
                "Can not import uvloop, make sure it is installed")
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
Example #3
0
def run():
    """Run the test client: pull work over ZMQ and export results.

    Installs uvloop, creates a ZMQ-aware event loop, connects the PULL
    socket and the result exporter, runs ``main`` until interruption, and
    cleans up the export task, socket and loop on the way out.
    """
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = zmq.asyncio.ZMQEventLoop()
    asyncio.set_event_loop(loop)

    context = zmq.asyncio.Context()

    pull_socket = context.socket(zmq.PULL)
    pull_socket.connect(settings.API_ADDRESS)

    exporter = ZMQRequestResultExporter(zmq_context=context, address=settings.AGGREGATOR_ADDRESS, loop=loop)
    client = TestClient(result_exporter=exporter, loop=loop)

    exporter.connect()
    # Periodic exporter runs alongside the main task; cancelled on exit.
    export_fut = asyncio.ensure_future(periodically_export_results(exporter=exporter, period=5, loop=loop))
    try:
        loop.run_until_complete(main(socket=pull_socket, client=client, loop=loop))
    except KeyboardInterrupt:
        logger.info('Shutting down ...')
    finally:
        export_fut.cancel()
        # BUG FIX: was `pull_socket.disonnect()` -- an AttributeError at
        # shutdown. zmq sockets expose disconnect(addr), mirroring connect().
        pull_socket.disconnect(settings.API_ADDRESS)
        loop.stop()

    logger.info('Stopped')
Example #4
0
def pytest_configure(config):
    """Discover redis-server binaries, record their versions, register the
    server_bin fixture plugin, and honour the --uvloop option.
    """
    import shutil

    bins = config.getoption('--redis-server')[:]
    if not bins:
        # IMPROVED: use shutil.which instead of shelling out via
        # os.popen('which redis-server') -- portable, no subprocess, and
        # returns None (falsy) when the binary is missing.
        path = shutil.which('redis-server')
        assert path, (
            "There is no redis-server on your computer."
            " Please install it first")
        REDIS_SERVERS[:] = [path]
    else:
        REDIS_SERVERS[:] = bins

    VERSIONS.update({srv: _read_server_version(srv)
                     for srv in REDIS_SERVERS})
    assert VERSIONS, ("Expected to detect redis versions", REDIS_SERVERS)

    class DynamicFixturePlugin:
        @pytest.fixture(scope='session',
                        params=REDIS_SERVERS,
                        ids=format_version)
        def server_bin(self, request):
            """Common for start_server and start_sentinel
            server bin path parameter.
            """
            return request.param
    config.pluginmanager.register(DynamicFixturePlugin(), 'server-bin-fixture')

    if config.getoption('--uvloop'):
        try:
            import uvloop
        except ImportError:
            raise RuntimeError(
                "Can not import uvloop, make sure it is installed")
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
Example #5
0
    def test_default_event_loop(self):
        """RPC client/server created with loop=None should use the global
        ZMQ event loop installed via the policy."""
        port = find_unused_port()

        asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
        # Cleanups run in reverse order: clear the loop, then the policy.
        self.addCleanup(asyncio.set_event_loop_policy, None)
        self.addCleanup(asyncio.set_event_loop, None)

        @asyncio.coroutine
        def create():
            # loop=None forces both endpoints onto the policy's global loop.
            server = yield from aiozmq.rpc.serve_rpc(
                MyHandler(self.loop),
                bind='tcp://127.0.0.1:{}'.format(port),
                loop=None)
            client = yield from aiozmq.rpc.connect_rpc(
                connect='tcp://127.0.0.1:{}'.format(port),
                loop=None)
            return client, server

        self.loop = loop = asyncio.get_event_loop()
        self.client, self.server = loop.run_until_complete(create())

        @asyncio.coroutine
        def communicate():
            # Round-trip a call to prove the pair is actually connected.
            ret = yield from self.client.call.func(1)
            self.assertEqual(2, ret)

        loop.run_until_complete(communicate())
Example #6
0
 def setUp(self):
     """Reset tornado's loop mapping and install a default asyncio policy."""
     # Creating and immediately closing an AsyncIOLoop flushes the loop
     # mapping, so every test starts from a clean slate.
     AsyncIOLoop().close()
     # Remember the active policy so tearDown can restore it; without this
     # other tests may fail on py34.
     self.orig_policy = asyncio.get_event_loop_policy()
     asyncio.set_event_loop_policy(asyncio.DefaultEventLoopPolicy())
Example #7
0
 def setUp(self):
     """Install the green trollius policy and give the test its own loop."""
     shared_policy = greenio.GreenTrolliusEventLoopPolicy()
     # Register with trollius, and with stdlib asyncio too when present.
     trollius.set_event_loop_policy(shared_policy)
     if asyncio is not None:
         asyncio.set_event_loop_policy(shared_policy)
     # Fresh loop per test, registered as the policy's current loop.
     self.loop = shared_policy.new_event_loop()
     shared_policy.set_event_loop(self.loop)
Example #8
0
def pytest_pyfunc_call(pyfuncitem):
    """
    Run asyncio marked test functions in an event loop instead of a normal
    function call.
    """
    for marker_name, fixture_name in _markers_2_fixtures.items():
        if marker_name in pyfuncitem.keywords:
            event_loop = pyfuncitem.funcargs[fixture_name]

            forbid_global_loop = pyfuncitem.keywords[marker_name].kwargs.get('forbid_global_loop')

            # Remember the current policy so it can be restored afterwards.
            policy = asyncio.get_event_loop_policy()
            if forbid_global_loop:
                asyncio.set_event_loop_policy(ForbiddenEventLoopPolicy())
            else:
                policy.set_event_loop(event_loop)

            # Only pass the fixtures the test function actually declares.
            funcargs = pyfuncitem.funcargs
            testargs = {arg: funcargs[arg]
                        for arg in pyfuncitem._fixtureinfo.argnames}
            try:
                # BUG FIX: asyncio.async() is a SyntaxError on Python 3.7+
                # ('async' became a keyword); ensure_future is the
                # long-standing replacement with identical semantics here.
                event_loop.run_until_complete(
                    asyncio.ensure_future(pyfuncitem.obj(**testargs),
                                          loop=event_loop))
            finally:
                if forbid_global_loop:
                    asyncio.set_event_loop_policy(policy)
                event_loop.close()
            # BUG FIX: 'return True' previously sat inside the finally
            # block, which silently swallowed any exception raised by
            # run_until_complete -- including test failures.
            return True
Example #9
0
 def __init__(self, name, path=None,
              template_inputs=None,
              logger=None,
              event_loop=None):
     """Parse a TOSCA template and prepare orchestration state."""
     self.__name = name
     # Parse the template file from disk.
     self._tmplt = tosca_template.ToscaTemplate(path=path, a_file=True)
     self.__path = path
     self.origin_nodes = self._tmplt.graph.nodetemplates
     self.vertices = self._tmplt.graph.vertices
     self.inputs_definitions = self._tmplt.inputs
     self.__outputs = self._tmplt.outputs
     self.template_inputs = template_inputs or {}
     self.__status = self.PENDING
     # Use the supplied logger, or build a console DEBUG logger by default.
     if logger:
         self.logger = logger
     else:
         self.logger = log.UnifiedLogger(
             log_to_console=True,
             level="DEBUG").setup_logger(__name__)
     # Use the supplied loop, or install uvloop and grab the global loop.
     if event_loop:
         self.event_loop = event_loop
     else:
         asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
         self.event_loop = asyncio.get_event_loop()
     # Wrap every template node in an orchestration node.
     self.__orchestra_nodes = [node.OrchestraNode(self, origin)
                               for origin in self.origin_nodes]
     self.__deployment_plan = None
Example #10
0
File: app.py Project: pybee/toga
    def __init__(self, interface):
        """Bind this backend to its interface and create the event loop."""
        self.interface = interface
        self.interface._impl = self
        # Add a reference for the PythonAppDelegate class to use.
        App.app = self

        # Install the platform policy before the first get_event_loop().
        asyncio.set_event_loop_policy(EventLoopPolicy())
        self.loop = asyncio.get_event_loop()
def crawler_event_loop(urls):
    """Fetch every URL concurrently and dump the results to a JSON file."""
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = asyncio.get_event_loop()
    pending = [async_fetch(url) for url in urls]
    fetched = loop.run_until_complete(asyncio.gather(*pending))
    with open('Films_Massive_Dev_Chart.json', 'w') as out:
        out.write(json.dumps(fetched, indent=4))
Example #12
0
def install(gtk=False):
    """Install the GLib (or Gtk) event loop policy as the default.

    Call this as early as possible so that everything picks up a reference
    to the correct event loop. Pass ``gtk=True`` when the application uses
    Gtk; in that case a missing Gtk raises `ValueError`.

    Note that this performs some monkey patching of asyncio to ensure
    correct functionality.
    """
    if gtk:
        from .gtk import GtkEventLoopPolicy
        chosen = GtkEventLoopPolicy()
    else:
        from .glib_events import GLibEventLoopPolicy
        chosen = GLibEventLoopPolicy()

    # Some libraries instantiate SafeChildWatcher directly (which is
    # completely reasonable), so it must be replaced with the GLib-aware
    # version. Not pretty, but it is the best option available.
    from .glib_events import GLibChildWatcher
    asyncio.SafeChildWatcher = GLibChildWatcher
    asyncio.set_event_loop_policy(chosen)
Example #13
0
def set_loop() -> None:
    """Pick the best available event loop policy and install it.

    Windows gets a proactor-based policy; elsewhere uvloop is used when it
    is importable, otherwise the default policy is left untouched.
    """
    import asyncio
    from asyncio.events import BaseDefaultEventLoopPolicy

    if sys.platform == 'win32':
        if hasattr(asyncio, 'WindowsProactorEventLoopPolicy'):
            # pylint: disable=no-member
            chosen = asyncio.WindowsProactorEventLoopPolicy()
        else:
            class ProactorPolicy(BaseDefaultEventLoopPolicy):
                """Event loop policy to create proactor loops."""

                _loop_factory = asyncio.ProactorEventLoop

            chosen = ProactorPolicy()
    else:
        try:
            import uvloop
        except ImportError:
            # uvloop is optional; keep the default policy.
            chosen = None
        else:
            chosen = uvloop.EventLoopPolicy()

    if chosen is not None:
        asyncio.set_event_loop_policy(chosen)
Example #14
0
def attempt_use_uvloop() -> None:
    """Attempt to use uvloop, keeping the default policy if it is absent."""
    try:
        import uvloop
    except ImportError:
        # uvloop is an optional dependency; silently fall back.
        pass
    else:
        # IMPROVED: moved out of the try so an ImportError raised while
        # installing the policy is not silently swallowed -- only the
        # uvloop import itself is guarded.
        set_event_loop_policy(uvloop.EventLoopPolicy())
Example #15
0
def main():
    """Configure the loop policy, build the application, and serve forever."""
    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
    parse_command_line()
    routes = [(r'/api/depts/(.*)', HomeHandler), ]
    app = tornado.web.Application(handlers=routes)
    app.listen(options.port)
    IOLoop.current().start()
Example #16
0
    def tearDown(self):
        """Verify the policy's loop was fully shut down, then reset policy."""
        policy = asyncio.get_event_loop_policy()
        if policy.loop is not None:
            # The test must have closed its loop before teardown.
            self.assertTrue(policy.loop.is_closed())
            # NOTE(review): 'shutdown_ag_run' appears to be a custom flag on
            # this policy's loop (plausibly "shutdown_asyncgens ran") --
            # confirm the attribute name against the policy implementation.
            self.assertTrue(policy.loop.shutdown_ag_run)

        # Restore the default policy for subsequent tests.
        asyncio.set_event_loop_policy(None)
        super().tearDown()
Example #17
0
File: app.py Project: pybee/toga
    def __init__(self, interface):
        """Connect this backend implementation to its interface."""
        self.interface = interface
        self.interface._impl = self

        # The cursor starts out visible.
        self._cursor_visible = True

        # Install the platform policy before asking asyncio for a loop.
        asyncio.set_event_loop_policy(EventLoopPolicy())
        self.loop = asyncio.get_event_loop()
Example #18
0
    def setUp(self):
        """Create a private event loop, honouring the USE_UVLOOP env var."""
        if os.environ.get('USE_UVLOOP'):
            import uvloop
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

        # Keep the loop on self and unset the global one, so tests must go
        # through self.loop explicitly.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
Example #19
0
def set_up_loop():
    """Install the uvloop policy and return the resulting event loop."""
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    return asyncio.get_event_loop()
Example #20
0
    def setUp(self):
        """Install the aioeventlet policy and register all teardown steps."""
        test_policy = aioeventlet.EventLoopPolicy()
        asyncio.set_event_loop_policy(test_policy)
        # Cleanups run in reverse order: unset the global loop, close the
        # loop, then finally drop the policy.
        self.addCleanup(asyncio.set_event_loop_policy, None)

        self.loop = test_policy.get_event_loop()
        self.addCleanup(self.loop.close)
        self.addCleanup(asyncio.set_event_loop, None)
Example #21
0
def pytest_configure(config):
    """Honour the --uvloop option by installing uvloop's loop policy."""
    if not config.getoption('--uvloop'):
        return
    try:
        import uvloop
    except ImportError:
        raise RuntimeError(
            "Can not import uvloop, make sure it is installed")
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
Example #22
0
def run():
    """Create the ZMQ-aware event loop and serve the API application."""
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    zmq_loop = zmq.asyncio.ZMQEventLoop()
    asyncio.set_event_loop(zmq_loop)

    app = ApiApplication(loop=zmq_loop)
    aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader('web/templates'))
    aiohttp.web.run_app(app, host='0.0.0.0', port=settings.PORT)
def main():
    """Entry point: run amain(), using the proactor loop on Windows."""
    if WINDOWS:
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

    try:
        asyncio.run(amain())
    except (KeyboardInterrupt, SystemExit) as exc:
        # Log the interruption instead of letting it propagate.
        rootlogger.info('Received %r', exc)
Example #24
0
 def __init__(self, template_path='./', middlewares=None):
     """Create the aiohttp application on a uvloop event loop.

     BUG FIX: ``middlewares=[]`` was a mutable default argument shared
     across every instantiation; ``None`` is now the sentinel and a fresh
     list is created per call (behaviour for callers is unchanged).
     """
     middlewares = [] if middlewares is None else middlewares
     asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
     self.loop = asyncio.get_event_loop()
     # Development middlewares (request logging, websockets) always run
     # after any caller-supplied ones.
     self.app = web.Application(loop=self.loop,
                                middlewares=middlewares+[dev.request_logger,
                                                         dev.web_socket])
     self.app.stdout = sys.stdout
     self.setup_jinja(template_path)
    def download(self):
        """Prepare the event loop and run all downloads to completion."""
        # uvloop makes things much faster.
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
        self._loop = asyncio.get_event_loop()
        # Only warn about callbacks blocking for three seconds or more.
        self._loop.slow_callback_duration = 3

        root_task = asyncio.ensure_future(self.download_all())
        return self._loop.run_until_complete(root_task)
Example #26
0
def loop(loop_factory, fast, loop_debug):  # type: ignore
    """Yield an event loop built from the given factory/policy."""
    asyncio.set_event_loop_policy(loop_factory())
    with loop_context(fast=fast) as created_loop:
        if loop_debug:
            created_loop.set_debug(True)  # pragma: no cover
        asyncio.set_event_loop(created_loop)
        yield created_loop
Example #27
0
def attempt_use_uvloop():
    """Install uvloop's event loop policy when uvloop is importable."""
    import asyncio

    try:
        import uvloop
    except ImportError:
        # uvloop is optional; keep the default asyncio policy.
        pass
    else:
        # IMPROVED: moved out of the try so only the uvloop import is
        # guarded -- an ImportError raised while installing the policy is
        # no longer silently swallowed.
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
Example #28
0
    def test_default_event_loop(self):
        """Pipeline pair created without a loop should bind to the global
        ZMQ event loop installed via the policy."""
        asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
        self.addCleanup(asyncio.set_event_loop_policy, None)

        # NOTE(review): this registers close() on the loop currently bound
        # to self.loop *before* self.loop is reassigned below, so the
        # cleanup closes the pre-existing loop, not the new one -- confirm
        # this ordering is intentional.
        self.addCleanup(self.loop.close)
        self.loop = asyncio.get_event_loop()
        self.client, self.server = self.make_pipeline_pair(use_loop=False)
        self.assertIs(self.client._loop, self.loop)
        self.assertIs(self.server._loop, self.loop)
Example #29
0
 def configure(self, options, conf):
     """Enable the plugin and optionally install a custom loop policy."""
     self.enabled = True
     self.report_filename = options.aioxmpp_bench_report
     if options.aioxmpp_eventloop is None:
         return
     # The option is a dotted path such as 'pkg.mod.PolicyClass'.
     module_name, cls_name = options.aioxmpp_eventloop.rsplit(".", 1)
     module = importlib.import_module(module_name)
     policy = getattr(module, cls_name)()
     asyncio.set_event_loop_policy(policy)
     asyncio.set_event_loop(asyncio.new_event_loop())
Example #30
0
def main():
    """Initialise the application and serve until interrupted."""
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    main_loop = asyncio.get_event_loop()
    app, srv, handler = main_loop.run_until_complete(init(main_loop))

    try:
        main_loop.run_forever()
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C.
        main_loop.run_until_complete(cleanup(app, srv, handler))
Example #31
0
import asyncio
from six.moves import input
import threading
from azure.iot.device.aio import IoTHubModuleClient
from azure.iot.device import MethodResponse
import subprocess
from subprocess import PIPE

# Verbose logging for module development/diagnostics.
logging.basicConfig(level=logging.DEBUG)

# Pin the GStreamer / RTSP server API versions before importing them.
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GObject, GLib

# Drive asyncio from the GLib main loop so GStreamer and asyncio coexist.
import asyncio_glib
asyncio.set_event_loop_policy(asyncio_glib.GLibEventLoopPolicy())

loop = asyncio.get_event_loop()

# Module status document, including the default video pipeline
# (USB camera -> H.264 encode -> RTP payload).
module_status = {
    "status":
    "ready",
    "videoPipeline":
    "v4l2src device=/dev/video0 ! videoconvert ! videoscale! video/x-raw ! x264enc tune=zerolatency ! rtph264pay name=pay0"
}

Gst.init(None)


class USBtoRtspMediaFactory(GstRtspServer.RTSPMediaFactory):
    def __init__(self):
Example #32
0
from sanic_openapi import swagger_blueprint
from .logger import Logger
from .proxy import escape_kubernet
from .jobs import Jobs
from sanic import Sanic
import sentry_sdk
import asyncio
import getpass
import uuid
import os
import sys
import errno
import nest_asyncio

# TODO remove this fix when papermill support uvloop of Sanic support option to don't use uvloop
# Resetting the policy to None restores the default loop (avoiding Sanic's
# uvloop); nest_asyncio then permits re-entrant run_until_complete calls.
asyncio.set_event_loop_policy(None)
nest_asyncio.apply()

__version__ = "0.19.0"


class Runner:
    __naas_folder = ".naas"
    # Declare path variable
    __path_lib_files = os.path.dirname(os.path.abspath(__file__))
    __path_user_files = None
    __port = 5000
    __html_files = "html"
    __manager_index = "manager.html"
    __app = None
    __nb = None
Example #33
0
def run_pylinkjs_app(**kwargs):
    """Fill in default settings and launch the PyLinkJS tornado app.

    Accepts the same keyword arguments as before (port, default_html,
    html_dir, login_html_page, cookie_secret, heartbeat_callback,
    heartbeat_interval, login_handler, logout_handler, extra_settings,
    on_404, onContextClose, onContextOpen). Blocks in IOLoop.start().
    """
    # exit on Ctrl-C
    signal.signal(signal.SIGINT, signal_handler)

    # IMPROVED: use dict.setdefault instead of repeated
    # "if key not in kwargs" membership checks -- same behaviour, less noise.
    kwargs.setdefault('port', 8300)
    kwargs.setdefault('default_html', 'index.html')
    kwargs.setdefault('html_dir', '.')
    kwargs.setdefault('login_html_page',
                      os.path.join(os.path.dirname(__file__), 'login.html'))
    if 'cookie_secret' not in kwargs:
        # Kept as an explicit check so the warning fires only when the
        # caller did not supply a secret.
        logging.warning('COOKIE SECRET IS INSECURE!  PLEASE CHANGE')
        kwargs['cookie_secret'] = 'GENERIC COOKIE SECRET'
    kwargs.setdefault('heartbeat_callback', None)
    kwargs.setdefault('heartbeat_interval', None)
    kwargs.setdefault('login_handler', LoginHandler)
    kwargs.setdefault('logout_handler', LogoutHandler)
    kwargs.setdefault('extra_settings', {})

    # Allow IOLoops on any thread -- worker threads start their own below.
    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

    request_handlers = [
        (r"/websocket/.*", PyLinkJSWebSocketHandler, {
            'all_jsclients': ALL_JSCLIENTS
        }),
        (r"/login", kwargs['login_handler']),
        (r"/logout", kwargs['logout_handler']),
        (r"/.*", MainHandler),
    ]

    app = tornado.web.Application(request_handlers,
                                  default_html=kwargs['default_html'],
                                  html_dir=kwargs['html_dir'],
                                  login_html_page=kwargs['login_html_page'],
                                  cookie_secret=kwargs['cookie_secret'],
                                  on_404=kwargs.get('on_404', None),
                                  **kwargs['extra_settings'])

    # The caller's globals let JS-triggered callbacks resolve Python
    # functions defined in the calling module.
    caller_globals = inspect.stack()[1][0].f_globals

    # start additional ioloops on new threads
    threading.Thread(target=start_pycallback_handler_ioloop,
                     args=(caller_globals, ),
                     daemon=True).start()
    threading.Thread(target=start_retval_handler_ioloop, args=(),
                     daemon=True).start()
    threading.Thread(target=start_execjs_handler_ioloop, args=(),
                     daemon=True).start()
    if kwargs['heartbeat_interval']:
        threading.Thread(target=heartbeat_threadworker,
                         args=(kwargs['heartbeat_callback'],
                               kwargs['heartbeat_interval']),
                         daemon=True).start()

    # start the tornado server
    app.listen(kwargs['port'])
    app.settings['on_context_close'] = kwargs.get('onContextClose', None)
    app.settings['on_context_open'] = kwargs.get('onContextOpen', None)
    logging.info('**** Starting app on port %d' % kwargs['port'])
    IOLoop.current().start()
Example #34
0
def uvloop_setup(reload: bool = False) -> None:
    """Install uvloop's event loop policy (``reload`` is accepted for
    signature compatibility but not used here)."""
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
Example #35
0
import re

from datetime import datetime
from sys import platform

from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.utils import translation
from django.conf import settings

from base.servers.tornado_django_hybrid import run as run_server

# Allow tornado IOLoops on any thread; older tornado/asyncio combinations
# lack AnyThreadEventLoopPolicy, hence the ImportError guard.
try:
    from asyncio import set_event_loop_policy
    from tornado.platform.asyncio import AnyThreadEventLoopPolicy
    set_event_loop_policy(AnyThreadEventLoopPolicy())
except ImportError:
    pass

# Matches an optional [addr:] prefix (IPv4, bracketed IPv6, or FQDN)
# followed by a required port -- the same shape as runserver's argument.
naiveip_re = re.compile(
    r"""^(?:
(?P<addr>
    (?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) |         # IPv4 address
    (?P<ipv6>\[[a-fA-F0-9:]+\]) |               # IPv6 address
    (?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)


class Command(BaseCommand):
    help = 'Run django using the tornado server'
    requires_migrations_checks = True
Example #36
0
"""
Application Factory module.

Provides the means to create a fully-configured web application instance.
"""

import asyncio
import uvloop
from pathlib import Path, PurePath
from typing import Optional
# The policy must be set before any module below creates an event loop.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())     # Install uvloop I/O policy before anything else!

from aiohttp import web
from aiohttp_swagger import setup_swagger
import aiotask_context as context

from {{cookiecutter.app_name}}.config import get_config, server_option
from {{cookiecutter.app_name}}.database import db
from {{cookiecutter.app_name}}.middlewares import sentry_middleware_factory, request_context_middleware
from {{cookiecutter.app_name}}.routes import setup_routes
from {{cookiecutter.app_name}}.logger import get_logger


_logger = get_logger()


def get_current_request() -> Optional[str]:
    """
    A helper function that returns the ID of the current application request from the
    thread-local context.
Example #37
0
#!/usr/bin/env python3

import monocle.sanitized as conf

from asyncio import gather, set_event_loop_policy, Task, wait_for, TimeoutError
# Opt into uvloop when the config asks for it; the guard also tolerates
# uvloop simply not being installed.
try:
    if conf.UVLOOP:
        from uvloop import EventLoopPolicy
        set_event_loop_policy(EventLoopPolicy())
except ImportError:
    pass

from multiprocessing.managers import BaseManager, DictProxy
from queue import Queue, Full
from argparse import ArgumentParser
from signal import signal, SIGINT, SIGTERM, SIG_IGN
from logging import getLogger, basicConfig, WARNING, INFO
from logging.handlers import RotatingFileHandler
from os.path import exists, join
from sys import platform
from time import monotonic, sleep

from sqlalchemy.exc import DBAPIError
from aiopogo import close_sessions, activate_hash_server

from monocle.shared import LOOP, get_logger, SessionManager, ACCOUNTS
from monocle.utils import get_address, dump_pickle
from monocle.worker import Worker
from monocle.overseer import Overseer
from monocle.db import FORT_CACHE
from monocle import altitudes, db_proc, spawns
Example #38
0
def loop(c):
    """Return a debug-enabled event loop under the strict policy."""
    asyncio.set_event_loop_policy(StrictEventLoopPolicy())
    debug_loop = asyncio.get_event_loop()
    debug_loop.set_debug(True)
    return debug_loop
Example #39
0
        plt.ylabel('Requets per Second', fontsize=16)
        print(plot_file_name)
        plt.savefig(plot_file_name, dpi=96)
        print("Plot is saved to {}".format(plot_file_name))
        if verbose:
            plt.show()


def main(argv):
    """Parse arguments, run every benchmark, and plot the results."""
    args = ARGS.parse_args()

    res = run_tests(args.tries, args.count, args.use_multiprocessing, [
        test_raw_zmq, test_zmq_with_poller, test_aiozmq_rpc, test_core_aiozmq,
        test_zmq_with_thread
    ])

    print()

    print_and_plot_results(args.count, res, args.verbose,
                           args.plot_file_name[0])


if __name__ == '__main__':
    # The benchmarks rely on aiozmq's ZMQ-aware event loops everywhere.
    asyncio.set_event_loop_policy(aiozmq.ZmqEventLoopPolicy())
    sys.exit(main(sys.argv))
Example #40
0
# -*- coding: utf-8 -*-
import sys
import time
import asyncio
import uvloop

# Switch to uvloop before any other module creates an event loop.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
import traceback
import logging
import readline
import discord

import josecommon as jcommon
import ext.jose as jose_bot
import jcoin.josecoin as josecoin
import joseconfig as jconfig
import joseerror as je

# profiling
from pympler import tracker
from random import SystemRandom

# OS-randomness-backed RNG shared across the module.
random = SystemRandom()

logging.basicConfig(level=logging.INFO)

# Timestamp taken at import -- presumably for uptime reporting; verify.
start_time = time.time()

#default stuff
client = discord.Client()
jcommon.set_client(client)  # to jcommon
Example #41
0
 def hook(_ForkWatcher):
     """After-fork hook: reset the loop policy and refresh the cached pid."""
     # Force instantiation of a new event loop policy as a workaround
     # for https://bugs.python.org/issue22087.
     asyncio.set_event_loop_policy(None)
     _ForkWatcher.current_pid = _os.getpid()
Example #42
0
import os

from aiohttp import web
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop

from meross_iot.http_api import MerossHttpClient
from meross_iot.model.http.exception import BadLoginException
from tests import async_get_client

# On Windows, force the selector-based loop policy.
# NOTE(review): presumably to sidestep proactor-loop incompatibilities in
# the libraries used here -- confirm against the affected dependency.
if os.name == 'nt':
    import asyncio

    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
else:
    import asyncio


class TestHttpMethods(AioHTTPTestCase):
    async def get_application(self):
        return web.Application()

    async def setUpAsync(self):
        # Wait some time before next test-burst
        await asyncio.sleep(10)
        self.meross_client, self.requires_logout = await async_get_client()

    @unittest_run_loop
    async def test_subdevice_listing(self):
        devices = await self.meross_client.async_list_devices()
        # look for a msxh0 hub
        hub = None
Example #43
0
 def tearDown(self):
     """Restore module-level scenario/fixture state and the loop policy."""
     _SCENARIO.clear()
     # Reset the fixtures back to the snapshot captured during setUp.
     _FIXTURES.clear()
     _FIXTURES.update(self.oldsetup)
     # Put back the loop policy captured during setUp.
     asyncio.set_event_loop_policy(self.policy)
Example #44
0
def init(server_config: ServerConfig,
         authenticator,
         authorizer,
         execution_service: ExecutionService,
         schedule_service: ScheduleService,
         execution_logging_service: ExecutionLoggingService,
         config_service: ConfigService,
         alerts_service: AlertsService,
         file_upload_feature: FileUploadFeature,
         file_download_feature: FileDownloadFeature,
         secret,
         server_version,
         conf_folder,
         *,
         start_server=True):
    """Build (and optionally start) the tornado web server.

    Wires every service object into the tornado Application, registers the
    HTTP/websocket routes, configures SSL and XSRF protection from
    ``server_config``, and starts the IOLoop unless ``start_server`` is
    False (useful for tests).
    """
    # Load the certificate/key pair only when SSL is configured.
    ssl_context = None
    if server_config.is_ssl():
        ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_context.load_cert_chain(server_config.get_ssl_cert_path(),
                                    server_config.get_ssl_key_path())

    # Identify callers by their authenticated user when auth is enabled,
    # otherwise by IP (plus the configured trusted user header).
    auth = TornadoAuth(authenticator)
    if auth.is_enabled():
        identification = AuthBasedIdentification(auth)
    else:
        identification = IpBasedIdentification(server_config.ip_validator,
                                               server_config.user_header_name)

    downloads_folder = file_download_feature.get_result_files_folder()

    # Core REST and websocket routes; order matters -- the catch-all
    # static handler is appended last.
    handlers = [
        (r'/conf', GetServerConf), (r'/scripts', GetScripts),
        (r'/scripts/([^/]*)', ScriptConfigSocket),
        (r'/scripts/([^/]*)/([^/]*)/list-files', ScriptParameterListFiles),
        (r'/executions/start', ScriptExecute),
        (r'/executions/stop/(.*)', ScriptStop),
        (r'/executions/kill/(.*)', ScriptKill),
        (r'/executions/io/(.*)', ScriptStreamSocket),
        (r'/executions/active', GetActiveExecutionIds),
        (r'/executions/config/(.*)', GetExecutingScriptConfig),
        (r'/executions/cleanup/(.*)', CleanupExecutingScript),
        (r'/executions/status/(.*)', GetExecutionStatus),
        (r'/history/execution_log/short', GetShortHistoryEntriesHandler),
        (r'/history/execution_log/long/(.*)', GetLongHistoryEntryHandler),
        (r'/schedule', AddSchedule), (r'/auth/info', AuthInfoHandler),
        (r'/result_files/(.*)', DownloadResultFile, {
            'path': downloads_folder
        }), (r'/admin/scripts', AdminUpdateScriptEndpoint),
        (r'/admin/scripts/([^/]*)', AdminGetScriptEndpoint),
        (r'/admin/scripts/([^/]*)/code', AdminGetScriptCodeEndpoint),
        (r"/", ProxiedRedirectHandler, {
            "url": "/index.html"
        })
    ]

    # Login/logout endpoints only exist when authentication is enabled.
    if auth.is_enabled():
        handlers.append((r'/login', LoginHandler))
        handlers.append((r'/auth/config', AuthConfigHandler))
        handlers.append((r'/logout', LogoutHandler))

    # Static assets: theme files from the conf folder, then everything else
    # through the authorization-aware static handler.
    handlers.append((r'/theme/(.*)', ThemeStaticFileHandler, {
        'path': os.path.join(conf_folder, 'theme')
    }))
    handlers.append((r"/(.*)", AuthorizedStaticFileHandler, {"path": "web"}))

    settings = {
        "cookie_secret": secret,
        "login_url": "/login.html",
        'websocket_ping_interval': 30,
        'websocket_ping_timeout': 300,
        'compress_response': True,
        # XSRF cookies are on unless protection is explicitly disabled.
        'xsrf_cookies':
        server_config.xsrf_protection != XSRF_PROTECTION_DISABLED,
    }

    application = tornado.web.Application(handlers, **settings)
    autoapply_xheaders(application)

    application.auth = auth

    # Expose the service objects to handlers via the application instance.
    application.server_config = server_config
    application.server_version = server_version
    application.authorizer = authorizer
    application.downloads_folder = downloads_folder
    application.file_download_feature = file_download_feature
    application.file_upload_feature = file_upload_feature
    application.execution_service = execution_service
    application.schedule_service = schedule_service
    application.execution_logging_service = execution_logging_service
    application.config_service = config_service
    application.alerts_service = alerts_service
    application.identification = identification
    application.max_request_size_mb = server_config.max_request_size_mb

    # Python 3.8 made the proactor loop the Windows default; switch to the
    # selector policy, which tornado requires, before creating the IOLoop.
    if os_utils.is_win() and env_utils.is_min_version('3.8'):
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    io_loop = tornado.ioloop.IOLoop.current()

    global _http_server
    _http_server = httpserver.HTTPServer(application,
                                         ssl_options=ssl_context,
                                         max_buffer_size=10 * BYTES_IN_MB)
    _http_server.listen(server_config.port, address=server_config.address)

    # Delay shutdown while scripts are still executing.
    intercept_stop_when_running_scripts(io_loop, execution_service)

    http_protocol = 'https' if server_config.ssl else 'http'
    print('Server is running on: %s://%s:%s' %
          (http_protocol, server_config.address, server_config.port))

    if start_server:
        io_loop.start()
Example #45
0
  dat = r.json()
  # filter stupid forks that didn't change anything
  blacklisted_times = ["2019-04-20T00:56:04Z"]
  forks += [arr['full_name'].replace("/battlechess", "") for arr in dat if arr['pushed_at'] not in blacklisted_times]
  print("battling", forks)
  score = defaultdict(int)
  # TODO: not n^2 tournament, double elimination?
  for u1 in forks:
    for u2 in forks:
      if u1 != u2:
        result = await battle(u1, u2)
        print("result of %s vs %s is %s" % (u1, u2, result))
        if result == '1-0':
          score[u1] += 2
          score[u2] += 0
        elif result == '0-1':
          score[u1] += 0
          score[u2] += 2
        elif result == '1/2-1/2':
          score[u1] += 1
          score[u2] += 1
  print("final scores:")
  for k,v in sorted(score.items(), key=lambda x: -x[1]):
    print("%30s : %d" % (k,v))

if __name__ == "__main__":
  # python-chess ships its own policy so engine subprocess transports work.
  asyncio.set_event_loop_policy(chess.engine.EventLoopPolicy())
  loop = asyncio.get_event_loop()
  # Drive the tournament coroutine to completion on the fresh loop.
  result = loop.run_until_complete(main())

Example #46
0
    def __init__(self, **config):
        """Build the trading engine from a nested ``config`` mapping.

        Reads the ``general``, ``exchange`` and ``strategy`` sections,
        instantiates exchanges and strategies, registers event handlers,
        and (when ``general.api`` is truthy) wires up the Tornado /
        Perspective API server.

        NOTE(review): the ``self.port``/``self.verbose``/``self.api``
        defaults read on the right-hand sides below appear to be class
        attributes defined outside this view — confirm.
        """
        # get port for API access
        self.port = config.get('general', {}).get('port', self.port)

        # run in verbose mode (print all events)
        self.verbose = bool(
            int(config.get('general', {}).get('verbose', self.verbose)))

        # enable API access?
        self.api = bool(int(config.get('general', {}).get('api', self.api)))

        # Trading type
        self.trading_type = TradingType(
            config.get('general', {}).get('trading_type',
                                          'simulation').upper())

        # Load exchange instances
        self.exchanges = getExchanges(config.get('exchange',
                                                 {}).get('exchanges', []),
                                      trading_type=self.trading_type,
                                      verbose=self.verbose)

        # instantiate the Strategy Manager
        self.manager = StrategyManager(self, self.trading_type, self.exchanges)

        # set event loop to use uvloop
        if uvloop:
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

        # install event loop
        # (must happen after the policy is set so uvloop takes effect)
        self.event_loop = asyncio.get_event_loop()

        # setup subscriptions
        self._handler_subscriptions = {
            m: []
            for m in EventType.__members__.values()
        }

        # setup `now` handler for backtest
        self._latest = datetime.fromtimestamp(0)

        # register internal management event handler before all strategy handlers
        self.registerHandler(self.manager)

        # install event handlers
        strategies = getStrategies(
            config.get('strategy', {}).get('strategies', []))
        for strategy in strategies:
            self.log.critical("Installing strategy: {}".format(strategy))
            self.registerHandler(strategy)

        # warn if no event handlers installed
        if not self.event_handlers:
            self.log.critical('Warning! No event handlers set')

        # install print handler if verbose
        if self.verbose:
            self.log.critical('Installing print handler')
            self.registerHandler(PrintHandler())

        # install webserver
        if self.api:
            self.log.critical('Installing API handlers')

            if PerspectiveManager is not None:
                table_handler = TableHandler()
                table_handler.installTables(self.table_manager)
                self.registerHandler(table_handler)

            self.api_handlers.append((r"/", RedirectHandler, {
                "url": "/index.html"
            }))
            self.api_handlers.append(
                (r"/api/v1/ws", PerspectiveTornadoHandler, {
                    "manager": self.table_manager,
                    "check_origin": True
                }))
            self.api_handlers.append((r"/static/js/(.*)", StaticFileHandler, {
                "path":
                os.path.join(os.path.dirname(__file__), '..', '..', 'ui',
                             'assets', 'static', 'js')
            }))
            self.api_handlers.append((r"/static/css/(.*)", StaticFileHandler, {
                "path":
                os.path.join(os.path.dirname(__file__), '..', '..', 'ui',
                             'assets', 'static', 'css')
            }))
            self.api_handlers.append(
                (r"/static/fonts/(.*)", StaticFileHandler, {
                    "path":
                    os.path.join(os.path.dirname(__file__), '..', '..', 'ui',
                                 'assets', 'static', 'fonts')
                }))
            self.api_handlers.append((r"/(.*)", StaticFileHandler, {
                "path":
                os.path.join(os.path.dirname(__file__), '..', '..', 'ui',
                             'assets', 'static', 'html')
            }))
            self.api_application = ServerApplication(
                handlers=self.api_handlers)
            self.log.critical('.......')
            self.log.critical(f'listening on 0.0.0.0:{self.port}')
            self.log.critical('.......')
            self.api_application.listen(self.port)
Example #47
0
 def update_event(self, inp=-1):
     """Install input 0 as the global asyncio event-loop policy.

     ``set_event_loop_policy`` returns ``None``, so output 0 is always
     set to ``None`` after the policy has been applied.
     """
     policy = self.input(0)
     self.set_output_val(0, asyncio.set_event_loop_policy(policy))
Example #48
0
    async_test_home_assistant,
    get_test_home_assistant,
    init_recorder_component,
    mock_storage as mock_storage,
)
from tests.test_util.aiohttp import mock_aiohttp_client  # noqa: E402, isort:skip
from tests.components.recorder.common import (  # noqa: E402, isort:skip
    async_recorder_block_till_done,
)

_LOGGER = logging.getLogger(__name__)

# Show everything during tests, but keep SQLAlchemy's engine chatter down.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)

asyncio.set_event_loop_policy(runner.HassEventLoopPolicy(False))
# Disable fixtures overriding our beautiful policy
# (replace the module-level setter with a no-op so no later import or
# fixture can swap the policy installed above)
asyncio.set_event_loop_policy = lambda policy: None


def pytest_configure(config):
    """Register marker for tests that log exceptions."""
    marker_line = "no_fail_on_log_exception: mark test to not fail on logged exception"
    config.addinivalue_line("markers", marker_line)


def pytest_runtest_setup():
    """Prepare pytest_socket and freezegun.

    pytest_socket:
Example #49
0
# Contains test that will run the full query.
import asyncio
import logging
import os

import pytest
from func_adl_xAOD.common.math_utils import DeltaR  # NOQA
from testfixtures import LogCapture
from tests.atlas.xaod.config import f_single, run_long_running_tests
from tests.atlas.xaod.utils import as_awkward, as_pandas, as_pandas_async

# These are *long* tests and so should not normally be run. Each test can take of order 30 seconds or so!!
# Marker object applied to every test in this module.
pytestmark = run_long_running_tests

if os.name == 'nt':
    # On Windows, force the proactor loop — presumably the xAOD runner
    # needs subprocess support, which the selector loop lacks; confirm.
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())  # type: ignore


@pytest.fixture(autouse=True)
def turn_on_logging():
    """Force DEBUG logging during each test, then drop back to WARNING.

    ``logging.basicConfig`` is a no-op once the root logger already has
    handlers, so the original teardown call never actually restored the
    level after the first test. Set the root logger's level explicitly
    so the teardown really takes effect.
    """
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger().setLevel(logging.DEBUG)
    yield None
    logging.getLogger().setLevel(logging.WARNING)


def test_select_first_of_array():
    # The hard part is that First() here does not return a single item, but, rather, an array that
    # has to be aggregated over.
    training_df = as_pandas(f_single
                            .Select(lambda e: e.Jets("AntiKt4EMTopoJets")
                                    .Select(lambda _: e.Tracks("InDetTrackParticles"))
Example #50
0
# Only bother if asyncio has been loaded by Tornado
# (Tornado 5+ runs on top of asyncio; earlier versions have their own loop)
if 'asyncio' in sys.modules and tornado.version_info[0] >= 5:

    jupyter_event_loop_initialized = False

    if 'notebook' in sys.modules:
        import traitlets
        from notebook.notebookapp import NotebookApp
        # A fully initialized NotebookApp means Jupyter already configured
        # the event loop; in that case we must not override its policy.
        jupyter_event_loop_initialized = (
            traitlets.config.Application.initialized() and isinstance(
                traitlets.config.Application.instance(), NotebookApp))

    if not jupyter_event_loop_initialized:
        import asyncio
        import tornado.platform.asyncio
        # Allow IOLoops to be created on non-main threads.
        asyncio.set_event_loop_policy(
            tornado.platform.asyncio.AnyThreadEventLoopPolicy())


def has_keyword(func, keyword):
    """Return True when *func* accepts a parameter named *keyword*."""
    if not PY3:
        # Py2 has no inspect.signature; unwrap tornado coroutines first.
        # https://stackoverflow.com/questions/50100498/determine-keywords-of-a-tornado-coroutine
        target = func.__wrapped__ if gen.is_coroutine_function(func) else func
        return keyword in inspect.getargspec(target).args
    return keyword in inspect.signature(func).parameters


# Memoize: signature inspection is comparatively expensive and the answer
# is stable for a given (func, keyword) pair.
if lru_cache:
    has_keyword = lru_cache(1000)(has_keyword)
import asyncio

import tornado.ioloop
import tornado.web
from tornado.platform.asyncio import AnyThreadEventLoopPolicy

from file_server import FileServer
from websocket_server import WebSocket

if __name__ == "__main__":
    # Let Tornado's IOLoop be created/driven from any thread.
    asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())

    routes = [
        (r"/socket", WebSocket, {'named_arguement': 'example'}),
        (r"/(.*)", FileServer, {"path": "./files"}),
    ]
    server = tornado.web.Application(routes)
    server.listen(8000)

    # Block here serving requests until interrupted.
    tornado.ioloop.IOLoop.current().start()
Example #52
0
# flake8: noqa
# temp fix for https://github.com/django/asgiref/issues/143
# Python 3.8 made the proactor loop the Windows default, which asgiref
# cannot use; pin the selector policy on 3.8.x only (3.9 fixed the issue).
import sys
if sys.platform == 'win32' and (3, 8, 0) <= sys.version_info < (
        3, 9, 0):  # pragma: no cover
    import asyncio
    asyncio.set_event_loop_policy(
        asyncio.WindowsSelectorEventLoopPolicy())  # pragma: no cover

from . import fields
from . import validators
from .app import APIFlask
from .blueprint import APIBlueprint
from .decorators import auth_required
from .decorators import doc
from .decorators import input
from .decorators import output
from .exceptions import abort
from .exceptions import HTTPError
from .schemas import Schema
from .security import HTTPBasicAuth
from .security import HTTPTokenAuth

__version__ = '0.6.0dev'
Example #53
0
def main():
    """
    Loads the Ryu apps we want to run from the config file.
    This should exit on keyboard interrupt.

    Bring-up order matters: the aioeventlet policy must be installed
    before the service (and its loop) is created, and the config must be
    fully compiled before ServiceManager loads the Ryu apps.
    """

    # Run asyncio loop in a greenthread so we can evaluate other eventlets
    # TODO: Remove once Ryu migrates to asyncio
    asyncio.set_event_loop_policy(aioeventlet.EventLoopPolicy())

    service = MagmaService('pipelined', mconfigs_pb2.PipelineD())

    # Optionally pipe errors to Sentry
    sentry_init(service_name=service.name)

    service_config = service.config

    if environment.is_dev_mode():
        of_rest_server.configure(service_config)

    # Set Ryu config params
    cfg.CONF.ofp_listen_host = "127.0.0.1"

    # override mconfig using local config.
    # TODO: move config compilation to separate module.
    # Local YAML values win; the mconfig value is only the fallback.
    enable_nat = service.config.get('enable_nat', service.mconfig.nat_enabled)
    service.config['enable_nat'] = enable_nat
    logging.info("Nat: %s", enable_nat)
    vlan_tag = service.config.get(
        'sgi_management_iface_vlan',
        service.mconfig.sgi_management_iface_vlan,
    )
    service.config['sgi_management_iface_vlan'] = vlan_tag

    sgi_ip = service.config.get(
        'sgi_management_iface_ip_addr',
        service.mconfig.sgi_management_iface_ip_addr,
    )
    service.config['sgi_management_iface_ip_addr'] = sgi_ip

    sgi_gateway_ip = service.config.get(
        'sgi_management_iface_gw',
        service.mconfig.sgi_management_iface_gw,
    )
    service.config['sgi_management_iface_gw'] = sgi_gateway_ip

    # Keep router mode off for smooth upgrade path
    service.config['dp_router_enabled'] = service.config.get(
        'dp_router_enabled',
        False,
    )
    if 'virtual_mac' not in service.config:
        # Derive the virtual MAC from the uplink bridge in router mode,
        # otherwise from the main OVS bridge.
        if service.config['dp_router_enabled']:
            up_bridge_name = service.config.get(
                'uplink_bridge',
                UPLINK_OVS_BRIDGE_NAME,
            )
            mac_addr = get_if_hwaddr(up_bridge_name)
        else:
            mac_addr = get_if_hwaddr(service.config.get('bridge_name'))

        service.config['virtual_mac'] = mac_addr

    # this is not read from yml file.
    service.config['uplink_port'] = OFPP_LOCAL
    uplink_port_name = service.config.get('ovs_uplink_port_name', None)
    if enable_nat is False and uplink_port_name is not None:
        service.config['uplink_port'] = BridgeTools.get_ofport(
            uplink_port_name, )

    # header enrichment related configuration.
    service.config['proxy_port_name'] = PROXY_PORT_NAME
    he_enabled_flag = False
    if service.mconfig.he_config:
        he_enabled_flag = service.mconfig.he_config.enable_header_enrichment
    he_enabled = service.config.get('he_enabled', he_enabled_flag)
    service.config['he_enabled'] = he_enabled

    # tune datapath according to config
    tune_datapath(service.config)

    # monitoring related configuration
    mtr_interface = service.config.get('mtr_interface', None)
    if mtr_interface:
        mtr_ip = get_ip_from_if(mtr_interface)
        service.config['mtr_ip'] = mtr_ip

    # Load the ryu apps
    service_manager = ServiceManager(service)
    service_manager.load()

    def callback(returncode):
        # Non-zero exit from iptables means the MASQUERADE rule was not added.
        if returncode != 0:
            logging.error(
                "Failed to set MASQUERADE: %d",
                returncode,
            )

    # TODO fix this hack for XWF
    # "-C" checks whether the rule already exists; only append ("-A") if not,
    # so repeated restarts don't pile up duplicate rules.
    if enable_nat is True or service.config.get('setup_type') == 'XWF':
        ip_table_rule = 'POSTROUTING -o %s -j MASQUERADE' % service.config[
            'nat_iface']
        check_and_add = 'iptables -t nat -C %s || iptables -t nat -A %s' % \
                (ip_table_rule, ip_table_rule)
        logging.debug("check_and_add: %s", check_and_add)
        call_process(
            check_and_add,
            callback,
            service.loop,
        )

    service.loop.create_task(
        monitor_ifaces(service.config['monitored_ifaces'], ), )

    manager = AppManager.get_instance()
    # Add pipelined rpc servicer
    pipelined_srv = PipelinedRpcServicer(
        service.loop,
        manager.applications.get('GYController', None),
        manager.applications.get('EnforcementController', None),
        manager.applications.get('EnforcementStatsController', None),
        manager.applications.get('DPIController', None),
        manager.applications.get('UEMacAddressController', None),
        manager.applications.get('CheckQuotaController', None),
        manager.applications.get('IPFIXController', None),
        manager.applications.get('VlanLearnController', None),
        manager.applications.get('TunnelLearnController', None),
        manager.applications.get('Classifier', None),
        manager.applications.get('InOutController', None),
        manager.applications.get('NGServiceController', None),
        service.config,
        service_manager,
    )
    pipelined_srv.add_to_server(service.rpc_server)

    if service.config['setup_type'] == 'CWF':
        bridge_ip = service.config['bridge_ip_address']
        has_quota_port = service.config['has_quota_port']
        no_quota_port = service.config['no_quota_port']

        def on_exit_server_thread():
            service.StopService(None, None)

        # For CWF start quota check servers
        start_check_quota_server(
            run_flask,
            bridge_ip,
            has_quota_port,
            True,
            on_exit_server_thread,
        )
        start_check_quota_server(
            run_flask,
            bridge_ip,
            no_quota_port,
            False,
            on_exit_server_thread,
        )

    if service.config['setup_type'] == 'LTE':
        polling_interval = service.config.get(
            'ovs_gtp_stats_polling_interval',
            MIN_OVSDB_DUMP_POLLING_INTERVAL,
        )
        collector = GTPStatsCollector(
            polling_interval,
            service.loop,
        )
        collector.start()

    # Run the service loop
    # (blocks until the service is stopped, e.g. by keyboard interrupt)
    service.run()

    # Cleanup the service
    service.close()
Example #54
0
from sys import platform, version_info
from asyncio import set_event_loop_policy, WindowsSelectorEventLoopPolicy

if platform == "win32" and version_info >= (3, 8, 0):
    # Python 3.8 made the proactor loop the Windows default; install the
    # selector policy instead — NOTE(review): presumably required by one
    # of the FFxivPythonTrigger modules loaded below; confirm which.
    set_event_loop_policy(WindowsSelectorEventLoopPolicy())

from FFxivPythonTrigger import *

Logger.print_log_level = Logger.DEBUG
try:
    register_module("SocketLogger")

    # core
    register_modules([
        "HttpApi",
        "ChatLog",
        "XivMemory",
        "XivMagic",
        "Command",
    ])

    # functions
    register_modules([
        "MoPlus",
        "CutsceneSkipper",
        "SuperJump",
        "ActorQuery",
        "Zoom",
        "Teleporter",
        "XivCombo",
        # "ACTLogLines",
Example #55
0
 def restore_asyncio_events__event_loop_policy(self, policy):
     """Reinstall a previously captured asyncio event-loop policy (cleanup)."""
     asyncio.set_event_loop_policy(policy)
Example #56
0
def main():
    """
    Loads the Ryu apps we want to run from the config file.
    This should exit on keyboard interrupt.

    Order matters: the aioeventlet policy must be installed before the
    service (and its event loop) is created, and the config must be fully
    compiled before ServiceManager loads the Ryu apps.
    """

    # Run asyncio loop in a greenthread so we can evaluate other eventlets
    # TODO: Remove once Ryu migrates to asyncio
    asyncio.set_event_loop_policy(aioeventlet.EventLoopPolicy())

    service = MagmaService('pipelined', mconfigs_pb2.PipelineD())
    service_config = service.config

    if environment.is_dev_mode():
        of_rest_server.configure(service_config)

    # Set Ryu config params
    cfg.CONF.ofp_listen_host = "127.0.0.1"

    # override mconfig using local config.
    # TODO: move config compilation to separate module.
    # Local YAML values win; the mconfig value is only the fallback.
    enable_nat = service.config.get('enable_nat', service.mconfig.nat_enabled)
    service.config['enable_nat'] = enable_nat
    logging.info("Nat: %s", enable_nat)
    vlan_tag = service.config.get('sgi_management_iface_vlan',
                                  service.mconfig.sgi_management_iface_vlan)
    service.config['sgi_management_iface_vlan'] = vlan_tag

    sgi_ip = service.config.get('sgi_management_iface_ip_addr',
                                service.mconfig.sgi_management_iface_ip_addr)
    service.config['sgi_management_iface_ip_addr'] = sgi_ip

    sgi_gateway_ip = service.config.get(
        'sgi_management_iface_gw', service.mconfig.sgi_management_iface_gw)
    service.config['sgi_management_iface_gw'] = sgi_gateway_ip

    if 'virtual_mac' not in service.config:
        # Default the virtual MAC to the hardware address of the OVS bridge.
        service.config['virtual_mac'] = get_if_hwaddr(
            service.config.get('bridge_name'))

    # this is not read from yml file.
    service.config['uplink_port'] = OFPP_LOCAL
    uplink_port_name = service.config['ovs_uplink_port_name']
    if enable_nat is False and uplink_port_name is not None:
        service.config['uplink_port'] = BridgeTools.get_ofport(
            uplink_port_name)

    service.config['proxy_port_name'] = PROXY_PORT_NAME

    # Load the ryu apps
    service_manager = ServiceManager(service)
    service_manager.load()

    def callback(returncode):
        # Non-zero exit from iptables means the MASQUERADE rule was not added.
        if returncode != 0:
            logging.error("Failed to set MASQUERADE: %d", returncode)

    # TODO fix this hack for XWF
    if enable_nat is True or service.config.get('setup_type') == 'XWF':
        call_process(
            'iptables -t nat -A POSTROUTING -o %s -j MASQUERADE' %
            service.config['nat_iface'], callback, service.loop)

    service.loop.create_task(
        monitor_ifaces(service.config['monitored_ifaces'], service.loop), )

    manager = AppManager.get_instance()
    # Add pipelined rpc servicer
    pipelined_srv = PipelinedRpcServicer(
        service.loop, manager.applications.get('GYController', None),
        manager.applications.get('EnforcementController', None),
        manager.applications.get('EnforcementStatsController', None),
        manager.applications.get('DPIController', None),
        manager.applications.get('UEMacAddressController', None),
        manager.applications.get('CheckQuotaController', None),
        manager.applications.get('IPFIXController', None),
        manager.applications.get('VlanLearnController', None),
        manager.applications.get('TunnelLearnController', None),
        manager.applications.get('Classifier', None), service.config,
        service_manager)
    pipelined_srv.add_to_server(service.rpc_server)

    if service.config['setup_type'] == 'CWF':
        bridge_ip = service.config['bridge_ip_address']
        has_quota_port = service.config['has_quota_port']
        no_quota_port = service.config['no_quota_port']

        def on_exit_server_thread():
            service.StopService(None, None)

        # For CWF start quota check servers
        start_check_quota_server(run_flask, bridge_ip, has_quota_port, True,
                                 on_exit_server_thread)
        start_check_quota_server(run_flask, bridge_ip, no_quota_port, False,
                                 on_exit_server_thread)

    if service.config['setup_type'] == 'LTE':
        polling_interval = service.config.get('ovs_gtp_stats_polling_interval',
                                              MIN_OVSDB_DUMP_POLLING_INTERVAL)
        collector = GTPStatsCollector(polling_interval, service.loop)
        collector.start()

    # Run the service loop
    # (blocks until the service is stopped, e.g. by keyboard interrupt)
    service.run()

    # Cleanup the service
    service.close()
Example #57
0
def main():
    """CLI entry point: build the argument parser and dispatch a command.

    Fix: install the Windows selector event-loop policy FIRST. The
    original set it only after the double-click "auto" path had already
    executed ``asyncio.run(run_auto())`` — i.e. the one path guaranteed
    to run on Windows executed on the default Proactor loop that the
    policy switch exists to avoid.
    """
    # On Windows + Python >= 3.8 the default Proactor loop breaks the
    # async libraries used here; switch to the selector policy before any
    # asyncio.run() call.
    if sys.version_info[0] == 3 and sys.version_info[
            1] >= 8 and sys.platform.startswith('win'):
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

    if platform.system().upper() == 'WINDOWS' and len(sys.argv) == 1:
        #auto start on double click with default settings
        _, err = asyncio.run(run_auto())
        if err is not None:
            print(err)
        return

    import argparse

    parser = argparse.ArgumentParser(description='Gather gather gather')
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        default=0,
                        help='Increase verbosity, can be stacked')
    parser.add_argument('-s',
                        '--silent',
                        action='store_false',
                        help='Silent mode')
    parser.add_argument(
        '--sql',
        help=
        'SQL connection string. When using SQLITE it works best with FULL FILE PATH!!!'
    )

    subparsers = parser.add_subparsers(help='commands')
    subparsers.required = True
    subparsers.dest = 'command'

    nest_group = subparsers.add_parser(
        'nest',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help='Start the Nest server')
    nest_group.add_argument('--ip',
                            default='127.0.0.1',
                            help='IP address to listen on')
    nest_group.add_argument('--port',
                            type=int,
                            default=5000,
                            help='IP address to listen on')
    nest_group.add_argument('--work-dir',
                            default='./workdir',
                            help='Working directory for caching and tempfiles')
    nest_group.add_argument('--backend',
                            default='networkx',
                            help='graph backend, pls dont change this')

    adinfo_group = subparsers.add_parser('adinfo',
                                         help='Get a list of AD info entries')
    dbinit_group = subparsers.add_parser('dbinit', help='Creates database')

    version_group = subparsers.add_parser('version', help='version info')

    ldap_group = subparsers.add_parser(
        'ldap',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help='Enumerate potentially vulnerable users via LDAP',
        epilog=MSLDAPURLDecoder.help_epilog)
    ldap_group.add_argument('ldap_url',
                            help='Connection specitication in URL format')
    ldap_group.add_argument('--ldap-workers',
                            type=int,
                            default=4,
                            help='LDAP worker count for parallelization')
    ldap_group.add_argument('--ldap-queue-size',
                            type=int,
                            default=4,
                            help='LDAP worker queue max size.')
    ldap_group.add_argument('-d',
                            '--ad-id',
                            help='AD id from DB. signals resumption task')
    ldap_group.add_argument('-c',
                            '--calculate-edges',
                            action='store_true',
                            help='Calculate edges after enumeration')

    auto_group = subparsers.add_parser('auto', help='auto mode, windows only!')
    auto_group.add_argument('--ldap-workers',
                            type=int,
                            default=4,
                            help='LDAP worker count for parallelization')
    auto_group.add_argument('--smb-workers',
                            type=int,
                            default=50,
                            help='SMB worker count for parallelization')
    auto_group.add_argument('-d', '--dns', help='DNS server for resolving IPs')

    recalc_group = subparsers.add_parser('recalc',
                                         help='Recalculate edges from SDs')
    recalc_group.add_argument('graphid', help='graph id from DB.')

    enum_group = subparsers.add_parser(
        'enum',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help='Enumerate all stuffs',
        epilog=MSLDAPURLDecoder.help_epilog)
    enum_group.add_argument('ldap_url',
                            help='Connection specitication in URL format')
    enum_group.add_argument('smb_url',
                            help='Connection specitication in URL format')
    enum_group.add_argument(
        '-q',
        '--same-query',
        action='store_true',
        help=
        'Use the same query for LDAP as for SMB. LDAP url must still be present, but without a query'
    )
    enum_group.add_argument('--ldap-workers',
                            type=int,
                            default=4,
                            help='LDAP worker count for parallelization')
    enum_group.add_argument('--smb-workers',
                            type=int,
                            default=50,
                            help='SMB worker count for parallelization')
    enum_group.add_argument('--smb-folder-depth',
                            type=int,
                            default=1,
                            help='Files enumeration folder depth')
    enum_group.add_argument('--smb-share-enum',
                            action='store_true',
                            help='Enables file enumeration in shares')
    enum_group.add_argument('-d', '--dns', help='DNS server for resolving IPs')
    enum_group.add_argument(
        '-n',
        '--do-not-store',
        action='store_false',
        help=
        'Skip storing membership and SD info to DB. Will skip edge calculation, and will leave the raw file on disk'
    )
    enum_group.add_argument('-k',
                            '--kerberoast',
                            help='Kerberos URL for kerberoasting')

    share_group = subparsers.add_parser('shares',
                                        help='Enumerate shares on target')
    share_group.add_argument(
        'ad_id', help='ID of the domainfo to poll targets rom the DB')
    share_group.add_argument('smb_url',
                             help='Credential specitication in URL format')
    share_group.add_argument('--smb-workers',
                             type=int,
                             default=50,
                             help='SMB worker count for parallelization')
    share_group.add_argument('-d',
                             '--dns',
                             help='DNS server for resolving IPs')

    smball_group = subparsers.add_parser('smball',
                                         help='Enumerate shares on target')
    smball_group.add_argument(
        'ad_id', help='ID of the domainfo to poll targets rom the DB')
    smball_group.add_argument('smb_url',
                              help='Credential specitication in URL format')
    smball_group.add_argument('--smb-workers',
                              type=int,
                              default=50,
                              help='SMB worker count for parallelization')
    smball_group.add_argument('-d',
                              '--dns',
                              help='DNS server for resolving IPs')

    files_group = subparsers.add_parser('files',
                                        help='Enumerate files on targets')
    #files_group.add_argument('src', choices=['file', 'ldap', 'domain', 'cmd'])
    files_group.add_argument('src', choices=['domain'])
    files_group.add_argument('smb_url',
                             help='Credential specitication in URL format')
    #files_group.add_argument('-l', '--ldap-url', help='ldap_connection_string. Use this to get targets from the domain controller')
    files_group.add_argument(
        '-d', '--ad-id', help='ID of the domainfo to poll targets from the DB')
    files_group.add_argument(
        '-s',
        '--with-sid',
        action='store_true',
        help='Also fetches the SId for each file and folder')
    #files_group.add_argument('-i', '--lookup-ad', help='ID of the domainfo to look up comupter names. Advisable to set for LDAP and file pbased targets')
    #files_group.add_argument('-t', '--target-file', help='taget file with hostnames. One per line.')
    files_group.add_argument('--smb-folder-depth',
                             type=int,
                             default=1,
                             help='Recursion depth for folder enumeration')
    files_group.add_argument(
        '--smb-workers',
        type=int,
        default=50,
        help='SMB worker count for parallelization. Read: connection/share')
    files_group.add_argument('--smb-queue-size',
                             type=int,
                             default=100000,
                             help='SMB worker queue max size.')

    localgroup_group = subparsers.add_parser(
        'localgroups', help='Enumerate local group memberships on target')
    localgroup_group.add_argument(
        'ad_id', help='ID of the domainfo to poll targets rom the DB')
    localgroup_group.add_argument(
        'smb_url', help='Credential specitication in URL format')
    localgroup_group.add_argument('--smb-workers',
                                  type=int,
                                  default=50,
                                  help='SMB worker count for parallelization')
    localgroup_group.add_argument('-d',
                                  '--dns',
                                  help='DNS server for resolving IPs')

    session_group = subparsers.add_parser(
        'sessions', help='Enumerate connected sessions on target')
    session_group.add_argument(
        'ad_id', help='ID of the domainfo to poll targets rom the DB')
    session_group.add_argument('smb_url',
                               help='Credential specitication in URL format')
    session_group.add_argument('--smb-workers',
                               type=int,
                               default=50,
                               help='SMB worker count for parallelization')
    session_group.add_argument('-d',
                               '--dns',
                               help='DNS server for resolving IPs')

    kerberoast_group = subparsers.add_parser('kerberoast', help='Kerberoast')
    kerberoast_group.add_argument(
        'ad_id', help='ID of the domainfo to poll targets rom the DB')
    kerberoast_group.add_argument('kerberos_url', help='Kerberos URL')

    credential_group = subparsers.add_parser(
        'creds', help='Add credential information from impacket')
    credential_group.add_argument(
        'impacket_file',
        help='file with LM and NT hashes, generated by impacket secretsdump.py'
    )
    credential_group.add_argument('-d',
                                  '--domain-id',
                                  type=int,
                                  default=-1,
                                  help='Domain ID to identify the domain')

    passwords_group = subparsers.add_parser(
        'passwords', help='Add password information from hashcat potfile')
    passwords_group.add_argument('potfile',
                                 help='hashcat potfile with cracked hashes')
    passwords_group.add_argument(
        '--disable-usercheck',
        action='store_true',
        help=
        'Disables the user pre-check when inserting to DB. All unique passwords will be uploaded.'
    )
    passwords_group.add_argument(
        '--disable-passwordcheck',
        action='store_true',
        help=
        'Disables the password uniqueness check. WILL FAIL IF PW IS ALREADY IN THE DB.'
    )

    uncracked_group = subparsers.add_parser(
        'uncracked', help='Polls the DB for uncracked passwords')
    uncracked_group.add_argument('-t',
                                 '--hash-type',
                                 default='NT',
                                 choices=['NT', 'LM'])
    uncracked_group.add_argument('--history',
                                 action='store_true',
                                 help='Show password history hashes as well')
    uncracked_group.add_argument('-d',
                                 '--domain-id',
                                 type=int,
                                 default=-1,
                                 help='Domain ID to identify the domain')

    cracked_group = subparsers.add_parser(
        'cracked', help='Polls the DB for cracked passwords')
    cracked_group.add_argument('-d',
                               '--domain-id',
                               type=int,
                               default=-1,
                               help='Domain ID to identify the domain')

    args = parser.parse_args()

    # Dispatch the parsed command on a fresh event loop.
    asyncio.run(run(args))
Example #58
0
File: runner.py Project: 1e1/core-1
def run(runtime_config: RuntimeConfig) -> int:
    """Run Home Assistant and return its exit code."""
    policy = HassEventLoopPolicy(runtime_config.debug)
    asyncio.set_event_loop_policy(policy)
    return asyncio.run(setup_and_run_hass(runtime_config))
Example #59
0
def tearDownModule():
    # Reset to the default event-loop policy so later test modules
    # are not affected by whatever policy these tests installed.
    asyncio.set_event_loop_policy(None)
def run_test():
    """Run the async test body on a fresh uvloop event loop.

    Fix: the original only closed the loop when ``run_until_complete``
    returned normally, leaking the loop (and its selector/fd resources)
    whenever the test coroutine raised. Close it in ``finally``.
    """
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(run_test_async(loop))
    finally:
        loop.close()