Example 1
    def run(self):
        # Windows-compatible, with subprocess support:
        if sys.platform.startswith('win'):
            loop = asyncio.ProactorEventLoop()
            asyncio.set_event_loop(loop)
        else:
            loop = asyncio.get_event_loop()

        html_app = web.Application()
        html_app.router.add_get('/vuespa.js', self._handle_vuespa_js)
        html_app.router.add_get('/vuespa.ws', self._handle_vuespa_ws)

        # Now, allow the websocket to receive messages via HTTP requests.
        # We want a new tab to be able to call '/api/blahws123/select?q=0096.pdf'
        # and have this websocket select the right tab.
        html_app.router.add_get('/vuespa.ws.http/{id}/{fn}',
                                self._handle_vuespa_ws_http)
        html_app.router.add_post('/vuespa.ws.http/{id}/{fn}',
                                 self._handle_vuespa_ws_http)
        lame_response = lambda req: web.Response(
            content_type='text/plain',
            body='Add /fn?arg=8 to URL to call `fn` with `{arg: 8}`')
        html_app.router.add_get('/vuespa.ws.http/{id}', lame_response)
        html_app.router.add_post('/vuespa.ws.http/{id}', lame_response)

        if self._config_web_callback is not None:
            self._config_web_callback(html_app)

        html_app.router.add_get('/{path:.*}', self._handle_vue)
        html_app.router.add_post('/{path:.*}', self._handle_vue)

        # Run the application on a randomly selected port (or specified port)
        if self._port is not None:
            self._port_vue = self._port + 1
        html_server_handler = html_app.make_handler()
        html_server = loop.run_until_complete(
            loop.create_server(html_server_handler, self.host, self._port))
        self._port = html_server.sockets[0].getsockname()[1]

        dist_exists = os.path.lexists(os.path.join(self._vue_path, 'dist'))
        needs_npm_mod = self._development or not dist_exists

        # Install node packages if no node_modules folder.
        if (needs_npm_mod and not os.path.lexists(
                os.path.join(self._vue_path, 'node_modules'))):
            node_install = loop.run_until_complete(
                asyncio.create_subprocess_shell('npm install',
                                                cwd=self._vue_path))
            loop.run_until_complete(node_install.communicate())

        promises = []
        ui_proc = None
        if self._development:
            # Ensure node process is installed first.

            # BUT FIRST, work around excessive websocket closing.
            # See https://github.com/vuejs-templates/webpack/issues/1205
            vue_config_path = os.path.join(self._vue_path, 'vue.config.js')
            if os.path.lexists(vue_config_path):
                with open(vue_config_path) as f:
                    js_src = f.read()
                js_pat = r'(module\.exports *= *)(.*)'
                m = re.search(js_pat, js_src, flags=re.M | re.DOTALL)
                if m is not None:
                    try:
                        j = json.loads(m.group(2))
                    except json.JSONDecodeError as exc:
                        raise ValueError(
                            f"Looks like {vue_config_path} has invalid JSON: {m.group(2)}"
                        ) from exc
                    if not j.get('devServer', {}).get('disableHostCheck',
                                                      False):
                        j.setdefault('devServer', {})
                        j['devServer']['disableHostCheck'] = True
                        with open(vue_config_path, 'w') as f:
                            f.write(
                                re.sub(js_pat,
                                       lambda m: m.group(1) + json.dumps(
                                           j, indent=2),
                                       js_src,
                                       flags=re.M | re.DOTALL))

            ui_proc = loop.run_until_complete(
                asyncio.create_subprocess_shell(
                    f"FORCE_COLOR=1 npx --no-install vue-cli-service serve",
                    stdout=asyncio.subprocess.PIPE,
                    # Leave stderr connected
                    cwd=self._vue_path))

            # We need to get the port first, so read lines from stdout until we
            # find that information.  Then, communicate.
            async def streamer(stream_in, stream_out, re_stop=None):
                """Returns `None` on process exit, or a regex Match object.
                """
                while True:
                    line = await stream_in.readline()
                    if not line:
                        break
                    line = line.decode()
                    stream_out.write(line)
                    if re_stop is not None:
                        m = re.search(re_stop, line)
                        if m is not None:
                            return m

            m = loop.run_until_complete(
                streamer(
                    ui_proc.stdout,
                    sys.stdout,
                    # Note that the regex looks weird because we must strip the
                    # ANSI color code around the port number.
                    re_stop=re.compile(
                        '- Local: .*?http://[^:]+:\x1b\\[[^m]*m(?P<port>\\d+)'))
            )
            self._port_vue = int(m.group('port'))
            promises.append(streamer(ui_proc.stdout, sys.stdout))
            promises.append(ui_proc.wait())
        elif not dist_exists:
            # Build UI once, otherwise use cached version
            proc = loop.run_until_complete(
                asyncio.create_subprocess_shell('npm run build',
                                                cwd=self._vue_path))
            loop.run_until_complete(proc.communicate())

        webbrowser.open(f'http://localhost:{self.port}')
        try:
            # Terminate either when a child process terminates OR when a
            # KeyboardInterrupt is sent.
            if promises:
                loop.run_until_complete(
                    asyncio.wait(promises,
                                 return_when=asyncio.FIRST_COMPLETED))
            else:
                # Nothing will terminate early.
                loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            if ui_proc is not None:
                ui_proc.kill()
            html_server.close()
            loop.run_until_complete(html_server.wait_closed())
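
The explicit ProactorEventLoop setup above is what subprocess pipes required on Windows before Python 3.8. A minimal sketch, assuming Python 3.7+, of the same subprocess pattern driven by asyncio.run(); on 3.8+ the proactor loop is already the Windows default, so only older interpreters need the policy switch:

import asyncio
import sys


async def npm_install(cwd):
    # Same create_subprocess_shell pattern as above, without a hand-managed loop.
    proc = await asyncio.create_subprocess_shell('npm install', cwd=cwd)
    await proc.communicate()
    return proc.returncode


if sys.platform.startswith('win') and sys.version_info < (3, 8):
    # Pre-3.8 Windows still defaults to the selector loop.
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

if __name__ == '__main__':
    asyncio.run(npm_install('.'))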
Example 2
from async_timeout import timeout

from goodbyecaptcha import util
from goodbyecaptcha.proxy import ProxyDB
from goodbyecaptcha.solver import Solver

SECRET_KEY = "CHANGEME"
BANNED_TIMEOUT = 45 * 60  # 45 minutes
SOLVE_DURATION = 3 * 60  # 3 minutes

proxies = ProxyDB(last_banned_timeout=BANNED_TIMEOUT)
proxy_source = None  # Can be URL or file location
proxy_username, proxy_password = (None, None)

if sys.platform == "win32":
    parent_loop = asyncio.ProactorEventLoop()
    asyncio.set_event_loop(parent_loop)
else:
    parent_loop = asyncio.get_event_loop()
    asyncio.get_child_watcher().attach_loop(parent_loop)

app = web.Application()

# Clear Chrome temporary profiles
dir = f"{Path.home()}/.pyppeteer/.dev_profile"
shutil.rmtree(dir, ignore_errors=True)


# Bugs are to be expected, despite my efforts. Apparently, event loops paired
# with threads are nothing short of a hassle.
class TaskRerun(object):
Example 3
    from aiohttp.web_fileresponse import FileResponse
except ImportError:
    aiohttp = web = None
    StaticResource = HTTPNotFound = HTTPForbidden = Response = FileResponse = object

try:
    from watchdog.observers import Observer
except ImportError:
    Observer = None

LRJS_PATH = os.path.join(os.path.dirname(__file__), 'livereload.js')
REBUILDING_REFRESH_DELAY = 0.35
IDLE_REFRESH_DELAY = 0.05

if sys.platform == 'win32':
    asyncio.set_event_loop(asyncio.ProactorEventLoop())


class CommandAuto(Command):
    """Automatic rebuilds for Nikola."""

    name = "auto"
    has_server = True
    doc_purpose = "builds and serves a site; automatically detects site changes, rebuilds, and optionally refreshes a browser"
    dns_sd = None
    delta_last_rebuild = datetime.timedelta(milliseconds=100)
    web_runner = None  # type: web.AppRunner

    cmd_options = [{
        'name': 'port',
        'short': 'p',
Example 4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--setup", "-s", action="store_true")
    parser.add_argument("--phone", "-p", action="append")
    parser.add_argument("--token", "-t", action="append", dest="tokens")
    parser.add_argument("--heroku", action="store_true")
    parser.add_argument("--translate", action="store_true")
    arguments = parser.parse_args()
    logging.debug(arguments)

    if arguments.translate:
        from .translations import translateutil
        translateutil.ui()
        return

    if sys.platform == 'win32':
        # Subprocess support; not needed in 3.8
        asyncio.set_event_loop(asyncio.ProactorEventLoop())

    clients = []

    phones = set(arguments.phone if arguments.phone else [])
    phones.update(
        f[18:-8] for f in os.listdir(os.path.dirname(utils.get_base_dir()))
        if f.startswith("friendly-telegram-+") and f.endswith(".session"))

    authtoken = os.environ.get("authorization_strings", False)  # for heroku
    if authtoken:
        try:
            authtoken = json.loads(authtoken)
        except json.decoder.JSONDecodeError:
            logging.warning("authtoken invalid")
            authtoken = False

    if arguments.tokens and not authtoken:
        authtoken = {}
    if arguments.tokens:
        for token in arguments.tokens:
            phone = sorted(phones).pop(0)
            phones.remove(phone)  # Handled separately by authtoken logic
            authtoken.update(**{phone: token})

    try:
        from . import api_token
    except ImportError:
        try:
            api_token = collections.namedtuple("api_token", ["ID", "HASH"])(
                os.environ["api_id"], os.environ["api_hash"])
        except KeyError:
            run_config({})
            return
    if authtoken:
        for phone, token in authtoken.items():
            try:
                clients += [
                    TelegramClient(StringSession(token),
                                   api_token.ID,
                                   api_token.HASH,
                                   connection_retries=None).start(phone)
                ]
            except ValueError:
                run_config({})
                return
            clients[-1].phone = phone  # for consistency
    if len(clients) == 0 and len(phones) == 0:
        phones = [input("Please enter your phone: ")]
    for phone in phones:
        try:
            if arguments.heroku:
                clients += [
                    TelegramClient(StringSession(),
                                   api_token.ID,
                                   api_token.HASH,
                                   connection_retries=None).start(phone)
                ]
            else:
                clients += [
                    TelegramClient(os.path.join(
                        os.path.dirname(utils.get_base_dir()),
                        "friendly-telegram" +
                        (("-" + phone) if phone else "")),
                                   api_token.ID,
                                   api_token.HASH,
                                   connection_retries=None).start(phone)
                ]
        except ValueError:
            # Bad API hash/ID
            run_config({})
            return
        except PhoneNumberInvalidError:
            print(
                "Please check the phone number. Use international format (+XX...) and don't put spaces in it."
            )
            continue
        clients[-1].phone = phone  # so we can format stuff nicer in configurator

    if arguments.heroku:
        key = input(
            "Please enter your Heroku API key (from https://dashboard.heroku.com/account): "
        ).strip()
        from . import heroku
        heroku.publish(clients, key, api_token)
        print(
            "Installed to heroku successfully! Type .help in Telegram for help."
        )
        return

    loops = []
    for client in clients:
        loops += [amain(client, clients, arguments.setup)]

    asyncio.get_event_loop().set_exception_handler(lambda _, x: logging.error(
        "Exception on event loop! %s", x["message"], exc_info=x["exception"]))
    asyncio.get_event_loop().run_until_complete(asyncio.gather(*loops))
Example 5
    def new_loop(self):
        return asyncio.ProactorEventLoop()
Example 6
"""
import sys
import asyncio

__doc__ = "异步策略测试"

# -----------------------------------默认异步策略-----------------------------------------
loop_policy = asyncio.get_event_loop_policy()
print("loop_policy -> ", loop_policy)

# -----------------------------------不同的异步策略----------------------------------------
select_loop = asyncio.SelectorEventLoop()
print("select_loop - > ", select_loop)

abstract_loop = asyncio.AbstractEventLoop()
print("abstract_loop - > ", abstract_loop)

if sys.platform == 'win32':
    proactor_loop = asyncio.ProactorEventLoop()
    print("proactor_loop -> ", proactor_loop)

# 更改异步策略
loop = asyncio.get_event_loop()
print("loop -> ", loop)

asyncio.set_event_loop(abstract_loop)
print(asyncio.get_event_loop())

asyncio.new_event_loop()
print(asyncio.get_event_loop())
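
The script above instantiates loop classes directly; a short sketch, assuming Python 3.7+, of doing the same through the policy API, which is what new_event_loop() and get_event_loop() consult:

import asyncio
import sys

if sys.platform == 'win32':
    # Opt into the selector implementation explicitly on Windows.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

loop = asyncio.new_event_loop()
print(type(loop).__name__)  # a selector-based loop; exact class name varies by platform
loop.close()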
Example 7
    def create_event_loop(self):
        return asyncio.ProactorEventLoop()
Example 8
    def getNewLoop(self):
        if sys.platform == 'win32':
            new_loop = asyncio.ProactorEventLoop()
        else:
            new_loop = asyncio.new_event_loop()
        return new_loop
Example 9
            self.instances.append(
                Instance(i,
                         shard_list,
                         shard_count,
                         name,
                         self.loop,
                         main=self))
            await asyncio.sleep(shard_per_cluster * 5)


if __name__ == "__main__":
    with open("assets/data/names.txt", "r") as f:
        names = f.read().splitlines()

    if sys.platform.startswith("win"):
        loop = asyncio.ProactorEventLoop()  # subprocess pipes only work with this under Windows
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    loop.create_task(Main().launch())
    try:
        loop.run_forever()
    except KeyboardInterrupt:

        def shutdown_handler(_loop, context):
            if "exception" not in context or not isinstance(
                    context["exception"], asyncio.CancelledError):
                _loop.default_exception_handler(context)  # TODO: fix context

        loop.set_exception_handler(shutdown_handler)
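        # Illustration (not part of the original example): typical cleanup
        # after installing such a handler is to cancel pending tasks, let them
        # finish, and close the loop (assumes Python 3.7+ for asyncio.all_tasks).
        pending = asyncio.all_tasks(loop=loop)
        for task in pending:
            task.cancel()
        loop.run_until_complete(
            asyncio.gather(*pending, return_exceptions=True))
        loop.close()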
Example 10
def event_loop_selector():
    loop1 = asyncio.ProactorEventLoop()
    loop2 = asyncio.SelectorEventLoop()
Example 11
    def solve(
        self,
        timeout: Optional[timedelta] = None,
        nr_solutions: Optional[int] = None,
        processes: Optional[int] = None,
        random_seed: Optional[int] = None,
        all_solutions: bool = False,
        intermediate_solutions: bool = False,
        free_search: bool = False,
        **kwargs,
    ):
        """Solves the Instance using its given solver configuration.

        Find the solutions to the given MiniZinc instance using the given solver
        configuration. First, the Instance will be ensured to be in a state
        where the solver specified in the solver configuration can understand
        the problem and then the solver will be requested to find the
        appropriate solution(s) to the problem.

        Args:
            timeout (Optional[timedelta]): Set the time limit for the process of
                solving the instance.
            nr_solutions (Optional[int]): The requested number of solutions.
                (Only available on satisfaction problems and when the ``-n``
                flag is supported by the solver).
            processes (Optional[int]): Set the number of processes the solver
                can use. (Only available when the ``-p`` flag is supported by
                the solver).
            random_seed (Optional[int]): Set the random seed for solver. (Only
                available when the ``-r`` flag is supported by the solver).
            free_search (bool): Allow the solver to ignore the search definition
                within the instance. (Only available when the ``-f`` flag is
                supported by the solver).
            all_solutions (bool): Request to solver to find all solutions. (Only
                available on satisfaction problems and when the ``-a`` flag is
                supported by the solver)
            intermediate_solutions (bool): Request the solver to output any
                intermediate solutions that are found during the solving
                process. (Only available on optimisation problems and when the
                ``-a`` flag is supported by the solver)
            **kwargs: Other flags to be passed onto the solver. ``--`` can be
                omitted in the name of the flag. If the type of the flag is
                Boolean, then its value signifies its occurrence.

        Returns:
            Tuple[Status, Optional[Union[List[Dict], Dict]], Dict]:
                tuple containing solving status, values assigned in the
                solution, and statistical information. If no solution is found,
                the second member of the tuple is ``None``.

        Raises:
            MiniZincError: An error occurred while compiling or solving the
                model instance.

        """
        coroutine = self.solve_async(
            timeout=timeout,
            nr_solutions=nr_solutions,
            processes=processes,
            random_seed=random_seed,
            all_solutions=all_solutions,
            intermediate_solutions=intermediate_solutions,
            free_search=free_search,
            **kwargs,
        )
        if sys.version_info >= (3, 7):
            if sys.platform == "win32":
                asyncio.set_event_loop_policy(
                    asyncio.WindowsProactorEventLoopPolicy())
            return asyncio.run(coroutine)
        else:
            if sys.platform == "win32":
                loop = asyncio.ProactorEventLoop()
            else:
                loop = asyncio.events.new_event_loop()

            try:
                asyncio.events.set_event_loop(loop)
                return loop.run_until_complete(coroutine)
            finally:
                asyncio.events.set_event_loop(None)
                loop.close()
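
A hedged usage sketch of the call documented above; the construction of 'instance' (model plus solver configuration) is assumed to have happened elsewhere, and the unpacking follows the Returns section of the docstring:

from datetime import timedelta

# 'instance' is a hypothetical, already-configured Instance.
status, solution, statistics = instance.solve(timeout=timedelta(seconds=30))
if solution is not None:
    print(status, solution)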
Example 12
        def _command_routine():
            import time
            import signal
            import asyncio
            import os.path
            import traceback
            import subprocess

            _Context.tasks = []
            _Context.loop = None
            _Context.event = None
            _Context.condition = True
            _Context.main_task = None
            _Context.stdin_task = None
            _Context.time_barrier = None

            def _make_signal_codes():
                class _Result(object):
                    def __init__(self):
                        super().__init__()
                        self.SIGTERM = None
                        self.SIGUSR1 = None

                _result = _Result()
                try:
                    _result.SIGTERM = signal.SIGTERM
                except AttributeError:
                    pass
                try:
                    _result.SIGUSR1 = signal.SIGUSR1
                except AttributeError:
                    pass
                return _result

            _Context.signal_codes = _make_signal_codes()

            def _print_message(message):
                print(message, file=sys.stderr)
                sys.stderr.flush()

            async def _coroutine():
                _Context.event = asyncio.Condition()
                _Context.time_barrier = time.monotonic() + _Context.timeout

                try:

                    async def _sigusr1_coroutine():
                        async with _Context.event:
                            _Context.time_barrier = time.monotonic() + _Context.timeout
                            _Context.event.notify_all()

                    async def _sigterm_coroutine():
                        _print_message("SIGTERM received")
                        async with _Context.event:
                            _Context.condition = False
                            _Context.event.notify_all()

                    def _add_signal_handler(key, coroutine):
                        def _routine():
                            _Context.tasks.append(
                                _Context.loop.create_task(coroutine()))

                        try:
                            _Context.loop.add_signal_handler(
                                sig=key,
                                callback=lambda: _Context.loop.
                                call_soon_threadsafe(_routine))
                        except NotImplementedError:
                            traceback.print_exc(file=sys.stderr)
                            sys.stderr.flush()

                    if _Context.signal_codes.SIGUSR1 is None:
                        _print_message("SIGUSR1 is not supported")
                    else:
                        _add_signal_handler(key=_Context.signal_codes.SIGUSR1,
                                            coroutine=_sigusr1_coroutine)
                    if _Context.signal_codes.SIGTERM is None:
                        _print_message("SIGTERM is not supported")
                    else:
                        _add_signal_handler(key=_Context.signal_codes.SIGTERM,
                                            coroutine=_sigterm_coroutine)

                    async def _stdin_coroutine():
                        _subprocess = await asyncio.create_subprocess_exec(
                            sys.executable,
                            os.path.abspath(__file__),
                            "stdin_subprocess_main",
                            stdin=None,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.DEVNULL)

                        try:
                            _subprocess_task = _Context.loop.create_task(
                                _subprocess.wait())
                            try:
                                while not _subprocess_task.done():
                                    _size = int(await _subprocess.stdout.readline())
                                    if _size <= 0:
                                        break
                                    async with _Context.event:
                                        _Context.time_barrier = time.monotonic() + _Context.timeout
                                        _Context.event.notify_all()
                            finally:
                                if not _subprocess_task.done():
                                    _subprocess_task.cancel()
                                await asyncio.gather(_subprocess_task,
                                                     return_exceptions=True)

                        finally:
                            _subprocess.terminate()
                            await _subprocess.wait()

                    _Context.stdin_task = _Context.loop.create_task(
                        _stdin_coroutine())
                    _Context.tasks.append(_Context.stdin_task)

                    async def _main_coroutine():
                        async with _Context.event:
                            while _Context.condition:
                                _time = time.monotonic()
                                try:
                                    if not (_Context.time_barrier > _time):
                                        raise asyncio.TimeoutError()
                                    await asyncio.wait_for(
                                        _Context.event.wait(),
                                        timeout=_Context.time_barrier - _time)
                                except asyncio.TimeoutError:
                                    _print_message(
                                        "timeout occurred, shutting down the system"
                                    )
                                    subprocess.check_call(
                                        ("systemd-run", "--on-active=5",
                                         "systemctl", "poweroff"))
                                    raise
                        _print_message("exiting without shutdown")

                    _Context.main_task = _Context.loop.create_task(
                        _main_coroutine())
                    _Context.tasks.append(_Context.main_task)

                    async def _manage_tasks():
                        await asyncio.wait(_Context.tasks,
                                           return_when=asyncio.FIRST_COMPLETED)
                        _done = tuple([
                            _task for _task in _Context.tasks if _task.done()
                        ])
                        for _task in _done:
                            _Context.tasks.remove(_task)
                        await asyncio.gather(*_done, return_exceptions=True)

                    while not _Context.main_task.done():
                        await _manage_tasks()
                    await _Context.main_task

                finally:
                    if not ((_Context.stdin_task is None)
                            or _Context.stdin_task.done()):
                        _Context.stdin_task.cancel()
                    await asyncio.gather(*_Context.tasks,
                                         return_exceptions=True)

            if "win32" == sys.platform:
                asyncio.set_event_loop(asyncio.ProactorEventLoop())

            _Context.loop = asyncio.get_event_loop()
            _Context.loop.run_until_complete(_coroutine())
Example 13
        print("No test files identified, ideally you should run tox")
        return

    code, _ = await async_exec("pytest",
                               "-vv",
                               "--force-sugar",
                               "--",
                               *test_files,
                               display=True)
    print("=============================")

    if code == 0:
        printc(PASS, "Yay! This will most likely pass tox")
    else:
        printc(FAIL, "Tests not passing")

    if skip_lint:
        printc(FAIL, "LINT DISABLED")


if __name__ == "__main__":
    LOOP = (asyncio.ProactorEventLoop()
            if sys.platform == "win32" else asyncio.get_event_loop())

    try:
        LOOP.run_until_complete(main())
    except (FileNotFoundError, KeyboardInterrupt):
        pass
    finally:
        LOOP.close()
Example 14
def parse_arguments():
    """Parse the arguments"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--setup", "-s", action="store_true")
    parser.add_argument("--phone", "-p", action="append")
    parser.add_argument("--token", "-t", action="append", dest="tokens")
    parser.add_argument("--heroku", action="store_true")
    parser.add_argument("--local-db", dest="local", action="store_true")
    parser.add_argument("--web-only", dest="web_only", action="store_true")
    parser.add_argument("--no-web", dest="web", action="store_false")
    parser.add_argument("--data-root",
                        dest="data_root",
                        default="",
                        help="Root path to store session files in")
    parser.add_argument(
        "--no-auth",
        dest="no_auth",
        action="store_true",
        help="Disable authentication and API token input, exitting if needed")
    parser.add_argument("--test-dc",
                        dest="test_dc",
                        const=None,
                        nargs="?",
                        default=False,
                        help="Connect to the test DC")
    parser.add_argument("--proxy-host",
                        dest="proxy_host",
                        action="store",
                        help="MTProto proxy host, without port")
    parser.add_argument("--proxy-port",
                        dest="proxy_port",
                        action="store",
                        type=int,
                        help="MTProto proxy port")
    parser.add_argument("--proxy-secret",
                        dest="proxy_secret",
                        action="store",
                        help="MTProto proxy secret")
    if __debug__:
        parser.add_argument(
            "--self-test",
            dest="self_test",
            const=1,
            nargs="?",
            default=False,
            type=int,
            help=
            ("Run self-tests then exit.\nAs this is designed for testing in an unprivileged "
             "environment, this will use a DB which is initialised so as to prevent anyone "
             "with access to the Telegram account being tested from using the bot."
             ))
    parser.add_argument(
        "--heroku-web-internal",
        dest="heroku_web_internal",
        action="store_true",
        help=
        "This is for internal use only. If you use it, things will go wrong.")
    parser.add_argument(
        "--heroku-deps-internal",
        dest="heroku_deps_internal",
        action="store_true",
        help=
        "This is for internal use only. If you use it, things will go wrong.")
    parser.add_argument(
        "--docker-deps-internal",
        dest="docker_deps_internal",
        action="store_true",
        help=
        "This is for internal use only. If you use it, things will go wrong.")
    parser.add_argument(
        "--heroku-restart-internal",
        dest="heroku_restart_internal",
        action="store_true",
        help=
        "This is for internal use only. If you use it, things will go wrong.")
    arguments = parser.parse_args()
    logging.debug(arguments)
    if sys.platform == "win32":
        # Subprocess support; not needed in 3.8 but not harmful
        asyncio.set_event_loop(asyncio.ProactorEventLoop())

    return arguments
Example 15
    def start(self):
        if sys.platform == "win32":
            loop = asyncio.ProactorEventLoop()
            asyncio.set_event_loop(loop)
        asyncio.ensure_future(MicroSSHServer.start(self))
Example 16
    def __init__(self,
                 *,
                 address=None,
                 port=51234,
                 persist_file="accessory.state",
                 pincode=None,
                 encoder=None,
                 loader=None,
                 loop=None,
                 mac=None,
                 listen_address=None,
                 advertised_address=None,
                 interface_choice=None,
                 zeroconf_instance=None):
        """
        Initialize a new AccessoryDriver object.

        :param pincode: The pincode that HAP clients must prove they know in order
            to pair with this `Accessory`. Defaults to None, in which case a random
            pincode is generated. The pincode has the format "xxx-xx-xxx", where x is
            a digit.
        :type pincode: bytearray

        :param port: The local port on which the accessory will be accessible.
            In other words, this is the port of the HAPServer.
        :type port: int

        :param address: The local address on which the accessory will be accessible.
            In other words, this is the address of the HAPServer. If not given, the
            driver will try to select an address.
        :type address: str

        :param persist_file: The file name in which the state of the accessory
            will be persisted. This uses `expandvars`, so may contain `~` to
            refer to the user's home directory.
        :type persist_file: str

        :param encoder: The encoder to use when persisting/loading the Accessory state.
        :type encoder: AccessoryEncoder

        :param mac: The MAC address which will be used to identify the accessory.
            If not given, the driver will try to select a MAC address.
        :type mac: str

        :param listen_address: The local address on the HAPServer will listen.
            If not given, the value of the address parameter will be used.
        :type listen_address: str

        :param advertised_address: The address of the HAPServer announced via mDNS.
            This can be used to announce an external address from behind a NAT.
            If not given, the value of the address parameter will be used.
        :type advertised_address: str

        :param interface_choice: The zeroconf interfaces to listen on.
        :type InterfacesType: [InterfaceChoice.Default, InterfaceChoice.All]

        :param zeroconf_instance: A Zeroconf instance. When running multiple accessories or
            bridges a single zeroconf instance can be shared to avoid the overhead
            of processing the same data multiple times.
        """
        if loop is None:
            if sys.platform == "win32":
                loop = asyncio.ProactorEventLoop()
            else:
                loop = asyncio.new_event_loop()

            executor_opts = {"max_workers": None}
            if sys.version_info >= (3, 6):
                executor_opts["thread_name_prefix"] = "SyncWorker"

            self.executor = ThreadPoolExecutor(**executor_opts)
            loop.set_default_executor(self.executor)
            self.tid = threading.current_thread()
        else:
            self.tid = threading.main_thread()
            self.executor = None

        self.loop = loop

        self.accessory = None
        if zeroconf_instance is not None:
            self.advertiser = zeroconf_instance
        elif interface_choice is not None:
            self.advertiser = Zeroconf(interfaces=interface_choice)
        else:
            self.advertiser = Zeroconf()
        self.persist_file = os.path.expanduser(persist_file)
        self.encoder = encoder or AccessoryEncoder()
        self.topics = {}  # topic: set of (address, port) of subscribed clients
        self.loader = loader or Loader()
        self.aio_stop_event = asyncio.Event(loop=loop)
        self.stop_event = threading.Event()

        self.safe_mode = False

        self.mdns_service_info = None
        self.srp_verifier = None

        address = address or util.get_local_address()
        advertised_address = advertised_address or address
        self.state = State(address=advertised_address,
                           mac=mac,
                           pincode=pincode,
                           port=port)

        listen_address = listen_address or address
        network_tuple = (listen_address, self.state.port)
        self.http_server = HAPServer(network_tuple, self)
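
A minimal construction sketch based on the parameters documented above; the import path assumes the HAP-python package layout, and TemperatureSensor stands in for a hypothetical Accessory subclass:

from pyhap.accessory_driver import AccessoryDriver

# Hypothetical accessory; only port and persist_file from the documented
# parameters are used here.
driver = AccessoryDriver(port=51826, persist_file='sensor.state')
driver.add_accessory(accessory=TemperatureSensor(driver, 'Sensor'))
driver.start()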
Example 17
if __name__ == "__main__":

    runopts = get_runopts()
    gpio_setup()
    f = serial.Serial('/dev/arduino', baudrate=38400, timeout=1)

    ############################################ MAIN LOOP ###########################################

    while True:
        #Ts=syncronous_measure(f,instr,runopts)
        #t1=time.time()
        #print('sync:',t1-t0)
        #print(Ts)

        if os.name == 'nt':
            ioloop = asyncio.ProactorEventLoop()  # for subprocess' pipes on Windows
            asyncio.set_event_loop(ioloop)
        else:
            ioloop = asyncio.get_event_loop()

        ############################################ MEASUREMENT ###########################################
        a = ioloop.run_until_complete(asynchronous_measure(f, instr, runopts))
        # Ts=tasks[0].result()
        # print('async:',t1-t0)

        ##### UNPACKING TASK OUTS ########
        Temps = a[0].result()
        Ts = Temps[0]
        Ts2 = Temps[1]
        Ts3 = Temps[2]
Example 18
def main():
    # TODO: *actual* argparsing

    if '--no-checks' not in sys.argv:
        sanity_checks()

    finalize_logging()

    import asyncio

    if sys.platform == 'win32':
        loop = asyncio.ProactorEventLoop()  # needed for subprocesses
        asyncio.set_event_loop(loop)

    tried_requirementstxt = False
    tryagain = True

    loops = 0
    max_wait_time = 60

    while tryagain:
        # Maybe I need to try to import stuff first, then actually import stuff
        # It'd save me a lot of pain with all that awful exception type checking

        m = None
        try:
            from musicbot import MusicBot
            m = MusicBot()

            sh.terminator = ''
            sh.terminator = '\n'
            m.run()

        except SyntaxError:
            log.exception("Syntax error (this is a bug, not your fault)")
            break

        except ImportError:
            # TODO: if error module is in pip or dpy requirements...

            if not tried_requirementstxt:
                tried_requirementstxt = True

                log.exception("Error starting bot")
                log.info("Attempting to install dependencies...")

                err = PIP.run_install('--upgrade -r requirements.txt')

                if err: # TODO: add the specific error check back as not to always tell users to sudo it
                    print()
                    log.critical("You may need to %s to install dependencies." %
                                 ['use sudo', 'run as admin'][sys.platform.startswith('win')])
                    break
                else:
                    print()
                    log.info("Ok lets hope it worked")
                    print()
            else:
                log.exception("Unknown ImportError, exiting.")
                break

        except Exception as e:
            if hasattr(e, '__module__') and e.__module__ == 'musicbot.exceptions':
                if e.__class__.__name__ == 'HelpfulError':
                    log.info(e.message)
                    break

                elif e.__class__.__name__ == "TerminateSignal":
                    break

                elif e.__class__.__name__ == "RestartSignal":
                    loops = 0
                    pass
            else:
                log.exception("Error starting bot")

        finally:
            if not m or not m.init_ok:
                if any(sys.exc_info()):
                    # How to log this without redundant messages...
                    traceback.print_exc()
                break

            asyncio.set_event_loop(asyncio.new_event_loop())
            loops += 1

        sleeptime = min(loops * 2, max_wait_time)
        if sleeptime:
            log.info("Restarting in {} seconds...".format(loops*2))
            time.sleep(sleeptime)

    print()
    log.info("All done.")
Example 19
    def __init__(self,
                 *,
                 address=None,
                 port=51234,
                 persist_file='accessory.state',
                 pincode=None,
                 encoder=None,
                 loader=None,
                 loop=None,
                 mac=None,
                 listen_address=None,
                 advertised_address=None,
                 interface_choice=None):
        """
        Initialize a new AccessoryDriver object.

        :param pincode: The pincode that HAP clients must prove they know in order
            to pair with this `Accessory`. Defaults to None, in which case a random
            pincode is generated. The pincode has the format "xxx-xx-xxx", where x is
            a digit.
        :type pincode: bytearray

        :param port: The local port on which the accessory will be accessible.
            In other words, this is the port of the HAPServer.
        :type port: int

        :param address: The local address on which the accessory will be accessible.
            In other words, this is the address of the HAPServer. If not given, the
            driver will try to select an address.
        :type address: str

        :param persist_file: The file name in which the state of the accessory
            will be persisted. This uses `expandvars`, so may contain `~` to
            refer to the user's home directory.
        :type persist_file: str

        :param encoder: The encoder to use when persisting/loading the Accessory state.
        :type encoder: AccessoryEncoder

        :param mac: The MAC address which will be used to identify the accessory.
            If not given, the driver will try to select a MAC address.
        :type mac: str

        :param listen_address: The local address on the HAPServer will listen.
            If not given, the value of the address parameter will be used.
        :type listen_address: str

        :param advertised_address: The address of the HAPServer announced via mDNS.
            This can be used to announce an external address from behind a NAT.
            If not given, the value of the address parameter will be used.
        :type advertised_address: str

        :param interface_choice: The zeroconf interfaces to listen on.
        :type InterfacesType: [InterfaceChoice.Default, InterfaceChoice.All]
        """
        if sys.platform == 'win32':
            self.loop = loop or asyncio.ProactorEventLoop()
        else:
            self.loop = loop or asyncio.new_event_loop()

        executor_opts = {'max_workers': None}
        if sys.version_info >= (3, 6):
            executor_opts['thread_name_prefix'] = 'SyncWorker'

        self.executor = ThreadPoolExecutor(**executor_opts)
        self.loop.set_default_executor(self.executor)

        self.accessory = None
        self.http_server_thread = None
        if interface_choice is not None:
            self.advertiser = Zeroconf(interfaces=interface_choice)
        else:
            self.advertiser = Zeroconf()
        self.persist_file = os.path.expanduser(persist_file)
        self.encoder = encoder or AccessoryEncoder()
        self.topics = {}  # topic: set of (address, port) of subscribed clients
        self.topic_lock = threading.Lock()  # for exclusive access to the topics
        self.loader = loader or Loader()
        self.aio_stop_event = asyncio.Event(loop=self.loop)
        self.stop_event = threading.Event()
        self.event_queue = queue.Queue()  # (topic, bytes)
        self.send_event_thread = None  # the event dispatch thread
        self.sent_events = 0
        self.accumulated_qsize = 0

        self.safe_mode = False

        self.mdns_service_info = None
        self.srp_verifier = None

        address = address or util.get_local_address()
        advertised_address = advertised_address or address
        self.state = State(address=advertised_address,
                           mac=mac,
                           pincode=pincode,
                           port=port)

        listen_address = listen_address or address
        network_tuple = (listen_address, self.state.port)
        self.http_server = HAPServer(network_tuple, self)
Example 20
    def setUp(self):
        if os.name == "nt":
            self.loop = asyncio.ProactorEventLoop()
        else:
            self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
Example 21
    def start(self):
        self.loop = asyncio.ProactorEventLoop()
        self.running = True
        self.loop.run_until_complete(self.handshake())
Example 22
    def run(self,
            cmd,
            protocol=None,
            stdin=None,
            cwd=None,
            env=None,
            **kwargs):
        """Execute a command and communicate with it.

        Parameters
        ----------
        cmd : list or str
          Sequence of program arguments. Passing a single string causes
          execution via the platform shell.
        protocol : WitlessProtocol, optional
          Protocol class handling interaction with the running process
          (e.g. output capture). A number of pre-crafted classes are
          provided (e.g `KillOutput`, `NoCapture`, `GitProgress`).
        stdin : byte stream, optional
          File descriptor like, used as stdin for the process. Passed
          verbatim to subprocess.Popen().
        cwd : path-like, optional
          If given, commands are executed with this path as PWD,
          the PWD of the parent process is used otherwise. Overrides
          any `cwd` given to the constructor.
        env : dict, optional
          Environment to be used for command execution. If `cwd`
          was given, 'PWD' in the environment is set to its value.
          This must be a complete environment definition, no values
          from the current environment will be inherited. Overrides
          any `env` given to the constructor.
        kwargs :
          Passed to the Protocol class constructor.

        Returns
        -------
        dict
          At minimum there will be keys 'stdout', 'stderr' with
          unicode strings of the cumulative standard output and error
          of the process as values.

        Raises
        ------
        CommandError
          On execution failure (non-zero exit code) this exception is
          raised which provides the command (cmd), stdout, stderr,
          exit code (status), and a message identifying the failed
          command, as properties.
        FileNotFoundError
          When a given executable does not exist.
        """
        if protocol is None:
            # by default let all subprocess stream pass through
            protocol = NoCapture

        cwd = cwd or self.cwd
        env = self._get_adjusted_env(
            env or self.env,
            cwd=cwd,
        )

        # start a new event loop, which we will close again further down
        # if this is not done, events like this will occur:
        #   BlockingIOError: [Errno 11] Resource temporarily unavailable
        #   Exception ignored when trying to write to the signal wakeup fd:
        # It is unclear to me why this happens when reusing an event loop that
        # was stopped from time to time, but starting fresh and doing a full
        # termination seems to address the issue
        if sys.platform == "win32":
            # use special event loop that supports subprocesses on windows
            event_loop = asyncio.ProactorEventLoop()
        else:
            event_loop = asyncio.SelectorEventLoop()
        asyncio.set_event_loop(event_loop)
        try:
            # include the subprocess manager in the asyncio event loop
            results = event_loop.run_until_complete(
                run_async_cmd(
                    event_loop,
                    cmd,
                    protocol,
                    stdin,
                    protocol_kwargs=kwargs,
                    cwd=cwd,
                    env=env,
                ))
        finally:
            # be kind to callers and leave asyncio as we found it
            asyncio.set_event_loop(None)
            # terminate the event loop, cannot be undone, hence we start a fresh
            # one each time (see BlockingIOError notes above)
            event_loop.close()

        # log before any exception is raised
        lgr.log(8, "Finished running %r with status %s", cmd, results['code'])

        # make it such that we always blow if a protocol did not report
        # a return code at all
        if results.get('code', True) not in [0, None]:
            # the runner has a better idea, doc string warns Protocol
            # implementations not to return these
            results.pop('cmd', None)
            results.pop('cwd', None)
            raise CommandError(
                # whatever the results were, we carry them forward
                cmd=cmd,
                cwd=self.cwd,
                **results,
            )
        # denoise, must be zero at this point
        results.pop('code', None)
        return results
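
A hedged usage sketch of the run() API documented above; 'runner' stands in for an instance of the surrounding class, and the command and path are arbitrary:

results = runner.run(['git', 'status'], cwd='/tmp/some-repo')
print(results['stdout'])
print(results['stderr'])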
Example 23
    def setUp(self):
        self.loop = asyncio.ProactorEventLoop()
        self.set_event_loop(self.loop)
Example 24
def get_event_loop():
    """Get loop of asyncio"""
    if sys.platform == "win32":
        return asyncio.ProactorEventLoop()
    return asyncio.new_event_loop()
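
Despite its name, the helper above always constructs a fresh loop rather than returning the running one, so the caller owns its lifecycle. A short sketch of that, assuming only the function as shown:

import asyncio

loop = get_event_loop()
asyncio.set_event_loop(loop)
try:
    loop.run_until_complete(asyncio.sleep(0))
finally:
    loop.close()
    asyncio.set_event_loop(None)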
Example 25
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--setup", "-s", action="store_true")
    parser.add_argument("--config", "-c", action="append")
    parser.add_argument("--value", "-v", action="append")
    parser.add_argument("--phone", "-p", action="append")
    parser.add_argument("--token", "-t", action="append", dest="tokens")
    parser.add_argument("--heroku", action="store_true")
    parser.add_argument("--translate", action="store_true")
    arguments = parser.parse_args()
    logging.debug(arguments)

    if arguments.translate:
        from .translations import translateutil
        translateutil.ui()
        return

    if sys.platform == 'win32':
        # Subprocess support
        asyncio.set_event_loop(asyncio.ProactorEventLoop())

    clients = []

    phones = arguments.phone if arguments.phone else []
    phones += set(
        f[18:-8] for f in os.listdir(os.path.dirname(utils.get_base_dir()))
        if f.startswith("friendly-telegram-+") and f.endswith(".session"))

    authtoken = os.environ.get("authorization_strings", False)  # for heroku
    if authtoken:
        authtoken = json.loads(authtoken)

    if arguments.tokens and not authtoken:
        authtoken = {}
    if arguments.tokens:
        for token in arguments.tokens:
            phone = phones.pop(0)
            authtoken.update(**{phone: token})
    if authtoken or arguments.heroku:
        from telethon.sessions import StringSession
    if arguments.heroku:

        def session_name(phone):
            return StringSession()
    else:

        def session_name(phone):
            return os.path.join(
                os.path.dirname(utils.get_base_dir()),
                "friendly-telegram" + (("-" + phone) if phone else ""))

    try:
        from . import api_token
    except ImportError:
        try:
            api_token = collections.namedtuple("api_token", ["ID", "HASH"])(
                os.environ["api_id"], os.environ["api_hash"])
        except KeyError:
            run_config({})
            return
    if authtoken:
        for phone, token in authtoken.items():
            try:
                clients += [
                    TelegramClient(StringSession(token),
                                   api_token.ID,
                                   api_token.HASH,
                                   connection_retries=None).start(phone)
                ]
            except ValueError:
                run_config({})
                return
            clients[-1].phone = phone  # for consistency
    if os.path.isfile(
            os.path.join(os.path.dirname(utils.get_base_dir()),
                         'friendly-telegram.session')):
        try:
            clients += [
                TelegramClient(session_name(None), api_token.ID,
                               api_token.HASH).start()
            ]
        except ValueError:
            run_config({})
            return
        print(
            "You're using the legacy session format. Please contact support, this will break in a future update."
        )
    if len(clients) == 0 and len(phones) == 0:
        phones += [input("Please enter your phone: ")]
    for phone in phones:
        try:
            try:
                clients += [
                    TelegramClient(session_name(phone),
                                   api_token.ID,
                                   api_token.HASH,
                                   connection_retries=None).start(phone)
                ]
            except ValueError:
                run_config({})
                return
            clients[-1].phone = phone  # so we can format stuff nicer in configurator
        except PhoneNumberInvalidError:
            print(
                "Please check the phone number. Use international format (+XX...) and don't put spaces in it."
            )
            return

    if arguments.heroku:
        key = input("Please enter your Heroku API key: ").strip()
        from . import heroku
        heroku.publish(clients, key, api_token)
        return

    cfg = arguments.config if arguments.config else []
    vlu = arguments.value if arguments.value else []

    loops = []
    for client in clients:
        atexit.register(client.disconnect)
        loops += [amain(client, dict(zip(cfg, vlu)), clients, arguments.setup)]

    asyncio.get_event_loop().set_exception_handler(lambda _, x: logging.error(
        "Exception on event loop! %s", x["message"], exc_info=x["exception"]))
    asyncio.get_event_loop().run_until_complete(asyncio.gather(*loops))
Example 26
        def setUp(self):
            policy = asyncio.get_event_loop_policy()
            self.loop = asyncio.ProactorEventLoop()

            # ensure that the event loop is passed explicitly in asyncio
            policy.set_event_loop(None)
Example 27
    def new_event_loop(self) -> asyncio.AbstractEventLoop:
        return asyncio.ProactorEventLoop(proactor=self.__proactor)
Example 28
def commonBuildDevelop(parser, argv, bobRoot, develop):
    def _downloadArgument(arg):
        if arg.startswith('packages=') or arg in [
                'yes', 'no', 'deps', 'forced', 'forced-deps', 'forced-fallback'
        ]:
            return arg
        raise argparse.ArgumentTypeError("{} invalid.".format(arg))

    parser.add_argument('packages',
                        metavar='PACKAGE',
                        type=str,
                        nargs='+',
                        help="(Sub-)package to build")
    parser.add_argument(
        '--destination',
        metavar="DEST",
        default=None,
        help="Destination of build result (will be overwritten!)")
    parser.add_argument(
        '-j',
        '--jobs',
        default=None,
        type=int,
        nargs='?',
        const=...,
        help="Specifies  the  number of jobs to run simultaneously.")
    parser.add_argument('-k',
                        '--keep-going',
                        default=None,
                        action='store_true',
                        help="Continue  as much as possible after an error.")
    parser.add_argument('-f',
                        '--force',
                        default=None,
                        action='store_true',
                        help="Force execution of all build steps")
    parser.add_argument('-n',
                        '--no-deps',
                        default=None,
                        action='store_true',
                        help="Don't build dependencies")
    parser.add_argument('-p',
                        '--with-provided',
                        dest='build_provided',
                        default=None,
                        action='store_true',
                        help="Build provided dependencies")
    parser.add_argument('--without-provided',
                        dest='build_provided',
                        default=None,
                        action='store_false',
                        help="Build without provided dependencies")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-b',
                       '--build-only',
                       dest='build_mode',
                       default=None,
                       action='store_const',
                       const='build-only',
                       help="Don't checkout, just build and package")
    group.add_argument('-B',
                       '--checkout-only',
                       dest='build_mode',
                       action='store_const',
                       const='checkout-only',
                       help="Don't build, just check out sources")
    group.add_argument('--normal',
                       dest='build_mode',
                       action='store_const',
                       const='normal',
                       help="Checkout, build and package")
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--clean',
                       action='store_true',
                       default=None,
                       help="Do clean builds (clear build directory)")
    group.add_argument('--incremental',
                       action='store_false',
                       dest='clean',
                       help="Reuse build directory for incremental builds")
    parser.add_argument(
        '--always-checkout',
        default=[],
        action='append',
        metavar="RE",
        help="Regex pattern of packages that should always be checked out")
    parser.add_argument(
        '--resume',
        default=False,
        action='store_true',
        help="Resume build where it was previously interrupted")
    parser.add_argument(
        '-q',
        '--quiet',
        default=0,
        action='count',
        help="Decrease verbosity (may be specified multiple times)")
    parser.add_argument(
        '-v',
        '--verbose',
        default=0,
        action='count',
        help="Increase verbosity (may be specified multiple times)")
    parser.add_argument('--no-logfiles',
                        default=None,
                        action='store_true',
                        help="Disable logFile generation.")
    parser.add_argument('-D',
                        default=[],
                        action='append',
                        dest="defines",
                        help="Override default environment variable")
    parser.add_argument('-c',
                        dest="configFile",
                        default=[],
                        action='append',
                        help="Use config File")
    parser.add_argument('-e',
                        dest="white_list",
                        default=[],
                        action='append',
                        metavar="NAME",
                        help="Preserve environment variable")
    parser.add_argument('-E',
                        dest="preserve_env",
                        default=False,
                        action='store_true',
                        help="Preserve whole environment")
    parser.add_argument('--upload',
                        default=None,
                        action='store_true',
                        help="Upload to binary archive")
    parser.add_argument('--link-deps',
                        default=None,
                        help="Add linked dependencies to workspace paths",
                        dest='link_deps',
                        action='store_true')
    parser.add_argument(
        '--no-link-deps',
        default=None,
        help="Do not add linked dependencies to workspace paths",
        dest='link_deps',
        action='store_false')
    parser.add_argument(
        '--download',
        metavar="MODE",
        default=None,
        help=
        "Download from binary archive (yes, no, deps, forced, forced-deps, "
        "forced-fallback, packages=<packages>)",
        type=_downloadArgument)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--sandbox',
                       action='store_true',
                       default=None,
                       help="Enable sandboxing")
    group.add_argument('--no-sandbox',
                       action='store_false',
                       dest='sandbox',
                       help="Disable sandboxing")
    parser.add_argument('--clean-checkout',
                        action='store_true',
                        default=None,
                        dest='clean_checkout',
                        help="Do a clean checkout if SCM state is dirty.")
    args = parser.parse_args(argv)

    defines = processDefines(args.defines)

    startTime = time.time()

    try:
        from ...develop.make import makeSandboxHelper
        makeSandboxHelper()
    except ImportError:
        pass

    if sys.platform == 'win32':
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
        multiprocessing.set_start_method('spawn')
        executor = concurrent.futures.ProcessPoolExecutor()
    else:
        # The ProcessPoolExecutor is barely usable for our interactive use
        # case. On SIGINT any busy executor should stop. The only way this
        # does not explode is to ignore SIGINT before spawning the process
        # pool and to re-enable SIGINT in every executor. In the main process
        # we have to ignore BrokenProcessPool errors as we will likely hit
        # them. To "prime" the process pool, a dummy workload must be executed
        # because the processes are spawned lazily.
        loop = asyncio.get_event_loop()
        origSigInt = signal.getsignal(signal.SIGINT)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # fork early before process gets big
        if sys.platform == 'msys':
            multiprocessing.set_start_method('fork')
        else:
            multiprocessing.set_start_method('forkserver')
        executor = concurrent.futures.ProcessPoolExecutor()
        executor.submit(dummy).result()
        signal.signal(signal.SIGINT, origSigInt)
    loop.set_default_executor(executor)

    try:
        recipes = RecipeSet()
        recipes.defineHook('releaseNameFormatter',
                           LocalBuilder.releaseNameFormatter)
        recipes.defineHook('developNameFormatter',
                           LocalBuilder.developNameFormatter)
        recipes.defineHook('developNamePersister', None)
        recipes.setConfigFiles(args.configFile)
        recipes.parse()

        # If arguments are not passed on the cmdline, take them from
        # default.yaml or fall back to the hard-coded default value.
        if develop:
            cfg = recipes.getCommandConfig().get('dev', {})
        else:
            cfg = recipes.getCommandConfig().get('build', {})

        defaults = {
            'destination': '',
            'force': False,
            'no_deps': False,
            'build_mode': 'normal',
            'clean': not develop,
            'upload': False,
            'download': "deps" if develop else "yes",
            'sandbox': not develop,
            'clean_checkout': False,
            'no_logfiles': False,
            'link_deps': True,
            'jobs': 1,
            'keep_going': False,
        }

        for a in vars(args):
            if getattr(args, a) is None:
                setattr(args, a, cfg.get(a, defaults.get(a)))

        if args.jobs is ...:
            args.jobs = os.cpu_count()
        elif args.jobs <= 0:
            parser.error("--jobs argument must be greater than zero!")

        envWhiteList = recipes.envWhiteList()
        envWhiteList |= set(args.white_list)

        if develop:
            nameFormatter = recipes.getHook('developNameFormatter')
            developPersister = DevelopDirOracle(
                nameFormatter, recipes.getHook('developNamePersister'))
            nameFormatter = developPersister.getFormatter()
        else:
            nameFormatter = recipes.getHook('releaseNameFormatter')
            nameFormatter = LocalBuilder.releaseNamePersister(nameFormatter)
        nameFormatter = LocalBuilder.makeRunnable(nameFormatter)
        packages = recipes.generatePackages(nameFormatter, defines,
                                            args.sandbox)
        if develop: developPersister.prime(packages)

        verbosity = cfg.get('verbosity', 0) + args.verbose - args.quiet
        setVerbosity(verbosity)
        builder = LocalBuilder(
            recipes, verbosity, args.force, args.no_deps,
            args.build_mode == 'build-only',
            args.preserve_env, envWhiteList, bobRoot, args.clean,
            args.no_logfiles)

        builder.setArchiveHandler(getArchiver(recipes))
        builder.setUploadMode(args.upload)
        builder.setDownloadMode(args.download)
        builder.setCleanCheckout(args.clean_checkout)
        builder.setAlwaysCheckout(args.always_checkout +
                                  cfg.get('always_checkout', []))
        builder.setLinkDependencies(args.link_deps)
        builder.setJobs(args.jobs)
        builder.setKeepGoing(args.keep_going)
        if args.resume: builder.loadBuildState()

        backlog = []
        providedBacklog = []
        results = []
        for p in args.packages:
            for package in packages.queryPackagePath(p):
                packageStep = package.getPackageStep()
                backlog.append(packageStep)
                # automatically include provided deps when exporting
                build_provided = (args.destination and args.build_provided
                                  is None) or args.build_provided
                if build_provided:
                    providedBacklog.extend(packageStep._getProvidedDeps())

        success = runHook(
            recipes, 'preBuildHook',
            ["/".join(p.getPackage().getStack()) for p in backlog])
        if not success:
            raise BuildError(
                "preBuildHook failed!",
                help=
                "A preBuildHook is set but it returned with a non-zero status."
            )
        success = False
        if args.jobs > 1:
            setTui(args.jobs)
            builder.enableBufferedIO()
        try:
            builder.cook(backlog, args.build_mode == 'checkout-only')
            for p in backlog:
                resultPath = p.getWorkspacePath()
                if resultPath not in results:
                    results.append(resultPath)
            builder.cook(providedBacklog,
                         args.build_mode == 'checkout-only', 1)
            for p in providedBacklog:
                resultPath = p.getWorkspacePath()
                if resultPath not in results:
                    results.append(resultPath)
            success = True
        finally:
            if args.jobs > 1: setTui(1)
            builder.saveBuildState()
            runHook(recipes, 'postBuildHook',
                    ["success" if success else "fail"] + results)

    finally:
        executor.shutdown()
        loop.close()

    # tell the user
    if results:
        if len(results) == 1:
            print("Build result is in", results[0])
        else:
            print("Build results are in:\n  ", "\n   ".join(results))

        endTime = time.time()
        stats = builder.getStatistic()
        activeOverrides = len(stats.getActiveOverrides())
        print("Duration: " +
              str(datetime.timedelta(seconds=(endTime - startTime))) + ", " +
              str(stats.checkouts) + " checkout" +
              ("s" if (stats.checkouts != 1) else "") + " (" +
              str(activeOverrides) +
              (" overrides" if (activeOverrides != 1) else " override") +
              " active), " + str(stats.packagesBuilt) + " package" +
              ("s" if (stats.packagesBuilt != 1) else "") + " built, " +
              str(stats.packagesDownloaded) + " downloaded.")

        # copy build result if requested
        ok = True
        if args.destination:
            for result in results:
                ok = copyTree(result, args.destination) and ok
        if not ok:
            raise BuildError(
                "Could not copy everything to destination. Your aggregated result is probably incomplete."
            )
    else:
        print("Your query matched no packages. Naptime!")
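
Example 28 resolves every option in three stages: the command line wins, then the dev/build section of the command config (default.yaml), then a hard-coded default. This is why nearly every add_argument call uses default=None, so "not given on the command line" stays detectable. A minimal sketch of that fallback chain with hypothetical --jobs and --clean options:

import argparse

def parse_with_config(argv, cfg):
    # cfg stands in for the values read from a config file, e.g. {'jobs': 4}
    parser = argparse.ArgumentParser()
    parser.add_argument('--jobs', type=int, default=None)
    parser.add_argument('--clean', action='store_true', default=None)
    args = parser.parse_args(argv)

    defaults = {'jobs': 1, 'clean': False}      # hard-coded fallbacks
    for name in vars(args):
        if getattr(args, name) is None:         # option not given on the command line
            setattr(args, name, cfg.get(name, defaults.get(name)))
    return args

print(parse_with_config([], {'jobs': 4}).jobs)        # 4, from the config
print(parse_with_config(['--jobs', '8'], {}).jobs)    # 8, from the command line
print(parse_with_config([], {}).clean)                # False, hard-coded default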
Example no. 29
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--setup", "-s", action="store_true")
    parser.add_argument("--config", "-c", action="append")
    parser.add_argument("--value", "-v", action="append")
    parser.add_argument("--phone", "-p", action="append")
    arguments = parser.parse_args()
    logging.debug(arguments)

    if sys.platform == 'win32':
        # Subprocess support
        asyncio.set_event_loop(asyncio.ProactorEventLoop())

    clients = []

    phones = arguments.phone if arguments.phone else []
    phones += set(
        map(
            lambda f: f[18:-8],
            filter(
                lambda f: f[:19] == "friendly-telegram-+"
                and f[-8:] == ".session",
                os.listdir(os.path.dirname(utils.get_base_dir())))))

    try:
        from . import api_token
        if os.path.isfile(
                os.path.join(os.path.dirname(utils.get_base_dir()),
                             'friendly-telegram.session')):
            clients += [
                TelegramClient('friendly-telegram', api_token.ID,
                               api_token.HASH).start()
            ]
        if len(clients) == 0 and len(phones) == 0:
            phones += [input("Please enter your phone: ")]
        for phone in phones:
            try:
                clients += [
                    TelegramClient(os.path.join(
                        os.path.dirname(utils.get_base_dir()),
                        'friendly-telegram-' + phone),
                                   api_token.ID,
                                   api_token.HASH,
                                   connection_retries=None).start(phone)
                ]
                # so we can format stuff nicer in configurator
                clients[-1].phone = phone
            except PhoneNumberInvalidError:
                print(
                    "Please check the phone number. Use international format (+XX...) and don't put spaces in it."
                )
                return
    except ImportError:
        run_config({})
        return
    finally:
        # Stop modules taking personal data so easily, or by accident
        try:
            del api_token.ID
            del api_token.HASH
        except UnboundLocalError:
            pass

    cfg = arguments.config if arguments.config else []
    vlu = arguments.value if arguments.value else []

    loops = []
    for client in clients:
        atexit.register(client.disconnect)
        loops += [amain(client, dict(zip(cfg, vlu)), clients, arguments.setup)]

    asyncio.get_event_loop().set_exception_handler(lambda _, x: logging.error(
        "Exception on event loop! %s", x["message"], exc_info=x["exception"]))

    asyncio.get_event_loop().run_until_complete(asyncio.gather(*loops))
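
The map/filter chain near the top of Example 29 recovers phone numbers from session file names of the form friendly-telegram-+<number>.session by slicing fixed offsets (f[:19], f[18:-8]). An equivalent but more explicit sketch of that parsing (phones_from_sessions is a made-up helper name):

import os

PREFIX = "friendly-telegram-+"
SUFFIX = ".session"

def phones_from_sessions(directory):
    """Collect phone numbers (with their leading '+') from saved session files."""
    phones = set()
    for name in os.listdir(directory):
        if name.startswith(PREFIX) and name.endswith(SUFFIX):
            # len(PREFIX) - 1 keeps the '+' so the number stays in international format
            phones.add(name[len(PREFIX) - 1:-len(SUFFIX)])
    return phones

Calling phones_from_sessions(os.path.dirname(utils.get_base_dir())) would yield the same set that the original lambda chain builds.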
Example no. 30
0
def setup(args):
    try:
        global loop
        if sys.platform == "win32":  # Required to use asyncio subprocesses
            loop = asyncio.ProactorEventLoop()
            asyncio.set_event_loop(loop)
        else:
            loop = asyncio.get_event_loop()
        global headless
        headless = args.headless
        global servermode
        servermode = args.servermode
        global debug
        debug = args.debug
        if servermode and importlib.util.find_spec("cravat_multiuser") is not None:
            try:
                global cravat_multiuser
                import cravat_multiuser

                loop.create_task(cravat_multiuser.setup_module())
                global server_ready
                server_ready = True
            except Exception as e:
                logger.exception(e)
                logger.info("Exiting...")
                print(
                    "Error occurred while loading open-cravat-multiuser.\nCheck {} for details.".format(
                        log_path
                    )
                )
                exit()
        else:
            servermode = False
            server_ready = False
        wu.servermode = args.servermode
        ws.servermode = args.servermode
        wr.servermode = args.servermode
        wu.filerouter.servermode = args.servermode
        wu.server_ready = server_ready
        ws.server_ready = server_ready
        wr.server_ready = server_ready
        wu.filerouter.server_ready = server_ready
        wr.wu = wu
        if server_ready:
            cravat_multiuser.servermode = servermode
            cravat_multiuser.server_ready = server_ready
            cravat_multiuser.logger = logger
            wu.cravat_multiuser = cravat_multiuser
            ws.cravat_multiuser = cravat_multiuser
        if servermode and not server_ready:
            msg = 'open-cravat-server package is required to run OpenCRAVAT Server.\nRun "pip install open-cravat-server" to get the package.'
            logger.info(msg)
            logger.info("Exiting...")
            print(msg)
            exit()
        global ssl_enabled
        ssl_enabled = False
        global protocol
        protocol = None
        global http_only
        http_only = args.http_only
        if "conf_dir" in sysconf:
            pem_path = os.path.join(sysconf["conf_dir"], "cert.pem")
            if os.path.exists(pem_path) and not http_only:
                ssl_enabled = True
                global sc
                sc = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
                sc.load_cert_chain(pem_path)
        if ssl_enabled:
            protocol = "https://"
        else:
            protocol = "http://"
    except Exception as e:
        logger.exception(e)
        if debug:
            traceback.print_exc()
        logger.info("Exiting...")
        print(
            "Error occurred while starting OpenCRAVAT server.\nCheck {} for details.".format(
                log_path
            )
        )
        exit()
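
Example 30 only prepares the ssl.SSLContext and the protocol prefix; the call that actually serves the application lies outside the snippet. Assuming the server is run with aiohttp (suggested by the wu/ws/wr web modules, but not shown above), a minimal sketch of handing the context to web.run_app; the app, host and port are placeholders:

import ssl
from aiohttp import web

app = web.Application()   # stands in for the real application with its routes

ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.load_cert_chain("cert.pem")   # assumes the private key is bundled in cert.pem

# https:// is served when a context is passed; ssl_context=None falls back to plain http://
web.run_app(app, host="0.0.0.0", port=8080, ssl_context=ssl_context)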