Example #1
    def clone_volume(self, src_vm, name):
        ''' Clone single volume from the specified vm

        :param QubesVM src_vm: source VM
        :param str name: name of volume to clone ('root', 'private' etc)
        :return: cloned volume object
        '''
        config = self.vm.volume_config[name]
        dst_pool = self.vm.app.get_pool(config['pool'])
        dst = dst_pool.init_volume(self.vm, config)
        src_volume = src_vm.volumes[name]
        msg = "Importing volume {!s} from vm {!s}"
        self.vm.log.info(msg.format(src_volume.name, src_vm.name))

        # First create the destination volume
        create_op_ret = dst.create()
        # clone/import functions may be either synchronous or asynchronous
        # in the latter case, we need to wait for them to finish
        if asyncio.iscoroutine(create_op_ret):
            yield from create_op_ret

        # Then import data from source volume
        clone_op_ret = dst.import_volume(src_volume)

        # clone/import functions may be either synchronous or asynchronous
        # in the latter case, we need to wait for them to finish
        if asyncio.iscoroutine(clone_op_ret):
            yield from clone_op_ret
        self.vm.volumes[name] = dst
        return self.vm.volumes[name]
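
Most of the snippets on this page repeat the pattern in Example #1: call something that may be either synchronous or a coroutine, and only yield from/await the result when asyncio.iscoroutine() says so. Below is a minimal, self-contained sketch of the same idea in modern async/await style; the maybe_await helper and the two create functions are hypothetical, not part of the code above.

import asyncio
import inspect


async def maybe_await(result):
    # Await the value if it is awaitable (coroutine, Task, Future);
    # otherwise hand it back unchanged.
    if inspect.isawaitable(result):
        return await result
    return result


def sync_create():
    return "created synchronously"


async def async_create():
    await asyncio.sleep(0)  # stand-in for real I/O
    return "created asynchronously"


async def main():
    # Both call sites look identical to the caller.
    print(await maybe_await(sync_create()))
    print(await maybe_await(async_create()))


asyncio.run(main())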
Example #2
    def __call__(self, request):
        try:
            # process request
            ret = self.process_request(request)
            if asyncio.iscoroutine(ret):
                ret = yield from ret
            # if process_request() returned None, call the next middleware
            if ret is None:
                ret = self.handler(request)
                if asyncio.iscoroutine(ret):
                    ret = yield from ret
            # pass the response on to process_response() below
            response = ret
        except Exception as e:
            # process exception
            ret = self.process_exception(request, e)
            if asyncio.iscoroutine(ret):
                ret = yield from ret
            if ret is None:
                if settings.DEBUG:
                    return HttpInternalError(e)
                raise
            response = ret
        # if we have a response from the next middleware, process_request(),
        # or process_exception(), call process_response(response)

        ret = self.process_response(request, response)
        if asyncio.iscoroutine(ret):
            ret = yield from ret
        if not isinstance(ret, StreamResponse):
            raise RuntimeError(
                "%s.process_response() must return a StreamResponse"
                % self.__class__.__name__)
        return ret
Example #3
    def test_iscoroutine(self):
        async def foo():
            pass

        f = foo()
        try:
            self.assertTrue(asyncio.iscoroutine(f))
        finally:
            f.close()  # silence warning

        # Test that asyncio.iscoroutine() uses collections.abc.Coroutine
        class FakeCoro:
            def send(self, value):
                pass

            def throw(self, typ, val=None, tb=None):
                pass

            def close(self):
                pass

            def __await__(self):
                yield

        self.assertTrue(asyncio.iscoroutine(FakeCoro()))
Example #4
    def test_coroutine_type_when_patched(self):
        @asyncio.coroutine
        def a_coroutine():
            pass

        a_patched_coroutine = patch_is_patched()(a_coroutine)

        self.assertEqual(asyncio.iscoroutinefunction(a_patched_coroutine),
                         asyncio.iscoroutinefunction(a_coroutine))
        self.assertEqual(inspect.isgeneratorfunction(a_patched_coroutine),
                         inspect.isgeneratorfunction(a_coroutine))
        coro = a_coroutine()
        patched_coro = a_patched_coroutine()
        try:
            self.assertEqual(asyncio.iscoroutine(patched_coro),
                             asyncio.iscoroutine(coro))
        finally:
            run_coroutine(coro)
            run_coroutine(patched_coro)

        if not _using_await:
            return

        a_coroutine = _using_await.transform(a_coroutine)
        a_patched_coroutine = patch_is_patched()(a_coroutine)
        self.assertEqual(asyncio.iscoroutinefunction(a_patched_coroutine),
                         asyncio.iscoroutinefunction(a_coroutine))
        coro = a_coroutine()
        patched_coro = a_patched_coroutine()
        try:
            self.assertEqual(asyncio.iscoroutine(patched_coro),
                             asyncio.iscoroutine(coro))
        finally:
            run_coroutine(coro)
            run_coroutine(patched_coro)
Example #5
    def update(self, id, **kwargs):
        data = yield from self.request.json()

        result_allowed = self.validate_allowed_fields(data)
        if asyncio.iscoroutine(result_allowed):  # pragma: no cover
            yield from result_allowed

        result_required = self.validate_required_fields(data)
        if asyncio.iscoroutine(result_required):  # pragma: no cover
            yield from result_required

        data = self.prepare_update(data)
        if asyncio.iscoroutine(data):  # pragma: no cover
            data = yield from data

        updated_fields = data.keys()

        pool = yield from self.get_pool()
        with (yield from pool.cursor()) as cur:
            yield from self.before_update(cur)
            query = 'update {table} set {fields} where {id_column}=%s'.format(
                table=self.get_table_name(),
                fields=','.join(['{}=%s'.format(x) for x in updated_fields]),
                id_column=self.id_column
            )
            try:
                yield from cur.execute(
                    query, tuple([data[f] for f in updated_fields]+[id])
                )
            except DatabaseError as e:
                raise self.exception(HTTPConflict, e)
            yield from self.after_update(cur)

        return Response(status=204)
Example #6
def _aiotraversal_on_cleanup(app):
    for task in app['aiotraversal']['on_cleanup']:
        if asyncio.iscoroutine(task):
            yield from task
        else:
            res = task(app)
            if asyncio.iscoroutine(res):
                yield from res
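
Example #6 accepts either bare coroutine objects or callables (which may themselves return coroutines) in its cleanup list. A compact sketch of that dual handling follows; the run_cleanup name and the app dict are made up for illustration.

import asyncio


async def run_cleanup(app, hooks):
    # Each hook is either a coroutine object or a callable taking the app;
    # a callable may in turn hand back a coroutine that still needs awaiting.
    for hook in hooks:
        if asyncio.iscoroutine(hook):
            await hook
        else:
            result = hook(app)
            if asyncio.iscoroutine(result):
                await result


async def close_db(app):
    app["db"] = None


def clear_cache(app):
    app["cache"] = {}


async def main():
    app = {"db": "connection", "cache": {"answer": 42}}
    await run_cleanup(app, [clear_cache, close_db(app)])
    print(app)  # -> {'db': None, 'cache': {}}


asyncio.run(main())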
Example #7
    def test_iscoroutine(self):
        async def foo(): pass

        f = foo()
        try:
            self.assertTrue(asyncio.iscoroutine(f))
        finally:
            f.close() # silence warning

        self.assertTrue(asyncio.iscoroutine(FakeCoro()))
Example #8
async def async_noy_sup_setUp(sup):
    if hasattr(sup, "setup"):
        res = sup.setup()
        if res and asyncio.iscoroutine(res):
            return await res
        else:
            return res

    if hasattr(sup, "setUp"):
        res = sup.setUp()
        if res and asyncio.iscoroutine(res):
            return await res
        else:
            return res
Example #9
async def async_noy_sup_tearDown(sup):
    if hasattr(sup, "teardown"):
        res = sup.teardown()
        if res and asyncio.iscoroutine(res):
            return await res
        else:
            return res

    if hasattr(sup, "tearDown"):
        res = sup.tearDown()
        if res and asyncio.iscoroutine(res):
            return await res
        else:
            return res
Example #10
    def wrapper(*args, **kwargs):
        connexion_request = api.get_request(*args, **kwargs)
        while asyncio.iscoroutine(connexion_request):
            connexion_request = yield from connexion_request

        connexion_response = function(connexion_request)
        while asyncio.iscoroutine(connexion_response):
            connexion_response = yield from connexion_response

        framework_response = api.get_response(connexion_response, mimetype,
                                              connexion_request)
        while asyncio.iscoroutine(framework_response):
            framework_response = yield from framework_response

        return framework_response
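
Example #10 loops with while rather than a single if because an awaited result can itself be another coroutine. Here is a small illustrative sketch of why the loop matters; the layered functions are hypothetical and separate from the wrapper above.

import asyncio


async def layer_two():
    return "final value"


async def layer_one():
    # Returns another coroutine object instead of awaiting it here.
    return layer_two()


async def main():
    result = layer_one()
    # Each await may hand back yet another coroutine, so keep unwrapping.
    while asyncio.iscoroutine(result):
        result = await result
    print(result)  # -> final value


asyncio.run(main())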
Example #11
    def stop(self, io_loop):
        """
        Asynchronously stop the application.

        :param tornado.ioloop.IOLoop io_loop: loop to run until all
            callbacks, timeouts, and queued calls are complete

        Call this method to start the application shutdown process.
        The IOLoop will be stopped once the application is completely
        shut down.

        """
        running_async = False
        shutdown = _ShutdownHandler(io_loop)
        for callback in self.on_shutdown_callbacks:
            try:
                maybe_future = callback(self.tornado_application)

                if asyncio.iscoroutine(maybe_future):
                    maybe_future = asyncio.create_task(maybe_future)

                if concurrent.is_future(maybe_future):
                    shutdown.add_future(maybe_future)
                    running_async = True
            except Exception as error:
                self.logger.warning('exception raised from shutdown '
                                    'callback %r, ignored: %s',
                                    callback, error, exc_info=1)

        if not running_async:
            shutdown.on_shutdown_ready()
Example #12
    def async_add_job(
            self,
            target: Callable[..., Any],
            *args: Any) -> Optional[asyncio.Future]:
        """Add a job from within the event loop.

        This method must be run in the event loop.

        target: target to call.
        args: parameters for method to call.
        """
        task = None

        # Check for partials to properly determine if coroutine function
        check_target = target
        while isinstance(check_target, functools.partial):
            check_target = check_target.func

        if asyncio.iscoroutine(check_target):
            task = self.loop.create_task(target)  # type: ignore
        elif is_callback(check_target):
            self.loop.call_soon(target, *args)
        elif asyncio.iscoroutinefunction(check_target):
            task = self.loop.create_task(target(*args))
        else:
            task = self.loop.run_in_executor(  # type: ignore
                None, target, *args)

        # If a task is scheduled
        if self._track_task and task is not None:
            self._pending_tasks.append(task)

        return task
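
Example #12 first peels off functools.partial wrappers before classifying the target, because a partial can hide the underlying coroutine function. A hedged sketch of just that unwrapping step; unwrap_partial and fetch are illustrative names, not part of the code above.

import asyncio
import functools


def unwrap_partial(target):
    # Peel functools.partial layers so the check sees the real callable.
    while isinstance(target, functools.partial):
        target = target.func
    return target


async def fetch(url):
    await asyncio.sleep(0)
    return url


job = functools.partial(fetch, "https://example.org")
# Whether iscoroutinefunction() sees through a partial depends on the Python
# version, so unwrapping by hand keeps the classification predictable.
print(asyncio.iscoroutinefunction(unwrap_partial(job)))  # True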
Example #13
 def execute(self, fn, *args, **kwargs):
     result = fn(*args, **kwargs)
     if isinstance(result, Future) or iscoroutine(result):
         future = ensure_future(result, loop=self.loop)
         self.futures.append(future)
         return Promise.resolve(future)
     return result
Example #14
 def execute(self, fn, *args, **kwargs):
     result = fn(*args, **kwargs)
     if isinstance(result, Future) or iscoroutine(result):
         future = ensure_future(result)
         self.futures.append(future)
         return future
     return result
Example #15
    def handle_request(self, message, payload):
        request = Request(self._app, message, payload,
                          self.transport, self.writer, self.keep_alive_timeout)
        try:
            match_info = yield from self._app.router.resolve(request)

            assert isinstance(match_info, AbstractMatchInfo), match_info

            request._match_info = match_info
            handler = match_info.handler

            resp = handler(request)
            if (asyncio.iscoroutine(resp) or
                    isinstance(resp, asyncio.Future)):
                resp = yield from resp
            if not isinstance(resp, StreamResponse):
                raise RuntimeError(("Handler should return response "
                                    "instance, got {!r}")
                                   .format(type(resp)))
        except HTTPException as exc:
            resp = exc

        yield from resp.write_eof()
        if resp.keep_alive:
            # Don't need to read request body if any on closing connection
            yield from request.release()
        self.keep_alive(resp.keep_alive)
Example #16
    async def extract_info(self, loop, *args, on_error=None, retry_on_error=False, **kwargs):
        """
            Runs ytdl.extract_info within the threadpool. Returns a future that will fire when it's done.
            If `on_error` is passed and an exception is raised, the exception will be caught and passed to
            on_error as an argument.
        """
        if callable(on_error):
            try:
                return await loop.run_in_executor(self.thread_pool, functools.partial(self.unsafe_ytdl.extract_info, *args, **kwargs))

            except Exception as e:

                # (youtube_dl.utils.ExtractorError, youtube_dl.utils.DownloadError)
                # I hope I don't have to deal with ContentTooShortError's
                if asyncio.iscoroutinefunction(on_error):
                    asyncio.ensure_future(on_error(e), loop=loop)

                elif asyncio.iscoroutine(on_error):
                    asyncio.ensure_future(on_error, loop=loop)

                else:
                    loop.call_soon_threadsafe(on_error, e)

                if retry_on_error:
                    return await self.safe_extract_info(loop, *args, **kwargs)
        else:
            return await loop.run_in_executor(self.thread_pool, functools.partial(self.unsafe_ytdl.extract_info, *args, **kwargs))
Example #17
def yields(value):
    """
    Returns ``True`` iff the value yields.

    .. seealso:: http://stackoverflow.com/questions/20730248/maybedeferred-analog-with-asyncio
    """
    return isinstance(value, Future) or iscoroutine(value)
Example #18
	def handle_object(self, obj):
		#1. Check that obj really does represent an openmath object
		cd, name, id, rtype, args, extras = verify_call(obj)
		self.logger.debug('Client request: {} {}\nCall {}.{} with args {}\nExtra info {}'.format(
		  rtype, id, cd, name, args, extras))
		
		#2. See if we know what to do with that object
		method_name = "proc_{}__{}".format(cd, name)
		try:
			handler = getattr(self, method_name)
		except AttributeError:
			self.report_error("Unknown symbol: cd={}, name={}".format(cd, name))
			#TODO: Should really send procedure_terminated. Not sure what error symbol tho
			return
			
			
		if not asyncio.iscoroutine(handler):
			raise SCSCPError
		
		#3. If so, do it
		self.logger.info("Calling {}.{}".format(cd, name))
		coro = handler(*args)
		task = asyncio.ensure_future(coro)  # asyncio.async() is a syntax error on Python 3.7+
		callback = functools.partial(self.call_ended, id, rtype)
		task.add_done_callback(callback)
		self._tasks[id] = task
Example #19
 def stick(function, **binding):
     if not asyncio.iscoroutine(function):
         function = asyncio.coroutine(function)
     bindings = getattr(function, STICKER, [])
     bindings.append(binding)
     setattr(function, STICKER, bindings)
     return function
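
Example #19 upgrades plain callables with asyncio.coroutine(), a decorator that was later deprecated and removed in Python 3.11. Below is a rough replacement sketch under that assumption, wrapping synchronous functions in an async def instead; ensure_coroutine_function is a hypothetical name.

import asyncio
import functools
import inspect


def ensure_coroutine_function(func):
    # Leave async functions alone; wrap plain ones so callers can always await.
    if inspect.iscoroutinefunction(func):
        return func

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    return wrapper


def plain(x):
    return x * 2


async def main():
    coro_fn = ensure_coroutine_function(plain)
    print(await coro_fn(21))  # -> 42


asyncio.run(main())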
Example #20
    def make_response(self, request, response):
        """Convert a handler result to web response."""
        while iscoroutine(response):
            response = yield from response

        if isinstance(response, StreamResponse):
            return response

        if isinstance(response, str):
            return Response(text=response, content_type='text/html', charset=self.app.cfg.ENCODING)

        if isinstance(response, (list, dict, MultiDict, MultiDictProxy)):
            if isinstance(response, (MultiDict, MultiDictProxy)):
                response = dict(response)
            return Response(text=json.dumps(response, ensure_ascii=self.app.cfg.JSON_ENSURE_ASCII,
                                                      indent=self.app.cfg.JSON_INDENT_SIZE,
                                                      escape_forward_slashes=self.app.cfg.JSON_ESCAPE_FORWARD_SLASHES),
                            content_type=self.app.cfg.JSON_CONTENT_TYPE)

        if isinstance(response, bytes):
            response = Response(
                body=response, content_type='text/html', charset=self.app.cfg.ENCODING)
            return response

        if response is None:
            response = ''

        return Response(text=str(response), content_type='text/html')
Example #21
    def make_response(self, request, response):
        """Convert a handler result to web response."""
        while iscoroutine(response):
            response = yield from response

        if isinstance(response, StreamResponse):
            return response

        if isinstance(response, str):
            return Response(text=response, content_type='text/html', charset=self.app.cfg.ENCODING)

        if isinstance(response, (list, dict)):
            return Response(text=json.dumps(response), content_type='application/json')

        if isinstance(response, (MultiDict, MultiDictProxy)):
            response = dict(response)
            return Response(text=json.dumps(response), content_type='application/json')

        if isinstance(response, bytes):
            response = Response(
                body=response, content_type='text/html', charset=self.app.cfg.ENCODING)
            return response

        if response is None:
            response = ''

        return Response(text=str(response), content_type='text/html')
Example #22
    def make_response(self, request, response):
        while iscoroutine(response):
            response = yield from response

        status = 200

        if not response:
            response = ''
            status = 204

        if isinstance(response, (list, tuple)):
            if len(response) == 2 and isinstance(response[1], int):
                response, status = response

        if isinstance(response, (dict, )):
            device, browser = get_device_and_browser(request)
            if device != OTHER or browser != OTHER:
                response = clean_tags(response['text'])

        if isinstance(response, str):
            response = Response(text=response, content_type='text/html', charset=self.app.cfg.ENCODING)

        elif isinstance(response, (list, tuple, dict)):
            response = Response(text=json.dumps(response), content_type='application/json')

        elif isinstance(response, bytes):
            response = Response(body=response, content_type='text/html', charset=self.app.cfg.ENCODING)

        response.set_status(status)
        return response
Example #23
        def func(*args, **kwargs):
            if not supress_all_args:
                arg_string = ""
                for i in range(0, len(args)):
                    var_name = fn.__code__.co_varnames[i]
                    if var_name != "self" and var_name not in supress_args:
                        arg_string += var_name + ":" + str(args[i]) + ","
                arg_string = arg_string[0:len(arg_string) - 1]
                string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__name__, arg_string))
                if len(kwargs):
                    string = (
                        RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(
                            fn.__name__,
                            arg_string, kwargs))
                logger.log(debug_level, string)

            wrapped_fn = fn
            if not asyncio.iscoroutine(fn):
                wrapped_fn = asyncio.coroutine(fn)
            result = yield from wrapped_fn(*args, **kwargs)

            if not supress_result:
                string = BLUE + BOLD + '<< ' + END + 'Return {0} with result : {1}'.format(fn.__name__, result)
                logger.log(debug_level, string)
            return result
Example #24
    def get_response(cls, response, mimetype=None, request=None):
        """Get response.
        This method is used in the lifecycle decorators

        :rtype: aiohttp.web.Response
        """
        while asyncio.iscoroutine(response):
            response = yield from response

        url = str(request.url) if request else ''

        logger.debug('Getting data and status code',
                     extra={
                         'data': response,
                         'url': url
                     })

        if isinstance(response, ConnexionResponse):
            response = cls._get_aiohttp_response_from_connexion(response, mimetype)

        if isinstance(response, web.StreamResponse):
            logger.debug('Got stream response with status code (%d)',
                         response.status, extra={'url': url})
        else:
            logger.debug('Got data and status code (%d)',
                         response.status, extra={'data': response.body, 'url': url})

        return response
Example #25
    async def debug(self, ctx, *, code):
        """Evaluates code

        Modified function, originally made by Rapptz"""
        code = code.strip('` ')
        python = '```py\n{}\n```'
        result = None

        global_vars = globals().copy()
        global_vars['bot'] = self.bot
        global_vars['ctx'] = ctx
        global_vars['message'] = ctx.message
        global_vars['author'] = ctx.message.author
        global_vars['channel'] = ctx.message.channel
        global_vars['server'] = ctx.message.server

        try:
            result = eval(code, global_vars, locals())
        except Exception as e:
            await self.bot.say(python.format(type(e).__name__ + ': ' + str(e)))
            return

        if asyncio.iscoroutine(result):
            result = await result

        result = python.format(result)
        if not ctx.message.channel.is_private:
            censor = (settings.email, settings.password)
            r = "[EXPUNGED]"
            for w in censor:
                if w != "":
                    result = result.replace(w, r)
                    result = result.replace(w.lower(), r)
                    result = result.replace(w.upper(), r)
        await self.bot.say(result)
Example #26
def set_defaults(**defaults):
    # We need to be able to await these values multiple times; futures let us do that
    futured = {
        key: asyncio.ensure_future(value) if asyncio.iscoroutine(value) else value
        for key, value in defaults.items()
    }
    _defaults.update(futured)
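
Example #26 converts coroutines to futures with asyncio.ensure_future() so the defaults can be awaited more than once: a bare coroutine object may only be awaited a single time, while a Task caches its result. A short sketch of the difference; the compute coroutine is hypothetical.

import asyncio


async def compute():
    await asyncio.sleep(0)
    return 42


async def main():
    task = asyncio.ensure_future(compute())
    # A Task can be awaited repeatedly; it simply returns the cached result.
    print(await task, await task)

    coro = compute()
    print(await coro)
    try:
        await coro  # a bare coroutine cannot be awaited a second time
    except RuntimeError as error:
        print("second await failed:", error)


asyncio.run(main())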
Example #27
    def async_add_job(
            self,
            target: Callable[..., Any],
            *args: Any) -> Optional[asyncio.tasks.Task]:
        """Add a job from within the event loop.

        This method must be run in the event loop.

        target: target to call.
        args: parameters for method to call.
        """
        task = None

        if asyncio.iscoroutine(target):
            task = self.loop.create_task(target)
        elif is_callback(target):
            self.loop.call_soon(target, *args)
        elif asyncio.iscoroutinefunction(target):
            task = self.loop.create_task(target(*args))
        else:
            task = self.loop.run_in_executor(None, target, *args)

        # If a task is scheduled
        if self._track_task and task is not None:
            self._pending_tasks.append(task)

        return task
Example #28
    def __init__(self, _type, plugin, func_hook):
        """
        :type _type: str
        :type plugin: Plugin
        :type func_hook: hook._Hook
        """
        self.type = _type
        self.plugin = plugin
        self.function = func_hook.function
        self.function_name = self.function.__name__

        self.required_args = inspect.getargspec(self.function)[0]
        if self.required_args is None:
            self.required_args = []

        # don't process args starting with "_"
        self.required_args = [arg for arg in self.required_args if not arg.startswith("_")]

        if asyncio.iscoroutine(self.function) or asyncio.iscoroutinefunction(self.function):
            self.threaded = False
        else:
            self.threaded = True

        self.permissions = func_hook.kwargs.pop("permissions", [])
        self.single_thread = func_hook.kwargs.pop("singlethread", False)

        if func_hook.kwargs:
            # we should have popped all the args, so warn if there are any left
            logger.warning("Ignoring extra args {} from {}".format(func_hook.kwargs, self.description))
Example #29
    async def get_file_revisions(self):
        result = self.provider.revisions(self.path)

        if asyncio.iscoroutine(result):
            result = await result

        return self.write({'data': [r.json_api_serialized() for r in result]})
Example #30
 def func(*args, **kwargs):
     arg_string = ""
     for i in range(0, len(args)):
         var_name = fn.__code__.co_varnames[i]
         if var_name not in ['self', 'cls']:
             arg_string += var_name + ":" + str(args[i]) + ","
     arg_string = arg_string[0:len(arg_string) - 1]
     string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__name__, arg_string))
     if len(kwargs):
         string = (
             RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(fn.__name__, arg_string,
                                                                                          kwargs))
     logger.log(debug_level, string)
     wrapped_fn = fn
     if not asyncio.iscoroutine(fn):
         wrapped_fn = asyncio.coroutine(fn)
     try:
         result = yield from wrapped_fn(*args, **kwargs)
         string = BLUE + BOLD + '<< ' + END + 'Return {0} with result :{1}'.format(fn.__name__, result)
         logger.log(debug_level, string)
         return result
     except Exception as e:
         string = (RED + BOLD + '>> ' + END + '{0} raised exception :{1}'.format(fn.__name__, str(e)))
         logger.log(debug_level, string)
         raise e
Example #31
    async def view_get(self, request, database, hash, correct_hash_provided,
                       **kwargs):
        _format, kwargs = await self.get_format(request, database, kwargs)

        if _format == "csv":
            return await self.as_csv(request, database, hash, **kwargs)

        if _format is None:
            # HTML views default to expanding all foreign key labels
            kwargs["default_labels"] = True

        extra_template_data = {}
        start = time.time()
        status_code = 200
        templates = []
        try:
            response_or_template_contexts = await self.data(
                request, database, hash, **kwargs)
            if isinstance(response_or_template_contexts, Response):
                return response_or_template_contexts

            else:
                data, extra_template_data, templates = response_or_template_contexts
        except QueryInterrupted:
            raise DatasetteError(
                """
                SQL query took too long. The time limit is controlled by the
                <a href="https://datasette.readthedocs.io/en/stable/config.html#sql-time-limit-ms">sql_time_limit_ms</a>
                configuration option.
            """,
                title="SQL Interrupted",
                status=400,
                messagge_is_html=True,
            )
        except (sqlite3.OperationalError, InvalidSql) as e:
            raise DatasetteError(str(e), title="Invalid SQL", status=400)

        except (sqlite3.OperationalError) as e:
            raise DatasetteError(str(e))

        except DatasetteError:
            raise

        end = time.time()
        data["query_ms"] = (end - start) * 1000
        for key in ("source", "source_url", "license", "license_url"):
            value = self.ds.metadata(key)
            if value:
                data[key] = value

        # Special case for .jsono extension - redirect to _shape=objects
        if _format == "jsono":
            return self.redirect(
                request,
                path_with_added_args(
                    request,
                    {"_shape": "objects"},
                    path=request.path.rsplit(".jsono", 1)[0] + ".json",
                ),
                forward_querystring=False,
            )

        if _format in self.ds.renderers.keys():
            # Dispatch request to the correct output format renderer
            # (CSV is not handled here due to streaming)
            result = call_with_supported_arguments(
                self.ds.renderers[_format][0],
                datasette=self.ds,
                columns=data.get("columns") or [],
                rows=data.get("rows") or [],
                sql=data.get("query", {}).get("sql", None),
                query_name=data.get("query_name"),
                database=database,
                table=data.get("table"),
                request=request,
                view_name=self.name,
                # These will be deprecated in Datasette 1.0:
                args=request.args,
                data=data,
            )
            if asyncio.iscoroutine(result):
                result = await result
            if result is None:
                raise NotFound("No data")

            r = Response(
                body=result.get("body"),
                status=result.get("status_code", 200),
                content_type=result.get("content_type", "text/plain"),
                headers=result.get("headers"),
            )
        else:
            extras = {}
            if callable(extra_template_data):
                extras = extra_template_data()
                if asyncio.iscoroutine(extras):
                    extras = await extras
            else:
                extras = extra_template_data
            url_labels_extra = {}
            if data.get("expandable_columns"):
                url_labels_extra = {"_labels": "on"}

            renderers = {}
            for key, (_, can_render) in self.ds.renderers.items():
                it_can_render = call_with_supported_arguments(
                    can_render,
                    datasette=self.ds,
                    columns=data.get("columns") or [],
                    rows=data.get("rows") or [],
                    sql=data.get("query", {}).get("sql", None),
                    query_name=data.get("query_name"),
                    database=database,
                    table=data.get("table"),
                    request=request,
                    view_name=self.name,
                )
                if asyncio.iscoroutine(it_can_render):
                    it_can_render = await it_can_render
                if it_can_render:
                    renderers[key] = path_with_format(request, key,
                                                      {**url_labels_extra})

            url_csv_args = {"_size": "max", **url_labels_extra}
            url_csv = path_with_format(request, "csv", url_csv_args)
            url_csv_path = url_csv.split("?")[0]
            context = {
                **data,
                **extras,
                **{
                    "renderers":
                    renderers,
                    "url_csv":
                    url_csv,
                    "url_csv_path":
                    url_csv_path,
                    "url_csv_hidden_args": [(key, value) for key, value in urllib.parse.parse_qsl(request.query_string) if key not in ("_labels", "_facet", "_size")] + [("_size", "max")],
                    "datasette_version":
                    __version__,
                    "config":
                    self.ds.config_dict(),
                },
            }
            if "metadata" not in context:
                context["metadata"] = self.ds.metadata
            r = await self.render(templates, request=request, context=context)
            r.status = status_code

        ttl = request.args.get("_ttl", None)
        if ttl is None or not ttl.isdigit():
            if correct_hash_provided:
                ttl = self.ds.config("default_cache_ttl_hashed")
            else:
                ttl = self.ds.config("default_cache_ttl")

        return self.set_response_headers(r, ttl)
Example #32
    def _sender_routine(self):
        """ Background task that sends pending batches to the leader node of
        each batch's partition. This encapsulates the same logic as Java's
        `Sender` background thread. Because we use asyncio, this is an
        event-based loop rather than one that counts the timeout until the
        next possible event, as in Java.
        """

        tasks = set()
        txn_task = None  # Track a single task for transaction interactions
        try:
            while True:
                # If idempotence or transactions are turned on, we need to
                # have a valid PID to send any request below
                log.debug('+maybe wait for pid')
                yield from self._maybe_wait_for_pid()
                log.debug('-maybe wait for pid')

                waiters = set()
                # As transaction coordination is done via a single, separate
                # socket we do not need to pump it to several nodes, as we do
                # with produce requests.
                # We will only have 1 task at a time and will try to spawn
                # another once that is done.
                txn_manager = self._txn_manager
                muted_partitions = self._muted_partitions
                if txn_manager is not None and \
                        txn_manager.transactional_id is not None:
                    if txn_task is None or txn_task.done():
                        txn_task = self._maybe_do_transactional_request()
                        if txn_task is not None:
                            log.debug('has txn task')
                            tasks.add(txn_task)
                        else:
                            log.debug('creating new txn task')
                            # Waiters will not be awaited on exit, tasks will
                            waiters.add(txn_manager.make_task_waiter())
                    # We can't have a race condition between
                    # AddPartitionsToTxnRequest and a ProduceRequest, so we
                    # mute the partition until added.
                    muted_partitions = (muted_partitions
                                        | txn_manager.partitions_to_add())
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight,
                        muted_partitions=muted_partitions)

                # create produce task for every batch
                for node_id, batches in batches.items():
                    task = ensure_future(self._send_produce_req(
                        node_id, batches),
                                         loop=self._loop)
                    self._in_flight.add(node_id)
                    for tp in batches:
                        self._muted_partitions.add(tp)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # we have at least one unknown partition's leader,
                    # try to update cluster metadata and wait backoff time
                    fut = self.client.force_metadata_update()
                    waiters |= tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters |= tasks.union([fut])

                # wait when:
                # * At least one of produce task is finished
                # * Data for new partition arrived
                # * Metadata update if partition leader unknown
                log.debug('+SENDER WAIT FOR %r' % (waiters, ))
                if waiters:
                    done, _ = yield from asyncio.wait(
                        waiters,
                        return_when=asyncio.FIRST_COMPLETED,
                        loop=self._loop)
                    log.debug('-SENDER WAIT FOR')
                else:
                    yield from asyncio.sleep(0.5)

                # done tasks should never produce errors, if they are it's a
                # bug
                for task in done:
                    task.result()

                tasks -= done

        except asyncio.CancelledError:
            # done tasks should never produce errors, if they are it's a bug
            for task in tasks:
                yield from task
        except (ProducerFenced, OutOfOrderSequenceNumber,
                TransactionalIdAuthorizationFailed):
            raise
        except Exception as exc:  # pragma: no cover
            log.error("Unexpected error in sender routine", exc_info=True)
            if self._on_irrecoverable_error:
                res = self._on_irrecoverable_error(exc)
                if asyncio.iscoroutine(res):  # callback can be async def
                    yield from res
            raise KafkaError("Unexpected error during batch delivery")
Example #33
    def start(self):
        """Start processing of incoming requests.

        It reads request line, request headers and request payload, then
        calls handle_request() method. Subclass has to override
        handle_request(). start() handles various exceptions in request
        or response handling. Connection is being closed always unless
        keep_alive(True) specified.
        """
        reader = self.reader

        while True:
            message = None
            self._keep_alive = False
            self._request_count += 1
            self._reading_request = False

            payload = None
            try:
                prefix = reader.set_parser(self._request_prefix)
                yield from prefix.read()
                self._reading_request = True

                # stop keep-alive timer
                if self._keep_alive_handle is not None:
                    self._keep_alive_handle.cancel()
                    self._keep_alive_handle = None

                # start slow request timer
                if self._timeout and self._timeout_handle is None:
                    self._timeout_handle = self._loop.call_later(
                        self._timeout, self.cancel_slow_request)

                # read request headers
                httpstream = reader.set_parser(self._request_parser)
                message = yield from httpstream.read()

                # cancel slow request timer
                if self._timeout_handle is not None:
                    self._timeout_handle.cancel()
                    self._timeout_handle = None

                payload = streams.FlowControlStreamReader(
                    reader, loop=self._loop)
                reader.set_parser(aiohttp.HttpPayloadParser(message), payload)

                handler = self.handle_request(message, payload)
                if (asyncio.iscoroutine(handler) or
                        isinstance(handler, asyncio.Future)):
                    yield from handler

            except (ConnectionError, asyncio.CancelledError,
                    errors.ConnectionError):
                self.log_debug('Ignored premature client disconnection.')
                break
            except errors.HttpException as exc:
                self.handle_error(exc.code, message, None, exc, exc.headers)
            except Exception as exc:
                self.handle_error(500, message, None, exc)
            finally:
                if self.transport is None:
                    self.log_debug('Ignored premature client disconnection.')
                    break

                if payload and not payload.is_eof():
                    self.log_debug('Uncompleted request.')
                    self._request_handler = None
                    self.transport.close()
                    break
                else:
                    reader.unset_parser()

                if self._request_handler:
                    if self._keep_alive and self._keep_alive_period:
                        self.log_debug(
                            'Start keep-alive timer for %s sec.',
                            self._keep_alive_period)
                        self._keep_alive_handle = self._loop.call_later(
                            self._keep_alive_period, self.transport.close)
                    else:
                        self.log_debug('Close client connection.')
                        self._request_handler = None
                        self.transport.close()
                        break
                else:
                    break
Example #34
 async def invoke_startup(self):
     for hook in pm.hook.startup(datasette=self):
         if callable(hook):
             hook = hook()
         if asyncio.iscoroutine(hook):
             hook = await hook
Example #35
    def run(self, result=None):  # pylint: disable=R0915
        orig_result = result
        if result is None:
            result = self.defaultTestResult()
            startTestRun = getattr(result, 'startTestRun', None)  # pylint: disable=C0103
            if startTestRun is not None:
                startTestRun()

        result.startTest(self)

        testMethod = getattr(self, self._testMethodName)  # pylint: disable=C0103
        if (getattr(self.__class__, "__unittest_skip__", False)
                or getattr(testMethod, "__unittest_skip__", False)):
            # If the class or method was skipped.
            try:
                skip_why = (
                    getattr(self.__class__, '__unittest_skip_why__', '')
                    or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, self, skip_why)
            finally:
                result.stopTest(self)
            return
        expecting_failure_method = getattr(testMethod,
                                           "__unittest_expecting_failure__",
                                           False)
        expecting_failure_class = getattr(self,
                                          "__unittest_expecting_failure__",
                                          False)
        expecting_failure = expecting_failure_class or expecting_failure_method
        outcome = _Outcome(result)

        self.loop = asyncio.new_event_loop()  # pylint: disable=W0201
        asyncio.set_event_loop(self.loop)
        self.loop.set_debug(True)
        self.loop.slow_callback_duration = self.LOOP_SLOW_CALLBACK_DURATION

        try:
            self._outcome = outcome

            with outcome.testPartExecutor(self):
                self.setUp()
                self.loop.run_until_complete(self.asyncSetUp())
            if outcome.success:
                outcome.expecting_failure = expecting_failure
                with outcome.testPartExecutor(self, isTest=True):
                    maybe_coroutine = testMethod()
                    if asyncio.iscoroutine(maybe_coroutine):
                        self.loop.run_until_complete(maybe_coroutine)
                outcome.expecting_failure = False
                with outcome.testPartExecutor(self):
                    self.loop.run_until_complete(self.asyncTearDown())
                    self.tearDown()

            self.doAsyncCleanups()

            try:
                _cancel_all_tasks(self.loop)
                self.loop.run_until_complete(self.loop.shutdown_asyncgens())
            finally:
                asyncio.set_event_loop(None)
                self.loop.close()

            for test, reason in outcome.skipped:
                self._addSkip(result, test, reason)
            self._feedErrorsToResult(result, outcome.errors)
            if outcome.success:
                if expecting_failure:
                    if outcome.expectedFailure:
                        self._addExpectedFailure(result,
                                                 outcome.expectedFailure)
                    else:
                        self._addUnexpectedSuccess(result)
                else:
                    result.addSuccess(self)
            return result
        finally:
            result.stopTest(self)
            if orig_result is None:
                stopTestRun = getattr(result, 'stopTestRun', None)  # pylint: disable=C0103
                if stopTestRun is not None:
                    stopTestRun()  # pylint: disable=E1102

            # explicitly break reference cycles:
            # outcome.errors -> frame -> outcome -> outcome.errors
            # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
            outcome.errors.clear()
            outcome.expectedFailure = None

            # clear the outcome, no more needed
            self._outcome = None
Example #36
    async def build_asgi_output(
        controller_output: Any,
        status: HTTPStatus = HTTPStatus.OK,
        headers: Optional[Sequence[Header]] = None,
        content_type: Optional[ContentType] = ContentType.APPLICATION_JSON,
        return_type_: Any = None,
        middlewares: Optional[Middlewares] = None,
        middleware_request: Optional[MiddlewareRequest] = None,
    ) -> ASGICallableResults:
        while iscoroutine(controller_output):
            controller_output = await controller_output

        if return_type_ is None and return_type:
            return_type_ = return_type

        if middlewares and not isinstance(controller_output, Response):
            controller_output = Response(
                body=controller_output,
                status=status,
                headers=headers,
                content_type=content_type,
            )

        if isinstance(controller_output, Response):
            if middlewares and middleware_request:
                for middleware in middlewares.post_execution:
                    middleware(middleware_request, controller_output)

            return build_asgi_output(
                controller_output['body'],
                controller_output['status'],
                controller_output['headers'],
                controller_output['content_type'],
                controller_output.__annotations__.get('body'),
            )

        elif isinstance(controller_output, dict):
            if return_type_:
                body = typed_dict_asjson(controller_output, return_type_)
            else:
                body = orjson.dumps(controller_output)

        elif (is_dataclass(controller_output)
              or isinstance(controller_output, tuple)
              or isinstance(controller_output, list)):
            body = dataclass_asjson(controller_output)

        elif (isinstance(controller_output, str)
              or isinstance(controller_output, int)
              or isinstance(controller_output, bool)
              or isinstance(controller_output, float)):
            if content_type == ContentType.APPLICATION_JSON:
                body = orjson.dumps(controller_output)
            else:
                body = str(controller_output).encode()

        elif controller_output is None:
            content_length = 0 if has_content_length else None

            if content_type is None and status in RESPONSES_MAP:
                if headers:
                    return (RESPONSES_MAP[status](
                        make_asgi_headers(headers)), )

                return (RESPONSES_MAP[status](headers), )

            if headers:
                return RESPONSES_MAP[content_type](  # type: ignore
                    content_length,
                    headers=make_asgi_headers(headers))

            return RESPONSES_MAP[content_type](content_length)  # type: ignore

        else:
            raise InvalidReturnError(controller_output, controller)

        content_length = len(body) if has_content_length else None

        return (
            RESPONSES_MAP[content_type](  # type: ignore
                content_length, status, make_asgi_headers(headers)),
            body,
        )
Example #37
    async def run_job(self, job: jobs.Job, worker_id: int) -> None:
        task_name = job.task_name

        task = self.load_task(task_name=task_name, worker_id=worker_id)

        context = self.context_for_worker(worker_id=worker_id, task=task)

        start_time = context.additional_context["start_timestamp"] = time.time(
        )

        self.logger.info(
            f"Starting job {job.call_string}",
            extra=context.log_extra(action="start_job"),
        )
        exc_info: Union[bool, Exception]
        job_args = []
        if task.pass_context:
            job_args.append(context)
        try:
            task_result = task(*job_args, **job.task_kwargs)
            if asyncio.iscoroutine(task_result):
                task_result = await task_result
            elif self.concurrency != 1:
                logger.warning(
                    "When using worker concurrency, non-async tasks will block "
                    "the whole worker.",
                    extra=context.log_extra(action="concurrent_sync_task"),
                )

        except Exception as e:
            task_result = None
            log_title = "Error"
            log_action = "job_error"
            log_level = logging.ERROR
            exc_info = e

            retry_exception = task.get_retry_exception(exception=e, job=job)
            if retry_exception:
                log_title = "Error, to retry"
                log_action = "job_error_retry"
                raise retry_exception from e
            raise exceptions.JobError() from e

        else:
            log_title = "Success"
            log_action = "job_success"
            log_level = logging.INFO
            exc_info = False
        finally:
            end_time = time.time()
            duration = end_time - start_time
            context.additional_context.update({
                "end_timestamp": end_time,
                "duration": duration,
                "result": task_result,
            })

            extra = context.log_extra(action=log_action)

            text = (f"Job {job.call_string} ended with status: {log_title}, "
                    f"lasted {duration:.3f} s")
            if task_result:
                text += f" - Result: {task_result}"[:250]
            self.logger.log(log_level, text, extra=extra, exc_info=exc_info)
Example #38
 def _run_test_method(self, method):
     result = method()
     if asyncio.iscoroutine(result):
         self.loop.run_until_complete(
             asyncio.wait_for(result, timeout=self.TEST_TIMEOUT))
Example #39
 def create(cls, manager, attrs):
     if asyncio.iscoroutine(attrs):
         return cls._acreate(manager, attrs)
     else:
         return cls(manager, attrs)
Example #40
 def create(cls, manager, obj_cls, _list):
     if asyncio.iscoroutine(_list):
         return cls._acreate(manager, obj_cls, _list)
     else:
         return cls(manager, obj_cls, _list)
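
Examples #39 and #40 show a factory that stays synchronous for plain attributes but returns an awaitable when the attributes are still a coroutine. A minimal sketch of the same shape; the Record class and the body of _acreate are assumptions for illustration.

import asyncio


class Record:
    def __init__(self, attrs):
        self.attrs = attrs

    @classmethod
    def create(cls, attrs):
        # Only take the async path when the attributes still need awaiting.
        if asyncio.iscoroutine(attrs):
            return cls._acreate(attrs)
        return cls(attrs)

    @classmethod
    async def _acreate(cls, attrs):
        return cls(await attrs)


async def load_attrs():
    await asyncio.sleep(0)
    return {"id": 1}


async def main():
    sync_record = Record.create({"id": 0})              # plain instance
    async_record = await Record.create(load_attrs())    # coroutine -> instance
    print(sync_record.attrs, async_record.attrs)


asyncio.run(main())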
Example #41
    async def render_template(self,
                              templates,
                              context=None,
                              request=None,
                              view_name=None):
        context = context or {}
        if isinstance(templates, Template):
            template = templates
        else:
            if isinstance(templates, str):
                templates = [templates]
            template = self.jinja_env.select_template(templates)
        body_scripts = []
        # pylint: disable=no-member
        for script in pm.hook.extra_body_script(
                template=template.name,
                database=context.get("database"),
                table=context.get("table"),
                view_name=view_name,
                datasette=self,
        ):
            body_scripts.append(Markup(script))

        extra_template_vars = {}
        # pylint: disable=no-member
        for extra_vars in pm.hook.extra_template_vars(
                template=template.name,
                database=context.get("database"),
                table=context.get("table"),
                view_name=view_name,
                request=request,
                datasette=self,
        ):
            if callable(extra_vars):
                extra_vars = extra_vars()
            if asyncio.iscoroutine(extra_vars):
                extra_vars = await extra_vars
            assert isinstance(extra_vars,
                              dict), "extra_vars is of type {}".format(
                                  type(extra_vars))
            extra_template_vars.update(extra_vars)

        template_context = {
            **context,
            **{
                "actor":
                request.actor if request else None,
                "display_actor":
                display_actor,
                "show_logout":
                request is not None and "ds_actor" in request.cookies,
                "app_css_hash":
                self.app_css_hash(),
                "zip":
                zip,
                "body_scripts":
                body_scripts,
                "format_bytes":
                format_bytes,
                "show_messages":
                lambda: self._show_messages(request),
                "extra_css_urls":
                self._asset_urls("extra_css_urls", template, context),
                "extra_js_urls":
                self._asset_urls("extra_js_urls", template, context),
                "base_url":
                self.config("base_url"),
                "csrftoken":
                request.scope["csrftoken"] if request else lambda: "",
            },
            **extra_template_vars,
        }
        if request and request.args.get("_context") and self.config(
                "template_debug"):
            return "<pre>{}</pre>".format(
                jinja2.escape(
                    json.dumps(template_context, default=repr, indent=4)))

        return await template.render_async(template_context)
Example #42
    def update_body_from_data(self, data, skip_auto_headers):
        if not data:
            return

        if isinstance(data, str):
            data = data.encode(self.encoding)

        if isinstance(data, (bytes, bytearray)):
            self.body = data
            if (hdrs.CONTENT_TYPE not in self.headers
                    and hdrs.CONTENT_TYPE not in skip_auto_headers):
                self.headers[hdrs.CONTENT_TYPE] = 'application/octet-stream'
            if hdrs.CONTENT_LENGTH not in self.headers and not self.chunked:
                self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))

        elif isinstance(
                data,
            (asyncio.StreamReader, streams.StreamReader, streams.DataQueue)):
            self.body = data

        elif asyncio.iscoroutine(data):
            self.body = data
            if (hdrs.CONTENT_LENGTH not in self.headers
                    and self.chunked is None):
                self.chunked = True

        elif isinstance(data, io.IOBase):
            assert not isinstance(data, io.StringIO), \
                'attempt to send text data instead of binary'
            self.body = data
            if not self.chunked and isinstance(data, io.BytesIO):
                # Not chunking if content-length can be determined
                size = len(data.getbuffer())
                self.headers[hdrs.CONTENT_LENGTH] = str(size)
                self.chunked = False
            elif (not self.chunked
                  and isinstance(data,
                                 (io.BufferedReader, io.BufferedRandom))):
                # Not chunking if content-length can be determined
                try:
                    size = os.fstat(data.fileno()).st_size - data.tell()
                    self.headers[hdrs.CONTENT_LENGTH] = str(size)
                    self.chunked = False
                except OSError:
                    # data.fileno() is not supported, e.g.
                    # io.BufferedReader(io.BytesIO(b'data'))
                    self.chunked = True
            else:
                self.chunked = True

            if hasattr(data, 'mode'):
                if data.mode == 'r':
                    raise ValueError('file {!r} should be open in binary mode'
                                     ''.format(data))
            if (hdrs.CONTENT_TYPE not in self.headers
                    and hdrs.CONTENT_TYPE not in skip_auto_headers
                    and hasattr(data, 'name')):
                mime = mimetypes.guess_type(data.name)[0]
                mime = 'application/octet-stream' if mime is None else mime
                self.headers[hdrs.CONTENT_TYPE] = mime

        elif isinstance(data, MultipartWriter):
            self.body = data.serialize()
            self.headers.update(data.headers)
            self.chunked = self.chunked or 8192

        else:
            if not isinstance(data, helpers.FormData):
                data = helpers.FormData(data)

            self.body = data(self.encoding)

            if (hdrs.CONTENT_TYPE not in self.headers
                    and hdrs.CONTENT_TYPE not in skip_auto_headers):
                self.headers[hdrs.CONTENT_TYPE] = data.content_type

            if data.is_multipart:
                self.chunked = self.chunked or 8192
            else:
                if (hdrs.CONTENT_LENGTH not in self.headers
                        and not self.chunked):
                    self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
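
A minimal, self-contained sketch of the dispatch idea in Example #42 (not aiohttp's real API; classify_body and its return shape are invented for illustration): bodies whose size is known get an explicit Content-Length, while a coroutine body forces chunked transfer because its length cannot be known up front.

import asyncio
import io

def classify_body(data):
    """Return ('length', n) when the size is known, else ('chunked', None)."""
    if isinstance(data, (bytes, bytearray)):
        return 'length', len(data)
    if asyncio.iscoroutine(data):
        return 'chunked', None           # size unknown until the coroutine runs
    if isinstance(data, io.BytesIO):
        return 'length', len(data.getbuffer())
    return 'chunked', None

async def produce():
    return b'streamed'

coro = produce()
print(classify_body(b'abc'))   # ('length', 3)
print(classify_body(coro))     # ('chunked', None)
coro.close()                   # silence the "never awaited" warning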
Example #43
0
 def put(self, coro):
     """Put a coroutine in the queue to be executed."""
     # Don't log when a coroutine is queued or executed; that would spam the
     # log with coroutines that are started on every keypress.
     assert asyncio.iscoroutine(coro)
     self._queue.put_nowait(coro)
Example #44
0
    def write_bytes(self, request, reader):
        """Support coroutines that yields bytes objects."""
        # 100 response
        if self._continue is not None:
            yield from self._continue

        try:
            if asyncio.iscoroutine(self.body):
                exc = None
                value = None
                stream = self.body

                while True:
                    try:
                        if exc is not None:
                            result = stream.throw(exc)
                        else:
                            result = stream.send(value)
                    except StopIteration as exc:
                        if isinstance(exc.value, bytes):
                            yield from request.write(exc.value, drain=True)
                        break
                    except:
                        self.response.close()
                        raise

                    if isinstance(result, asyncio.Future):
                        exc = None
                        value = None
                        try:
                            value = yield result
                        except Exception as err:
                            exc = err
                    elif isinstance(result, (bytes, bytearray)):
                        yield from request.write(result, drain=True)
                        value = None
                    else:
                        raise ValueError('Bytes object is expected, got: %s.' %
                                         type(result))

            elif isinstance(self.body,
                            (asyncio.StreamReader, streams.StreamReader)):
                chunk = yield from self.body.read(streams.DEFAULT_LIMIT)
                while chunk:
                    yield from request.write(chunk, drain=True)
                    chunk = yield from self.body.read(streams.DEFAULT_LIMIT)

            elif isinstance(self.body, streams.DataQueue):
                while True:
                    try:
                        chunk = yield from self.body.read()
                        if not chunk:
                            break
                        yield from request.write(chunk, drain=True)
                    except streams.EofStream:
                        break

            elif isinstance(self.body, io.IOBase):
                chunk = self.body.read(self.chunked)
                while chunk:
                    request.write(chunk)
                    chunk = self.body.read(self.chunked)
            else:
                if isinstance(self.body, (bytes, bytearray)):
                    self.body = (self.body, )

                for chunk in self.body:
                    request.write(chunk)

        except Exception as exc:
            new_exc = aiohttp.ClientRequestError(
                'Can not write request body for %s' % self.url)
            new_exc.__context__ = exc
            new_exc.__cause__ = exc
            reader.set_exception(new_exc)
        else:
            try:
                ret = request.write_eof()
                # NB: in asyncio 3.4.1+ StreamWriter.drain() is coroutine
                # see bug #170
                if (asyncio.iscoroutine(ret)
                        or isinstance(ret, asyncio.Future)):
                    yield from ret
            except Exception as exc:
                new_exc = aiohttp.ClientRequestError(
                    'Can not write request body for %s' % self.url)
                new_exc.__context__ = exc
                new_exc.__cause__ = exc
                reader.set_exception(new_exc)

        self._writer = None
Example #45
0
 async def send(self, *args, **kwargs):
     """Send args and kwargs to all registered callbacks"""
     for callback in self:
         res = callback(*args, **kwargs)
         if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):
             await res
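
A self-contained version of the fan-out in Example #45 (the Signal name and the listeners are invented for the sketch): synchronous callbacks run inline, and anything that returns a coroutine or Future is awaited in registration order.

import asyncio

class Signal(list):
    async def send(self, *args, **kwargs):
        for callback in self:
            res = callback(*args, **kwargs)
            if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):
                await res

def sync_listener(value):
    print('sync got', value)

async def async_listener(value):
    print('async got', value)

sig = Signal([sync_listener, async_listener])
asyncio.run(sig.send(42))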
Example #46
0
 async def consume(self):
     """Consume coroutines from the queue by executing them."""
     while True:
         coro = await self._queue.get()
         assert asyncio.iscoroutine(coro)
         await coro
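
Examples #43 and #46 are two halves of the same object; a minimal sketch combining them (CoroRunner and the say() demo are invented names), with task_done()/join() added so the demo can tell when the queue has drained.

import asyncio

class CoroRunner:
    def __init__(self):
        self._queue = asyncio.Queue()

    def put(self, coro):
        """Queue a coroutine object for the consumer task to run."""
        assert asyncio.iscoroutine(coro)
        self._queue.put_nowait(coro)

    async def consume(self):
        while True:
            coro = await self._queue.get()
            await coro
            self._queue.task_done()

    async def join(self):
        await self._queue.join()

async def say(text):
    print(text)

async def main():
    runner = CoroRunner()
    worker = asyncio.create_task(runner.consume())
    runner.put(say('hello'))
    runner.put(say('world'))
    await runner.join()       # wait until both queued coroutines have run
    worker.cancel()

asyncio.run(main())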
Example #47
0
    async def debug(self, ctx, *, code):
        """Evaluates code"""
        def check(m):
            if m.content.strip().lower() == "more":
                return True

        author = ctx.message.author
        channel = ctx.message.channel

        code = code.strip('` ')
        result = None

        global_vars = globals().copy()
        global_vars['bot'] = self.bot
        global_vars['ctx'] = ctx
        global_vars['message'] = ctx.message
        global_vars['author'] = ctx.message.author
        global_vars['channel'] = ctx.message.channel
        global_vars['server'] = ctx.message.server

        try:
            result = eval(code, global_vars, locals())
        except Exception as e:
            await self.bot.say(box('{}: {}'.format(type(e).__name__, str(e)),
                                   lang="py"))
            return

        if asyncio.iscoroutine(result):
            result = await result

        result = str(result)

        if not ctx.message.channel.is_private:
            censor = (self.bot.settings.email,
                      self.bot.settings.password,
                      self.bot.settings.token)
            r = "[EXPUNGED]"
            for w in censor:
                if w is None or w == "":
                    continue
                result = result.replace(w, r)
                result = result.replace(w.lower(), r)
                result = result.replace(w.upper(), r)

        result = list(pagify(result, shorten_by=16))

        for i, page in enumerate(result):
            if i != 0 and i % 4 == 0:
                last = await self.bot.say("There are still {} messages. "
                                          "Type `more` to continue."
                                          "".format(len(result) - (i+1)))
                msg = await self.bot.wait_for_message(author=author,
                                                      channel=channel,
                                                      check=check,
                                                      timeout=10)
                if msg is None:
                    try:
                        await self.bot.delete_message(last)
                    except:
                        pass
                    finally:
                        break
            await self.bot.say(box(page, lang="py"))
Example #48
0
 def handler_wrapper(*args, **kwargs):
     result = old_handler(*args, **kwargs)
     if asyncio.iscoroutine(result):
         result = yield from result
     return result
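
Example #48 is the old yield-from form of a common adapter; the same idea with async/await (ensure_async and the two handlers are names invented for the sketch).

import asyncio

def ensure_async(old_handler):
    async def handler_wrapper(*args, **kwargs):
        result = old_handler(*args, **kwargs)
        if asyncio.iscoroutine(result):
            result = await result
        return result
    return handler_wrapper

def sync_handler(x):
    return x + 1

async def async_handler(x):
    return x + 1

print(asyncio.run(ensure_async(sync_handler)(1)))   # 2
print(asyncio.run(ensure_async(async_handler)(1)))  # 2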
Example #49
0
 async def execute_timely(self, delay, times, f, *args, **kwargs):
     for i in range(times):
         await asyncio.sleep(delay)
         # call f, then await the result only if it is a coroutine; checking
         # f itself with iscoroutine() is always False for functions
         result = f(*args, **kwargs)
         if asyncio.iscoroutine(result):
             await result
Example #50
0
 async def wrapper(self, *args, **kwargs):
     async with self.db:
         r = fn(self, *args, **kwargs)
         if asyncio.iscoroutine(r):
             r = await r
         return r
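
Example #50 shows only the inner wrapper; a sketch of the decorator it plausibly belongs to (with_db, Repo and the asyncio.Lock stand-in are assumptions, not the project's real names): hold self.db for the duration of a call that may be sync or async.

import asyncio
from functools import wraps

def with_db(fn):
    @wraps(fn)
    async def wrapper(self, *args, **kwargs):
        async with self.db:
            r = fn(self, *args, **kwargs)
            if asyncio.iscoroutine(r):
                r = await r
            return r
    return wrapper

class Repo:
    def __init__(self):
        self.db = asyncio.Lock()   # stands in for an async database handle

    @with_db
    def count(self):
        return 3

    @with_db
    async def fetch(self):
        return 'row'

async def main():
    repo = Repo()
    print(await repo.count())   # 3
    print(await repo.fetch())   # row

asyncio.run(main())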
Example #51
0
 def _run_test_method(self, method):
     # If the method is a coroutine or returns a coroutine, run it on the
     # loop
     result = method()
     if asyncio.iscoroutine(result):
         self.loop.run_until_complete(result)
Example #52
0
def is_future(obj):
    return iscoroutine(obj) or isinstance(obj, Future)
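
Example #52 uses bare iscoroutine and Future, so its module presumably has a from asyncio import ... nearby; a self-contained reconstruction with a quick check (the imports are an assumption).

from asyncio import Future, iscoroutine   # assumed imports for the snippet above

def is_future(obj):
    return iscoroutine(obj) or isinstance(obj, Future)

async def noop():
    pass

coro = noop()
print(is_future(coro))   # True: coroutine objects are treated as future-like
print(is_future(42))     # False
coro.close()             # silence the "never awaited" warning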
Example #53
0
def test_is_task():
    assert callable(copy.copy)
    assert isinstance(copy.copy, celery.Task)
    assert not asyncio.iscoroutine(copy.copy)
    assert asyncio.iscoroutinefunction(copy.copy.adelay)
Example #54
0
    async def serve(self, reader, writer):
        """Serve function.
        Don't use this outside asyncio.start_server.
        """
        global _unpack_encoding
        _logger.debug('enter serve: %s', writer.get_extra_info('peername'))

        conn = Connection(
            reader, writer,
            msgpack.Unpacker(encoding=_unpack_encoding, **self._unpack_params))

        while not conn.is_closed():
            req = None
            try:
                req = await conn.recvall(self._timeout)
            except asyncio.TimeoutError as te:
                await asyncio.sleep(3)
                _logger.warning(
                    "Client did not send any data before timeout. Closing connection..."
                )
                conn.close()
                continue
            except IOError as ie:
                break
            except Exception as e:
                conn.reader.set_exception(e)
                raise e

            if not isinstance(req, (tuple, list)):
                try:
                    await self._send_error(conn, "Invalid protocol", -1, None)
                    # skip the rest of iteration code after sending error
                    continue

                except Exception as e:
                    _logger.error("Error when receiving req: %s", e)

            req_start = datetime.datetime.now()
            method = None
            msg_id = None
            args = None
            try:
                _logger.debug('parsing req: %s', req)
                msg_id, method, args, method_name = self._parse_request(req)
                _logger.debug('parsing completed: %s', req)
            except Exception as e:
                _logger.error("Exception %s raised when _parse_request %s", e,
                              req)

                # skip the rest of iteration code since we already got an error
                continue

            # Execute the parsed request
            try:
                _logger.debug('calling method: %s', method)
                ret = method.__call__(*args)
                if asyncio.iscoroutine(ret):
                    _logger.debug("start to wait_for")
                    ret = await asyncio.wait_for(ret, self._timeout)
                _logger.debug('calling %s completed. result: %s', method, ret)
            except Exception as e:
                _logger.error("Caught Exception in `%s`. %s: %s", method_name,
                              type(e).__name__, e)
                await self._send_error(conn, type(e).__name__, str(e), msg_id)
                _logger.debug('sending exception %s completed', e)
            else:
                _logger.debug('sending result: %s', ret)
                await self._send_result(conn, ret, msg_id)
                _logger.debug('sending result %s completed', ret)

            req_end = datetime.datetime.now()
            _logger.info("Method `%s` took %fms", method_name,
                         (req_end - req_start).microseconds / 1000)
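
The dispatch core of Example #54 in isolation (call_with_timeout and the two add functions are invented for the sketch): invoke the resolved method, and apply the timeout only when the call actually produced a coroutine.

import asyncio

async def call_with_timeout(method, args, timeout):
    ret = method(*args)
    if asyncio.iscoroutine(ret):
        ret = await asyncio.wait_for(ret, timeout)
    return ret

def fast_add(a, b):
    return a + b

async def slow_add(a, b):
    await asyncio.sleep(0.01)
    return a + b

async def main():
    print(await call_with_timeout(fast_add, (1, 2), 1.0))   # 3
    print(await call_with_timeout(slow_add, (1, 2), 1.0))   # 3

asyncio.run(main())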
Example #55
0
 def test_is_task(self):
     assert callable(move.move)
     assert isinstance(move.move, celery.Task)
     assert not asyncio.iscoroutine(move.move)
     assert asyncio.iscoroutinefunction(move.move.adelay)
Example #56
0
async def await_me_maybe(value):
    if callable(value):
        value = value()
    if asyncio.iscoroutine(value):
        value = await value
    return value
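
A quick usage sketch of Example #56 (the helper is restated verbatim so the block runs on its own): plain values pass through, callables are invoked, and coroutines are awaited.

import asyncio

async def await_me_maybe(value):          # as defined in Example #56
    if callable(value):
        value = value()
    if asyncio.iscoroutine(value):
        value = await value
    return value

async def fetch():
    return 'async value'

async def main():
    print(await await_me_maybe('plain value'))        # passed through unchanged
    print(await await_me_maybe(lambda: 'callable'))   # called
    print(await await_me_maybe(fetch))                # called, then awaited

asyncio.run(main())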
Example #57
0
 def _send(self, *args, **kwargs):
     for receiver in self._items:
         res = receiver(*args, **kwargs)
         if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):
             yield from res
Example #58
0
    def input_char(self, k):
        """Called by the frontend for every keystroke from the user.

        :param k: The incoming keystroke."""

        self.reap_tasks()
        self.context.clear()
        try:
            self.log.debug(
                'got key %s = %s', repr(self.active_keymap.unkey(k)), repr(k))

            if self.intermediate_action is not None:
                self.intermediate_action(keystroke=k)

            try:
                v = self.active_keymap[k]
            except KeyError:
                if self.keyerror_action is not None:
                    self.keyerror_action(k)
                else:
                    self.context.message('unknown command')
                    self.active_keymap = self.keymap
                    self.keyseq = ''
                    self.log.error('no such key in map')
                    self.whine(k)
                return

            self.keyseq = (
                self.keyseq + self.keymap.unkey(k, compact=True) + ' ')

            if not callable(v):
                self.active_keymap = v
            else:
                self.active_keymap = self.keymap
                keyseq = self.keyseq
                self.keyseq = ''
                arg, self.universal_argument = self.universal_argument, None
                self.this_command = getattr(v, '__name__', '?')
                try:
                    self.before_command()
                    ret = self.keymap_action(
                        v,
                        context=self.context,
                        window=self,
                        keystroke=k,
                        argument=arg,
                        keyseq=keyseq,
                        keymap=self.keymap,
                        )
                finally:
                    self.after_command()
                    self.last_command = self.this_command
                    self.last_key = k

                if asyncio.iscoroutine(ret):
                    self.tasks.append(asyncio.Task(self.catch_and_log(ret)))

        except Exception as e:
            self.context.message(str(e))
            self.log.exception('executing command from keymap')
            self.whine(k)
            self.active_keymap = self.keymap
        finally:
            if self.activated_keymap is not None:
                self.activated_keymap(self.active_keymap)
            if self.keyseq:
                self.keyecho(self.keyseq)
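
The command dispatch in Example #58 boils down to: run the bound key action, and if it hands back a coroutine, schedule it as a task instead of blocking the keystroke handler. The same step in isolation (names invented for the sketch).

import asyncio

async def main():
    tasks = []

    def sync_command():
        print('ran inline')

    async def async_command():
        print('ran as a background task')

    for command in (sync_command, async_command):
        ret = command()
        if asyncio.iscoroutine(ret):
            tasks.append(asyncio.create_task(ret))

    await asyncio.gather(*tasks)

asyncio.run(main())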
Example #59
0
async def run_func(loop, func, *args, **kwargs):
    part = partial(func, *args, **kwargs)
    if asyncio.iscoroutine(func) or asyncio.iscoroutinefunction(func):
        return await part()

    return await loop.run_in_executor(None, part)
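
Hypothetical usage of Example #59's run_func; note that its iscoroutine(func) branch would fail for a bare coroutine object (partial() cannot call one), so the sketch restates a simplified version that only distinguishes coroutine functions from blocking callables.

import asyncio
import time
from functools import partial

async def run_func(loop, func, *args, **kwargs):   # simplified from Example #59
    part = partial(func, *args, **kwargs)
    if asyncio.iscoroutinefunction(func):
        return await part()                        # run on the event loop
    return await loop.run_in_executor(None, part)  # push blocking work aside

async def fetch():
    await asyncio.sleep(0)
    return 'from the event loop'

def blocking():
    time.sleep(0.01)
    return 'from the default executor'

async def main():
    loop = asyncio.get_running_loop()
    print(await run_func(loop, fetch))
    print(await run_func(loop, blocking))

asyncio.run(main())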
Example #60
0
    async def update_report(self, report_request_id):
        """
        Call the previously registered report callback and send the result as a message to the VTN.
        """
        logger.debug(f"Running update_report for {report_request_id}")
        report_request = find_by(self.report_requests, 'report_request_id',
                                 report_request_id)
        granularity = report_request['granularity']
        report_back_duration = report_request['report_back_duration']
        report_specifier_id = report_request['report_specifier_id']
        report = find_by(self.reports, 'report_specifier_id',
                         report_specifier_id)
        data_collection_mode = report.data_collection_mode

        if report_request_id in self.incomplete_reports:
            logger.debug("We were already compiling this report")
            outgoing_report = self.incomplete_reports[report_request_id]
        else:
            logger.debug("There is no report in progress")
            outgoing_report = objects.Report(
                report_request_id=report_request_id,
                report_specifier_id=report.report_specifier_id,
                report_name=report.report_name,
                intervals=[])

        intervals = outgoing_report.intervals or []
        if data_collection_mode == 'full':
            if report_back_duration is None:
                report_back_duration = granularity
            date_to = datetime.now(timezone.utc)
            date_from = date_to - max(report_back_duration, granularity)
            for r_id in report_request['r_ids']:
                report_callback = self.report_callbacks[(report_specifier_id,
                                                         r_id)]
                result = report_callback(date_from=date_from,
                                         date_to=date_to,
                                         sampling_interval=granularity)
                if asyncio.iscoroutine(result):
                    result = await result
                for dt, value in result:
                    report_payload = objects.ReportPayload(r_id=r_id,
                                                           value=value)
                    intervals.append(
                        objects.ReportInterval(dtstart=dt,
                                               report_payload=report_payload))

        else:
            for r_id in report_request['r_ids']:
                report_callback = self.report_callbacks[(report_specifier_id,
                                                         r_id)]
                result = report_callback()
                if asyncio.iscoroutine(result):
                    result = await result
                if isinstance(result, (int, float)):
                    result = [(datetime.now(timezone.utc), result)]
                for dt, value in result:
                    logger.info(f"Adding {dt}, {value} to report")
                    report_payload = objects.ReportPayload(r_id=r_id,
                                                           value=value)
                    intervals.append(
                        objects.ReportInterval(dtstart=dt,
                                               report_payload=report_payload))
        outgoing_report.intervals = intervals
        logger.info(
            f"The number of intervals in the report is now {len(outgoing_report.intervals)}"
        )

        # Figure out if the report is complete after this sampling
        if data_collection_mode == 'incremental' and report_back_duration is not None\
                and report_back_duration > granularity:
            report_interval = report_back_duration.total_seconds()
            sampling_interval = granularity.total_seconds()
            expected_len = len(report_request['r_ids']) * int(
                report_interval / sampling_interval)
            if len(outgoing_report.intervals) == expected_len:
                logger.info(
                    "The report is now complete with all the values. Will queue for sending."
                )
                await self.pending_reports.put(
                    self.incomplete_reports.pop(report_request_id))
            else:
                logger.debug(
                    "The report is not yet complete, will hold until it is.")
                self.incomplete_reports[report_request_id] = outgoing_report
        else:
            logger.info("Report will be sent now.")
            await self.pending_reports.put(outgoing_report)
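
The callback convention Example #60 leans on, distilled (collect_sample and the meter callbacks are invented names): a registered report callback may be sync or async, and a bare number is normalized into a single (timestamp, value) sample.

import asyncio
from datetime import datetime, timezone

async def collect_sample(report_callback):
    result = report_callback()
    if asyncio.iscoroutine(result):
        result = await result
    if isinstance(result, (int, float)):
        result = [(datetime.now(timezone.utc), result)]
    return result

def sync_meter():
    return 3.14

async def async_meter():
    return [(datetime.now(timezone.utc), 2.71)]

async def main():
    print(await collect_sample(sync_meter))    # one normalized sample
    print(await collect_sample(async_meter))   # already a list of samples

asyncio.run(main())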