Example 1
def serve(
    host,
    port,
    app,
    request_handler,
    error_handler,
    before_start=None,
    after_start=None,
    before_stop=None,
    after_stop=None,
    debug=False,
    request_timeout=60,
    response_timeout=60,
    keep_alive_timeout=5,
    ssl=None,
    sock=None,
    request_max_size=None,
    request_buffer_queue_size=100,
    reuse_port=False,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_multiple=False,
    run_async=False,
    connections=None,
    signal=Signal(),
    request_class=None,
    access_log=True,
    keep_alive=True,
    is_request_stream=False,
    router=None,
    websocket_max_size=None,
    websocket_max_queue=None,
    websocket_read_limit=2 ** 16,
    websocket_write_limit=2 ** 16,
    state=None,
    graceful_shutdown_timeout=15.0,
    asyncio_server_kwargs=None,
):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param websocket_max_size: enforces the maximum size for
                               incoming messages in bytes.
    :param websocket_max_queue: sets the maximum length of the queue
                                that holds incoming messages.
    :param websocket_read_limit: sets the high-water limit of the buffer for
                                 incoming bytes, the low-water limit is half
                                 the high-water limit.
    :param websocket_write_limit: sets the high-water limit of the buffer for
                                  outgoing bytes, the low-water limit is a
                                  quarter of the high-water limit.
    :param is_request_stream: disable/enable Request.stream
    :param request_buffer_queue_size: streaming request buffer queue size
    :param router: Router object
    :param graceful_shutdown_timeout: how long to wait before force-closing
                                      non-idle connections
    :param asyncio_server_kwargs: key-value args for asyncio/uvloop
                                  create_server method
    :return: Nothing
    """
    if not run_async:
        # create new event_loop after fork
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        app=app,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        websocket_read_limit=websocket_read_limit,
        websocket_write_limit=websocket_write_limit,
        state=state,
        debug=debug,
    )
    asyncio_server_kwargs = (
        asyncio_server_kwargs if asyncio_server_kwargs else {}
    )
    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog,
        **asyncio_server_kwargs
    )

    if run_async:
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Ignore SIGINT when run_multiple
    if run_multiple:
        signal_func(SIGINT, SIG_IGN)

    # Register signals for graceful termination
    if register_sys_signals:
        _signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _signals:
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning(
                    "Sanic tried to use loop.add_signal_handler "
                    "but it is not implemented on this platform."
                )
    pid = os.getpid()
    try:
        logger.info("Starting worker [%s]", pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Graceful shutdown timeout:
        # honor graceful_shutdown_timeout instead of letting
        # connections hang forever; roughly track elapsed time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
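
A hedged sketch of driving this serve() with run_async=True on an existing loop; in Sanic itself app.run()/app.create_server() assemble these arguments internally, so the wiring below (handler attributes, router) is illustrative rather than the real call site:

import asyncio

# Illustrative wiring only: Sanic normally builds these arguments itself.
loop = asyncio.get_event_loop()
server_coroutine = serve(
    host="127.0.0.1",
    port=8000,
    app=app,                             # an existing Sanic application
    request_handler=app.handle_request,  # Sanic's request dispatcher
    error_handler=app.error_handler,
    router=app.router,
    loop=loop,
    run_async=True,                      # reuse `loop`, return the coroutine
)
http_server = loop.run_until_complete(server_coroutine)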
Example 2
async def robot_add_friend_callback(data):
    """
    1. Fetch the robot's last operation record (0 = pulled the user into a group, 1 = joined the group)
    2. Handle the callback according to that last operation type
    Example payload:
    {
        "robot_id": "20181111222222",
        "user_id": "20181111222222",
        "nickname": "用户",
        "avatar": "http://xxx/xxx.png",
        "add_time": "1970-01-01T00:00:00"
    }
    """
    async def bind_user_code(user, code):
        async with db.conn.acquire() as con:
            update_st = await con.prepare('update "user" set code = $1 where id = $2 ')
            await update_st.fetchrow(code, user)

    async def save_robot_add_friend(robot_id, user_id):
        async with db.conn.acquire() as con:
            insert_robot_friend_stmt = await con.prepare(
                'insert into "robot_add_friend" (id, robot_id, user_id, status) values (uuid_generate_v4(), $1, $2, 1)')
            await insert_robot_friend_stmt.fetch(robot_id, user_id)

    robot_code = data.get('robot_id')
    user_code = data.get('user_id')
    nickname = data.get('nickname')
    nickname_md5 = data_md5(nickname)

    robot_info = await get_robot_by_code(robot_code)
    if robot_info is None:
        return
    robot_id = str(robot_info['id'])

    last_operate_key = CACHE_ROBOT_LASTED_OPERATE.format(robot_code=robot_code, nickname_md5=nickname_md5)
    user_operation_record = await redis.conn.get(last_operate_key)
    if user_operation_record is None:
        logger.warning(f'robot: {robot_code} add friend but user: {nickname}, nickname_md5: {nickname_md5} is not found')
        return
    user_operation_record = ujson.loads(user_operation_record)
    user_id = user_operation_record['user_id']
    operation_type = int(user_operation_record['type'])
    await bind_user_code(user_id, user_code)
    await save_robot_add_friend(robot_id, user_id)
    logger.info(f'user: {user_code}, robot: {robot_code} last operation is :{operation_type}')

    if operation_type == TYPE.ROBOT_ENTER_GROUP:
        await send_text_msg(robot_code, user_code, PULL_GROUP_TEXT)
        guide_picture = settings['EXTERNAL_HOST'] + '/static/image/guide.png'
        await send_image_msg(robot_code, user_code, guide_picture)
        await robot_distribute_and_update_count(robot_id, user_id, user_code)
        async with await redis.conn.pipeline() as pipe:
            await pipe.decr(CACHE_TEMP_ROBOT_AMOUNT_USED.format(robot_id=robot_id), 1)
            await pipe.expire(CACHE_TEMP_ROBOT_AMOUNT_USED.format(robot_id=robot_id), until_tomorrow_expire_time())
            await pipe.delete(CACHE_TEMP_USER_ROBOT_DISTRIBUTE.format(user_id=user_id))
            await pipe.execute()

    elif operation_type == TYPE.PULL_USER_GROUP:
        redis_key = CACHE_BIND_USER_ROBOT_GROUP_MAP.format(user_id=user_id, robot_code=robot_code)
        group_code = await redis.conn.get(redis_key)
        if group_code is not None:
            invite_data = {"group_id": group_code, "robot_id": robot_code, "user_id": user_code}
            resp = await robot_invite_user(invite_data)
            logger.info(f'robot invite user join group data:{invite_data} and response:{resp}')
    await redis.conn.delete(last_operate_key)
Example 3
    def tus_file_upload(self, request: request):
        response = Response.HTTPResponse(status=200)
        logger.info(
            f"Metadata: {request.headers.get('Upload-Metadata', 'None')}")

        if request.method == 'GET':
            metadata = {}
            if "Upload-Metadata" not in request.headers:
                logger.error("Upload-Metadata header is mandatory")
            # default to "" so a missing header doesn't raise AttributeError
            for kv in request.headers.get("Upload-Metadata", "").split(","):
                if len(kv.split(" ")) > 1:
                    key, value = kv.split(" ")
                    metadata[key] = base64.b64decode(value).decode("utf-8")

            if metadata.get("filename", None) is None:
                return Response.text("metadata filename is not set", 404)

            (filename_name, _) = os.path.splitext(metadata.get("filename"))
            if filename_name.upper() in [
                    os.path.splitext(f)[0].upper() for f in os.listdir(
                        os.path.dirname(self.tus_upload_folder))
            ]:
                response.headers['Tus-File-Name'] = metadata.get("filename")
                response.headers['Tus-File-Exists'] = True
            else:
                response.headers['Tus-File-Exists'] = False
            return response

        elif request.method == 'OPTIONS' and 'Access-Control-Request-Method' in request.headers:
            # CORS option request, return 200
            return response

        if request.headers.get("Tus-Resumable") is not None:
            response.headers['Tus-Resumable'] = self.tus_api_version
            response.headers['Tus-Version'] = self.tus_api_version_supported

            if request.method == 'OPTIONS':
                response.headers['Tus-Extension'] = ",".join(
                    self.tus_api_extensions)
                response.headers['Tus-Max-Size'] = self.tus_max_file_size

                response.status = 204
                return response

            # process upload metadata
            metadata = {}
            if "Upload-Metadata" not in request.headers:
                logger.error("Upload-Metadata header is mandatory")
            # default to "" so a missing header doesn't raise AttributeError
            for kv in request.headers.get("Upload-Metadata", "").split(","):
                if len(kv.split(" ")) > 1:
                    key, value = kv.split(" ")
                    metadata[key] = base64.b64decode(value).decode("utf-8")

            if metadata.get("filename") and os.path.lexists(
                    os.path.join(
                        self.tus_upload_folder, metadata.get(
                            "filename"))) and self.tus_file_overwrite is False:
                response.status = 409
                return response

            file_size = int(request.headers.get("Upload-Length", "0"))

            with db_session:
                File = TusFile(filename=metadata.get("filename", " "),
                               file_size=file_size,
                               metadata=metadata)

            try:
                # pre-allocate the upload file at its final size
                with open(os.path.join(self.tus_upload_folder,
                                       str(File.fid)), "w") as f:
                    f.seek(file_size - 1)
                    f.write("\0")
            except IOError as e:
                logger.error("Unable to create file: {}".format(e))
                response.status = 500
                return response

            response.status = 201
            response.headers['Location'] = '{}/{}'.format(
                request.url, str(File.fid))
            response.headers['Tus-Temp-Filename'] = str(File.fid)

        else:
            logger.warning(
                "Received File upload for unsupported file transfer protocol")
            response.body = b"Received File upload for unsupported file transfer protocol"
            response.status = 500

        return response
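
The Upload-Metadata handling above follows the tus convention of a comma-separated list of "key base64(value)" pairs. A self-contained sketch of the same decoding, with a made-up header value:

import base64

# Illustrative header value: two key/value pairs, values base64-encoded
upload_metadata = "filename cmVwb3J0LnBkZg==,mimetype YXBwbGljYXRpb24vcGRm"

metadata = {}
for kv in upload_metadata.split(","):
    parts = kv.strip().split(" ")
    if len(parts) > 1:
        metadata[parts[0]] = base64.b64decode(parts[1]).decode("utf-8")

print(metadata)  # {'filename': 'report.pdf', 'mimetype': 'application/pdf'}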
Example 4
def serve(
    host,
    port,
    app,
    request_handler,
    error_handler,
    before_start=None,
    after_start=None,
    before_stop=None,
    after_stop=None,
    debug=False,
    request_timeout=60,
    response_timeout=60,
    keep_alive_timeout=5,
    ssl=None,
    sock=None,
    request_max_size=None,
    request_buffer_queue_size=100,
    reuse_port=False,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_multiple=False,
    run_async=False,
    connections=None,
    signal=Signal(),
    request_class=None,
    access_log=True,
    keep_alive=True,
    is_request_stream=False,
    router=None,
    websocket_max_size=None,
    websocket_max_queue=None,
    websocket_read_limit=2 ** 16,
    websocket_write_limit=2 ** 16,
    state=None,
    graceful_shutdown_timeout=15.0,
    asyncio_server_kwargs=None,
):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param run_async: bool: Do not create a new event loop for the server,
                      and return an AsyncServer object rather than running it
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param websocket_max_size: enforces the maximum size for
                               incoming messages in bytes.
    :param websocket_max_queue: sets the maximum length of the queue
                                that holds incoming messages.
    :param websocket_read_limit: sets the high-water limit of the buffer for
                                 incoming bytes, the low-water limit is half
                                 the high-water limit.
    :param websocket_write_limit: sets the high-water limit of the buffer for
                                  outgoing bytes, the low-water limit is a
                                  quarter of the high-water limit.
    :param is_request_stream: disable/enable Request.stream
    :param request_buffer_queue_size: streaming request buffer queue size
    :param router: Router object
    :param graceful_shutdown_timeout: how long to wait before force-closing
                                      non-idle connections
    :param asyncio_server_kwargs: key-value args for asyncio/uvloop
                                  create_server method
    :return: Nothing
    """
    if not run_async:
        # create new event_loop after fork
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    app.asgi = False

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        app=app,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_buffer_queue_size=request_buffer_queue_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        websocket_read_limit=websocket_read_limit,
        websocket_write_limit=websocket_write_limit,
        state=state,
        debug=debug,
    )
    asyncio_server_kwargs = (
        asyncio_server_kwargs if asyncio_server_kwargs else {}
    )
    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog,
        **asyncio_server_kwargs
    )

    if run_async:
        return AsyncioServer(
            loop,
            server_coroutine,
            connections,
            after_start,
            before_stop,
            after_stop,
        )

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Ignore SIGINT when run_multiple
    if run_multiple:
        signal_func(SIGINT, SIG_IGN)

    # Register signals for graceful termination
    if register_sys_signals:
        _signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
        for _signal in _signals:
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning(
                    "Sanic tried to use loop.add_signal_handler "
                    "but it is not implemented on this platform."
                )
    pid = os.getpid()
    try:
        logger.info("Starting worker [%s]", pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Graceful shutdown timeout:
        # honor graceful_shutdown_timeout instead of letting
        # connections hang forever; roughly track elapsed time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
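
Unlike Example 1, this variant hands back an AsyncioServer wrapper when run_async=True. In Sanic of this vintage the public way onto that path is create_server() with return_asyncio_server=True; roughly, assuming an existing app:

import asyncio

# create_server() forwards to serve() above and, with
# return_asyncio_server=True, yields the AsyncioServer.
server_coroutine = app.create_server(
    host="0.0.0.0", port=8000, return_asyncio_server=True
)
loop = asyncio.get_event_loop()
task = asyncio.ensure_future(server_coroutine, loop=loop)
loop.run_forever()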
Example 5
async def asr_view(request, user_cache):
    # Unpack the cached user info for the logic below
    token, pivot = user_cache["token"], user_cache["pivot"]
    processed_id = user_cache["processed"]

    audio_bytes = request.files.get("file")
    if not audio_bytes:
        _Logger.debug("empty audio bytes")
        return response.json(
            {
                "status": False,
                "msg": "音频为空",
                "ret": Ret.EMPTY_ASR_AUDIO
            }
        )

    res = await aip_asr(audio_bytes)

    # Request failed
    if not res or res.get("err_no") == 2000:
        _Logger.warning("err happened when processing baidu asr")
        return response.json(
            {
                "status": False,
                "msg": "请求百度 asr 失败",
                "ret": Ret.BAIDU_ASR_ERR
            }
        )

    # Request succeeded
    asr_res = res.get("result") or [""]
    sentence = asr_res[0]

    _Logger.debug(f"successfully processed baidu asr res = {res}")
    if not sentence:
        return response.json(
            {
                "status": False,
                "msg": "语音内容为空",
                "ret": Ret.EMPTY_ASR_AUDIO
            }
        )

    sentence = split_sentence(sentence, pivot)
    _Logger.debug(f"get baidu asr sentence = {sentence}")
    if not sentence:
        resp = {"msg":f"需要说含有{pivot}的诗句哦", 
                "ret": Ret.FAIL_NOT_VALID_SENTENCE, 
                "status": False}

        return response.json(resp)

    result = await get_sentence_by_text(sentence)
    _Logger.debug(f"mysql search result = {result}")
    if not result:
        resp = {"msg":"没听过这句诗哦", 
                "ret": Ret.FAIL_NOT_VALID_SENTENCE, 
                "status": False}

        return response.json(resp)

    sent_id, poetry_id, _ = result
    sent_id = int(sent_id)
    if sent_id in processed_id:
        resp = {
            "msg": "这句已经说过了",
            "ret": Ret.FAIL_REPEATED,
            "status": False
        }
        return response.json(resp)

    user_cache["processed"].append(sent_id)
    user_cache["count"] += 1
    await set_user_cache(token, user_cache)
    author, title, poetry_text = await get_poetry_by_id(poetry_id)
    _Logger.debug(f"get poetry from sql text = {poetry_text}, author = {author}, {title}")
    return response.json(
        {
            "status": True,
            "ret": Ret.SUCCESS_CODE,
            "data": {
                "author": author,
                "title": title,
                "text": poetry_text
            }
        }
    )
Example 6
    def register_plugin(self,
                        plugin,
                        *args,
                        name=None,
                        skip_reg=False,
                        **kwargs):
        assert not self._running, "Cannot add, remove, or change plugins " \
                                  "after the App has started serving."
        assert plugin, "Plugin must be a valid type! Do not pass in `None` " \
                       "or `False`"

        if isinstance(plugin, type):
            # We got passed in a Class. That's ok, we can handle this!
            module_name = getattr(plugin, '__module__')
            class_name = getattr(plugin, '__name__')
            lower_class = to_snake_case(class_name)
            try:
                mod = importlib.import_module(module_name)
                try:
                    plugin = getattr(mod, lower_class)
                except AttributeError:
                    plugin = mod  # try the module-based resolution next
            except ImportError:
                raise

        if ismodule(plugin):
            # We got passed in a module. That's ok, we can handle this!
            try:  # look for '.instance' on the module
                plugin = getattr(plugin, 'instance')
                assert plugin is not None
            except (AttributeError, AssertionError):
                # now look for the same name,
                # like my_module.my_module on the module.
                try:
                    plugin_module_name = getattr(plugin, '__name__')
                    assert plugin_module_name and len(plugin_module_name) > 0
                    plugin_module_name = plugin_module_name.split('.')[-1]
                    plugin = getattr(plugin, plugin_module_name)
                    assert plugin is not None
                except (AttributeError, AssertionError):
                    raise RuntimeError(
                        "Cannot import this module as a Sanic Plugin.")

        assert isinstance(plugin, SanicPlugin),\
            "Plugin must be derived from SanicPlugin"
        if name is None:
            try:
                name = str(plugin.__class__.__name__)
                assert name is not None
            except (AttributeError, AssertionError, ValueError, KeyError):
                logger.warning(
                    "Cannot determine a name for {}, using UUID.".format(
                        repr(plugin)))
                name = str(uuid1(None, None))
        assert isinstance(name, str), \
            "Plugin name must be a python unicode string!"

        associated_tuple = plugin.AssociatedTuple

        if name in self._plugin_names:  # we're already registered on this SPF
            reg = plugin.find_plugin_registration(self)
            assoc = associated_tuple(plugin, reg)
            raise ValueError("Plugin {:s} is already registered!".format(name),
                             assoc)
        if plugin.is_registered_on_framework(self):
            raise RuntimeError("Plugin already shows it is registered to this "
                               "spf, maybe under a different name?")
        self._plugin_names.add(name)
        shared_context = self.shared_context
        self._contexts[name] = context = SanicContext(
            self, shared_context, {'shared': shared_context})
        _p_context = self._plugins_context
        _plugin_reg = _p_context.get(name, None)
        if _plugin_reg is None:
            _p_context[name] = _plugin_reg = _p_context.create_child_context()
        _plugin_reg['name'] = name
        _plugin_reg['context'] = context
        if skip_reg:
            dummy_reg = PluginRegistration(spf=self,
                                           plugin_name=name,
                                           url_prefix=None)
            context['log'] = partial(self.log, reg=dummy_reg)
            context['url_for'] = partial(self.url_for, reg=dummy_reg)
            plugin.registrations.add(dummy_reg)
            # This indicates the plugin is not registered on the app
            _plugin_reg['instance'] = None
            _plugin_reg['reg'] = None
            return associated_tuple(plugin, dummy_reg)
        if _plugin_reg.get('instance', False):
            raise RuntimeError("The plugin we are trying to register already "
                               "has a known instance!")
        reg = self._register_helper(plugin,
                                    context,
                                    *args,
                                    _spf=self,
                                    _plugin_name=name,
                                    **kwargs)
        _plugin_reg['instance'] = plugin
        _plugin_reg['reg'] = reg
        return associated_tuple(plugin, reg)
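
register_plugin() is from the Sanic-Plugins-Framework (spf); a minimal registration sketch following spf's convention of a module-level plugin instance (the plugin and app names are assumptions for illustration):

from sanic import Sanic
from spf import SanicPlugin, SanicPluginsFramework


class HelloPlugin(SanicPlugin):
    pass


hello_plugin = HelloPlugin()  # spf convention: a module-level instance

app = Sanic("demo")
spf = SanicPluginsFramework(app)
# returns the plugin's AssociatedTuple (plugin paired with its registration)
assoc = spf.register_plugin(hello_plugin)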
Example 7
async def rabbit_pt_callback(message: aio_pika.IncomingMessage):
    with message.process():
        pieces = message.routing_key.split(".")
        #print(" [x] %r:%r" % (
        #    message.routing_key,
        #    message.body.decode('utf-8')
        #))
        if pieces[1] == "status":
            try:
                if pieces[3] == "create_payload_with_code":
                    # this means we should be getting back the finished payload or an error
                    query = await db_model.payload_query()
                    payload = await db_objects.get(query, uuid=pieces[4])
                    agent_message = json.loads(message.body.decode())
                    if agent_message['status'] == 'success':
                        code = base64.b64decode(agent_message['payload'])
                        # write the decoded payload out once, then hash it
                        with open(payload.file_id.path, 'wb') as payload_file:
                            payload_file.write(code)
                        md5 = await hash_MD5(code)
                        sha1 = await hash_SHA1(code)
                        payload.file_id.md5 = md5
                        payload.file_id.sha1 = sha1
                        await db_objects.update(payload.file_id)
                        query = await db_model.buildparameterinstance_query()
                        current_instances = await db_objects.execute(query.where(db_model.BuildParameterInstance.payload == payload))
                        for ci in current_instances:
                            if ci.build_parameter.name in agent_message['build_parameter_instances']:
                                ci.parameter = agent_message['build_parameter_instances'][ci.build_parameter.name]
                                await db_objects.update(ci)
                                del agent_message['build_parameter_instances'][ci.build_parameter.name]
                        query = await db_model.buildparameter_query()
                        for k, v in agent_message['build_parameter_instances'].items():
                            # now create entries that were set to default in the build script that weren't supplied by the user
                            try:
                                bp = await db_objects.get(query, name=k, payload_type=payload.payload_type)
                                await db_objects.create(db_model.BuildParameterInstance, parameter=v, payload=payload,
                                                        build_parameter=bp)
                            except Exception as e:
                                agent_message['message'] += "Failed to find build parameter for name {}".format(k)
                    payload.build_phase = agent_message['status']
                    payload.build_message = agent_message['message']
                    await db_objects.update(payload)
                elif pieces[3] == "command_transform":
                    query = await db_model.task_query()
                    task = await db_objects.get(query, id=pieces[5])
                    if pieces[4] == "error":
                        # create a response that there was an error and set task to processed
                        task.status = "error"
                        task.completed = True
                        task.timestamp = datetime.datetime.utcnow()
                        task.status_timestamp_processed = task.timestamp
                        await db_objects.update(task)
                        await db_objects.create(db_model.Response, task=task, response=message.body.decode('utf-8'))
                    else:
                        task.params = message.body
                        task.timestamp = datetime.datetime.utcnow()
                        if pieces[4] == "success":
                            task.status = "submitted"
                        elif pieces[4] == "completed":
                            task.status = "processed"
                            task.completed = True
                        else:
                            task.status = pieces[4]
                        task.status_timestamp_submitted = task.timestamp
                        await db_objects.update(task)
                        await add_command_attack_to_task(task, task.command)
                elif pieces[3] == "sync_classes":
                    operation_query = await db_model.operation_query()
                    operations = await db_objects.execute(operation_query.where(db_model.Operation.complete == False))
                    if pieces[5] == "" or pieces[5] is None:
                        # this was an auto sync from starting a container
                        operator = None
                    else:
                        operator_query = await db_model.operator_query()
                        operator = await db_objects.get(operator_query, username=base64.b64decode(pieces[5]).decode())
                    if pieces[4] == "success":
                        from app.api.payloadtype_api import import_payload_type_func
                        try:
                            status = await import_payload_type_func(json.loads(message.body.decode()), operator)
                            if status['status'] == "success":
                                if pieces[2] != status['ptype']:
                                    logger.warning(
                                        "Imported ptype {} from container {}".format(status['ptype'], pieces[2]))
                                logger.info("Successfully updated database from docker code for {}".format(pieces[2]))
                                for o in operations:
                                    await db_objects.create(db_model.OperationEventLog, operator=operator, level="info", operation=o,
                                                     message="Successfully Sync-ed database with {} payload files".format(pieces[2]))
                                # for a successful checkin, we need to find all wrapper payload types and get them to re-check in
                                if status['wrapper'] is False:
                                    query = await db_model.payloadtype_query()
                                    pts = await db_objects.execute(query.where(db_model.PayloadType.wrapper == True))
                                    sync_operator = "" if operator is None else operator.username
                                    for pt in pts:
                                        await send_pt_rabbitmq_message(pt.ptype, "sync_classes", "", sync_operator)
                            else:
                                logger.error("Failed to update database from docker code for {}: {}".format(pieces[2], status['error']))
                                for o in operations:
                                    await db_objects.create(db_model.OperationEventLog, operator=operator, level="info",
                                                     operation=o,
                                                     message="Failed Sync-ed database with {} payload files: {}".format(
                                                         pieces[2], status['error']))
                        except Exception as i:
                            logger.error("Failed to update database from syncing classes: {}".format(str(i)))
                            for o in operations:
                                await db_objects.create(db_model.OperationEventLog, operator=operator, level="error",
                                                        operation=o,
                                                        message="Failed Sync-ed database with {} payload files: {}".format(
                                                            pieces[2], status['error']))
                    else:
                        logger.error("Failed to get new payload and command info from container for {}: {}".format(pieces[0], message.body.decode()))
                        for o in operations:
                            await db_objects.create(db_model.OperationEventLog, operator=operator, level="error",
                                                    operation=o,
                                                    message="Failed getting information for payload {} with error: {}".format(
                                                        pieces[2], message.body.decode()))
            except Exception as e:
                logger.exception("Exception in rabbit_pt_callback: " + str(e))
                print(str(sys.exc_info()[-1].tb_lineno) + " " + str(e))
Example 8
async def _train_async_internal(
    file_importer: TrainingDataImporter,
    train_path: Text,
    output_path: Text,
    force_training: bool,
    fixed_model_name: Optional[Text],
    persist_nlu_training_data: bool,
    additional_arguments: Optional[Dict],
) -> Optional[Text]:
    """Trains a Rasa model (Core and NLU). Use only from `train_async`.

    Args:
        file_importer: `TrainingDataImporter` which supplies the training data.
        train_path: Directory in which to train the model.
        output_path: Output path.
        force_training: If `True` retrain model even if data has not changed.
        persist_nlu_training_data: `True` if the NLU training data should be persisted
                                   with the model.
        fixed_model_name: Name of model to be stored.
        additional_arguments: Additional training parameters.

    Returns:
        Path of the trained model archive.
    """

    stories, nlu_data = await asyncio.gather(file_importer.get_stories(),
                                             file_importer.get_nlu_data())

    if stories.is_empty() and nlu_data.is_empty():
        _logger.error(
            "No training data given. Please provide stories and NLU data in "
            "order to train a Rasa model using the '--data' argument.")
        return

    if stories.is_empty():
        _logger.warning(
            "No stories present. Just a Rasa NLU model will be trained.")
        return await _train_nlu_with_validated_data(
            file_importer,
            output=output_path,
            fixed_model_name=fixed_model_name,
            persist_nlu_training_data=persist_nlu_training_data,
        )

    if nlu_data.is_empty():
        _logger.warning(
            "No NLU data present. Just a Rasa Core model will be trained.")
        return await _train_core_with_validated_data(
            file_importer,
            output=output_path,
            fixed_model_name=fixed_model_name,
            additional_arguments=additional_arguments,
        )

    new_fingerprint = await model.model_fingerprint(file_importer)
    old_model = model.get_latest_model(output_path)
    fingerprint_comparison = FingerprintComparisonResult(
        force_training=force_training)
    if not force_training:
        fingerprint_comparison = model.should_retrain(new_fingerprint,
                                                      old_model, train_path)

    if fingerprint_comparison.is_training_required():
        await _do_training(
            file_importer,
            output_path=output_path,
            train_path=train_path,
            fingerprint_comparison_result=fingerprint_comparison,
            fixed_model_name=fixed_model_name,
            persist_nlu_training_data=persist_nlu_training_data,
            additional_arguments=additional_arguments,
        )

        return model.package_model(
            fingerprint=new_fingerprint,
            output_directory=output_path,
            train_path=train_path,
            fixed_model_name=fixed_model_name,
        )

    print_success("Nothing changed. You can use the old model stored at '{}'."
                  "".format(os.path.abspath(old_model)))

    _logger.info("Nothing changed. You can use the old model stored at '{}'."
                 "".format(os.path.abspath(old_model)))

    return old_model
Example 9
    def run(
        self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        *,
        debug: bool = False,
        auto_reload: Optional[bool] = None,
        ssl: Union[Dict[str, str], SSLContext, None] = None,
        sock: Optional[socket] = None,
        workers: int = 1,
        protocol: Optional[Type[Protocol]] = None,
        backlog: int = 100,
        register_sys_signals: bool = True,
        access_log: Optional[bool] = None,
        unix: Optional[str] = None,
        loop: None = None,
        reload_dir: Optional[Union[List[str], str]] = None,
    ) -> None:
        """
        Run the HTTP Server and listen until keyboard interrupt or term
        signal. On termination, drain connections before closing.

        :param host: Address to host on
        :type host: str
        :param port: Port to host on
        :type port: int
        :param debug: Enables debug output (slows server)
        :type debug: bool
        :param auto_reload: Reload app whenever its source code is changed.
                            Enabled by default in debug mode.
        :type auto_reload: bool
        :param ssl: SSLContext, or location of certificate and key
                    for SSL encryption of worker(s)
        :type ssl: SSLContext or dict
        :param sock: Socket for the server to accept connections from
        :type sock: socket
        :param workers: Number of worker processes to spawn
        :type workers: int
        :param protocol: Subclass of asyncio Protocol class
        :type protocol: type[Protocol]
        :param backlog: a number of unaccepted connections that the system
                        will allow before refusing new connections
        :type backlog: int
        :param register_sys_signals: Register SIG* events
        :type register_sys_signals: bool
        :param access_log: Enables writing access logs (slows server)
        :type access_log: bool
        :param unix: Unix socket to listen on instead of TCP port
        :type unix: str
        :return: Nothing
        """
        if reload_dir:
            if isinstance(reload_dir, str):
                reload_dir = [reload_dir]

            for directory in reload_dir:
                direc = Path(directory)
                if not direc.is_dir():
                    logger.warning(
                        f"Directory {directory} could not be located")
                self.reload_dirs.add(Path(directory))

        if loop is not None:
            raise TypeError(
                "loop is not a valid argument. To use an existing loop, "
                "change to create_server().\nSee more: "
                "https://sanic.readthedocs.io/en/latest/sanic/deploying.html"
                "#asynchronous-support")

        if auto_reload or auto_reload is None and debug:
            self.auto_reload = True
            if os.environ.get("SANIC_SERVER_RUNNING") != "true":
                return reloader_helpers.watchdog(1.0, self)

        if sock is None:
            host, port = host or "127.0.0.1", port or 8000

        if protocol is None:
            protocol = (WebSocketProtocol
                        if self.websocket_enabled else HttpProtocol)
        # if access_log is passed explicitly change config.ACCESS_LOG
        if access_log is not None:
            self.config.ACCESS_LOG = access_log

        server_settings = self._helper(
            host=host,
            port=port,
            debug=debug,
            ssl=ssl,
            sock=sock,
            unix=unix,
            workers=workers,
            protocol=protocol,
            backlog=backlog,
            register_sys_signals=register_sys_signals,
            auto_reload=auto_reload,
        )

        try:
            self.is_running = True
            self.is_stopping = False
            if workers > 1 and os.name != "posix":
                logger.warning(
                    f"Multiprocessing is currently not supported on {os.name},"
                    " using workers=1 instead")
                workers = 1
            if workers == 1:
                serve_single(server_settings)
            else:
                serve_multiple(server_settings, workers)
        except BaseException:
            error_logger.exception(
                "Experienced exception while trying to serve")
            raise
        finally:
            self.is_running = False
        logger.info("Server Stopped")
Example 10
    def sign(self, envvars: dict, workdir: str) -> bool:
        env = copy.deepcopy(envvars)
        curpath = os.getenv('PATH')
        env['PATH'] = self.toolspath
        if curpath:
            env['PATH'] += ':' + curpath
        env['MACHINE'] = self.machine
        bupgen = 'BUPGEN' in env
        self._symlink_scripts(workdir)
        # We want to return the minimal set of artifacts possible, so
        # make sure we remove:
        # - the files that were sent over
        # - files generated that are not needed:
        #     - tegraflash will sign and encrypt the 'kernel', but cboot does
        #       not boot the encrypted+signed copy, just the signed one
        #     - the subdirectories generated by the signing program
        #     - other housekeeping files
        to_remove = os.listdir(workdir) + [
            'signed', 'encrypted_signed', 'flash.xml.tmp'
        ]
        pkc = self.keys.get('rsa_priv.pem')
        if self.soctype == 'tegra210':
            sbk = None
        else:
            to_remove.append('flash.xml')
            kernelname, kernelext = os.path.splitext(env['LNXFILE'])
            to_remove.append(kernelname + '_sigheader' + kernelext +
                             '.encrypt.signed')
            try:
                sbk = self.keys.get('sbk.txt')
            except FileNotFoundError:
                sbk = None
        cmd = [
            "{}-flash-helper".format(self.soctype),
            '--bup' if bupgen else '--no-flash', '-u', pkc
        ]
        if sbk:
            cmd += ['-v', sbk]
        cmd += [
            'flash.xml.in', env['DTBFILE'], '{}.cfg'.format(self.machine),
            env['ODMDATA']
        ]
        if self.soctype == 'tegra210':
            cmd.append(env['boardcfg'])
        cmd.append(env['LNXFILE'])
        try:
            logger.info("Running: {}".format(cmd))
            proc = subprocess.run(cmd,
                                  stdin=subprocess.DEVNULL,
                                  cwd=workdir,
                                  env=env,
                                  check=True,
                                  capture_output=True,
                                  encoding='utf-8')
            self.keys.cleanup()
            logger.debug("stdout: {}".format(proc.stdout))
            logger.debug("stderr: {}".format(proc.stderr))
            # For BUP generation, just return the payloads.
            # For flashing, we return all the signed/encrypted files
            if bupgen:
                utils.remove_files([
                    os.path.join(workdir, fname)
                    for fname in os.listdir(workdir)
                    if not fname.startswith('payloads')
                ])
            else:
                utils.remove_files(
                    [os.path.join(workdir, fname) for fname in to_remove])
            return True
        except subprocess.CalledProcessError as e:
            self.keys.cleanup()
            logger.warning("signing error, stdout: {}\nstderr: {}".format(
                e.stdout, e.stderr))
        return False
Example 11
async def handle_404(request, exception):
    logger.warning(f"URL {request.path} not found, sending current UTC Time...")
    current_time = int(round(datetime.now(tz=pytz.timezone("UTC")).timestamp()))
    return json({"time": current_time}, status=404)
Example 12
async def group_activated_callback(data):
    """
    1. 判断是否群宠用户和是否封号
        * 不是群宠用户 或被封号 注销由创群
        * 是群宠用户并且没有被封号 执行步骤2
    2. 该群是否被激活(status!= 3)
        * 激活,激活群失败,发私聊消息
        * 没有被激活,执行步骤3
    3. 该群是否以前被激活,
        * 获取结算天数 执行步骤4
    4. 插入group记录,map记录 更新robot表机器人激活额度(事务)
    5. redis记录机器人当日激活群数,并判断该机器人的额度是否到达当日最大的额度
        * 是 删除机器人快照缓存
    6. 判断该用户是否是首次导群
        * 是 修改user 表是否导群记录
    7. 给用户发送群激活成功的私聊消息
    8. 调用分配机器人接口,传入激活群机器人,以及要分配的额度,获取一个机器人
        * 返回机器人是当前机器人 不做操作
        * 返回机器人不是当前机器人 给用户发送私聊消息,推送机器人二维码

    :param :
    :return:
    """
    group_code = data.get('group_id')
    robot_code = data.get('robot_id')
    mem_code = data.get('user_id')
    name = data.get('name', '未命名')  # '未命名' = "unnamed"
    logger.info(
        f'robot activated group callback, robot_code:{robot_code}, group_code:{group_code},mem_code:{mem_code}'
    )
    # 1
    user_info = await get_user_by_code(mem_code)
    if user_info is None or user_info['status'] == 2:
        await cancel_group(data)
        logger.debug('user does not exist')
        return
    user_id = str(user_info['id'])
    channel = user_info['channel']
    robot_info = await get_robot_by_code(robot_code)
    robot_id = str(robot_info['id'])
    # 2
    key = 'bind_group:' + group_code
    if not await acquire_lock_group(key, release_time=30):
        return
    if await check_group_activated(group_code, robot_code, mem_code, name):
        return
    group_id = await add_group_record(group_code, name, user_id, robot_id)
    logger.info(f'activate group [{group_code}] for user [{mem_code}]')
    redis_val = {
        "group_code": group_code,
        "user_id": user_id,
        "user_code": mem_code,
        "robot_id": robot_id,
        "robot_code": robot_code,
        "group_id": group_id
    }
    await redis.conn.hset(GROUP_USER_ROBOT_MAP_REDIS_KEY, group_code,
                          ujson.dumps(redis_val))
    # kick off syncing the group members
    await update_cache_after_bind_group(robot_id)
    await sync_group_members({"group_id": group_code})
    # check whether this is the user's first imported group
    await checkout_user_has_group(user_id)
    await send_text_msg(robot_code, mem_code,
                        GROUP_WAIT_STAR_RATE.format(group_name=name))

    result, distribute_robot_id = await check_robot_amount_distribute(
        user_id, mem_code, channel, robot_code)
    if distribute_robot_id is not None:
        if distribute_robot_id != robot_id:
            robot = await get_robot_by_id(distribute_robot_id)
            robot_name = robot.get('name')
            qr_code = robot.get('qr_code')
            send_msg = DISTRIBUTE_NEW_ROBOT.format(robot_name=robot_name)
            await send_text_msg(robot_code, mem_code, send_msg)
            await send_image_msg(robot_code, mem_code, qr_code)
    else:
        logger.warning(
            'request [robot_distribution] to distribute a robot returned None')
        await send_text_msg(robot_code, mem_code, NO_ROBOT_WORD)
    return
Example 13
import grp
import os
import socket

from sanic.log import logger

from admin.views import app

if __name__ == "__main__":
    if app.config["DEBUG"]:
        app.run(host="127.0.0.1", port=8000, debug=True)
    else:
        # Remove the old socket file (if any).
        try:
            os.unlink(app.config["SOCKET_FILE"])
        except FileNotFoundError as e:
            logger.info(f"No old socket file found: {e}")

        # Create socket and run app.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
            try:
                sock.bind(app.config["SOCKET_FILE"])

                os.chmod(app.config["SOCKET_FILE"], 0o775)
                os.chown(app.config["SOCKET_FILE"], -1,
                         grp.getgrnam("nginx").gr_gid)

                app.run(sock=sock, access_log=False)
            except OSError as e:
                logger.warning(e)
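
To smoke-test the resulting unix-socket server without nginx, one can speak HTTP over the socket directly from the stdlib (the socket path below is a placeholder for app.config["SOCKET_FILE"]):

import socket

with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as client:
    client.connect("/tmp/app.sock")  # placeholder path
    client.sendall(b"GET / HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
    chunks = []
    while True:  # read until the server closes the connection
        data = client.recv(4096)
        if not data:
            break
        chunks.append(data)

print(b"".join(chunks).decode("utf-8", errors="replace"))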
Example 14
    async def request(self, *args, **kwargs):
        request_hooks = kwargs.pop('request_hooks', self.request_hooks)
        response_hooks = kwargs.pop('response_hooks', self.response_hooks)

        for request_hook in request_hooks:
            getattr(self, request_hook)(kwargs)

        http_status = 599
        http_client_name = type(self).__name__
        # defaults so the error paths below never reference unbound locals
        _response, _response_text = {}, ''
        try:
            async with aiohttp.ClientSession(
                    connector=aiohttp.TCPConnector(verify_ssl=False),
                    trace_configs=[self.trace_config]) as session:
                with async_timeout.timeout(self.timeout):
                    async with session.request(*args, **kwargs) as response:
                        http_status = response.status
                        _response_text = await response.text()
                        try:
                            _response = await response.json()
                        except ContentTypeError:
                            _response = {}
                        except JSONDecodeError:
                            logger.error('request JSONDecodeError text %s',
                                         _response_text)
                            _response = {}
                        if http_status >= 400:
                            message = ClientHttpError.MESSAGE.format(
                                http_client_name, kwargs['url'],
                                (args, kwargs), response.status, _response,
                                _response_text)
                            if http_status >= 500:
                                logger.error(
                                    f'BaseClient 5xx {http_client_name}: status_code={response.status} '
                                    'url=%s request=%s response=%s response_raw=%s',
                                    kwargs['url'], (args, kwargs), _response,
                                    _response_text)
                            else:
                                logger.warning(message)
                            raise ClientHttpError(http_client=http_client_name,
                                                  url=kwargs['url'],
                                                  request=(args, kwargs),
                                                  status_code=http_status,
                                                  response=_response,
                                                  response_text=_response_text)

                        for response_hook in response_hooks:
                            getattr(self, response_hook)(_response)

                        return _response
        except Exception as ex:
            try:
                logger.exception(
                    f"BaseClient.request HTTP CLIENT ERROR {http_client_name}: "
                    f"status_code={http_status}; url=%s; args=%s; kwargs=%s",
                    kwargs.get('url'), args, kwargs)
                raise ex
            except TimeoutError:
                logger.error(
                    f'BaseClient Timeout {http_client_name}: status_code={http_status} '
                    'url=%s request=%s ', kwargs['url'], (args, kwargs))
                raise ClientHttpError(http_client=http_client_name,
                                      url=kwargs['url'],
                                      request=(args, kwargs),
                                      status_code=http_status,
                                      response=_response,
                                      response_text=_response_text)
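
The hook mechanism resolves method names on self via getattr, so a subclass opts in by listing method names; a sketch (the BaseClient name is inferred from the log messages above and assumed here):

class MyServiceClient(BaseClient):  # BaseClient: the (assumed) class defining request()
    request_hooks = ('add_auth_header',)
    response_hooks = ('log_request_id',)

    def add_auth_header(self, kwargs):
        # called with the outgoing kwargs before the request is sent
        headers = kwargs.setdefault('headers', {})
        headers['Authorization'] = 'Bearer <token>'  # placeholder token

    def log_request_id(self, response):
        # called with the parsed JSON body of a successful response
        logger.info('request id: %s', response.get('request_id'))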
Example 15
    def prepare(
        self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        *,
        dev: bool = False,
        debug: bool = False,
        auto_reload: Optional[bool] = None,
        ssl: Union[None, SSLContext, dict, str, list, tuple] = None,
        sock: Optional[socket] = None,
        workers: int = 1,
        protocol: Optional[Type[Protocol]] = None,
        backlog: int = 100,
        register_sys_signals: bool = True,
        access_log: Optional[bool] = None,
        unix: Optional[str] = None,
        loop: Optional[AbstractEventLoop] = None,
        reload_dir: Optional[Union[List[str], str]] = None,
        noisy_exceptions: Optional[bool] = None,
        motd: bool = True,
        fast: bool = False,
        verbosity: int = 0,
        motd_display: Optional[Dict[str, str]] = None,
    ) -> None:
        if dev:
            debug = True
            auto_reload = True

        self.state.verbosity = verbosity
        if not self.state.auto_reload:
            self.state.auto_reload = bool(auto_reload)

        if fast and workers != 1:
            raise RuntimeError("You cannot use both fast=True and workers=X")

        if motd_display:
            self.config.MOTD_DISPLAY.update(motd_display)

        if reload_dir:
            if isinstance(reload_dir, str):
                reload_dir = [reload_dir]

            for directory in reload_dir:
                direc = Path(directory)
                if not direc.is_dir():
                    logger.warning(
                        f"Directory {directory} could not be located"
                    )
                self.state.reload_dirs.add(Path(directory))

        if loop is not None:
            raise TypeError(
                "loop is not a valid argument. To use an existing loop, "
                "change to create_server().\nSee more: "
                "https://sanic.readthedocs.io/en/latest/sanic/deploying.html"
                "#asynchronous-support"
            )

        if (
            self.__class__.should_auto_reload()
            and os.environ.get("SANIC_SERVER_RUNNING") != "true"
        ):  # no cov
            return

        if sock is None:
            host, port = host or "127.0.0.1", port or 8000

        if protocol is None:
            protocol = (
                WebSocketProtocol if self.websocket_enabled else HttpProtocol
            )

        # Set explicitly passed configuration values
        for attribute, value in {
            "ACCESS_LOG": access_log,
            "AUTO_RELOAD": auto_reload,
            "MOTD": motd,
            "NOISY_EXCEPTIONS": noisy_exceptions,
        }.items():
            if value is not None:
                setattr(self.config, attribute, value)

        if fast:
            self.state.fast = True
            try:
                workers = len(os.sched_getaffinity(0))
            except AttributeError:  # no cov
                workers = os.cpu_count() or 1

        server_settings = self._helper(
            host=host,
            port=port,
            debug=debug,
            ssl=ssl,
            sock=sock,
            unix=unix,
            workers=workers,
            protocol=protocol,
            backlog=backlog,
            register_sys_signals=register_sys_signals,
        )
        self.state.server_info.append(
            ApplicationServerInfo(settings=server_settings)
        )

        if self.config.USE_UVLOOP is True or (
            self.config.USE_UVLOOP is _default and not OS_IS_WINDOWS
        ):
            try_use_uvloop()
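
# A usage sketch for prepare() above, assuming Sanic >= 21.12, where
# prepare() queues server settings and the Sanic.serve() classmethod then
# starts everything that was prepared:
from sanic import Sanic
from sanic.response import text

app = Sanic("PrepareExample")

@app.get("/")
async def index(request):
    return text("ok")

if __name__ == "__main__":
    app.prepare(host="127.0.0.1", port=8000, dev=True)
    Sanic.serve()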
Esempio n. 16
0
    def run(self):
        is_debug = self.log.loglevel == logging.DEBUG
        protocol = (
            self.websocket_protocol
            if self.app.callable.websocket_enabled
            else self.http_protocol
        )

        self._server_settings = self.app.callable._helper(
            loop=self.loop,
            debug=is_debug,
            protocol=protocol,
            ssl=self.ssl_context,
            run_async=True,
        )
        self._server_settings["signal"] = self.signal
        self._server_settings.pop("sock")
        self._await(self.app.callable._startup())
        self._await(
            self.app.callable._server_event("init", "before", loop=self.loop)
        )

        main_start = self._server_settings.pop("main_start", None)
        main_stop = self._server_settings.pop("main_stop", None)

        if main_start or main_stop:  # noqa
            logger.warning(
                "Listener events for the main process are not available "
                "with GunicornWorker"
            )

        try:
            self._await(self._run())
            self.app.callable.is_running = True
            self._await(
                self.app.callable._server_event(
                    "init", "after", loop=self.loop
                )
            )
            self.loop.run_until_complete(self._check_alive())
            self._await(
                self.app.callable._server_event(
                    "shutdown", "before", loop=self.loop
                )
            )
            self.loop.run_until_complete(self.close())
        except BaseException:
            traceback.print_exc()
        finally:
            try:
                self._await(
                    self.app.callable._server_event(
                        "shutdown", "after", loop=self.loop
                    )
                )
            except BaseException:
                traceback.print_exc()
            finally:
                self.loop.close()

        sys.exit(self.exit_code)
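
# The worker above is selected from the gunicorn command line, e.g.
#   gunicorn app:app --worker-class sanic.worker.GunicornWorker
# (assuming a Sanic version that still ships GunicornWorker).
# A minimal app module such a command could load:
from sanic import Sanic
from sanic.response import json

app = Sanic("GunicornExample")

@app.get("/ping")
async def ping(request):
    return json({"ok": True})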
Esempio n. 17
0
    async def create_server(
        self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        *,
        debug: bool = False,
        ssl: Union[None, SSLContext, dict, str, list, tuple] = None,
        sock: Optional[socket] = None,
        protocol: Type[Protocol] = None,
        backlog: int = 100,
        access_log: Optional[bool] = None,
        unix: Optional[str] = None,
        return_asyncio_server: bool = False,
        asyncio_server_kwargs: Dict[str, Any] = None,
        noisy_exceptions: Optional[bool] = None,
    ) -> Optional[AsyncioServer]:
        """
        Asynchronous version of :func:`run`.

        This method will take care of the operations necessary to invoke
        the *before_start* events via :func:`trigger_events` method invocation
        before starting the *sanic* app in Async mode.

        .. note::
            This does not support multiprocessing and is not the preferred
            way to run a :class:`Sanic` application.

        :param host: Address to host on
        :type host: str
        :param port: Port to host on
        :type port: int
        :param debug: Enables debug output (slows server)
        :type debug: bool
        :param ssl: SSLContext, or location of certificate and key
                    for SSL encryption of worker(s)
        :type ssl: SSLContext or dict
        :param sock: Socket for the server to accept connections from
        :type sock: socket
        :param protocol: Subclass of asyncio Protocol class
        :type protocol: type[Protocol]
        :param backlog: a number of unaccepted connections that the system
                        will allow before refusing new connections
        :type backlog: int
        :param access_log: Enables writing access logs (slows server)
        :type access_log: bool
        :param return_asyncio_server: flag that defines whether there's a need
                                      to return asyncio.Server or
                                      start it serving right away
        :type return_asyncio_server: bool
        :param asyncio_server_kwargs: key-value arguments for
                                      asyncio/uvloop create_server method
        :type asyncio_server_kwargs: dict
        :param noisy_exceptions: Log exceptions that are normally considered
                                 to be quiet/silent
        :type noisy_exceptions: bool
        :return: AsyncioServer if return_asyncio_server is true, else Nothing
        """

        if sock is None:
            host, port = host or "127.0.0.1", port or 8000

        if protocol is None:
            protocol = (
                WebSocketProtocol if self.websocket_enabled else HttpProtocol
            )

        # Set explicitly passed configuration values
        for attribute, value in {
            "ACCESS_LOG": access_log,
            "NOISY_EXCEPTIONS": noisy_exceptions,
        }.items():
            if value is not None:
                setattr(self.config, attribute, value)

        server_settings = self._helper(
            host=host,
            port=port,
            debug=debug,
            ssl=ssl,
            sock=sock,
            unix=unix,
            loop=get_event_loop(),
            protocol=protocol,
            backlog=backlog,
            run_async=return_asyncio_server,
        )

        if self.config.USE_UVLOOP is not _default:
            error_logger.warning(
                "You are trying to change the uvloop configuration, but "
                "this is only effective when using the run(...) method. "
                "When using the create_server(...) method Sanic will use "
                "the already existing loop."
            )

        main_start = server_settings.pop("main_start", None)
        main_stop = server_settings.pop("main_stop", None)
        if main_start or main_stop:
            logger.warning(
                "Listener events for the main process are not available "
                "with create_server()"
            )

        return await serve(
            asyncio_server_kwargs=asyncio_server_kwargs, **server_settings
        )
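
# A sketch of the async path the docstring above describes: obtain the
# AsyncioServer without serving, then drive it from your own loop
# (startup()/serve_forever() assumed per the Sanic 21.x AsyncioServer API):
import asyncio
from sanic import Sanic
from sanic.response import text

app = Sanic("CreateServerExample")

@app.get("/")
async def index(request):
    return text("hello")

async def main():
    server = await app.create_server(
        host="127.0.0.1", port=8000, return_asyncio_server=True
    )
    await server.startup()
    await server.serve_forever()

if __name__ == "__main__":
    asyncio.run(main())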
Esempio n. 18
0
    async def fetch_data(self, url, session):
        try:
            async with session.get(url) as response:
                data = await response.text()
                data = pjson.loads(data)
                cnfy_id = 'cnfy-{}'.format(str(uuid.uuid4()))

                for tx in data['transactions']:
                    if tx['type'] in [4] and tx['feeAssetId'] == os.environ['ASSET_ID']:
                        
                        attachment_base58 = base58.b58decode(tx['attachment']).decode('utf-8')
                        attachment = None
                        try:
                            attachment = requests.get('{0}:{1}/ipfs/{2}'.format(config['ipfs']['host'], config['ipfs']['port'], attachment_base58), timeout=2).text
                        except Exception as error:
                            logger.error('IPFS Error: {0}'.format(error))

                        if attachment is None:
                            logger.warning('CONTINUE ON IPFS HASH {0}'.format(attachment_base58))
                            continue

                        attachment_hash = hashlib.sha256(attachment.encode('utf-8')).hexdigest()

                        root = ET.fromstring(attachment)
                        version = root.findall('version')[0].text if len(root.findall('version')) > 0 else None
                        blockchain = root.findall('blockchain')[0].text if len(root.findall('blockchain')) > 0 else None
                        network = root.findall('network')[0].text if len(root.findall('network')) > 0 else None
                        operations = root.findall('operations')[0] if len(root.findall('operations')) > 0 else []

                        if str(version) != str(os.environ['CDM_VERSION']):
                            continue
                        
                        operation_create = operations.findall('create')[0] if len(operations.findall('create')) > 0 else None
                        operation_insert = operations.findall('insert')[0] if len(operations.findall('insert')) > 0 else None
                        if (operation_create):
                            table_ciphertext = None
                            table_sha256hash = None
                            table = operation_create.findall('table')[0] if len(operation_create.findall('table')) > 0 else None
                            
                            recipient_public_key = None
                            recipient = operation_create.findall('recipient')[0] if len(operation_create.findall('recipient')) > 0 else None
                            if recipient:
                                recipient_public_key = recipient.findall('publickey')[0].text if len(recipient.findall('publickey')) > 0 else None
                                
                            if table:
                                table_ciphertext = table.findall('ciphertext')[0].text if len(table.findall('ciphertext')) > 0 else None
                                table_sha256hash = table.findall('sha256')[0].text if len(table.findall('sha256')) > 0 else None

                                self.sql_data_tables.append((
                                    table_sha256hash,
                                    tx['id'],
                                    table_ciphertext,
                                    recipient_public_key
                                ))

                            columns = operation_create.findall('columns')[0] if len(operation_create.findall('columns')) > 0 else None
                            if columns:
                                cols = columns.findall('column') if len(columns.findall('column')) > 0 else None
                                for col in cols:
                                    col_ciphertext = None
                                    col_sha256hash = None

                                    if col:
                                        col_ciphertext = col.findall('ciphertext')[0].text if len(col.findall('ciphertext')) > 0 else None
                                        col_sha256hash = col.findall('sha256')[0].text if len(col.findall('sha256')) > 0 else None

                                        self.sql_data_columns.append((
                                            col_sha256hash,
                                            table_sha256hash,
                                            col_ciphertext,
                                            recipient_public_key
                                        ))

                        if (operation_insert):
                            table_ciphertext = None
                            table_sha256hash = None
                            table = operation_insert.findall('table')[0] if len(operation_insert.findall('table')) > 0 else None

                            recipient_public_key = None
                            recipient = operation_insert.findall('recipient')[0] if len(operation_insert.findall('recipient')) > 0 else None
                            if recipient:
                                recipient_public_key = recipient.findall('publickey')[0].text if len(recipient.findall('publickey')) > 0 else None
                                
                            columns = operation_insert.findall('columns')[0] if len(operation_insert.findall('columns')) > 0 else None
                            if columns:
                                cols = columns.findall('column') if len(columns.findall('column')) > 0 else None
                                for col in cols:
                                    col_ciphertext = None
                                    col_sha256hash = None
                                    val_ciphertext = None
                                    val_sha256hash = None

                                    if col:
                                        col_ciphertext = col.findall('ciphertext')[0].text if len(col.findall('ciphertext')) > 0 else None
                                        col_sha256hash = col.findall('sha256')[0].text if len(col.findall('sha256')) > 0 else None
                                        
                                        value = col.findall('value')[0] if len(col.findall('value')) > 0 else None
                                        if value:
                                            val_ciphertext = value.findall('ciphertext')[0].text if len(value.findall('ciphertext')) > 0 else None
                                            val_sha256hash = value.findall('sha256')[0].text if len(value.findall('sha256')) > 0 else None
                                        
                                        self.sql_data_values.append((
                                            val_sha256hash,
                                            col_sha256hash,
                                            val_ciphertext,
                                            col_ciphertext,
                                            recipient_public_key
                                        ))
                                        print(self.sql_data_values)

                        tx_data = (
                            tx['id'],
                            data['height'],
                            tx['type'],
                            tx['sender'],
                            tx['senderPublicKey'],
                            tx['recipient'],
                            tx['amount'],
                            tx['assetId'],
                            tx['feeAssetId'],
                            tx['feeAsset'],
                            tx['fee'],
                            tx['attachment'],
                            tx['version'],
                            datetime.fromtimestamp(tx['timestamp'] / 1e3),
                            cnfy_id,
                            attachment_hash
                        )
                        
                        self.sql_data_transactions.append(tx_data)

                        for proof in tx['proofs']:
                            proof_id = 'proof-' + str(uuid.uuid4())
                            self.sql_data_proofs.append((tx['id'], proof, proof_id))

        except asyncio.CancelledError:
            logger.info('Parser has been stopped')
            raise
        except Exception as error:
            logger.error('Fetching data error: {}'.format(error))
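
# The parser above repeats the pattern
#   node.findall(tag)[0].text if len(node.findall(tag)) > 0 else None
# A small helper (hypothetical, not in the source) collapses it; the
# stdlib's node.findtext(tag) is close but returns '' for empty elements:
def find_text(node, tag):
    # Text of the first matching child element, or None if absent.
    found = node.findall(tag)
    return found[0].text if found else None

# e.g. version = find_text(root, 'version')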
Esempio n. 19
0
        })

    if request.method == 'HEAD':
        return response.HTTPResponse(content_type=MIME_TS, status=200)

    log.info(f'[{request.ip}] Start: {u7d_msg}')
    with closing(await asyncio_dgram.bind((IPTV, client_port))) as stream:
        timedout = False
        respond = await request.respond(content_type=MIME_TS)
        try:
            await respond.send((await asyncio.wait_for(stream.recv(), 1))[0])
            while True:
                await respond.send(
                    (await asyncio.wait_for(stream.recv(), 0.25))[0])
        except Exception as ex:
            log.warning(f'[{request.ip}] {repr(ex)}')
            if isinstance(ex, TimeoutError):
                timedout = True
        finally:
            log.info(f'[{request.ip}] End: {u7d_msg}')
            try:
                if timedout:
                    await respond.send(end_stream=True)
                else:
                    u7d.send_signal(signal.SIGINT)
            except Exception as ex:
                log.warning(f'[{request.ip}] {repr(ex)}')
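
# A minimal sketch of the asyncio_dgram usage above (the third-party
# asyncio-dgram package is assumed): bind a UDP socket, await one datagram.
import asyncio
import asyncio_dgram

async def read_one_datagram(host: str, port: int, timeout: float = 1.0) -> bytes:
    stream = await asyncio_dgram.bind((host, port))
    try:
        # recv() resolves to (data, remote_addr); the handler above keeps [0]
        data, _addr = await asyncio.wait_for(stream.recv(), timeout)
        return data
    finally:
        stream.close()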


@app.get('/favicon.ico')
async def handle_notfound(request):
Esempio n. 20
0
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None, debug=False,
          request_timeout=60, response_timeout=60, keep_alive_timeout=60,
          ssl=None, sock=None, request_max_size=None, reuse_port=False,
          loop=None, protocol=HttpProtocol, backlog=100,
          register_sys_signals=True, run_async=False, connections=None,
          signal=Signal(), request_class=None, access_log=True,
          keep_alive=True, is_request_stream=False, router=None,
          websocket_max_size=None, websocket_max_queue=None, state=None,
          graceful_shutdown_timeout=15.0):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param is_request_stream: disable/enable Request.stream
    :param router: Router object
    :return: Nothing
    """
    if not run_async:
        loop = async_loop.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        state=state,
        debug=debug,
    )

    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog
    )

    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    if run_async:
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Register signals for graceful termination
    if register_sys_signals:
        for _signal in (SIGINT, SIGTERM):
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning('Sanic tried to use loop.add_signal_handler '
                               'but it is not implemented on this platform.')
    pid = os.getpid()
    try:
        logger.info('Starting worker [%s]', pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Graceful shutdown timeout.
        # Honor graceful_shutdown_timeout instead of letting
        # connections hang forever; roughly track elapsed time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection(force=True))
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
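
# With run_async=True, serve() above returns the create_server coroutine
# instead of blocking on the loop; a sketch of that path (handler and
# errors are hypothetical placeholders, not from the source):
import asyncio

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
server_coroutine = serve(host='127.0.0.1', port=8000,
                         request_handler=handler, error_handler=errors,
                         run_async=True, loop=loop)
http_server = loop.run_until_complete(server_coroutine)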
Esempio n. 21
0
async def robot_distribution_view(request):
    """
    - Assign a robot
    - Update or bind the mentor-apprentice relationship
    - Create the robot-user display record
    """
    async def exists_user_access(union_id):
        async with db.conn.acquire() as con:
            select_stmt = await con.prepare(
                '''select "robot_access".id from "robot_access" join "robot" on "robot".id="robot_access".robot_id 
                   where "robot_access".union_id=$1 and "robot".status<>3 
                   order by "robot_access".create_date desc limit 1''')
            access_id = await select_stmt.fetchval(union_id)
            return access_id

    async def update_user_access(user_access_id,
                                 sharing_user_id=None,
                                 channel=None):
        if not sharing_user_id:
            is_sharing_user = False
        else:
            is_sharing_user = True
        async with db.conn.acquire() as con:
            update_stmt = await con.prepare(
                '''update "robot_access" set sharing_user_id=case when $1 then $2 else sharing_user_id end, 
                   channel=case when $3 then $4 else channel end, 
                   update_date=now() where id=$5 returning robot_id''')
            return await update_stmt.fetchval(is_sharing_user, sharing_user_id,
                                              is_sharing_user, channel,
                                              user_access_id)

    async def create_user_access(open_id,
                                 union_id,
                                 robot_id,
                                 sharing_user_id=None,
                                 channel=None):
        async with db.conn.acquire() as con:
            insert_stmt = await con.prepare('''insert into "robot_access" 
                   (id, open_id, union_id, robot_id, sharing_user_id, channel, status)
                   values (uuid_generate_v4(), $1, $2, $3, $4, $5, $6)''')
            await insert_stmt.fetch(open_id, union_id, robot_id,
                                    sharing_user_id, channel, CREATE_STATUS)

    async def robot_info(robot_id):
        '''Query robot info.'''
        async with db.conn.acquire() as con:
            select_stmt = await con.prepare(
                'select head_url, qr_code, wechat_no, name from "robot" where id=$1'
            )
            robot = await select_stmt.fetchrow(robot_id)
        return robot

    open_id = request.json.get('open_id')
    union_id = request.json.get('union_id')
    if not open_id or not union_id:
        return response_json(None,
                             code=PARAMS_ERROR_CODE,
                             msg='missing required parameter open_id or union_id')
    sharing_user_id = request.json.get('sharing_user_id', None)
    channel = request.json.get('channel', None)
    access_id = await exists_user_access(union_id)
    if access_id:
        if not sharing_user_id or sharing_user_id == 'null':
            sharing_user_id = None
        if not channel:
            channel = None
        robot_id = await update_user_access(access_id, sharing_user_id,
                                            channel)
    else:
        robot_id = await robot_distribution(channel=channel)
        if not robot_id:
            logger.warning(f'no robot available to assign to user [{open_id}]')
            return response_json(None,
                                 code=ROBOT_NOT_ENOUGH_CODE,
                                 msg='no robot available for assignment')
        await create_user_access(open_id, union_id, robot_id, sharing_user_id,
                                 channel)
        await robot_today_display_record(robot_id)
    logger.info(f'assigned robot [{robot_id}] to user [{open_id}]')
    robot = await robot_info(robot_id)
    return response_json(dict(robot))
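
# The helpers above all use one asyncpg pattern: acquire a connection,
# prepare a statement with $n placeholders, then fetchval/fetchrow/fetch.
# A stand-alone sketch (taking the pool as an argument is an assumption;
# the source reaches it through db.conn):
async def get_robot_name(pool, robot_id):
    async with pool.acquire() as con:
        stmt = await con.prepare('select name from "robot" where id=$1')
        return await stmt.fetchval(robot_id)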
Esempio n. 22
0
def not_found(request, exception):
    logger.warning(f'Unhandled route {request.method} {request.path}')
    return json({'error': 'Unexpected route'}, status=404)


async def after_server_start(_: Sanic, __: AbstractEventLoop):
    logger.warning('Server successfully started!')
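
# A sketch of how handlers like these are usually wired up (the source does
# not show this registration; the app name is hypothetical):
from sanic import Sanic
from sanic.exceptions import NotFound

app = Sanic("ExampleApp")
app.error_handler.add(NotFound, not_found)
app.register_listener(after_server_start, 'after_server_start')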
Esempio n. 24
0
    async def create_server(
        self,
        host: Optional[str] = None,
        port: Optional[int] = None,
        *,
        debug: bool = False,
        ssl: Union[dict, SSLContext, None] = None,
        sock: Optional[socket] = None,
        protocol: Type[Protocol] = None,
        backlog: int = 100,
        access_log: Optional[bool] = None,
        unix: Optional[str] = None,
        return_asyncio_server: bool = False,
        asyncio_server_kwargs: Dict[str, Any] = None,
    ) -> Optional[AsyncioServer]:
        """
        Asynchronous version of :func:`run`.

        This method will take care of the operations necessary to invoke
        the *before_start* events via :func:`trigger_events` method invocation
        before starting the *sanic* app in Async mode.

        .. note::
            This does not support multiprocessing and is not the preferred
            way to run a :class:`Sanic` application.

        :param host: Address to host on
        :type host: str
        :param port: Port to host on
        :type port: int
        :param debug: Enables debug output (slows server)
        :type debug: bool
        :param ssl: SSLContext, or location of certificate and key
                    for SSL encryption of worker(s)
        :type ssl: SSLContext or dict
        :param sock: Socket for the server to accept connections from
        :type sock: socket
        :param protocol: Subclass of asyncio Protocol class
        :type protocol: type[Protocol]
        :param backlog: a number of unaccepted connections that the system
                        will allow before refusing new connections
        :type backlog: int
        :param access_log: Enables writing access logs (slows server)
        :type access_log: bool
        :param return_asyncio_server: flag that defines whether there's a need
                                      to return asyncio.Server or
                                      start it serving right away
        :type return_asyncio_server: bool
        :param asyncio_server_kwargs: key-value arguments for
                                      asyncio/uvloop create_server method
        :type asyncio_server_kwargs: dict
        :return: AsyncioServer if return_asyncio_server is true, else Nothing
        """

        if sock is None:
            host, port = host or "127.0.0.1", port or 8000

        if protocol is None:
            protocol = (WebSocketProtocol
                        if self.websocket_enabled else HttpProtocol)
        # if access_log is passed explicitly change config.ACCESS_LOG
        if access_log is not None:
            self.config.ACCESS_LOG = access_log

        server_settings = self._helper(
            host=host,
            port=port,
            debug=debug,
            ssl=ssl,
            sock=sock,
            unix=unix,
            loop=get_event_loop(),
            protocol=protocol,
            backlog=backlog,
            run_async=return_asyncio_server,
        )

        # Trigger before_start events
        await self.trigger_events(
            server_settings.get("before_start", []),
            server_settings.get("loop"),
        )
        main_start = server_settings.pop("main_start", None)
        main_stop = server_settings.pop("main_stop", None)
        if main_start or main_stop:
            logger.warning(
                "Listener events for the main process are not available "
                "with create_server()")

        return await serve(asyncio_server_kwargs=asyncio_server_kwargs,
                           **server_settings)


async def after_server_stop(_: Sanic, __: AbstractEventLoop):
    from .core import mongo
    mongo.close()
    logger.warning('Stopped.')
Esempio n. 26
0
    async def http1_response_header(self, data: bytes,
                                    end_stream: bool) -> None:
        res = self.response

        # Compatibility with simple response body
        if not data and getattr(res, "body", None):
            data, end_stream = res.body, True  # type: ignore

        size = len(data)
        headers = res.headers
        status = res.status

        if not isinstance(status, int) or status < 200:
            raise RuntimeError(f"Invalid response status {status!r}")

        if not has_message_body(status):
            # Header-only response status
            self.response_func = None
            if (data or not end_stream or "content-length" in headers
                    or "transfer-encoding" in headers):
                data, size, end_stream = b"", 0, True
                headers.pop("content-length", None)
                headers.pop("transfer-encoding", None)
                logger.warning(
                    f"Message body set in response on {self.request.path}. "
                    f"A {status} response may only have headers, no body.")
        elif self.head_only and "content-length" in headers:
            self.response_func = None
        elif end_stream:
            # Non-streaming response (all in one block)
            headers["content-length"] = size
            self.response_func = None
        elif "content-length" in headers:
            # Streaming response with size known in advance
            self.response_bytes_left = int(headers["content-length"]) - size
            self.response_func = self.http1_response_normal
        else:
            # Length not known, use chunked encoding
            headers["transfer-encoding"] = "chunked"
            data = b"%x\r\n%b\r\n" % (size, data) if size else b""
            self.response_func = self.http1_response_chunked

        if self.head_only:
            # Head request: don't send body
            data = b""
            self.response_func = self.head_response_ignored

        headers["connection"] = "keep-alive" if self.keep_alive else "close"
        ret = format_http1_response(status, res.processed_headers)
        if data:
            ret += data

        # Send a 100-continue if expected and not Expectation Failed
        if self.expecting_continue:
            self.expecting_continue = False
            if status != 417:
                ret = HTTP_CONTINUE + ret

        # Send response
        if self.protocol.access_log:
            self.log_response()

        await self._send(ret)
        self.stage = Stage.IDLE if end_stream else Stage.RESPONSE
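
# The chunked branch above frames each block as b"%x\r\n%b\r\n": the size in
# hex, CRLF, the payload, CRLF. A stand-alone illustration (not Sanic code):
def chunk(data: bytes) -> bytes:
    # Encode one HTTP/1.1 chunk; chunk(b"") yields the b"0\r\n\r\n" terminator.
    return b"%x\r\n%b\r\n" % (len(data), data)

assert chunk(b"hi") == b"2\r\nhi\r\n"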
Esempio n. 27
0
    async def fetch_data(self, url, session):
        try:
            async with session.get(url) as response:
                data = await response.text()
                data = pjson.loads(data)
                cnfy_id = 'cnfy-{}'.format(str(uuid.uuid4()))

                for tx in data['transactions']:
                    if tx['type'] in [
                            4
                    ] and tx['feeAssetId'] == os.environ['ASSET_ID']:

                        attachment_base58 = base58.b58decode(
                            tx['attachment']).decode('utf-8')
                        attachment = None
                        try:
                            attachment = requests.get(
                                '{0}:{1}/ipfs/{2}'.format(
                                    config['ipfs']['host'],
                                    config['ipfs']['port'], attachment_base58),
                                timeout=2).text
                        except Exception as error:
                            logger.error('IPFS Error: {0}'.format(error))

                        if attachment is None:
                            logger.warning('CONTINUE ON IPFS HASH {0}'.format(
                                attachment_base58))
                            continue
                        else:
                            self.transactions_in_batch += 1
                            logger.info(
                                'TX detected. Height: {0}, IPFS Hash: {1}'.
                                format(self.height, attachment_base58))

                        root = ET.fromstring(attachment)
                        version = root.findall('version')[0].text if len(
                            root.findall('version')) > 0 else None
                        blockchain = root.findall('blockchain')[0].text if len(
                            root.findall('blockchain')) > 0 else None
                        network = root.findall('network')[0].text if len(
                            root.findall('network')) > 0 else None
                        messages = root.findall('messages')[0] if len(
                            root.findall('messages')) > 0 else []

                        # if str(version) != str(os.environ['CDM_VERSION']):
                        #     continue

                        for message in messages:
                            to_public_key = None
                            cc_public_key = None
                            to_public_key_ciphertext = None
                            to_public_key_sha256hash = None
                            cc_public_key_ciphertext = None
                            cc_public_key_sha256hash = None

                            to = message.findall('to')[0] if len(
                                message.findall('to')) > 0 else None
                            cc = message.findall('cc')[0] if len(
                                message.findall('cc')) > 0 else None
                            if to:
                                to_public_key = to.findall(
                                    'publickey')[0].text if len(
                                        to.findall('publickey')) > 0 else None
                                to_public_key_ciphertext = to.findall(
                                    'ciphertext')[0].text if len(
                                        to.findall('ciphertext')) > 0 else None
                                to_public_key_sha256hash = to.findall(
                                    'sha256')[0].text if len(
                                        to.findall('sha256')) > 0 else None
                            if cc:
                                cc_public_key = cc.findall(
                                    'publickey')[0].text if len(
                                        cc.findall('publickey')) > 0 else None
                                cc_public_key_ciphertext = cc.findall(
                                    'ciphertext')[0].text if len(
                                        cc.findall('ciphertext')) > 0 else None
                                cc_public_key_sha256hash = cc.findall(
                                    'sha256')[0].text if len(
                                        cc.findall('sha256')) > 0 else None

                            subject_ciphertext = None
                            subject_sha256hash = None
                            subject = message.findall('subject')[0] if len(
                                message.findall('subject')) > 0 else None
                            if subject:
                                subject_ciphertext = subject.findall(
                                    'ciphertext')[0].text if len(
                                        subject.findall(
                                            'ciphertext')) > 0 else None
                                subject_sha256hash = subject.findall(
                                    'sha256'
                                )[0].text if len(
                                    subject.findall('sha256')) > 0 else None

                            body_ciphertext = None
                            body_sha256hash = None
                            body = message.findall('body')[0] if len(
                                message.findall('body')) > 0 else None
                            if body:
                                body_ciphertext = body.findall(
                                    'ciphertext'
                                )[0].text if len(
                                    body.findall('ciphertext')) > 0 else None
                                body_sha256hash = body.findall(
                                    'sha256')[0].text if len(
                                        body.findall('sha256')) > 0 else None

                            if version == '0.7':
                                recipient_public_key = to_public_key if to else cc_public_key
                                recipient_public_key_ciphertext = recipient_public_key
                                recipient_public_key_sha256hash = hashlib.sha256(
                                    recipient_public_key.encode(
                                        'utf-8')).hexdigest()
                            else:
                                recipient_public_key_ciphertext = to_public_key_ciphertext if to else cc_public_key_ciphertext
                                recipient_public_key_sha256hash = to_public_key_sha256hash if to else cc_public_key_sha256hash

                            recipient_type = 'to' if to else 'cc'
                            thread_hash = hashlib.sha256(''.join([
                                subject_sha256hash or '', body_sha256hash or ''
                            ]).encode('utf-8')).hexdigest()

                            re_subject_hash = None
                            re_message_hash = None
                            regarding = message.findall('regarding')[0] if len(
                                message.findall('regarding')) > 0 else None
                            if regarding:
                                re_subject_hash = regarding.findall(
                                    'subjecthash')[0].text if len(
                                        regarding.findall(
                                            'subjecthash')) > 0 else None
                                re_message_hash = regarding.findall(
                                    'messagehash')[0].text if len(
                                        regarding.findall(
                                            'messagehash')) > 0 else None

                                thread_hash = hashlib.sha256(''.join([
                                    re_subject_hash or '', re_message_hash
                                    or ''
                                ]).encode('utf-8')).hexdigest()

                            fwd_subject_hash = None
                            fwd_message_hash = None
                            forwarded = message.findall('forwarded')[0] if len(
                                message.findall('forwarded')) > 0 else None
                            if forwarded:
                                fwd_subject_hash = forwarded.findall(
                                    'subjecthash')[0].text if len(
                                        forwarded.findall(
                                            'subjecthash')) > 0 else None
                                fwd_message_hash = forwarded.findall(
                                    'messagehash')[0].text if len(
                                        forwarded.findall(
                                            'messagehash')) > 0 else None

                            cdm_id = 'cdm-' + str(uuid.uuid4())
                            self.sql_data_cdms.append(
                                (cdm_id, tx['id'],
                                 recipient_public_key_ciphertext,
                                 recipient_public_key_sha256hash,
                                 subject_ciphertext, subject_sha256hash,
                                 body_ciphertext, body_sha256hash, thread_hash,
                                 blockchain, network, recipient_type,
                                 re_subject_hash, re_message_hash,
                                 fwd_subject_hash, fwd_message_hash,
                                 datetime.fromtimestamp(tx['timestamp'] /
                                                        1e3), version))

                            from_block = message.findall('from')[0] if len(
                                message.findall('from')) > 0 else None
                            if from_block:
                                senders = from_block.findall('sender') if len(
                                    from_block.findall('sender')) > 0 else None
                                for sender in senders:
                                    sender_public_key = sender.findall(
                                        'publickey')[0].text if len(
                                            sender.findall(
                                                'publickey')) > 0 else None
                                    sender_public_key_ciphertext = sender.findall(
                                        'ciphertext')[0].text if len(
                                            sender.findall(
                                                'ciphertext')) > 0 else None
                                    sender_public_key_sha256hash = sender.findall(
                                        'sha256'
                                    )[0].text if len(
                                        sender.findall('sha256')) > 0 else None
                                    sender_signature = sender.findall(
                                        'signature')[0].text if len(
                                            sender.findall(
                                                'signature')) > 0 else None

                                    if version == '0.7':
                                        sender_public_key_ciphertext = sender_public_key
                                        sender_public_key_sha256hash = hashlib.sha256(
                                            sender_public_key.encode(
                                                'utf-8')).hexdigest()

                                    sender_id = str(uuid.uuid4())
                                    self.sql_data_senders.append(
                                        (sender_id, cdm_id,
                                         sender_public_key_ciphertext,
                                         sender_public_key_sha256hash,
                                         sender_signature, True))

                        tx_data = (tx['id'], data['height'], tx['type'],
                                   tx['sender'], tx['senderPublicKey'],
                                   hashlib.sha256(tx['senderPublicKey'].encode(
                                       'utf-8')).hexdigest(), tx['recipient'],
                                   tx['amount'], tx['assetId'],
                                   tx['feeAssetId'], tx['feeAsset'], tx['fee'],
                                   tx['attachment'], tx['version'],
                                   datetime.fromtimestamp(tx['timestamp'] /
                                                          1e3), cnfy_id,
                                   hashlib.sha256(
                                       attachment.encode('utf-8')).hexdigest())

                        self.sql_data_transactions.append(tx_data)

                        for proof in tx['proofs']:
                            proof_id = 'proof-' + str(uuid.uuid4())
                            self.sql_data_proofs.append(
                                (tx['id'], proof, proof_id))

        except asyncio.CancelledError:
            logger.info('Parser has been stopped')
            raise
        except Exception as error:
            logger.error('Fetching data error: {}'.format(error))
Esempio n. 28
0
def serve(host, port, request_handler, error_handler, before_start=None,
          after_start=None, before_stop=None, after_stop=None, debug=False,
          request_timeout=60, response_timeout=60, keep_alive_timeout=5,
          ssl=None, sock=None, request_max_size=None, reuse_port=False,
          loop=None, protocol=HttpProtocol, backlog=100,
          register_sys_signals=True, run_async=False, connections=None,
          signal=Signal(), request_class=None, access_log=True,
          keep_alive=True, is_request_stream=False, router=None,
          websocket_max_size=None, websocket_max_queue=None, state=None,
          graceful_shutdown_timeout=15.0):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param request_handler: Sanic request handler with middleware
    :param error_handler: Sanic error handler with middleware
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param debug: enables debug output (slows server)
    :param request_timeout: time in seconds
    :param response_timeout: time in seconds
    :param keep_alive_timeout: time in seconds
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param request_max_size: size in bytes, `None` for no limit
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param protocol: subclass of asyncio protocol class
    :param request_class: Request class to use
    :param access_log: disable/enable access log
    :param is_request_stream: disable/enable Request.stream
    :param router: Router object
    :return: Nothing
    """
    if not run_async:
        loop = async_loop.new_event_loop()
        asyncio.set_event_loop(loop)

    if debug:
        loop.set_debug(debug)

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        request_handler=request_handler,
        error_handler=error_handler,
        request_timeout=request_timeout,
        response_timeout=response_timeout,
        keep_alive_timeout=keep_alive_timeout,
        request_max_size=request_max_size,
        request_class=request_class,
        access_log=access_log,
        keep_alive=keep_alive,
        is_request_stream=is_request_stream,
        router=router,
        websocket_max_size=websocket_max_size,
        websocket_max_queue=websocket_max_queue,
        state=state,
        debug=debug,
    )

    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog
    )

    # Instead of pulling time at the end of every request,
    # pull it once per minute
    loop.call_soon(partial(update_current_time, loop))

    if run_async:
        return server_coroutine

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Register signals for graceful termination
    if register_sys_signals:
        for _signal in (SIGINT, SIGTERM):
            try:
                loop.add_signal_handler(_signal, loop.stop)
            except NotImplementedError:
                logger.warning('Sanic tried to use loop.add_signal_handler '
                               'but it is not implemented on this platform.')
    pid = os.getpid()
    try:
        logger.info('Starting worker [%s]', pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Graceful shutdown timeout.
        # Honor graceful_shutdown_timeout instead of letting
        # connections hang forever; roughly track elapsed time.
        start_shutdown = 0
        while connections and (start_shutdown < graceful_shutdown_timeout):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(
                    conn.websocket.close_connection(after_handshake=True)
                )
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros, loop=loop)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
Esempio n. 29
0
    async def fetch_data(self, url, session):
        try:
            async with session.get(url) as response:
                data = await response.text()
                data = pjson.loads(data)
                cnfy_id = 'cnfy-{}'.format(str(uuid.uuid4()))

                for tx in data['transactions']:
                    if tx['type'] in [
                            4
                    ] and tx['feeAssetId'] == os.environ['ASSET_ID']:

                        attachment_base58 = base58.b58decode(
                            tx['attachment']).decode('utf-8')
                        attachment = None
                        try:
                            attachment = requests.get(
                                '{0}:{1}/ipfs/{2}'.format(
                                    config['ipfs']['host'],
                                    config['ipfs']['port'], attachment_base58),
                                timeout=2).text
                        except Exception as error:
                            logger.error('IPFS Error: {0}'.format(error))

                        if attachment is None:
                            logger.warning('CONTINUE ON IPFS HASH {0}'.format(
                                attachment_base58))
                            continue

                        attachment_hash = hashlib.sha256(
                            attachment.encode('utf-8')).hexdigest()

                        root = ET.fromstring(attachment)
                        version = root.findall('version')[0].text if len(
                            root.findall('version')) > 0 else None
                        blockchain = root.findall('blockchain')[0].text if len(
                            root.findall('blockchain')) > 0 else None
                        network = root.findall('network')[0].text if len(
                            root.findall('network')) > 0 else None
                        opcodes = root.findall('opcodes')[0] if len(
                            root.findall('opcodes')) > 0 else []

                        if str(version) != str(os.environ['CDM_VERSION']):
                            continue

                        opcode = opcodes.findall('opcode')[0] if len(
                            opcodes.findall('opcode')) > 0 else None
                        if opcode:
                            traffic_id = opcode.findall(
                                'trafficlight'
                            )[0].text if len(
                                opcode.findall('trafficlight')) > 0 else None
                            print('traffic_id', traffic_id)
                            tr_data = (traffic_id, 'GODMODE')
                            self.sql_data_traffic.append(tr_data)

                        tx_data = (tx['id'], data['height'], tx['type'],
                                   tx['sender'], tx['senderPublicKey'],
                                   tx['recipient'], tx['amount'],
                                   tx['assetId'], tx['feeAssetId'],
                                   tx['feeAsset'], tx['fee'], tx['attachment'],
                                   tx['version'],
                                   datetime.fromtimestamp(tx['timestamp'] /
                                                          1e3), cnfy_id,
                                   attachment_hash)

                        self.sql_data_transactions.append(tx_data)

                        for proof in tx['proofs']:
                            proof_id = 'proof-' + str(uuid.uuid4())
                            self.sql_data_proofs.append(
                                (tx['id'], proof, proof_id))

        except asyncio.CancelledError:
            logger.info('Parser has been stopped')
            raise
        except Exception as error:
            logger.error('Fetching data error: {}'.format(error))