Beispiel #1
0
def test_immutable_structures():
    """Test immutable structures"""
    seq = ImmutableList([1, 2, 3])
    # Every mutating operation on an immutable list must raise TypeError.
    for mutator, call_args in [
        (seq.__delitem__, (0,)),
        (seq.__delslice__, (0, 1)),
        (seq.__iadd__, ([1, 2],)),
        (seq.__setitem__, (0, 1)),
        (seq.__setslice__, (0, 1, [2, 3])),
        (seq.append, (42,)),
        (seq.insert, (0, 32)),
        (seq.pop, ()),
        (seq.extend, ([2, 3],)),
        (seq.reverse, ()),
        (seq.sort, ()),
    ]:
        assert_raises(TypeError, mutator, *call_args)
    # The failed mutation attempts must not have changed the contents.
    assert seq == [1, 2, 3]

    d = ImmutableDict(foo=23, bar=42)
    for mutator, call_args in [
        (d.setdefault, ('baz',)),
        (d.update, ({2: 3},)),
        (d.popitem, ()),
        (d.__delitem__, ('foo',)),
        (d.clear, ()),
    ]:
        assert_raises(TypeError, mutator, *call_args)
    assert d == dict(foo=23, bar=42)
    # fromkeys must build a working immutable dict with None values.
    d = ImmutableDict.fromkeys([1, 2])
    assert d[1] == d[2] == None

    d = ImmutableMultiDict(d)
    for mutator, call_args in [
        (d.add, ('fuss', 44)),
        (d.popitemlist, ()),
        (d.poplist, ('foo',)),
        (d.setlist, ('tadaa', [1, 2])),
        (d.setlistdefault, ('tadaa',)),
    ]:
        assert_raises(TypeError, mutator, *call_args)
    d = ImmutableMultiDict.fromkeys([1, 2])
    assert d[1] == d[2] == None

    # EnvironHeaders is a read-only view over a WSGI environ dict.
    d = EnvironHeaders({'HTTP_X_FOO': 'test'})
    for mutator, call_args in [
        (d.__delitem__, (0,)),
        (d.add, (42,)),
        (d.pop, ('x-foo',)),
        (d.popitem, ()),
        (d.setdefault, ('foo', 42)),
    ]:
        assert_raises(TypeError, mutator, *call_args)
    assert dict(d.items()) == {'X-Foo': 'test'}
    assert_raises(TypeError, d.copy)
Beispiel #2
0
def test_immutable_structures():
    """Test immutable structures"""
    l = ImmutableList([1, 2, 3])
    # All list mutators must be rejected with TypeError.
    list_attempts = (
        (l.__delitem__, 0),
        (l.__delslice__, 0, 1),
        (l.__iadd__, [1, 2]),
        (l.__setitem__, 0, 1),
        (l.__setslice__, 0, 1, [2, 3]),
        (l.append, 42),
        (l.insert, 0, 32),
        (l.pop,),
        (l.extend, [2, 3]),
        (l.reverse,),
        (l.sort,),
    )
    for attempt in list_attempts:
        assert_raises(TypeError, *attempt)
    # Contents are untouched after all failed mutations.
    assert l == [1, 2, 3]

    d = ImmutableDict(foo=23, bar=42)
    dict_attempts = (
        (d.setdefault, "baz"),
        (d.update, {2: 3}),
        (d.popitem,),
        (d.__delitem__, "foo"),
        (d.clear,),
    )
    for attempt in dict_attempts:
        assert_raises(TypeError, *attempt)
    assert d == dict(foo=23, bar=42)

    d = ImmutableMultiDict(d)
    multidict_attempts = (
        (d.add, "fuss", 44),
        (d.popitemlist,),
        (d.poplist, "foo"),
        (d.setlist, "tadaa", [1, 2]),
        (d.setlistdefault, "tadaa"),
    )
    for attempt in multidict_attempts:
        assert_raises(TypeError, *attempt)

    # EnvironHeaders exposes WSGI environ headers read-only.
    d = EnvironHeaders({"HTTP_X_FOO": "test"})
    header_attempts = (
        (d.__delitem__, 0),
        (d.add, 42),
        (d.pop, "x-foo"),
        (d.popitem,),
        (d.setdefault, "foo", 42),
    )
    for attempt in header_attempts:
        assert_raises(TypeError, *attempt)
    assert dict(d.items()) == {"X-Foo": "test"}
    assert_raises(TypeError, d.copy)
Beispiel #3
0
# -*- coding: utf-8 -*-

from flask.config import Config
from werkzeug.datastructures import ImmutableDict
import json
import os.path
import six

# Default wiki configuration.  Wrapped in ImmutableDict so the shared defaults
# can never be mutated at runtime; MultiConfig copies them per site.
DEFAULTS = ImmutableDict({
    'MENU_PAGE': 'Menu',
    'FRONT_PAGE': 'Home',
    'LOGO_PAGE': 'logo.png',
    'LOCKED_PAGE': 'Locked',
    'ALIAS_PAGE': 'Alias',
    'HELP_PAGE': 'Help',
    'ICON_PAGE': None,
    'READ_ONLY': False,
    'SITE_NAME': 'Datta Wiki',
    'FALLBACK_URL': None,
    # External service used to render TeX math as images.
    'MATH_URL': 'http://www.mathtran.org/cgi-bin/mathtran?tex=',
    'PYGMENTS_STYLE': 'tango',
    'RECAPTCHA_PUBLIC_KEY': None,
    'RECAPTCHA_PRIVATE_KEY': None,
})


class MultiConfig(Config):
    def from_storage(self, storage):
        fs = storage.fs
        path = '/.config/wiki/'
        configs = {'DEFAULT': dict(DEFAULTS)}
        for fp in fs.listdir(path, open_files=True):
Beispiel #4
0
def tagCorpusWithArgs(args):
    """Validate CLI arguments, build the corpus descriptors, and tag the corpus.

    :param args: argparse-style namespace carrying corpus_path, corpus_name,
        output_path, docuscope_version, simple_dictionary_path, and the
        chunking/ngram/csv options forwarded to tag_corpus below.
    :raises ValueError: when dictionary arguments conflict or are invalid.
    """
    # Sanity check dictionaries: a Simple Rule dictionary and a DocuScope
    # version are mutually exclusive.
    if args.docuscope_version is not None:
        if args.simple_dictionary_path is not None:
            raise ValueError('Cannot specify a Simple Rule dictionary AND a DocuScope version.')
        elif args.docuscope_version not in os.listdir(os.path.join(dictionaries_root, 'Docuscope')):
            raise ValueError('Provided docuscope_version %s is not in dictionary directory.' % args.docuscope_version)
    elif args.simple_dictionary_path is not None:
        if os.path.splitext(os.path.basename(args.simple_dictionary_path))[1] != '.csv':
            raise ValueError("Simple Rule dictionary (%s) must be a CSV" % args.simple_dictionary_path)
    else:
        if 'Docuscope' not in os.listdir(dictionaries_root):
            raise ValueError("No Docuscope dictionaries available. Please specify a Simple Rule dictionary for tagging, download a copy of Docuscope 3.21 and pass it in as an argument, or use the web client for Docuscope tagging.")

    # Setup corpus_info and corpus_data_files.
    timestamp = datetime.now().strftime("%Y-%m-%d-%X").replace(":", "-")
    if args.corpus_name is None:
        # Strip a trailing path separator so basename() yields the directory
        # name rather than an empty string.
        if os.path.isdir(args.corpus_path) and (args.corpus_path[-1] == '\\' or args.corpus_path[-1] == '/'):
            args.corpus_name = os.path.basename(args.corpus_path[:-1])
        else:
            args.corpus_name = os.path.basename(args.corpus_path)
    # Call form of print works identically under Python 2 and 3 for one arg.
    print('Tagging corpus %s...' % args.corpus_name)
    corpus_info = {
        "name": args.corpus_name,
        "job_name": args.corpus_name,
        "provenance": "ubq-%s-%s" % (args.corpus_name, timestamp),
        "path": args.corpus_path,
        "output_path": args.output_path,
        'data': {'Text': {
            'name': 'Text',
            'path': args.corpus_path
        }}
    }
    corpus_data_files = {"Text": {
        'saved': [],
        'skipped': []
    }}
    # Partition the corpus files by extension into saved (taggable) / skipped.
    for root, dirnames, filenames in os.walk(args.corpus_path):
        for filename in filenames:
            file_path = os.path.join(root, filename)
            file_ext = os.path.splitext(filename)[1]
            if file_ext in ubiq_internal_tasks.DATA_EXTENSIONS['Text']:
                corpus_data_files['Text']['saved'].append(file_path)
            else:
                corpus_data_files['Text']['skipped'].append(file_path)

    # Point to SimpleRule dictionary if supplied.
    if args.simple_dictionary_path is not None:
        corpus_info["data"]["SimpleRule"] = {
            "name": "SimpleRule",
            "path": args.simple_dictionary_path
        }
        corpus_data_files["SimpleRule"] = {
            "saved": [args.simple_dictionary_path],
            "skipped": []
        }

    # Run tag_corpus.  The two original call sites differed only in the
    # presence of the `tags` keyword, so build the kwargs once and add
    # `tags` only when a Docuscope version was requested.
    # create_zip_archive=True,
    tag_corpus_kwargs = dict(
        corpus_info=corpus_info,
        corpus_data_files=corpus_data_files,
        ngram_count=args.ngram_count,
        ngram_pun=args.ngram_pun,
        chunk_text=args.chunk,
        chunk_length=args.chunk_length,
        chunk_offset=args.chunk_offset,
        blacklist_path=args.blacklist_path,
        rule_csv=args.rule_csv,
        doc_rule=args.rule_per_doc,
        defect_count=args.defect_count,
        ngram_per_doc=args.ngram_per_doc,
        token_csv=args.token_csv,
    )
    if args.docuscope_version is not None:
        # Pin the tagger to the requested Docuscope dictionary version.
        tag_corpus_kwargs['tags'] = ImmutableDict(Docuscope={"dictionary_path": args.docuscope_version})
    ubiq_internal_tasks.tag_corpus(**tag_corpus_kwargs)
Beispiel #5
0
class CtpBee(object):
    """
    ctpbee grew out of pain points I hit on real projects; it aims to be a
    trading micro-framework with a complete API.
    I hope it will help you !
    """
    # Default backtesting configuration parameters.
    default_params = {
        'cash': 10000.0,
        'check_submit': True,
        'eos_bar': False,
        'filler': None,
        # NOTE(review): the key spelling "commision" (sic) is load-bearing for
        # consumers of LOOPER_SETTING — do not "correct" it without auditing callers.
        "commision": 0.01,
        # slippage options
        'slip_percent': 0.0,
        'slip_fixed': 0.0,
        'slip_open': False,
        'slip_match': True,
        'slip_limit': True,
        'slip_out': False,
        'coc': False,
        'coo': False,
        'int2pnl': True,
        'short_cash': True,
        'fund_start_val': 100.0,
        'fund_mode': False
    }
    # Immutable app-level defaults; make_config() copies them into a mutable Config.
    default_config = ImmutableDict(
        dict(LOG_OUTPUT=True,  # whether to enable log output
             TD_FUNC=False,  # whether to enable the trading function
             INTERFACE="ctp",  # interface name; defaults to domestic futures CTP
             MD_FUNC=True,  # whether to enable the market-data function
             XMIN=[],  # k-line periods; any bar length up to one hour can be generated
             ALL_SUBSCRIBE=False,
             SHARE_MD=False,  # share market data across accounts ---> pending
             SLIPPAGE_COVER=0,  # slippage when covering (closing long) positions
             SLIPPAGE_SELL=0,  # slippage when selling (closing short) positions
             SLIPPAGE_SHORT=0,  # slippage when opening short positions
             SLIPPAGE_BUY=0,  # slippage when opening long positions
             LOOPER_SETTING=default_params,  # parameters required for backtesting
             SHARED_FUNC=False,  # intraday chart data --> pending optimization
             REFRESH_INTERVAL=1.5,  # refresh interval in seconds; only effective when CtpBee is created with refresh=True
             INSTRUMENT_INDEPEND=False,  # whether each strategy gets independent market data
             CLOSE_PATTERN="today",  # on exchanges supporting close-today: "today" closes today's position first, "yesterday" closes yesterday's first, anything else raises
             TODAY_EXCHANGE=[Exchange.SHFE.value, Exchange.INE.value],  # exchange codes that support close-today
             AFTER_TIMEOUT=3,  # execution timeout for the `after` thread
             ))

    config_class = Config
    import_name = None

    # Trade API and market API handles (populated by _load_ext).
    market = None
    trader = None

    # Extension (plugin) system.
    extensions = {}

    # Toolbox, using by providing some good tools.
    tools = {}

    def __init__(self, name: Text, import_name, action_class: Action = None, engine_method: str = "thread",
                 work_mode="limit_time", logger_class=None, logger_config_path=None,
                 refresh: bool = False, risk=None,
                 instance_path=None):
        """ Initialize the application instance. """
        self.name = name if name else 'ctpbee'
        self.import_name = import_name
        self.engine_method = engine_method
        self.refresh = refresh
        self.active = False
        # Choose whether to use the default logging class.
        if logger_class is None:
            self.logger = VLogger(app_name=self.name)
            self.logger.config.from_pyfile(os.path.join(os.path.split(__file__)[0], 'cprint_config.py'))
            self.logger.set_default(name=self.logger.app_name, owner='App')
        else:
            # Custom logger class; fall back to the bundled config when no path is given.
            self.logger = logger_class(app_name=self.name)
            if logger_config_path:
                self.logger.config.from_pyfile(logger_config_path)
            else:
                self.logger.config.from_pyfile(os.path.join(os.path.split(__file__)[0], 'cprint_config.py'))
            self.logger.set_default(name=self.logger.app_name, owner='App')
        # Pick the event engine + recorder implementation by engine_method.
        if engine_method == "thread":
            self.event_engine = EventEngine()
            self.recorder = Recorder(self, self.event_engine)
        elif engine_method == "async":
            self.event_engine = AsyncEngine()
            self.recorder = AsyncRecorder(self, self.event_engine)
        else:
            raise TypeError("引擎参数错误,只支持 thread 和 async,请检查代码")

        """
              If no risk is specified by default, set the risk_decorator to None
              如果默认不指定action参数, 那么使用设置风控装饰器为空
              """
        if risk is None:
            self.risk_decorator = None
        else:
            self.risk_decorator = risk
        """
        If no action is specified by default, use the default Action class
        如果默认不指定action参数, 那么使用默认的Action类 
        """
        if action_class is None:
            self.action = Action(self)
        else:
            self.action = action_class(self)
        """
        根据action里面的函数更新到CtpBee上面来
        bind the function of action to CtpBee
        """

        """ update """
        if self.risk_decorator is not None:
            self.risk_decorator.update_app(self)

        # Re-export every public bound method of the action object on the app
        # itself, so app.buy(...) etc. forward to the action implementation.
        for x in dir(self.action):
            func = getattr(self.action, x)
            if x.startswith("__"):
                continue
            if ismethod(func):
                setattr(self, func.__name__, func)
        """
        If engine_method is specified by default, use the default EventEngine and Recorder or use the engine
            and recorder basis on your choice
        如果不指定engine_method参数,那么使用默认的事件引擎 或者根据你的参数使用不同的引擎和记录器
        """

        if instance_path is None:
            instance_path = self.auto_find_instance_path()
        elif not os.path.isabs(instance_path):
            raise ValueError(
                'If an instance path is provided it must be absolute.'
                ' A relative path was given instead.'
            )
        self.instance_path = instance_path
        self.config = self.make_config()
        self.init_finished = False

        # default monitor and flag
        self.p = None
        self.p_flag = True

        self.r = None
        self.r_flag = True
        self.work_mode = work_mode

        # Register this app in the global app context stack under its name.
        _app_context_ctx.push(self.name, self)

    def update_action_class(self, action_class):
        # NOTE(review): this raises when an Action *instance* is passed and
        # then calls action_class() with no app argument (unlike Action(self)
        # in __init__) — the check looks inverted; confirm intended contract.
        if isinstance(action_class, Action):
            raise TypeError(f"更新action_class出现错误, 你传入的action_class类型为{type(action_class)}")
        self.action = action_class()

    def update_risk_gateway(self, gateway_class):
        # Swap in a new risk-control gateway and bind it to this app.
        self.risk_decorator = gateway_class
        self.risk_decorator.update_app(self)

    def make_config(self):
        """ Build the Config object, seeded with a mutable copy of default_config. """
        defaults = dict(self.default_config)
        return self.config_class(self.instance_path, defaults)

    def auto_find_instance_path(self):
        # Derive the instance directory from the package location,
        # mirroring Flask's auto_find_instance_path behaviour.
        prefix, package_path = find_package(self.import_name)
        if prefix is None:
            return os.path.join(package_path)
        return os.path.join(prefix, 'var', self.name + '-instance')

    @property
    def td_login_status(self):
        """ Every trade API is expected to implement td_status. """
        return self.trader.td_status

    @property
    def md_login_status(self):
        """ Every market-data API is expected to implement md_status. """
        return self.market.md_status

    def _load_ext(self):
        """Load the market-data and trade APIs according to the current config; note both APIs are optional."""
        self.active = True
        if "CONNECT_INFO" in self.config.keys():
            info = self.config.get("CONNECT_INFO")
        else:
            raise ConfigError(message="没有相应的登录信息", args=("没有发现登录信息",))
        MdApi, TdApi = Interface.get_interface(self)
        if self.config.get("MD_FUNC"):
            self.market = MdApi(self.event_engine)
            self.market.connect(info)

        if self.config.get("TD_FUNC"):
            # The looper (backtest) trade API additionally needs the app itself.
            if self.config['INTERFACE'] == "looper":
                self.trader = TdApi(self.event_engine, self)
            else:
                self.trader = TdApi(self.event_engine)
            self.trader.connect(info)

        show_me = graphic_pattern(__version__, self.work_mode, self.engine_method)
        print(show_me)

        # Check work_mode.
        if self.work_mode == "forever":
            """ 7×24小时 """
            # Start the monitor thread; if one already exists, signal it to
            # stop (p_flag=False), wait briefly, then start a fresh one.

            if self.p is not None:
                self.p_flag = False
                sleep(1.5)
                self.p = Thread(target=run_forever, args=(self,))
                self.p.start()

            else:
                self.p = Thread(target=run_forever, args=(self,))
                self.p.start()
            self.p_flag = True
        else:
            pass

        if self.refresh:
            # Same restart dance for the periodic refresh thread.
            if self.r is not None:
                self.r_flag = False
                sleep(self.config['REFRESH_INTERVAL'] + 1.5)
                self.r = Thread(target=refresh_query, args=(self,))
                self.r.start()
            else:
                self.r = Thread(target=refresh_query, args=(self,))
                self.r.start()
            self.r_flag = True
        self.p_flag = True

    @locked_cached_property
    def name(self):
        # Mirrors Flask: derive a readable app name from the entry module.
        if self.import_name == '__main__':
            fn = getattr(sys.modules['__main__'], '__file__', None)
            if fn is None:
                return '__main__'
            return os.path.splitext(os.path.basename(fn))[0]
        return self.import_name

    def start(self, log_output=True, debug=False):
        """
        Start processing.
        :param log_output: whether to emit log messages
        :param debug: whether to enable debug mode ----> pending implementation
        :return:
        """
        if not self.event_engine.status:
            self.event_engine.start()
        self.config["LOG_OUTPUT"] = log_output
        self._load_ext()

    def stop(self):
        """ Stop running. """
        if self.event_engine.status:
            self.event_engine.stop()

    def remove_extension(self, extension_name: Text) -> None:
        """Remove an extension by name (no-op if absent)."""
        if extension_name in self.extensions:
            del self.extensions[extension_name]

    def add_extension(self, extension: CtpbeeApi):
        """Register an extension, replacing any previous one with the same name."""
        self.extensions.pop(extension.extension_name, None)
        extension.init_app(self)
        self.extensions[extension.extension_name] = extension

    def suspend_extension(self, extension_name):
        # Freeze an extension so it stops receiving events; False if unknown.
        extension = self.extensions.get(extension_name, None)
        if not extension:
            return False
        extension.frozen = True
        return True

    def enable_extension(self, extension_name):
        # Unfreeze a previously suspended extension; False if unknown.
        extension = self.extensions.get(extension_name, None)
        if not extension:
            return False
        extension.frozen = False
        return True

    def del_extension(self, extension_name):
        # Remove an extension by name without error if it does not exist.
        self.extensions.pop(extension_name, None)

    def reload(self):
        """ Reload the interfaces (close, pause, then reconnect). """
        if self.market is not None:
            self.market.close()
        if self.trader is not None:
            self.trader.close()
        # Drain the processing queue.
        # NOTE(review): Queue.empty() only *reports* emptiness; it does not
        # clear the queue — confirm whether a real drain was intended here.
        self.event_engine._queue.empty()
        sleep(3)
        self.market, self.trader = None, None
        self._load_ext()

    def release(self):
        """ Release the account and exit safely. """
        try:
            if self.market is not None:
                self.market.close()
            if self.trader is not None:
                self.trader.close()
            self.market, self.trader = None, None
            self.event_engine.stop()
            del self.event_engine
        except AttributeError:
            # Teardown may race with partially-initialized state; best effort.
            print(1)
Beispiel #6
0
import typing

from marshmallow.schema import Schema
from peewee import ModelSelect
from playhouse.signals import Model
from werkzeug.datastructures import ImmutableDict

###############################################################################
# TYPE SHORTCUTS
###############################################################################

# Alias for peewee's select-query type.
SELECT_INST = ModelSelect

# Model instance / class aliases used in annotations.
MODEL_INST = Model
MODEL_TYPE = typing.Type[MODEL_INST]

# Marshmallow schema instance / class aliases.
SCHEMA_INST = Schema
SCHEMA_TYPE = typing.Type[SCHEMA_INST]

# Accepted "iterable of strings" annotation forms.
ITERABLE = (typing.Tuple[str], typing.List[str])

###############################################################################
# ARG DEFAULTS
###############################################################################

# Shared empty default mapping; immutable, so it is safe to use as a
# default argument value without the mutable-default pitfall.
DEFAULT_DICT = ImmutableDict()
Beispiel #7
0
    Copyright: (c) 2013 by ZEIT ONLINE.
    License: BSD, see LICENSE.md for more details.
"""
import contextlib
import sqlite3

import flask
from flask import g, jsonify, current_app as current_app
from werkzeug.datastructures import ImmutableDict

from . import access, metadata, exception, queries

# Blueprint serving the JSON API endpoints.
api_server = flask.Blueprint('api_server', __name__)
# Blueprint serving the human-facing developer portal pages.
developer_portal = flask.Blueprint('developer_portal', __name__)
# Enable loop controls (break/continue) in the portal's Jinja templates;
# jinja_options is conventionally immutable.
developer_portal.jinja_options = ImmutableDict(
    {'extensions': ['jinja2.ext.loopcontrols']})


@api_server.app_errorhandler(400)
def handle_bad_request(error):
    """Delegate HTTP 400 errors to the shared exception module."""
    return exception.bad_request(error)


@api_server.app_errorhandler(404)
def handle_resource_not_found(error):
    """Delegate HTTP 404 errors to the shared exception module."""
    return exception.resource_not_found(error)


@api_server.app_errorhandler(405)
def handle_method_not_allowed(error):
    """Delegate HTTP 405 errors to the shared exception module."""
    return exception.method_not_allowed(error)
Beispiel #8
0
    def build(self, endpoint, values = None, method = None, force_external = False, append_unknown = True):
        """Build a URL for *endpoint*, dropping None-valued parameters.

        Returns a path-only URL when the subdomain matches and
        force_external is false, otherwise a fully qualified URL.
        Raises BuildError when no rule matches the endpoint/values.
        """
        self.map.update()
        if not values:
            values = {}
        else:
            # MultiDict needs multi=True to preserve repeated keys.
            if isinstance(values, MultiDict):
                pairs = values.iteritems(multi=True)
            else:
                pairs = values.iteritems()
            values = dict((k, v) for k, v in pairs if v is not None)
        built = self._partial_build(endpoint, values, method, append_unknown)
        if built is None:
            raise BuildError(endpoint, values, method)
        subdomain, path = built
        # Same subdomain and no external request: a relative URL suffices.
        if not force_external and subdomain == self.subdomain:
            return str(urljoin(self.script_name, path.lstrip('/')))
        host = subdomain and subdomain + '.' or ''
        return str('%s://%s%s%s/%s' % (self.url_scheme,
         host,
         self.server_name,
         self.script_name[:-1],
         path.lstrip('/')))


# URL rule converter name -> converter class; 'string' aliases 'default'.
DEFAULT_CONVERTERS = {'default': UnicodeConverter,
 'string': UnicodeConverter,
 'any': AnyConverter,
 'path': PathConverter,
 'int': IntegerConverter,
 'float': FloatConverter}
from werkzeug.datastructures import ImmutableDict, MultiDict
# Expose the converters on Map as an immutable mapping so the routing
# defaults cannot be mutated by accident.
Map.default_converters = ImmutableDict(DEFAULT_CONVERTERS)
Beispiel #9
0
 def responses(self):
     # type: ()->Dict[int, ApiResponse]
     """Return a read-only view of the registered responses, keyed by status code."""
     return ImmutableDict(self._responses)
Beispiel #10
0
 def paths(self):
     # type: ()->Dict[str, ApiPathItem]
     """Return a read-only view of the registered path items, keyed by path."""
     return ImmutableDict(self._paths)
Beispiel #11
0
 def definitions(self):
     # type: ()->Dict[str, ApiModelDefinition]
     """Return a read-only view of the registered model definitions, keyed by name."""
     return ImmutableDict(self._definitions)
Beispiel #12
0
def tag_corpus(corpus_info,
               corpus_data_files,
               email,
               tags=ImmutableDict(Docuscope={"return_included_tags": True}),
               formats=ImmutableDict(HTML=None),
               batch_formats=ImmutableDict(CSV=None),
               write_to_disk=True,
               create_zip_archive=False):
    """Tag and format every text in a corpus, reporting progress to celery.

    :param corpus_info: dict describing the corpus; must carry "path",
        "name" and "data" keys.
    :param corpus_data_files: dict of saved/skipped file paths per data type.
    :param email: address to notify on success, or None.
    :param tags: mapping of tagger name -> init kwargs (only one supported).
    :param formats: mapping of per-text formatter name -> init kwargs.
    :param batch_formats: mapping of corpus-level formatter name -> init kwargs.
    :param write_to_disk: whether taggers/formatters persist their output.
    :param create_zip_archive: whether to archive the corpus output directory.
    :returns: (corpus name, csv path, zip archive path or None).
    :raises ValueError: on invalid corpus_info, unknown tagger/formatter, or
        multiple taggers.
    """
    # Validate corpus_info.
    if "path" not in corpus_info or "name" not in corpus_info or "data" not in corpus_info:
        raise ValueError("Invalid corpus_info provided.")

    # Add a unique id to the corpus_info dict (name + taggers + formats +
    # host + epoch seconds) so concurrent runs don't collide.
    corpus_info["processing_id"] = "".join([
        corpus_info["name"], "_", "-".join(tags.keys()), "_",
        "-".join(formats.keys()), "_",
        socket.gethostname(), "_",
        str(int(time()))
    ])
    # Validate Taggers.
    tagger_instances = {}
    formatter_instances = {}
    if len(tags) > 1:
        raise ValueError(
            "Tagging texts with multiple taggers isn't supported yet.")
    for tag_name in tags.keys():
        if not module_exists("Ity.Taggers." + tag_name + "Tagger"):
            raise ValueError("A Tagger module for '%s' tags does not exist." %
                             tag_name)
    # Instantiate Taggers.
    for tag_name, tag_args in tags.items():
        if tag_name in tagger_instances:
            raise NotImplementedError(
                "Tagging multiple times with the same tagger is not yet supported."
            )
        tagger_name = tag_name + "Tagger"
        tagger_module = getattr(
            __import__("Ity.Taggers", fromlist=tagger_name), tagger_name)
        # Add some additional instantiation arguments for specific taggers.
        # TODO: Clean up Taggers' init() arguments.
        if tag_args is None:
            tagger_init_args = {}
        else:
            # BUGFIX: copy instead of aliasing.  The default `tags` value is
            # an ImmutableDict, but its *values* are plain mutable dicts
            # shared across calls; the update() below used to mutate the
            # shared default, leaking dictionary_path into later calls.
            tagger_init_args = dict(tag_args)
        # Custom rules file: point Docuscope at a saved SimpleRule dictionary.
        if tag_name == "Docuscope" and (
                "SimpleRule" in corpus_data_files
                and "saved" in corpus_data_files["SimpleRule"]
                and len(corpus_data_files["SimpleRule"]["saved"]) > 0):
            tagger_init_args.update(
                dictionary_path=corpus_data_files["SimpleRule"]["saved"][0])
        # Instantiate this Tagger.
        tagger_instance = tagger_module(**tagger_init_args)
        tagger_instances[tag_name] = tagger_instance
    # Validate formatters.  (Python 2: dict.keys() returns concatenable lists.)
    for format_name in formats.keys() + batch_formats.keys():
        if not module_exists("Ity.Formatters." + format_name + "Formatter"):
            raise ValueError(
                "A Formatter module for '%s' format does not exist." %
                format_name)
    # Instantiate Formatters.
    for format_name, format_args in formats.items():
        if format_name in formatter_instances:
            raise NotImplementedError(
                "Formatting multiple times with the same formatter is not yet supported."
            )
        formatter_name = format_name + "Formatter"
        formatter_module = getattr(
            __import__("Ity.Formatters", fromlist=formatter_name),
            formatter_name)
        # Add some additional instantiation arguments for specific formatters.
        # TODO: Clean up Taggers' init() arguments.
        if format_args is None:
            formatter_init_args = {}
        else:
            # Defensive copy for the same shared-default reason as above.
            formatter_init_args = dict(format_args)
        # Instantiate this Formatter.
        formatter_instance = formatter_module(**formatter_init_args)
        formatter_instances[format_name] = formatter_instance

    # Get all the texts in this corpus...if there are any?
    if "Text" not in corpus_info["data"] or len(
            corpus_data_files["Text"]["saved"]) == 0:
        raise StandardError("No corpus texts to tag!")
    text_paths = corpus_data_files["Text"]["saved"]

    # Logging
    logger.info(
        "Email: %s; Corpus: %s; # Texts: %u." %
        (email, corpus_info["name"], len(corpus_data_files["Text"]["saved"])))

    # Update progress (only when running inside a real celery task).
    if current_task.request.id is not None:
        current_task.update_state(state='PROGRESS',
                                  meta={
                                      'current': 0.0,
                                      'total': 100.0,
                                      "model_path": corpus_info["name"]
                                  })

    # Prepare the arguments for each tag_text() call.
    tag_text_args = [
        dict(text_path=text_path,
             corpus_info=corpus_info,
             corpus_data_files=corpus_data_files,
             taggers=tagger_instances,
             formatters=formatter_instances,
             write_to_disk=write_to_disk) for text_path in text_paths
    ]
    # Synchronously tag and format all the texts.
    tag_results = []
    for index, tag_text_arg in enumerate(tag_text_args):
        tag_results.append(_tag_text_with_existing_instances(**tag_text_arg))
        # Update progress.
        if current_task.request.id is not None:
            current_task.update_state(state='PROGRESS',
                                      meta={
                                          'current':
                                          float(index + 1) /
                                          len(tag_text_args) * 100.0,
                                          'total':
                                          100.0
                                      })
    csv_path = format_corpus(text_results=tag_results,
                             corpus_info=corpus_info,
                             write_to_disk=write_to_disk)
    zip_archive_path = None
    if create_zip_archive:
        zip_archive_path = archive_corpus_output(corpus_info=corpus_info)
    # Update progress.
    if current_task.request.id is not None:
        current_task.update_state(state='PROGRESS',
                                  meta={
                                      'current': 100.0,
                                      'total': 100.0
                                  })
    # Email if successful.
    if email is not None:
        email_alert(email, failed=False)
    return corpus_info["name"], csv_path, zip_archive_path
Beispiel #13
0
class ManiwaniApp(Flask):
    """Flask subclass that customizes the Jinja environment.

    Overrides jinja_options (kept immutable, matching Flask's convention) to
    enable the autoescape/with_ extensions and to plug in a keystore-backed
    template bytecode cache.
    """
    jinja_options = ImmutableDict(extensions=["jinja2.ext.autoescape", "jinja2.ext.with_"],
                                  bytecode_cache=jinja_cache.KeystoreCache())
Beispiel #14
0
===============================
    Shimehari.core.config
    ~~~~~~~~~~~~~~~~~~~~~
    shimehari core config settings
===============================
"""

from werkzeug.datastructures import ImmutableDict
u"""
    許可する RESTful アクション
"""
# Permitted RESTful actions.
RESTFUL_ACTIONS = set(
    ['index', 'show', 'edit', 'new', 'create', 'update', 'destroy'])
u"""
    許可する HTTP メソッド
"""
# Permitted HTTP method names.
ALLOWED_HTTP_METHOD_NAMES = set(
    ['get', 'post', 'put', 'delete', 'head', 'options', 'trace'])
u"""
    RESTful アクションごとに許可する HTTP メソッドの対応マップ
"""
# Immutable map of RESTful action -> HTTP methods allowed for that action.
RESTFUL_METHODS_MAP = ImmutableDict({
    'index': ['get'],
    'show': ['get', 'post'],
    'edit': ['get', 'post'],
    'new': ['get'],
    'create': ['post'],
    'update': ['put'],
    'destroy': ['delete']
})
Beispiel #15
0
class AthanaFlaskStyleApp(object):
    """Flask style app based on Athana HTTP"""
    #: Options that are passed directly to the Jinja2 environment.
    jinja_options = ImmutableDict(extensions=[
        'jinja2.ext.autoescape', 'jinja2.ext.with_', PyJadeExtension
    ])

    # Class used for the per-app-context ``g`` object (Flask convention).
    app_ctx_globals_class = _AppCtxGlobals

    def __init__(self,
                 import_name,
                 template_folder="web/templates",
                 name="mediatum",
                 **config):
        """Initialize config, blueprint registry and the Jinja environment.

        :param import_name: dotted import name used to locate the root path.
        :param template_folder: template directory relative to the root path.
        :param name: human-readable application name.
        :param config: arbitrary config values; ``DEBUG`` defaults to True.
        """
        if "DEBUG" not in config:
            config["DEBUG"] = True
        self.blueprints = {}
        self.name = name
        # Copy so later mutation of the caller's dict cannot affect the app.
        self.config = config.copy()
        self.extensions = {}
        self.template_folder = template_folder
        # NOTE(review): the Jinja environment is built before import_name and
        # root_path are assigned; create_jinja_environment only reads
        # self.config / self.in_development_mode, so the order is safe.
        self.jinja_env = self.create_jinja_environment()
        self.import_name = import_name
        self.root_path = get_root_path(import_name)
        #: A dictionary with list of functions that are called without argument
        #: to populate the template context.  The key of the dictionary is the
        #: name of the blueprint this function is active for, `None` for all
        #: requests.  Each returns a dictionary that the template context is
        #: updated with.  To register a function here, use the
        #: :meth:`context_processor` decorator.
        self.template_context_processors = {None: []}

        #: all the attached blueprints in a dictionary by name.  Blueprints

    @property
    def in_development_mode(self):
        """True when the app is running with DEBUG enabled."""
        return self.config["DEBUG"]

    def register_blueprint(self, blueprint):
        """Register *blueprint* and expose its directory as an Athana context."""
        self.blueprints[blueprint.name] = blueprint
        logg.info("added blueprint %s, import name %s", blueprint.name,
                  blueprint.import_name)
        # Athana contexts are addressed by paths relative to its base dir.
        blueprint_reldir = path.relpath(blueprint.root_path,
                                        start=_request_handler.getBase())
        # Context names must be absolute ("/name").
        context_name = blueprint.name if blueprint.name.startswith(
            "/") else "/" + blueprint.name
        ctx = _request_handler.addContext(context_name, blueprint_reldir)
        logg.info(
            "added athana context from blueprint with context_name %s, dir %s",
            context_name, blueprint_reldir)
        blueprint.athana_context = ctx

    def register_with_athana(self):
        """Install this app as the global Athana HTTP application."""
        from core.transition.athana_sep import athana_http
        #         if athana_http.app is not None:
        #             raise Exception("App already registered!")
        athana_http.app = self

    def app_context(self):
        """Return a new application context bound to this app."""
        return AppContext(self)

    def request_context(self, request, session):
        """Return a request context wrapping *request* and *session*."""
        return RequestContext(self, request, session)

    def context_processor(self, f):
        """Registers a template context processor function."""
        self.template_context_processors[None].append(f)
        return f

    def update_template_context(self, context):
        """Update the template context with some commonly used variables.
        This injects request, session, config and g into the template
        context as well as everything template context processors want
        to inject.  Note that the as of Flask 0.6, the original values
        in the context will not be overridden if a context processor
        decides to return a value with the same key.

        :param context: the context as a dictionary that is updated in place
                        to add extra variables.
        """
        funcs = self.template_context_processors[None]
        #         reqctx = _request_ctx_stack.top
        #         if reqctx is not None:
        #             bp = reqctx.request.blueprint
        #             if bp is not None and bp in self.template_context_processors:
        #                 funcs = chain(funcs, self.template_context_processors[bp])
        orig_ctx = context.copy()
        for func in funcs:
            context.update(func())
        # make sure the original values win.  This makes it possible to
        # easier add new variables in context processors without breaking
        # existing views.
        context.update(orig_ctx)

    def route(self, route):
        """Route registration placeholder; not implemented yet."""
        # TODO: todo (see Blueprint)
        pass

    def test_request_context(self):
        """
        TODO: only Athana, should return a flask or athana request and session
        """

        #         from core.transition.athana_sep import athana_http

        # NOTE(review): the local import above is commented out, and no
        # module-level import of athana_http is visible in this chunk; the
        # reference below may raise NameError — confirm against the file top.
        def data(self):
            return [p.data for p in self.outgoing]

        request = AthanaTestRequest()
        request.session = session = athana_http.Session(1)
        return RequestContext(self, request, session)

    def select_jinja_autoescape(self, filename):
        """Returns `True` if autoescaping should be active for the given
        template name.

        !taken from Flask.
        """
        if filename is None:
            return False
        return filename.endswith(('.html', '.htm', '.xml', '.xhtml', '.jade'))

    def create_jinja_environment(self):
        """
        !taken from Flask.
        """
        options = dict(self.jinja_options)
        if 'autoescape' not in options:
            options['autoescape'] = self.select_jinja_autoescape
        rv = Environment(self, **options)
        # Globals available in every rendered template.
        rv.globals.update(config=self.config,
                          request=request,
                          session=session,
                          javascript=JavascriptIncludes(
                              self.in_development_mode),
                          css=CSSIncludes(self.in_development_mode),
                          base_uri="",
                          g=g)
        rv.filters['yaml'] = yaml.dump
        rv.filters['yaml_safe'] = yaml.safe_dump
        rv.filters['yaml_pretty'] = pyaml.dump
        rv.filters['u'] = partial(unicode, encoding="utf8")
        rv.filters['ordereddict'] = OrderedDict
        rv.filters["dt_fromiso"] = dt_fromiso
        rv.filters["strptime"] = datetime.datetime.strptime
        rv.filters["strftime"] = datetime.datetime.strftime
        #rv.trim_blocks = True
        return rv

    @cached_property
    def jinja_loader(self):
        """Template loader; cached so loaders added later persist."""
        if self.template_folder is not None:
            loaders = [
                FileSystemLoader(
                    path.join(self.root_path, self.template_folder))
            ]
        else:
            loaders = []

        return ChoiceLoader(loaders)

    def add_template_loader(self, loader, pos=None):
        """Insert *loader* into the choice loader, optionally at *pos*."""
        if pos is not None:
            self.jinja_loader.loaders.insert(pos, loader)
        else:
            self.jinja_loader.loaders.append(loader)

    def add_template_globals(self, **global_names):
        """Expose the given keyword arguments as template globals."""
        self.jinja_env.globals.update(global_names)
Beispiel #16
0
 def versioned_data(self):
     """Return the current revision's versioned data as a read-only
     mapping, or None when there is no current data."""
     if self.current_data is None:
         return None
     return ImmutableDict(self.current_data.versioned_data)
 def __init__(self, *args, **kwargs):
     """Initialize the app with a PostgreSQL-version-aware template loader."""
     # Install the loader before Flask builds its Jinja environment so the
     # environment picks it up from jinja_options.
     options = ImmutableDict(
         extensions=['jinja2.ext.autoescape', 'jinja2.ext.with_'],
         loader=VersionedTemplateLoader(self),
     )
     self.jinja_options = options
     super(PgAdmin, self).__init__(*args, **kwargs)
Beispiel #18
0
 def properties(self):
     # type: ()->Dict[str, ApiModelProperty]
     """Expose this model's properties as a read-only mapping."""
     props = self._properties
     return ImmutableDict(props)
Beispiel #19
0
def rank_settings():
    """Return rank settings cached on the current request context.

    Computes and caches the settings on first access within a request;
    outside a request context an empty ImmutableDict is returned.
    """
    if has_request_context():
        ctx = _request_ctx_stack.top
        if getattr(ctx, "rank_settings", None) is None:
            ctx.rank_settings = _get_rank_settings()
    return getattr(_request_ctx_stack.top, "rank_settings", ImmutableDict())
Beispiel #20
0
def get_countries():
    """Return a read-only mapping of ISO alpha-2 code -> country name.

    Prefers each country's common name when pycountry provides one, then
    applies any project-specific overrides from config.CUSTOM_COUNTRIES.
    """
    names = {}
    for country in pycountry.countries:
        names[country.alpha_2] = getattr(country, 'common_name', country.name)
    names.update(config.CUSTOM_COUNTRIES)
    return ImmutableDict(names)
Beispiel #21
0
def upload():
    """
    If there's some problem with the upload, we return early with the JSON status output.
    If the upload concludes successfully, we write the status information to disk and return a redirect to the upload_get route.
    :return:
    """
    # No Celery? Uh, we're not ready for that yet.
    if not app.config["CELERY"]:
        raise NotImplementedError("Right now we need Celery to even function!")
    # Make a unique corpus name for this upload.
    upload_hash = hashlib.sha1()
    # BUG FIX: hashlib's update() requires bytes; str(time.time()) is a text
    # string on Python 3 and raised TypeError here.  Encoding explicitly is
    # a no-op for this ASCII content on Python 2, so it is safe on both.
    upload_hash.update(str(time.time()).encode("utf-8"))
    arbitrary_corpus_name = upload_hash.hexdigest()[:10]
    # Save the corpus synchronously.
    # This is *not* a Celery task call; we want to avoid transferring all the
    # upload data across the network to the Celery worker server.
    corpus_info, corpus_data_files = tasks.save_corpus(
        corpus_name=arbitrary_corpus_name,
        data_uploads=request.files
    )
    if "email_address" not in request.form:
        raise ValueError("No email address provided!")
    email_address = request.form["email_address"]
    docuscope_dictionary = request.form["docuscope_dictionary"]
    # N-gram options only apply when the checkbox was ticked.
    if "generate_ngram_csv" in request.form:
        ngram_count = int(request.form["ngram_count"])
        ngram_punc = "ngram_pun" in request.form
    else:
        ngram_count = 0
        ngram_punc = False
    generate_text_htmls = "generate_text_htmls" in request.form
    chunk_text = "chunk_text" in request.form
    if "chunk_text" in request.form:
        if "chunk_length" in request.form and request.form["chunk_length"] != '':
            chunk_length = request.form["chunk_length"]
        else:
            chunk_length = None
        if "chunk_offset" in request.form and request.form["chunk_offset"] != '':
            chunk_offset = request.form["chunk_offset"]
        else:
            chunk_offset = None
    # wipes values if chunk parameters were entered but then chunk_text was unchecked
    else:
        chunk_length = None
        chunk_offset = None
    name = str(request.form["custom"])
    # get blacklisted words as string, if any
    blacklist_words = ''
    if "enable_blacklist" in request.form:
        if "blacklist_words" in request.form:
            blacklist_words = request.form["blacklist_words"]

    rule_csv = ("generate_rule" in request.form)
    defect_count = ("defect_stats" in request.form)
    token_csv = ("token_csv" in request.form)
    # Per-document rule output only makes sense when rule CSVs are enabled.
    doc_rule = False
    if rule_csv:
        doc_rule = ("doc_rule" in request.form)
    # If we're running with Celery, make an asynchronous call to the task.
    # Then redirect to tag_corpus_status with the tid=[the Celery task ID].
    if app.config["CELERY"]:
        # Hand the uploaded data off to a Celery task.
        result = tasks.tag_corpus.delay(
            corpus_info=corpus_info,
            corpus_data_files=corpus_data_files,
            email=email_address,
            create_zip_archive=True,
            tags=ImmutableDict(Docuscope={"return_included_tags": True, "return_excluded_tags": False, "dictionary_path": docuscope_dictionary}),
            ngram_count=ngram_count,
            ngram_pun=ngram_punc,
            generate_text_htmls=generate_text_htmls,
            chunk_text=chunk_text,
            chunk_length=chunk_length,
            chunk_offset=chunk_offset,
            blacklist_words=blacklist_words,
            rule_csv=rule_csv,
            doc_rule=doc_rule,
            name=name,
            defect_count=defect_count,
            token_csv=token_csv
        )
        if name == '':
            name = '~' #placeholder to keep url structure
        # The jQuery AJAX form plugin expects a JSON return value rather than a redirect.
        if "javascript_enabled" in request.form and request.form["javascript_enabled"] == "true":
            return jsonify(dict(redirect=url_for(
                "tag_corpus_index",
                tid=result.id,
                name=name
            )))
        else:
            return redirect(url_for(
                "tag_corpus_index",
                tid=result.id,
                name=name
            ))
    # No Celery? Uh, we're not ready for that yet.
    else:
        raise NotImplementedError("Right now we need Celery to even function!")
Beispiel #22
0
# initiate the flask app
# NOTE(review): `os` and `root` must already be defined earlier in this
# module — they are not visible in this chunk; confirm against the file top.
from flask import Flask

app = Flask(__name__, static_folder=os.path.join(root, 'public'))

# runtime/local configuration via config.py
cfg_file_path = os.path.join(root, 'config.py')
app.config.from_pyfile(cfg_file_path)

if not app.debug:
    # enable bytecode caching for templates
    # (skipped in debug mode so template edits take effect immediately)
    from jinja2 import FileSystemBytecodeCache
    from werkzeug.datastructures import ImmutableDict
    j2cachedir = os.path.join(root, 'tmp', 'j2cache')
    app.jinja_options = ImmutableDict(
        extensions=['jinja2.ext.with_'],
        bytecode_cache=FileSystemBytecodeCache(j2cachedir, '%s.cache'),
    )

    # set up logging to stderr
    import logging, sys
    file_handler = logging.StreamHandler(sys.stderr)
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)

# replace the json encoder
from qdb.utils import CustomJSONEncoder

app.json_encoder = CustomJSONEncoder

# register routes
# (importing the module registers its view functions on `app` as a side effect)
import qdb.routes
Beispiel #23
0
import httplib as http

from werkzeug.datastructures import ImmutableDict
from framework.exceptions import HTTPError

from website import mails
from website.util import web_url_for

# Registry of sign-up campaigns, keyed by campaign slug.  Each entry holds
# the system tag applied to users, a redirect URL wrapped in a lambda
# (presumably so web_url_for runs lazily at request time — confirm), and
# the confirmation email template.  ImmutableDict prevents runtime mutation.
CAMPAIGNS = ImmutableDict({
    'prereg': {
        'system_tag': 'prereg_challenge_campaign',
        'redirect_url': lambda: web_url_for('prereg_landing_page'),
        'confirmation_email_template': mails.CONFIRM_EMAIL_PREREG,
    },
})


def system_tag_for_campaign(campaign):
    """Return the system tag for *campaign*, or None if it is unknown."""
    entry = CAMPAIGNS.get(campaign)
    if entry is None:
        return None
    return entry['system_tag']


def email_template_for_campaign(campaign):
    """Return the confirmation email template for *campaign*.

    Returns None (implicitly, matching the original contract) when the
    campaign is not registered.
    """
    if campaign not in CAMPAIGNS:
        return None
    return CAMPAIGNS[campaign]['confirmation_email_template']


def campaign_for_user(user):
    for campaign, config in CAMPAIGNS.items():
        # TODO: This is a bit of a one-off to support the Prereg Challenge.
Beispiel #24
0
 def object_ref(self):
     """Return the reference of the changed object as a read-only dict."""
     ref = dict(type=self.type,
                category_id=self.category_id,
                event_id=self.event_id,
                session_id=self.session_id,
                contrib_id=self.contrib_id,
                subcontrib_id=self.subcontrib_id)
     return ImmutableDict(ref)
def tag_corpus(corpus_info,
               corpus_data_files,
               email='',
               tags=ImmutableDict(Docuscope={
                   "return_included_tags": True,
                   "return_excluded_tags": False
               }),
               create_zip_archive=False,
               ngram_count=0,
               ngram_pun=False,
               ngram_per_doc=False,
               chunk_text=False,
               chunk_length=None,
               chunk_offset=None,
               blacklist_path=None,
               blacklist_words='',
               rule_csv=False,
               doc_rule=False,
               defect_count=False,
               name='',
               app_mode=False,
               current_task=None,
               logger=None,
               token_csv=False,
               verbose=True,
               includeTagViewer=False):
    """Tokenize and tag every text in a corpus and write CSV outputs.

    Validates the chunking / n-gram / rule parameters, instantiates the
    requested Tagger (exactly one is supported), tags each text, and writes
    a corpus-level CSV plus optional per-document token, rule and n-gram
    CSVs.  When ``app_mode`` is true, progress is reported through the
    Celery ``current_task`` and ``logger``.

    :param corpus_info: dict with at least "path", "name" and "data" keys.
    :param corpus_data_files: dict of saved file paths per data type.
    :param tags: mapping of tagger name -> tagger init args.  The default is
        an ImmutableDict, so the shared-mutable-default pitfall is avoided.
    :returns: path of the generated corpus-level CSV.
    :raises ValueError: on any invalid parameter combination or when the
        corpus contains no texts.
    """
    print('Starting tag_corpus...')
    tag_corpus_start = time()
    timing = []

    # Validate corpus_info.
    if "path" not in corpus_info or "name" not in corpus_info or "data" not in corpus_info:
        raise ValueError("Invalid corpus_info provided.")

    # Validate parameters
    if chunk_text:
        if chunk_length is None:
            chunk_length = 2000  #default chunk size
        else:
            chunk_length = int(chunk_length)
        if chunk_offset is None:
            chunk_offset = chunk_length  #default offset is chunk length
        else:
            chunk_offset = int(chunk_offset)
        if chunk_length < chunk_offset:
            raise ValueError(
                "Invalid chunking parameters: chunk_offset must be <= chunk_length."
            )
    else:
        if chunk_length is not None or chunk_offset is not None:
            raise ValueError(
                "Text chunking must be enabled to set chunk_length or chunk_offset."
            )

    if ngram_count is None and ngram_pun == True:
        raise ValueError(
            "Ngrams must be enabled to set ngram punctuation count.")

    if ngram_count is None:
        ngram_count = 0

    # ngram_count == 0 means "n-grams disabled"; only 1-3 are real sizes.
    if int(ngram_count) < 0 or int(ngram_count) > 3:
        raise ValueError("Ngram count must be between 1 and 3.")
    else:
        ngram_count = int(ngram_count)

    if name != '':  #add a hyphen to match output naming scheme
        name = name + "-"

    if doc_rule and not rule_csv:
        raise ValueError(
            "Must enable rule counting to enable per document rule information."
        )

    if chunk_text and rule_csv:
        raise ValueError(
            'Rule counting and chunking cannot be performed simultaneously.')

    if chunk_text and token_csv:
        raise ValueError(
            'Token csvs and chunking cannot be performed simultaneously.')

    # Validate blacklist params and retrieve blacklist words
    blacklist = []
    if blacklist_path is not None:
        if not os.path.exists(
                blacklist_path) or blacklist_path.endswith('.txt') is False:
            raise ValueError(
                "Blacklist text file '%s' does not exist. Please supply a valid space-separated .txt file."
                % blacklist_path)
        else:
            try:
                f = open(blacklist_path)
                for line in f:
                    words = line.split()
                    blacklist.extend(words)
            except IOError:
                raise ValueError(
                    "Unable to open blacklist file %s. Please supply a valid space-separated .txt file."
                    % blacklist_path)

    # Or, retrieve blacklisted words from GUI
    elif blacklist_words is not None:
        blacklist = str(blacklist_words).split()
    # Add an id to the corpus_info dict.
    corpus_info["processing_id"] = "".join([
        corpus_info["name"], "_", "-".join(tags.keys()), "_",
        socket.gethostname(), "_",
        str(int(time()))
    ])

    # Validate Taggers.
    tagger_instances = {}
    if len(tags) > 1:
        raise ValueError(
            "Tagging texts with multiple taggers isn't supported yet.")
    for tag_name in tags.keys():
        try:
            __import__("Ity.Taggers." + tag_name + "Tagger")
        # BUG FIX: was a bare `except:`, which swallowed every exception
        # (including KeyboardInterrupt) and masked real errors inside the
        # tagger module.  Only a failed import means "no such Tagger".
        except ImportError:
            raise ValueError("A Tagger module for '%s' tags does not exist." %
                             tag_name)
    is_docuscope = True

    # Instantiate Taggers.
    start = time()
    for tag_name, tag_args in tags.items():
        tagger_name = tag_name + "Tagger"
        tagger_module = getattr(
            __import__("Ity.Taggers", fromlist=tagger_name), tagger_name)
        # Add some additional instantiation arguments for specific taggers.
        # TODO: Clean up Taggers' init() arguments.
        if tag_args is None:
            tagger_init_args = {}
        else:
            tagger_init_args = tag_args
        # custom rules file
        if tag_name == "Docuscope" and (
                "SimpleRule" in corpus_data_files
                and "saved" in corpus_data_files["SimpleRule"]
                and len(corpus_data_files["SimpleRule"]["saved"]) > 0):
            is_docuscope = False
            tagger_init_args.update(
                dictionary_path=corpus_data_files["SimpleRule"]["saved"][0],
                return_untagged_tags=True,
                return_unrecognized_tags=True,
                blacklist=blacklist)
        else:
            tagger_init_args.update(
                return_untagged_tags=True,
                return_unrecognized_tags=True,
                return_excluded_tags=
                False,  # prevents display/tagging of whitespace
                return_included_tags=True,
                blacklist=blacklist,
            )
        # Instantiate this Tagger.
        # optimization: detailed tag data isn't required UNLESS we generate tag-level rule statistics
        if rule_csv or token_csv:
            tagger_init_args.update(return_tag_maps=True)

        tagger_instance = tagger_module(**tagger_init_args)
        tagger_instances[tag_name] = tagger_instance
    # NOTE: tag_name / tagger_instance deliberately leak from this loop and
    # are reused when tagging below (exactly one tagger is allowed).
    timing.append(('Instantiate Taggers', time() - start))

    # Get all the texts in this corpus...if there are any?
    if "Text" not in corpus_info["data"] or len(
            corpus_data_files["Text"]["saved"]) == 0:
        # BUG FIX: was StandardError, which no longer exists on Python 3;
        # ValueError is a StandardError subclass on Python 2 (backward
        # compatible for callers) and matches the other validation errors.
        raise ValueError("No corpus texts to tag!")
    text_paths = corpus_data_files["Text"]["saved"]

    if app_mode:
        # Logging
        logger.info("Email: %s; Corpus: %s; # Texts: %u." %
                    (email, corpus_info["name"],
                     len(corpus_data_files["Text"]["saved"])))

        # Update progress.
        if current_task.request.id is not None:
            current_task.update_state(state='PROGRESS',
                                      meta={
                                          'current': 0.0,
                                          'total': 100.0,
                                          "model_path": corpus_info["name"]
                                      })

    # initialize primary csv
    csv_path = getCSVPath(corpus_info, name, 'gen')
    # assumes the Docuscope tagger is present — TODO confirm for custom rules
    lats = sorted(tagger_instances['Docuscope']._ds_dict.lats)
    header_keys = getHeaderKeys(lats, is_docuscope, defect_count)
    u = open(csv_path, 'wb')  # binary mode: Python 2 csv convention
    uwriter = csv.writer(u, delimiter=',', quoting=csv.QUOTE_MINIMAL)
    uwriter.writerow(header_keys)

    #initialize rule dict
    corpus_map = dict()
    if ngram_count > 0:
        documentNgramCounts = defaultdict(
            int)  # to count number of documents ngrams appear in
        corpusNgramCounts = defaultdict(int)

    start = time()
    tokenizer = RegexTokenizer()
    timing.append(('Tokenizer Initialization Time: ', time() - start))

    # tag each text
    tag_start = time()
    index = 0
    tokenization_time = 0
    tagging_time = 0

    # initialize unreadable files list
    bad_texts = []

    # gleicher, summer 2021 - make this an enumerate to keep track of how much we've done
    # just for printing
    for ct, text_path in enumerate(text_paths):
        if verbose:
            print("file {} of {}".format(ct, len(text_paths)))
        # tokenize
        start = time()
        try:
            tokens = tokenizeText(text_path, defect_count, chunk_length,
                                  chunk_offset, tokenizer)
        # skip texts that can't be tokenized
        except IOError:
            bad_texts.append(text_path)
            continue
        if token_csv:
            token_frame = TokenTransform.transformToFrame(tokens)

        tokenization_time += (time() - start)

        start = time()
        result = tagText(tagger_instance,
                         tag_name,
                         text_path,
                         tokens,
                         corpus_info,
                         chunk=chunk_text,
                         rule_csv=rule_csv,
                         token_csv=token_csv)
        if token_csv:
            tagged_frame = TokenTransform.tagFrameMerge(token_frame, result)
            result["token_csv_name"] = result[
                'text_key'] + '-ubiq-tokens' + '.csv'
            # gleicher - change file names to match the input file names
            # filename = result['text_key']
            filename = os.path.splitext(result['text_name'])[0]
            tokenCSVPath = getCSVPath(corpus_info,
                                      name,
                                      type='token_csv',
                                      docName=filename)
            tagged_frame.to_csv(tokenCSVPath,
                                index=False,
                                header=False,
                                encoding='utf-8')
        else:
            if chunk_text:
                for r in result:
                    r["token_csv_name"] = ""
            else:
                result["token_csv_name"] = ""
        tagging_time += (time() - start)

        # iterate through tokens (or sub-lists of tokens) and calculate token level statistics (if necessary)
        # then delete tokens to free up space
        if chunk_text:
            if defect_count:
                for i in range(len(result)):
                    result[i] = defectProcess(result[i], tokens[0][i])
        else:
            if defect_count:
                result = defectProcess(result, tokens)

        if chunk_text:
            tokens = tokens[1]

        if ngram_count > 0:
            ngram_tokens = ngramProcess(tokens, ngram_pun)

        # done with tokens, free up memory
        del tokens

        # write out primary csv
        if chunk_text:
            for text_dict in result:
                row = result_to_gen_row(
                    text_dict,
                    header_keys,
                )
                uwriter.writerow(row)
        else:
            row = result_to_gen_row(result, header_keys)
            uwriter.writerow(row)

        # update corpus dictionaries
        if rule_csv:
            rule_map = result['rule_map']
            updateCorpusCounts(rule_map, corpus_map)
            if doc_rule:
                perDocRuleCSV(
                    corpus_info, result['text_key'], rule_map
                )  # generate PER document CSVs all in one method (they all need separate writers anyway)
        del result
        if ngram_count > 0:
            docCounts = ngramUpdate(ngram_tokens, documentNgramCounts,
                                    corpusNgramCounts, ngram_count, ngram_pun)
            if ngram_per_doc:
                docName = os.path.splitext(os.path.basename(text_path))[0]
                ngramCSV(documentNgramCounts=None,
                         corpusNgramCounts=docCounts,
                         maxN=ngram_count,
                         corpus_info=corpus_info,
                         name=name,
                         docName=docName)
        if app_mode:
            if current_task.request.id is not None:
                current_task.update_state(state='PROGRESS',
                                          meta={
                                              'current':
                                              float(index + 1) /
                                              len(text_paths) * 100.0,
                                              'total':
                                              100.0
                                          })
        index = index + 1

    u.close()

    timing.append(('Total Tokenization', tokenization_time))
    timing.append(('Total Tagging', tagging_time))
    # write out corpus-wide rule CSV (if applicable)
    if rule_csv:
        ruleCSV(corpus_info, name, corpus_map)

    if ngram_count > 0:
        ngramCSV(documentNgramCounts, corpusNgramCounts, ngram_count,
                 corpus_info, name)

    frame = inspect.currentframe()
    tc_args, _, _, tc_values = inspect.getargvalues(frame)
    buildReadme(tc_args, tc_values, timing, version, blacklist, bad_texts)

    if app_mode:
        # Update progress. (Web version)
        if current_task.request.id is not None:
            current_task.update_state(state='PROGRESS',
                                      meta={
                                          'current': 100.0,
                                          'total': 100.0
                                      })

    if token_csv:
        TVpath = os.path.join(corpus_info["output_path"],
                              corpus_info["provenance"], 'TextViewer.html')
        print(TVpath)
        if includeTagViewer:
            shutil.copyfile('TextViewer.html', TVpath)
    print('tag_corpus finished. Total elapsed time: %.2f seconds.' %
          (time() - tag_corpus_start))

    return csv_path
Beispiel #26
0
class Babel(object):
    """Central controller class that can be used to configure how
    Flask-Babelhg behaves.  Each application that wants to use Flask-Babelhg
    has to create, or run :meth:`init_app` on, an instance of this class
    after the configuration was initialized.
    """

    # Default Babel format names per category; the `None` entries mean
    # "use the locale's own default for that length".
    default_date_formats = ImmutableDict({
        'time': 'medium',
        'date': 'medium',
        'datetime': 'medium',
        'time.short': None,
        'time.medium': None,
        'time.full': None,
        'time.long': None,
        'date.short': None,
        'date.medium': None,
        'date.full': None,
        'date.long': None,
        'datetime.short': None,
        'datetime.medium': None,
        'datetime.full': None,
        'datetime.long': None,
    })

    def __init__(
        self,
        app=None,
        default_locale='en',
        default_timezone='UTC',
        default_domain=None,
        date_formats=None,
        configure_jinja=True,
    ):
        """Store defaults and, when *app* is given, initialize it immediately.

        :param app: optional Flask app; when omitted, call :meth:`init_app`.
        :param default_locale: fallback locale identifier (e.g. ``'en'``).
        :param default_timezone: fallback timezone name (e.g. ``'UTC'``).
        :param default_domain: translation domain; a fresh ``Domain()`` when None.
        :param date_formats: overrides for :attr:`default_date_formats`.
        :param configure_jinja: install Jinja filters/i18n when True.
        """
        self._default_locale = default_locale
        self._default_timezone = default_timezone
        self._default_domain = (default_domain
                                if default_domain is not None else Domain())
        self._date_formats = date_formats
        self._configure_jinja = configure_jinja
        # Cache of parsed babel.Locale objects, keyed by locale string.
        self._locale_cache = {}
        self.app = app
        self.locale_selector_func = None
        self.timezone_selector_func = None

        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Set up this instance for use with *app*, if no app was passed to
        the constructor.
        """
        self.app = app
        app.babel_instance = self
        if not hasattr(app, 'extensions'):
            app.extensions = {}
        app.extensions['babel'] = self

        # setdefault: respect values the application already configured.
        app.config.setdefault('BABEL_DEFAULT_LOCALE', self._default_locale)
        app.config.setdefault('BABEL_DEFAULT_TIMEZONE', self._default_timezone)
        app.config.setdefault('BABEL_DOMAIN', self._default_domain)
        if self._date_formats is None:
            self._date_formats = self.default_date_formats.copy()

        #: a mapping of Babel datetime format strings that can be modified
        #: to change the defaults.  If you invoke :func:`format_datetime`
        #: and do not provide any format string Flask-Babelhg will do the
        #: following things:
        #:
        #: 1.   look up ``date_formats['datetime']``.  By default ``'medium'``
        #:      is returned to enforce medium length datetime formats.
        #: 2.   ``date_formats['datetime.medium'] (if ``'medium'`` was
        #:      returned in step one) is looked up.  If the return value
        #:      is anything but `None` this is used as new format string.
        #:      otherwise the default for that language is used.
        self.date_formats = self._date_formats

        if self._configure_jinja:
            app.jinja_env.filters.update(
                datetimeformat=format_datetime,
                dateformat=format_date,
                timeformat=format_time,
                timedeltaformat=format_timedelta,
                numberformat=format_number,
                decimalformat=format_decimal,
                currencyformat=format_currency,
                percentformat=format_percent,
                scientificformat=format_scientific,
            )
            app.jinja_env.add_extension('jinja2.ext.i18n')
            # Route Jinja's {% trans %} / gettext calls through the active
            # translation domain.
            app.jinja_env.install_gettext_callables(
                lambda x: get_domain().get_translations().ugettext(x),
                lambda s, p, n: get_domain().get_translations().ungettext(
                    s, p, n),
                newstyle=True,
            )

    def localeselector(self, f):
        """Registers a callback function for locale selection.  The default
        behaves as if a function was registered that returns `None` all the
        time.  If `None` is returned, the locale falls back to the one from
        the configuration.

        This has to return the locale as string (eg: ``'de_AT'``, ``'en_US'``)
        """
        assert (self.locale_selector_func is
                None), 'a localeselector function is already registered'
        self.locale_selector_func = f
        return f

    def timezoneselector(self, f):
        """Registers a callback function for timezone selection.  The default
        behaves as if a function was registered that returns `None` all the
        time.  If `None` is returned, the timezone falls back to the one from
        the configuration.

        This has to return the timezone as string (eg: ``'Europe/Vienna'``)
        """
        assert (self.timezone_selector_func is
                None), 'a timezoneselector function is already registered'
        self.timezone_selector_func = f
        return f

    @property
    def default_locale(self):
        """The default locale from the configuration as instance of a
        `babel.Locale` object.
        """
        return Locale.parse(self.app.config['BABEL_DEFAULT_LOCALE'])

    @property
    def default_timezone(self):
        """The default timezone from the configuration as instance of a
        `pytz.timezone` object.
        """
        return timezone(self.app.config['BABEL_DEFAULT_TIMEZONE'])

    @property
    def domain(self):
        """The message domain for the translations as a string."""
        return get_domain()

    def load_locale(self, locale):
        """Load locale by name and cache it. Returns instance of a `babel.locale`
        object.
        """
        rv = self._locale_cache.get(locale)
        if rv is None:
            self._locale_cache[locale] = rv = Locale.parse(locale)
        return rv
Beispiel #27
0
    CELERY_ACCEPT_CONTENT=["pickle", "json", "msgpack", "yaml"],
    CELERY_TIMEZONE=LOCALTZ,
    SENTRY_USER_ATTRS=("email", "first_name", "last_name"),
    SENTRY_INSTALL_CLIENT_JS=True,  # also install client JS
    SENTRY_JS_VERSION="1.1.22",
    # TODO: remove, not needed for recent sentry-js
    SENTRY_JS_PLUGINS=("console", "jquery", "native", "require"),
    SESSION_COOKIE_NAME=None,
    SQLALCHEMY_POOL_RECYCLE=1800,  # 30min. default value in flask_sa is None
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    LOGO_URL=Endpoint("abilian_static", filename="img/logo-abilian-32x32.png"),
    ABILIAN_UPSTREAM_INFO_ENABLED=False,  # upstream info extension
    TRACKING_CODE_SNIPPET="",  # tracking code to insert before </body>
    MAIL_ADDRESS_TAG_CHAR=None,
)
# Freeze the assembled defaults so application code cannot mutate them.
default_config = ImmutableDict(default_config)

# def configure_redis(app):
#     redis.init_app(app)
#
#
# def configure_queue(app):
#     queue.init_app(app, db, sentry)
#
#
# def configure_sentry(app):
#     from flask import session
#
#     sentry.init_app(app)
#
#     @app.before_request
Beispiel #28
0
def app_has_babel(app):
    """Check application instance for configured babel extension."""
    return app.extensions.get('babel') is not None


def self_name(string):
    """Create config key for extension."""
    return 'HUMANIZE_' + string.upper()


# Immutable default configuration for the Humanize extension.
default_config = ImmutableDict({
    # The default locale to work with. When `BABEL_DEFAULT_LOCALE` is
    # available then it is used instead.
    'default_locale': 'en',

    # Use UTC instead of local time for humanize dates and times.
    'use_utc': False,
})


class Humanize(object):
    """Add common humanization utilities, like turning number into a fuzzy
    human-readable duration or into human-readable size, to your flask
    applications.
    """

    # A function uses for locale selection.
    locale_selector_func = None

    def __init__(self, app=None):
Beispiel #29
0
class CtpBee(object):
    """
    ctpbee grew out of my own pain points when building trading projects,
    and aims to be a trading micro-framework with a complete API.
    I hope it will help you !

    """
    # Default backtest parameters (wired in below as LOOPER_PARAMS).
    default_params = {
        'cash': 10000.0,
        'check_submit': True,
        'eos_bar': False,
        'filler': None,
        # NOTE(review): "commision" is misspelled, but code elsewhere
        # presumably reads this exact key — verify before renaming.
        "commision": 0.01,
        'slip_percent': 0.0,
        'slip_fixed': 0.0,
        'slip_open': False,
        'slip_match': True,
        'slip_limit': True,
        'slip_out': False,
        'coc': False,
        'coo': False,
        'int2pnl': True,
        'short_cash': True,
        'fund_start_val': 100.0,
        'fund_mode': False
    }
    default_config = ImmutableDict(
        dict(
            LOG_OUTPUT=True,  # enable log output
            TD_FUNC=False,  # enable the trading (TD) API
            INTERFACE="ctp",  # interface name; defaults to domestic futures CTP
            MD_FUNC=True,  # enable the market-data (MD) API
            XMIN=[],  # k-line periods; any period up to one hour can be generated
            ALL_SUBSCRIBE=False,
            SHARE_MD=False,  # share market data between accounts ---> pending
            SLIPPAGE_COVER=0,  # slippage when covering a long position
            SLIPPAGE_SELL=0,  # slippage when closing a short position
            SLIPPAGE_SHORT=0,  # slippage when selling short
            SLIPPAGE_BUY=0,  # slippage when buying long
            LOOPER_PARAMS=default_params,  # parameters used by the backtester
            SHARED_FUNC=False,  # intraday chart data --> pending optimisation
            REFRESH_INTERVAL=1.5,  # refresh period (s); effective only when refresh=True
            INSTRUMENT_INDEPEND=False,  # independent market feed per strategy
            CLOSE_PATTERN=
            "today",  # for exchanges supporting close-today: "today" closes today's position first, "yesterday" closes yesterday's
            TODAY_EXCHANGE=[Exchange.SHFE.value,
                            Exchange.INE.value],  # exchanges supporting close-today
            AFTER_TIMEOUT=3,  # timeout (s) for `after` thread execution
            TIMER_INTERVAL=1,
            PATTERN="real"))

    config_class = Config
    import_name = None

    # trade api and market api
    market = None
    trader = None
    # NOTE(review): class-level mutable dict — shared across all instances.
    tools = {}

    def __init__(self,
                 name: Text,
                 import_name,
                 action_class: Action or None = None,
                 engine_method: str = "thread",
                 logger_class=None,
                 logger_config=None,
                 refresh: bool = False,
                 risk: RiskLevel = None,
                 instance_path=None):
        """
        name: name of this running core
        import_name: name of the importing package; just pass __name__
        action_class: executor — defaults to the built-in Action, or pass a
            user subclass
        engine_method: the underlying engine used by the Actor model
        logger_class: logger class; may be user-defined
        refresh: whether to actively refresh positions
        risk: risk-management class; subclass RiskLevel to customise
        sim: whether to simulate
            (NOTE(review): documented here but absent from the signature)
        """
        self.start_datetime = datetime.now()
        self.basic_info = None
        self._extensions = {}
        # Fall back to 'ctpbee' when no name is given.
        self.name = name if name else 'ctpbee'
        self.import_name = import_name
        self.engine_method = engine_method
        self.refresh = refresh
        self.active = False
        # Choose whether to use the default logging class.
        if logger_class is None:
            self.logger = VLogger(CP, app_name=self.name)
            self.logger.set_default(name=self.logger.app_name, owner=self.name)
        else:
            if logger_config:
                self.logger = logger_class(logger_config, app_name=self.name)
            else:
                self.logger = logger_class(CP, app_name=self.name)
            self.logger.set_default(name=self.logger.app_name, owner='App')

        self.app_signal = AppSignal(self.name)

        # NOTE(review): the error message mentions async but only "thread"
        # is actually accepted here.
        if engine_method == "thread":
            self.recorder = Recorder(self)
        else:
            raise TypeError("引擎参数错误,只支持 thread 和 async,请检查代码")
        """
              If no risk is specified by default, set the risk_decorator to None
              如果默认不指定action参数, 那么使用设置风控装饰器为空
              """
        if risk is None:
            self.risk_decorator = None
        else:
            self.risk_decorator = risk
        """
        If no action is specified by default, use the default Action class
        如果默认不指定action参数, 那么使用默认的Action类 
        """
        if action_class is None:
            self.action: Action = Action(self)
        else:
            self.action: Action = action_class(self)
        """
        根据action里面的函数更新到CtpBee上面来
        bind the function of action to CtpBee
        """
        """
        If engine_method is specified by default, use the default EventEngine and Recorder or use the engine
            and recorder basis on your choice
        如果不指定engine_method参数,那么使用默认的事件引擎 或者根据你的参数使用不同的引擎和记录器
        """

        if instance_path is None:
            instance_path = self.auto_find_instance_path()
        elif not os.path.isabs(instance_path):
            raise ValueError(
                'If an instance path is provided it must be absolute.'
                ' A relative path was given instead.')
        self.instance_path = instance_path
        self.config = self.make_config()
        self.init_finished = False
        self.qifi = None
        # default monitor and flag
        self.p = None
        self.p_flag = True

        self.r = None
        self.r_flag = True

        self.center: Center = Center(self)
        """ update """
        if self.risk_decorator is not None:
            self.risk_decorator.update_app(self)

        # Bind every public bound method of the action object directly onto
        # this app instance, so e.g. app.buy(...) dispatches to action.buy.
        for x in dir(self.action):
            func = getattr(self.action, x)
            if x.startswith("__"):
                continue
            if ismethod(func):
                setattr(self, func.__name__, func)
        # Register this app in the app-context stack under its name.
        _app_context_ctx.push(self.name, self)

        self.data = []

    def add_data(self, *data):
        """Load historical data for backtesting (looper mode only)."""
        if self.config.get("PATTERN") != "looper":
            raise TypeError("此API仅仅接受回测模式, 请通过配置文件 PATTERN 修改运行模式")
        self.data = data

    def update_action_class(self, action_class):
        # Replace the executor with a user-supplied Action class.
        # NOTE(review): this rejects Action *instances* (isinstance) but does
        # not verify the argument is actually an Action subclass (issubclass)
        # — confirm the intended contract before tightening.
        if isinstance(action_class, Action):
            raise TypeError(
                f"更新action_class出现错误, 你传入的action_class类型为{type(action_class)}")
        self.action = action_class(self)

    def update_risk_gateway(self, gateway_class):
        # Install a risk-control gateway and point it back at this app.
        self.risk_decorator = gateway_class
        self.risk_decorator.update_app(self)

    def make_config(self):
        """ Build the per-instance Config from the immutable class defaults."""
        defaults = dict(self.default_config)
        return self.config_class(self.instance_path, defaults)

    def auto_find_instance_path(self):
        """Locate the instance directory for this app."""
        prefix, package_path = find_package(self.import_name)
        if prefix is None:
            # Single-argument os.path.join returns package_path unchanged.
            return os.path.join(package_path)
        return os.path.join(prefix, 'var', self.name + '-instance')

    @property
    def td_login_status(self):
        """ Trade APIs are expected to expose ``td_status``."""
        return self.trader.td_status

    @property
    def md_login_status(self):
        """ Market-data APIs are expected to expose ``md_status``."""
        return self.market.md_status

    def _running(self, logout=True):
        """Bring up the market/trade APIs according to the current config.

        Both APIs are optional: each is created only when its MD_FUNC /
        TD_FUNC switch is enabled.  When ``self.refresh`` is set, a daemon
        thread running ``refresh_query`` is (re)started.

        :param logout: whether to print the startup banner.
        :raises ConfigError: when no CONNECT_INFO is configured.
        """
        self.active = True
        if "CONNECT_INFO" in self.config.keys():
            info = self.config.get("CONNECT_INFO")
        else:
            raise ConfigError(message="没有相应的登录信息", args=("没有发现登录信息", ))
        show_me = graphic_pattern(__version__, self.engine_method)
        if logout:
            print(show_me)
        MdApi, TdApi = Interface.get_interface(self)
        if self.config.get("MD_FUNC"):
            self.market = MdApi(self.app_signal)
            self.market.connect(info)

        if self.config.get("TD_FUNC"):
            self.trader = TdApi(self.app_signal)
            self.trader.connect(info)

        if self.refresh:
            if self.r is not None:
                # An old refresh thread exists: signal it to stop and wait a
                # little longer than one refresh interval before restarting.
                self.r_flag = False
                sleep(self.config['REFRESH_INTERVAL'] + 1.5)
            # De-duplicated from the original if/else: both branches started
            # an identical daemon thread.
            self.r = Thread(target=refresh_query, args=(self, ), daemon=True)
            self.r.start()
            self.r_flag = True

    def start(self, log_output=True, debug=False):
        """
        Start processing.

        :param log_output: whether to emit log output
        :param debug: whether to enable debug mode ----> pending
        :return:
        """
        if self.config.get("PATTERN") == "real":

            def running_timer(common_signal):
                # Emit an EVENT_TIMER on the shared signal bus every
                # TIMER_INTERVAL seconds; loops until the thread is killed.
                while True:
                    event = Event(type=EVENT_TIMER)
                    common_signal.timer_signal.send(event)
                    sleep(self.config['TIMER_INTERVAL'])

            # NOTE(review): not a daemon thread — release() later terminates
            # it explicitly via end_thread.
            self.timer = Thread(target=running_timer, args=(common_signals, ))
            self.timer.start()

            self.config["LOG_OUTPUT"] = log_output
            self._running(logout=log_output)
        elif self.config.get("PATTERN") == "looper":
            # Backtest mode: swap in the looper interface and replay data.
            self.config["INTERFACE"] = "looper"
            show_me = graphic_pattern(__version__, self.engine_method)
            if log_output:
                print(show_me)
            Trader, Market = Interface.get_interface(app=self)
            self.trader = Trader(self.app_signal, self)
            self.market = Market(self.app_signal)
            print(">>>> 回测接口载入成功")
            self._start_looper()
        else:
            raise ValueError("错误的参数, 仅仅支持")

    def get_result(self, report: bool = False, **kwargs):
        """
        Compute the backtest result and optionally render a report.

        :param report: whether to render a strategy report.
        :param auto_open: (kwargs) let the browser open the report automatically.
        :param zh: (kwargs) render the report in Chinese.
        :return: the report path when ``report`` is set, else the raw result.
        """
        strategys = list(self._extensions.keys())
        end_time = datetime.now()
        # Account data.
        account_data = self.trader.account.get_mapping("balance")
        # Elapsed wall-clock time.  Derived from the full timedelta instead
        # of subtracting hour/minute/second fields independently, which could
        # yield negative components across hour/minute boundaries.
        elapsed = int((end_time - self.start_datetime).total_seconds())
        hours, remainder = divmod(elapsed, 3600)
        minutes, seconds = divmod(remainder, 60)
        cost_time = f"{hours}h {minutes}m {seconds}s"
        # Daily profit and loss.
        net_pnl = self.trader.account.get_mapping("net_pnl")
        # Trade records.
        trade_data = list(map(dumps,
                              self.trader.traded_order_mapping.values()))
        position_data = self.trader.position_detail
        if report:
            path = render_result(self.trader.account.result,
                                 trade_data=trade_data,
                                 strategy=strategys,
                                 net_pnl=net_pnl,
                                 account_data=account_data,
                                 datetimed=end_time,
                                 position_data=position_data,
                                 cost_time=cost_time,
                                 **kwargs)
            print(f"请复制下面的路径到浏览器打开----> \n {path}")
            return path
        return self.trader.account.result

    def add_basic_info(self, info):
        """Attach basic info (fees, size map, ...) — backtest mode only."""
        mode = self.config.get("PATTERN")
        if mode != "looper":
            raise TypeError("此API仅在回测模式下进行调用")
        self.basic_info = info

    def _start_looper(self):
        """Replay the loaded historical data through the trader (backtest)."""
        d = VessData(*self.data)
        if self.basic_info is not None:
            self.trader.account.basic_info = self.basic_info
        """ trader初始化参数"""
        # Initialise the trader's parameters from the current config.
        self.trader.init_params(params=self.config)
        # Drive the feed manually with next(); VessData is only required to
        # implement __next__, so a plain for-loop is deliberately avoided.
        while True:
            try:
                p = next(d)
                self.trader(p)
            except StopIteration:
                # Feed exhausted: backtest done, results can be generated.
                self.logger.info("回测结束,正在生成结果")
                break
            except ValueError:
                raise ValueError("数据存在问题, 请检查")

    def remove_extension(self, extension_name: Text) -> None:
        """Remove a plugin by name; silently ignores unknown names."""
        # pop with a default replaces the membership-test + del two-step and
        # matches the style of del_extension below.
        self._extensions.pop(extension_name, None)

    def add_extension(self, extension: CtpbeeApi):
        """Register a plugin; any same-named plugin is dropped first."""
        self._extensions.pop(extension.extension_name, None)
        # init_app presumably inserts the extension into self._extensions —
        # confirm against CtpbeeApi.init_app.
        extension.init_app(self)

    def suspend_extension(self, extension_name):
        """Freeze a plugin; returns False when the name is unknown."""
        extension = self._extensions.get(extension_name, None)
        if not extension:
            return False
        extension.frozen = True
        return True

    def get_extension(self, extension_name):
        """Return the plugin registered under *extension_name*, or None."""
        # dict.get already returns None for missing keys.
        return self._extensions.get(extension_name)

    def enable_extension(self, extension_name):
        """Unfreeze a plugin; returns False when the name is unknown."""
        extension = self._extensions.get(extension_name, None)
        if not extension:
            return False
        extension.frozen = False
        return True

    def del_extension(self, extension_name):
        """Drop a plugin by name (same effect as remove_extension)."""
        self._extensions.pop(extension_name, None)

    def reload(self):
        """ Reload the interfaces: close both APIs, then reconnect."""
        if self.market is not None:
            self.market.close()
        if self.trader is not None:
            self.trader.close()
        # Give the processing queues time to drain before reconnecting.
        sleep(3)
        self.market, self.trader = None, None
        self._running()

    def release(self):
        """ Release the account and exit safely."""
        try:
            if self.market is not None:
                self.market.close()
            if self.trader is not None:
                self.trader.close()
            self.market, self.trader = None, None
            if self.r is not None:
                """ 强行终结掉线程 """
                end_thread(self.r)
            if self.timer is not None:
                end_thread(self.timer)
        except AttributeError:
            # self.timer is only set in "real" mode (in start()); reaching
            # here without it raises AttributeError, which is expected.
            pass
Beispiel #30
0
class CtpBee(object):
    """
    ctpbee grew out of my own pain points when building trading projects and
    aims to be a trading micro-framework with a complete API.  Much of the
    design is borrowed from Flask.  A CtpBee object logs in a single
    account; when several accounts are logged in, current_app / switch_app /
    get_app provide full support.  Every account object carries its own
    order-sending API for use where strategies are implemented, and the
    ExtAbstract plugin base can be subclassed for quick extension.
    In short, the goal is to greatly simplify the development workflow!
    """

    # Default configuration.
    default_config = ImmutableDict(
        dict(LOG_OUTPUT=True,
             TD_FUNC=False,
             INTERFACE="ctp",
             MD_FUNC=True,
             XMIN=[],
             ALL_SUBSCRIBE=False,
             SHARE_MD=False))
    config_class = Config
    import_name = None
    # Data-recording flag (name-mangled to _CtpBee__active; set in _load_ext).
    __active = False

    # trade api and market api
    market = None
    trader = None

    # Plugin system.
    # TODO: once shared memory blocks are available, consider keeping this
    # outside the class.
    # NOTE(review): class-level mutable dict — shared across all instances.
    extensions = {}

    def __init__(self, name: Text, import_name, instance_path=None):
        """ Initialise the app core. """
        # NOTE(review): `name` is also declared below as a
        # locked_cached_property; this plain assignment presumably pre-seeds
        # the cached value — confirm the descriptor supports assignment.
        self.name = name
        self.import_name = import_name
        self.event_engine = EventEngine()
        if instance_path is None:
            instance_path = self.auto_find_instance_path()
        elif not os.path.isabs(instance_path):
            raise ValueError(
                'If an instance path is provided it must be absolute.'
                ' A relative path was given instead.')
        self.risk_control = RiskController(self.name)
        self.recorder = Recorder(self, self.event_engine)
        self.instance_path = instance_path
        self.config = self.make_config()
        self.interface = Interface()
        # Register this app in the app-context stack under its name.
        _app_context_ctx.push(self.name, self)

    def make_config(self):
        """ Build the per-instance Config from the immutable class defaults."""
        defaults = dict(self.default_config)
        return self.config_class(self.instance_path, defaults)

    def auto_find_instance_path(self):
        """Locate the instance directory for this app."""
        prefix, package_path = find_package(self.import_name)
        if prefix is None:
            # Single-argument os.path.join returns package_path unchanged.
            return os.path.join(package_path)
        return os.path.join(prefix, 'var', self.name + '-instance')

    def _load_ext(self):
        """Load the market/trade APIs per the current config; each is optional."""
        self.__active = True
        if "CONNECT_INFO" in self.config.keys():
            info = self.config.get("CONNECT_INFO")
        else:
            raise ConfigError(message="没有相应的登录信息", args=("没有发现登录信息", ))
        MdApi, TdApi = self.interface.get_interface(self)
        if self.config.get("MD_FUNC"):
            self.market = MdApi(self.event_engine)
            self.market.connect(info)

        if self.config.get("TD_FUNC"):
            self.trader = TdApi(self.event_engine)
            self.trader.connect(info)
            # Presumably gives the trade API time to finish its login
            # handshake — confirm whether 0.5s is necessary/sufficient.
            sleep(0.5)

    @locked_cached_property
    def name(self):
        """App name: the import name, or for __main__ the script file's
        basename without its extension."""
        if self.import_name != '__main__':
            return self.import_name
        main_file = getattr(sys.modules['__main__'], '__file__', None)
        if main_file is None:
            return '__main__'
        stem, _ext = os.path.splitext(os.path.basename(main_file))
        return stem

    def start(self, log_output=True):
        """Start: run the event engine (if stopped) and load the APIs."""
        if not self.event_engine.status:
            self.event_engine.start()
        self.config["LOG_OUTPUT"] = log_output
        self._load_ext()

    def stop(self):
        """ Stop the event engine (API connections are left untouched)."""
        if self.event_engine.status:
            self.event_engine.stop()

    @check(type="trader")
    def send_order(self, order_req: OrderRequest) -> AnyStr:
        """Send an order after risk control; returns whatever the trade API's
        send_order returns (AnyStr per the annotation), or None when risk
        control blocks the order."""
        result = self.risk_control.send(self)
        # Presumably a collection of per-rule verdicts; any False blocks the
        # order — confirm against RiskController.send.
        if False in result:
            event = Event(type=EVENT_LOG, data="风控阻止下单")
            self.event_engine.put(event)
            return
        send_monitor.send(order_req)
        return self.trader.send_order(order_req)

    @check(type="trader")
    def cancle_order(self, cancle_req: CancelRequest):
        """Cancel an order.

        NOTE(review): the method name misspells "cancel" but is public API —
        renaming would break callers, so it is kept.
        """
        cancle_monitor.send(cancle_req)
        self.trader.cancel_order(cancle_req)

    @check(type="market")
    def subscribe(self, symbol: AnyStr):
        """Subscribe to market data; accepts "EXCHANGE.symbol" or a bare symbol."""
        parts = symbol.split(".")
        if len(parts) > 1:
            symbol = parts[1]
        return self.market.subscribe(symbol)

    @check(type="trader")
    def query_position(self):
        """Query current positions via the trade API."""
        return self.trader.query_position()

    @check(type="trader")
    def transfer(self, req, type):
        """
        Bank transfer.

        req currency attribute
        [ "USD", "HKD", "CNY"]
        :param req:
        :param type: transfer kind; shadows the builtin ``type`` but is part
            of the public keyword signature, so the name is kept.
        :return:
        """
        self.trader.transfer(req, type=type)

    @check(type="trader")
    def query_account_register(self, req):
        # Thin pass-through to the trade API.
        self.trader.query_account_register(req)

    @check(type="trader")
    def query_bank_account_money(self, req):
        # Thin pass-through to the trade API.
        self.trader.query_bank_account_money(req)

    @check(type="trader")
    def query_transfer_serial(self, req):
        # Thin pass-through to the trade API.
        self.trader.query_transfer_serial(req)

    @check(type="trader")
    def query_bank(self):
        # Not implemented yet.
        pass

    @check(type="trader")
    def query_account(self):
        """Query account funds via the trade API."""
        return self.trader.query_account()

    def remove_extension(self, extension_name: Text) -> None:
        """Remove a plugin by name; silently ignores unknown names."""
        # pop with a default replaces the membership-test + del two-step.
        self.extensions.pop(extension_name, None)

    def add_extensison(self, extension: ExtAbstract):
        """Register a plugin; an existing plugin with the same name wins.

        NOTE(review): the method name misspells "extension" but is public
        API, so it is kept for backward compatibility.
        """
        # setdefault preserves the original first-registration-wins policy.
        self.extensions.setdefault(extension.extension_name, extension)

    def reload(self):
        """ Reload the interfaces: close whichever APIs exist, then reconnect."""
        if self.market is not None:
            self.market.close()
        if self.trader is not None:
            self.trader.close()
        self._load_ext()

    def __del__(self):
        """Release the account and exit safely.

        NOTE(review): __del__ runs at a non-deterministic time (if at all);
        during interpreter shutdown globals may already be torn down.
        """
        if self.market is not None:
            self.market.close()
        if self.trader is not None:
            self.trader.close()
        self.market, self.trader = None, None

        del self.event_engine