Example #1
class DBConnectionSpec(HasTraits):
    database = Str('massspecdata_import')
    username = Str('root')
    password = Password('Argon')
    #    host = Str('129.138.12.131')
    host = Str('localhost')

    def make_url(self):
        return '{}:{}@{}/{}'.format(self.username, self.password, self.host,
                                    self.database)

    def make_connection_dict(self):
        return dict(name=self.database,
                    username=self.username,
                    password=self.password,
                    host=self.host)

    def traits_view(self):
        return View(
            VGroup(
                Item('host'),
                Item('database'),
                Item('username'),
                Item('password'),
            ))
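A minimal usage sketch for the spec above (hypothetical values; assumes the usual traits.api / traitsui.api imports of HasTraits, Str, Password, View, VGroup and Item):

# Hypothetical driver code, not part of the original example.
spec = DBConnectionSpec(host='localhost', database='massspecdata_import',
                        username='root', password='secret')
print(spec.make_url())               # -> 'root:secret@localhost/massspecdata_import'
print(spec.make_connection_dict())   # -> {'name': 'massspecdata_import', 'username': 'root', ...}
spec.configure_traits()              # opens the dialog built by traits_view()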
Example #2
class TextEditorDemo(HasTraits):
    """Defines the TextEditor demo class."""

    # Define a trait for each of three TextEditor variants:
    string_trait = Str("sample string")
    int_trait = Int(1)
    password = Password()

    # TextEditor display with multi-line capability (for a string):
    text_str_group = Group(
        Item('string_trait', style='simple', label='Simple'),
        Item('_'),
        Item('string_trait', style='custom', label='Custom'),
        Item('_'),
        Item('string_trait', style='text', label='Text'),
        Item('_'),
        Item('string_trait', style='readonly', label='ReadOnly'),
        label='String',
    )

    # TextEditor display without multi-line capability (for an integer):
    text_int_group = Group(
        Item('int_trait', style='simple', label='Simple', id="simple_int"),
        Item('_'),
        Item('int_trait', style='custom', label='Custom', id="custom_int"),
        Item('_'),
        Item('int_trait', style='text', label='Text', id="text_int"),
        Item('_'),
        Item(
            'int_trait',
            style='readonly',
            label='ReadOnly',
            id="readonly_int",
        ),
        label='Integer',
    )

    # TextEditor display with secret typing capability (for Password traits):
    text_pass_group = Group(
        Item('password', style='simple', label='Simple'),
        Item('_'),
        Item('password', style='custom', label='Custom'),
        Item('_'),
        Item('password', style='text', label='Text'),
        Item('_'),
        Item('password', style='readonly', label='ReadOnly'),
        label='Password',
    )

    # The view includes one group per data type. These will be displayed
    # on separate tabbed panels:
    traits_view = View(
        text_str_group,
        text_pass_group,
        text_int_group,
        title='TextEditor',
        buttons=['OK'],
    )
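As with most TraitsUI demos, the class above is presumably driven by a small launcher; a sketch of the standard pattern:

# Create the demo object; when run as a script, open the tabbed TextEditor view.
demo = TextEditorDemo()

if __name__ == '__main__':
    demo.configure_traits()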
Example #3
class _dbconn_view(HasTraits):

    host = Str('localhost')
    dbname = Str('seishub')
    user = Str('seishub')
    password = Password()

    trait_view = View(Item('host'), Item('dbname'), Item('user'),
                      Item('password'))
Example #4
class TextEditorDemo(HasTraits):
    """ This class specifies the details of the TextEditor demo.
    """

    # Define a trait for each of three variants
    string_trait = Str("sample string")
    int_trait = Int(1)
    password = Password()

    # TextEditor display without multi-line capability (for various traits):
    text_int_group = Group(Item('int_trait', style='simple', label='Simple'),
                           Item('_'),
                           Item('int_trait', style='custom', label='Custom'),
                           Item('_'),
                           Item('int_trait', style='text', label='Text'),
                           Item('_'),
                           Item('int_trait',
                                style='readonly',
                                label='ReadOnly'),
                           label='Integer')

    # TextEditor display with multi-line capability (for various traits):
    text_str_group = Group(Item('string_trait', style='simple',
                                label='Simple'),
                           Item('_'),
                           Item('string_trait', style='custom',
                                label='Custom'),
                           Item('_'),
                           Item('string_trait', style='text', label='Text'),
                           Item('_'),
                           Item('string_trait',
                                style='readonly',
                                label='ReadOnly'),
                           label='String')

    # TextEditor display with secret typing capability (for Password traits):
    text_pass_group = Group(Item('password', style='simple', label='Simple'),
                            Item('_'),
                            Item('password', style='custom', label='Custom'),
                            Item('_'),
                            Item('password', style='text', label='Text'),
                            Item('_'),
                            Item('password',
                                 style='readonly',
                                 label='ReadOnly'),
                            label='Password')

    # The view includes one group per data type.  These will be displayed
    # on separate tabbed panels.
    view1 = View(text_int_group,
                 text_str_group,
                 text_pass_group,
                 title='TextEditor',
                 buttons=['OK'])
Example #5
class MdAPIDB(FactorDB):
    """MdAPIDB"""
    UserID = Str("118073", arg_type="String", label="UserID", order=0)
    Pwd = Password("shuntai11", arg_type="String", label="Password", order=1)
    BrokerID = Str("9999", arg_type="String", label="BrokerID", order=2)
    FrontAddr = Str("tcp://180.168.146.187:10010",
                    arg_type="String",
                    label="前置机地址",
                    order=3)
    ConDir = Directory(label="流文件目录", arg_type="Directory", order=4)
    IDs = ListStr(label="订阅ID", arg_type="MultiOption", order=5)

    def __init__(self, sys_args={}, config_file=None, **kwargs):
        super().__init__(sys_args=sys_args,
                         config_file=(__QS_ConfigPath__ + os.sep +
                                      "MdAPIDBConfig.json"
                                      if config_file is None else config_file),
                         **kwargs)
        self._MdAPI = None
        self._CacheData = {}
        # Attributes inherited from the base class
        self.Name = "MdAPIDB"
        return

    def connect(self):
        self._MdAPI = _MdApi(fdb=self)  # create the API object
        self._MdAPI.createFtdcMdApi(
            self.ConDir)  # create the C++-side object; the argument is the directory used to store the .con files
        self._MdAPI.registerFront(self.FrontAddr)  # register the front server address
        self._MdAPI.init()  # initialize and connect to the front server
        return 0

    def disconnect(self):
        self._MdAPI.disconnect()
        self._MdAPI = None
        return 0

    def isAvailable(self):
        return (self._MdAPI is not None)

    # ------------------------------- Table operations ---------------------------------
    @property
    def TableNames(self):
        return ["MarketData"]

    def getTable(self, table_name, args={}):
        return _TickTable(name=table_name, fdb=self, sys_args=args)
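A hypothetical connection sketch for MdAPIDB; _MdApi, _TickTable and __QS_ConfigPath__ are assumed to come from the surrounding QuantStudio-style package and are not defined in this snippet:

# Hypothetical usage; the trait defaults above (UserID, Pwd, BrokerID, FrontAddr) are used as-is.
mdb = MdAPIDB()
mdb.ConDir = "/tmp/ctp_con"      # assumed directory for the CTP .con stream files
mdb.connect()                    # creates the market-data API and connects to the front server
tick_table = mdb.getTable("MarketData")
print(mdb.TableNames)            # -> ['MarketData']
mdb.disconnect()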
Example #6
class TinySoftDB(FactorDB):
    """TinySoft"""
    InstallDir = Directory(label="安装目录", arg_type="Directory", order=0)
    IPAddr = Str("tsl.tinysoft.com.cn",
                 arg_type="String",
                 label="IP地址",
                 order=1)
    Port = Range(low=0,
                 high=65535,
                 value=443,
                 arg_type="Integer",
                 label="端口",
                 order=2)
    User = Str("", arg_type="String", label="用户名", order=3)
    Pwd = Password("", arg_type="String", label="密码", order=4)

    def __init__(self, sys_args={}, config_file=None, **kwargs):
        super().__init__(sys_args=sys_args,
                         config_file=(__QS_ConfigPath__ + os.sep +
                                      "TinySoftDBConfig.json"
                                      if config_file is None else config_file),
                         **kwargs)
        self.Name = "TinySoftDB"
        self._TSLPy = None
        self._TableInfo = None  # table metadata in the database
        self._FactorInfo = None  # field metadata for tables in the database
        self._InfoFilePath = __QS_LibPath__ + os.sep + "TinySoftDBInfo.hdf5"  # path of the database-info file
        self._InfoResourcePath = __QS_MainPath__ + os.sep + "Resource" + os.sep + "TinySoftDBInfo.xlsx"  # path of the database-info source file
        self._TableInfo, self._FactorInfo = updateInfo(self._InfoFilePath,
                                                       self._InfoResourcePath)
        return

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_TSLPy"] = (True if self.isAvailable() else False)
        return state

    def __setstate__(self, state):
        super().__setstate__(state)
        if self._TSLPy: self.connect()
        else: self._TSLPy = None

    def connect(self):
        if not (os.path.isdir(self.InstallDir)):
            raise __QS_Error__("TinySoft 的安装目录设置有误!")
        elif self.InstallDir not in sys.path:
            sys.path.append(self.InstallDir)
        import TSLPy3
        self._TSLPy = TSLPy3
        ErrorCode = self._TSLPy.ConnectServer(self.IPAddr, int(self.Port))
        if ErrorCode != 0:
            self._TSLPy = None
            raise __QS_Error__("TinySoft 服务器连接失败!")
        Rslt = self._TSLPy.LoginServer(self.User, self.Pwd)
        if Rslt is not None:
            ErrorCode, Msg = Rslt
            if ErrorCode != 0:
                self._TSLPy = None
                raise __QS_Error__("TinySoft 登录失败: " + Msg)
        else:
            raise __QS_Error__("TinySoft 登录失败!")
        return 0

    def disconnect(self):
        self._TSLPy.Disconnect()
        self._TSLPy = None

    def isAvailable(self):
        if self._TSLPy is not None:
            return self._TSLPy.Logined()
        else:
            return False

    @property
    def TableNames(self):
        if self._TableInfo is not None:
            return ["交易日历"] + self._TableInfo.index.tolist()
        else:
            return ["交易日历"]

    def getTable(self, table_name, args={}):
        if table_name == "交易日历":
            return _CalendarTable(name=table_name, fdb=self, sys_args=args)
        TableClass = self._TableInfo.loc[table_name, "TableClass"]
        return eval("_" + TableClass + "(name='" + table_name +
                    "', fdb=self, sys_args=args)")

    # Given a start date and an end date, return the exchange's trading days
    def getTradeDay(self,
                    start_date=None,
                    end_date=None,
                    exchange="SSE",
                    **kwargs):
        if exchange not in ("SSE", "SZSE"):
            raise __QS_Error__("不支持交易所: '%s' 的交易日序列!" % exchange)
        if start_date is None: start_date = dt.date(1900, 1, 1)
        if end_date is None: end_date = dt.date.today()
        CodeStr = "SetSysParam(pn_cycle(), cy_day());return MarketTradeDayQk(inttodate({StartDate}), inttodate({EndDate}));"
        CodeStr = CodeStr.format(StartDate=start_date.strftime("%Y%m%d"),
                                 EndDate=end_date.strftime("%Y%m%d"))
        ErrorCode, Data, Msg = self._TSLPy.RemoteExecute(CodeStr, {})
        if ErrorCode != 0:
            raise __QS_Error__("TinySoft 执行错误: " + Msg.decode("gbk"))
        return list(map(lambda x: dt.date(*self._TSLPy.DecodeDate(x)), Data))

    # Get all A-share IDs, current or historical, as of the given date; returns every A share that has appeared in the market. Currently only the current set is supported.
    def _getAllAStock(self, date=None, is_current=True):  # TODO
        if date is None: date = dt.date.today()
        CodeStr = "return getBK('深证A股;中小企业板;创业板;上证A股');"
        ErrorCode, Data, Msg = self._TSLPy.RemoteExecute(CodeStr, {})
        if ErrorCode != 0:
            raise __QS_Error__("TinySoft 执行错误: " + Msg.decode("gbk"))
        IDs = []
        for iID in Data:
            iID = iID.decode("gbk")
            IDs.append(iID[2:] + "." + iID[:2])
        return IDs

    # Given an index ID, get the stock IDs in the index as of the given date. is_current=True: IDs on that date; False: all IDs that have appeared up to that date. Currently only the current constituents are supported.
    def getStockID(self, index_id, date=None, is_current=True):  # TODO
        if index_id == "全体A股":
            return self._getAllAStock(date=date, is_current=is_current)
        if date is None: date = dt.date.today()
        CodeStr = "return GetBKByDate('{IndexID}',IntToDate({Date}));"
        CodeStr = CodeStr.format(IndexID="".join(reversed(
            index_id.split("."))),
                                 Date=date.strftime("%Y%m%d"))
        ErrorCode, Data, Msg = self._TSLPy.RemoteExecute(CodeStr, {})
        if ErrorCode != 0:
            raise __QS_Error__("TinySoft 执行错误: " + Msg.decode("gbk"))
        IDs = []
        for iID in Data:
            iID = iID.decode("gbk")
            IDs.append(iID[2:] + "." + iID[:2])
        return IDs

    # Given a futures code, get all contract IDs for that future as of the given date. is_current=True: IDs on that date; False: all IDs that have appeared up to that date. Currently only contracts listed on that date are supported.
    def getFutureID(self, future_code="IF", date=None, is_current=True):
        if date is None: date = dt.date.today()
        if is_current:
            CodeStr = "EndT:= {Date}T;return GetFuturesID('{FutureID}', EndT);"
        else:
            raise __QS_Error__("目前不支持提取历史 ID")
        CodeStr = CodeStr.format(FutureID="".join(future_code.split(".")),
                                 Date=date.strftime("%Y%m%d"))
        ErrorCode, Data, Msg = self._TSLPy.RemoteExecute(CodeStr, {})
        if ErrorCode != 0:
            raise __QS_Error__("TinySoft 执行错误: " + Msg.decode("gbk"))
        return [iID.decode("gbk") for iID in Data]
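A hypothetical session with TinySoftDB; the install directory and credentials below are placeholders, and a local TinySoft (TSLPy3) installation is required:

import datetime as dt

tsdb = TinySoftDB()
tsdb.InstallDir = r"C:\Tinysoft\Analyse.NET"   # placeholder path to the TinySoft installation
tsdb.User, tsdb.Pwd = "myuser", "mypassword"   # placeholder credentials
tsdb.connect()
days = tsdb.getTradeDay(start_date=dt.date(2020, 1, 1),
                        end_date=dt.date(2020, 12, 31), exchange="SSE")
ids = tsdb.getStockID("000300.SH")             # current constituents of an index, e.g. CSI 300
tsdb.disconnect()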
Example #7
class QSSQLObject(__QS_Object__):
    """基于关系数据库的对象"""
    Name = Str("关系数据库")
    DBType = Enum("MySQL",
                  "SQL Server",
                  "Oracle",
                  "sqlite3",
                  arg_type="SingleOption",
                  label="数据库类型",
                  order=0)
    DBName = Str("Scorpion", arg_type="String", label="数据库名", order=1)
    IPAddr = Str("127.0.0.1", arg_type="String", label="IP地址", order=2)
    Port = Range(low=0,
                 high=65535,
                 value=3306,
                 arg_type="Integer",
                 label="端口",
                 order=3)
    User = Str("root", arg_type="String", label="用户名", order=4)
    Pwd = Password("", arg_type="String", label="密码", order=5)
    TablePrefix = Str("", arg_type="String", label="表名前缀", order=6)
    CharSet = Enum("utf8",
                   "gbk",
                   "gb2312",
                   "gb18030",
                   "cp936",
                   "big5",
                   arg_type="SingleOption",
                   label="字符集",
                   order=7)
    Connector = Enum("default",
                     "cx_Oracle",
                     "pymssql",
                     "mysql.connector",
                     "pymysql",
                     "sqlite3",
                     "pyodbc",
                     arg_type="SingleOption",
                     label="连接器",
                     order=8)
    DSN = Str("", arg_type="String", label="数据源", order=9)
    SQLite3File = File(label="sqlite3文件", arg_type="File", order=10)
    AdjustTableName = Bool(False, arg_type="Bool", label="调整表名", order=11)

    def __init__(self, sys_args={}, config_file=None, **kwargs):
        self._Connection = None  # the connection object
        self._Connector = None  # the database connector actually in use
        self._AllTables = []  # all table names in the database, used to work around case sensitivity in queries
        self._PID = None  # PID of the process that created the connection
        return super().__init__(sys_args=sys_args,
                                config_file=config_file,
                                **kwargs)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_Connection"] = (True if self.isAvailable() else False)
        return state

    def __setstate__(self, state):
        super().__setstate__(state)
        if self._Connection: self._connect()
        else: self._Connection = None

    @property
    def Connection(self):
        if self._Connection is not None:
            if os.getpid() != self._PID: self._connect()  # reconnect if the process ID has changed
        return self._Connection

    def _connect(self):
        self._Connection = None
        if (self.Connector == "cx_Oracle") or ((self.Connector == "default")
                                               and (self.DBType == "Oracle")):
            try:
                import cx_Oracle
                self._Connection = cx_Oracle.connect(
                    self.User, self.Pwd,
                    cx_Oracle.makedsn(self.IPAddr, str(self.Port),
                                      self.DBName))
            except Exception as e:
                Msg = ("'%s' 尝试使用 cx_Oracle 连接(%s@%s:%d)数据库 '%s' 失败: %s" %
                       (self.Name, self.User, self.IPAddr, self.Port,
                        self.DBName, str(e)))
                self._QS_Logger.error(Msg)
                if self.Connector != "default": raise e
            else:
                self._Connector = "cx_Oracle"
        elif (self.Connector
              == "pymssql") or ((self.Connector == "default") and
                                (self.DBType == "SQL Server")):
            try:
                import pymssql
                self._Connection = pymssql.connect(server=self.IPAddr,
                                                   port=str(self.Port),
                                                   user=self.User,
                                                   password=self.Pwd,
                                                   database=self.DBName,
                                                   charset=self.CharSet)
            except Exception as e:
                Msg = ("'%s' 尝试使用 pymssql 连接(%s@%s:%d)数据库 '%s' 失败: %s" %
                       (self.Name, self.User, self.IPAddr, self.Port,
                        self.DBName, str(e)))
                self._QS_Logger.error(Msg)
                if self.Connector != "default": raise e
            else:
                self._Connector = "pymssql"
        elif (self.Connector
              == "mysql.connector") or ((self.Connector == "default") and
                                        (self.DBType == "MySQL")):
            try:
                import mysql.connector
                self._Connection = mysql.connector.connect(
                    host=self.IPAddr,
                    port=str(self.Port),
                    user=self.User,
                    password=self.Pwd,
                    database=self.DBName,
                    charset=self.CharSet,
                    autocommit=True)
            except Exception as e:
                Msg = (
                    "'%s' 尝试使用 mysql.connector 连接(%s@%s:%d)数据库 '%s' 失败: %s" %
                    (self.Name, self.User, self.IPAddr, self.Port, self.DBName,
                     str(e)))
                self._QS_Logger.error(Msg)
                if self.Connector != "default": raise e
            else:
                self._Connector = "mysql.connector"
        elif self.Connector == "pymysql":
            try:
                import pymysql
                self._Connection = pymysql.connect(host=self.IPAddr,
                                                   port=self.Port,
                                                   user=self.User,
                                                   password=self.Pwd,
                                                   db=self.DBName,
                                                   charset=self.CharSet)
            except Exception as e:
                Msg = ("'%s' 尝试使用 pymysql 连接(%s@%s:%d)数据库 '%s' 失败: %s" %
                       (self.Name, self.User, self.IPAddr, self.Port,
                        self.DBName, str(e)))
                self._QS_Logger.error(Msg)
                raise e
            else:
                self._Connector = "pymysql"
        elif (self.Connector == "sqlite3") or ((self.Connector == "default")
                                               and (self.DBType == "sqlite3")):
            try:
                import sqlite3
                self._Connection = sqlite3.connect(self.SQLite3File)
            except Exception as e:
                Msg = ("'%s' 尝试使用 sqlite3 连接数据库 '%s' 失败: %s" %
                       (self.Name, self.SQLite3File, str(e)))
                self._QS_Logger.error(Msg)
                raise e
            else:
                self._Connector = "sqlite3"
        if self._Connection is None:
            if self.Connector not in ("default", "pyodbc"):
                self._Connection = None
                Msg = ("'%s' 连接数据库时错误: 不支持该连接器(connector) '%s'" %
                       (self.Name, self.Connector))
                self._QS_Logger.error(Msg)
                raise __QS_Error__(Msg)
            elif self.DSN:
                try:
                    import pyodbc
                    self._Connection = pyodbc.connect("DSN=%s;PWD=%s" %
                                                      (self.DSN, self.Pwd))
                except Exception as e:
                    Msg = ("'%s' 尝试使用 pyodbc 连接数据库 'DSN: %s' 失败: %s" %
                           (self.Name, self.DSN, str(e)))
                    self._QS_Logger.error(Msg)
                    raise e
            else:
                try:
                    import pyodbc
                    self._Connection = pyodbc.connect(
                        "DRIVER={%s};DATABASE=%s;SERVER=%s;UID=%s;PWD=%s" %
                        (self.DBType, self.DBName, self.IPAddr + "," +
                         str(self.Port), self.User, self.Pwd))
                except Exception as e:
                    Msg = ("'%s' 尝试使用 pyodbc 连接(%s@%s:%d)数据库 '%s' 失败: %s" %
                           (self.Name, self.User, self.IPAddr, self.Port,
                            self.DBName, str(e)))
                    self._QS_Logger.error(Msg)
                    raise e
            self._Connector = "pyodbc"
        self._PID = os.getpid()
        return 0

    def connect(self):
        self._connect()
        if not self.AdjustTableName:
            self._AllTables = []
        else:
            self._AllTables = self.getDBTable()
        return 0

    def disconnect(self):
        if self._Connection is not None:
            try:
                self._Connection.close()
            except Exception as e:
                self._QS_Logger.warning("'%s' 断开数据库错误: %s" %
                                        (self.Name, str(e)))
            finally:
                self._Connection = None
        return 0

    def isAvailable(self):
        return (self._Connection is not None)

    def cursor(self, sql_str=None):
        if self._Connection is None:
            Msg = ("'%s' 获取 cursor 失败: 数据库尚未连接!" % (self.Name, ))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        if os.getpid() != self._PID: self._connect()  # reconnect if the process ID has changed
        try:  # reconnect if the connection has been dropped
            Cursor = self._Connection.cursor()
        except:
            self._connect()
            Cursor = self._Connection.cursor()
        if sql_str is None: return Cursor
        if self.AdjustTableName:
            for iTable in self._AllTables:
                sql_str = re.sub(iTable, iTable, sql_str, flags=re.IGNORECASE)
        Cursor.execute(sql_str)
        return Cursor

    def fetchall(self, sql_str):
        Cursor = self.cursor(sql_str=sql_str)
        Data = Cursor.fetchall()
        Cursor.close()
        return Data

    def execute(self, sql_str):
        if self._Connection is None:
            Msg = ("'%s' 执行 SQL 命令失败: 数据库尚未连接!" % (self.Name, ))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        if os.getpid() != self._PID: self._connect()  # reconnect if the process ID has changed
        try:
            Cursor = self._Connection.cursor()
        except:
            self._connect()
            Cursor = self._Connection.cursor()
        Cursor.execute(sql_str)
        self._Connection.commit()
        Cursor.close()
        return 0

    def getDBTable(self, table_format=None):
        try:
            if self.DBType == "SQL Server":
                SQLStr = "SELECT Name FROM SysObjects Where XType='U'"
                TableField = "Name"
            elif self.DBType == "MySQL":
                SQLStr = "SELECT table_name FROM information_schema.tables WHERE table_schema='" + self.DBName + "' AND table_type='base table'"
                TableField = "table_name"
            elif self.DBType == "Oracle":
                SQLStr = "SELECT table_name FROM user_tables WHERE TABLESPACE_NAME IS NOT NULL AND user='******'"
                TableField = "table_name"
            elif self.DBType == "sqlite3":
                SQLStr = "SELECT name FROM sqlite_master WHERE type='table'"
                TableField = "name"
            else:
                raise __QS_Error__("不支持的数据库类型 '%s'" % self.DBType)
            if isinstance(table_format, str) and table_format:
                SQLStr += (" WHERE %s LIKE '%s' " % (TableField, table_format))
            AllTables = self.fetchall(SQLStr)
        except Exception as e:
            Msg = ("'%s' 调用方法 getDBTable 时错误: %s" % (self.Name, str(e)))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        else:
            return [rslt[0] for rslt in AllTables]

    def renameDBTable(self, old_table_name, new_table_name):
        SQLStr = "ALTER TABLE " + self.TablePrefix + old_table_name + " RENAME TO " + self.TablePrefix + new_table_name
        try:
            self.execute(SQLStr)
        except Exception as e:
            Msg = ("'%s' 调用方法 renameDBTable 将表 '%s' 重命名为 '%s' 时错误: %s" %
                   (self.Name, old_table_name, str(e)))
            self._QS_Logger.error(Msg)
            raise e
        else:
            self._QS_Logger.info("'%s' 调用方法 renameDBTable 将表 '%s' 重命名为 '%s'" %
                                 (self.Name, old_table_name, new_table_name))
        return 0

    # Create a table; field_types: {field name: data type}
    def createDBTable(self,
                      table_name,
                      field_types,
                      primary_keys=[],
                      index_fields=[]):
        if self.DBType == "MySQL":
            SQLStr = "CREATE TABLE IF NOT EXISTS %s (" % (self.TablePrefix +
                                                          table_name)
            for iField, iDataType in field_types.items():
                SQLStr += "`%s` %s, " % (iField, iDataType)
            if primary_keys:
                SQLStr += "PRIMARY KEY (`" + "`,`".join(primary_keys) + "`))"
            else:
                SQLStr = SQLStr[:-2] + ")"  # drop the trailing ", " before closing
            SQLStr += " ENGINE=InnoDB DEFAULT CHARSET=" + self.CharSet
            IndexType = "BTREE"
        elif self.DBType == "sqlite3":
            SQLStr = "CREATE TABLE IF NOT EXISTS %s (" % (self.TablePrefix +
                                                          table_name)
            for iField, iDataType in field_types.items():
                SQLStr += "`%s` %s, " % (iField, iDataType)
            if primary_keys:
                SQLStr += "PRIMARY KEY (`" + "`,`".join(primary_keys) + "`))"
            else:
                SQLStr = SQLStr[:-2] + ")"  # drop the trailing ", " before closing
            IndexType = None
        else:
            raise __QS_Error__("不支持的数据库类型 '%s'" % self.DBType)
        try:
            self.execute(SQLStr)
        except Exception as e:
            Msg = ("'%s' 调用方法 createDBTable 在数据库中创建表 '%s' 时错误: %s" %
                   (self.Name, table_name, str(e)))
            self._QS_Logger.error(Msg)
            raise e
        else:
            self._QS_Logger.info("'%s' 调用方法 createDBTable 在数据库中创建表 '%s'" %
                                 (self.Name, table_name))
        if index_fields:
            try:
                self.addIndex(table_name + "_index",
                              table_name,
                              fields=index_fields,
                              index_type=IndexType)
            except Exception as e:
                self._QS_Logger.warning(
                    "'%s' 调用方法 createDBTable 在数据库中创建表 '%s' 时错误: %s" %
                    (self.Name, table_name, str(e)))
        return 0

    def deleteDBTable(self, table_name):
        SQLStr = "DROP TABLE %s" % (self.TablePrefix + table_name)
        try:
            self.execute(SQLStr)
        except Exception as e:
            Msg = ("'%s' 调用方法 deleteDBTable 从数据库中删除表 '%s' 时错误: %s" %
                   (self.Name, table_name, str(e)))
            self._QS_Logger.error(Msg)
            raise e
        else:
            self._QS_Logger.info("'%s' 调用方法 deleteDBTable 从数据库中删除表 '%s'" %
                                 (self.Name, table_name))
        return 0

    def addIndex(self, index_name, table_name, fields, index_type="BTREE"):
        if index_type is not None:
            SQLStr = "CREATE INDEX " + index_name + " USING " + index_type + " ON " + self.TablePrefix + table_name + "(" + ", ".join(
                fields) + ")"
        else:
            SQLStr = "CREATE INDEX " + index_name + " ON " + self.TablePrefix + table_name + "(" + ", ".join(
                fields) + ")"
        try:
            self.execute(SQLStr)
        except Exception as e:
            Msg = ("'%s' 调用方法 addIndex 为表 '%s' 添加索引时错误: %s" %
                   (self.Name, table_name, str(e)))
            self._QS_Logger.error(Msg)
            raise e
        else:
            self._QS_Logger.info("'%s' 调用方法 addIndex 为表 '%s' 添加索引 '%s'" %
                                 (self.Name, table_name, index_name))
        return 0

    def getFieldDataType(self, table_format=None, ignore_fields=[]):
        try:
            if self.DBType == "sqlite3":
                AllTables = self.getDBTable(table_format=table_format)
                Rslt = []
                for iTable in AllTables:
                    iSQLStr = "PRAGMA table_info('" + iTable + "')"
                    iRslt = pd.DataFrame(self.fetchall(iSQLStr),
                                         columns=[
                                             "cid", "Field", "DataType",
                                             "notnull", "dflt_value", "pk"
                                         ])
                    iRslt["Table"] = iTable
                if Rslt:
                    Rslt = pd.concat(Rslt).drop(
                        labels=["cid", "notnull", "dflt_value", "pk"],
                        axis=1).loc[:, ["Table", "Field", "DataType"]].values
            else:
                if self.DBType == "MySQL":
                    SQLStr = (
                        "SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM information_schema.columns WHERE table_schema='%s' "
                        % self.DBName)
                    TableField, ColField = "TABLE_NAME", "COLUMN_NAME"
                elif self.DBType == "SQL Server":
                    SQLStr = (
                        "SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM information_schema.columns WHERE table_schema='%s' "
                        % self.DBName)
                    TableField, ColField = "TABLE_NAME", "COLUMN_NAME"
                elif self.DBType == "Oracle":
                    SQLStr = (
                        "SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM user_tab_columns"
                    )
                    TableField, ColField = "TABLE_NAME", "COLUMN_NAME"
                else:
                    raise __QS_Error__("不支持的数据库类型 '%s'" % self.DBType)
                if isinstance(table_format, str) and table_format:
                    SQLStr += ("AND %s LIKE '%s' " %
                               (TableField, table_format))
                if ignore_fields:
                    SQLStr += "AND " + ColField + " NOT IN ('" + "', '".join(
                        ignore_fields) + "') "
                SQLStr += ("ORDER BY %s, %s" % (TableField, ColField))
                Rslt = self.fetchall(SQLStr)
        except Exception as e:
            Msg = ("'%s' 调用方法 getFieldDataType 获取字段数据类型信息时错误: %s" %
                   (self.Name, str(e)))
            self._QS_Logger.error(Msg)
            raise e
        return pd.DataFrame(Rslt, columns=["Table", "Field", "DataType"])

    # Add fields; field_types: {field name: data type}
    def addField(self, table_name, field_types):
        SQLStr = "ALTER TABLE %s " % (self.TablePrefix + table_name)
        SQLStr += "ADD COLUMN ("
        for iField in field_types:
            SQLStr += "%s %s," % (iField, field_types[iField])
        SQLStr = SQLStr[:-1] + ")"
        try:
            self.execute(SQLStr)
        except Exception as e:
            Msg = ("'%s' 调用方法 addField 为表 '%s' 添加字段时错误: %s" %
                   (self.Name, table_name, str(e)))
            self._QS_Logger.error(Msg)
            raise e
        else:
            self._QS_Logger.info(
                "'%s' 调用方法 addField 为表 '%s' 添加字段 ’%s'" %
                (self.Name, table_name, str(list(field_types.keys()))))
        return 0

    def renameField(self, table_name, old_field_name, new_field_name):
        try:
            if self.DBType != "sqlite3":
                SQLStr = "ALTER TABLE " + self.TablePrefix + table_name
                SQLStr += " CHANGE COLUMN `" + old_field_name + "` `" + new_field_name + "`"
                self.execute(SQLStr)
            else:
                # Rename the original table to a temporary table
                SQLStr = "ALTER TABLE %s RENAME TO %s"
                TempTableName = genAvailableName("TempTable",
                                                 self.getDBTable())
                self.execute(SQLStr % (self.TablePrefix + table_name,
                                       self.TablePrefix + TempTableName))
                # Create the new table
                FieldTypes = OrderedDict()
                FieldDataType = self.getFieldDataType(
                    table_format=table_name
                ).loc[:,
                      ["Field", "DataType"]].set_index(["Field"
                                                        ]).iloc[:,
                                                                0].to_dict()
                for iField, iDataType in FieldDataType.items():
                    iDataType = ("text" if iDataType == "string" else "real")
                    if iField == old_field_name:
                        FieldTypes[new_field_name] = iDataType
                    else:
                        FieldTypes[iField] = iDataType
                self.createTable(table_name, field_types=FieldTypes)
                # Copy the data into the new table
                OldFieldNames = ", ".join(FieldDataType.keys())
                NewFieldNames = ", ".join(FieldTypes)
                SQLStr = "INSERT INTO %s (datetime, code, %s) SELECT datetime, code, %s FROM %s"
                Cursor = self.cursor(
                    SQLStr % (self.TablePrefix + table_name, NewFieldNames,
                              OldFieldNames, self.TablePrefix + TempTableName))
                Conn = self.Connection
                Conn.commit()
                # Drop the temporary table
                Cursor.execute("DROP TABLE %s" %
                               (self.TablePrefix + TempTableName, ))
                Conn.commit()
                Cursor.close()
        except Exception as e:
            Msg = (
                "'%s' 调用方法 renameField 将表 '%s' 中的字段 '%s' 重命名为 '%s' 时错误: %s" %
                (self.Name, table_name, old_field_name, new_field_name,
                 str(e)))
            self._QS_Logger.error(Msg)
            raise e
        else:
            self._QS_Logger.info(
                "'%s' 调用方法 renameField 在将表 '%s' 中的字段 '%s' 重命名为 '%s'" %
                (self.Name, table_name, old_field_name, new_field_name))
        return 0

    def deleteField(self, table_name, field_names):
        if not field_names: return 0
        try:
            if self.DBType != "sqlite3":
                SQLStr = "ALTER TABLE " + self.TablePrefix + table_name
                for iField in field_names:
                    SQLStr += " DROP COLUMN `" + iField + "`,"
                self.execute(SQLStr[:-1])
            else:
                # Rename the original table to a temporary table
                SQLStr = "ALTER TABLE %s RENAME TO %s"
                TempTableName = genAvailableName("TempTable",
                                                 self.getDBTable())
                self.execute(SQLStr % (self.TablePrefix + table_name,
                                       self.TablePrefix + TempTableName))
                # Create the new table
                FieldTypes = OrderedDict()
                FieldDataType = self.getFieldDataType(
                    table_format=table_name
                ).loc[:,
                      ["Field", "DataType"]].set_index(["Field"
                                                        ]).iloc[:,
                                                                0].to_dict()
                FactorIndex = list(set(FieldDataType).difference(field_names))
                for iField in FactorIndex:
                    FieldTypes[iField] = ("text" if FieldDataType[iField]
                                          == "string" else "real")
                self.createTable(table_name, field_types=FieldTypes)
                # Copy the data into the new table
                FactorNameStr = ", ".join(FactorIndex)
                SQLStr = "INSERT INTO %s (datetime, code, %s) SELECT datetime, code, %s FROM %s"
                Cursor = self.cursor(
                    SQLStr % (self.TablePrefix + table_name, FactorNameStr,
                              FactorNameStr, self.TablePrefix + TempTableName))
                Conn = self.Connection
                Conn.commit()
                # Drop the temporary table
                Cursor.execute("DROP TABLE %s" %
                               (self.TablePrefix + TempTableName, ))
                Conn.commit()
                Cursor.close()
        except Exception as e:
            Msg = ("'%s' 调用方法 deleteField 删除表 '%s' 中的字段 '%s' 时错误: %s" %
                   (self.Name, table_name, str(field_names), str(e)))
            self._QS_Logger.error(Msg)
            raise e
        else:
            self._QS_Logger.info("'%s' 调用方法 deleteField 删除表 '%s' 中的字段 '%s'" %
                                 (self.Name, table_name, str(field_names)))
        return 0

    def truncateDBTable(self, table_name):
        if self.DBType == "sqlite3":
            SQLStr = "DELETE FROM %s" % (self.TablePrefix + table_name)
        else:
            SQLStr = "TRUNCATE TABLE %s" % (self.TablePrefix + table_name)
        try:
            self.execute(SQLStr)
        except Exception as e:
            Msg = ("'%s' 调用方法 truncateDBTable 清空数据库中的表 '%s' 时错误: %s" %
                   (self.Name, table_name, str(e)))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        else:
            self._QS_Logger.info("'%s' 调用方法 truncateDBTable 清空数据库中的表 '%s'" %
                                 (self.Name, table_name))
        return 0
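A hypothetical end-to-end sketch of the QSSQLObject API above; the connection parameters are placeholders:

db = QSSQLObject()
db.DBType, db.DBName = "MySQL", "Scorpion"
db.IPAddr, db.Port = "127.0.0.1", 3306
db.User, db.Pwd = "root", "secret"              # placeholder credentials
db.connect()                                    # "default" connector + MySQL -> mysql.connector
print(db.getDBTable())                          # tables in the configured schema
db.createDBTable("demo_table",
                 {"datetime": "DATETIME", "code": "VARCHAR(32)", "close": "DOUBLE"},
                 primary_keys=["datetime", "code"])
db.execute("INSERT INTO demo_table (datetime, code, close) "
           "VALUES ('2020-01-01', '000001.SZ', 10.5)")
print(db.fetchall("SELECT * FROM demo_table"))
db.deleteDBTable("demo_table")
db.disconnect()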
Example #8
class MayaviViewer(HasTraits):
    """
    This class represents a Mayavi based viewer for the particles.  They
    are queried from a running solver.
    """

    particle_arrays = List(Instance(ParticleArrayHelper), [])
    pa_names = List(Str, [])

    interpolator = Instance(InterpolatorView)

    # The default scalar to load up when running the viewer.
    scalar = Str("rho")

    scene = Instance(MlabSceneModel, ())

    ########################################
    # Traits to pull data from a live solver.
    live_mode = Bool(False,
                     desc='if data is obtained from a running solver '
                     'or from saved files')

    shell = Button('Launch Python Shell')
    host = Str('localhost',
               enter_set=True,
               auto_set=False,
               desc='machine to connect to')
    port = Int(8800,
               enter_set=True,
               auto_set=False,
               desc='port to use to connect to solver')
    authkey = Password('pysph',
                       enter_set=True,
                       auto_set=False,
                       desc='authorization key')
    host_changed = Bool(True)
    client = Instance(MultiprocessingClient)
    controller = Property(depends_on='live_mode, host_changed')

    ########################################
    # Traits to view saved solver output.
    files = List(Str, [])
    directory = Directory()
    current_file = Str('', desc='the file being viewed currently')
    update_files = Button('Refresh')
    file_count = Range(low='_low',
                       high='_n_files',
                       value=0,
                       desc='the file counter')
    play = Bool(False, desc='if all files are played automatically')
    play_delay = Float(0.2, desc='the delay between loading files')
    loop = Bool(False, desc='if the animation is looped')
    # This is len(files) - 1.
    _n_files = Int(0)
    _low = Int(0)

    ########################################
    # Timer traits.
    timer = Instance(Timer)
    interval = Float(
        5.0,
        enter_set=True,
        auto_set=False,
        desc='suggested frequency in seconds with which plot is updated')

    ########################################
    # Solver info/control.
    current_time = Float(0.0, desc='the current time in the simulation')
    time_step = Float(0.0, desc='the time-step of the solver')
    iteration = Int(0, desc='the current iteration number')
    pause_solver = Bool(False, desc='if the solver should be paused')

    ########################################
    # Movie.
    record = Bool(False, desc='if PNG files are to be saved for animation')
    frame_interval = Range(1, 100, 5, desc='the interval between screenshots')
    movie_directory = Str
    # internal counters.
    _count = Int(0)
    _frame_count = Int(0)
    _last_time = Float
    _solver_data = Any
    _file_name = Str
    _particle_array_updated = Bool
    _doing_update = Bool(False)
    _poll_interval = Float(5.0)

    ########################################
    # The layout of the dialog created
    view = View(HSplit(
        Group(
            Group(
                Group(
                    Group(
                        Item(name='directory'),
                        Item(name='current_file'),
                        Item(name='file_count'),
                        padding=0,
                    ),
                    HGroup(
                        Item(name='play'),
                        Item(name='play_delay', label='Delay'),
                        Item(name='loop'),
                        Item(name='update_files', show_label=False),
                        padding=0,
                    ),
                    padding=0,
                    label='Saved Data',
                    selected=True,
                    enabled_when='not live_mode',
                ),
                Group(
                    Group(Item(name='live_mode'), ),
                    Group(
                        Item(name='host'),
                        Item(name='port'),
                        Item(name='authkey'),
                        enabled_when='live_mode',
                    ),
                    label='Connection',
                ),
                layout='tabbed',
            ),
            Group(
                Group(
                    Item(name='current_time',
                         style='readonly',
                         format_str='%.4e'),
                    Item(name='pause_solver', enabled_when='live_mode'),
                    Item(name='iteration', style='readonly'),
                    Item(name='interval', enabled_when='live_mode'),
                    Item(name='time_step', style='readonly',
                         format_str='%.4e'),
                    columns=2,
                    label='Solver',
                ),
                Group(
                    Item(name='record'),
                    Item(name='frame_interval'),
                    Item(name='movie_directory'),
                    label='Movie',
                ),
                layout='tabbed',
            ),
            Group(Item(name='particle_arrays',
                       style='custom',
                       show_label=False,
                       editor=ListEditor(use_notebook=True,
                                         deletable=False,
                                         page_name='.name')),
                  Item(name='interpolator', style='custom', show_label=False),
                  layout='tabbed'),
            Item(name='shell', show_label=False),
        ),
        Group(
            Item('scene',
                 editor=SceneEditor(scene_class=MayaviScene),
                 height=400,
                 width=600,
                 show_label=False), )),
                resizable=True,
                title='PySPH Particle Viewer',
                height=640,
                width=1024,
                handler=ViewerHandler)

    ######################################################################
    # `MayaviViewer` interface.
    ######################################################################
    def on_close(self):
        self._handle_particle_array_updates()

    @on_trait_change('scene:activated')
    def start_timer(self):
        if not self.live_mode:
            # No need for the timer if we are rendering files.
            return

        # Just accessing the timer will start it.
        t = self.timer
        if not t.IsRunning():
            t.Start(int(self._poll_interval * 1000))

    @on_trait_change('scene:activated')
    def update_plot(self):

        # No need to do this if files are being used.
        if self._doing_update or not self.live_mode:
            return

        # do not update if solver is paused
        if self.pause_solver:
            return

        if self.client is None:
            self.host_changed = True

        controller = self.controller
        if controller is None:
            return

        try:
            start = time.time()
            self._doing_update = True
            self.current_time = t = controller.get_t()
            self.time_step = controller.get_dt()
            self.iteration = controller.get_count()

            arrays = []
            for idx, name in enumerate(self.pa_names):
                pa = controller.get_named_particle_array(name)
                arrays.append(pa)
                pah = self.particle_arrays[idx]
                pah.trait_set(particle_array=pa, time=t)

            self.interpolator.particle_arrays = arrays

            total = time.time() - start
            if total * 3 > self._poll_interval or total * 5 < self._poll_interval:
                self._poll_interval = max(3 * total, self.interval)
                self._interval_changed(self._poll_interval)
            if self.record:
                self._do_snap()
        finally:
            self._doing_update = False

    def run_script(self, path):
        """Execute a script in the namespace of the viewer.
        """
        pas = self.particle_arrays
        if len(pas) == 0 or pas[0].plot is None:
            do_after(2000, self.run_script, path)
            return

        with open(path) as fp:
            data = fp.read()
            ns = self._get_shell_namespace()
            exec(compile(data, path, 'exec'), ns)

    ######################################################################
    # Private interface.
    ######################################################################
    def _do_snap(self):
        """Generate the animation."""
        p_arrays = self.particle_arrays
        if len(p_arrays) == 0:
            return
        if self.current_time == self._last_time:
            return

        if len(self.movie_directory) == 0:
            controller = self.controller
            output_dir = controller.get_output_directory()
            movie_dir = os.path.join(output_dir, 'movie')
            self.movie_directory = movie_dir
        else:
            movie_dir = self.movie_directory
        if not os.path.exists(movie_dir):
            os.mkdir(movie_dir)

        interval = self.frame_interval
        count = self._count
        if count % interval == 0:
            fname = 'frame%06d.png' % (self._frame_count)
            p_arrays[0].scene.save_png(os.path.join(movie_dir, fname))
            self._frame_count += 1
            self._last_time = self.current_time
        self._count += 1

    @on_trait_change('host,port,authkey')
    def _mark_reconnect(self):
        if self.live_mode:
            self.host_changed = True

    @cached_property
    def _get_controller(self):
        ''' get the controller, also sets the iteration count '''
        if not self.live_mode:
            return None

        reconnect = self.host_changed
        if not reconnect:
            try:
                c = self.client.controller
            except Exception as e:
                logger.info('Error: no connection or connection closed: '
                            'reconnecting: %s' % e)
                reconnect = True
                self.client = None
            else:
                try:
                    self.client.controller.get_count()
                except IOError:
                    self.client = None
                    reconnect = True

        if reconnect:
            self.host_changed = False
            try:
                if MultiprocessingClient.is_available((self.host, self.port)):
                    self.client = MultiprocessingClient(address=(self.host,
                                                                 self.port),
                                                        authkey=self.authkey)
                else:
                    logger.info('Could not connect: Multiprocessing Interface'
                                ' not available on %s:%s' %
                                (self.host, self.port))
                    return None
            except Exception as e:
                logger.info('Could not connect: check if solver is '
                            'running:%s' % e)
                return None
            c = self.client.controller
            self.iteration = c.get_count()

        if self.client is None:
            return None
        else:
            return self.client.controller

    def _client_changed(self, old, new):
        if not self.live_mode:
            return

        self._clear()
        if new is None:
            return
        else:
            self.pa_names = self.client.controller.get_particle_array_names()

        self.particle_arrays = [
            self._make_particle_array_helper(self.scene, x)
            for x in self.pa_names
        ]
        self.interpolator = InterpolatorView(scene=self.scene)
        do_later(self.update_plot)

        output_dir = self.client.controller.get_output_directory()
        config_file = os.path.join(output_dir, 'mayavi_config.py')
        if os.path.exists(config_file):
            do_later(self.run_script, config_file)
        else:
            # Turn on the legend for the first particle array.
            if len(self.particle_arrays) > 0:
                self.particle_arrays[0].trait_set(show_legend=True,
                                                  show_time=True)

    def _timer_event(self):
        # catch all Exceptions else timer will stop
        try:
            self.update_plot()
        except Exception as e:
            logger.info('Exception: %s caught in timer_event' % e)

    def _interval_changed(self, value):
        t = self.timer
        if t is None:
            return
        if t.IsRunning():
            t.Stop()
            interval = max(value, self._poll_interval)
            t.Start(int(interval * 1000))

    def _timer_default(self):
        return Timer(int(self._poll_interval * 1000), self._timer_event)

    def _pause_solver_changed(self, value):
        if self.live_mode:
            c = self.controller
            if c is None:
                return
            if value:
                c.pause_on_next()
            else:
                c.cont()

    def _record_changed(self, value):
        if value:
            self._do_snap()

    def _files_changed(self, value):
        if len(value) == 0:
            self._n_files = 0
            return
        else:
            d = os.path.dirname(os.path.abspath(value[0]))
            self.movie_directory = os.path.join(d, 'movie')
            self.trait_set(directory=d, trait_change_notify=False)
        self._n_files = len(value) - 1
        self._frame_count = 0
        self._count = 0
        self.frame_interval = 1
        fc = self.file_count
        self.file_count = 0
        if fc == 0:
            # Force an update when our original file count is 0.
            self._file_count_changed(fc)
        t = self.timer
        if not self.live_mode:
            if t.IsRunning():
                t.Stop()
        else:
            if not t.IsRunning():
                t.Stop()
                t.Start(self._poll_interval * 1000)

    def _file_count_changed(self, value):
        # Save out any updates for the previous file if needed.
        self._handle_particle_array_updates()
        if not self.files:
            return
        # Load the new file.
        value = min(value, len(self.files) - 1)
        fname = self.files[value]
        if not os.path.exists(fname):
            print("File %s is missing, ignoring!" % fname)
            return
        self._file_name = fname
        self.current_file = os.path.basename(fname)
        # Code to read the file, create particle array and setup the helper.
        data = load(fname)
        solver_data = data["solver_data"]
        arrays = data["arrays"]
        self._solver_data = solver_data
        self.current_time = t = float(solver_data['t'])
        self.time_step = float(solver_data['dt'])
        self.iteration = int(solver_data['count'])
        names = list(arrays.keys())
        pa_names = self.pa_names

        if len(pa_names) == 0:
            self.interpolator = InterpolatorView(scene=self.scene)
            self.pa_names = names
            pas = []
            for name in names:
                pa = arrays[name]
                pah = self._make_particle_array_helper(self.scene, name)
                # Must set this after setting the scene.
                pah.trait_set(particle_array=pa, time=t)
                pas.append(pah)
            self.particle_arrays = pas
        else:
            for idx, name in enumerate(pa_names):
                pa = arrays[name]
                pah = self.particle_arrays[idx]
                pah.trait_set(particle_array=pa, time=t)

        self.interpolator.particle_arrays = list(arrays.values())

        if self.record:
            self._do_snap()

    def _loop_changed(self, value):
        if value and self.play:
            self._play_changed(self.play)

    def _play_changed(self, value):
        t = self.timer
        if value:
            t.Stop()
            t.callable = self._play_event
            t.Start(1000 * self.play_delay)
        else:
            t.Stop()
            t.callable = self._timer_event

    def _clear(self):
        self.pa_names = []
        self.scene.mayavi_scene.children[:] = []

    def _play_event(self):
        nf = self._n_files
        pc = self.file_count
        pc += 1
        if pc > nf:
            if self.loop:
                pc = 0
            else:
                self.timer.Stop()
                pc = nf
        self.file_count = pc
        self._handle_particle_array_updates()

    def _play_delay_changed(self):
        if self.play:
            self._play_changed(self.play)

    def _scalar_changed(self, value):
        for pa in self.particle_arrays:
            pa.scalar = value

    def _update_files_fired(self):
        fc = self.file_count
        if len(self.files) == 0:
            files = get_files_in_dir(self.directory)
        else:
            files = glob_files(self.files[fc])
        sort_file_list(files)
        self.files = files
        if len(files) > 0:
            fc = min(len(files) - 1, fc)
            self.file_count = fc
            if self.play:
                self._play_changed(self.play)

    def _shell_fired(self):
        ns = self._get_shell_namespace()
        obj = PythonShellView(ns=ns)
        obj.edit_traits()

    def _get_shell_namespace(self):
        pas = {}
        for i, x in enumerate(self.particle_arrays):
            pas[i] = x
            pas[x.name] = x
        return dict(viewer=self,
                    particle_arrays=pas,
                    interpolator=self.interpolator,
                    scene=self.scene,
                    mlab=self.scene.mlab)

    def _directory_changed(self, d):
        files = get_files_in_dir(d)
        if len(files) > 0:
            self._clear()
            sort_file_list(files)
            self.files = files
            self.file_count = min(self.file_count, len(files) - 1)
        else:
            pass
        config_file = os.path.join(d, 'mayavi_config.py')
        if os.path.exists(config_file):
            self.run_script(config_file)

    def _live_mode_changed(self, value):
        if value:
            self._file_name = ''
            self.client = None
            self._clear()
            self._mark_reconnect()
            self.start_timer()
        else:
            self.client = None
            self._clear()
            self.timer.Stop()

    def _particle_array_helper_updated(self, value):
        self._particle_array_updated = True

    def _handle_particle_array_updates(self):
        # Called when the particle array helper fires an updated event.
        if self._particle_array_updated and self._file_name:
            sd = self._solver_data
            arrays = [x.particle_array for x in self.particle_arrays]
            detailed = self._requires_detailed_output(arrays)
            dump(self._file_name,
                 arrays,
                 sd,
                 detailed_output=detailed,
                 only_real=False)
            self._particle_array_updated = False

    def _requires_detailed_output(self, arrays):
        detailed = False
        for pa in arrays:
            props = set(pa.properties.keys())
            output = set(pa.output_property_arrays)
            diff = props - output
            for prop in diff:
                array = pa.get(prop)
                if (array.max() - array.min()) > 0:
                    detailed = True
                    break
            if detailed:
                break
        return detailed

    def _make_particle_array_helper(self, scene, name):
        pah = ParticleArrayHelper(scene=scene, name=name, scalar=self.scalar)
        pah.on_trait_change(self._particle_array_helper_updated, 'updated')
        return pah
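
# --- Added note (not part of the original snippet) ---------------------------
# The factory above connects a ParticleArrayHelper's ``updated`` event to a
# callback with ``on_trait_change``. A minimal, self-contained sketch of that
# Traits observer pattern follows; ``Helper`` and ``on_updated`` are
# hypothetical names used only for illustration (assumes the ``traits``
# package is installed).
from traits.api import Event, HasTraits, Str


class Helper(HasTraits):
    name = Str()
    updated = Event()  # fired whenever the helper changes something

    def touch(self):
        self.updated = True  # assigning to an Event trait notifies listeners


def on_updated():
    print("helper updated")


helper = Helper(name="fluid")
helper.on_trait_change(on_updated, 'updated')
helper.touch()  # prints "helper updated"
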
示例#9
0
class QSSQLObject(__QS_Object__):
    """基于关系数据库的对象"""
    DBType = Enum("MySQL",
                  "SQL Server",
                  "Oracle",
                  arg_type="SingleOption",
                  label="数据库类型",
                  order=0)
    DBName = Str("Scorpion", arg_type="String", label="数据库名", order=1)
    IPAddr = Str("127.0.0.1", arg_type="String", label="IP地址", order=2)
    Port = Range(low=0,
                 high=65535,
                 value=3306,
                 arg_type="Integer",
                 label="端口",
                 order=3)
    User = Str("root", arg_type="String", label="用户名", order=4)
    Pwd = Password("shuntai11", arg_type="String", label="密码", order=5)
    TablePrefix = Str("", arg_type="String", label="表名前缀", order=6)
    CharSet = Enum("utf8",
                   "gbk",
                   "gb2312",
                   "gb18030",
                   "cp936",
                   "big5",
                   arg_type="SingleOption",
                   label="字符集",
                   order=7)
    Connector = Enum("default",
                     "cx_Oracle",
                     "pymssql",
                     "mysql.connector",
                     "pyodbc",
                     arg_type="SingleOption",
                     label="连接器",
                     order=8)
    DSN = Str("", arg_type="String", label="数据源", order=9)

    def __init__(self, sys_args={}, config_file=None, **kwargs):
        self._Connection = None
        self._PID = None  # process ID recorded when the DB connection was created
        return super().__init__(sys_args=sys_args,
                                config_file=config_file,
                                **kwargs)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_Connection"] = (True if self.isAvailable() else False)
        return state

    def __setstate__(self, state):
        super().__setstate__(state)
        if self._Connection: self._connect()
        else: self._Connection = None

    def connect(self):
        self._Connection = None
        if (self.Connector == "cx_Oracle") or ((self.Connector == "default")
                                               and (self.DBType == "Oracle")):
            try:
                import cx_Oracle
                self._Connection = cx_Oracle.connect(
                    self.User, self.Pwd,
                    cx_Oracle.makedsn(self.IPAddr, str(self.Port),
                                      self.DBName))
            except Exception as e:
                if self.Connector != "default": raise e
        elif (self.Connector
              == "pymssql") or ((self.Connector == "default") and
                                (self.DBType == "SQL Server")):
            try:
                import pymssql
                self._Connection = pymssql.connect(server=self.IPAddr,
                                                   port=str(self.Port),
                                                   user=self.User,
                                                   password=self.Pwd,
                                                   database=self.DBName,
                                                   charset=self.CharSet)
            except Exception as e:
                if self.Connector != "default": raise e
        elif (self.Connector
              == "mysql.connector") or ((self.Connector == "default") and
                                        (self.DBType == "MySQL")):
            try:
                import mysql.connector
                self._Connection = mysql.connector.connect(
                    host=self.IPAddr,
                    port=str(self.Port),
                    user=self.User,
                    password=self.Pwd,
                    database=self.DBName,
                    charset=self.CharSet,
                    autocommit=True)
            except Exception as e:
                if self.Connector != "default": raise e
        if self._Connection is None:
            if self.Connector not in ("default", "pyodbc"):
                self._Connection = None
                raise __QS_Error__("不支持该连接器(connector) : " + self.Connector)
            else:
                import pyodbc
                if self.DSN:
                    self._Connection = pyodbc.connect("DSN=%s;PWD=%s" %
                                                      (self.DSN, self.Pwd))
                else:
                    self._Connection = pyodbc.connect(
                        "DRIVER={%s};DATABASE=%s;SERVER=%s;UID=%s;PWD=%s" %
                        (self.DBType, self.DBName, self.IPAddr + "," +
                         str(self.Port), self.User, self.Pwd))
                self.Connector = "pyodbc"
        self._PID = os.getpid()
        return 0

    def disconnect(self):
        if self._Connection is not None:
            try:
                self._Connection.close()
            except Exception as e:
                raise e
            finally:
                self._Connection = None
        return 0

    def isAvailable(self):
        return (self._Connection is not None)

    def cursor(self, sql_str=None):
        if self._Connection is None:
            raise __QS_Error__("%s尚未连接!" % self.__doc__)
        if os.getpid() != self._PID: self.connect()  # reconnect if the process ID has changed
        Cursor = self._Connection.cursor()
        if sql_str is None: return Cursor
        Cursor.execute(sql_str)
        return Cursor

    def fetchall(self, sql_str):
        Cursor = self.cursor(sql_str=sql_str)
        Data = Cursor.fetchall()
        Cursor.close()
        return Data

    def execute(self, sql_str):
        Cursor = self._Connection.cursor()
        Cursor.execute(sql_str)
        self._Connection.commit()
        Cursor.close()
        return 0

    def addIndex(self, index_name, table_name, fields, index_type="BTREE"):
        SQLStr = "CREATE INDEX " + index_name + " USING " + index_type + " ON " + self.TablePrefix + table_name + "(" + ", ".join(
            fields) + ")"
        return self.execute(SQLStr)
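
# --- Added usage sketch (not part of the original snippet) -------------------
# Hypothetical use of QSSQLObject above; assumes a reachable MySQL server and
# valid credentials. The attribute values here are placeholders.
db = QSSQLObject()
db.DBType, db.DBName = "MySQL", "Scorpion"
db.User, db.Pwd = "root", "..."
db.connect()                      # picks a driver from Connector/DBType
print(db.fetchall("SELECT 1"))    # cursor + execute + fetchall + close
db.disconnect()
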
示例#10
0
class SQLDB(WritableFactorDB):
    """SQLDB"""
    DBType = Enum("MySQL", "SQL Server", "Oracle", "sqlite3", arg_type="SingleOption", label="数据库类型", order=0)
    DBName = Str("Scorpion", arg_type="String", label="数据库名", order=1)
    IPAddr = Str("127.0.0.1", arg_type="String", label="IP地址", order=2)
    Port = Range(low=0, high=65535, value=3306, arg_type="Integer", label="端口", order=3)
    User = Str("root", arg_type="String", label="用户名", order=4)
    Pwd = Password("", arg_type="String", label="密码", order=5)
    TablePrefix = Str("", arg_type="String", label="表名前缀", order=6)
    CharSet = Enum("utf8", "gbk", "gb2312", "gb18030", "cp936", "big5", arg_type="SingleOption", label="字符集", order=7)
    Connector = Enum("default", "cx_Oracle", "pymssql", "mysql.connector", "pymysql", "sqlite3", "pyodbc", arg_type="SingleOption", label="连接器", order=8)
    DSN = Str("", arg_type="String", label="数据源", order=9)
    SQLite3File = File(label="sqlite3文件", arg_type="File", order=10)
    CheckWriteData = Bool(False, arg_type="Bool", label="检查写入值", order=11)
    IgnoreFields = ListStr(arg_type="List", label="忽略字段", order=12)
    InnerPrefix = Str("qs_", arg_type="String", label="内部前缀", order=13)
    def __init__(self, sys_args={}, config_file=None, **kwargs):
        self._Connection = None  # database connection
        self._Connector = None  # connector actually in use
        self._TableFactorDict = {}  # {table name: pd.Series(data type, index=[factor names])}
        self._TableFieldDataType = {}  # {table name: pd.Series(DB data type, index=[factor names])}
        super().__init__(sys_args=sys_args, config_file=(__QS_ConfigPath__+os.sep+"SQLDBConfig.json" if config_file is None else config_file), **kwargs)
        self._PID = None  # process ID recorded when the DB connection was created
        self.Name = "SQLDB"
        return
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_Connection"] = (True if self.isAvailable() else False)
        return state
    def __setstate__(self, state):
        super().__setstate__(state)
        if self._Connection: self._connect()
        else: self._Connection = None
    # ------------------------------------------- Database-related ---------------------------
    def _connect(self):
        self._Connection = None
        if (self.Connector=="cx_Oracle") or ((self.Connector=="default") and (self.DBType=="Oracle")):
            try:
                import cx_Oracle
                self._Connection = cx_Oracle.connect(self.User, self.Pwd, cx_Oracle.makedsn(self.IPAddr, str(self.Port), self.DBName))
            except Exception as e:
                if self.Connector!="default": raise e
            else:
                self._Connector = "cx_Oracle"
        elif (self.Connector=="pymssql") or ((self.Connector=="default") and (self.DBType=="SQL Server")):
            try:
                import pymssql
                self._Connection = pymssql.connect(server=self.IPAddr, port=str(self.Port), user=self.User, password=self.Pwd, database=self.DBName, charset=self.CharSet)
            except Exception as e:
                if self.Connector!="default": raise e
            else:
                self._Connector = "pymssql"
        elif (self.Connector=="mysql.connector") or ((self.Connector=="default") and (self.DBType=="MySQL")):
            try:
                import mysql.connector
                self._Connection = mysql.connector.connect(host=self.IPAddr, port=str(self.Port), user=self.User, password=self.Pwd, database=self.DBName, charset=self.CharSet, autocommit=True)
            except Exception as e:
                if self.Connector!="default": raise e
            else:
                self._Connector = "mysql.connector"
        elif self.Connector=='pymysql':
            try:
                import pymysql
                self._Connection = pymysql.connect(host=self.IPAddr, port=self.Port, user=self.User, password=self.Pwd, db=self.DBName, charset=self.CharSet)
            except Exception as e:
                if self.Connector!='default': raise e
        elif (self.Connector=="sqlite3") or ((self.Connector=="default") and (self.DBType=="sqlite3")):
            import sqlite3
            self._Connection = sqlite3.connect(self.SQLite3File)
            self._Connector = "sqlite3"
        if self._Connection is None:
            if self.Connector not in ("default", "pyodbc"):
                self._Connection = None
                raise __QS_Error__("不支持该连接器(connector) : "+self.Connector)
            else:
                import pyodbc
                if self.DSN: self._Connection = pyodbc.connect("DSN=%s;PWD=%s" % (self.DSN, self.Pwd))
                else: self._Connection = pyodbc.connect("DRIVER={%s};DATABASE=%s;SERVER=%s;UID=%s;PWD=%s" % (self.DBType, self.DBName, self.IPAddr+","+str(self.Port), self.User, self.Pwd))
                self._Connector = "pyodbc"
        self._PID = os.getpid()
        return 0
    def connect(self):
        self._connect()
        nPrefix = len(self.InnerPrefix)
        if self._Connector=="sqlite3":
            SQLStr = "SELECT name FROM sqlite_master WHERE type='table' AND name LIKE '%s%%' ORDER BY name"
            Cursor = self.cursor(SQLStr % self.InnerPrefix)
            AllTables = Cursor.fetchall()
            self._TableFactorDict = {}
            self._TableFieldDataType = {}
            IgnoreFields = ["code", "datetime"]+list(self.IgnoreFields)
            for iTableName in AllTables:
                iTableName = iTableName[0][nPrefix:]
                Cursor.execute("PRAGMA table_info([%s])" % self.InnerPrefix+iTableName)
                iDataType = np.array(Cursor.fetchall())
                iDataType = pd.Series(iDataType[:, 2], index=iDataType[:, 1])
                iDataType = iDataType[iDataType.index.difference(IgnoreFields)]
                if iDataType.shape[0]>0:
                    self._TableFieldDataType[iTableName] = iDataType.copy()
                    iDataType[iDataType=="text"] = "string"
                    iDataType[iDataType=="real"] = "double"
                    self._TableFactorDict[iTableName] = iDataType
        elif self.DBType=="MySQL":
            SQLStr = ("SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM information_schema.COLUMNS WHERE table_schema='%s' " % self.DBName)
            SQLStr += ("AND TABLE_NAME LIKE '%s%%' " % self.InnerPrefix)
            SQLStr += "AND COLUMN_NAME NOT IN ('code', 'datetime'"
            if len(self.IgnoreFields)>0:
                SQLStr += ",'"+"','".join(self.IgnoreFields)+"') "
            else:
                SQLStr += ") "
            SQLStr += "ORDER BY TABLE_NAME, COLUMN_NAME"
            Rslt = self.fetchall(SQLStr)
            if not Rslt:
                self._TableFieldDataType = {}
                self._TableFactorDict = {}
            else:
                self._TableFieldDataType = pd.DataFrame(np.array(Rslt), columns=["表", "因子", "DataType"]).set_index(["表", "因子"])["DataType"]
                self._TableFactorDict = self._TableFieldDataType.copy()
                Mask = (self._TableFactorDict.str.contains("char") | self._TableFactorDict.str.contains("date"))
                self._TableFactorDict[Mask] = "string"
                self._TableFactorDict[~Mask] = "double"
                self._TableFactorDict = {iTable[nPrefix:]:self._TableFactorDict.loc[iTable] for iTable in self._TableFactorDict.index.levels[0]}
                self._TableFieldDataType = {iTable[nPrefix:]:self._TableFieldDataType.loc[iTable] for iTable in self._TableFieldDataType.index.levels[0]}
        return 0
    def disconnect(self):
        if self._Connection is not None:
            try:
                self._Connection.close()
            except Exception as e:
                self._QS_Logger.error("因子库 '%s' 断开错误: %s" % (self.Name, str(e)))
            finally:
                self._Connection = None
        return 0
    def isAvailable(self):
        return (self._Connection is not None)
    def cursor(self, sql_str=None):
        if self._Connection is None: raise __QS_Error__("%s尚未连接!" % self.__doc__)
        if os.getpid()!=self._PID: self._connect()  # reconnect if the process ID has changed
        try:  # reconnect if the connection has been dropped
            Cursor = self._Connection.cursor()
        except:
            self._connect()
            Cursor = self._Connection.cursor()
        if sql_str is None: return Cursor
        Cursor.execute(sql_str)
        return Cursor
    def fetchall(self, sql_str):
        Cursor = self.cursor(sql_str=sql_str)
        Data = Cursor.fetchall()
        Cursor.close()
        return Data
    def execute(self, sql_str):
        if self._Connection is None: raise __QS_Error__("%s尚未连接!" % self.__doc__)
        if os.getpid()!=self._PID: self._connect()  # reconnect if the process ID has changed
        try:
            Cursor = self._Connection.cursor()
        except:
            self._connect()
            Cursor = self._Connection.cursor()
        Cursor.execute(sql_str)
        self._Connection.commit()
        Cursor.close()
        return 0
    # ------------------------------- Table operations ---------------------------------
    @property
    def TableNames(self):
        return sorted(self._TableFactorDict)
    def getTable(self, table_name, args={}):
        if table_name not in self._TableFactorDict: raise __QS_Error__("表 '%s' 不存在!" % table_name)
        if args.get("因子表类型", "宽表")=="宽表":
            return _WideTable(name=table_name, fdb=self, sys_args=args, logger=self._QS_Logger)
        else:
            return _NarrowTable(name=table_name, fdb=self, sys_args=args, logger=self._QS_Logger)
    def renameTable(self, old_table_name, new_table_name):
        if old_table_name not in self._TableFactorDict: raise __QS_Error__("表: '%s' 不存在!" % old_table_name)
        if (new_table_name!=old_table_name) and (new_table_name in self._TableFactorDict): raise __QS_Error__("表: '"+new_table_name+"' 已存在!")
        SQLStr = "ALTER TABLE "+self.TablePrefix+self.InnerPrefix+old_table_name+" RENAME TO "+self.TablePrefix+self.InnerPrefix+new_table_name
        self.execute(SQLStr)
        self._TableFactorDict[new_table_name] = self._TableFactorDict.pop(old_table_name)
        self._TableFieldDataType[new_table_name] = self._TableFieldDataType.pop(old_table_name)
        return 0
    # Add an index to a table
    def addIndex(self, index_name, table_name, fields=["datetime", "code"], index_type="BTREE"):
        if index_type is not None:
            SQLStr = "CREATE INDEX "+index_name+" USING "+index_type+" ON "+self.TablePrefix+self.InnerPrefix+table_name+"("+", ".join(fields)+")"
        else:
            SQLStr = "CREATE INDEX "+index_name+" ON "+self.TablePrefix+self.InnerPrefix+table_name+"("+", ".join(fields)+")"
        return self.execute(SQLStr)
    # Create a table; field_types: {field name: data type}
    def createTable(self, table_name, field_types):
        if self.DBType=="MySQL":
            SQLStr = "CREATE TABLE IF NOT EXISTS %s (`datetime` DATETIME(6) NOT NULL, `code` VARCHAR(40) NOT NULL, " % (self.TablePrefix+self.InnerPrefix+table_name)
            for iField in field_types: SQLStr += "`%s` %s, " % (iField, field_types[iField])
            SQLStr += "PRIMARY KEY (`datetime`, `code`)) ENGINE=InnoDB DEFAULT CHARSET=utf8"
            IndexType = "BTREE"
        elif self.DBType=="sqlite3":
            SQLStr = "CREATE TABLE IF NOT EXISTS %s (`datetime` text NOT NULL, `code` text NOT NULL, " % (self.TablePrefix+self.InnerPrefix+table_name)
            for iField in field_types: SQLStr += "`%s` %s, " % (iField, field_types[iField])
            SQLStr += "PRIMARY KEY (`datetime`, `code`))"
            IndexType = None
        self.execute(SQLStr)
        try:
            self.addIndex(table_name+"_index", table_name, index_type=IndexType)
        except Exception as e:
            self._QS_Logger.warning("因子表 '%s' 索引创建失败: %s" % (table_name, str(e)))
        return 0
    # Add fields; field_types: {field name: data type}
    def addField(self, table_name, field_types):
        if table_name not in self._TableFactorDict: return self.createTable(table_name, field_types)
        SQLStr = "ALTER TABLE %s " % (self.TablePrefix+self.InnerPrefix+table_name)
        SQLStr += "ADD COLUMN ("
        for iField in field_types: SQLStr += "%s %s," % (iField, field_types[iField])
        SQLStr = SQLStr[:-1]+")"
        self.execute(SQLStr)
        return 0
    def deleteTable(self, table_name):
        if table_name not in self._TableFactorDict: return 0
        SQLStr = 'DROP TABLE %s' % (self.TablePrefix+self.InnerPrefix+table_name)
        self.execute(SQLStr)
        self._TableFactorDict.pop(table_name, None)
        self._TableFieldDataType.pop(table_name, None)
        return 0
    # Truncate a table
    def truncateTable(self, table_name):
        if table_name not in self._TableFactorDict: raise __QS_Error__("表: '%s' 不存在!" % table_name)
        SQLStr = "TRUNCATE TABLE %s" % (self.TablePrefix+self.InnerPrefix+table_name)
        self.execute(SQLStr)
        return 0
    # ---------------------------- Factor operations ---------------------------------
    def renameFactor(self, table_name, old_factor_name, new_factor_name):
        if old_factor_name not in self._TableFactorDict[table_name]: raise __QS_Error__("因子: '%s' 不存在!" % old_factor_name)
        if (new_factor_name!=old_factor_name) and (new_factor_name in self._TableFactorDict[table_name]): raise __QS_Error__("表中的因子: '%s' 已存在!" % new_factor_name)
        if self.DBType!="sqlite3":
            SQLStr = "ALTER TABLE "+self.TablePrefix+self.InnerPrefix+table_name
            SQLStr += " CHANGE COLUMN `"+old_factor_name+"` `"+new_factor_name+"`"
            self.execute(SQLStr)
        else:
            # Rename the table to a temporary name
            SQLStr = "ALTER TABLE %s RENAME TO %s"
            TempTableName = genAvailableName("TempTable", self.TableNames)
            self.execute(SQLStr % (self.TablePrefix+self.InnerPrefix+table_name, self.TablePrefix+self.InnerPrefix+TempTableName))
            # Create the new table
            FieldTypes = OrderedDict()
            for iFactorName, iDataType in self._TableFactorDict[table_name].items():
                iDataType = ("text" if iDataType=="string" else "real")
                if iFactorName==old_factor_name: FieldTypes[new_factor_name] = iDataType
                else: FieldTypes[iFactorName] = iDataType
            self.createTable(table_name, field_types=FieldTypes)
            # Copy the data across
            OldFactorNames = ", ".join(self._TableFactorDict[table_name].index)
            NewFactorNames = ", ".join(FieldTypes)
            SQLStr = "INSERT INTO %s (datetime, code, %s) SELECT datetime, code, %s FROM %s"
            Cursor = self.cursor(SQLStr % (self.TablePrefix+self.InnerPrefix+table_name, NewFactorNames, OldFactorNames, self.TablePrefix+self.InnerPrefix+TempTableName))
            self._Connection.commit()
            # Drop the temporary table
            Cursor.execute("DROP TABLE %s" % (self.TablePrefix+self.InnerPrefix+TempTableName, ))
            self._Connection.commit()
            Cursor.close()
        self._TableFactorDict[table_name][new_factor_name] = self._TableFactorDict[table_name].pop(old_factor_name)
        self._TableFieldDataType[table_name][new_factor_name] = self._TableFieldDataType[table_name].pop(old_factor_name)
        return 0
    def deleteFactor(self, table_name, factor_names):
        if not factor_names: return 0
        FactorIndex = self._TableFactorDict.get(table_name, pd.Series()).index.difference(factor_names).tolist()
        if not FactorIndex: return self.deleteTable(table_name)
        if self.DBType!="sqlite3":
            SQLStr = "ALTER TABLE "+self.TablePrefix+self.InnerPrefix+table_name
            for iFactorName in factor_names: SQLStr += " DROP COLUMN `"+iFactorName+"`,"
            self.execute(SQLStr[:-1])
        else:
            # Rename the table to a temporary name
            SQLStr = "ALTER TABLE %s RENAME TO %s"
            TempTableName = genAvailableName("TempTable", self.TableNames)
            self.execute(SQLStr % (self.TablePrefix+self.InnerPrefix+table_name, self.TablePrefix+self.InnerPrefix+TempTableName))
            # Create the new table
            FieldTypes = OrderedDict()
            for iFactorName in FactorIndex:
                FieldTypes[iFactorName] = ("text" if self._TableFactorDict[table_name].loc[iFactorName]=="string" else "real")
            self.createTable(table_name, field_types=FieldTypes)
            # Copy the data across
            FactorNameStr = ", ".join(FactorIndex)
            SQLStr = "INSERT INTO %s (datetime, code, %s) SELECT datetime, code, %s FROM %s"
            Cursor = self.cursor(SQLStr % (self.TablePrefix+self.InnerPrefix+table_name, FactorNameStr, FactorNameStr, self.TablePrefix+self.InnerPrefix+TempTableName))
            self._Connection.commit()
            # Drop the temporary table
            Cursor.execute("DROP TABLE %s" % (self.TablePrefix+self.InnerPrefix+TempTableName, ))
            self._Connection.commit()
            Cursor.close()
        self._TableFactorDict[table_name] = self._TableFactorDict[table_name][FactorIndex]
        self._TableFieldDataType[table_name] = self._TableFieldDataType[table_name][FactorIndex]
        return 0
    def deleteData(self, table_name, ids=None, dts=None):
        DBTableName = self.TablePrefix+self.InnerPrefix+table_name
        if (self.DBType!="sqlite3") and (ids is None) and (dts is None):
            SQLStr = "TRUNCATE TABLE "+DBTableName
            return self.execute(SQLStr)
        SQLStr = "DELETE * FROM "+DBTableName
        if dts is not None:
            DTs = [iDT.strftime("%Y-%m-%d %H:%M:%S.%f") for iDT in dts]
            SQLStr += "WHERE "+genSQLInCondition(DBTableName+".datetime", DTs, is_str=True, max_num=1000)+" "
        else:
            SQLStr += "WHERE "+DBTableName+".datetime IS NOT NULL "
        if ids is not None:
            SQLStr += "AND "+genSQLInCondition(DBTableName+".code", ids, is_str=True, max_num=1000)
        return self.execute(SQLStr)
    def _adjustWriteData(self, data):
        NewData = []
        DataLen = data.applymap(lambda x: len(x) if isinstance(x, list) else 1).max(axis=1)
        for i in range(data.shape[0]):
            iDataLen = DataLen.iloc[i]
            iData = data.iloc[i].apply(lambda x: x * int(np.ceil(iDataLen / len(x))) if isinstance(x, list) else [x]*iDataLen).tolist()
            NewData.extend(zip(*iData))
        return NewData
    def writeData(self, data, table_name, if_exists="update", data_type={}, **kwargs):
        FieldTypes = {iFactorName:_identifyDataType(self.DBType, data.iloc[i].dtypes) for i, iFactorName in enumerate(data.items)}
        if table_name not in self._TableFactorDict:
            self.createTable(table_name, field_types=FieldTypes)
            self._TableFactorDict[table_name] = pd.Series({iFactorName: ("string" if FieldTypes[iFactorName].find("char")!=-1 else "double") for iFactorName in FieldTypes})
            self._TableFieldDataType[table_name] = pd.Series(FieldTypes)
            SQLStr = "INSERT INTO "+self.TablePrefix+self.InnerPrefix+table_name+" (`datetime`, `code`, "
        else:
            NewFactorNames = data.items.difference(self._TableFactorDict[table_name].index).tolist()
            if NewFactorNames:
                self.addField(table_name, {iFactorName:FieldTypes[iFactorName] for iFactorName in NewFactorNames})
                NewDataType = pd.Series({iFactorName: ("string" if FieldTypes[iFactorName].find("char")!=-1 else "double") for iFactorName in NewFactorNames})
                self._TableFactorDict[table_name] = self._TableFactorDict[table_name].append(NewDataType)
                self._TableFieldDataType[table_name] = self._TableFieldDataType[table_name].append(pd.Series(FieldTypes))
            AllFactorNames = self._TableFactorDict[table_name].index.tolist()
            if self.CheckWriteData:
                OldData = self.getTable(table_name, args={"因子值类型":"list", "时间转字符串":True}).readData(factor_names=AllFactorNames, ids=data.minor_axis.tolist(), dts=data.major_axis.tolist())
            else:
                OldData = self.getTable(table_name, args={"时间转字符串":True}).readData(factor_names=AllFactorNames, ids=data.minor_axis.tolist(), dts=data.major_axis.tolist())
            if if_exists=="append":
                for iFactorName in AllFactorNames:
                    if iFactorName in data:
                        data[iFactorName] = OldData[iFactorName].where(pd.notnull(OldData[iFactorName]), data[iFactorName])
                    else:
                        data[iFactorName] = OldData[iFactorName]
            elif if_exists=="update":
                for iFactorName in AllFactorNames:
                    if iFactorName in data:
                        data[iFactorName] = data[iFactorName].where(pd.notnull(data[iFactorName]), OldData[iFactorName])
                    else:
                        data[iFactorName] = OldData[iFactorName]
            SQLStr = "REPLACE INTO "+self.TablePrefix+self.InnerPrefix+table_name+" (`datetime`, `code`, "
        data.major_axis = [iDT.strftime("%Y-%m-%d %H:%M:%S.%f") for iDT in data.major_axis]
        NewData = {}
        for iFactorName in data.items:
            iData = data.loc[iFactorName].stack(dropna=False)
            NewData[iFactorName] = iData
            SQLStr += "`"+iFactorName+"`, "
        NewData = pd.DataFrame(NewData).loc[:, data.items]
        NewData = NewData[pd.notnull(NewData).any(axis=1)]
        if NewData.shape[0]==0: return 0
        NewData = NewData.astype("O").where(pd.notnull(NewData), None)
        if self._Connector in ("pyodbc", "sqlite3"):
            SQLStr = SQLStr[:-2] + ") VALUES (" + "?, " * (NewData.shape[1]+2)
        else:
            SQLStr = SQLStr[:-2] + ") VALUES (" + "%s, " * (NewData.shape[1]+2)
        SQLStr = SQLStr[:-2]+") "
        Cursor = self._Connection.cursor()
        if self.CheckWriteData:
            NewData = self._adjustWriteData(NewData.reset_index())
            Cursor.executemany(SQLStr, NewData)
        else:
            Cursor.executemany(SQLStr, NewData.reset_index().values.tolist())
        self._Connection.commit()
        Cursor.close()
        return 0
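
# --- Added note (not part of the original snippet) ---------------------------
# writeData above merges incoming data with what is already stored using
# pandas ``where``. A minimal sketch of the two policies on plain Series
# (values are made up for illustration):
import numpy as np
import pandas as pd

old = pd.Series([1.0, np.nan, 3.0])
new = pd.Series([np.nan, 20.0, 30.0])

update = new.where(pd.notnull(new), old)  # "update": new values win -> [1.0, 20.0, 30.0]
append = old.where(pd.notnull(old), new)  # "append": old values win  -> [1.0, 20.0, 3.0]
print(update.tolist(), append.tolist())
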
示例#11
0
class SQLDB(WritableFactorDB):
    """SQLDB"""
    DBType = Enum("MySQL",
                  "SQL Server",
                  "Oracle",
                  arg_type="SingleOption",
                  label="数据库类型",
                  order=0)
    DBName = Str("Scorpion", arg_type="String", label="数据库名", order=1)
    IPAddr = Str("127.0.0.1", arg_type="String", label="IP地址", order=2)
    Port = Range(low=0,
                 high=65535,
                 value=3306,
                 arg_type="Integer",
                 label="端口",
                 order=3)
    User = Str("root", arg_type="String", label="用户名", order=4)
    Pwd = Password("", arg_type="String", label="密码", order=5)
    TablePrefix = Str("", arg_type="String", label="表名前缀", order=6)
    CharSet = Enum("utf8",
                   "gbk",
                   "gb2312",
                   "gb18030",
                   "cp936",
                   "big5",
                   arg_type="SingleOption",
                   label="字符集",
                   order=7)
    Connector = Enum("default",
                     "cx_Oracle",
                     "pymssql",
                     "mysql.connector",
                     "pyodbc",
                     arg_type="SingleOption",
                     label="连接器",
                     order=8)
    DSN = Str("", arg_type="String", label="数据源", order=9)

    def __init__(self, sys_args={}, config_file=None, **kwargs):
        self._Connection = None  # database connection
        self._Prefix = "QS_"
        self._TableFactorDict = {}  # {table name: pd.Series(data type, index=[factor names])}
        super().__init__(sys_args=sys_args,
                         config_file=(__QS_ConfigPath__ + os.sep +
                                      "SQLDBConfig.json"
                                      if config_file is None else config_file),
                         **kwargs)
        self.Name = "SQLDB"
        return

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_Connection"] = (True if self.isAvailable() else False)
        return state

    def __setstate__(self, state):
        super().__setstate__(state)
        if self._Connection: self._connect()
        else: self._Connection = None

    # ------------------------------------------- Database-related ---------------------------
    def _connect(self):
        self._Connection = None
        if (self.Connector == "cx_Oracle") or ((self.Connector == "default")
                                               and (self.DBType == "Oracle")):
            try:
                import cx_Oracle
                self._Connection = cx_Oracle.connect(
                    self.User, self.Pwd,
                    cx_Oracle.makedsn(self.IPAddr, str(self.Port),
                                      self.DBName))
            except Exception as e:
                if self.Connector != "default": raise e
        elif (self.Connector
              == "pymssql") or ((self.Connector == "default") and
                                (self.DBType == "SQL Server")):
            try:
                import pymssql
                self._Connection = pymssql.connect(server=self.IPAddr,
                                                   port=str(self.Port),
                                                   user=self.User,
                                                   password=self.Pwd,
                                                   database=self.DBName,
                                                   charset=self.CharSet)
            except Exception as e:
                if self.Connector != "default": raise e
        elif (self.Connector
              == "mysql.connector") or ((self.Connector == "default") and
                                        (self.DBType == "MySQL")):
            try:
                import mysql.connector
                self._Connection = mysql.connector.connect(
                    host=self.IPAddr,
                    port=str(self.Port),
                    user=self.User,
                    password=self.Pwd,
                    database=self.DBName,
                    charset=self.CharSet,
                    autocommit=True)
            except Exception as e:
                if self.Connector != "default": raise e
        # Fall back to pyodbc only when none of the drivers above produced a connection.
        if self._Connection is None:
            if self.Connector not in ("default", "pyodbc"):
                self._Connection = None
                raise __QS_Error__("不支持该连接器(connector) : " + self.Connector)
            else:
                import pyodbc
                if self.DSN:
                    self._Connection = pyodbc.connect(
                        "DSN=%s;PWD=%s" % (self.DSN, self.Pwd))
                else:
                    self._Connection = pyodbc.connect(
                        "DRIVER={%s};DATABASE=%s;SERVER=%s;UID=%s;PWD=%s" %
                        (self.DBType, self.DBName, self.IPAddr, self.User,
                         self.Pwd))
                self.Connector = "pyodbc"
        return 0

    def connect(self):
        self._connect()
        if self.DBType == "MySQL":
            SQLStr = (
                "SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM information_schema.COLUMNS WHERE table_schema='%s' "
                % self.DBName)
            SQLStr += ("AND TABLE_NAME LIKE '%s%%' " % self._Prefix)
            SQLStr += "AND COLUMN_NAME NOT IN ('ID', 'DateTime') "
            SQLStr += "ORDER BY TABLE_NAME, COLUMN_NAME"
            Rslt = self.fetchall(SQLStr)
            if not Rslt: self._TableFactorDict = {}
            else:
                self._TableFactorDict = pd.DataFrame(
                    np.array(Rslt),
                    columns=["表", "因子",
                             "DataType"]).set_index(["表", "因子"])["DataType"]
                Mask = (self._TableFactorDict == "varchar")
                self._TableFactorDict[Mask] = "string"
                self._TableFactorDict[~Mask] = "double"
                nPrefix = len(self._Prefix)
                self._TableFactorDict = {
                    iTable[nPrefix:]: self._TableFactorDict.loc[iTable]
                    for iTable in self._TableFactorDict.index.levels[0]
                }
        return 0

    def disconnect(self):
        if self._Connection is not None:
            try:
                self._Connection.close()
            except Exception as e:
                raise e
            finally:
                self._Connection = None
        return 0

    def isAvailable(self):
        return (self._Connection is not None)

    def cursor(self, sql_str=None):
        if self._Connection is None:
            raise __QS_Error__("%s尚未连接!" % self.__doc__)
        Cursor = self._Connection.cursor()
        if sql_str is None: return Cursor
        Cursor.execute(sql_str)
        return Cursor

    def fetchall(self, sql_str):
        Cursor = self.cursor(sql_str=sql_str)
        Data = Cursor.fetchall()
        Cursor.close()
        return Data

    def execute(self, sql_str):
        Cursor = self._Connection.cursor()
        Cursor.execute(sql_str)
        self._Connection.commit()
        Cursor.close()
        return 0

    # ------------------------------- Table operations ---------------------------------
    @property
    def TableNames(self):
        return sorted(self._TableFactorDict)

    def getTable(self, table_name, args={}):
        if table_name not in self._TableFactorDict:
            raise __QS_Error__("表 '%s' 不存在!" % table_name)
        return _FactorTable(name=table_name,
                            fdb=self,
                            data_type=self._TableFactorDict[table_name],
                            sys_args=args)

    def renameTable(self, old_table_name, new_table_name):
        if old_table_name not in self._TableFactorDict:
            raise __QS_Error__("表: '%s' 不存在!" % old_table_name)
        if (new_table_name != old_table_name) and (new_table_name
                                                   in self._TableFactorDict):
            raise __QS_Error__("表: '" + new_table_name + "' 已存在!")
        SQLStr = "ALTER TABLE " + self.TablePrefix + self._Prefix + old_table_name + " RENAME TO " + self.TablePrefix + self._Prefix + new_table_name
        self.execute(SQLStr)
        self._TableFactorDict[new_table_name] = self._TableFactorDict.pop(
            old_table_name)
        return 0

    # Add an index to a table
    def addIndex(self,
                 index_name,
                 table_name,
                 fields=["DateTime", "ID"],
                 index_type="BTREE"):
        SQLStr = "CREATE INDEX " + index_name + " USING " + index_type + " ON " + self.TablePrefix + self._Prefix + table_name + "(" + ", ".join(
            fields) + ")"
        return self.execute(SQLStr)

    # Create a table; field_types: {field name: data type}
    def createTable(self, table_name, field_types):
        SQLStr = "CREATE TABLE IF NOT EXISTS %s (`DateTime` DATETIME(6) NOT NULL, `ID` VARCHAR(40) NOT NULL, " % (
            self.TablePrefix + self._Prefix + table_name)
        for iField in field_types:
            SQLStr += "`%s` %s, " % (iField, field_types[iField])
        SQLStr += "PRIMARY KEY (`DateTime`, `ID`)) ENGINE=InnoDB DEFAULT CHARSET=utf8"
        self.execute(SQLStr)
        try:
            self.addIndex(table_name + "_index", table_name)
        except Exception as e:
            print("索引创建失败: " + str(e))
        return 0

    # Add fields; field_types: {field name: data type}
    def addField(self, table_name, field_types):
        if table_name not in self._TableFactorDict:
            return self.createTable(table_name, field_types)
        SQLStr = "ALTER TABLE %s " % (self.TablePrefix + self._Prefix +
                                      table_name)
        SQLStr += "ADD COLUMN ("
        for iField in field_types:
            SQLStr += "%s %s," % (iField, field_types[iField])
        SQLStr = SQLStr[:-1] + ")"
        self.execute(SQLStr)
        return 0

    def deleteTable(self, table_name):
        if table_name not in self._TableFactorDict: return 0
        SQLStr = 'DROP TABLE %s' % (self.TablePrefix + self._Prefix +
                                    table_name)
        self.execute(SQLStr)
        self._TableFactorDict.pop(table_name, None)
        return 0

    # ---------------------------- Factor operations ---------------------------------
    def renameFactor(self, table_name, old_factor_name, new_factor_name):
        if old_factor_name not in self._TableFactorDict[table_name]:
            raise __QS_Error__("因子: '%s' 不存在!" % old_factor_name)
        if (new_factor_name != old_factor_name) and (
                new_factor_name in self._TableFactorDict[table_name]):
            raise __QS_Error__("表中的因子: '%s' 已存在!" % new_factor_name)
        SQLStr = "ALTER TABLE " + self.TablePrefix + self._Prefix + table_name
        SQLStr += " CHANGE COLUMN `" + old_factor_name + "` `" + new_factor_name + "`"
        self.execute(SQLStr)
        self._TableFactorDict[table_name][
            new_factor_name] = self._TableFactorDict[table_name].pop(
                old_factor_name)
        return 0

    def deleteFactor(self, table_name, factor_names):
        if not factor_names: return 0
        SQLStr = "ALTER TABLE " + self.TablePrefix + self._Prefix + table_name
        for iFactorName in factor_names:
            SQLStr += " DROP COLUMN `" + iFactorName + "`,"
        self.execute(SQLStr[:-1])
        FactorIndex = list(
            set(self._TableFactorDict.get(table_name,
                                          pd.Series()).index).difference(
                                              set(factor_names)))
        if not FactorIndex: self._TableFactorDict.pop(table_name, None)
        else:
            self._TableFactorDict[table_name] = self._TableFactorDict[
                table_name][FactorIndex]
        return 0

    def deleteData(self, table_name, ids=None, dts=None):
        DBTableName = self.TablePrefix + self._Prefix + table_name
        if (ids is None) and (dts is None):
            SQLStr = "TRUNCATE TABLE " + DBTableName
            return self.execute(SQLStr)
        SQLStr = "DELETE * FROM " + DBTableName
        if dts is not None:
            DTs = [iDT.strftime("%Y-%m-%d %H:%M:%S.%f") for iDT in dts]
            SQLStr += "WHERE " + genSQLInCondition(
                DBTableName + ".DateTime", DTs, is_str=True,
                max_num=1000) + " "
        else:
            SQLStr += "WHERE " + DBTableName + ".DateTime IS NOT NULL "
        if ids is not None:
            SQLStr += "AND " + genSQLInCondition(
                DBTableName + ".ID", ids, is_str=True, max_num=1000)
        return self.execute(SQLStr)

    def writeData(self,
                  data,
                  table_name,
                  if_exists="update",
                  data_type={},
                  **kwargs):
        FieldTypes = {
            iFactorName: _identifyDataType(data.iloc[i].dtypes)
            for i, iFactorName in enumerate(data.items)
        }
        if table_name not in self._TableFactorDict:
            self.createTable(table_name, field_types=FieldTypes)
            self._TableFactorDict[table_name] = pd.Series({
                iFactorName:
                ("string"
                 if FieldTypes[iFactorName].find("char") != -1 else "double")
                for iFactorName in FieldTypes
            })
            SQLStr = "INSERT INTO " + self.TablePrefix + self._Prefix + table_name + " (`DateTime`, `ID`, "
        else:
            NewFactorNames = data.items.difference(
                self._TableFactorDict[table_name].index).tolist()
            if NewFactorNames:
                self.addField(
                    table_name, {
                        iFactorName: FieldTypes[iFactorName]
                        for iFactorName in NewFactorNames
                    })
                NewDataType = pd.Series({
                    iFactorName:
                    ("string" if FieldTypes[iFactorName].find("char") != -1
                     else "double")
                    for iFactorName in NewFactorNames
                })
                self._TableFactorDict[table_name] = self._TableFactorDict[
                    table_name].append(NewDataType)
            AllFactorNames = self._TableFactorDict[table_name].index.tolist()
            OldData = self.getTable(table_name).readData(
                factor_names=AllFactorNames,
                ids=data.minor_axis.tolist(),
                dts=data.major_axis.tolist())
            if if_exists == "append":
                for iFactorName in AllFactorNames:
                    if iFactorName in data:
                        data[iFactorName] = OldData[iFactorName].where(
                            pd.notnull(OldData[iFactorName]),
                            data[iFactorName])
                    else:
                        data[iFactorName] = OldData[iFactorName]
            elif if_exists == "update":
                for iFactorName in AllFactorNames:
                    if iFactorName in data:
                        data[iFactorName] = data[iFactorName].where(
                            pd.notnull(data[iFactorName]),
                            OldData[iFactorName])
                    else:
                        data[iFactorName] = OldData[iFactorName]
            SQLStr = "REPLACE INTO " + self.TablePrefix + self._Prefix + table_name + " (`DateTime`, `ID`, "
        NewData = {}
        for iFactorName in data.items:
            iData = data.loc[iFactorName].stack(dropna=False)
            NewData[iFactorName] = iData
            SQLStr += "`" + iFactorName + "`, "
        NewData = pd.DataFrame(NewData).loc[:, data.items]
        NewData = NewData[pd.notnull(NewData).any(axis=1)]
        if NewData.shape[0] == 0: return 0
        NewData = NewData.astype("O").where(pd.notnull(NewData), None)
        if self.Connector == "pyodbc":
            SQLStr = SQLStr[:-2] + ") VALUES (" + "?, " * (NewData.shape[1] +
                                                           2)
        else:
            SQLStr = SQLStr[:-2] + ") VALUES (" + "%s, " * (NewData.shape[1] +
                                                            2)
        SQLStr = SQLStr[:-2] + ") "
        Cursor = self._Connection.cursor()
        Cursor.executemany(SQLStr, NewData.reset_index().values.tolist())
        self._Connection.commit()
        Cursor.close()
        return 0
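
# --- Added note (not part of the original snippet) ---------------------------
# Both SQLDB.writeData variants above switch the VALUES placeholder by
# connector: pyodbc/sqlite3 use the "qmark" paramstyle ("?"), while
# mysql.connector/pymssql accept "%s"-style placeholders. A minimal
# executemany sketch with sqlite3 (hypothetical table and values):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (dt TEXT, code TEXT, v REAL)")
rows = [("2020-01-01", "000001.SZ", 1.0), ("2020-01-01", "000002.SZ", 2.0)]
conn.executemany("INSERT INTO t (dt, code, v) VALUES (?, ?, ?)", rows)
print(conn.execute("SELECT COUNT(*) FROM t").fetchone()[0])  # -> 2
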
示例#12
0
class MongoDB(WritableFactorDB):
    """MongoDB"""
    Name = Str("MongoDB")
    DBType = Enum("Mongo", arg_type="SingleOption", label="数据库类型", order=0)
    DBName = Str("Scorpion", arg_type="String", label="数据库名", order=1)
    IPAddr = Str("127.0.0.1", arg_type="String", label="IP地址", order=2)
    Port = Range(low=0,
                 high=65535,
                 value=27017,
                 arg_type="Integer",
                 label="端口",
                 order=3)
    User = Str("root", arg_type="String", label="用户名", order=4)
    Pwd = Password("", arg_type="String", label="密码", order=5)
    CharSet = Enum("utf8",
                   "gbk",
                   "gb2312",
                   "gb18030",
                   "cp936",
                   "big5",
                   arg_type="SingleOption",
                   label="字符集",
                   order=6)
    Connector = Enum("default",
                     "pymongo",
                     arg_type="SingleOption",
                     label="连接器",
                     order=7)
    IgnoreFields = ListStr(arg_type="List", label="忽略字段", order=8)
    InnerPrefix = Str("qs_", arg_type="String", label="内部前缀", order=9)

    def __init__(self, sys_args={}, config_file=None, **kwargs):
        self._Connection = None  # database connection
        self._PID = None  # process ID recorded when the DB connection was created
        super().__init__(sys_args=sys_args,
                         config_file=(__QS_ConfigPath__ + os.sep +
                                      "MongoDBConfig.json"
                                      if config_file is None else config_file),
                         **kwargs)
        self._TableFactorDict = {}  # {table name: pd.Series(data type, index=[factor names])}
        self._TableFieldDataType = {}  # {table name: pd.Series(DB data type, index=[factor names])}
        self.Name = "MongoDB"
        return

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_Connection"] = (True if self.isAvailable() else False)
        state["_DB"] = None
        return state

    def __setstate__(self, state):
        super().__setstate__(state)
        if self._Connection: self._connect()
        else: self._Connection = None

    @property
    def Connection(self):
        if self._Connection is not None:
            if os.getpid() != self._PID: self._connect()  # reconnect if the process ID has changed
        return self._Connection

    def _connect(self):
        self._Connection = None
        if (self.Connector == "pymongo") or ((self.Connector == "default") and
                                             (self.DBType == "Mongo")):
            try:
                import pymongo
                self._Connection = pymongo.MongoClient(host=self.IPAddr,
                                                       port=self.Port)
            except Exception as e:
                Msg = ("'%s' 尝试使用 pymongo 连接(%s@%s:%d)数据库 '%s' 失败: %s" %
                       (self.Name, self.User, self.IPAddr, self.Port,
                        self.DBName, str(e)))
                self._QS_Logger.error(Msg)
                if self.Connector != "default": raise e
            else:
                self._Connector = "pymongo"
        else:
            # No matching connector: report it instead of referencing an undefined exception.
            Msg = ("'%s' 连接(%s@%s:%d)数据库 '%s' 失败: %s" %
                   (self.Name, self.User, self.IPAddr, self.Port, self.DBName,
                    self.Connector))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        self._PID = os.getpid()
        self._DB = self._Connection[self.DBName]
        return 0

    def connect(self):
        self._connect()
        nPrefix = len(self.InnerPrefix)
        if self.DBType == "Mongo":
            self._TableFactorDict = {}
            for iTableName in self._DB.collection_names():
                if iTableName[:nPrefix] == self.InnerPrefix:
                    iTableInfo = self._DB[iTableName].find_one(
                        {"code": "_TableInfo"}, {
                            "datetime": 0,
                            "code": 0,
                            "_id": 0
                        })
                    if iTableInfo:
                        self._TableFactorDict[
                            iTableName[nPrefix:]] = pd.Series({
                                iFactorName: iInfo["DataType"]
                                for iFactorName, iInfo in iTableInfo.items()
                                if iFactorName not in self.IgnoreFields
                            })
        return 0

    @property
    def TableNames(self):
        return sorted(self._TableFactorDict)

    def getTable(self, table_name, args={}):
        if table_name not in self._TableFactorDict:
            Msg = ("因子库 '%s' 调用方法 getTable 错误: 不存在因子表: '%s'!" %
                   (self.Name, table_name))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        TableType = args.get("因子表类型", "宽表")
        if TableType == "宽表":
            return _WideTable(name=table_name,
                              fdb=self,
                              sys_args=args,
                              logger=self._QS_Logger)
        else:
            Msg = ("因子库 '%s' 调用方法 getTable 错误: 不支持的因子表类型: '%s'" %
                   (self.Name, TableType))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)

    def renameTable(self, old_table_name, new_table_name):
        if old_table_name not in self._TableFactorDict:
            Msg = ("因子库 '%s' 调用方法 renameTable 错误: 不存在因子表 '%s'!" %
                   (self.Name, old_table_name))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        if (new_table_name != old_table_name) and (new_table_name
                                                   in self._TableFactorDict):
            Msg = ("因子库 '%s' 调用方法 renameTable 错误: 新因子表名 '%s' 已经存在于库中!" %
                   (self.Name, new_table_name))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        self._DB[self.InnerPrefix + old_table_name].rename(self.InnerPrefix +
                                                           new_table_name)
        self._TableFactorDict[new_table_name] = self._TableFactorDict.pop(
            old_table_name)
        return 0

    def deleteTable(self, table_name):
        if table_name not in self._TableFactorDict: return 0
        self._DB.drop_collection(self.InnerPrefix + table_name)
        self._TableFactorDict.pop(table_name, None)
        return 0

    # Create a table; field_types: {field name: data type}
    def createTable(self, table_name, field_types):
        if self.InnerPrefix + table_name not in self._DB.collection_names():
            Doc = {
                iField: {
                    "DataType": iDataType
                }
                for iField, iDataType in field_types.items()
            }
            Doc.update({"datetime": None, "code": "_TableInfo"})
            Collection = self._DB[self.InnerPrefix + table_name]
            Collection.insert(Doc)
            # Add indexes
            if self._Connector == "pymongo":
                import pymongo
                Index1 = pymongo.IndexModel([("datetime", pymongo.ASCENDING),
                                             ("code", pymongo.ASCENDING)],
                                            name=self.InnerPrefix +
                                            "datetime_code")
                Index2 = pymongo.IndexModel([("code", pymongo.HASHED)],
                                            name=self.InnerPrefix + "code")
                try:
                    Collection.create_indexes([Index1, Index2])
                except Exception as e:
                    self._QS_Logger.warning(
                        "'%s' 调用方法 createTable 在数据库中创建表 '%s' 的索引时错误: %s" %
                        (self.Name, table_name, str(e)))
        self._TableFactorDict[table_name] = pd.Series(field_types)
        return 0

    # ---------------------------- Factor operations ---------------------------------
    def renameFactor(self, table_name, old_factor_name, new_factor_name):
        if old_factor_name not in self._TableFactorDict[table_name]:
            Msg = ("因子库 '%s' 调用方法 renameFactor 错误: 因子表 '%s' 中不存在因子 '%s'!" %
                   (self.Name, table_name, old_factor_name))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        if (new_factor_name != old_factor_name) and (
                new_factor_name in self._TableFactorDict[table_name]):
            Msg = (
                "因子库 '%s' 调用方法 renameFactor 错误: 新因子名 '%s' 已经存在于因子表 '%s' 中!" %
                (self.Name, new_factor_name, table_name))
            self._QS_Logger.error(Msg)
            raise __QS_Error__(Msg)
        self._DB[self.InnerPrefix + table_name].update_many(
            {}, {"$rename": {
                old_factor_name: new_factor_name
            }})
        self._TableFactorDict[table_name][
            new_factor_name] = self._TableFactorDict[table_name].pop(
                old_factor_name)
        return 0

    def deleteFactor(self, table_name, factor_names):
        if not factor_names: return 0
        FactorIndex = self._TableFactorDict.get(
            table_name, pd.Series()).index.difference(factor_names).tolist()
        if not FactorIndex: return self.deleteTable(table_name)
        self.deleteField(self.InnerPrefix + table_name, factor_names)
        for iFactorName in factor_names:
            self._DB[self.InnerPrefix + table_name].update_many(
                {}, {'$unset': {
                    iFactorName: 1
                }})
        self._TableFactorDict[table_name] = self._TableFactorDict[table_name][
            FactorIndex]
        return 0

    # Add factors; field_types: {field name: data type}
    def addFactor(self, table_name, field_types):
        if table_name not in self._TableFactorDict:
            return self.createTable(table_name, field_types)
        Doc = {
            iField: {
                "DataType": iDataType
            }
            for iField, iDataType in field_types.items()
        }
        self._DB[self.InnerPrefix + table_name].update({"code": "_TableInfo"},
                                                       {"$set": Doc})
        self._TableFactorDict[table_name] = self._TableFactorDict[
            table_name].append(field_types)
        return 0

    def deleteData(self, table_name, ids=None, dts=None):
        Doc = {}
        if dts is not None:
            Doc["datetime"] = {"$in": dts}
        if ids is not None:
            Doc["code"] = {"$in": ids}
        if Doc:
            self._DB[self.InnerPrefix + table_name].delete_many(Doc)
        else:
            self._DB.drop_collection(self.InnerPrefix + table_name)
            self._TableFactorDict.pop(table_name)
        return 0

    def writeData(self,
                  data,
                  table_name,
                  if_exists="update",
                  data_type={},
                  **kwargs):
        if table_name not in self._TableFactorDict:
            FieldTypes = {
                iFactorName: _identifyDataType(data.iloc[i].dtypes)
                for i, iFactorName in enumerate(data.items)
            }
            self.createTable(table_name, field_types=FieldTypes)
        else:
            NewFactorNames = data.items.difference(
                self._TableFactorDict[table_name].index).tolist()
            if NewFactorNames:
                FieldTypes = {
                    iFactorName: _identifyDataType(data.loc[iFactorName].dtypes)
                    for iFactorName in NewFactorNames
                }
                self.addFactor(table_name, FieldTypes)
            AllFactorNames = self._TableFactorDict[table_name].index.tolist()
            OldData = self.getTable(table_name).readData(
                factor_names=AllFactorNames,
                ids=data.minor_axis.tolist(),
                dts=data.major_axis.tolist())
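            # "append": keep existing non-null values and only fill gaps with the new
            # data; "update": new values take precedence, falling back to the stored
            # values where the new data is null.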
            if if_exists == "append":
                for iFactorName in AllFactorNames:
                    if iFactorName in data:
                        data[iFactorName] = OldData[iFactorName].where(
                            pd.notnull(OldData[iFactorName]),
                            data[iFactorName])
                    else:
                        data[iFactorName] = OldData[iFactorName]
            elif if_exists == "update":
                for iFactorName in AllFactorNames:
                    if iFactorName in data:
                        data[iFactorName] = data[iFactorName].where(
                            pd.notnull(data[iFactorName]),
                            OldData[iFactorName])
                    else:
                        data[iFactorName] = OldData[iFactorName]
            else:
                Msg = ("因子库 '%s' 调用方法 writeData 错误: 不支持的写入方式 '%s'!" %
                       (self.Name, if_exists))
                self._QS_Logger.error(Msg)
                raise __QS_Error__(Msg)
        NewData = {}
        for iFactorName in data.items:
            iData = data.loc[iFactorName].stack(dropna=False)
            NewData[iFactorName] = iData
        NewData = pd.DataFrame(NewData).loc[:, data.items]
        Mask = pd.notnull(NewData).any(axis=1)
        NewData = NewData[Mask]
        if NewData.shape[0] == 0: return 0
        self.deleteData(table_name,
                        ids=data.minor_axis.tolist(),
                        dts=data.major_axis.tolist())
        NewData = NewData.reset_index()
        NewData.columns = ["datetime", "code"] + NewData.columns[2:].tolist()
        self._DB[self.InnerPrefix + table_name].insert_many(
            NewData.to_dict(orient="records"))
        return 0
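
# --- A minimal usage sketch for the MongoDB-backed writeData above (not part of the
# original example). Assumptions: FDB is an already-connected instance of this class,
# "test_table" is a hypothetical table name, and the data container is the pd.Panel
# layout this code expects (items=factors, major_axis=datetimes, minor_axis=IDs),
# which requires an older pandas release that still ships Panel.
import datetime as dt
import numpy as np
import pandas as pd

DTs = [dt.datetime(2020, 1, 2), dt.datetime(2020, 1, 3)]
IDs = ["000001.SZ", "600000.SH"]
Data = pd.Panel({"close": pd.DataFrame(np.random.rand(2, 2), index=DTs, columns=IDs)})
FDB.writeData(Data, "test_table", if_exists="update")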
Example #13
class ArcticDB(WritableFactorDB):
    """ArcticDB"""
    DBName = Str("arctic", arg_type="String", label="数据库名", order=0)
    IPAddr = Str("127.0.0.1", arg_type="String", label="IP地址", order=1)
    Port = Range(low=0,
                 high=65535,
                 value=27017,
                 arg_type="Integer",
                 label="端口",
                 order=2)
    User = Str("", arg_type="String", label="用户名", order=3)
    Pwd = Password("", arg_type="String", label="密码", order=4)

    def __init__(self, sys_args={}, config_file=None, **kwargs):
        self._Arctic = None  # Arctic connection object
        super().__init__(sys_args=sys_args,
                         config_file=(__QS_ConfigPath__ + os.sep +
                                      "ArcticDBConfig.json"
                                      if config_file is None else config_file),
                         **kwargs)
        self.Name = "ArcticDB"
        return

    def __getstate__(self):
        state = self.__dict__.copy()
        # Remove the unpicklable entries.
        state["_Arctic"] = self.isAvailable()
        return state

    def __setstate__(self, state):
        super().__setstate__(state)
        if self._Arctic: self.connect()
        else: self._Arctic = None

    def connect(self):
        self._Arctic = arctic.Arctic(self.IPAddr)
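        # NOTE: only IPAddr is handed to arctic.Arctic here; Port, User and Pwd are
        # not used. A host:port form (an assumption, not in the original) might look
        # like: arctic.Arctic("%s:%s" % (self.IPAddr, self.Port))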
        return 0

    def disconnect(self):
        self._Arctic = None
        return 1

    def isAvailable(self):
        return (self._Arctic is not None)

    @property
    def TableNames(self):
        return sorted(self._Arctic.list_libraries())

    def getTable(self, table_name, args={}):
        if table_name not in self._Arctic.list_libraries():
            raise __QS_Error__("表 '%s' 不存在!" % table_name)
        return _FactorTable(name=table_name,
                            fdb=self,
                            sys_args=args,
                            logger=self._QS_Logger)

    def renameTable(self, old_table_name, new_table_name):
        self._Arctic.rename_library(old_table_name, new_table_name)
        return 0

    def deleteTable(self, table_name):
        self._Arctic.delete_library(table_name)
        return 0

    def setTableMetaData(self,
                         table_name,
                         key=None,
                         value=None,
                         meta_data=None):
        Lib = self._Arctic[table_name]
        TableInfo = Lib.read_metadata("_FactorInfo")
        if TableInfo is None: TableInfo = {}
        if meta_data is not None: TableInfo.update(dict(meta_data))
        if key is not None: TableInfo[key] = value
        Lib.write_metadata("_FactorInfo", TableInfo)
        return 0

    def renameFactor(self, table_name, old_factor_name, new_factor_name):
        if table_name not in self._Arctic.list_libraries():
            raise __QS_Error__("表: '%s' 不存在!" % table_name)
        Lib = self._Arctic[table_name]
        FactorInfo = Lib.read(symbol="_FactorInfo").set_index(["FactorName"])
        if old_factor_name not in FactorInfo.index:
            raise __QS_Error__("因子: '%s' 不存在!" % old_factor_name)
        if new_factor_name in FactorInfo.index:
            raise __QS_Error__("因子: '%s' 已经存在!" % new_factor_name)
        FactorNames = FactorInfo.index.tolist()
        FactorNames[FactorNames.index(old_factor_name)] = new_factor_name
        FactorInfo.index = FactorNames
        FactorInfo.index.name = "FactorName"
        Lib.write(
            "_FactorInfo",
            FactorInfo.reset_index(),
            chunker=arctic.chunkstore.passthrough_chunker.PassthroughChunker())
        IDs = Lib.list_symbols()
        IDs.remove("_FactorInfo")
        for iID in IDs:
            iMetaData = Lib.read_metadata(iID)
            if old_factor_name in iMetaData["FactorNames"]:
                iMetaData["FactorNames"][iMetaData["FactorNames"].index(
                    old_factor_name)] = new_factor_name
                Lib.write_metadata(iID, iMetaData)
        return 0

    def deleteFactor(self, table_name, factor_names):
        if table_name not in self._Arctic.list_libraries(): return 0
        Lib = self._Arctic[table_name]
        FactorInfo = Lib.read(symbol="_FactorInfo").set_index(["FactorName"])
        FactorInfo = FactorInfo.loc[FactorInfo.index.difference(factor_names)]
        if FactorInfo.shape[0] == 0: return self.deleteTable(table_name)
        IDs = Lib.list_symbols()
        IDs.remove("_FactorInfo")
        for iID in IDs:
            iMetaData = Lib.read_metadata(iID)
            iFactorIndex = pd.Series(iMetaData["Cols"],
                                     index=iMetaData["FactorNames"])
            iFactorIndex = iFactorIndex[iFactorIndex.index.difference(
                factor_names)]
            if iFactorIndex.shape[0] == 0:
                Lib.delete(iID)
                continue
            iFactorNames = iFactorIndex.values.tolist()
            iData = Lib.read(symbol=iID, columns=iFactorNames)
            iCols = [str(i) for i in range(iFactorIndex.shape[0])]
            iData.columns = iCols
            iMetaData["FactorNames"], iMetaData["Cols"] = iFactorNames, iCols
            Lib.write(iID, iData, metadata=iMetaData)
        Lib.write(
            "_FactorInfo",
            FactorInfo.reset_index(),
            chunker=arctic.chunkstore.passthrough_chunker.PassthroughChunker())
        return 0

    def setFactorMetaData(self,
                          table_name,
                          ifactor_name,
                          key=None,
                          value=None,
                          meta_data=None):
        if (key is None) and (meta_data is None): return 0
        Lib = self._Arctic[table_name]
        FactorInfo = Lib.read(symbol="_FactorInfo").set_index(["FactorName"])
        if key is not None: FactorInfo.loc[ifactor_name, key] = value
        if meta_data is not None:
            for iKey in meta_data:
                FactorInfo.loc[ifactor_name, iKey] = meta_data[iKey]
        Lib.write(
            "_FactorInfo",
            FactorInfo.reset_index(),
            chunker=arctic.chunkstore.passthrough_chunker.PassthroughChunker())
        return 0

    def writeData(self,
                  data,
                  table_name,
                  if_exists="update",
                  data_type={},
                  **kwargs):
        if data.shape[0] == 0: return 0
        if table_name not in self._Arctic.list_libraries():
            return self._writeNewData(data, table_name, data_type=data_type)
        Lib = self._Arctic[table_name]
        DataCols = [str(i) for i in range(data.shape[0])]
        #DTRange = pd.date_range(data.major_axis[0], data.major_axis[-1], freq=Freq)
        DTRange = data.major_axis
        OverWrite = (if_exists == "update")
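        # if_exists == "update": incoming values overwrite stored ones on overlap;
        # any other value falls back to DataFrame.update(..., overwrite=False),
        # which keeps stored values and only fills cells that are currently NaN.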
        for i, iID in enumerate(data.minor_axis):
            iData = data.iloc[:, :, i]
            if not Lib.has_symbol(iID):
                iMetaData = {
                    "FactorNames": iData.columns.tolist(),
                    "Cols": DataCols
                }
                iData.index.name, iData.columns = "date", DataCols
                Lib.write(iID, iData, metadata=iMetaData)
                continue
            iMetaData = Lib.read_metadata(symbol=iID)
            iOldFactorNames, iCols = iMetaData["FactorNames"], iMetaData[
                "Cols"]
            iNewFactorNames = iData.columns.difference(
                iOldFactorNames).tolist()
            #iCrossFactorNames = iOldFactorNames.intersection(iData.columns).tolist()
            iOldData = Lib.read(symbol=iID,
                                chunk_range=DTRange,
                                filter_data=True)
            if iOldData.shape[0] > 0:
                iOldData.columns = iOldFactorNames
                iOldData = iOldData.loc[iOldData.index.union(iData.index),
                                        iOldFactorNames + iNewFactorNames]
                iOldData.update(iData, overwrite=OverWrite)
            else:
                iOldData = iData.loc[:, iOldFactorNames + iNewFactorNames]
            if iNewFactorNames:
                iCols += [
                    str(i)
                    for i in range(iOldData.shape[1], iOldData.shape[1] +
                                   len(iNewFactorNames))
                ]
                #iOldData = pd.merge(iOldData, iData.loc[:, iNewFactorNames], how="outer", left_index=True, right_index=True)
            #if iCrossFactorNames:
            #iOldData = iOldData.loc[iOldData.index.union(iData.index), :]
            #iOldData.update(iData, overwrite=OverWrite)
            #if if_exists=="update": iOldData.loc[iData.index, iCrossFactorNames] = iData.loc[:, iCrossFactorNames]
            #else: iOldData.loc[iData.index, iCrossFactorNames] = iOldData.loc[iData.index, iCrossFactorNames].where(pd.notnull(iOldData.loc[iData.index, iCrossFactorNames]), iData.loc[:, iCrossFactorNames])
            iOldData.index.name, iOldData.columns = "date", iCols
            iMetaData["FactorNames"], iMetaData[
                "Cols"] = iOldFactorNames + iNewFactorNames, iCols
            Lib.update(iID, iOldData, metadata=iMetaData, chunk_range=DTRange)
        FactorInfo = Lib.read(symbol="_FactorInfo").set_index("FactorName")
        NewFactorNames = data.items.difference(FactorInfo.index).tolist()
        FactorInfo = FactorInfo.loc[FactorInfo.index.tolist() +
                                    NewFactorNames, :]
        for iFactorName in NewFactorNames:
            if iFactorName in data_type:
                FactorInfo.loc[iFactorName,
                               "DataType"] = data_type[iFactorName]
            elif np.dtype('O') in data.loc[iFactorName].dtypes:
                FactorInfo.loc[iFactorName, "DataType"] = "string"
            else:
                FactorInfo.loc[iFactorName, "DataType"] = "double"
        Lib.write(
            "_FactorInfo",
            FactorInfo.reset_index(),
            chunker=arctic.chunkstore.passthrough_chunker.PassthroughChunker())
        return 0

    def _writeNewData(self, data, table_name, data_type):
        FactorNames = data.items.tolist()
        DataType = pd.Series("double", index=data.items)
        for i, iFactorName in enumerate(DataType.index):
            if iFactorName in data_type:
                DataType.iloc[i] = data_type[iFactorName]
            elif np.dtype('O') in data.iloc[i].dtypes:
                DataType.iloc[i] = "string"
        DataCols = [str(i) for i in range(data.shape[0])]
        data.items = DataCols
        self._Arctic.initialize_library(table_name,
                                        lib_type=arctic.CHUNK_STORE)
        Lib = self._Arctic[table_name]
        for i, iID in enumerate(data.minor_axis):
            iData = data.iloc[:, :, i]
            iMetaData = {"FactorNames": FactorNames, "Cols": DataCols}
            iData.index.name = "date"
            Lib.write(iID, iData, metadata=iMetaData)
        DataType = DataType.reset_index()
        DataType.columns = ["FactorName", "DataType"]
        Lib.write(
            "_FactorInfo",
            DataType,
            chunker=arctic.chunkstore.passthrough_chunker.PassthroughChunker())
        data.items = FactorNames
        return 0
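
# --- A minimal read-back sketch for the per-ID layout written by _writeNewData above
# (not part of the original example). Assumptions: FDB is a connected ArcticDB
# instance, and "some_table" and "000001.SZ" are a hypothetical library and symbol.
Lib = FDB._Arctic["some_table"]
iMeta = Lib.read_metadata("000001.SZ")    # {"FactorNames": [...], "Cols": ["0", "1", ...]}
iData = Lib.read(symbol="000001.SZ")      # DataFrame indexed by "date" with positional columns
iData.columns = iMeta["FactorNames"]      # map positional columns back to factor names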
Example #14
class WindDB(FactorDB):
    """Wind 金融工程数据库"""
    DBType = Enum("SQL Server", "Oracle", "MySQL", arg_type="SingleOption", label="数据库类型", order=0)
    DBName = Str("wind", arg_type="String", label="数据库名", order=1)
    IPAddr = Str("127.0.0.1", arg_type="String", label="IP地址", order=2)
    Port = Range(low=0, high=65535, value=1521, arg_type="Integer", label="端口", order=3)
    User = Str("root", arg_type="String", label="用户名", order=4)
    Pwd = Password("", arg_type="String", label="密码", order=5)
    TablePrefix = Str("", arg_type="String", label="表名前缀", order=6)
    CharSet = Enum("utf8", "gbk", "gb2312", "gb18030", "cp936", "big5", arg_type="SingleOption", label="字符集", order=7)
    Connector = Enum("default", "cx_Oracle", "pymssql", "mysql.connector", "pyodbc", arg_type="SingleOption", label="连接器", order=8)
    DSN = Str("", arg_type="String", label="数据源", order=9)
    def __init__(self, sys_args={}, config_file=None, **kwargs):
        super().__init__(sys_args=sys_args, config_file=(__QS_ConfigPath__+os.sep+"WindDBConfig.json" if config_file is None else config_file), **kwargs)
        self._Connection = None  # database connection
        self._AllTables = []  # all table names in the database, used to resolve case sensitivity when querying
        self._InfoFilePath = __QS_LibPath__+os.sep+"WindDBInfo.hdf5"  # path of the database info file
        self._InfoResourcePath = __QS_MainPath__+os.sep+"Resource"+os.sep+"WindDBInfo.xlsx"  # path of the database info source file
        self._TableInfo, self._FactorInfo = updateInfo(self._InfoFilePath, self._InfoResourcePath, self._QS_Logger)  # table info and field info of the database
        self.Name = "WindDB"
        return
    def __getstate__(self):
        state = self.__dict__.copy()
        # Remove the unpicklable entries.
        state["_Connection"] = (True if self.isAvailable() else False)
        return state
    def __setstate__(self, state):
        super().__setstate__(state)
        if self._Connection:
            self.connect()
        else:
            self._Connection = None
        self._AllTables = state.get("_AllTables", [])
    # ------------------------------------------- Database-related ---------------------------
    def connect(self):
        if (self.Connector=='cx_Oracle') or ((self.Connector=='default') and (self.DBType=='Oracle')):
            try:
                import cx_Oracle
                self._Connection = cx_Oracle.connect(self.User, self.Pwd, cx_Oracle.makedsn(self.IPAddr, str(self.Port), self.DBName))
            except Exception as e:
                if self.Connector!='default': raise e
        elif (self.Connector=='pymssql') or ((self.Connector=='default') and (self.DBType=='SQL Server')):
            try:
                import pymssql
                self._Connection = pymssql.connect(server=self.IPAddr, port=str(self.Port), user=self.User, password=self.Pwd, database=self.DBName, charset=self.CharSet)
            except Exception as e:
                if self.Connector!='default': raise e
        elif (self.Connector=='mysql.connector') or ((self.Connector=='default') and (self.DBType=='MySQL')):
            try:
                import mysql.connector
                self._Connection = mysql.connector.connect(host=self.IPAddr, port=str(self.Port), user=self.User, password=self.Pwd, database=self.DBName, charset=self.CharSet)
            except Exception as e:
                if self.Connector!='default': raise e
        else:
            if self.Connector not in ('default', 'pyodbc'):
                self._Connection = None
                raise __QS_Error__("不支持该连接器(connector) : "+self.Connector)
            else:
                import pyodbc
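                # Use a preconfigured ODBC DSN when one is given; otherwise build a
                # driver connection string from DBType/DBName/IPAddr/User/Pwd.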
                if self.DSN:
                    self._Connection = pyodbc.connect('DSN=%s;PWD=%s' % (self.DSN, self.Pwd))
                else:
                    self._Connection = pyodbc.connect('DRIVER={%s};DATABASE=%s;SERVER=%s;UID=%s;PWD=%s' % (self.DBType, self.DBName, self.IPAddr, self.User, self.Pwd))
        self._Connection.autocommit = True
        self._AllTables = []
        return 0
    def disconnect(self):
        if self._Connection is not None:
            try:
                self._Connection.close()
            except Exception as e:
                self._QS_Logger.warning("因子库 ’%s' 断开错误: %s" % (self.Name, str(e)))
            finally:
                self._Connection = None
        return 0
    def isAvailable(self):
        return (self._Connection is not None)
    def cursor(self, sql_str=None):
        if self._Connection is None: raise __QS_Error__("%s尚未连接!" % self.__doc__)
        Cursor = self._Connection.cursor()
        if sql_str is None: return Cursor
        if not self._AllTables:
            if self.DBType=="SQL Server":
                Cursor.execute("SELECT Name FROM SysObjects Where XType='U'")
                self._AllTables = [rslt[0] for rslt in Cursor.fetchall()]
            elif self.DBType=="MySQL":
                Cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema='"+self.DBName+"' AND table_type='base table'")
                self._AllTables = [rslt[0] for rslt in Cursor.fetchall()]
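        # Rewrite table names in the SQL to their canonical casing (case-insensitive
        # match, canonical replacement) so the query works on case-sensitive backends.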
        for iTable in self._AllTables:
            sql_str = re.sub(iTable, iTable, sql_str, flags=re.IGNORECASE)
        Cursor.execute(sql_str)
        return Cursor
    def fetchall(self, sql_str):
        Cursor = self.cursor(sql_str=sql_str)
        Data = Cursor.fetchall()
        Cursor.close()
        return Data
    # ------------------------------- Table operations ---------------------------------
    @property
    def TableNames(self):
        if self._TableInfo is not None: return self._TableInfo.index.tolist()
        else: return []
    def getTable(self, table_name, args={}):
        TableClass = self._TableInfo.loc[table_name, "TableClass"]
        return eval("_"+TableClass+"(name='"+table_name+"', fdb=self, sys_args=args, logger=self._QS_Logger)")
    # ----------------------------------------- Data retrieval ---------------------------------
    # Given a start date and an end date, return the exchange's trading days; currently only "SSE" and "SZSE" are supported
    def getTradeDay(self, start_date=None, end_date=None, exchange="SSE"):
        if exchange not in ("SSE", "SZSE"):
            raise __QS_Error__("不支持交易所: '%s' 的交易日序列!" % exchange)
        if start_date is None:
            start_date = dt.date(1900,1,1)
        if end_date is None:
            end_date = dt.date.today()
        SQLStr = 'SELECT F1_1010 FROM {Prefix}tb_object_1010 '
        SQLStr += 'WHERE F1_1010<=\'{EndDate}\' '
        SQLStr += 'AND F1_1010>=\'{StartDate}\' '
        SQLStr += 'ORDER BY F1_1010'
        Dates = self.fetchall(SQLStr.format(Prefix=self.TablePrefix,StartDate=start_date.strftime("%Y%m%d"),EndDate=end_date.strftime("%Y%m%d")))
        return list(map(lambda x: dt.date(int(x[0][:4]), int(x[0][4:6]), int(x[0][6:8])), Dates))
    # Get the IDs of all A shares listed as of the given date, or of all A shares that have ever been listed up to that date
    def _getAllAStock(self, date, is_current=True):
        SQLStr = 'SELECT {Prefix}tb_object_0001.f1_0001 FROM {Prefix}tb_object_0001 INNER JOIN {Prefix}tb_object_1090 ON ({Prefix}tb_object_0001.f16_0001={Prefix}tb_object_1090.f2_1090) '
        if is_current:
            SQLStr += 'WHERE {Prefix}tb_object_1090.f21_1090=1 AND {Prefix}tb_object_1090.F4_1090=\'A\' AND ({Prefix}tb_object_1090.F18_1090 is NULL OR {Prefix}tb_object_1090.F18_1090>\'{Date}\') AND {Prefix}tb_object_1090.F17_1090<=\'{Date}\' ORDER BY {Prefix}tb_object_0001.f1_0001'
        else:
            SQLStr += 'WHERE {Prefix}tb_object_1090.f21_1090=1 AND {Prefix}tb_object_1090.F4_1090=\'A\' AND {Prefix}tb_object_1090.F17_1090<=\'{Date}\' ORDER BY {Prefix}tb_object_0001.f1_0001'
        return [iRslt[0] for iRslt in self.fetchall(SQLStr.format(Prefix=self.TablePrefix, Date=date.strftime("%Y%m%d")))]
    # Given an index ID, get the index's constituent stock IDs; is_current=True: constituents as of the given date, False: all constituents that have ever appeared up to that date
    def getStockID(self, index_id="全体A股", date=None, is_current=True):
        if date is None: date = dt.date.today()
        if index_id=="全体A股": return self._getAllAStock(date=date, is_current=is_current)
        # Get the index's internal security ID in the database
        SQLStr = 'SELECT f16_0001 FROM {Prefix}tb_object_0001 where f1_0001=\'{IndexID}\''
        IndexEquityID = self.fetchall(SQLStr.format(Prefix=self.TablePrefix, IndexID=index_id))[0][0]
        # Get the stock IDs in the index
        SQLStr = 'SELECT {Prefix}tb_object_0001.f1_0001 FROM {Prefix}tb_object_1402, {Prefix}tb_object_0001 '
        SQLStr += 'WHERE {Prefix}tb_object_0001.F16_0001={Prefix}tb_object_1402.F1_1402 '
        SQLStr += 'AND {Prefix}tb_object_1402.F2_1402=\'{IndexEquityID}\' '
        SQLStr += 'AND {Prefix}tb_object_1402.F3_1402<=\'{Date}\' '  # inclusion date on or before the given date
        if is_current:
            SQLStr += 'AND ({Prefix}tb_object_1402.F5_1402=1 OR {Prefix}tb_object_1402.F4_1402>\'{Date}\') '  # removal date after the given date, i.e. still a constituent
        SQLStr += 'ORDER BY {Prefix}tb_object_0001.f1_0001'
        return [iRslt[0] for iRslt in self.fetchall(SQLStr.format(Prefix=self.TablePrefix, IndexEquityID=IndexEquityID, Date=date.strftime("%Y%m%d")))]
    # Convert IDs to security IDs
    def ID2EquityID(self, ids):
        nID = len(ids)
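        # Keep each IN list within 1000 elements (Oracle's limit on IN-list
        # expressions); extra IDs are appended as additional OR'ed IN clauses.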
        if nID<=1000:
            SQLStr = 'SELECT f1_0001, f16_0001 FROM '+self.TablePrefix+'tb_object_0001 WHERE f1_0001 IN (\''+'\',\''.join(ids)+'\')'
        else:
            SQLStr = 'SELECT f1_0001, f16_0001 FROM '+self.TablePrefix+'tb_object_0001 WHERE f1_0001 IN (\''+'\',\''.join(ids[0:1000])+'\')'
            i = 1000
            while i<nID:
                SQLStr += ' OR f1_0001 IN (\''+'\',\''.join(ids[i:i+1000])+'\')'
                i = i+1000
        Cursor = self.cursor(SQLStr)
        Result = Cursor.fetchall()
        Cursor.close()
        return dict(Result)