Example #1
def test_save_table_partition_disk(self):
    # create a RANGE-partitioned on-disk database, save a table into it and read it back
    orca.default_session().run(
        f"db = database('{WORK_DIR}padisk', RANGE, 2010.01.04 2011.01.04 2012.01.04 2013.01.04 2014.01.04 2015.01.04 2016.01.04)"
    )
    odf = self.odf_disk
    orca.save_table(WORK_DIR + "padisk", "tdisk", self.odf_disk)
    x = orca.read_table(WORK_DIR + "padisk", "tdisk")
    assert_frame_equal(odf.to_pandas(), x.to_pandas())
    # save the same table a second time and read it back again
    orca.save_table(WORK_DIR + "padisk", "tdisk", self.odf_disk)
    x = orca.read_table(WORK_DIR + "padisk", "tdisk")
Example #2
def test_save_table_in_memory_disk(self):
    odf = orca.DataFrame(
        {
            'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
            'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
        },
        index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
    # create an unpartitioned on-disk database, save the in-memory DataFrame and read it back
    orca.default_session().run(f"db = database('{WORK_DIR}imd');")
    orca.save_table(WORK_DIR + "imd", "imdb", odf)
    x = orca.read_table(WORK_DIR + "imd", "imdb")
    # the custom index is dropped on save, so compare against the data with a reset index
    assert_frame_equal(odf.to_pandas().reset_index(drop=True), x.to_pandas())
Example #3
def _create_odf_csv(datal, datar):
    dfsDatabase = "dfs://testMergeDB"
    s = orca.default_session()
    dolphindb_script = """
                        login('admin', '123456')
                        if(existsDatabase('{dbPath}'))
                           dropDatabase('{dbPath}')
                        db=database('{dbPath}', VALUE, `a`b`c`d`e`f`g)
                        stb1=extractTextSchema('{data1}')
                        update stb1 set type="SYMBOL" where name="type"
                        stb2=extractTextSchema('{data2}')
                        update stb2 set type="SYMBOL" where name="type"
                        loadTextEx(db,`tickers,`type, '{data1}',,stb1)
                        loadTextEx(db,`values,`type, '{data2}',,stb2)
                        """.format(dbPath=dfsDatabase, data1=datal, data2=datar)
    s.run(dolphindb_script)
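
The helper returns nothing; once the script has run, the two tables it loaded can be read back as orca DataFrames. A minimal usage sketch (the variable names here are illustrative):

# read the two partitioned tables created above back as orca DataFrames
odf_tickers = orca.read_table("dfs://testMergeDB", "tickers")
odf_values = orca.read_table("dfs://testMergeDB", "values")
print(len(odf_tickers), len(odf_values))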
Example #4
def _create_odf_pandas(n, pdf):
    # call function default_session() to get session object
    s = orca.default_session()

    # upload a local dataframe to a dfs table
    dolphindb_script = """
    login('admin', '123456')
    dbPath='dfs://whereDB'
    if(existsDatabase(dbPath))
        dropDatabase(dbPath)
    db=database(dbPath, VALUE, 1..""" + str(n) + """) 
    tdata=table(1:0,`id`date`tsymbol`tbool`tchar`tshort`tint`long`tfloat`tdouble, 
    [INT,DATE,SYMBOL,BOOL,CHAR,SHORT,INT,LONG,FLOAT,DOUBLE]) 
    db.createPartitionedTable(tdata, `tb, `id) """
    s.run(dolphindb_script)
    s.run("tableInsert{loadTable('dfs://whereDB',`tb)}", pdf)
    return orca.read_table("dfs://whereDB", 'tb')
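
The helper expects a pandas DataFrame whose columns line up with the schema declared in the script (id, date, tsymbol, tbool, tchar, tshort, tint, long, tfloat, tdouble). A minimal sketch of a call, with made-up values and an assumed dtype mapping:

import pandas as pd

# n controls the VALUE partitions 1..n, so it must cover every value in the id column
n = 3
pdf = pd.DataFrame({
    'id': [1, 2, 3],
    'date': pd.to_datetime(['2019-01-01', '2019-01-02', '2019-01-03']),
    'tsymbol': ['a', 'b', 'c'],
    'tbool': [True, False, True],
    'tchar': [1, 2, 3],
    'tshort': [1, 2, 3],
    'tint': [1, 2, 3],
    'long': [10, 20, 30],
    'tfloat': [1.0, 2.0, 3.0],
    'tdouble': [1.0, 2.0, 3.0],
})
odf = _create_odf_pandas(n, pdf)  # orca DataFrame backed by the dfs table
print(odf.to_pandas())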
Example #5
def _create_odf_dfs(data):
    # call function default_session() to get session object
    s = orca.default_session()

    # import a csv file to a dfs table
    dolphindb_script = """
    login("admin", "123456")
    dbPath="dfs://USPricesDB"
    if(existsDatabase(dbPath))
        dropDatabase(dbPath)
    tt=extractTextSchema('{data}')
    update tt set type='FLOAT' where name in ['SHROUT', 'DLSTCD', 'DLPRC', 'VOL']
    update tt set type='SYMBOL' where name='TRDSTAT'
    schema = table(500:0, tt.name, tt.type)
    db = database(dbPath, RANGE, 2010.01.04 2011.01.04 2012.01.04 2013.01.04 2014.01.04 2015.01.04  2016.01.04)
    USPrice = db.createPartitionedTable(schema, `USPrices, `date)
    db.loadTextEx(`USPrices,`date, '{data}' ,, tt)""".format(data=data)
    s.run(dolphindb_script)
    return orca.read_table("dfs://USPricesDB", 'USPrices')
Example #6
def _create_odf_csv(data, dfsDatabase):
    # call function default_session() to get session object
    s = orca.default_session()
    dolphindb_script = """
    login("admin", "123456")
    dbPath="dfs://USPricesDB"
    if(existsDatabase(dbPath))
       dropDatabase(dbPath)
    cols = exec name from extractTextSchema('{data}')
    types = exec type from extractTextSchema('{data}')
    schema = table(50000:0, cols, types)
    tt=schema(schema).colDefs
    tt.drop!(`typeInt)
    tt.rename!(`name`type)
    db = database(dbPath, RANGE, 2010.01.04 2011.01.04  2012.01.04 2013.01.04 2014.01.04 2015.01.04  2016.01.04)
    USPrice = db.createPartitionedTable(schema, `USPrices, `date)
    db.loadTextEx(`USPrices,`date, '{data}' ,, tt)""".format(dbPath=dfsDatabase, data=data)
    s.run(dolphindb_script)
    return orca.read_table(dfsDatabase, 'USPrices')
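
A call might look like the following; the csv path is a placeholder, and the database name passed in is the one the script creates and the table is read back from:

odf = _create_odf_csv('/path/to/USPrices.csv', "dfs://USPricesDB")
print(len(odf))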
Example #7
def _odf_disk_unpartitioned(data):
    s = orca.default_session()

    # import a csv file to a dfs table
    dolphindb_script = """
    login("admin", "123456")
    dbPath='{WORK_DIR}'+'testOnDiskunpartitionedDB'
    if(existsDatabase(dbPath))
        dropDatabase(dbPath)
    tt=extractTextSchema('{data}')
    update tt set type='FLOAT' where name in ['SHROUT', 'DLSTCD', 'DLPRC', 'VOL']
    update tt set type='SYMBOL' where name='TRDSTAT'
    schema = table(500:0, tt.name, tt.type)
    db = database(dbPath)
    USPrice = db.createTable(schema, `USPrices, `date)
    db.loadTextEx(`USPrices,`date, '{data}' ,, tt)""".format(WORK_DIR=WORK_DIR,
                                                             data=data)
    s.run(dolphindb_script)
    return orca.read_table(WORK_DIR + 'testOnDiskunpartitionedDB', 'USPrices')
Example #8
def _create_odf_csv(data, dfsDatabase):
    # call function default_session() to get session object
    s = orca.default_session()
    dolphindb_script = """
    login("admin", "123456")
    dbPath="dfs://groupbyDateDB"
    if(existsDatabase(dbPath))
        dropDatabase(dbPath)
    schema = extractTextSchema('{data}')
    cols = exec name from schema
    types = ["INT", "DATE", "SYMBOL", "BOOL", "SHORT", "INT", "LONG", "FLOAT", "DOUBLE"]
    schema = table(50000:0, cols, types)
    tt=schema(schema).colDefs
    tt.drop!(`typeInt)
    tt.rename!(`name`type)
    db = database(dbPath, RANGE, 1 501 1001 1501 2001 2501 3001)
    tb = db.createPartitionedTable(schema, `tb, `id)
    db.loadTextEx(`tb,`id, '{data}' ,, tt)""".format(dbPath=dfsDatabase, data=data)
    s.run(dolphindb_script)
    return orca.read_table(dfsDatabase, 'tb')
Example #9
def _create_odf_csv(data_left, data_right):
    # call function default_session() to get session object
    s = orca.default_session()

    dolphindb_script = """
    login("admin", "123456")
    dbPath="dfs://testjoinDB"
    if(existsDatabase(dbPath))
        dropDatabase(dbPath)
    tt_left=extractTextSchema('{datal}')
    update tt_left set type="SYMBOL" where name="TRDSTAT"
    tt_right=extractTextSchema('{datar}')
    schema_left = table(50000:0, tt_left.name, tt_left.type)
    schema_right = table(50000:0, tt_right.name, tt_right.type)
    db = database(dbPath, RANGE, 2010.01.04 2011.01.04 2012.01.04 2013.01.04 2014.01.04 2015.01.04  2016.01.04)
    tb_left = db.createPartitionedTable(schema_left, `tb_left, `date)
    db.loadTextEx(`tb_left, `date, '{datal}' ,, tt_left)
    tb_right = db.createPartitionedTable(schema_right, `tb_right, `date)
    db.loadTextEx(`tb_right, `date, '{datar}' ,, tt_right)""".format(
        datal=data_left, datar=data_right)
    s.run(dolphindb_script)
Example #10
def _create_odf_csv(data, dfsDatabase):
    # call function default_session() to get session object
    s = orca.default_session()

    dolphindb_script = """
    login("admin", "123456")
    dbPath="dfs://onlyNumericalColumnsDB"
    if(existsDatabase(dbPath))
        dropDatabase(dbPath)
    schema = extractTextSchema('{data}')
    cols = exec name from schema
    types = ["INT", "BOOL", "CHAR", "SHORT", "INT", "LONG", "FLOAT", "DOUBLE"]
    schema = table(50000:0, cols, types)
    db = database(dbPath, RANGE, 1 501 1001 1501 2001 2501 3001)
    tb = db.createPartitionedTable(schema, `tb, `id)
    tt=schema(schema).colDefs
    tt.drop!(`typeInt`comment)
    tt.rename!(`name`type)
    update tt set type="INT" where name="tchar"
    tdata=loadText('{data}' ,, tt)
    tb.append!(tdata)""".format(dbPath=dfsDatabase, data=data)
    s.run(dolphindb_script)
    return orca.read_table(dfsDatabase, 'tb')
Example #11
    def __init__(self, ticker_file, value_file):
        self.ticker_file = ticker_file
        self.value_file = value_file

        self.df_ticker = None
        self.df_value = None

        script = """
                login('admin', '123456')
                if(existsDatabase('dfs://testOrcaTicker'))
                   dropDatabase('dfs://testOrcaTicker')
                schema=extractTextSchema('{data1}')
                db=database('dfs://testOrcaTicker', HASH, [DATE,20])
                loadTextEx(db,`tickers,`date, '{data1}')
                
                if(existsDatabase('dfs://testOrcaValue'))
                   dropDatabase('dfs://testOrcaValue')
                schema=extractTextSchema('{data2}')
                db=database('dfs://testOrcaValue', HASH, [INT, 4])
                loadTextEx(db,`values,`id, '{data2}')
                """.format(data1=ticker_file, data2=value_file)

        s = orca.default_session()
        s.run(script)
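        # a sketch of how the initialisation might continue: read the two partitioned
        # tables back as orca DataFrames (the attribute assignments are a guess based
        # on the df_ticker / df_value fields set to None above)
        self.df_ticker = orca.read_table("dfs://testOrcaTicker", "tickers")
        self.df_value = orca.read_table("dfs://testOrcaValue", "values")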
Example #12
def test_dataframe_Combining_joining_merging_append_on_disk_unpa(self):
    # create an unpartitioned on-disk database and save the source table into it
    orca.default_session().run(f"db = database('{WORK_DIR}imdx');")
    orca.save_table(WORK_DIR + "imdx", "imdb", self.odf)
    x = orca.read_table(WORK_DIR + "imdx", "imdb")
    # append the same data to the table in place, then read the table back again
    x.append(self.odf, inplace=True)
    y = orca.read_table(WORK_DIR + "imdx", "imdb")
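    # a possible follow-up check (not in the original excerpt): assuming the in-place
    # append writes through to the on-disk table and preserves row order, the re-read
    # table should now hold two copies of the source data (pandas imported as pd)
    expected = pd.concat([self.odf.to_pandas()] * 2).reset_index(drop=True)
    assert_frame_equal(expected, y.to_pandas())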