def auto_crawler_new():
    """Crawl the latest institutional-investors data and upload it to SQL."""
    # use a single date_name variable (consistent with the sibling crawlers)
    # so the table name lives in one place
    date_name = 'InstitutionalInvestors'
    ACII = AutoCrawlerInstitutionalInvestors()
    ACII.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    C2S.upload2sql(ACII.data)

    # record that this crawler run finished
    print('save crawler process')
    BasedClass.save_crawler_process(date_name)
def auto_crawler_new():
    """Crawl the newest gold-price data and store it in SQL."""
    table = 'GoldPrice'
    crawler = CrawlerGoldPrice()
    crawler.main()

    uploader = BasedClass.Crawler2SQL(table, 'Financial_DataSet')
    uploader.upload2sql(crawler.data)

    # record that this crawler run finished
    print('save crawler process')
    BasedClass.save_crawler_process(table)
def auto_crawler_new():
    """Crawl the latest crude-oil prices and upload them to SQL."""
    date_name = 'CrudeOilPrices'
    ACCOP = AutoCrawlerCrudeOilPrices()
    ACCOP.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    C2S.upload2sql(ACCOP.data)
    # -------------------------------------------------
    print('save crawler process')
    # use date_name instead of a second copy of the literal so the table
    # name only has to be changed in one place
    BasedClass.save_crawler_process(date_name)
# Example #4
# 0
def auto_crawler_new():
    """Crawl the latest exchange rates and upload them to SQL."""
    date_name = 'ExchangeRate'
    # local renamed from ACCOP (copy-paste leftover) to match the class
    ACER = AutoCrawlerExchangeRate()
    ACER.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    # date/country are text columns, not floats
    C2S.upload2sql(ACER.data, no_float_col=['date', 'country'])
    # ------------------------------------------------------
    print('save crawler process')
    # use date_name instead of a second copy of the literal
    BasedClass.save_crawler_process(date_name)
# Example #5
# 0
def auto_crawler_new():
    """Crawl the latest interest rates and upload them to SQL."""
    date_name = 'InterestRate'
    ACIR = AutoCrawlerInterestRate()
    ACIR.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    # country / full_country_name / date are text columns, not floats
    C2S.upload2sql(ACIR.data,
                   no_float_col=['country', 'full_country_name', 'date'])
    # -------------------------------------------------
    print('save crawler process')
    # use date_name instead of a second copy of the literal
    BasedClass.save_crawler_process(date_name)
def auto_crawler_new():
    """Crawl the latest government-bond data and upload it to SQL."""
    date_name = 'GovernmentBonds'
    # do not name a module-level local `self` — it reads like a method
    # receiver and confuses readers and linters
    ACGB = AutoCrawlerGovernmentBonds()
    ACGB.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    C2S.upload2sql(ACGB.data,
                   no_float_col=['Date', 'data_name', 'country', 'curr_id'])
    # -------------------------------------------------
    print('save crawler process')
    BasedClass.save_crawler_process(date_name)
# Example #7
# 0
 def get_new(self):
     """Reconcile the newest crawled dividend row with what SQL holds.

     If the newest meeting date equals the stored one, UPDATE the changed
     columns (late-announced fields such as the ex-right / ex-dividend
     days). If the crawled meeting date is newer, INSERT it as a new row.
     """
     def UPDATE_sql(host, user, password, database, sql_text):
         # Execute every UPDATE statement in one transaction.
         # Returns 1 on success, 0 if any statement failed (the original
         # best-effort contract — callers do not inspect the value here).
         conn = pymysql.connect(host=host,
                                port=3306,
                                user=user,
                                password=password,
                                database=database,
                                charset="utf8")
         try:
             cursor = conn.cursor()
             for statement in sql_text:
                 cursor.execute(statement)
             conn.commit()
             return 1
         except Exception:
             # narrowed from a bare `except:` so Ctrl-C still propagates
             return 0
         finally:
             # always release the connection, even on failure
             conn.close()

     old_date = Load.Load(database='StockDividend',
                          select=self.stock).sort_values('meeting_data')
     # newest meeting date already stored in SQL
     self.old_date = str(old_date.iloc[len(old_date) - 1]['meeting_data'])
     # meeting date of the freshly crawled row
     self.new_date = self.new_data['meeting_data']

     change_name = list(self.new_data.index)
     sql_text = []

     if self.old_date == self.new_date:
         # plain loop instead of a side-effect-only list comprehension
         for col in ['meeting_data', 'stock_id']:
             change_name.remove(col)
         self.get_data_id()
         for col in change_name:
             tem = self.change_sql_data(col)
             if tem != '':
                 sql_text.append(tem)
         # update new value,
         # because Ex_right_trading_day & Ex-dividend transaction day
         # always slower announcement
         UPDATE_sql(Key.host,
                    Key.user,
                    Key.password,
                    self.database,
                    sql_text)

     elif self.old_date < self.new_date:
         # if new date > old date, then add the new row
         data = pd.DataFrame(self.new_data)
         data = data.T
         C2S = BasedClass.Crawler2SQL('StockDividend', 'Financial_DataSet')
         C2S.upload2sql(data,
                        no_float_col=['meeting_data',
                                      'Ex_right_trading_day',
                                      'Ex_dividend_transaction_day',
                                      'stock_id'])
# Example #8
# 0
def auto_crawler_new():
    """Crawl the newest stock prices and upload them to SQL."""
    dataset_name = 'StockPrice'
    # do not name a module-level local `self`
    ACSP = AutoCrawlerStockPrice()
    ACSP.main()
    print('crawler data and upload 2 sql')
    C2S = BasedClass.Crawler2SQL(dataset_name, 'Financial_DataSet')
    # date/stock are text columns; Volume is an integer column
    C2S.upload2sql(ACSP.new_data,
                   no_float_col=['date', 'stock'],
                   int_col=['Volume'])
    # ------------------------------------------------------
    print('save crawler process')
    # use dataset_name instead of a second copy of the literal
    BasedClass.save_crawler_process(dataset_name)
# Example #9
# 0
def main():
    """Crawl the stock-ID list and rebuild the `StockInfo` table."""
    database = 'Financial_DataSet'
    CSID = CrawlerStockID(Key.host, Key.user, Key.password, database)
    CSID.run()

    C2S = BasedClass.Crawler2SQL('StockInfo', 'Financial_DataSet')
    try:
        C2S.create_table(CSID.data.columns)
    except Exception:
        # presumably the table already exists — TODO confirm; creation
        # failure is treated as non-fatal (was a bare `except: 123`)
        pass

    # upload stock info (wipe the old rows first)
    BasedClass.execute_sql2(database, 'TRUNCATE table `StockInfo` ')
    CSID.upload_stock_info2sql()
# Example #10
# 0
def crawler_history():
    """Crawl the full exchange-rate history and store it in SQL."""
    date_name = 'ExchangeRate'
    CER = CrawlerExchangeRate()
    CER.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    try:
        C2S.create_table(CER.data.columns, text_col=['country'])
    except Exception:
        # presumably the table already exists — creation failure is
        # non-fatal (was a bare `except: 123`)
        pass
    C2S.upload2sql(CER.data, no_float_col=['date', 'country'])

    print('create process table')
    # use date_name instead of a second copy of the literal
    BasedClass.create_datatable(date_name)
def crawler_history():
    """Crawl the full crude-oil price history and store it in SQL."""
    # single date_name variable, consistent with the sibling functions
    date_name = 'CrudeOilPrices'
    CCOP = CrawlerCrudeOilPrices()
    CCOP.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    try:
        C2S.create_table(CCOP.data.columns)
    except Exception:
        pass  # table probably exists already (was a bare `except: 123`)

    C2S.upload2sql(CCOP.data)
    print('create process table')
    BasedClass.create_datatable(date_name)
def crawler_history():
    """Crawl the full government-bond history and store it in SQL."""
    date_name = 'GovernmentBonds'
    # do not name a module-level local `self`
    CGB = CrawlerGovernmentBonds()
    CGB.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    try:
        C2S.create_table(CGB.data.columns,
                         text_col=['data_name', 'country', 'curr_id'])
    except Exception:
        pass  # table probably exists already (was a bare `except: 123`)
    C2S.upload2sql(CGB.data,
                   no_float_col=['Date', 'data_name', 'country', 'curr_id'])
    print('create process table')
    BasedClass.create_datatable(date_name)
def crawler_history():
    """Crawl the full institutional-investors history and store it in SQL."""
    # single date_name variable, consistent with the sibling functions
    date_name = 'InstitutionalInvestors'
    CII = CrawlerInstitutionalInvestors()
    CII.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    try:
        C2S.create_table(CII.data.columns)
    except Exception:
        pass  # table probably exists already (was a bare `except: 123`)

    C2S.upload2sql(CII.data)
    print('create process table')
    BasedClass.create_datatable(date_name)
# Example #14
# 0
def crawler_history():
    """Crawl the full interest-rate history and store it in SQL."""
    # single date_name variable, consistent with the sibling functions
    date_name = 'InterestRate'
    CIR = CrawlerInterestRate()
    CIR.main()

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    try:
        C2S.create_table(CIR.data.columns,
                         text_col=['country', 'full_country_name'])
    except Exception:
        pass  # table probably exists already (was a bare `except: 123`)

    C2S.upload2sql(CIR.data,
                   no_float_col=['country', 'full_country_name', 'date'])
    print('create process table')
    BasedClass.create_datatable(date_name)
def crawler_history():
    """Crawl the full financial-statement history and store it in SQL."""
    date_name = 'FinancialStatements'
    CFS = CrawlerFinancialStatements()
    CFS.crawler()
    CFS.fix()
    # convert ROC (Minguo) calendar years to Gregorian years
    CFS.stock_financial_statements['year'] = (
        CFS.stock_financial_statements['year'] + 1911)

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    try:
        C2S.create_table()
    except Exception:
        pass  # table probably exists already (was a bare `except: 123`)
    C2S.upload2sql(CFS.stock_financial_statements,
                   no_float_col=['stock_id', 'url'],
                   int_col=['year', 'quar'])

    print('create process table')
    BasedClass.create_datatable(date_name)
# Example #16
# 0
def crawler_history():
    """Crawl the full stock-dividend history and store it in SQL."""
    date_name = 'StockDividend'
    CTD = CrawlerStockDividend()
    CTD.main()
    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    try:
        C2S.create_table()
    except Exception:
        pass  # table probably exists already (was a bare `except: 123`)
    # hoist the loop-invariant length out of the loop
    total = len(CTD.url_set)
    for i in range(total):
        print(str(i) + '/' + str(total))  # progress indicator
        data = CTD.get_value(i)
        C2S.upload2sql(data,
                       no_float_col=['meeting_data',
                                     'Ex_right_trading_day',
                                     'Ex_dividend_transaction_day',
                                     'stock_id'])
    print('create process table')
    BasedClass.create_datatable(date_name)
def crawler_history():
    """Load the historical gold-price CSV and store it in SQL.

    The history is obtained by downloading the
    https://www.gold.org/data/gold-price file beforehand.
    """
    date_name = 'GoldPrice'
    # NOTE(review): `path` is a module-level name defined elsewhere —
    # confirm it holds the home-directory user name
    file_path = '/home/' + path + '/github/FinancialMining/CrawlerCode/'
    data = pd.read_csv(file_path + 'glod.csv', skiprows=1)

    # keep only rows dated before 2018-01-01 (cutoff hoisted out of the
    # per-row comprehension — it is loop-invariant)
    cutoff = datetime.date(2018, 1, 1)
    date = [datetime.datetime.strptime(d, '%Y/%m/%d').date() for d in data.date]
    data = data[[d < cutoff for d in date]]
    # normalize to 'YYYY-MM-DD 00:00:00' datetime strings
    data['date'] = [d.replace('/', '-') + ' 00:00:00' for d in data.date]
    data.columns = ['datetime', 'Price']

    C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
    try:
        C2S.create_table(data.columns, dt_col=['datetime'])
    except Exception:
        pass  # table probably exists already (was a bare `except: 123`)
    C2S.upload2sql(data)
    print('create process table')
    BasedClass.create_datatable(date_name)
def auto_crawler_new():
    """Crawl newly released financial statements and upload them to SQL."""
    date_name = 'FinancialStatements'
    ACFS = AutoCrawlerFinancialStatements(database='Financial_DataSet')
    ACFS.main()
    if len(ACFS.stock_financial_statements) != 0:
        try:
            ACFS.fix()
        except Exception:
            # best effort: keep the unfixed data if fix() fails
            # (was a bare `except: 123`)
            pass
        # if the frame came back column-oriented (first column label is 0),
        # transpose it into row orientation before uploading
        if ACFS.stock_financial_statements.columns[0] == 0:
            ACFS.stock_financial_statements = ACFS.stock_financial_statements.T

        C2S = BasedClass.Crawler2SQL(date_name, 'Financial_DataSet')
        C2S.upload2sql(ACFS.stock_financial_statements,
                       no_float_col=['stock_id', 'url'],
                       int_col=['year', 'quar'])
    # ------------------------------------------------------
    print('save crawler process')
    # use date_name instead of a second copy of the literal
    BasedClass.save_crawler_process(date_name)
# Example #19
# 0
def crawler_history():
    """Load stock-price history from a pre-crawled CSV and store it in SQL.

    NOTE(review): the crawler call is commented out; this reads a
    hard-coded local CSV instead — confirm the path before running.
    """
    dataset_name = 'StockPrice'
    # do not name a module-level local `self`
    CHSP = CrawlerHistoryStockPrice()
    # CHSP.main()
    os.chdir('/home/linsam/github')
    CHSP.data = pd.read_csv('test.csv')
    print('crawler data and upload 2 sql')
    C2S = BasedClass.Crawler2SQL(dataset_name, 'Financial_DataSet')
    try:
        C2S.create_table(CHSP.data.columns,
                         text_col=['stock'],
                         BIGINT_col=['Volume'])
    except Exception:
        pass  # table probably exists already (was a bare `except: 123`)
    C2S.upload2sql(CHSP.data,
                   no_float_col=['date', 'stock'],
                   int_col=['Volume'])
    print('create process table')
    # use dataset_name instead of a second copy of the literal
    BasedClass.create_datatable(dataset_name)