Example no. 1
def job_7():
    try:
        print("I'm working......top-list (dragon-tiger) data")
        # daily top list (dragon-tiger board)
        top_list = ts.top_list(today)
        data = pd.DataFrame(top_list)
        data.to_sql('top_list', engine, index=True, if_exists='replace')
        print("daily top list......done")

        # per-stock top-list statistics
        cap_tops = ts.cap_tops()
        data = pd.DataFrame(cap_tops)
        data.to_sql('cap_tops', engine, index=True, if_exists='replace')
        print("per-stock top-list statistics......done")

        # brokerage-branch top-list statistics
        broker_tops = ts.broker_tops()
        data = pd.DataFrame(broker_tops)
        data.to_sql('broker_tops', engine, index=True, if_exists='replace')
        print("brokerage-branch top-list statistics......done")

        # institutional seat tracking
        inst_tops = ts.inst_tops()
        data = pd.DataFrame(inst_tops)
        data.to_sql('inst_tops', engine, index=True, if_exists='replace')
        print("institutional seat tracking......done")

        # institutional trade details
        inst_detail = ts.inst_detail()
        data = pd.DataFrame(inst_detail)
        data.to_sql('inst_detail', engine, index=True, if_exists='replace')
        print("institutional trade details......done")
    except Exception as e:
        print(e)
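Because if_exists='replace' drops and recreates each table, this job keeps only the latest snapshot. A minimal sketch of the append-based alternative used by later examples in this collection, assuming the same engine and today objects are available; this is not the original author's code:

import tushare as ts

df = ts.cap_tops()                       # already a DataFrame (or None on failure)
if df is not None and not df.empty:
    df['date'] = today                   # stamp the snapshot with its trade date
    df.to_sql('cap_tops', engine, index=False, if_exists='append')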
Example no. 2
 def store_top_data(self, trading_date=None):
     """Top-list (dragon-tiger) data: this interface provides historical data on stocks that made the daily list."""
     trading_date = self.last_trading_date if trading_date is None else trading_date
     # daily top list
     print('top_list...')
     top_df = ts.top_list(self.stock_date_format(trading_date))
     self.mysqlUtils.append_data(top_df, 'top_list')
     # per-stock top-list statistics
     print('cap_tops...')
     cap_tops_df = ts.cap_tops()
     cap_tops_df['date'] = trading_date
     self.mysqlUtils.append_data(cap_tops_df, 'cap_tops')
     # brokerage-branch top-list statistics
     print('broker_tops...')
     broker_tops_df = ts.broker_tops()
     broker_tops_df['date'] = trading_date
     self.mysqlUtils.append_data(broker_tops_df, 'broker_tops')
     # institutional seat tracking
     print('inst_tops...')
     inst_tops_df = ts.inst_tops()
     inst_tops_df['date'] = trading_date
     self.mysqlUtils.append_data(inst_tops_df, 'inst_tops')
     # institutional trade details
     print('inst_detail...')
     inst_detail_df = ts.inst_detail()
     self.mysqlUtils.append_data(inst_detail_df, 'inst_detail')
Example no. 3
def cap_tops(days, retry_count, pause):
    try:
        df = ts.cap_tops(days, retry_count, pause)
        engine = create_engine('mysql://*****:*****@127.0.0.1/stock?charset=utf8')
        df.to_sql('cap_tops', engine, if_exists='append')
        print("message")
    except Exception as e:
        print(e)
Example no. 4
def get_cap_tops():

    df = ts.cap_tops()
    df.to_sql('cap_tops',
              engine,
              if_exists='append',
              index=False)  # index_label only applies when index=True, so it is omitted here
Example no. 5
def getdragontigerdata():
    curday = datetime.date.today()
    curdate = curday.strftime('%Y%m%d')
    print(curdate)

    mylogger = getmylogger()

    # daily top list (dragon-tiger board)
    df = ts.top_list(curday.strftime('%Y-%m-%d'))
    if df is not None:
        df['date'] = curdate
        tosql(df, 'toplistdata', "append", "daily top-list data", mylogger)
    else:
        mylogger.info("no daily top-list data")

    # per-stock top-list statistics
    for i in [5, 10, 30, 60]:
        df = ts.cap_tops(i)
        logmsg = "per-stock top-list data, %d-day window:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'captops', "append", logmsg, mylogger)
        else:
            mylogger.info("no " + logmsg)

    # brokerage-branch top-list statistics
    for i in [5, 10, 30, 60]:
        df = ts.broker_tops(i)
        logmsg = "brokerage-branch top-list data, %d-day window:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'brokertops', "append", logmsg, mylogger)
        else:
            mylogger.info("no " + logmsg)

    # institutional seat tracking
    for i in [5, 10, 30, 60]:
        df = ts.inst_tops(i)
        logmsg = "institutional seat tracking data, %d-day window:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'instops', "append", logmsg, mylogger)
        else:
            mylogger.info("no " + logmsg)

    # institutional trade details
    df = ts.inst_detail()
    logmsg = "institutional trade details:"
    if df is not None:
        df['date'] = curdate
        tosql(df, 'instdetail', "append", logmsg, mylogger)
    else:
        mylogger.info("no institutional trade details")
Example no. 6
def get_td_cap_list(days=5, retry=3, pause=0):
    """
    description: cumulative buy and sell amounts for stocks listed within the last n days
    params:
        days: n
        retry: number of retries
        pause: pause (seconds) between retries
    """
    data = ts.cap_tops(days=days, retry_count=retry, pause=pause)
    return data
Example no. 7
def test():
    ts.get_sz50s()
    ts.get_hs300s()
    ts.get_zz500s()
    ts.realtime_boxoffice()
    ts.get_latest_news()
    ts.get_notices(tk)  # tk: a stock code string defined elsewhere in the original module
    ts.guba_sina()
    ts.get_cpi()
    ts.get_ppi()
    ts.get_stock_basics()
    ts.get_concept_classified()
    ts.get_money_supply()
    ts.get_gold_and_foreign_reserves()
    ts.top_list()  # daily top list (dragon-tiger board)
    ts.cap_tops()  # per-stock top-list statistics
    ts.broker_tops()  # brokerage-branch top-list statistics
    ts.inst_tops()  # institutional seat tracking statistics
    ts.inst_detail()
Example no. 8
def countInList(days=1):
    """
    Fetch stocks that made the dragon-tiger list within the window and rank them by appearance count
    :param days: statistics window in days
    :return:
    """
    df = ts.cap_tops(days)
    # df = df[df.loc[:, 'net'] > 0]
    sortdf = df.sort_values(by='count', ascending=False)
    # print(sortdf)
    print(sortdf[sortdf.loc[:, 'name'] == '方大炭素'])
Example no. 9
def capTops():
    """
    Fetch per-stock top-list statistics for the last 5, 10, 30 or 60 days, including
    number of appearances, cumulative buy amount, cumulative sell amount, net amount,
    number of buy seats and number of sell seats.
    """
    try:
        day = int(input('window in days (5, 10, 30, 60): '))
    except (ValueError, EOFError):
        day = 10
        print('defaulting to 10 days')
    cap = ts.cap_tops(days=day)
    print('\n')
    print(cap.sort_values(by=['bamount', 'count'], ascending=(False, False)))
Example no. 10
def stk_cap_tops(days=5, table='stk_cap_tops_ma5'):
    print("\ninserting data into: " + table)
    try:
        conn = connMysql().createconn()
        cur = conn.cursor()
        sql = 'truncate table ' + table + ';'
        cur.execute(sql)
        cur.close()
        conn.close()
        df = ts.cap_tops(days=days)
        df.to_sql(table, engine, if_exists='append')
    except Exception as e:
        print("\nfailed to collect per-stock top-list statistics:", e)
Example no. 11
File: tops.py  Project: cnslyq/ts
def tops(engine, freq):
	today = datetime.date.today()

	tsl.log("tops_stock start...")
	try:
		df = ts.cap_tops(freq)
		df['date'] = today
		df['freq'] = freq
		df = df.set_index('code', drop=True)
		df.to_sql('tops_stock', engine, if_exists='append')
		tsl.log("tops_stock done")
	except Exception as e:
		print(e)
		tsl.log("tops_stock error")
Example no. 12
    def get_top_inst_count(self, days=5):
        """
        Fetch, for each stock, the number of dragon-tiger appearances over the last
        5, 10, 30 or 60 days, plus cumulative buy amount, cumulative sell amount,
        net amount, number of buy seats and number of sell seats.
        Also fetch institutions' cumulative buy counts over the same windows.
        See tushare.cap_tops and tushare.inst_tops for the returned columns.
        Parameters:
        ----------
        days: 5, 10, 30 or 60, the statistics window
        """
        top_count_df = ts.cap_tops(days=days, retry_count=5, pause=1)
        with codecs.open(u"{}/cap_tops_count_{}_{}.csv".format(data_path, days, self.today), 'w+', "utf-8") as f:
            top_count_df.to_csv(f, header=True, sep="\t", index=True)

        inst_tops_df = ts.inst_tops(days=days, retry_count=5, pause=1)
        with codecs.open(u"{}/inst_tops_count_{}_{}.csv".format(data_path, days, self.today), 'w+', "utf-8") as f:
            inst_tops_df.to_csv(f, header=True, sep="\t", index=True)
Example no. 13
def get_lbh_cap_tops(day):
    """
    Per-stock top-list statistics.
    Fetch statistics for the last 5, 10, 30 or 60 days, including number of
    appearances, cumulative buy amount, cumulative sell amount, net amount,
    number of buy seats and number of sell seats.
    """
    df = ts.cap_tops(day)
    print(df)
    if df is not None:
        res = df.to_sql(lbh_cap_tops, engine, if_exists='replace')
        msg = 'ok' if res is None else res
        print('fetched {0}-day per-stock top-list statistics (appearances, cumulative buys/sells, '
              'net amount, buy/sell seat counts): {1}'.format(day, msg) + '\n')
    else:
        print('fetched {0}-day per-stock top-list statistics (appearances, cumulative buys/sells, '
              'net amount, buy/sell seat counts): {1}'.format(day, 'None') + '\n')
Example no. 14
 def getStockLHB(self, days=5):
     '''
     Per-stock top-list statistics.
     Input:
         days: statistics window of 5, 10, 30 or 60 days; defaults to 5
     Output columns:
         code: stock code
         name: stock name
         count: number of top-list appearances
         bamount: cumulative buy amount (10k CNY)
         samount: cumulative sell amount (10k CNY)
         net: net amount (10k CNY)
         bcount: number of buy seats
         scount: number of sell seats
     '''
     df = ts.cap_tops(days)
     return df
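The columns listed above are ordinary DataFrame columns, so ranking the heaviest net buyers is a one-liner; a hedged sketch, assuming the legacy free tushare API is still reachable:

import tushare as ts

df = ts.cap_tops(days=5)   # columns: code, name, count, bamount, samount, net, bcount, scount
if df is not None and not df.empty:
    top_net = df.sort_values('net', ascending=False).head(10)
    print(top_net[['code', 'name', 'count', 'net']])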
Example no. 15
def get_individual_statistics_tops(days=5, retry_count=RETRY_COUNT, pause=PAUSE):
    """个股上榜统计"""

    logger.info('Begin get IndividualStatisticsTops. Days is: %s.' % days)
    try:
        data_df = ts.cap_tops(days, retry_count, pause)
    except Exception as e:
        logger.exception('Error get IndividualStatisticsTops. Days is: %s.' % days)
        return None
    else:
        data_dicts = []
        if data_df is None or data_df.empty:
            logger.warning('Empty get IndividualStatisticsTops. Days is: %s.' % days)
        else:
            data_dicts = [{'code': row[0], 'name': row[1],
                           'count': row[2], 'bamount': row[3], 'samount': row[4],
                           'net': row[5], 'bcount': row[6], 'scount': row[7],
                           'days_type': days, 'insert_date': today_line}
                          for row in data_df.values]
            logger.info('Success get IndividualStatisticsTops. Days is: %s.' % days)
        return data_dicts
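Mapping rows positionally (row[0] ... row[7]) breaks silently if tushare ever reorders or adds columns. A slightly more defensive variant, sketched under the same assumptions (data_df, days and today_line as above, not part of the original code), selects the columns by name first:

cols = ['code', 'name', 'count', 'bamount', 'samount', 'net', 'bcount', 'scount']
data_dicts = [
    dict(row, days_type=days, insert_date=today_line)   # merge the row with the metadata fields
    for row in data_df[cols].to_dict('records')
]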
Example no. 16
def top_type(top_type):
    # note: pandas removed the to_sql 'flavor' argument in 0.23; drop it when running on newer pandas
    today = datetime.datetime.today().strftime('%Y-%m-%d')
    if top_type == 'top_list':
        top_list = ts.top_list(today)
        if top_list is not None:
            top_list.to_sql('top_list',
                            engine,
                            flavor='mysql',
                            if_exists='append')
    elif top_type == 'cap_tops':
        cap_tops = ts.cap_tops()
        if cap_tops is not None:
            cap_tops['date'] = today
            cap_tops.to_sql('top_cap_tops',
                            engine,
                            flavor='mysql',
                            if_exists='append')
    elif top_type == 'broker_tops':
        broker_tops = ts.broker_tops()
        if broker_tops is not None:
            broker_tops['date'] = today
            broker_tops.to_sql('top_broker_tops',
                               engine,
                               flavor='mysql',
                               if_exists='append')
    elif top_type == 'inst_tops':
        inst_tops = ts.inst_tops()
        if inst_tops is not None:
            inst_tops['date'] = today
            inst_tops.to_sql('top_inst_tops',
                             engine,
                             flavor='mysql',
                             if_exists='append')
    elif top_type == 'inst_detail':
        inst_detail = ts.inst_detail()
        if inst_detail is not None:
            inst_detail.to_sql('top_inst_detail',
                               engine,
                               flavor='mysql',
                               if_exists='append')
Example no. 17
days: statistics window of 5, 10, 30 or 60 days; defaults to 5
retry_count: number of retries after a network error; defaults to 3
pause: pause in seconds between retries; defaults to 0
Return values:

code: stock code
name: stock name
count: number of top-list appearances
bamount: cumulative buy amount (10k CNY)
samount: cumulative sell amount (10k CNY)
net: net amount (10k CNY)
bcount: number of buy seats
scount: number of sell seats'''

print('per-stock top-list statistics')
print(ts.cap_tops())
'''
Fetch brokerage-branch top-list appearances and cumulative buy/sell figures for the last 5, 10, 30 or 60 days.

Parameter description:

days: statistics window of 5, 10, 30 or 60 days; defaults to 5
retry_count: number of retries after a network error; defaults to 3
pause: pause in seconds between retries; defaults to 0
Return values:

broker: brokerage-branch name
count: number of top-list appearances
bamount: cumulative buy amount (10k CNY)
bcount: number of buy seats
samount: cumulative sell amount (10k CNY)
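The two parameter lists above map directly onto the legacy free-API calls; a minimal sketch exercising both with explicit arguments, assuming that API is still reachable and the column names above are unchanged:

import tushare as ts

# per-stock statistics over a 10-day window, retrying on network errors
caps = ts.cap_tops(days=10, retry_count=3, pause=1)
if caps is not None:
    print(caps[['code', 'name', 'count', 'bamount', 'samount', 'net']].head())

# brokerage-branch statistics over the same window
brokers = ts.broker_tops(days=10, retry_count=3, pause=1)
if brokers is not None:
    print(brokers[['broker', 'count', 'bamount', 'bcount', 'samount']].head())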
Example no. 18
def cap_list(days=5, retry_count=5, pause=1):
    df = ts.cap_tops(days=days, retry_count=retry_count, pause=pause)
    return df
Example no. 19
def toDB_cap_tops(date, days):
    df = ts.cap_tops()
    Add_col(df, date=date, days=days).to_sql('t_cap_tops',
                                             c.ENGINE,
                                             if_exists='append')
Example no. 20
         for idx in df.index:
             temp = df.loc[idx]
             sql = "insert into top_list(code,name,pchange,amount,buy,bratio,sell,sratio,reason,date) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
             param = (temp['code'], temp['name'], temp['pchange'], temp['amount'], temp['buy'], temp['bratio'], temp['sell'], temp['sratio'], temp['reason'], temp['date'])
             cursor.execute(sql, param)
             conn.commit()
 except Exception:
     f = open("errors/" + today + ".log", 'a')
     traceback.print_exc(file=f)
     f.flush()
     f.close()

 # per-stock top-list statistics
 try:
     df = idx = temp = sql = param = None
     df = ts.cap_tops()
     if df is not None:
         for idx in df.index:
             temp = df.loc[idx]
             sql = "insert into cap_tops(code,name,count,bamount,samount,net,bcount,scount) values(%s,%s,%s,%s,%s,%s,%s,%s)"
             param = (temp['code'], temp['name'], temp['count'], temp['bamount'], temp['samount'], temp['net'], temp['bcount'], temp['scount'])
             cursor.execute(sql, param)
             conn.commit()
 except Exception:
     f = open("errors/" + today + ".log", 'a')
     traceback.print_exc(file=f)
     f.flush()
     f.close()

 # brokerage-branch top-list statistics
 try:
Example no. 21
def TopTotal():
	df = ts.cap_tops()
	df = df.sort_values('count', ascending=False)
	return ["per-stock top-list statistics:", df[:20]]
Example no. 22
 def getCapTops(self):
     file_name = 'cap_tops.csv'
     path = self.index + self.index_cap_tops_data + file_name
     data = ts.cap_tops()
     data.to_csv(path, encoding='utf-8')
     print(file_name)
Example no. 23
def recommend(request):
    # funds
    if not models.RecommendFund.objects.all():
        r = requests.get('http://fund.eastmoney.com/trade/default.html')
        encode_content = r.content.decode('gb2312')
        soup = BeautifulSoup(encode_content, 'lxml')
        name = soup.find_all('td', 'fname')
        pattern1 = re.compile(r"<td>(\d\d\d\d\d\d)</td>")
        code = re.findall(pattern1, encode_content)
        rate = []
        for item in code[0:25]:
            r = requests.get('http://fund.eastmoney.com/pingzhongdata/' + item + '.js')
            pattern3 = re.compile(r'var syl_1n="(.*?)"')
            tmp = re.findall(pattern3, r.text)
            #tmp[0] += '%'
            rate.append(tmp[0])
        for i in range(0, 25):
            recF = models.RecommendFund()
            recF.code = code[i]
            recF.name = name[i].string
            recF.annualrate = rate[i]
            recF.save()
    # stocks
    if not models.RecommendStock.objects.all():
        rs = ts.cap_tops()
        for i in range(0, 30):
            stock_code=rs.code[i]
            stockdata = requests.get('http://hq.sinajs.cn/list=sh' + stock_code)
            stockdatasplit = stockdata.text.split(',')
            if (len(stockdata.text) == 24):
                stockdata = requests.get('http://hq.sinajs.cn/list=sz' + stock_code)
                stockdatasplit = stockdata.text.split(',')
                stock = models.Stock()
                stock.code = stock_code
                stock.name = stockdatasplit[0][21:]
                stock.open = stockdatasplit[1]
                stock.close = stockdatasplit[2]
                if float(stock.close)==0:
                    continue
                stock.high = stockdatasplit[4]
                stock.low = stockdatasplit[5]
                stock.price = stockdatasplit[3]
                stock.currentrate = (float(stock.price) - float(stock.close)) / float(stock.close) * 100
            else:
                stock = models.Stock()
                stock.code = stock_code
                stock.name = stockdatasplit[0][21:]
                stock.open = stockdatasplit[1]
                stock.close = stockdatasplit[2]
                if float(stock.close)==0:
                    continue
                stock.high = stockdatasplit[4]
                stock.low = stockdatasplit[5]
                stock.price = stockdatasplit[3]
                stock.currentrate = (float(stock.price) - float(stock.close)) / float(stock.close) * 100
            w = round(stock.currentrate, 4)
            if abs(w)>11:
                continue
            recS = models.RecommendStock()
            recS.code = rs.code[i]
            recS.name = rs.name[i]
            recS.rate = w
            recS.save()
    recF = models.RecommendFund.objects.all()
    recS = models.RecommendStock.objects.all()
    return render(request, 'recommend.html', {'recF': recF, 'recS': recS})
Example no. 24
 def longhuban(self, date):
     print(ts.top_list(date))
     print(ts.cap_tops())
Example no. 25
import pymongo
import json
import tushare as ts

# ts.get_cash_flow: fetch historical cash-flow data for a specific stock code.
# 600547: 山东黄金 (Shandong Gold)
# 600362: 江西铜业 (Jiangxi Copper)
# 600312: 平高电气 (Pinggao Electric)
# 600499: 科达洁能 (Keda Clean Energy)
# 603993: 洛阳钼业 (China Molybdenum)
db = "TopRanks"
coll = "CapTop"
date = '2017-12-22'
tops = 5

conn = pymongo.MongoClient('127.0.0.1', port=27017)
df = ts.cap_tops(tops)
# index data columns (X columns)
dicIndex = json.loads(df.to_json(orient='split'))
for i, ind in enumerate(dicIndex['index']):
    jsonstr = {
        '_id': dicIndex['data'][i][0] + "-" + date + "-" + str(tops),
        'Date': date,
        dicIndex['columns'][0]: dicIndex['data'][i][0],
        dicIndex['columns'][1]: dicIndex['data'][i][1],
        dicIndex['columns'][2]: dicIndex['data'][i][2],
        dicIndex['columns'][3]: dicIndex['data'][i][3],
        dicIndex['columns'][4]: dicIndex['data'][i][4],
        dicIndex['columns'][5]: dicIndex['data'][i][5],
        dicIndex['columns'][6]: dicIndex['data'][i][6],
        dicIndex['columns'][7]: dicIndex['data'][i][7]
    }
    # upsert each document so re-runs do not raise duplicate-key errors
    conn[db][coll].replace_one({'_id': jsonstr['_id']}, jsonstr, upsert=True)
Example no. 26
import tushare as ts
import urllib.request

proxy = urllib.request.ProxyHandler({'http': 'http://10.144.1.10:8080'})
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener)
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent': user_agent}

#df = ts.inst_tops()
#df = ts.top_list('2017-05-08')
df = ts.cap_tops()
df.to_csv('d:/temp/cap.csv')
print(df)
Example no. 27
def Getgegushangbang():
    df = ts.cap_tops()
    df = df.to_json(force_ascii=False)
    print(df)
    return df
Example no. 28
def GetStockCountOnBillBoardIn(days):
    """
    Args:
        days: days should be 5, 10, 30 or 60
    """
    return ts.cap_tops(days)
Example no. 29
# industry classification
# df = ts.get_industry_classified()
# df.to_csv('F:/01/get_industry_classified.csv', encoding='utf_8_sig')

# 2. concept classification
# df = ts.get_concept_classified()
# df.to_csv('F:/01/get_concept_classified.csv', encoding='utf_8_sig')

# 1. deposit rates
# print(ts.get_deposit_rate())

# 2. loan rates
# print(ts.get_loan_rate())


# print(ts.top_list('2019-04-02'))

df = ts.cap_tops(10)  # defaults to a 5-day window

df.to_csv('F:/01/cap_tops.csv', encoding='utf_8_sig')
# df.to_json('F:/01/cap_tops.json')
Example no. 30
def capture_stock_data():
    capture_date = datetime.datetime.now().strftime("%Y%m%d")
    save_dir = "/home/dandelion/stock_data/" + capture_date

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
        print("The save directory was created successfully!\n", save_dir)
    else:
        print("The save directory already exists!\n", save_dir)
    # ======================Daily Command================================================================
    # get the box office data of the last day and save as a csv file named after the capture command
    ts.day_boxoffice().to_csv(
        save_dir + "/" + capture_date + "_day_boxoffice.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("day_boxoffice data capture completed!")

    # get the cinema data of the last day and save as csvfile named as the capture command
    ts.day_cinema().to_csv(
        save_dir + "/" + capture_date + "_day_cinema.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("day_cinema data capture completed!")

    ts.month_boxoffice().to_csv(
        save_dir + "/" + capture_date + "_month_boxoffice.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("month_boxoffice data capture completed!")

    ts.realtime_boxoffice().to_csv(
        save_dir + "/" + capture_date + "_realtime_boxoffice.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("realtime_boxoffice data capture completed!")

    # get the stock data index of the last day and save as csvfile named as the capture command
    ts.get_index().to_csv(
        save_dir + "/" + capture_date + "_get_index.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_index data capture completed!")

    # get the history cpi data and save as csvfile named as the capture command
    ts.get_cpi().to_csv(
        save_dir + "/" + capture_date + "_get_cpi.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_cpi data capture completed!")

    # get the yearly gdp history and save as a csv file named after the capture command
    ts.get_gdp_year().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_year.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_gdp_year data capture completed!")

    # get today all stock data and save as csvfile named as the capture command
    # ts.get_today_all().to_csv(save_dir+'/'+capture_date+'_get_today_all.csv',header=True,sep=',',index=False)

    # get detail information of the top brokers today and save as csvfile named as the capture command
    ts.broker_tops().to_csv(
        save_dir + "/" + capture_date + "_broker_tops.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("broker_tops data capture completed!")

    # get per-stock top-list statistics for today and save as a csv file named after the capture command
    ts.cap_tops().to_csv(
        save_dir + "/" + capture_date + "_cap_tops.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("cap_tops data capture completed!")

    ts.get_area_classified().to_csv(
        save_dir + "/" + capture_date + "_get_area_classified.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_area_classified data capture completed!")

    # ts.get_balance_sheet(code='').to_csv(save_dir+'/'+capture_date+'_get_balance_sheet.csv',header=True,sep=',',index=False)
    # print('get_balance_sheet data capture completed!')

    # ts.get_cash_flow(code='').to_csv(save_dir+'/'+capture_date+'_get_cash_flow.csv',header=True,sep=',',index=False)
    # print('get_cash_flow data capture completed!')

    ts.get_day_all().to_csv(
        save_dir + "/" + capture_date + "_get_day_all.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_day_all data capture completed!")
    ts.get_cashflow_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_cashflow_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_cashflow_data data capture completed!")
    ts.get_concept_classified().to_csv(
        save_dir + "/" + capture_date + "_get_concept_classified.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_concept_classified data capture completed!")
    ts.get_debtpaying_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_debtpaying_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_debtpaying_data data capture completed!")
    ts.get_deposit_rate().to_csv(
        save_dir + "/" + capture_date + "_get_deposit_rate.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_deposit_rate data capture completed!")

    ts.get_gdp_contrib().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_contrib.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_gdp_for().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_for.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_gdp_pull().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_pull.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_gdp_quarter().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_quarter.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_gdp_ data capture completed!")
    # ts.get_gdp_year().to_csv(save_dir+'/'+capture_date+'_get_gdp_year.csv',header=True,sep=',',index=False)
    ts.get_gem_classified().to_csv(
        save_dir + "/" + capture_date + "_get_gem_classified.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_gold_and_foreign_reserves().to_csv(
        save_dir + "/" + capture_date + "_get_gold_and_foreign_reserves.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_growth_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_growth_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_industry_classified().to_csv(
        save_dir + "/" + capture_date + "_get_industry_classified.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_hs300s().to_csv(
        save_dir + "/" + capture_date + "_get_hs300s.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_sz50s().to_csv(
        save_dir + "/" + capture_date + "_get_sz50s.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_zz500s().to_csv(
        save_dir + "/" + capture_date + "_get_zz500s.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_operation_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_operation_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_stock_basics().to_csv(
        save_dir + "/" + capture_date + "_get_stock_basics.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_report_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_report_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.inst_detail().to_csv(
        save_dir + "/" + capture_date + "_inst_detail.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.inst_tops().to_csv(
        save_dir + "/" + capture_date + "_inst_tops.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("inst_tops data capture completed!")
    ts.new_stocks().to_csv(
        save_dir + "/" + capture_date + "_new_stocks.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("new_stocks data capture completed!")
    ts.top_list().to_csv(
        save_dir + "/" + capture_date + "_top_list.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("top_list data capture completed!")
Example no. 31
def get_cap_tops(days=30):
    return ts.cap_tops(days)
Example no. 32
 def GetCaptops(self):
     datateee = ts.cap_tops()
     self.SaveCSV(datateee, 'captops.csv')
Example no. 33
#coding:utf-8
from sqlalchemy import create_engine
import tushare as ts

# define engine
engine = create_engine('mysql://*****:*****@127.0.0.1/tushare?charset=utf8')


df = ts.get_today_all()
df.to_sql('realtime_price',engine, if_exists='replace')


df = ts.cap_tops(60)
df.to_sql('cap_tops',engine, if_exists='replace')

#df=ts.inst_tops(10)
#df.to_sql('inst_tops',engine, if_exists='replace')


#df=ts.inst_detail(10)
#df.to_sql('inst_detail',engine, if_exists='replace')
Example no. 35
import tushare as ts
from sqlalchemy import create_engine
token = ts.set_token('c723069dd4a25402d05ea6afad36da2937111adf012f8258abb5f7e05936e575')
engine = create_engine('mysql+pymysql://root:toor@localhost/pack?charset=utf8')

#df = ts.new_stocks()
#df.to_sql('new_stocks',engine,if_exists='append')

#holdings = ts.fund_holdings(2015,3)
#holdings.to_sql('fund_holdings',engine,if_exists='append')

#df = ts.get_industry_classified()
#df.to_sql('industry_classified',engine,if_exists='append')


#top = ts.top_list('2016-04-21')                                                         # daily top list (dragon-tiger board)
cap = ts.cap_tops()                                                                     # per-stock top-list statistics
broker = ts.broker_tops()                                                               # brokerage-branch top-list statistics
detail = ts.inst_detail()                                                               # institutional trade details

#top.to_sql('top_list',engine,if_exists = 'append')
cap.to_sql('cap_tops',engine,if_exists = 'append')
broker.to_sql('broker_tops',engine,if_exists = 'append')
detail.to_sql('inst_detail',engine,if_exists = 'append')