Example 1
def job_7():
    try:
        print("I'm working......龙虎榜数据")
        # Daily top list (dragon-tiger board)
        top_list = ts.top_list(today)
        data = pd.DataFrame(top_list)
        data.to_sql('top_list',engine,index=True,if_exists='replace')
        print("每日龙虎榜列表......done")

        # Individual stock listing statistics
        cap_tops = ts.cap_tops()
        data = pd.DataFrame(cap_tops)
        data.to_sql('cap_tops',engine,index=True,if_exists='replace')
        print("个股上榜统计......done")

        # Brokerage branch listing statistics
        broker_tops = ts.broker_tops()
        data = pd.DataFrame(broker_tops)
        data.to_sql('broker_tops',engine,index=True,if_exists='replace')
        print("营业部上榜统计......done")

        # Institutional seat tracking
        inst_tops = ts.inst_tops()
        data = pd.DataFrame(inst_tops)
        data.to_sql('inst_tops',engine,index=True,if_exists='replace')
        print("机构席位追踪......done")

        # Institutional trade details
        inst_detail = ts.inst_detail()
        data = pd.DataFrame(inst_detail)
        data.to_sql('inst_detail',engine,index=True,if_exists='replace')
        print("机构成交明细......done")
    except Exception as e:
        print(e)
Example 2
 def store_top_data(self, trading_date=None):
     """龙虎榜数据: 龙虎榜数据接口提供历史龙虎榜上榜股票数据"""
     trading_date = self.last_trading_date if trading_date is None else trading_date
     # Daily top list (dragon-tiger board)
     print('top_list...')
     top_df = ts.top_list(self.stock_date_format(trading_date))
     self.mysqlUtils.append_data(top_df, 'top_list')
     # Individual stock listing statistics
     print('cap_tops...')
     cap_tops_df = ts.cap_tops()
     cap_tops_df['date'] = trading_date
     self.mysqlUtils.append_data(cap_tops_df, 'cap_tops')
     # Brokerage branch listing statistics
     print('broker_tops...')
     broker_tops_df = ts.broker_tops()
     broker_tops_df['date'] = trading_date
     self.mysqlUtils.append_data(broker_tops_df, 'broker_tops')
     # Top-list institutional seat tracking
     print('inst_tops...')
     inst_tops_df = ts.inst_tops()
     inst_tops_df['date'] = trading_date
     self.mysqlUtils.append_data(inst_tops_df, 'inst_tops')
     # Top-list institutional seat trade details
     print('inst_detail...')
     inst_detail_df = ts.inst_detail()
     self.mysqlUtils.append_data(inst_detail_df, 'inst_detail')
Example 3
def inst_tops(days, retry_count, pause):
    try:
        df = ts.inst_tops(days, retry_count, pause)
        engine = create_engine('mysql://*****:*****@127.0.0.1/stock?charset=utf8')
        df.to_sql('inst_tops', engine, if_exists='append')
        print("message")
    except Exception as e:
        print(e)
Example 4
def getdragontigerdata():
    curday = datetime.date.today()
    curdate = curday.strftime('%Y%m%d')
    print(curdate)

    mylogger = getmylogger()

    # Daily top list (dragon-tiger board)
    df = ts.top_list(curday.strftime('%Y-%m-%d'))
    if df is not None:
        df['date'] = curdate
        tosql(df, 'toplistdata', "append", "每日龙虎榜数据", mylogger)
    else:
        mylogger.info("没有每日龙虎榜数据。")

    # Individual stock listing statistics
    for i in [5, 10, 30, 60]:
        df = ts.cap_tops(i)
        logmsg = "个股上榜数据" + "%d日:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'captops', "append", logmsg, mylogger)
        else:
            mylogger.info("没有" + logmsg)

    # Brokerage branch listing statistics
    for i in [5, 10, 30, 60]:
        df = ts.broker_tops(i)
        logmsg = "营业部上榜数据" + "%d日:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'brokertops', "append", logmsg, mylogger)
        else:
            mylogger.info("没有" + logmsg)

    # Institutional seat tracking
    for i in [5, 10, 30, 60]:
        df = ts.inst_tops(i)
        logmsg = "机构席位追踪数据" + "%d日:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'instops', "append", logmsg, mylogger)
        else:
            mylogger.info("没有" + logmsg)

    # Institutional trade details
    df = ts.inst_detail()
    logmsg = "机构成交明细:"
    if df is not None:
        df['date'] = curdate
        tosql(df, 'instdetail', "append", logmsg, mylogger)
    else:
        mylogger.info("没有机构成交明细。")
Example 5
def test():
    ts.get_sz50s()
    ts.get_hs300s()
    ts.get_zz500s()
    ts.realtime_boxoffice()
    ts.get_latest_news()
    ts.get_notices(tk)
    ts.guba_sina()
    ts.get_cpi()
    ts.get_ppi()
    ts.get_stock_basics()
    ts.get_concept_classified()
    ts.get_money_supply()
    ts.get_gold_and_foreign_reserves()
    ts.top_list()  # Daily top list (dragon-tiger board)
    ts.cap_tops()  # Individual stock listing statistics
    ts.broker_tops()  # Brokerage branch listing statistics
    ts.inst_tops()  # Institutional seat tracking statistics
    ts.inst_detail()
Example 6
def download_org_top(time ):
    list=[5,10,30]
    for i in list:
        Datas_b=  ts.inst_tops(days= i)
        files_path = '../report/Brokerage/%s'%time
        if not os.path.exists(files_path): # check whether the directory exists
            os.mkdir(files_path)           # create the directory
        Datas_b.to_csv(files_path+'/%s_org_top%sD_csv.csv'%(time,i),encoding='gbk')
        with pd.ExcelWriter(files_path+'/%s_org_top%sD_xlx.xlsx'%(time,i)) as writer:
            Datas_b.to_excel(writer, sheet_name='Sheet1')
        print('\n%s %s机构席位 have been saved'%(time,i))
Example 7
def get_lbh_inst_tops(day):
    """
    机构席位追踪
    获取机构近5、10、30、60日累积买卖次数和金额等情况。
    """
    df = ts.inst_tops(day)
    print(df)
    if df is not None:
        res = df.to_sql(lbh_inst_tops, engine, if_exists='replace')
        msg = 'ok' if res is None else res
        print('获取机构近 {0} 日累积买卖次数和金额等情况:{1}'.format(day, msg) + '\n')
    else:
        print('获取机构近 {0} 日累积买卖次数和金额等情况:{1}'.format(day, 'None') + '\n')
Example 8
def stk_inst_tops(days=5,table = 'stk_inst_tops_ma5'):
    print("\n插入数据:" + table)
    try:
        conn = connMysql().createconn()
        cur = conn.cursor()
        sql = 'truncate table ' + table + ';'
        cur.execute(sql)
        cur.close()
        conn.close()
        df = ts.inst_tops(days=days)
        df.to_sql(table,engine,if_exists='append')
    except:
        print("\n收集机构席位追踪失败")
Example 9
    def get_top_inst_count(self, days=5):
        """
        获取个股在近5、10、30、60日的龙虎榜上榜次数、累积购买额、累积卖出额、净额、买入席位数和卖出席位数。
        获取机构的近5、10、30、60日累积买入次数
        具体返回参数参考tushare.cap_tops和tushare.inst_tops
        Parameters:
        ----------
        days:5、10、30、60,表示统计周期
        """
        top_count_df = ts.cap_tops(days=days, retry_count=5, pause=1)
        with codecs.open(u"{}/cap_tops_count_{}_{}.csv".format(data_path, days, self.today), 'w+', "utf-8") as f:
            top_count_df.to_csv(f, header=True, sep="\t", index=True)

        inst_tops_df = ts.inst_tops(days=days, retry_count=5, pause=1)
        with codecs.open(u"{}/inst_tops_count_{}_{}.csv".format(data_path, days, self.today), 'w+', "utf-8") as f:
            inst_tops_df.to_csv(f, header=True, sep="\t", index=True)
Example 10
def custom_stocks_streage():
    streage = {
        '机构五日内买卖数据按买入 - 卖出倒序\n字段:代码 名称 累积买入额(万) 买入次数 累积卖出额(万) 卖出次数 净额(万)':
        institutional_seat_tracking,
        '成交明细':
        institutional_seat_tracking_detail,
        '5天之内公布的分配预案\n字段:股票代码 股票名称 分配年份 公布日期 分红金额(每10股):转增和送股数(每10股)':
        profit,
        '今日机构买卖数据按买入 - 卖出倒序\n字段:代码 股票名称 交易日期 机构席位买入额(万) 机构席位卖出额(万) type 差额':
        today_institutional_seat_tracking_detail
    }
    data = ts.inst_tops().sort_values(by='net', ascending=False).head(5)
    for item, fuc in streage.items():
        mec = fuc(data)
        for i in mec:
            testnotice.notify('"{0}"\n{1}'.format(item, i))
Example 11
def get_institution_tops(days=5, retry_count=RETRY_COUNT, pause=PAUSE):
    """机构成交明细"""

    logger.info('Begin get InstitutionTops. Days is: %s.' % days)
    try:
        data_df = ts.inst_tops(days, retry_count, pause)
    except Exception as e:
        logger.exception('Error get InstitutionTops. Days is: %s.' % days)
        return None
    else:
        data_dicts = []
        if data_df is None or data_df.empty:
            logger.warn('Empty get InstitutionTops. Days is: %s.' % days)
        else:
            data_dicts = [{'code': row[0], 'name': row[1],
                           'bamount': row[2], 'bcount': row[3], 'samount': row[4],
                           'scount': row[5], 'net': row[6],
                           'days_type': days, 'insert_date': today_line}
                          for row in data_df.values]
            logger.info('Success get InstitutionTops. Days is: %s.' % days)
        return data_dicts
Example 12
def top_type(top_type):
    today = datetime.datetime.today().strftime('%Y-%m-%d')
    if top_type == 'top_list':
        top_list = ts.top_list(today)
        if top_list is not None:
            top_list.to_sql('top_list',
                            engine,
                            flavor='mysql',
                            if_exists='append')
    elif top_type == 'cap_tops':
        cap_tops = ts.cap_tops()
        if cap_tops is not None:
            cap_tops['date'] = today
            cap_tops.to_sql('top_cap_tops',
                            engine,
                            flavor='mysql',
                            if_exists='append')
    elif top_type == 'broker_tops':
        broker_tops = ts.broker_tops()
        if broker_tops is not None:
            broker_tops['date'] = today
            broker_tops.to_sql('top_broker_tops',
                               engine,
                               flavor='mysql',
                               if_exists='append')
    elif top_type == 'inst_tops':
        inst_tops = ts.inst_tops()
        if inst_tops is not None:
            inst_tops['date'] = today
            inst_tops.to_sql('top_inst_tops',
                             engine,
                             flavor='mysql',
                             if_exists='append')
    elif top_type == 'inst_detail':
        inst_detail = ts.inst_detail()
        if inst_detail is not None:
            inst_detail.to_sql('top_inst_detail',
                               engine,
                               flavor='mysql',
                               if_exists='append')
Example 13
def inst_tops(days=5, retry_count=5, pause=1):
    df = ts.inst_tops(days=days, retry_count=retry_count, pause=pause)
    return df
Example 14
def ChairTotal():
	df=ts.inst_tops()
	df=df.sort_values('bcount',ascending=False)
	return [u"机构席位追踪:",df[:20]]
Example 15
from sqlalchemy import create_engine
import tushare as ts
#engine = create_engine('mysql+pymysql://root:[email protected]/packageing?charset=utf8')
engine = create_engine(
    'mysql+pymysql://root:[email protected]/gupiao?charset=utf8')

df = ts.inst_detail()
de = ts.inst_tops()
broker = ts.broker_tops()

#df.to_sql('inst_datail',engine)
#de.to_sql('inst_tops',engine)
#broker.to_sql('broker_tops',engine)

df.to_sql('inst_datail', engine, if_exists='append')
de.to_sql('inst_tops', engine, if_exists='append')
#broker.to_sql('broker_tops',engine,if_exists='append')
Example 16
Parameter description:

days: statistics period of 5, 10, 30 or 60 days; defaults to 5
retry_count: number of retries after a network error; defaults to 3
pause: seconds to pause between retries; defaults to 0
Return value description:

code: stock code
name: stock name
bamount: cumulative buy amount (10k CNY)
bcount: number of buys
samount: cumulative sell amount (10k CNY)
scount: number of sells
net: net amount (10k CNY)'''
print('机构席位追踪')
print(ts.inst_tops())
'''
Get the institutional seat trade detail statistics for the most recent trading day

Parameter description:

retry_count: number of retries after a network error; defaults to 3
pause: seconds to pause between retries; defaults to 0
Return value description:

code: stock code
name: stock name
date: trading date
bamount: institutional seat buy amount (10k CNY)
samount: institutional seat sell amount (10k CNY)
type: type
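
The notes above describe the legacy free tushare interfaces ts.inst_tops() and ts.inst_detail(). Below is a minimal sketch, assuming those endpoints are still reachable, that pulls the seat-tracking statistics for every supported period and tags each row before storing it; the connection string and the inst_tops_all table name are placeholders, not part of tushare:

import pandas as pd
import tushare as ts
from sqlalchemy import create_engine

# placeholder connection string; point it at your own database
engine = create_engine('mysql+pymysql://user:password@127.0.0.1/stock?charset=utf8')

def save_inst_tops_all_periods():
    frames = []
    for days in (5, 10, 30, 60):  # the four supported statistics periods
        df = ts.inst_tops(days=days, retry_count=3, pause=1)
        if df is not None and not df.empty:
            df['period'] = days  # remember which period each row came from
            frames.append(df)
    if frames:
        # append everything into one placeholder table
        pd.concat(frames).to_sql('inst_tops_all', engine, if_exists='append', index=False)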
Example 17
#! D:/Python27/python
# coding=utf-8
__author__ = 'Administrator'
from datetime import date
import numpy as np
import sys
import tushare as ts

today = date.today()
today1 = str(today)
start = '%s-%s-%s' % (today.year - 1, today.month, today.day)

data = ts.get_hist_data('600567', start=start, end=today1)

close = data['close']

# print close

print ts.inst_detail(retry_count=5, pause=0.001)

print ts.inst_tops()
Example 18
import pymongo
import json
import tushare as ts

#    ts.get_cash_flow: get the specified code's cash-flow history.
# 600547: Shandong Gold
# 600362: Jiangxi Copper
# 600312: Pinggao Electric
# 600499: Keda Clean Energy
# 603993: Luoyang Molybdenum
db = "TopRanks"
coll = "InstTop"
date = '2017-12-22'
tops = 5

conn = pymongo.MongoClient('127.0.0.1', port=27017)
df = ts.inst_tops(tops)
# index data columns(X columns)
dicIndex = json.loads(df.to_json(orient='split'))
for i, ind in enumerate(dicIndex['index']):
    jsonstr = {
        '_id': dicIndex['data'][i][0] + "-" + date + "-" + str(tops),
        'Date': date,
        dicIndex['columns'][0]: dicIndex['data'][i][0],
        dicIndex['columns'][1]: dicIndex['data'][i][1],
        dicIndex['columns'][2]: dicIndex['data'][i][2],
        dicIndex['columns'][3]: dicIndex['data'][i][3],
        dicIndex['columns'][4]: dicIndex['data'][i][4],
        dicIndex['columns'][5]: dicIndex['data'][i][5],
        dicIndex['columns'][6]: dicIndex['data'][i][6]
    }
    try:
Example 19
 def getInstTops(self):
     file_name = 'inst_tops.csv'
     path = self.index + self.index_inst_tops + file_name
     data = ts.inst_tops()
     data.to_csv(path, encoding='utf-8')
     print(file_name)
Example 20
def Getjigouxiwei():
    df = ts.inst_tops()
    df = df.to_json(force_ascii=False)
    print(df)
    return df
Example 21
def get_inst_tops():
    # statistics period of 5, 10, 30 or 60 days; defaults to 5
    df = ts.inst_tops(days=5)
    df.to_sql('inst_tops', engine, if_exists='append', index=False, index_label='code')
Example 22
def capture_stock_data():
    capture_date = datetime.datetime.now().strftime("%Y%m%d")
    save_dir = "/home/dandelion/stock_data/" + capture_date

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
        print("The save directory was created successfully!\n", save_dir)
    else:
        print("The save directory already exists!\n", save_dir)
    # ======================Daily Command================================================================
    # get the box office data for the last day and save it as a csv file named after the capture date
    ts.day_boxoffice().to_csv(
        save_dir + "/" + capture_date + "_day_boxoffice.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("day_boxoffice data capture completed!")

    # get the cinema data of the last day and save as csvfile named as the capture command
    ts.day_cinema().to_csv(
        save_dir + "/" + capture_date + "_day_cinema.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("day_cinema data capture completed!")

    ts.month_boxoffice().to_csv(
        save_dir + "/" + capture_date + "_month_boxoffice.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("month_boxoffice data capture completed!")

    ts.realtime_boxoffice().to_csv(
        save_dir + "/" + capture_date + "_realtime_boxoffice.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("realtime_boxoffice data capture completed!")

    # get the stock data index of the last day and save as csvfile named as the capture command
    ts.get_index().to_csv(
        save_dir + "/" + capture_date + "_get_index.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_index data capture completed!")

    # get the history cpi data and save as csvfile named as the capture command
    ts.get_cpi().to_csv(
        save_dir + "/" + capture_date + "_get_cpi.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_cpi data capture completed!")

    # get the yearly GDP history data and save as a csv file named after the capture date
    ts.get_gdp_year().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_year.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_gdp_year data capture completed!")

    # get today all stock data and save as csvfile named as the capture command
    # ts.get_today_all().to_csv(save_dir+'/'+capture_date+'_get_today_all.csv',header=True,sep=',',index=False)

    # get detail information of the top brokers today and save as csvfile named as the capture command
    ts.broker_tops().to_csv(
        save_dir + "/" + capture_date + "_broker_tops.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("broker_tops data capture completed!")

    # get detail information of the top individual stocks today and save as a csv file named after the capture date
    ts.cap_tops().to_csv(
        save_dir + "/" + capture_date + "_cap_tops.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("cap_tops data capture completed!")

    ts.get_area_classified().to_csv(
        save_dir + "/" + capture_date + "_get_area_classified.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_area_classified data capture completed!")

    # ts.get_balance_sheet(code='').to_csv(save_dir+'/'+capture_date+'_get_balance_sheet.csv',header=True,sep=',',index=False)
    # print('get_balance_sheet data capture completed!')

    # ts.get_cash_flow(code='').to_csv(save_dir+'/'+capture_date+'_get_cash_flow.csv',header=True,sep=',',index=False)
    # print('get_cash_flow data capture completed!')

    ts.get_day_all().to_csv(
        save_dir + "/" + capture_date + "_get_day_all.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_day_all data capture completed!")
    ts.get_cashflow_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_cashflow_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_cashflow_data data capture completed!")
    ts.get_concept_classified().to_csv(
        save_dir + "/" + capture_date + "_get_concept_classified.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_concept_classified data capture completed!")
    ts.get_debtpaying_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_debtpaying_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_debtpaying_data data capture completed!")
    ts.get_deposit_rate().to_csv(
        save_dir + "/" + capture_date + "_get_deposit_rate.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_deposit_rate data capture completed!")

    ts.get_gdp_contrib().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_contrib.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_gdp_for().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_for.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_gdp_pull().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_pull.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_gdp_quarter().to_csv(
        save_dir + "/" + capture_date + "_get_gdp_quarter.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("get_gdp_ data capture completed!")
    # ts.get_gdp_year().to_csv(save_dir+'/'+capture_date+'_get_gdp_year.csv',header=True,sep=',',index=False)
    ts.get_gem_classified().to_csv(
        save_dir + "/" + capture_date + "_get_gem_classified.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_gold_and_foreign_reserves().to_csv(
        save_dir + "/" + capture_date + "_get_gold_and_foreign_reserves.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_growth_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_growth_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_industry_classified().to_csv(
        save_dir + "/" + capture_date + "_get_industry_classified.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_hs300s().to_csv(
        save_dir + "/" + capture_date + "_get_hs300s.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_sz50s().to_csv(
        save_dir + "/" + capture_date + "_get_sz50s.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_zz500s().to_csv(
        save_dir + "/" + capture_date + "_get_zz500s.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_operation_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_operation_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_stock_basics().to_csv(
        save_dir + "/" + capture_date + "_get_stock_basics.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.get_report_data(2018, 3).to_csv(
        save_dir + "/" + capture_date + "_get_report_data.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.inst_detail().to_csv(
        save_dir + "/" + capture_date + "_inst_detail.csv",
        header=True,
        sep=",",
        index=False,
    )
    ts.inst_tops().to_csv(
        save_dir + "/" + capture_date + "_inst_tops.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("inst_tops data capture completed!")
    ts.new_stocks().to_csv(
        save_dir + "/" + capture_date + "_new_stocks.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("new_stocks data capture completed!")
    ts.top_list().to_csv(
        save_dir + "/" + capture_date + "_top_list.csv",
        header=True,
        sep=",",
        index=False,
    )
    print("top_list data capture completed!")
Example 23
from sqlalchemy import create_engine
import tushare as ts
engine = create_engine('mysql+pymysql://root:[email protected]/packageing?charset=utf8')

df = ts.inst_detail()
de = ts.inst_tops()
broker = ts.broker_tops()

df.to_sql('inst_datail',engine,if_exists = 'append')
de.to_sql('inst_tops',engine,if_exists = 'append')
broker.to_sql('broker_tops',engine,if_exists = 'append')
Example 24
         for idx in df.index:
             temp = df.loc[idx]
             sql = "insert into broker_tops(broker,count,bamount,bcount,samount,scount,top3) values(%s,%s,%s,%s,%s,%s,%s)"
             param = (temp['broker'],temp['count'],temp['bamount'],temp['bcount'],temp['samount'],temp['scount'],temp['top3'])
             cursor.execute(sql, param)
             conn.commit()
 except:
     f=open("errors/"+today+".log",'a')
     traceback.print_exc(file=f)
     f.flush()
     f.close()
 
 # Institutional seat tracking
 try:                
     df = idx = temp = sql = param = None
     df = ts.inst_tops()
     if df is not None:
         for idx in df.index:
             temp = df.loc[idx]
             sql = "insert into inst_tops(code,name,bamount,bcount,samount,scount,net) values(%s,%s,%s,%s,%s,%s,%s)"
             param = (temp['code'],temp['name'],temp['bamount'],temp['bcount'],temp['samount'],temp['scount'],temp['net'])
             cursor.execute(sql,param)
             conn.commit()
 except:
     f=open("errors/"+today+".log",'a')
     traceback.print_exc(file=f)
     f.flush()
     f.close()
     
 # Institutional trade details
 try:
Example 25
File: tops.py Project: cnslyq/ts
	try:
		df = ts.broker_tops(freq)
		df['date'] = today
		df['freq'] = freq
		df = df.set_index('date', drop=True)
		df.to_sql('tops_broker',engine,if_exists='append')
		print()
		tsl.log("tops_broker done")
	except BaseException as e:
		print()
		print(e)
		tsl.log("tops_broker error")
	
	tsl.log("tops_inst_seat start...")
	try:
		df = ts.inst_tops(freq)
		df['date'] = today
		df['freq'] = freq
		df = df.set_index('code', drop=True)
		df.to_sql('tops_inst_seat',engine,if_exists='append')
		print()
		tsl.log("tops_inst_seat done")
	except BaseException as e:
		print()
		print(e)
		tsl.log("tops_inst_seat error")
	
	# TBD
	# no data, should be a bug
	'''
	if freq == 5:
Example 26
import tushare as ts
import util

import pandas
from sqlalchemy import create_engine
engine = create_engine(util.db_url)

# Daily top list (dragon-tiger board)
# top_daily = ts.top_list('2019-05-06')
# pandas.io.sql.to_sql(top_daily, "top_daily_20190506", engine, schema='StockPick', if_exists='append')

# Individual stock listing statistics
# individual = ts.cap_tops(days=10)
# pandas.io.sql.to_sql(individual, "individual_20190507_10", engine, schema='StockPick', if_exists='append')

# Brokerage branch listing statistics
# broker_tops = ts.broker_tops(days=10)
# pandas.io.sql.to_sql(broker_tops, "broker_tops_20190507_10", engine, schema='StockPick', if_exists='append')

# Institutional seat tracking
institution_tops = ts.inst_tops(days=10)
pandas.io.sql.to_sql(institution_tops, "institution_tops_20190507_10", engine, schema='StockPick', if_exists='append')

Example 27
    days: statistics period of 5, 10, 30 or 60 days; defaults to 5
    retry_count: number of retries after a network error; defaults to 3
    pause: seconds to pause between retries; defaults to 0

Return value description:

    code: stock code
    name: stock name
    bamount: cumulative buy amount (10k CNY)
    bcount: number of buys
    samount: cumulative sell amount (10k CNY)
    scount: number of sells
    net: net amount (10k CNY)

'''
ts.inst_tops()

# Institutional trade details
'''
Get the institutional seat trade detail statistics for the most recent trading day

Parameter description:

    retry_count: number of retries after a network error; defaults to 3
    pause: seconds to pause between retries; defaults to 0

Return value description:

    code: stock code
    name: stock name
    date: trading date
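
Given the bamount/samount/net fields described above, here is a short sketch (again assuming the legacy free interface still responds) that keeps only the stocks with heavy institutional net buying; the 5000 (10k CNY) threshold is an arbitrary illustration, not anything defined by tushare:

import tushare as ts

def top_net_institutional_buys(days=5, threshold=5000):
    """Rows where institutional net buying over `days` exceeds `threshold` (in 10k CNY)."""
    df = ts.inst_tops(days=days, retry_count=3, pause=1)
    if df is None or df.empty:
        return None
    # 'net' is the cumulative buy amount minus the cumulative sell amount
    return df[df['net'] > threshold].sort_values('net', ascending=False)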
Example 28
def cn_main_loop(mode):
    the_ticks, info, flags = interact_choose_ticks(mode)
    # print the_ticks
    # print info
    exec_func = get_one_ticker_k_data
    if 'realtime' in flags:
        exec_func = real_time_ticks
    elif 'onestock' in flags:
        exec_func = real_time_ticks
    elif 'news_sina' in flags:
        df = get_latest_news()
        idxs, nflags = cli_select_menu(df['title'], menu_columns=1)
        for rowid in idxs:
            url = df.iloc[rowid]['url']
            texts, html = get_article_detail(url, 'p')
            print texts.encode(ENCODE, 'ignore')
    elif 'news_wscn' in flags or 'hot_wscn' in flags:
        wscn = StockNewsWSCN()
        if 'hot_wscn' in flags:
            df = wscn.mode_run('hot_article')
        else:
            df = wscn.mode_run('info_flow')
        idxs, nflags = cli_select_menu(df['title'], menu_columns=1)
        # pdb.set_trace()
        for rowid in idxs:
            url = df.iloc[rowid]['uri']
            res = wscn.mode_run('article', stocks=[url])
            print res[0].encode(ENCODE, 'ignore')
            print ''
    elif 'index' in flags:
        df = ts.get_index()
        print df
    elif 'futu_news' in flags:
        _ftnn = StockNewsFUTUNN()
        df = _ftnn.get_news()
        df.index = pd.RangeIndex(df.shape[0])
        idxs, nflags = cli_select_menu(df['content'], menu_columns=1)
        for rowid in idxs:
            url = df.iloc[rowid]['detail_url']
            texts, tags = get_article_detail(url, 'div', '#content')
            print texts
            # print (u'\n'.join(texts[:-5])).encode('gbk','ignore')
    elif 'wscn_loop' in flags:
        wscn_loop()
    elif 'top' in flags:
        df = ts.top_list()
        print df.sort_values('amount', ascending=False)
    elif 'inst' in flags:
        df = ts.inst_tops()
        print df.sort_values('net', ascending=False)
        raw_input('[pause]')
        df = ts.inst_detail()
        print df.sort_values('bamount', ascending=False)
    elif 'quit' in flags:
        sys.exit()

    if Pool is None:
        for tk in the_ticks:
            results = [exec_func(tk, info)]
    else:
        pool = Pool(8)
        jobs = []
        for tk in the_ticks:
            job = pool.spawn(exec_func, tk, info, flags)
            jobs.append(job)
        # pool.close()
        pool.join()
        # jobs = [gevent.spawn(get_one_ticker_k_data,tk,info,flags) for tk in the_tks]
        # gevent.joinall(jobs)
        results = [job.value for job in jobs]

    ## read back the analysis results
    # fname = 'results.%s.json'%exec_func.func_name
    # print fname
    # json.dump(results,open(fname,'w'),indent=2)
    ########### print the technical analysis
    print '\n\n' + analyse_res_to_str(results) + '\n'

    if 'graph' in flags and exec_func.func_name == 'get_one_ticker_k_data':
        cols = len(results)
        fig, ax = plt.subplots(nrows=3, ncols=cols, sharex=False)
        for i, onestk in enumerate(results):
            tick = onestk['code']
            name = onestk['info'].get('name')
            # fname = FNAME_PAT_HIST%tick
            # df = pd.read_csv(fname,encoding='utf8',index_col='date')
            df = pd.DataFrame.from_dict(onestk['df'])
            df = df[-50:]
            title = '%s' % (tick)
            df['atr'] = talib.ATR(df['high'], df['low'], df['close'])
            df['sma10'] = talib.SMA(df['close'], 10)
            df['ema10'] = talib.EMA(df['close'], 10)
            df['ema_dif'] = df['ema10'] - df['sma10']
            if cols > 1:
                aax = [ax[0, i], ax[1, i], ax[2, i]]
            else:
                aax = [ax[0], ax[1], ax[2]]
            df[['close', 'sma10', 'ema10']].plot(title=title, ax=aax[0])
            df[['ema_dif']].plot(title=title, ax=aax[1])
            df[['volume']].plot(title=title, ax=aax[2])
        plt.show()
    if 'pause' in flags:
        raw_input('pause')
    return flags
Example 29
def get_tiger():
    file2 = './data/jigou.xlsx'
    df = ts.inst_tops(5)
    df2 = df.sort_values(by='bcount', ascending=False)
    df2.to_excel(file2, index=False)
    print "import jigou done!"
Example 30
 def cb(**kw):
     return ts.inst_tops()
Example 31
def get_td_inst_list(days=5, retry=3, pause=0):
    """
    List of stocks on the top list
    """
    data = ts.inst_tops(days=days, retry_count=retry, pause=pause)
    return data
Example 32
 def GetInsttops(self):
     datateees = ts.inst_tops()
     self.SaveCSV(datateees, 'insttops.csv')
Example 33
    def findAgent(self):
        df = ts.inst_tops(10)
        df = df.sort_values(by='bamount')

        print(df[df.net > 5000])
        return df[df.net > 5000]