def job_7():
    try:
        print("I'm working...... top-list (dragon-tiger board) data")
        # daily top list
        top_list = ts.top_list(today)
        data = pd.DataFrame(top_list)
        data.to_sql('top_list', engine, index=True, if_exists='replace')
        print("daily top list......done")
        # per-stock appearance statistics
        cap_tops = ts.cap_tops()
        data = pd.DataFrame(cap_tops)
        data.to_sql('cap_tops', engine, index=True, if_exists='replace')
        print("per-stock appearance statistics......done")
        # brokerage branch appearance statistics
        broker_tops = ts.broker_tops()
        data = pd.DataFrame(broker_tops)
        data.to_sql('broker_tops', engine, index=True, if_exists='replace')
        print("brokerage branch appearance statistics......done")
        # institutional seat tracking
        inst_tops = ts.inst_tops()
        data = pd.DataFrame(inst_tops)
        data.to_sql('inst_tops', engine, index=True, if_exists='replace')
        print("institutional seat tracking......done")
        # institutional trade detail
        inst_detail = ts.inst_detail()
        data = pd.DataFrame(inst_detail)
        data.to_sql('inst_detail', engine, index=True, if_exists='replace')
        print("institutional trade detail......done")
    except Exception as e:
        print(e)
def store_top_data(self, trading_date=None):
    """Top-list (dragon-tiger board) data: this API provides historical
    records of stocks that appeared on the daily top list."""
    trading_date = self.last_trading_date if trading_date is None else trading_date
    # daily top list
    print('top_list...')
    top_df = ts.top_list(self.stock_date_format(trading_date))
    self.mysqlUtils.append_data(top_df, 'top_list')
    # per-stock appearance statistics
    print('cap_tops...')
    cap_tops_df = ts.cap_tops()
    cap_tops_df['date'] = trading_date
    self.mysqlUtils.append_data(cap_tops_df, 'cap_tops')
    # brokerage branch appearance statistics
    print('broker_tops...')
    broker_tops_df = ts.broker_tops()
    broker_tops_df['date'] = trading_date
    self.mysqlUtils.append_data(broker_tops_df, 'broker_tops')
    # institutional seat tracking
    print('inst_tops...')
    inst_tops_df = ts.inst_tops()
    inst_tops_df['date'] = trading_date
    self.mysqlUtils.append_data(inst_tops_df, 'inst_tops')
    # institutional trade detail
    print('inst_detail...')
    inst_detail_df = ts.inst_detail()
    self.mysqlUtils.append_data(inst_detail_df, 'inst_detail')
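# The snippet above delegates persistence to a mysqlUtils helper that is not
# shown here. A minimal sketch of what such an append_data helper could look
# like, assuming a SQLAlchemy engine and pandas' to_sql (the class name and
# the connection URL below are hypothetical, inferred from the call sites):
from sqlalchemy import create_engine


class MysqlUtils:
    def __init__(self, url):
        # e.g. 'mysql+pymysql://user:password@localhost/stock?charset=utf8'
        self.engine = create_engine(url)

    def append_data(self, df, table_name):
        """Append a DataFrame to the given table, creating it if needed."""
        if df is not None and not df.empty:
            df.to_sql(table_name, self.engine, if_exists='append', index=False)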
def get_last_charge_detail_by_inst():
    """Store the latest institutional trade details as InstChargeDailyData rows."""
    fields = ['code', 'name', 'date', 'bamount', 'samount', 'type']
    inst_detail = ts.inst_detail()
    inst_detail_len = len(inst_detail)
    if inst_detail_len == 0:
        logger.info("no data received")
        # todo: add email system
        return
    bulk_list = []
    max_date = max(inst_detail['date'])
    if InstChargeDailyData.objects.filter(type_date=max_date).exists():
        return
    for i in range(inst_detail_len):
        per_dict = {f: inst_detail[f][i] for f in fields}
        if not per_dict.get('date'):
            continue
        if per_dict.get('date') != max_date:
            continue
        r_model = InstChargeDailyData(
            type_date=per_dict.get('date'),
            code=per_dict.get('code'),
            name=per_dict.get('name'),
            bamount=per_dict.get('bamount'),
            samount=per_dict.get('samount'),
            type=compare_by_value_get_key(INST_CHARGE_REASON,
                                          per_dict.get('type')))
        bulk_list.append(r_model)
    InstChargeDailyData.objects.bulk_create(bulk_list)
def get_top_inst_list(self, task=None):
    """Fetch the daily top list and append it to a CSV file. A stock can
    appear on the top list for several reasons, so it may have multiple rows,
    one per listing reason. Also fetch the institutional purchase detail for
    the current day; that API takes no date argument and only returns the
    most recent trading day.

    Parameters
    ----------
    task : pass any value to fetch the top list for self.end instead of today
    """
    d = self.today if task is None else self.end
    top_list_df = ts.top_list(d)
    inst_detail_df = ts.inst_detail() if task is None else None
    top_list_path = u"{}/top_list_data.csv".format(data_path)
    write_header = not os.path.exists(top_list_path)
    with codecs.open(top_list_path, "a+", "utf-8") as f:
        top_list_df.to_csv(f, header=write_header, sep="\t", index=True)
    if inst_detail_df is not None:
        inst_detail_path = u"{}/inst_detail_data.csv".format(data_path)
        write_header = not os.path.exists(inst_detail_path)
        with codecs.open(inst_detail_path, "a+", "utf-8") as f:
            inst_detail_df.to_csv(f, header=write_header, sep="\t", index=True)
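# The docstring above notes that a stock can appear on the top list several
# times in one day, once per listing reason. A small sketch of collapsing
# those multiple rows into one row per stock (this assumes top_list_df carries
# the 'code', 'name' and 'reason' columns that ts.top_list usually returns):
grouped = top_list_df.groupby(['code', 'name'])['reason'].apply(
    lambda s: '; '.join(s.astype(str))).reset_index()
print(grouped)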
def institutional_seat_tracking_detail(mechanism1):
    """For each stock in mechanism1, collect its institutional trade details as CSV text."""
    details = []
    mechanism2 = ts.inst_detail()
    mechanism1.apply(
        lambda row: details.append(
            mechanism2[mechanism2['code'] == row['code']].to_csv()),
        axis=1)
    return details
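# A row-wise apply that appends CSV strings works, but the same lookup can be
# expressed as a single merge, which is usually clearer and faster. A sketch
# under the same assumptions (mechanism1 is a DataFrame with a 'code' column);
# note it returns one filtered DataFrame rather than per-stock CSV strings:
def institutional_seat_tracking_detail_merged(mechanism1):
    import tushare as ts
    details = ts.inst_detail()
    # keep only the detail rows whose code appears in mechanism1
    return details.merge(mechanism1[['code']].drop_duplicates(), on='code')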
def getProfit(self, year=0, top=10, shares=10, divi=1):
    df = ts.profit_data(year=year, top=top)
    # df.sort('shares', ascending=False)
    print(df.sort_values(by='divi', ascending=False))
    df1 = ts.inst_detail()
    print(df1)
    return df[df.divi >= divi]
def get_inst_detail():
    df = ts.inst_detail()
    df.to_sql('inst_detail', engine, if_exists='replace', index=False)
def inst_detail(retry_count, pause):
    try:
        df = ts.inst_detail(retry_count, pause)
        engine = create_engine('mysql://*****:*****@127.0.0.1/stock?charset=utf8')
        df.to_sql('inst_detail', engine, if_exists='append')
        print("message")
    except Exception as e:
        print(e)
def stk_inst_detail():
    print("======\ninstitutional trade detail============")
    try:
        df = ts.inst_detail()
        # print(df.idx[0])
        df.to_sql('stk_inst_detail', engine, if_exists='append')
        # print(df)
    except Exception:
        print("insert failed.")
def getdragontigerdata():
    curday = datetime.date.today()
    curdate = curday.strftime('%Y%m%d')
    print(curdate)
    mylogger = getmylogger()
    # daily top list
    df = ts.top_list(curday.strftime('%Y-%m-%d'))
    if df is not None:
        df['date'] = curdate
        tosql(df, 'toplistdata', "append", "daily top-list data", mylogger)
    else:
        mylogger.info("no daily top-list data.")
    # per-stock appearance statistics
    for i in [5, 10, 30, 60]:
        df = ts.cap_tops(i)
        logmsg = "per-stock appearance data, %d-day:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'captops', "append", logmsg, mylogger)
        else:
            mylogger.info("no " + logmsg)
    # brokerage branch appearance statistics
    for i in [5, 10, 30, 60]:
        df = ts.broker_tops(i)
        logmsg = "brokerage branch data, %d-day:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'brokertops', "append", logmsg, mylogger)
        else:
            mylogger.info("no " + logmsg)
    # institutional seat tracking
    for i in [5, 10, 30, 60]:
        df = ts.inst_tops(i)
        logmsg = "institutional seat tracking data, %d-day:" % i
        if df is not None:
            df['date'] = curdate
            df['period'] = i
            tosql(df, 'instops', "append", logmsg, mylogger)
        else:
            mylogger.info("no " + logmsg)
    # institutional trade detail
    df = ts.inst_detail()
    logmsg = "institutional trade detail:"
    if df is not None:
        df['date'] = curdate
        tosql(df, 'instdetail', "append", logmsg, mylogger)
    else:
        mylogger.info("no institutional trade detail.")
def today_institutional_seat_tracking_detail(mechanism1):
    results = []
    data = ts.inst_detail()
    data = data[data['date'] == datetime.today().strftime('%Y-%m-%d')]
    print(data)
    # data['net'] = data.apply((lambda row: row['bamount'] - row['samount']), axis=1)
    data['net'] = data['bamount'] - data['samount']
    data = data.sort_values('net', ascending=False).head(10)
    results.append(data.to_csv())
    return results
def test():
    ts.get_sz50s()
    ts.get_hs300s()
    ts.get_zz500s()
    ts.realtime_boxoffice()
    ts.get_latest_news()
    ts.get_notices(tk)
    ts.guba_sina()
    ts.get_cpi()
    ts.get_ppi()
    ts.get_stock_basics()
    ts.get_concept_classified()
    ts.get_money_supply()
    ts.get_gold_and_foreign_reserves()
    ts.top_list()      # daily top list
    ts.cap_tops()      # per-stock appearance statistics
    ts.broker_tops()   # brokerage branch appearance statistics
    ts.inst_tops()     # institutional seat tracking statistics
    ts.inst_detail()   # institutional trade detail
def download_Orgday(time):
    Datas_b = ts.inst_detail()
    files_path = '../report/Brokerage/%s' % time
    if not os.path.exists(files_path):  # create the directory if it does not exist
        os.mkdir(files_path)
    Datas_b.to_csv(files_path + '/%s_Orgday_csv.csv' % time, encoding='gbk')
    with pd.ExcelWriter(files_path + '/%s_Orgday_xlx.xlsx' % time) as writer:
        Datas_b.to_excel(writer, sheet_name='Sheet1')
    print('\n%s one-day institutional seat data have been saved' % time)
def get_lbh_inst_detail():
    """Institutional trade detail: fetch the seat-level trade detail
    statistics for the most recent trading day."""
    df = ts.inst_detail()
    print(df)
    if df is not None:
        res = df.to_sql(lbh_inst_detail, engine, if_exists='replace')
        msg = 'ok' if res is None else res
        print('institutional trade detail for the latest trading day: {0}'.format(msg) + '\n')
    else:
        print('institutional trade detail for the latest trading day: {0}'.format('None') + '\n')
def __call__(self, conns):
    self.base = Base()
    self.financial_data = conns['financial_data']
    date = self.base.gettoday().replace('/', '-')
    # print(date)
    # '''daily top list'''
    # for day in self.base.datelist('20180702', '20180705'):
    #     day = day.replace('/', '-')
    #     top_list = ts.top_list(day)
    #     self.base.batchwri(top_list, 'top_list', self.financial_data)
    # '''
    # Name: per-stock appearance statistics
    # Parameters:
    #     days: statistics window of 5, 10, 30 or 60 days, default 5
    #     retry_count: number of retries after a network error, default 3
    #     pause: seconds to pause between retries, default 0'''
    # cap_tops = ts.cap_tops()
    # self.base.batchwri(cap_tops, 'cap_tops', self.financial_data)
    # '''
    # Name: brokerage branch appearance statistics
    # Parameters:
    #     days: statistics window of 5, 10, 30 or 60 days, default 5
    #     retry_count: number of retries after a network error, default 3
    #     pause: seconds to pause between retries, default 0'''
    # broker_tops = ts.broker_tops()
    # self.base.batchwri(broker_tops, 'broker_tops', self.financial_data)
    # '''
    # Name: institutional seat tracking
    # Parameters:
    #     days: statistics window of 5, 10, 30 or 60 days, default 5
    #     retry_count: number of retries after a network error, default 3
    #     pause: seconds to pause between retries, default 0
    # '''
    # inst_tops = ts.inst_tops()
    # self.base.batchwri(inst_tops, 'inst_tops', self.financial_data)
    '''institutional trade detail'''
    inst_detail = ts.inst_detail()
    self.base.batchwri(inst_detail, 'inst_detail', self.financial_data)
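# The commented-out documentation above describes a shared (days, retry_count,
# pause) signature for cap_tops, broker_tops and inst_tops. A minimal sketch of
# exercising all documented windows in one loop; the table names follow the
# snippet above, and the engine variable is an assumption:
import tushare as ts

for days in (5, 10, 30, 60):
    for fetch, table in ((ts.cap_tops, 'cap_tops'),
                         (ts.broker_tops, 'broker_tops'),
                         (ts.inst_tops, 'inst_tops')):
        df = fetch(days=days, retry_count=3, pause=0.001)
        if df is not None:
            df['period'] = days
            df.to_sql(table, engine, if_exists='append', index=False)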
def get_institution_detail(retry_count=RETRY_COUNT, pause=PAUSE):
    """Institutional trade detail."""
    logger.info('Begin get InstitutionDetail.')
    try:
        data_df = ts.inst_detail(retry_count, pause)
    except Exception:
        logger.exception('Error get InstitutionDetail.')
        return None
    else:
        data_dicts = []
        if data_df is None or data_df.empty:
            logger.warning('Empty get InstitutionDetail.')
        else:
            data_dicts = [{'code': row[0],
                           'name': row[1],
                           'deal_date': row[2],
                           'bamount': row[3],
                           'samount': row[4],
                           'type': row[5],
                           'insert_date': today_line}
                          for row in data_df.values]
            logger.info('Success get InstitutionDetail.')
        return data_dicts
def top_type(top_type):
    today = datetime.datetime.today().strftime('%Y-%m-%d')
    if top_type == 'top_list':
        top_list = ts.top_list(today)
        if top_list is not None:
            top_list.to_sql('top_list', engine, if_exists='append')
    elif top_type == 'cap_tops':
        cap_tops = ts.cap_tops()
        if cap_tops is not None:
            cap_tops['date'] = today
            cap_tops.to_sql('top_cap_tops', engine, if_exists='append')
    elif top_type == 'broker_tops':
        broker_tops = ts.broker_tops()
        if broker_tops is not None:
            broker_tops['date'] = today
            broker_tops.to_sql('top_broker_tops', engine, if_exists='append')
    elif top_type == 'inst_tops':
        inst_tops = ts.inst_tops()
        if inst_tops is not None:
            inst_tops['date'] = today
            inst_tops.to_sql('top_inst_tops', engine, if_exists='append')
    elif top_type == 'inst_detail':
        inst_detail = ts.inst_detail()
        if inst_detail is not None:
            inst_detail.to_sql('top_inst_detail', engine, if_exists='append')
def inst_detail(retry_count=5, pause=1):
    df = ts.inst_detail(retry_count=retry_count, pause=pause)
    return df
    pause: seconds to pause between retries, default 0
Return fields:
    code: stock code
    name: stock name
    bamount: cumulative buy amount (10k CNY)
    bcount: number of buy appearances
    samount: cumulative sell amount (10k CNY)
    scount: number of sell appearances
    net: net amount (10k CNY)'''
print('institutional seat tracking')
print(ts.inst_tops())

'''
Fetch the seat-level institutional trade detail statistics for the most
recent trading day.
Parameters:
    retry_count: number of retries after a network error, default 3
    pause: seconds to pause between retries, default 0
Return fields:
    code: stock code
    name: stock name
    date: trade date
    bamount: institutional seat buy amount (10k CNY)
    samount: institutional seat sell amount (10k CNY)
    type: listing reason
'''
print('institutional trade detail')
print(ts.inst_detail())
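# Given the return fields documented above, the day's stocks can be ranked by
# net institutional flow. A minimal sketch (it assumes inst_detail() returned
# a non-empty DataFrame with the bamount/samount columns described above):
detail = ts.inst_detail()
if detail is not None and not detail.empty:
    detail['net'] = detail['bamount'] - detail['samount']  # net flow, 10k CNY
    print(detail.sort_values('net', ascending=False).head(10))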
    for idx in df.index:
        temp = df.loc[idx]
        sql = ("insert into inst_tops(code,name,bamount,bcount,samount,scount,net) "
               "values(%s,%s,%s,%s,%s,%s,%s)")
        param = (temp['code'], temp['name'], temp['bamount'], temp['bcount'],
                 temp['samount'], temp['scount'], temp['net'])
        cursor.execute(sql, param)
    conn.commit()
except Exception:
    f = open("errors/" + today + ".log", 'a')
    traceback.print_exc(file=f)
    f.flush()
    f.close()

# institutional trade detail
try:
    df = idx = temp = sql = param = None
    df = ts.inst_detail()
    if df is not None:
        for idx in df.index:
            temp = df.loc[idx]
            sql = ("insert into inst_detail(code,name,date,bamount,samount,type) "
                   "values(%s,%s,%s,%s,%s,%s)")
            param = (temp['code'], temp['name'], temp['date'],
                     temp['bamount'], temp['samount'], temp['type'])
            cursor.execute(sql, param)
        conn.commit()
except Exception:
    f = open("errors/" + today + ".log", 'a')
    traceback.print_exc(file=f)
    f.flush()
    f.close()

cursor.close()
conn.close()
# coding=utf-8
__author__ = 'Administrator'
from datetime import date

import numpy as np
import sys
import tushare as ts

today = date.today()
today1 = str(today)
start = '%s-%s-%s' % (today.year - 1, today.month, today.day)
data = ts.get_hist_data('600567', start=start, end=today1)
close = data['close']
# print(close)
print(ts.inst_detail(retry_count=5, pause=0.001))
# print(ts.inst_tops())
def ChairDetail():
    df = ts.inst_detail()
    df = df.sort_values('bamount', ascending=False)
    return [u"institutional trade detail:", df[:50]]
def capture_stock_data():
    capture_date = datetime.datetime.now().strftime("%Y%m%d")
    save_dir = "/home/dandelion/stock_data/" + capture_date
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
        print("The save directory was created successfully!\n", save_dir)
    else:
        print("The save directory already exists!\n", save_dir)

    # ====================== Daily Command ======================
    # box office of the last day, saved as a CSV named after the capture date
    ts.day_boxoffice().to_csv(save_dir + "/" + capture_date + "_day_boxoffice.csv",
                              header=True, sep=",", index=False)
    print("day_boxoffice data capture completed!")
    # cinema data of the last day
    ts.day_cinema().to_csv(save_dir + "/" + capture_date + "_day_cinema.csv",
                           header=True, sep=",", index=False)
    print("day_cinema data capture completed!")
    ts.month_boxoffice().to_csv(save_dir + "/" + capture_date + "_month_boxoffice.csv",
                                header=True, sep=",", index=False)
    print("month_boxoffice data capture completed!")
    ts.realtime_boxoffice().to_csv(save_dir + "/" + capture_date + "_realtime_boxoffice.csv",
                                   header=True, sep=",", index=False)
    print("realtime_boxoffice data capture completed!")
    # index quotes of the last day
    ts.get_index().to_csv(save_dir + "/" + capture_date + "_get_index.csv",
                          header=True, sep=",", index=False)
    print("get_index data capture completed!")
    # historical CPI data
    ts.get_cpi().to_csv(save_dir + "/" + capture_date + "_get_cpi.csv",
                        header=True, sep=",", index=False)
    print("get_cpi data capture completed!")
    # historical yearly GDP data
    ts.get_gdp_year().to_csv(save_dir + "/" + capture_date + "_get_gdp_year.csv",
                             header=True, sep=",", index=False)
    print("get_gdp_year data capture completed!")
    # today's quotes for all stocks
    # ts.get_today_all().to_csv(save_dir + "/" + capture_date + "_get_today_all.csv", header=True, sep=",", index=False)
    # top brokerage branches of the day
    ts.broker_tops().to_csv(save_dir + "/" + capture_date + "_broker_tops.csv",
                            header=True, sep=",", index=False)
    print("broker_tops data capture completed!")
    # top stocks of the day
    ts.cap_tops().to_csv(save_dir + "/" + capture_date + "_cap_tops.csv",
                         header=True, sep=",", index=False)
    print("cap_tops data capture completed!")
    ts.get_area_classified().to_csv(save_dir + "/" + capture_date + "_get_area_classified.csv",
                                    header=True, sep=",", index=False)
    print("get_area_classified data capture completed!")
    # ts.get_balance_sheet(code='').to_csv(save_dir + "/" + capture_date + "_get_balance_sheet.csv", header=True, sep=",", index=False)
    # print('get_balance_sheet data capture completed!')
    # ts.get_cash_flow(code='').to_csv(save_dir + "/" + capture_date + "_get_cash_flow.csv", header=True, sep=",", index=False)
    # print('get_cash_flow data capture completed!')
    ts.get_day_all().to_csv(save_dir + "/" + capture_date + "_get_day_all.csv",
                            header=True, sep=",", index=False)
    print("get_day_all data capture completed!")
    ts.get_cashflow_data(2018, 3).to_csv(save_dir + "/" + capture_date + "_get_cashflow_data.csv",
                                         header=True, sep=",", index=False)
    print("get_cashflow_data data capture completed!")
    ts.get_concept_classified().to_csv(save_dir + "/" + capture_date + "_get_concept_classified.csv",
                                       header=True, sep=",", index=False)
    print("get_concept_classified data capture completed!")
    ts.get_debtpaying_data(2018, 3).to_csv(save_dir + "/" + capture_date + "_get_debtpaying_data.csv",
                                           header=True, sep=",", index=False)
    print("get_debtpaying_data data capture completed!")
    ts.get_deposit_rate().to_csv(save_dir + "/" + capture_date + "_get_deposit_rate.csv",
                                 header=True, sep=",", index=False)
    print("get_deposit_rate data capture completed!")
    ts.get_gdp_contrib().to_csv(save_dir + "/" + capture_date + "_get_gdp_contrib.csv",
                                header=True, sep=",", index=False)
    ts.get_gdp_for().to_csv(save_dir + "/" + capture_date + "_get_gdp_for.csv",
                            header=True, sep=",", index=False)
    ts.get_gdp_pull().to_csv(save_dir + "/" + capture_date + "_get_gdp_pull.csv",
                             header=True, sep=",", index=False)
    ts.get_gdp_quarter().to_csv(save_dir + "/" + capture_date + "_get_gdp_quarter.csv",
                                header=True, sep=",", index=False)
    print("get_gdp_ data capture completed!")
    # ts.get_gdp_year().to_csv(save_dir + "/" + capture_date + "_get_gdp_year.csv", header=True, sep=",", index=False)
    ts.get_gem_classified().to_csv(save_dir + "/" + capture_date + "_get_gem_classified.csv",
                                   header=True, sep=",", index=False)
    ts.get_gold_and_foreign_reserves().to_csv(save_dir + "/" + capture_date + "_get_gold_and_foreign_reserves.csv",
                                              header=True, sep=",", index=False)
    ts.get_growth_data(2018, 3).to_csv(save_dir + "/" + capture_date + "_get_growth_data.csv",
                                       header=True, sep=",", index=False)
    ts.get_industry_classified().to_csv(save_dir + "/" + capture_date + "_get_industry_classified.csv",
                                        header=True, sep=",", index=False)
    ts.get_hs300s().to_csv(save_dir + "/" + capture_date + "_get_hs300s.csv",
                           header=True, sep=",", index=False)
    ts.get_sz50s().to_csv(save_dir + "/" + capture_date + "_get_sz50s.csv",
                          header=True, sep=",", index=False)
    ts.get_zz500s().to_csv(save_dir + "/" + capture_date + "_get_zz500s.csv",
                           header=True, sep=",", index=False)
    ts.get_operation_data(2018, 3).to_csv(save_dir + "/" + capture_date + "_get_operation_data.csv",
                                          header=True, sep=",", index=False)
    ts.get_stock_basics().to_csv(save_dir + "/" + capture_date + "_get_stock_basics.csv",
                                 header=True, sep=",", index=False)
    ts.get_report_data(2018, 3).to_csv(save_dir + "/" + capture_date + "_get_report_data.csv",
                                       header=True, sep=",", index=False)
    ts.inst_detail().to_csv(save_dir + "/" + capture_date + "_inst_detail.csv",
                            header=True, sep=",", index=False)
    ts.inst_tops().to_csv(save_dir + "/" + capture_date + "_inst_tops.csv",
                          header=True, sep=",", index=False)
    print("inst_tops data capture completed!")
    ts.new_stocks().to_csv(save_dir + "/" + capture_date + "_new_stocks.csv",
                           header=True, sep=",", index=False)
    print("new_stocks data capture completed!")
    ts.top_list().to_csv(save_dir + "/" + capture_date + "_top_list.csv",
                         header=True, sep=",", index=False)
    print("top_list data capture completed!")
from sqlalchemy import create_engine
import tushare as ts

# engine = create_engine('mysql+pymysql://root:[email protected]/packageing?charset=utf8')
engine = create_engine(
    'mysql+pymysql://root:[email protected]/gupiao?charset=utf8')

df = ts.inst_detail()      # institutional trade detail
de = ts.inst_tops()        # institutional seat tracking
broker = ts.broker_tops()  # brokerage branch statistics

# df.to_sql('inst_detail', engine)
# de.to_sql('inst_tops', engine)
# broker.to_sql('broker_tops', engine)
df.to_sql('inst_detail', engine, if_exists='append')
de.to_sql('inst_tops', engine, if_exists='append')
# broker.to_sql('broker_tops', engine, if_exists='append')
def cn_main_loop(mode):
    the_ticks, info, flags = interact_choose_ticks(mode)
    # print(the_ticks)
    # print(info)
    exec_func = get_one_ticker_k_data
    if 'realtime' in flags:
        exec_func = real_time_ticks
    elif 'onestock' in flags:
        exec_func = real_time_ticks
    elif 'news_sina' in flags:
        df = get_latest_news()
        idxs, nflags = cli_select_menu(df['title'], menu_columns=1)
        for rowid in idxs:
            url = df.iloc[rowid]['url']
            texts, html = get_article_detail(url, 'p')
            print(texts.encode(ENCODE, 'ignore'))
    elif 'news_wscn' in flags or 'hot_wscn' in flags:
        wscn = StockNewsWSCN()
        if 'hot_wscn' in flags:
            df = wscn.mode_run('hot_article')
        else:
            df = wscn.mode_run('info_flow')
        idxs, nflags = cli_select_menu(df['title'], menu_columns=1)
        # pdb.set_trace()
        for rowid in idxs:
            url = df.iloc[rowid]['uri']
            res = wscn.mode_run('article', stocks=[url])
            print(res[0].encode(ENCODE, 'ignore'))
            print('')
    elif 'index' in flags:
        df = ts.get_index()
        print(df)
    elif 'futu_news' in flags:
        _ftnn = StockNewsFUTUNN()
        df = _ftnn.get_news()
        df.index = pd.RangeIndex(df.shape[0])
        idxs, nflags = cli_select_menu(df['content'], menu_columns=1)
        for rowid in idxs:
            url = df.iloc[rowid]['detail_url']
            texts, tags = get_article_detail(url, 'div', '#content')
            print(texts)
            # print((u'\n'.join(texts[:-5])).encode('gbk', 'ignore'))
    elif 'wscn_loop' in flags:
        wscn_loop()
    elif 'top' in flags:
        df = ts.top_list()
        print(df.sort_values('amount', ascending=False))
    elif 'inst' in flags:
        df = ts.inst_tops()
        print(df.sort_values('net', ascending=False))
        input('[pause]')
        df = ts.inst_detail()
        print(df.sort_values('bamount', ascending=False))
    elif 'quit' in flags:
        sys.exit()
    if Pool is None:
        for tk in the_ticks:
            results = [exec_func(tk, info)]
    else:
        pool = Pool(8)
        jobs = []
        for tk in the_ticks:
            job = pool.spawn(exec_func, tk, info, flags)
            jobs.append(job)
        # pool.close()
        pool.join()
        # jobs = [gevent.spawn(get_one_ticker_k_data, tk, info, flags) for tk in the_tks]
        # gevent.joinall(jobs)
        results = [job.value for job in jobs]
    # dump the analysis results
    # fname = 'results.%s.json' % exec_func.__name__
    # print(fname)
    # json.dump(results, open(fname, 'w'), indent=2)
    # print the technical analysis
    print('\n\n' + analyse_res_to_str(results) + '\n')
    if 'graph' in flags and exec_func.__name__ == 'get_one_ticker_k_data':
        cols = len(results)
        fig, ax = plt.subplots(nrows=3, ncols=cols, sharex=False)
        for i, onestk in enumerate(results):
            tick = onestk['code']
            name = onestk['info'].get('name')
            # fname = FNAME_PAT_HIST % tick
            # df = pd.read_csv(fname, encoding='utf8', index_col='date')
            df = pd.DataFrame.from_dict(onestk['df'])
            df = df[-50:]
            title = '%s' % tick
            df['atr'] = talib.ATR(df['high'], df['low'], df['close'])
            df['sma10'] = talib.SMA(df['close'], 10)
            df['ema10'] = talib.EMA(df['close'], 10)
            df['ema_dif'] = df['ema10'] - df['sma10']
            if cols > 1:
                aax = [ax[0, i], ax[1, i], ax[2, i]]
            else:
                aax = [ax[0], ax[1], ax[2]]
            df[['close', 'sma10', 'ema10']].plot(title=title, ax=aax[0])
            df[['ema_dif']].plot(title=title, ax=aax[1])
            df[['volume']].plot(title=title, ax=aax[2])
        plt.show()
    if 'pause' in flags:
        input('pause')
    return flags
import tushare as ts
from sqlalchemy import *

token = ts.set_token('c723069dd4a25402d05ea6afad36da2937111adf012f8258abb5f7e05936e575')
engine = create_engine('mysql+pymysql://root:toor@localhost/pack?charset=utf8')

# df = ts.new_stocks()
# df.to_sql('new_stocks', engine, if_exists='append')
# holdings = ts.fund_holdings(2015, 3)
# holdings.to_sql('fund_holdings', engine, if_exists='append')
# df = ts.get_industry_classified()
# df.to_sql('industry_classified', engine, if_exists='append')
# top = ts.top_list('2016-04-21')   # daily top list

cap = ts.cap_tops()         # per-stock appearance statistics
broker = ts.broker_tops()   # brokerage branch appearance statistics
detail = ts.inst_detail()   # institutional trade detail

# top.to_sql('top_list', engine, if_exists='append')
cap.to_sql('cap_tops', engine, if_exists='append')
broker.to_sql('broker_tops', engine, if_exists='append')
detail.to_sql('inst_detail', engine, if_exists='append')
from sqlalchemy import create_engine
import tushare as ts
import pandas
import pymysql
import datetime

engine = create_engine(
    'mysql+pymysql://root:[email protected]:3306/gushi?charset=utf8')
dt = ts.inst_detail()
dt.to_sql('jigou', engine, if_exists='append', index=False)
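# Several of the snippets above append inst_detail() to a table on every run.
# Because the API always returns the most recent trading day, re-running the
# job on the same day duplicates rows. One way to guard against that is shown
# below as a sketch; the table and column names follow the snippets above, and
# it assumes the target table already exists with a 'date' column:
import pandas as pd

def append_if_new(df, table, engine):
    """Append only the rows whose date is not already stored."""
    if df is None or df.empty:
        return
    existing = pd.read_sql('SELECT DISTINCT date FROM {}'.format(table), engine)
    new_rows = df[~df['date'].isin(existing['date'])]
    if not new_rows.empty:
        new_rows.to_sql(table, engine, if_exists='append', index=False)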
def get_td_inst_detail(retry=3, pause=0):
    """Most recent trading day."""
    data = ts.inst_detail(retry_count=retry, pause=pause)
    return data
def getInstDetail(self):
    file_name = 'inst_detail.csv'
    path = self.index + self.index_inst_detail + file_name
    data = ts.inst_detail()
    data.to_csv(path, encoding='utf-8')
    print(file_name)
def GetDetail(self):
    datatts = ts.inst_detail()
    self.SaveCSV(datatts, 'instdetail.csv')
def GetInstitutionDetailAtLastDay():
    """Return the institutional trade detail for the most recent trading day."""
    return ts.inst_detail()