def page_api(**optx):
    """
    Web entry point: run wrap_alanapi() and normalize its result into
    JSON-serializable primitives.

    Keyword args override the defaults search='comment', outTF=False.
    'dfTF' (popped) keeps a DataFrame result as-is instead of converting
    it to a list of records. debugTF is forced on for web tracing.

    Returns a dict, a list of dicts, a list of records, or the raw result.
    """
    opts = {"search": "comment", "outTF": False}
    opts.update(optx)
    dfTF = opts.pop('dfTF', False)
    opts.update(debugTF=True)  # always trace web-mode calls
    pqint("===page_api INPUT:\n", opts, file=sys.stderr)
    data = wrap_alanapi(**opts)
    if isinstance(data, dict):
        dd = {x: dtCvt(y) for x, y in data.items()}
    elif isinstance(data, list):
        # convert datetime-like values in place for each dict element
        # (fixed idiom: iterate rows directly instead of range(len(...)))
        for row in data:
            if not isinstance(row, dict):
                continue
            for x, y in row.items():
                row[x] = dtCvt(y)
        dd = data
    elif isinstance(data, pd.DataFrame) and dfTF is False:
        dd = data.to_dict(orient='records')
    else:
        dd = data
    pqint("===page_api type_data:{},type_dd:{}, OUTPUT_dd:\n{}".format(
        type(data), type(dd), dd)[:100], "\n", file=sys.stderr)
    return dd
def run_alanapi(symLst, fdLst, **opts):
    '''
    Dispatch symLst/fdLst to the matching search_* function named by
    opts['search'] and optionally post-process via data_output().

    list of keys:
    ['lang', 'subtopic', 'search', 'end', 'start', 'debugTF', 'hostname',
     'tablename', 'topic', 'field', 'instrument', 'output', 'dbname']

    Returns the search result (possibly formatted), or None on failure.
    '''
    search, instrument, hostname, dbname, tablename = (
        opts['search'], opts['instrument'], opts['hostname'],
        opts['dbname'], opts['tablename'])
    # fixed: identity comparison with None (was `search == None`)
    if search is None:
        search = 'search_list'
    elif search[:7] != 'search_':
        search = 'search_{}'.format(search)
    if symLst is None:
        # '*' means "all symbols" for list-style searches
        symLst = ['*'] if search[-5:] == '_list' else []
    # fixed: pre-bind so the except clause cannot hit an unbound name when
    # dft_tablename() or the globals() lookup raises before assignment
    searchFunc = None
    try:
        opts['tablename'] = dft_tablename(search, instrument, tablename)
        pqint(" --- {} {}, instrument:{}, tablename:{}\n".format(
            search, symLst, instrument, tablename), file=sys.stderr)
        searchFunc = globals()[search]
        opts.pop('search', None)
        data = searchFunc(symLst, fdLst, **opts)
    except Exception as e:
        pqint('**ERROR: failed to run {}: {}'.format(
            searchFunc or search, str(e)), file=sys.stderr)
        return None
    outTF = opts.pop('outTF', True)
    if outTF:
        output = opts.pop('output', 'json')
        data = data_output(data, output)
    return data
def search_comment(tkLst, fdLst, **opts):
    """
    Run the <topic>_comment() generator for a known topic, or fall back to
    per-ticker geteach_comment() aggregation.

    Returns None for an unknown topic or an empty result; otherwise the
    generator's result or an aggregated DataFrame.
    """
    topicLst = 'hourly|news|report|theme|peers|industry|MFRM'.split('|')
    topic = getKeyVal(opts, 'topic', 'MFRM')
    if topic not in topicLst:
        return None
    argName = "{}_comment".format(topic)
    # (redundant `topic in topicLst` re-check removed; guarded above)
    if argName in globals():
        pqint("==RUNNING {}() Inputs:{}".format(argName, opts), file=sys.stderr)
        # fixed: `data` was unbound at `return data` when the call raised
        data = None
        try:
            data = globals()[argName](tkLst, fdLst, **opts)
        except Exception as e:
            pqint("**ERROR:{} to run {}".format(str(e), argName), file=sys.stderr)
        return data
    output = opts.pop('output', None)
    data = pd.DataFrame()
    optx = subDict(opts, ['tablename', 'lang', 'dbname', 'hostname',
        'topic', 'subtopic', 'factor'])
    if tkLst[0] == '*':
        data = geteach_comment('*', fdLst, **optx)
        return data
    for ticker in tkLst:
        ret = geteach_comment(ticker, fdLst, **optx)
        if ret is not None and len(ret) > 0:
            # fixed: DataFrame.append is removed in pandas>=2.0
            data = pd.concat([data, ret], ignore_index=True)
    if len(data) < 1:
        return None
    return data
def get_db_pg(dbname=None, hostname=None, port=5432):
    """
    Build a SQLAlchemy engine for a postgres DB as user 'sfdbo'.

    Returns the engine, or None when dbname/hostname are missing or the
    engine cannot be created.
    """
    engine = None
    if dbname is not None and hostname is not None:
        try:
            dbURL = 'postgresql://sfdbo@{}:{}/{}'.format(hostname, port, dbname)
            # fixed: dropped redundant `if dbURL is not None` on a value
            # assigned on the previous line
            engine = create_engine(dbURL)
        except Exception:  # fixed: was a bare except
            pqint("***DB ERROR:", sys.exc_info()[1], file=sys.stderr)
    return engine
def page_performance(**optx):
    """
    Web shortcut: fetch a detailed AAPL quote via page_api().

    NOTE: search/topic/ticker are forced here, overriding caller values.
    Returns the first record when page_api yields a non-empty list,
    otherwise the raw result.
    """
    optx.update(search='quote', topic='detailed', ticker='AAPL')
    ret = page_api(**optx)
    # fixed: log label previously said 'page_report' (copy-paste error)
    pqint(" --page_performance OUTPUT:\n", type(ret), "{}".format(ret)[:30],
        file=sys.stderr)
    if isinstance(ret, list) and len(ret) > 0:
        return ret[0]
    return ret
def save2mgdb_daily_comment(ret, dbM, tablename):
    """
    Persist one daily-comment document, replacing any existing record with
    the same (ticker, curr_date) key.

    ret: object exposing to_json() (e.g. a pandas Series/DataFrame row).
    Returns the number of top-level keys stored, or None when dbM is None.
    """
    if dbM is None:
        return None
    pqint(ret, file=sys.stderr)
    mobj = json_util.loads(ret.to_json())
    keyFilter = {
        "ticker": mobj["ticker"],
        "curr_date": mobj["curr_date"],
    }
    collection = dbM[tablename]
    collection.delete_one(keyFilter)
    collection.insert_one(mobj)
    return len(mobj)
def geteach_peers(tkLst, fdLst, subtopic='', output='csv', tablename=None, lang=None, dbname="ara", hostname='localhost', **optx):
    """
    Pull peer info for tkLst via iex_peers and keep only requested fields.

    fdLst: '*' keeps every column, otherwise a comma-separated column list
    (intersected with what iex_peers actually returned).
    Returns the (possibly column-filtered) DataFrame.
    """
    from iex_peers import iex_peers
    df, peerLst, peerInfo = iex_peers(tkLst)
    #if subtopic!='detailed':
    #	return peerLst
    if fdLst != '*':
        keep = list(set(fdLst.split(',')) & set(df.columns))
        df = df[keep]
    data = df
    # data = data_output(df,output)
    pqint(df.tail(2), file=sys.stderr)
    return data
def yh_rawdata(ticker=None, gap='1m', ranged='1d', debugTF=False):
    """ Get daily/minute data via yahoo version 8 api

    ticker: symbol to pull; returns '' when None.
    gap: bar interval, e.g. '1m' or '1d'.
    ranged: either a yahoo range string ('1d', '1y', ...) or a
        'start,end' pair (either side may be empty) converted to
        epoch period1/period2 query parameters.
    Returns the jX['chart']['result'][0] payload dict, or {} on any failure.
    """
    if ticker is None:
        return ''
    if ',' in ranged:
        # explicit date window: "start,end" with either side optional
        start, end = ranged.split(',')
        if all([start, end]):
            period1 = str2epoch(start)
            period2 = str2epoch(end, endOfDay=True)
            range_periods = "period1={}&period2={}".format(period1, period2)
        elif start:
            # start only: daily gap runs to today, minute gap spans one day
            period1 = str2epoch(start)
            if gap == '1d':
                end = datetime.now().strftime("%Y%m%d")
                period2 = str2epoch(end, endOfDay=True)
            else:
                # NOTE(review): strftime("%s") is a platform-specific
                # (glibc) extension — confirm portability requirements
                period2 = (datetime.fromtimestamp(period1)
                    + timedelta(days=1)).strftime("%s")
            range_periods = "period1={}&period2={}".format(period1, period2)
        elif end:
            # end only: back up one day from the end timestamp
            period2 = str2epoch(end, endOfDay=True)
            period1 = (datetime.fromtimestamp(period2)
                + timedelta(days=-1)).strftime("%s")
            #calc beginning time of period1 date
            #if gap=='1d':
            #	d = datetime.fromtimestamp(float(period1))
            #	period1 = datetime.strptime(d.strftime('%Y%m%d'),'%Y%m%d').strftime("%s")
            range_periods = "period1={}&period2={}".format(period1, period2)
        else:
            # ",": neither side given, fall back to a 1-day range
            range_periods = "range=1d"
        urx = "https://query1.finance.yahoo.com/v8/finance/chart/{}?region=US&lang=en-US&includePrePost=false&interval={}&{}"
        url = urx.format(ticker, gap, range_periods)
    else:
        # plain yahoo range string
        urx = "https://query1.finance.yahoo.com/v8/finance/chart/{}?region=US&lang=en-US&includePrePost=false&interval={}&range={}"
        url = urx.format(ticker, gap, ranged)
    if debugTF is True:
        pqint(url, file=sys.stderr)
    jX = {}
    try:
        #jX = pd.read_json(url)
        ret = requests.get(url, timeout=5)
        jX = ret.json()
    except Exception as e:
        pqint(str(e), file=sys.stderr)
        return {}
    if 'chart' in jX:
        # unwrap the single-result chart payload; any shape surprise -> {}
        try:
            jTmp = jX['chart']['result'][0]
        except Exception as e:
            return {}
    return jTmp
def tk_video(ticker, lang):
    """
    Run stock_ticker2video.sh to render a video for (ticker, lang).

    Returns True on a normal completion within 30s, False otherwise.
    """
    xcmd = ('./stock_ticker2video.sh {} {}').format(ticker, lang)
    pqint('RUNNING ', xcmd, file=sys.stderr)
    p = Popen(xcmd, shell=True, bufsize=1024, stderr=PIPE, stdout=PIPE)
    try:
        # fixed: the timeout belongs on the first communicate(); the
        # original only passed timeout=30 AFTER kill(), so a hung script
        # blocked this call forever
        outs, errs = p.communicate(timeout=30)
        return True
    except Exception:  # fixed: was a bare except
        p.kill()
        outs, errs = p.communicate()
        return False
def run_j2ts(optGet=None, optPost=None):
    """
    Merge GET/POST parameter dicts (GET takes precedence) and render the
    'j2ts' jinja2 template string against the remaining keys.

    Falls back to a usage-hint string when no 'j2ts' key is supplied.
    """
    # fixed: mutable default arguments ({}) replaced with None sentinels
    pqint(optGet, optPost, file=sys.stderr)
    dd = {}
    if optPost is not None:
        dd.update(optPost)
    if optGet is not None:
        dd.update(optGet)
    if 'j2ts' in dd:
        j2ts = dd['j2ts']
        dd = subDict(dd, ['j2ts'], reverseTF=True)
    else:
        j2ts = 'Usage of /?key1=value1&key2=value2 ...'
    return jj_fmt(j2ts, dd, j2onlyTF=True)
def geteach_daily_history(ticker, fdLst, **opts):
    """
    Daily price history for one ticker.

    Tries the database first (pullStockHistory against the module-level
    pgDB); when nothing is found, falls back to a live yahoo pull via
    runOTF(yh_spark_hist, ...) and wraps the result in a DataFrame.
    Returns a DataFrame (possibly empty list on error).
    """
    from _alan_calc import pullStockHistory
    from yh_chart import yh_spark_hist as ysh, runOTF
    datax = []
    try:
        datax = pullStockHistory(ticker, pgDB=pgDB, **opts)
        if datax is None or len(datax) < 1:
            sys.stderr.write("**WARNING:{}\n".format("data not found in DB, live pulling"))
            # live fallback: 1-year daily bars, upserted into yh_daily_hist
            datax = runOTF(ysh, ticker, deltaTolerance=86400, types='chart',
                tablename='yh_daily_hist', zpk=['ticker', 'pbdt'], range='1y',
                interval='1d', debugTF=True, dbname='ara')
            datax = pd.DataFrame(datax)
    except Exception as e:
        pqint("**ERROR:{} @ {}, opts:\n{}".format(str(e), 'geteach_daily_history', opts), file=sys.stderr)
    return datax
def report_comment(tkLst, fdLst, lang='cn', dbname='ara', hostname='localhost', limit=1, **optx):
    """
    Latest single-stock daily report(s) from MDB::daily_single_stock.

    Uses a probe query to discover scalar field names, then re-queries
    with that projection. When nothing is found, triggers rerun_dss()
    and returns its result instead.
    """
    outTF = optx.pop('outTF', True)
    tablename = 'daily_single_stock'
    if tkLst is not None and len(tkLst) > 0:
        ticker = tkLst[0].upper()
        jobj = {"ticker": ticker}
    else:
        ticker = ''
        jobj = {}
    # NOTE(review): sortLst is a set literal here, not a list — confirm
    # find_mdb accepts a set for its sort specification
    sortLst = {'pbdt'}
    data = []
    try:
        # probe: fetch one record to learn which fields are scalar
        xtmp, _, _ = find_mdb(jobj, tablename=tablename, dbname=dbname, sortLst=sortLst, limit=1, dfTF=False)
        if len(xtmp) > 0:
            # keep only scalar (non-dict/non-list) fields for projection
            field = {x for x in xtmp[0] if not isinstance(x, (dict, list))}
            data, _, err = find_mdb(jobj, tablename=tablename, dbname=dbname, field=field, sortLst=sortLst, limit=limit, dfTF=outTF)
    except Exception as e:
        sys.stderr.write("**ERROR: {},{} @{}\n".format(ticker, str(e), 'report_comment'))
        data = []
    if len(data) < 1:
        # nothing stored: regenerate on the fly and return that result
        ret = rerun_dss(ticker)
        sys.stderr.write("**WARNING: {} @{}\n".format(ret, 'report_comment'))
        return ret
    pqint(" --from tablename: {}".format(data), file=sys.stderr)
    # data = data_output(data,output)
    pqint("===report_comment():\nFind:{},Field:{},Sort:{}\n".format(jobj, field, sortLst), file=sys.stderr)
    pqint(" --from tablename: {}".format(tablename), file=sys.stderr)
    pqint(" --DF[top]:\n{}".format(data), file=sys.stderr)
    return data
def getlist_quotes(tkLst, fdLst, tablename="iex_sector_quote", lang=None, dbname="ara", hostname='localhost', colx='symbol'):
    """
    Collect quote records for each ticker from MDB and return them as a
    DataFrame restricted to the fdLst columns ('*' keeps all).
    """
    from _alan_str import find_mdb
    dd = []
    pqint(tkLst, file=sys.stderr)
    for ticker in tkLst:
        ret, _, _ = find_mdb({colx: ticker}, tablename=tablename, dbname=dbname)
        pqint(ret, ticker, tablename, dbname, file=sys.stderr)
        # fixed: was `dd = dd + ret`, rebuilding the list each iteration
        dd.extend(ret)
    df = pd.DataFrame(dd)
    if fdLst != '*':
        field = set(fdLst.split(','))
        newcol = list(field & set(df.columns))
        df = df[newcol]
    return df
def create_topic_theme_ipo(updTF=False, **opts):
    '''
    create 'topic_theme_ipo' based on 'nasdaq_ipos' and yh live-quote info

    Pulls recent IPO records, joins live yahoo quotes on ticker, computes
    ipoChg%/dayChg%, and upserts the merged table into MDB.
    Returns the merged DataFrame.
    '''
    from _alan_calc import renameDict, subDict
    from _alan_str import find_mdb, upsert_mdb
    from yh_chart import yh_quote_comparison as yqc
    # Note: 500 limit may cause close prices of certain tickers not get updated, need further debugging
    limit = opts.pop('limit', 500)
    ipoLst, _, _ = find_mdb(tablename='nasdaq_ipos', dbname='ara',
        sortLst=['pbdate'], limit=limit, dfTF=True)
    ipoLst = renameDict(ipoLst, dict(pbdate='ipoDate', price='ipoPrice'))
    ipoD = subDict(ipoLst, ['ticker', 'ipoDate', 'ipoPrice', 'sector', 'industry'])
    # live quotes for every IPO ticker
    quoLst = yqc(ipoD['ticker'].values)
    quoD = pd.DataFrame(quoLst)
    colX = [
        'ticker', 'close', 'fiftyTwoWeekRange', 'marketCap', 'pbdate',
        'shortName', 'changePercent', 'epsTrailingTwelveMonths', 'pbdt'
    ]
    quoD = subDict(quoD, colX)
    quoD = renameDict(
        quoD,
        dict(epsTrailingTwelveMonths='EPS', close='closePrice',
            shortName='Company', fiftyTwoWeekRange='Range52Week',
            changePercent='dayChg%', change='Chg', pbdt='pubDate'))
    df = ipoD.merge(quoD, on='ticker')  #- remove no-quote rows
    # ,how='left')
    df.dropna(subset=['marketCap'], inplace=True)
    # return since IPO, in percent
    df['ipoChg%'] = (df['closePrice'] / df['ipoPrice'].astype(float) - 1) * 100
    colX = [
        'ticker', 'ipoDate', 'marketCap', 'ipoPrice', 'closePrice',
        'ipoChg%', 'dayChg%', 'EPS', 'Company', 'Range52Week', 'pbdate',
        'pubDate', 'sector', 'industry'
    ]
    df = subDict(df, colX)
    pqint(" --ipo DF:\n{}".format(df), file=sys.stderr)
    dbname = opts.pop('dbname', 'ara')
    tablename = opts.pop('tablename', 'topic_theme_ipo')
    zpk = opts.pop('zpk', {'ticker'})
    upsert_mdb(df, dbname=dbname, tablename=tablename, zpk=zpk)
    sys.stderr.write(" --DF:\n{}\n".format(df.head().to_string(index=False)))
    return df
def search_quote(tkLst, fdLst, **opts):
    """
    Quote lookup across mixed sources.

    Stocks and index-like symbols (containing =, ^ or .) are read from the
    relevant MDB spark/quote tables; other instruments go through
    geteach_quote() against postgres. Returns a DataFrame (or raw list
    when outTF is False), or None when nothing was found.
    """
    tkLst = get_sector_etfname(tkLst, **opts)
    sys.stderr.write("---tkLst: {} @ search_quote\n".format(tkLst))
    instrument = getKeyVal(opts, 'instrument', 'stock')
    outTF = getKeyVal(opts, 'outTF', True)
    hostname, dbname, tablename, lang = getKeyVal(opts,
        ['hostname', 'dbname', 'tablename', 'lang'],
        ['localhost', 'ara', None, None])
    colx = 'ticker' if instrument == 'stock' else 'series'
    data = []
    opts.pop('ticker', None)
    for ticker in tkLst:
        try:
            # get quotes from MDB::"yh_quote_curr" for yahoo source indices setup in the PGDB::'mapping_series_label'
            if instrument == 'stock' or re.search(r'[=^.]', ticker):
                # pick the MDB table by symbol class
                mktLst = ['^GSPC', '^DJI', '^IXIC', '^SOX']
                if ticker.upper() in mktLst:
                    tablename = "market_indicator_quote"
                elif re.search(r'[=^.]', ticker):
                    tablename = "yh_spark_hist"
                else:
                    tablename = "iex_spark_hist"
                jobj = {"ticker": ticker}
                # latest record only, by epochs descending
                ret = list(mgDB[tablename].find(jobj, {"_id": 0}, sort=[("epochs", -1)]).limit(1))
                #ret,_,_=find_mdb(jobj,tablename=tablename,dbname="ara")
                ret = subDict(ret, ['ticker', 'close', 'change', 'pchg', 'xclose', 'epochs', 'pbdate', 'pbdt'])
                ret = renameDict(ret, {'pchg': 'changePercent', 'xclose': 'prevClose'})
            else:
                # get quotes for all fields from pgDB
                ret = geteach_quote(ticker, fdLst='*', tablename=tablename, lang=lang, dbname=dbname, hostname=hostname, colx=colx)
            if ret is not None and len(ret) > 0:
                #data=data.append(ret,ignore_index=True)
                data.extend(ret)
            else:
                continue
        except Exception as e:
            pqint("**ERROR:{} @ {}".format(str(e), search_quote), file=sys.stderr)
            continue
    if len(data) < 1:
        return None
    if not outTF:
        return data
    data = pd.DataFrame(data)
    # fdLst None or 'all' keeps every column; otherwise trim to colx below
    if fdLst is None:
        pass
    elif len(fdLst) > 2 and fdLst.lower() == 'all':
        pass
    else:
        colx = ['ticker', 'epochs', 'open', 'high', 'low', 'close', 'volume',
            'xclose', 'change', 'pchg', 'pbdt', 'hhmm', 'pbdate',
            'changePercent', 'prevClose', 'marketCap']
        data = subDF(data, colx)
    # data = data_output(data,output)
    return data
def getdb_udfLst(category="stock", tbname="mapping_udf_comment", pgDB=None, lang="en"):
    """
    Load the user-defined comment mapping for a category, ordered by
    threshold (thd) descending.

    Non-'en' languages read from the '<tbname>_<lang>' table.
    Returns a DataFrame, or None when inputs are missing or the query fails.
    """
    try:
        # guard clauses instead of the original all(map(...)) check
        if tbname is None or pgDB is None:
            return None
        if lang != "en":
            tbname = "{}_{}".format(tbname, lang)
        sqr = "SELECT * FROM {1} WHERE category={0!r} ORDER BY thd DESC".format(
            str(category), tbname)
        return pd.read_sql(sqr, pgDB)
    except Exception as e:
        pqint("**ERROR @ getdb_udfLst():", str(e), file=sys.stderr)
        return None
def batch_yh_hist(tkLst=[], opts=None, **optx):
    """
    Batch-pull yahoo history for a ticker list.

    opts/optx configure the per-ticker puller (default yh_hist, or the
    global named by opts['funcArg']). tkLst == ['-'] reads tickers from
    stdin. Results are either accumulated into one DataFrame (returned)
    or streamed to stdout as csv/html/json per opts['output'].

    NOTE(review): tkLst=[] is a mutable default argument — it is only
    read/reassigned here, but confirm no caller mutates it.
    """
    #- Set input parameters
    if opts is None or len(opts) < 1:
        opts, _ = opt_yh_hist([])
    if optx is not None:
        opts.update(optx)
    # keys forwarded to the per-ticker pull function
    kys = ['gap', 'ranged', 'tsTF', 'pchgTF', 'debugTF']
    debugTF = getKeyVal(opts, 'debugTF', False)
    indexTF = getKeyVal(opts, 'indexTF', True)
    output = getKeyVal(opts, 'output', None)
    sep = getKeyVal(opts, 'sep', '|')
    #for ky,va in opts.items():
    #	exec('{}=va'.format(ky))
    hdrTF = True
    if 'funcArg' in opts and opts['funcArg'] in globals():
        funcArg = globals()[opts['funcArg']]
    else:
        funcArg = yh_hist
    if len(tkLst) > 0 and tkLst[0] == '-':
        tkLst = sys.stdin.read().split("\n")
    dm = pd.DataFrame()
    for j, ticker in enumerate(tkLst):
        # header only for the first streamed chunk
        hdrTF = True if j < 1 else False
        try:
            df = funcArg(ticker, hdrTF=hdrTF, **subDict(opts, kys))
            if len(df) < 1:
                continue
            if output is None or len(output) < 1:
                dm = pd.concat([dm, df])
        except Exception as e:
            pqint("**ERROR: {}.{}\n{}".format(j, ticker, str(e)), file=sys.stderr)
            continue
        if output is not None and 'ticker' not in df:
            df['ticker'] = ticker
        if output == 'csv':
            # allow escaped separators like '\t' passed literally
            sep = sep.encode().decode(
                'unicode_escape'
            ) if sys.version_info.major == 3 else sep.decode("string_escape")
            sys.stdout.write(df.to_csv(sep=sep, index=indexTF, header=hdrTF))
        elif output == 'html':
            sys.stdout.write(df.to_html(index=indexTF))
        elif output == 'json':
            sys.stdout.write(df.to_json(orient='records'))
        hdrTF = False
    return dm
def page_report(**optx):
    """
    Web report entry: force json output and delegate to page_api().

    The 'company' subtopic is redirected to the news topic and returned as
    a full list; every other case returns just the first record when the
    result is a non-empty list.
    """
    topic, subtopic, search = getKeyVal(optx, ['topic', 'subtopic', 'search'],
        ['report', None, 'comment'])
    optx.update(search=search, topic=topic, subtopic=subtopic, output='json')
    pqint(" --page_report INPUT:\n", optx, file=sys.stderr)
    if subtopic == 'company':
        optx.update(topic='news')
    ret = page_api(**optx)
    pqint(" --page_report OUTPUT:\n", type(ret), "{}".format(ret)[:30], file=sys.stderr)
    wantFirst = isinstance(ret, list) and len(ret) > 0 and subtopic != 'company'
    return ret[0] if wantFirst else ret
def hourly_comment(tkLst, fdLst, tablename=None, lang=None, dbname="ara", hostname='localhost', output=None, start=None, end=None, topic=None, subtopic=None, **optx):
    """
    Hourly market/stock commentary from MDB.

    Table and query shape depend on the inputs:
      - no tickers: latest 3 index comments from 'hourly_report'
      - rpt_hm given or tickers given: per-report-time records from
        'mkt_briefing_details'
      - hhmm given: records matching those HHMM values
      - otherwise: single latest record
    fdLst: '*' means all fields, else comma-separated field names.
    Returns the find_mdb result (DataFrame when outTF, else list).
    """
    from _alan_str import find_mdb
    if fdLst == '*':
        field = {}
    else:
        field = set(fdLst.split(','))
    limit = 0
    instrument = getKeyVal(optx, 'instrument', 'stock')
    rpt_hm = getKeyVal(optx, 'rpt_hm', None)
    hhmm = getKeyVal(optx, 'hhmm', None)
    pqint(tablename, optx, file=sys.stderr)
    #if subtopic.lower() == 'sector': tablename="mkt_briefing_media"
    if subtopic == 'sector':
        tablename = "hourly_report"
    if len(tkLst) < 1:
        # default: latest three broad-market index comments
        jobj = {"ticker": {"$in": ["^GSPC", "^DJI", "^IXIC"]}}
        sortLst = ['rpt_time']
        field = {'ticker', 'comment', 'rpt_time', 'rpt_hm', 'rpt_status', 'title', 'label'}
        limit = 3
    elif rpt_hm is not None or len(tkLst) > 0:
        tablename = "mkt_briefing_details"
        jobj = {}
        if rpt_hm is not None:
            hmLst = [int(x) for x in rpt_hm.split(',')]
            jobj.update(rpt_hm={"$in": hmLst})
        # '$' acts as a wildcard: no ticker filter
        if len(tkLst) > 0 and tkLst[0] != '$':
            jobj.update(ticker={"$in": tkLst})
        if len(field) < 1:
            field = {"ticker", "label", "rpt_hm", "rpt_time", "cprice", "xprice", "comment", "rpt_date", "pngname"}
        sortLst = ['rpt_time']
    elif hhmm is not None:
        hmLst = [int(x) for x in hhmm.split(',')]
        jobj = {"hhmm": {"$in": hmLst}}
        sortLst = ['pbdt']
    else:
        jobj = {}
        sortLst = ['pbdt']
        limit = 1
    outTF = getKeyVal(optx, 'outTF', True)
    df, _, _ = find_mdb(jobj, tablename=tablename, dbname=dbname, field=field, sortLst=sortLst, limit=limit, dfTF=outTF)
    data = df
    # data = data_output(df,output)
    pqint("===hourly_comment():\nFind:{},Field:{},Sort:{}\n".format(jobj, field, sortLst), file=sys.stderr)
    pqint(" --tkLst: {},fdLst: {}".format(tkLst, fdLst), file=sys.stderr)
    pqint(" --from tablename: {}".format(tablename), file=sys.stderr)
    pqint(" --DF:\n{}".format(df)[:200] + "\n", file=sys.stderr)
    return data
def run_comment_hourly(df, opts=None):
    """
    Render an hourly comment from df via a jinja2 template and optionally
    draw it onto the current matplotlib figure and save an SVG.

    Template source: opts['j2ts'] string, or the file
    '<j2name>_<lang>.j2' when opts['j2name'] is set; empty template -> {}.
    Returns the jobj dict produced by generate_comment_hourly (or {}).
    """
    from _alan_str import generate_comment_hourly
    if 'ticker' not in opts:
        if 'ticker' in df.columns:
            opts['ticker'] = df['ticker'].iloc[0]
        else:
            return {}
    ohlcComboTF = opts['ohlcComboTF'] if 'ohlcComboTF' in opts else False
    jobj = {}
    # choose template: literal j2ts wins over an include of j2name file
    if opts['j2ts'] is not None and len(opts['j2ts']) > 0:
        ts = opts['j2ts']
    elif 'j2name' in opts and len(opts['j2name']) > 0:
        fj2name = "{}_{}.j2".format(opts['j2name'], opts['lang'])
        ts = '{} include "{}" {}'.format('{%', fj2name, '%}')
    else:
        ts = ''
    if len(ts) < 1:
        return {}
    try:
        jobj = generate_comment_hourly(ts, df, opts)
        ret = jobj['comment'] if 'comment' in jobj else ''
        # plotTF False: text-only mode, skip all figure work
        if 'plotTF' in opts and opts['plotTF'] is False:
            return jobj
        if len(ret) > 0:
            pqint(ret, file=sys.stdout)
        if 'plotTextTF' in opts and opts['plotTextTF']:
            # overlay the comment text on the figure; layout depends on
            # whether an OHLC combo chart is present
            bottom = 0.35 if ohlcComboTF is True else 0.25
            plt.subplots_adjust(left=0.1, bottom=bottom)
            txbtm = 0.05 if ohlcComboTF is True else 0.1
            plt.text(0.05, txbtm, ret, fontsize=11, color='yellow', fontproperties=prop, transform=plt.gcf().transFigure)
        pngname = opts['pngname']
        if pngname is not None and len(pngname) > 4:
            #pngx = pngname.split('.')[0].split('_')[0] + opts['rpt_time'].strftime("_%Y%m%d_%H%M") +'.svg'
            #if opts['pngname'] == pngx:
            #	pqint( "svg filename {} is duplicated!".format(pngx), file=sys.stderr)
            #	return jobj
            #opts['pngname'] = pngx
            outdir = opts['outdir'] if 'outdir' in opts else '.'
            pngpath = "{}/{}".format(outdir, pngname)
            sys.stderr.write("rpt_time:{},svg_path:{}\n".format(opts['rpt_time'], pngpath))
            plt.savefig(pngpath, format='svg')
        else:
            plt.show()
    except Exception as e:
        sys.stderr.write("**ERROR: @ generate_comment_hourly\n{}\n".format(str(e)))
    return jobj
def qt_result(name, lang, videoYN='0', errMsg=''):
    """
    Build the quote-page context dict for ticker `name`.

    videoYN=='1': generate/update the ticker video and return its path.
    Otherwise: pull a live yahoo quote, render the quote template, and
    surface selected fields (company, earnings, financials).
    """
    from yh_chart import yh_hist_query as yhq
    #from dateutil.parser import parse as Timestamp
    name = str(name.upper())
    videoYN = str(videoYN)
    d = dict(name=name, lang=lang, videoYN=videoYN, len=len,
        enumerate=enumerate, error_message=errMsg)
    if videoYN == '1':
        pqint("\tvideoYN:{} @ {}".format(videoYN, 'qt_result'), file=sys.stderr)
        errStr = ''
        try:
            fname = upd_video(name, lang)
        except Exception as e:
            # fixed: the except-bound name `e` is deleted when the except
            # block ends, so the original's later str(e) raised NameError;
            # capture the message while it is still in scope
            errStr = str(e)
            fname = ''
        if len(fname) < 1:
            errMsg = "**ERROR: No [{}_{}] video {} @ {}".format(
                name, lang, errStr, 'upd_video')
        d.update(error_message=errMsg, mp4path=fname)
        return d
    pqint("\tvideoYN:{} @ {}".format(videoYN, 'qt_result'), file=sys.stderr)
    try:
        qt = yhq([name], types='quote', rawTF=True, dbname=None)[0]
        pqint("===QT:\n{}".format(qt), file=sys.stderr)
    except Exception as e:
        pqint("===yh_hist_query: {}".format(str(e)), file=sys.stderr)
        qt = {}
    if 'regularMarketTime' in qt:
        qt['lastUpdate'] = datetime.datetime.fromtimestamp(
            qt['regularMarketTime']).strftime("%Y/%m/%d %H:%M:%S")
    d.update(qt)
    try:
        d['quote'] = render_template('tp_quote.html', quote=qt)
    except Exception as e:
        pqint("===render_template: {}".format(str(e)), file=sys.stderr)
    if 'longName' in qt:
        d['company'] = qt['longName']
    if 'epsTrailingTwelveMonths' in qt:
        d['earnings'] = qt['epsTrailingTwelveMonths']
    if 'trailingPE' in qt:
        d['financials'] = qt['trailingPE']
    return d
def run_comment_fcst(ticker='AAPL', label='', pgDB=None, dotSign='.', prcn=0, usdSign='', lang="cn", fp=None, ts=None, mp3YN=True):
    """
    Generate the weekly forecast comment for a ticker.

    fp: optional pre-fetched forecast row; when None it is read from the
    ohlc_fcs table (freq='W'). ts: optional template; a default Chinese
    template is chosen by instrument category when lang=='cn'.
    Returns the comment string, or '' when no forecast is available.
    """
    if fp is None:
        sqx = "SELECT * FROM ohlc_fcs WHERE ticker={0!r} and freq='W'"
        try:
            fp = getdb_ticker(ticker, sqx, pgDB).iloc[0]
        except Exception:  # fixed: was a bare except
            pqint("No forecast available!", file=sys.stderr)
            return ''
    if 'label' not in fp:
        fp['label'] = label
    if 'category' not in fp:
        fp['category'] = 'stock'
    category = fp['category']
    if ts is None and lang == "cn":
        # default Chinese templates, stock vs other instrument
        if category == 'stock':
            ts = """依據{label}股價波動狀況,預估下一週價位有七成可能{rangeWd}。{posPbWd}"""
        else:
            ts = """依據{label}波動狀況,預估下一{unitStr}有七成可能{rangeWd}。{posPbWd}"""
    #==== Performance Comment ====================
    udfLst = getdb_udfLst(category, "mapping_udf_comment", pgDB=pgDB, lang=lang)
    ret, rmp3 = generate_cmt(fp, ts=ts, dotSign=dotSign, prcn=prcn,
        usdSign=usdSign, udfLst=udfLst, lang=lang,
        funcname="generate_comment_fcst", mp3YN=mp3YN)
    return ret
def yh_data(ticker=None, gap='1m', ranged='1d', hdrTF=True, tsTF=True, pchgTF=False, debugTF=False):
    """
    Pull yahoo v8 chart data and arrange it into a DataFrame.

    Returns (dx, jTmp): the price DataFrame and the raw chart payload;
    ({}, {}) when the pull has no timestamp data.
    """
    #- PULL raw data
    jTmp = yh_rawdata(ticker=ticker, gap=gap, ranged=ranged, debugTF=debugTF)
    #- ARRANGE input data
    # get timestamp
    if 'indicators' in jTmp and 'adjclose' in jTmp['indicators']:
        adjclose = np.array(jTmp['indicators']['adjclose'][0]['adjclose'])
    if debugTF:
        sys.stderr.write(
            "===== Pulling symbol={}&interval={}&range={}...\n".format(
                ticker, gap, ranged))
    if 'timestamp' not in jTmp:
        sys.stderr.write(
            "**ERROR pulling symbol={}&interval={}&range={}...\n".format(
                ticker, gap, ranged))
        pqint(jTmp, file=sys.stderr)
        return {}, {}
    pbtimestamp = np.array(jTmp['timestamp'])
    pbdatetime = [epoch_parser(x, 1) for x in pbtimestamp]
    # build output data in datafame
    dx = pd.DataFrame(jTmp['indicators']['quote'][0])
    dx.loc[:, 'ticker'] = ticker
    # build date/time as pbdate/epochs column into datafame
    if gap[-1] == 'd':  # for daily data
        # NOTE: 'adjclose' in locals() checks whether the adjclose branch
        # above actually ran (payload had adjusted closes)
        if 'adjclose' in locals():
            dx['adjusted'] = adjclose
        dx['pbdate'] = [int(x.strftime("%Y%m%d")) for x in pbdatetime]
    else:  # for minute data
        dx['epochs'] = pbtimestamp * 1000
    # add datetime index to datafame
    if tsTF is True:
        dx.set_index(pd.DatetimeIndex(pbdatetime), inplace=True)
        dx.index.rename('date', inplace=True)
    # remove NA rows related to [close] data
    dx.dropna(subset=['close'], inplace=True)
    if pchgTF:
        dx = pchg_calc(dx, jTmp)
    return dx, jTmp
def main():
    """ API for ALAN
    Usage:
    alanapi.py [option] SYMBOL ...

    Runs in CGI mode when REQUEST_METHOD is GET/POST, otherwise parses
    CLI options and dispatches to run_alanapi(). Prints and returns the
    resulting data.
    """
    pp = myPrettyPrinter(indent=4, width=20)
    data = ''
    if os.getenv('REQUEST_METHOD') in ('GET', 'POST'):
        # WEB MODE
        data = cgi_api()
    else:
        opts, args = opt_alanapi()
        pqint((opts, args), file=sys.stderr)
        symLst = args if len(args) > 0 else None
        fdLst = opts['field']
        opts.pop('field', None)
        sys.stderr.write("===START: {}:{}:{}\n".format(symLst, fdLst, opts))
        data = run_alanapi(symLst, fdLst, **opts)
    print(data)
    return data
def run_topic_theme(dfTF=True, **opts):
    """
    Run topic_theme_<subtopic>() (falling back to topic_theme_majorplayer)
    and return either the raw DataFrame (dfTF) or a styled HTML table.

    On failure the exception message is returned as a string.
    """
    subtopic = getKeyVal(opts, 'subtopic', 'majorplayer')
    funcName = "_".join(["topic", "theme", subtopic])
    funcArg = globals().get(funcName, topic_theme_majorplayer)
    try:
        pqint(" --dfTF:{},{},funcArg:{} @{}".format(dfTF, funcArg, opts, 'run_topic_theme()'), file=sys.stderr)
        df = funcArg(**opts)
        if dfTF:
            return df
        cfm = {'marketCap': "{:,.0f}".format}
        htmlTbl = df.to_html(index=False, formatters=cfm)
        return "<style>.dataframe {text-align:right;}</style>\n" + htmlTbl
    except Exception as e:
        pqint(str(e))
        return str(e)
def eten_minute(ticker=None, pbdate=20181120, hdrTF=True, tsTF=True, debugTF=False):
    """ Get daily/minute data via eten api

    ticker: exchange code passed to the EtenDS endpoint ('' when None).
    pbdate: trade date (YYYYMMDD int) combined with each bar's HH:MM time.
    Returns a DataFrame with columns
    [open, high, low, close, volume, epochs, ticker], or {} on pull failure.
    """
    if ticker is None:
        return ''
    urx = "http://mx68t.etencorp.com:8080/EtenDS/process.php?version=1&objtype=5&extcode={}"
    url = urx.format(ticker)
    if debugTF is True:
        pqint(url, file=sys.stderr)
    try:
        jTmp = pd.read_json(url)['objectData'][0]
    except Exception as e:
        pqint(str(e), file=sys.stderr)
        return {}
    #- ARRANGE input data
    # build output data in datafame
    dx = pd.DataFrame(jTmp['data'])
    dx.loc[:, 'ticker'] = jTmp['extcode']
    # build date/time as pbdate/epochs column into datafame
    pbdatetime = [
        datetime.strptime(str(pbdate) + x, '%Y%m%d%H:%M')
        for x in dx['time'].values
    ]
    # NOTE(review): strftime('%s000') (epoch ms) is a glibc extension —
    # confirm target platform support
    dx['epochs'] = [int(x.strftime('%s000')) for x in pbdatetime]
    dx[['open', 'high', 'low', 'close', 'vol']] = dx[[
        'open', 'high', 'low', 'close', 'vol']].astype('float')
    # add datetime index to datafame
    if tsTF is True:
        dx.set_index(pd.DatetimeIndex(pbdatetime), inplace=True)
        dx.index.rename('date', inplace=True)
    # remove NA rows related to [close] data
    dx.rename(columns={'vol': 'volume'}, inplace=True)
    dx.dropna(subset=['close'], inplace=True)
    # change to prc_temp columns setup
    return dx[['open', 'high', 'low', 'close', 'volume', 'epochs', 'ticker']]
def search_list(tkLst, fdLst, **opts):
    """
    Dispatch list-style searches by topic:
      - filter/recommend/AI/market -> getlist_<topic>()
      - ytdRtn -> ytdRtn_calc()
      - peers  -> geteach_peers()
      - otherwise -> geteach_list() against 'spdr_list' (or tablename).
    """
    topic, start, subtopic, output = getKeyVal(opts,
        ['topic', 'start', 'subtopic', 'output'], [None, None, None, None])
    hostname, dbname, tablename, lang = getKeyVal(opts,
        ['hostname', 'dbname', 'tablename', 'lang'],
        ['localhost', 'ara', None, None])
    pqint("Using topic:{},subtopic:{},field:{}".format(topic, subtopic, fdLst), file=sys.stderr)
    if hasattr(subtopic, '__len__') is not True:
        subtopic = ''
    if topic in ['filter', 'recommend', 'AI', 'market']:
        # select either [getlist_filter|getlist_recommend]
        # fixed: the original only bound `id` when the key was MISSING, so
        # a supplied opts['id'] was never read and the builtin id() was
        # passed instead; read the value explicitly (avoids shadowing too)
        idx = opts.get('id', None)
        funcName = "getlist_{}".format(topic)
        if funcName in globals():
            searchListFunc = globals()[funcName]
            pqint("Applying {}: {}".format(funcName, searchListFunc), file=sys.stderr)
            data = searchListFunc(fdLst, subtopic=subtopic, start=start, id=idx)
            return data
    elif topic in ['ytdRtn']:
        from ytdRtn_calc import ytdRtn_calc, ytdOTF
        if subtopic is None or len(subtopic) < 1:
            subtopic = 'sector'
        if start is None or len(start) < 1:
            start = 20200219
        data = ytdRtn_calc(start=start, group=subtopic)
        return data
    elif topic in ['peers']:
        data = geteach_peers(tkLst, fdLst, subtopic=subtopic, output=output)
        return data
    if tablename is None:
        tablename = 'spdr_list'
    data = geteach_list(tkLst, fdLst, tablename=tablename, lang=lang, dbname=dbname, hostname=hostname)
    return data
def get_data_hist(ticker, gap='1m', ranged='1d', tsTF=True, debugTF=False, src='yh', plotTF=True):
    """
    Fetch price history for a ticker from one of three sources.

    Index-like symbols (containing '^' or '=') always use yahoo; otherwise
    src selects yahoo ('yh'/'yahoo'), 'tw', or IEX (default fallback).
    Returns the source-specific history DataFrame.
    """
    useYahoo = ('^' in ticker) or ('=' in ticker) or src in ('yahoo', 'yh')
    if useYahoo:
        src = 'yh'
        pqint("Using src={},range:{},gap:{}".format(src, ranged, gap), file=sys.stderr)
        return get_hist_yh(ticker, gap=gap, ranged=ranged, tsTF=tsTF,
            debugTF=debugTF, plotTF=plotTF)
    if src == 'tw':
        pqint("Using src={}".format(src), file=sys.stderr)
        return get_hist_tw(ticker, gap=gap, ranged=ranged, tsTF=tsTF, debugTF=debugTF)
    pqint("Using src={}".format(src), file=sys.stderr)
    return get_hist_iex(ticker, gap=gap, ranged=ranged, tsTF=tsTF, debugTF=debugTF)
def str2gtts(pfx, xstr, lang="cn", tempo=1.50, filetype="mp3", dirname="./"):
    """
    Convert text to speech by piping it into gtts-cli, optionally
    tempo-adjusted through sox, and save as <dirname>/<pfx>.<filetype>.

    lang 'cn' maps to gtts 'zh-tw'; anything else uses 'en'.
    Returns the (stdout, stderr) pair from the shell pipeline.
    """
    glang = "zh-tw" if lang == "cn" else "en"
    base = pfx.replace("_mp3", "").replace(".txt", "")
    fname = "{}/{}.{}".format(dirname, base, filetype)
    if tempo == 1.0:
        # no tempo change: write straight from gtts-cli
        xcmd = "/usr/local/bin/gtts-cli -l {} -f - -o {}".format(glang, fname)
    else:
        xcmd = "/usr/local/bin/gtts-cli -l {} -f - | sox -G -t {} - {} tempo {}".format(
            glang, filetype, fname, tempo)
    pqint("===PIPE stdin:\n{}".format(xstr), file=sys.stderr)
    pqint("===PIPE SHELL:\n{}".format(xcmd), file=sys.stderr)
    proc = subprocess.Popen(xcmd, shell=True, bufsize=1024,
        stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    out, err = proc.communicate(xstr.encode('utf-8'))
    #p.stdin.write(xstr)
    #p.communicate()[0]
    #p.stdin.close()
    return (out, err)
def run_comment_pppscf(ticker='AAPL', label='', pgDB=None, dotSign='.', prcn=0, usdSign='$', lang="cn", fp=None, ts=None):
    """
    Generate the past-price performance (pppscf) comment for a ticker.

    fp: optional pre-fetched DataFrame of ohlc_pppscf rows; when None it
    is read from the DB ordered by pbdate. Defaults are filled for
    label/freq/category before generation.
    Returns the comment, or None when generation fails.
    """
    if fp is None:
        sqx = "SELECT * FROM ohlc_pppscf WHERE ticker={0!r} ORDER BY pbdate"
        fp = getdb_ticker(ticker, sqx, pgDB)
    if 'label' not in fp:
        fp['label'] = label
    if 'freq' not in fp:
        fp['freq'] = 'D'
    if 'category' not in fp:
        fp['category'] = 'stock'
    # NOTE(review): .iloc[-1] assumes fp is a DataFrame (latest row wins);
    # a Series fp would behave differently — confirm callers
    category = fp['category'].iloc[-1]
    #==== Performance Comment ====================
    udfLst = getdb_udfLst(category, "mapping_udf_comment", pgDB=pgDB, lang=lang)
    try:
        ret = generate_comment_pppscf(fp, dotSign=dotSign, prcn=prcn,
            usdSign=usdSign, udfLst=udfLst, lang=lang, ts=ts)
    except Exception as e:
        pqint("**ERROR @ run_comment_pppscf():", str(e), file=sys.stderr)
        #pqint( fp, file=sys.stderr)
        return None
    return ret