def Import_ListOfQuotes_NZE(quotes,market='NEW ZEALAND EXCHANGE',dlg=None,x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection=ITradeConnection(cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication) if market=='NEW ZEALAND EXCHANGE': url = 'http://www.findata.co.nz/Markets/NZX/%s.htm' else: return False def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x:x, lines) def removeCarriage(s): if s[-1]=='\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines select_alpha = map(chr,range(65,91)) # A to Z count = 0 isin = '' for letter in select_alpha: if dlg: dlg.Update(x," NZX : %s to Z"%letter) try: data=connection.getDataFromUrl(url%letter) except: debug('Import_ListOfQuotes_NZE unable to connect :-(') return False # returns the data lines = splitLines(data) for line in lines: if '"hideInfo();">' in line: tickername = line[line.find('"hideInfo();">')+14:line.find('</td><td align=right>')] if not 'Index' in tickername: ticker = tickername[:tickername.index('<')] if not '0' in ticker[-1:]: name = tickername[tickername.index('<td>')+4:] count = count + 1 # ok to proceed quotes.addQuote(isin=isin,name=name, ticker=ticker,market='NEW ZEALAND EXCHANGE',currency='NZD',place='NZE',country='NZ') if itrade_config.verbose: print 'Imported %d lines from NEW ZEALAND EXCHANGE' % (count) return True
def Import_ListOfQuotes_SWX(quotes, market='SWISS EXCHANGE', dlg=None, x=0):
    # Import the list of quotes from the SIX Swiss Exchange issuers CSV
    # (French version).  Returns True on success, False on an unsupported
    # market or a download failure.
    if itrade_config.verbose:
        print 'Update %s list of symbols' % market

    connection = ITradeConnection(cookies = None,
                                  proxy = itrade_config.proxyHostname,
                                  proxyAuth = itrade_config.proxyAuthentication,
                                  connectionTimeout = itrade_config.connectionTimeout
                                  )

    if market=='SWISS EXCHANGE':
        url = 'http://www.six-swiss-exchange.com/shares/companies/download/issuers_all_fr.csv'
        try:
            data = connection.getDataFromUrl(url)
        except:
            info('Import_ListOfQuotes_SWX_%s:unable to get file name :-(' % market)
            return False
    else:
        return False

    def splitLines(buf):
        # split the download into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    # returns the data
    lines = splitLines(data)
    n = 0
    isin = ''  # ISIN is not provided by this CSV
    for line in lines[1:]:  # skip the CSV header line
        # normalise separators and flatten accented characters —
        # presumably to keep names ASCII-safe downstream; TODO confirm
        line = line.replace('!',' ')
        line = line.replace(',',' ')
        line = line.replace('à','a')
        line = line.replace('ä','a')
        line = line.replace('â','a')
        line = line.replace('ö','o')
        line = line.replace('ü','u')
        line = line.replace('é','e')
        line = line.replace('è','e')
        line = line.replace('+',' ')
        data = string.split(line,';')  # csv line
        name = data[0].strip()
        ticker = data[1].strip()
        country = data[3].strip()
        currency = data[4].strip()
        exchange = data[5].strip()
        quotes.addQuote(isin=isin,name=name,
                        ticker=ticker,market='SWISS EXCHANGE',
                        currency=currency,place=exchange,country=country)
        n = n + 1

    if itrade_config.verbose:
        print 'Imported %d lines from %s' % (n,market)

    return True
def __init__(self):
    """Set up the Yahoo Japan importer with one shared, proxy-aware connection."""
    debug('Import_yahoojp:__init__')
    proxy_host = itrade_config.proxyHostname
    proxy_auth = itrade_config.proxyAuthentication
    timeout = itrade_config.connectionTimeout
    self.m_connection = ITradeConnection(cookies=None, proxy=proxy_host,
                                         proxyAuth=proxy_auth,
                                         connectionTimeout=timeout)
def __init__(self):
    """Euronext importer: remember the download URL and open a proxy-aware connection."""
    debug('Import_euronext:__init__')
    self.m_url = 'http://www.euronext.com/tools/datacentre/dataCentreDownloadExcell.jcsv'
    cfg = itrade_config
    self.m_connection = ITradeConnection(cookies=None,
                                         proxy=cfg.proxyHostname,
                                         proxyAuth=cfg.proxyAuthentication,
                                         connectionTimeout=cfg.connectionTimeout)
def Import_ListOfQuotes_OTCBB(quotes, market='OTCBB', dlg=None, x=0):
    # Import the OTC Bulletin Board symbol file and register every issue
    # flagged ACTIVE.  Returns True on success, False otherwise.
    if itrade_config.verbose:
        print 'Update %s list of symbols' % market
    connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)

    if market == 'OTCBB':
        url = 'http://www.otcbb.com/dynamic/tradingdata/download/allotcbb.txt'
    else:
        return False

    def splitLines(buf):
        # split the download into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x: x, lines)
        def removeCarriage(s):
            if s[-1] == '\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    try:
        data = connection.getDataFromUrl(url)
    except:
        debug('Import_ListOfQuotes_OTCBB:unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    count = 0
    isin = ''  # ISIN is not provided by this feed
    for line in lines[1:]:  # skip the header line
        if '|' in line:
            data = string.split(line, '|')  # pipe-delimited record
            if data[3] == 'ACTIVE':  # field 3 is the listing status
                count = count + 1
                name = data[2]
                name = name.strip()
                name = name.replace(',', '')
                ticker = data[0]
                quotes.addQuote(isin=isin, name=name, ticker=ticker,
                                market='OTCBB', currency='USD', place='NYC',
                                country='US')

    if itrade_config.verbose:
        print 'Imported %d lines from OTCBB data.' % count

    return True
def Import_ListOfQuotes_ASX(quotes, market='ASX', dlg=None, x=0): print 'Update %s list of symbols' % market connection = ITradeConnection( cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication, connectionTimeout=itrade_config.connectionTimeout) if market == 'ASX': url = "http://www.asx.com.au/asx/research/ASXListedCompanies.csv" n = 0 else: return False def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x: x, lines) def removeCarriage(s): if s[-1] == '\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines try: data = connection.getDataFromUrl(url) except: debug('Import_ListOfQuotes_ASX:unable to connect :-(') return False # returns the data lines = splitLines(data) isin = '' for line in lines[3:]: line = line.replace('"', '') data = string.split(line, ',') name = data[0] ticker = data[1] quotes.addQuote(isin=isin, name=name, ticker=ticker, market='ASX', currency='AUD', place='SYD', country='AU') n = n + 1 if itrade_config.verbose: print 'Imported %d lines from %s data.' % (n, market) return True
def Import_ListOfQuotes_Xetra(quotes, market='FRANKFURT EXCHANGE', dlg=None, x=0):
    # Import the Xetra tradable-instruments list (info.xetra.de) and register
    # the instruments belonging to a whitelisted segment.
    print 'Update %s list of symbols' % market
    connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)

    if market == 'FRANKFURT EXCHANGE':
        url = "http://info.xetra.de/download_xetrawerte.txt"
    else:
        return False

    def splitLines(buf):
        # split the download into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x: x, lines)
        def removeCarriage(s):
            if s[-1] == '\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    #info('Import_ListOfQuotes_Xetra:connect %s to %s' % (market,url))

    try:
        data = connection.getDataFromUrl(url)
    except:
        debug('Import_ListOfQuotes_Xetra:unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    n = 0
    for line in lines[2:]:  # skip the two header lines
        data = string.split(line, ';')  # ; delimited
        if len(data) > 5:
            # field 6 is the market segment — presumably keep only the
            # main German/DAX-family segments; TODO confirm field semantics
            if data[6] in ('GER0', 'GER1', 'GER2', 'DAX1', 'MDAX1', 'SDX1', 'TDX1'):
                quotes.addQuote(isin=data[1],name=data[0].replace(',',' '),ticker=data[3],\
                                market='FRANKFURT EXCHANGE',currency='EUR',place='FRA',country='DE')
                n = n + 1

    print 'Imported %d/%d lines from %s data.' % (n, len(lines), 'Xetra')

    return True
def __init__(self):
    """BALO news reader: start with no feed loaded and a fresh HTTP connection."""
    debug('News_Balo:__init__')
    # no feed / URL / quote selected yet
    self.m_feed = None
    self.m_url = None
    self.m_quote = None
    self.m_baseurl = "balo.journal-officiel.gouv.fr"
    cfg = itrade_config
    self.m_connection = ITradeConnection(cookies=None,
                                         proxy=cfg.proxyHostname,
                                         proxyAuth=cfg.proxyAuthentication,
                                         connectionTimeout=cfg.connectionTimeout)
def __init__(self):
    # Boursorama live-update session: prepare cookies and the HTTP connection.
    debug('LiveUpdate_boursorama:__init__')
    self.m_default_host = "www.boursorama.fr"
    self.m_login_url = "https://www.boursorama.fr/logunique.phtml"
    self.m_logged = False  # becomes True after a successful login
    self.m_cookies = ITradeCookies()
    # Manualy set the cookie that tell boursorama we are a cookie aware browser
    self.m_cookies.set("SUP_COOKIE=OUI")
    # NOTE(review): the connection is created with cookies=None even though
    # m_cookies was just prepared above — confirm whether m_cookies should
    # be passed here instead.
    self.m_connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)
    debug('Boursorama login (%s) - ready to run' % self.m_default_host)
def __init__(self, market='TOKYO EXCHANGE'):
    # Yahoo Japan live-quote updater: reset clock bookkeeping and connect.
    # NOTE(review): the `market` parameter is accepted but not stored here.
    debug('LiveUpdate_yahoojp:__init__')
    self.m_connected = False
    self.m_livelock = thread.allocate_lock()  # guards live-update state
    # per-quote clock / date bookkeeping
    self.m_dateindice = {}
    self.m_clock = {}
    self.m_dcmpd = {}
    self.m_lastclock = 0
    self.m_lastdate = "20070101"
    self.m_connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)
def Import_ListOfQuotes_OTCBB(quotes,market='OTCBB',dlg=None,x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection = ITradeConnection(cookies = None, proxy = itrade_config.proxyHostname, proxyAuth = itrade_config.proxyAuthentication, connectionTimeout = itrade_config.connectionTimeout ) if market=='OTCBB': url = 'http://www.otcbb.com/dynamic/tradingdata/download/allotcbb.txt' else: return False def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x:x, lines) def removeCarriage(s): if s[-1]=='\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines try: data=connection.getDataFromUrl(url) except: debug('Import_ListOfQuotes_OTCBB:unable to connect :-(') return False # returns the data lines = splitLines(data) count = 0 isin = '' for line in lines[1:]: if '|' in line: data = string.split (line, '|') if data[3]== 'ACTIVE': count = count + 1 name = data[2] name = name.strip() name =name.replace(',','') ticker = data[0] quotes.addQuote(isin=isin,name=name,ticker=ticker,market='OTCBB',currency='USD',place='NYC',country='US') if itrade_config.verbose: print 'Imported %d lines from OTCBB data.' % count return True
def Import_ListOfQuotes_Xetra(quotes, market='FRANKFURT EXCHANGE', dlg=None, x=0):
    # Import the Deutsche Boerse tradable-instruments CSV and register the
    # German (ISIN 'DE') equities traded on segment 'EQUITIES FFM2'.
    if itrade_config.verbose:
        print 'Update %s list of symbols' % market

    connection = ITradeConnection(cookies = None,
                                  proxy = itrade_config.proxyHostname,
                                  proxyAuth = itrade_config.proxyAuthentication,
                                  connectionTimeout = itrade_config.connectionTimeout
                                  )

    if market=='FRANKFURT EXCHANGE':
        url = 'http://deutsche-boerse.com/dbag/dispatch/en/xetraCSV/gdb_navigation/trading/20_tradable_instruments/900_tradable_instruments/100_xetra'
    else:
        return False

    def splitLines(buf):
        # split the download into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    try:
        data=connection.getDataFromUrl(url)
    except:
        debug('Import_ListOfQuotes_Deutsche Borse AG :unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    n = 0
    for line in lines[6:]:  # skip the six header lines
        data = string.split (line, ';')  # ; delimited
        if len(data) >5:
            # field 8 is the trading segment, field 2 the ISIN,
            # field 73 the currency — TODO confirm against current file layout
            if data[8] == 'EQUITIES FFM2':
                if data[2][:2] == 'DE':
                    # scrub separators/spacing artefacts from the issuer name
                    name = data[1].replace(',','').replace('  ','').replace (' -','-').replace ('. ','.').replace(' + ','&').replace('+','&')
                    quotes.addQuote(isin=data[2],name=name,ticker=data[5],market='FRANKFURT EXCHANGE',currency=data[73],place='FRA',country='DE')
                    n = n + 1

    if itrade_config.verbose:
        print 'Imported %d/%d lines from %s' % (n,len(lines),market)

    return True
def Import_ListOfQuotes_ASX(quotes,market='ASX',dlg=None,x=0): print 'Update %s list of symbols' % market connection = ITradeConnection(cookies = None, proxy = itrade_config.proxyHostname, proxyAuth = itrade_config.proxyAuthentication, connectionTimeout = itrade_config.connectionTimeout ) if market=='ASX': url = "http://www.asx.com.au/asx/research/ASXListedCompanies.csv" n = 0 else: return False def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x:x, lines) def removeCarriage(s): if s[-1]=='\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines try: data=connection.getDataFromUrl(url) except: debug('Import_ListOfQuotes_ASX:unable to connect :-(') return False # returns the data lines = splitLines(data) isin = '' for line in lines[3:]: line = line.replace('"','') data = string.split (line, ',') name=data[0] ticker=data[1] quotes.addQuote(isin = isin,name = name, \ ticker = ticker,market='ASX',currency='AUD',place='SYD',country='AU') n = n + 1 if itrade_config.verbose: print 'Imported %d lines from %s data.' % (n,market) return True
def __init__(self):
    """Prepare the Yahoo Japan importer: a single shared HTTP connection."""
    debug('Import_yahoojp:__init__')
    settings = {
        'cookies': None,
        'proxy': itrade_config.proxyHostname,
        'proxyAuth': itrade_config.proxyAuthentication,
        'connectionTimeout': itrade_config.connectionTimeout,
    }
    self.m_connection = ITradeConnection(**settings)
def Import_ListOfQuotes_NYSE(quotes, market='NYSE', dlg=None, x=0):
    # Import the NYSE symbol file and register US common-stock issues,
    # rebuilding each ISIN from the CUSIP found in the feed.
    print 'Update %s list of symbols' % market
    connection = ITradeConnection(cookies = None,
                                  proxy = itrade_config.proxyHostname,
                                  proxyAuth = itrade_config.proxyAuthentication,
                                  connectionTimeout = itrade_config.connectionTimeout
                                  )

    if market=='NYSE':
        url = "http://www.nysedata.com/nysedata/asp/download.asp?s=txt&prod=symbols"
    else:
        return False

    def splitLines(buf):
        # split the download into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    try:
        data=connection.getDataFromUrl(url)
    except:
        debug('Import_ListOfQuotes_NYSE:unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    for line in lines:
        data = string.split (line, '|')  # pipe-delimited record
        if len(data)==5:
            # decompose the CUSIP (field 1) via the project helper
            country,issuer,issue = extractCUSIP(data[1])
            # issue code '10' — presumably ordinary shares; TODO confirm
            if issue=='10':
                #print data[1],country,issuer,issue,data[2]
                if country=='US':
                    isin = buildISIN(country,data[1])
                    name = filterName(data[2])
                    quotes.addQuote(isin=isin,name=name,ticker=data[0],market='NYSE',currency='USD',place='NYC',country='US')

    print 'Imported %d lines from NYSE data.' % len(lines)

    return True
def Import_ListOfQuotes_ASX(quotes, market='ASX', dlg=None, x=0):
    # Import ASX ordinary shares (with ISIN) from the ISIN.xls download,
    # which despite its extension is a tab-delimited text file.
    print 'Update %s list of symbols' % market
    connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)

    if market == 'ASX':
        url = "http://www.asx.com.au/programs/ISIN.xls"  # is actually tab delimited
    else:
        return False

    def splitLines(buf):
        # split the download into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x: x, lines)
        def removeCarriage(s):
            if s[-1] == '\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    try:
        data = connection.getDataFromUrl(url)
    except:
        debug('Import_ListOfQuotes_ASX:unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    n = 0
    for line in lines[5:]:  # skip header lines (PeterMills> 2007-06-22)
        data = string.split(line, '\t')  # tab delimited
        if data[2] == 'ORDINARY FULLY PAID':  # only want ordinary shares
            quotes.addQuote(isin=data[3],name=data[1].replace(',',' '), \
                            ticker=data[0],market='ASX',currency='AUD',place='SYD',country='AU')
            n = n + 1

    print 'Imported %d/%d lines from ASX data.' % (n, len(lines))

    return True
def checkNewRelease(ping=False):
    # Compare the local svn revision against the latest official revision
    # published at `softwareLatest`.
    # Returns: 'dev' (development build), 'err' (fetch/parse problem),
    # 'ok' (up to date) or the download URL when an update is available.
    # just to test : remove '#' from the line just below
    #__svnversion__ = 'r565'

    # development release : do not test
    if not ping and __svnversion__[0] == 'x':
        if verbose:
            print 'checkNewRelease(): development release'
        return 'dev'

    from itrade_connection import ITradeConnection
    connection = ITradeConnection(cookies = None,
                                  proxy = proxyHostname,
                                  proxyAuth = proxyAuthentication,
                                  connectionTimeout = 3  # short timeout: this is a best-effort check
                                  )

    # get OFFICIAL file from svn
    try:
        latest=connection.getDataFromUrl(softwareLatest)
    except IOError:
        print 'checkNewRelease(): exeption getting OFFICIAL file'
        return 'err'

    # official revision is expected to look like 'r<number>'
    if latest[0]!='r':
        if verbose:
            print 'checkNewRelease(): OFFICIAL file malformed'
        return 'err'

    # development release : do not test
    if __svnversion__[0] == 'x':
        if verbose:
            print 'checkNewRelease(): development release (ping)'
        return 'dev'

    current = int(__svnversion__[1:])
    latest = int(latest[1:])
    #print current,latest
    if current<latest:
        print 'checkNewRelease(): please update (%d vs %d) : %s' % (current,latest,downloadURL)
        return downloadURL
    else:
        print 'checkNewRelease(): up to date'
        return 'ok'
def get(self, curTo, curFrom):
    """Fetch and return the conversion rate curFrom -> curTo, or None when
    offline, on 'N/A' currencies, or on any download failure."""
    if not itrade_config.isConnected():
        return None
    if curFrom == 'N/A' or curTo == 'N/A':
        return None

    # lazily create the shared connection
    # FIX(idiom): compare to None with 'is', matching the sibling
    # implementation of this method elsewhere in the project.
    if self.m_connection is None:
        self.m_connection = ITradeConnection(cookies=None,
                                             proxy=itrade_config.proxyHostname,
                                             proxyAuth=itrade_config.proxyAuthentication,
                                             connectionTimeout=itrade_config.connectionTimeout)
        #print "**** Create Currency Connection"

    # pence : map sub-unit symbols to the main currency symbol
    # FIX(idiom): membership test on the dict directly, not .keys()
    if curFrom in self._s1:
        a = self._s1[curFrom]
    else:
        a = curFrom
    if curTo in self._s1:
        b = self._s1[curTo]
    else:
        b = curTo

    # get data
    url = self.m_url % (a, b)
    try:
        buf = self.m_connection.getDataFromUrl(url)
    except:
        return None

    # extract data : CSV answer, rate is the second field
    #print url,buf
    sdata = string.split(buf, ',')
    f = float(sdata[1])

    # pence : rescale sub-unit currencies
    if curFrom in self._s2:
        f = f / self._s2[curFrom]
    if curTo in self._s2:
        f = f * self._s2[curTo]

    #print 'get: %s %s rate = %.4f' %(curTo,curFrom,float(sdata[1]))
    return self.update(curTo, curFrom, f)
def __init__(self):
    """Importer for Euronext data: target URL plus one shared connection."""
    debug('Import_euronext:__init__')
    self.m_url = 'http://www.euronext.com/tools/datacentre/dataCentreDownloadExcell.jcsv'
    conn_kwargs = dict(cookies=None,
                       proxy=itrade_config.proxyHostname,
                       proxyAuth=itrade_config.proxyAuthentication,
                       connectionTimeout=itrade_config.connectionTimeout)
    self.m_connection = ITradeConnection(**conn_kwargs)
def __init__(self):
    """Euronext (NYX) historical-prices importer: URL + proxy-aware connection."""
    debug('Import_euronext:__init__')
    self.m_url = 'https://europeanequities.nyx.com/nyx_eu_listings/price_chart/download_historical'
    cfg = itrade_config
    self.m_connection = ITradeConnection(cookies=None,
                                         proxy=cfg.proxyHostname,
                                         proxyAuth=cfg.proxyAuthentication,
                                         connectionTimeout=cfg.connectionTimeout)
def __init__(self):
    """Euronext bonds importer: download URL plus one shared connection."""
    debug('Import_euronext_bonds:__init__')
    self.m_url = 'https://bonds.nyx.com/en/popup/data/download?ml=nyx_pd_bonds&cmd=default&formKey=nyx_pd_filter_values%3Acddaabb01525e7a66f50cd7f51ece3dd'
    conn_kwargs = dict(cookies=None,
                       proxy=itrade_config.proxyHostname,
                       proxyAuth=itrade_config.proxyAuthentication,
                       connectionTimeout=itrade_config.connectionTimeout)
    self.m_connection = ITradeConnection(**conn_kwargs)
def Import_ListOfQuotes_ASX(quotes, market='ASX', dlg=None, x=0):
    # Import ASX ordinary shares (with ISIN) from the ISIN.xls download,
    # which despite its extension is a tab-delimited text file.
    print 'Update %s list of symbols' % market
    connection = ITradeConnection(cookies = None,
                                  proxy = itrade_config.proxyHostname,
                                  proxyAuth = itrade_config.proxyAuthentication,
                                  connectionTimeout = itrade_config.connectionTimeout
                                  )

    if market=='ASX':
        url = "http://www.asx.com.au/programs/ISIN.xls"  # is actually tab delimited
    else:
        return False

    def splitLines(buf):
        # split the download into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    try:
        data=connection.getDataFromUrl(url)
    except:
        debug('Import_ListOfQuotes_ASX:unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    n = 0
    for line in lines[5:]:  # skip header lines (PeterMills> 2007-06-22)
        data = string.split (line, '\t')  # tab delimited
        if data[2]=='ORDINARY FULLY PAID':  # only want ordinary shares
            quotes.addQuote(isin=data[3],name=data[1].replace(',',' '), \
                            ticker=data[0],market='ASX',currency='AUD',place='SYD',country='AU')
            n = n + 1

    print 'Imported %d/%d lines from ASX data.' % (n,len(lines))

    return True
def __init__(self, market='EURONEXT'):
    # Euronext live-quote updater: reset clock bookkeeping and connect.
    debug('LiveUpdate_Euronext:__init__')
    self.m_connected = False
    self.m_livelock = thread.allocate_lock()  # guards live-update state
    self.m_data = None  # last downloaded payload
    # per-quote clock / date bookkeeping
    self.m_clock = {}
    self.m_dcmpd = {}
    self.m_lastclock = 0
    self.m_lastdate = "20070101"
    self.m_market = market
    self.m_url = 'http://www.euronext.com/tools/datacentre/dataCentreDownloadExcell.jcsv'
    self.m_connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)
def __init__(self):
    # Boursorama news reader: one URL template per news category
    # (0=company news, 1=press releases, 2=calendar, 3=advice).
    debug('News_Boursorama:__init__')
    self.m_feed = None
    self.m_url = None
    self.m_quote = None
    self.m_baseurl = {}
    self.m_host = "www.boursorama.com"
    self.m_conn = None
    self.m_baseurl[0] = "http://www.boursorama.com/infos/actualites/actu_societes_code.phtml?symbole=1rP%s"
    self.m_baseurl[1] = "http://www.boursorama.com/communique/communique_code.phtml?symbole=1rP%s"
    self.m_baseurl[2] = "http://www.boursorama.com/infos/calendrier_code.phtml?symbole=1rP%s"
    self.m_baseurl[3] = "http://www.boursorama.com/conseils/conseils_index_code.phtml?symbole=1rP%s"
    # printable-version URL used to build per-article links
    self.m_baselink = "http://www.boursorama.com/infos/imprimer_news.phtml?news=%s"
    self.m_connection = ITradeConnection(cookies = None,
                                         proxy = itrade_config.proxyHostname,
                                         proxyAuth = itrade_config.proxyAuthentication,
                                         connectionTimeout = itrade_config.connectionTimeout
                                         )
def __init__(self):
    """Initialise the BALO news source with an idle feed and a fresh connection."""
    debug('News_Balo:__init__')
    # nothing selected yet
    self.m_feed = self.m_url = self.m_quote = None
    self.m_baseurl = "balo.journal-officiel.gouv.fr"
    self.m_connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)
def Import_ListOfQuotes_NASDAQ(quotes, market='NASDAQ', dlg=None, x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection = ITradeConnection( cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication, connectionTimeout=itrade_config.connectionTimeout) if market == 'NYSE': url = 'http://www.nasdaq.com/screening/companies-by-industry.aspx?&exchange=nyse&render=download' elif market == 'NASDAQ': url = 'http://www.nasdaq.com/screening/companies-by-industry.aspx?&exchange=nasdaq&render=download' elif market == 'AMEX': url = 'http://www.nasdaq.com/screening/companies-by-name.aspx?&exchange=amex&render=download' else: return False try: data = urllib.urlopen(url) #data=connection.getDataFromUrl(url) except: debug('Import_ListOfQuotes_NASDAQ:unable to connect :-(') return False reader = csv.reader(data, delimiter=',') count = -1 isin = '' # returns the data for line in reader: count = count + 1 if count > 0: name = line[1] name = name.strip() name = name.replace(',', '').replace('"', '"').replace(''', "'") ticker = line[0] ticker = ticker.strip() ticker = ticker.replace('/', '-').replace('^', '-P') quotes.addQuote(isin=isin, name=name, ticker=ticker, market=market, currency='USD', place='NYC', country='US') if itrade_config.verbose: print 'Imported %d lines from NASDAQ data.' % count return True
def __init__(self, market='TOKYO EXCHANGE'):
    """Live updater for Yahoo Japan quotes: reset clocks and open a connection."""
    debug('LiveUpdate_yahoojp:__init__')
    # connection / locking state
    self.m_connected = False
    self.m_livelock = thread.allocate_lock()
    # per-quote clock bookkeeping
    self.m_dateindice = {}
    self.m_clock = {}
    self.m_dcmpd = {}
    self.m_lastclock = 0
    self.m_lastdate = "20070101"
    cfg = itrade_config
    self.m_connection = ITradeConnection(cookies=None,
                                         proxy=cfg.proxyHostname,
                                         proxyAuth=cfg.proxyAuthentication,
                                         connectionTimeout=cfg.connectionTimeout)
def __init__(self):
    # Boursorama live-update session: prepare cookies and the HTTP connection.
    debug("LiveUpdate_boursorama:__init__")
    self.m_default_host = "www.boursorama.fr"
    self.m_login_url = "https://www.boursorama.fr/logunique.phtml"
    self.m_logged = False  # becomes True after a successful login
    self.m_cookies = ITradeCookies()
    # Manualy set the cookie that tell boursorama we are a cookie aware browser
    self.m_cookies.set("SUP_COOKIE=OUI")
    # NOTE(review): cookies=None is passed although m_cookies was just
    # prepared — confirm whether m_cookies should be handed to the connection.
    self.m_connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout,
    )
    debug("Boursorama login (%s) - ready to run" % self.m_default_host)
def get(self, curTo, curFrom):
    """Fetch and return the conversion rate curFrom -> curTo, or None when
    offline, on 'N/A' currencies, or on any download failure."""
    if not itrade_config.isConnected():
        return None
    if curFrom == 'N/A' or curTo == 'N/A':
        return None

    # lazily create the shared connection
    if self.m_connection is None:
        self.m_connection = ITradeConnection(cookies=None,
                                             proxy=itrade_config.proxyHostname,
                                             proxyAuth=itrade_config.proxyAuthentication,
                                             connectionTimeout=itrade_config.connectionTimeout)
        #print "**** Create Currency Connection"

    # pence : map sub-unit symbols to the main currency symbol
    # FIX(idiom): membership test on the dict directly, not .keys()
    if curFrom in self._s1:
        a = self._s1[curFrom]
    else:
        a = curFrom
    if curTo in self._s1:
        b = self._s1[curTo]
    else:
        b = curTo

    # get data
    url = self.m_url % (a, b)
    try:
        buf = self.m_connection.getDataFromUrl(url)
    except:
        return None

    # extract data : CSV answer, rate is the second field
    #print url,buf
    sdata = string.split(buf, ',')
    f = float(sdata[1])

    # pence : rescale sub-unit currencies
    if curFrom in self._s2:
        f = f / self._s2[curFrom]
    if curTo in self._s2:
        f = f * self._s2[curTo]

    #print 'get: %s %s rate = %.4f' %(curTo,curFrom,float(sdata[1]))
    return self.update(curTo, curFrom, f)
def __init__(self, market='EURONEXT'):
    """Live quote updater for Euronext: reset clock/state and connect."""
    debug('LiveUpdate_Euronext:__init__')
    self.m_connected = False
    self.m_livelock = thread.allocate_lock()
    self.m_data = None
    # per-quote clock bookkeeping
    self.m_clock = {}
    self.m_dcmpd = {}
    self.m_lastclock = 0
    self.m_lastdate = "20070101"
    self.m_market = market
    self.m_url = 'http://www.euronext.com/tools/datacentre/dataCentreDownloadExcell.jcsv'
    cfg = itrade_config
    self.m_connection = ITradeConnection(cookies=None, proxy=cfg.proxyHostname,
                                         proxyAuth=cfg.proxyAuthentication,
                                         connectionTimeout=cfg.connectionTimeout)
def __init__(self, market='EURONEXT'):
    # Euronext bonds live-quote updater: reset clock bookkeeping and connect.
    debug('LiveUpdate_Euronext_bonds:__init__')
    self.m_connected = False
    self.m_livelock = thread.allocate_lock()  # guards live-update state
    self.m_data = None  # last downloaded payload
    # per-quote clock / date bookkeeping
    self.m_clock = {}
    self.m_dateindice = {}
    self.m_dcmpd = {}
    self.m_lastclock = 0
    self.m_lastdate = "20070101"
    self.m_market = market
    self.m_url = 'https://bonds.nyx.com/fr/nyx_eu_listings/real-time/quote?_=&'
    self.m_connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)
def Import_ListOfQuotes_IE(quotes, market='IRISH EXCHANGE', dlg=None, x=0):
    # Import the Irish Stock Exchange equity list.  Each record arrives as
    # five consecutive <td class="equityName"> cells (SEDOL, ISIN, name,
    # instrument type, ticker); a small state machine below collects them.
    print 'Update %s list of symbols' % market
    connection = ITradeConnection(cookies = None,
                                  proxy = itrade_config.proxyHostname,
                                  proxyAuth = itrade_config.proxyAuthentication,
                                  connectionTimeout = itrade_config.connectionTimeout
                                  )

    if market=='IRISH EXCHANGE':
        url = "http://www.ise.ie/app/equityList.asp?list=full&type=SEDOL&exportTo=text"  # is actually XML file
    else:
        return False

    def splitLines(buf):
        # split the download into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    try:
        data=connection.getDataFromUrl(url)
    except:
        debug('Import_ListOfQuotes_IE:unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    count = 0
    nlines = 0
    # record-in-progress: fields are filled one cell at a time, in order
    sedol = ''
    isin = ''
    name = ''
    inst = ''
    tick = ''
    for line in lines[1:]:
        # extract data
        sp1 = re.search('<td class="equityName">',line,re.IGNORECASE|re.MULTILINE)
        if sp1:
            sp1 = sp1.end()
            sp2 = re.search('</td>',line[sp1:],re.IGNORECASE|re.MULTILINE)
            if sp2:
                sp2 = sp2.start()
                data = line[sp1:]
                data = data[:sp2]
                data = data.strip()
                data = data.upper()
                print data

                # fill the next field
                if sedol=='':
                    sedol = data
                elif isin=='':
                    isin = data
                elif name=='':
                    name = data
                elif inst=='':
                    inst = data
                else:
                    tick = data

                    # ok to proceed
                    # NOTE(review): replace('&','&') is a no-op — looks like a
                    # mangled '&amp;' entity un-escape; confirm against upstream.
                    name = name.replace('&','&')
                    name = name.replace(',',' ')
                    if inst[0:3]=='ORD':
                        # only want ordinary shares
                        quotes.addQuote(isin=isin,name=name, \
                                        ticker=tick,market='IRISH EXCHANGE',currency='EUR',place='DUB',country='IE')
                        count = count + 1

                    # reset for next value
                    sedol = ''
                    isin = ''
                    name = ''
                    inst = ''
                    tick = ''
                    nlines = nlines + 1

    print 'Imported %d/%d lines from IRISH EXCHANGE data.' % (count,nlines)

    return True
def euronext_InstrumentId(quote): deprecated # if quote.list() == QLIST_INDICES: urlid = "http://www.euronext.com/quicksearch/resultquicksearchindices-7000-EN.html?matchpattern=%s&fromsearchbox=true&path=/quicksearch&searchTarget=quote" else: urlid = "http://www.euronext.com/quicksearch/resultquicksearch-2986-EN.html?matchpattern=%s&fromsearchbox=true&path=/quicksearch&searchTarget=quote" connection = ITradeConnection( cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication, connectionTimeout=itrade_config.connectionTimeout, ) # get instrument ID IdInstrument = quote.get_pluginID() if IdInstrument == None: try: f = open(os.path.join(itrade_config.dirCacheData, "%s.id" % quote.key()), "r") IdInstrument = f.read().strip() f.close() # print "euronext_InstrumentId: get id from file for %s " % quote.isin() except IOError: # print "euronext_InstrumentId: can't get id file for %s " % quote.isin() pass if IdInstrument == None: url = urlid % quote.isin() if itrade_config.verbose: print "euronext_InstrumentId: urlID=%s " % url try: buf = connection.getDataFromUrl(url) except: print "euronext_InstrumentId: %s exception error" % url return None sid = re.search( "selectedMep=%d&idInstrument=\d*&isinCode=%s" % (euronext_place2mep(quote.place()), quote.isin()), buf, re.IGNORECASE | re.MULTILINE, ) if sid: sid = buf[sid.start() : sid.end()] # print'seq-1 found:',sid sexch = re.search("&isinCode", sid, re.IGNORECASE | re.MULTILINE) if sexch: IdInstrument = sid[31 : sexch.start()] # print 'seq-2 found:',IdInstrument else: print "euronext_InstrumentId: seq-2 not found : &isinCode" else: print "euronext_InstrumentId: seq-1 not found : selectedMep=%d&idInstrument=\d*&isinCode=%s" % ( euronext_place2mep(quote.place()), quote.isin(), ) # print buf # exit(0) if IdInstrument == None: print "euronext_InstrumentId:can't get IdInstrument for %s " % quote.isin() return None else: if itrade_config.verbose: print "euronext_InstrumentId: IdInstrument for %s is 
%s" % (quote.isin(), IdInstrument) quote.set_pluginID(IdInstrument) try: f = open(os.path.join(itrade_config.dirCacheData, "%s.id" % quote.key()), "w") f.write("%s" % IdInstrument) f.close() except IOError: # print "euronext_InstrumentId: can't write id file for %s " % quote.isin() pass return IdInstrument
def Import_ListOfQuotes_LSE(quotes, market='LSE SETS', dlg=None, x=0):
    # Import LSE securities (SETS / SETSmm / SEAQ) from the official Excel
    # files.  The header row is located dynamically by column title, then
    # every following non-empty row is registered.
    print 'Update %s list of symbols' % market
    connection = ITradeConnection(cookies = None,
                                  proxy = itrade_config.proxyHostname,
                                  proxyAuth = itrade_config.proxyAuthentication,
                                  connectionTimeout = itrade_config.connectionTimeout
                                  )
    import xlrd

    if market=='LSE SETS':
        url = "http://www.londonstockexchange.com/NR/rdonlyres/1BCC9E48-6846-411B-8F2A-06F1DD17EB22/0/ListofSETSsecurities.xls"
    elif market=='LSE SETSmm':
        url = "http://www.londonstockexchange.com/NR/rdonlyres/6B92591A-68A2-4715-8333-F28FD517AB27/0/ListofSETSmmsecurities.xls"
    elif market=='LSE SEAQ':
        url = "http://www.londonstockexchange.com/NR/rdonlyres/9731A6AB-B60B-469F-BE6B-A721247CE76C/0/ListofSEAQsecurities.xls"
    else:
        return False

    def splitLines(buf):
        # split into non-empty lines without a trailing '\r'
        # NOTE(review): unused in this function — the data is parsed via xlrd
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    info('Import_ListOfQuotes_LSE_%s:connect to %s' % (market,url))

    try:
        data = connection.getDataFromUrl(url)
    except:
        info('Import_ListOfQuotes_LSE_%s:unable to connect :-(' % market)
        return False

    #print data[:250]

    # returns the data : parse the Excel workbook (first sheet)
    book = itrade_excel.open_excel(file=None,content=data)
    sh = book.sheet_by_index(0)

    n = 0
    indice = {}  # maps column title -> column index, filled from the header row

    print 'Import_ListOfQuotes_LSE_%s:' % market,'book',book,'sheet',sh,'nrows=',sh.nrows

    for line in range(sh.nrows):
        if sh.cell_type(line,1) != xlrd.XL_CELL_EMPTY:
            if n==0:
                # first non-empty row: record the column titles
                for i in range(sh.ncols):
                    val = sh.cell_value(line,i)
                    indice[val] = i
                    # be sure we have detected the title
                    if val=='ISIN':
                        n = n + 1
                if n==1:
                    if itrade_config.verbose:
                        print 'Indice:',indice
                    iISIN = indice['ISIN']
                    iName = indice['Short Name']
                    iCurrency = indice['Currency']
                    iCountry = indice['Country of Register']
                    iTicker = indice['Mnemonic']
            else:
                ticker = sh.cell_value(line,iTicker)
                if type(ticker)==float:
                    # numeric cells come back from xlrd as float
                    ticker='%s' % ticker
                #print line,iTicker,ticker,type(ticker)
                if ticker[-1:]=='.':
                    ticker = ticker[:-1]
                name = sh.cell_value(line,iName).replace(',',' ')
                #print line,'>',sh.cell_value(line,iISIN),' : ',name,ticker
                quotes.addQuote(isin=sh.cell_value(line,iISIN),name=name, \
                                ticker=ticker,market=market,\
                                currency=sh.cell_value(line,iCurrency),place='LON',\
                                country=sh.cell_value(line,iCountry))
                n = n + 1

    print 'Imported %d/%d lines from %s data.' % (n,sh.nrows,market)

    return True
class News_Boursorama(object):
    # News backend scraping boursorama.com pages (news, press releases,
    # agenda, advice) for a quote and exposing them as a feed of _FeedEntry.

    def __init__(self):
        debug('News_Boursorama:__init__')
        self.m_feed = None      # last feed built by feed()
        self.m_url = None       # last URL fetched
        self.m_quote = None     # quote currently served (set by feedQuote)
        self.m_baseurl = {}     # page kind (0..3) -> URL template ('%s' = ticker)
        self.m_host = "www.boursorama.com"
        self.m_conn = None
        self.m_baseurl[0] = "http://www.boursorama.com/infos/actualites/actu_societes_code.phtml?symbole=1rP%s"
        self.m_baseurl[1] = "http://www.boursorama.com/communique/communique_code.phtml?symbole=1rP%s"
        self.m_baseurl[2] = "http://www.boursorama.com/infos/calendrier_code.phtml?symbole=1rP%s"
        self.m_baseurl[3] = "http://www.boursorama.com/conseils/conseils_index_code.phtml?symbole=1rP%s"
        # printable-view URL template used to build entry links ('%s' = news id)
        self.m_baselink = "http://www.boursorama.com/infos/imprimer_news.phtml?news=%s"
        self.m_connection = ITradeConnection(cookies = None,
                                   proxy = itrade_config.proxyHostname,
                                   proxyAuth = itrade_config.proxyAuthentication,
                                   connectionTimeout = itrade_config.connectionTimeout
                                   )

    # ---[ protected interface ] ---
    def getURL(self):
        return self.m_url

    def getFeed(self):
        return self.m_feed

    def getQuote(self):
        return self.m_quote

    def splitLines(self,buf):
        # One match per news row: 'dd/mm/yyyy</td> <td></td> ... </td>'
        p = re.compile(r"\d\d/\d\d/\d\d\d\d</td>[ \t\n\r]*<td></td>[ \t\n\r]*.*</td>", re.IGNORECASE|re.MULTILINE)
        return p.findall(buf)

    def getdata(self,url):
        # Fetch url and decode from the site's latin-1 charset.
        # Returns a unicode buffer, or None on any connection error.
        debug("News_Boursorama:getdata: url=%s ",url)
        try:
            buf = self.m_connection.getDataFromUrl(url)
        except:
            debug('News_Boursorama:unable to connect :-(')
            return None
        buf = unicode(buf,'iso-8859-1','strict')
        return buf

    def feed(self,url):
        # Build and return a feed object (feed.entries) from the page at url.
        self.m_url = url
        self.m_feed = _FeedEntry()
        self.m_feed.entries = []
        self.m_feed.feed = _FeedEntry()
        self.m_feed.feed.title = 'Boursorama: ' + self.m_quote.ticker()
        info('Boursorama News refresh %s',self.m_url)
        buf = self.getdata(url)
        iter = self.splitLines(buf)
        #print iter
        for eachLine in iter:
            # each row starts with its date: 'dd/mm/yyyy'
            sdate = time.strptime(eachLine[0:10], "%d/%m/%Y")
            #print '%s -> %s' % (eachLine[0:10],sdate)
            snum = re.search(r'news=\d*', eachLine, re.IGNORECASE|re.MULTILINE)
            if snum:
                snum = snum.group()[5:]     # strip the 'news=' prefix
                # title = text between '>' and '<' of the anchor element
                stitle = re.search(r'<a.*>.*</a>', eachLine, re.IGNORECASE|re.MULTILINE)
                if stitle:
                    stitle = stitle.group()
                    stitle = re.search(r'>.*<', stitle, re.IGNORECASE|re.MULTILINE)
                    if stitle:
                        stitle = stitle.group()[1:-1]
                entry = _FeedEntry()
                # 'boursorama::' prefix routes the link back to goto() later
                entry.link = ('boursorama::%s' % self.m_baselink) % snum
                entry.title = stitle
                entry.date = sdate
                entry.summary = ""
                entry.source = "boursorama"
                self.m_feed.entries.append(entry)
        return self.m_feed

    def goto(self,html,url):
        # Render the printable news page at url into the html widget,
        # or print it to stdout when no widget is supplied.
        if html:
            html.paint0()
        info('goto %s',url)
        buf = self.getdata(url)
        #print buf
        if not buf:
            if html:
                html.paint_NC()
            else:
                print 'unable to connect'
            return
        # extract the headline row (may be absent -> empty title)
        title = re.search(r'<tr>[ \t\n\r]+<td.*</font></td>[ \t\n\r]+</tr>', buf, re.IGNORECASE|re.MULTILINE|re.DOTALL)
        if title:
            title = title.group()
        else:
            title = ''
        # article body: first row up to the closing table tag
        buf = re.search(r'<tr>[ \t\n\r]*<td>.*</table>', buf, re.IGNORECASE|re.MULTILINE|re.DOTALL)
        if buf:
            buf = buf.group()[:-8]      # drop the trailing '</table>'
            #print '----------------('
            #print buf
            #print ')----------------'
            page = '<html><meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"><body>' + "<br><a href=':back'>%s</a><H3>" % message('backtolist') + title + "</H3>" + buf + "<br><br><a href=':back'>%s</a>" % message('backtolist') + "</body></html>"
            if html:
                html.SetPageWithoutCache(page)
            else:
                print page
        else:
            if html:
                html.paint_NC()
            else:
                print 'empty'

    # ---[ public interface ] ---
    def feedQuote(self,quote,lang=None,page=0):
        # Fetch the feed for 'quote'; page selects the m_baseurl kind (0..3).
        self.m_quote = quote
        if lang==None:
            lang = self.m_quote.country()
        return self.feed(self.m_baseurl[page] % self.m_quote.ticker())
def Import_ListOfQuotes_SAO(quotes,market='SAO PAULO EXCHANGE',dlg=None,x=0):
    # Download the Bovespa 'Securities_Traded.zip' fixed-width file and
    # register every round-lot equity (BDI code '002') into 'quotes'.
    #
    # Returns True on success, False on unknown market or download failure.
    # Side effects: writes then deletes 'Securities_Traded.zip' in the CWD.
    if itrade_config.verbose:
        print 'Update %s list of symbols' % market
    connection=ITradeConnection(cookies=None,
                                proxy=itrade_config.proxyHostname,
                                proxyAuth=itrade_config.proxyAuthentication)

    if market == 'SAO PAULO EXCHANGE':
        url ='http://www.bmfbovespa.com.br/suplemento/ExecutaAcaoDownload.asp?arquivo=Securities_Traded.zip'
        currency = 'BRL'
        place = 'SAO'
        country = 'BR'
    else:
        return False

    def splitLines(buf):
        # split a text buffer into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    info('Import_ListOfQuotes_BOVESPA_%s:connect to %s' % (market,url))

    try:
        # download to CWD, then read the fixed-width member file from the zip
        urllib.urlretrieve(url,'Securities_Traded.zip')
        zfile = zipfile.ZipFile('Securities_Traded.zip')
        data = zfile.read('SECURITIES_TRADED.TXT')
    except:
        # NOTE(review): if urlretrieve succeeded but the zip is bad, the
        # partial 'Securities_Traded.zip' stays behind in the CWD
        debug('Import_ListOfQuotes_BOVESPA:unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    n = 0

    # fixed-width records: '01' = issuer record, '02' = security record;
    # an '02' row uses the name captured from the preceding '01' row
    for line in lines:
        record_type = line[0:2]
        if record_type == '01':
            name = line[6:66]
            short_name = line[66:78]
            short_name = short_name.strip()
        elif record_type == '02':
            ticker = line[2:14]
            ticker = ticker.strip()
            bdi_code = line[18:21]          # '002' = standard (round) lot
            isin = line[81:93]
            market_code = line[108:111]
            specific_code = line[133:136]   # share class suffix (ON/PN/...)
            specific_code = specific_code.strip()
            if bdi_code == '002':
                n= n + 1
                quotes.addQuote(isin = isin,name = short_name+'-'+specific_code,ticker = ticker,market = market,currency = currency,place = place,country = country)

    if itrade_config.verbose:
        print 'Imported %d lines from %s data.' % (n,market)

    # clean up the downloaded archive
    zfile.close()
    os.remove('Securities_Traded.zip')

    return True
def Import_ListOfQuotes_MEX(quotes,market='MEXICO EXCHANGE',dlg=None,x=0):
    # Scrape the Bolsa Mexicana de Valores security selector (JSF pages,
    # session cookie + 28 POSTed result pages) and register every
    # ticker/series combination into 'quotes'.
    #
    # NOTE(review): dlg.Update() is called without an 'if dlg:' guard below,
    # so calling this importer with the default dlg=None raises — confirm
    # all callers pass a dialog.
    #
    # Returns True on success, False on unknown market or connection failure.
    print 'Update %s list of symbols' % market
    connection=ITradeConnection(cookies=None,
                                proxy=itrade_config.proxyHostname,
                                proxyAuth=itrade_config.proxyAuthentication)

    if market=='MEXICO EXCHANGE':
        url = 'http://www.bmv.com.mx/wb3/wb/BMV/BMV_busqueda_de_valores/_rid/222/_mto/3/_url/BMVAPP/componenteSelectorInput.jsf?st=1'
    else:
        return False

    def splitLines(buf):
        # split a text buffer into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    # probe connectivity first (result is discarded)
    try:
        data=connection.getDataFromUrl(url)
    except:
        debug('Import_ListOfQuotes_MEX unable to connect :-(')
        return False

    # acquire a JSF session cookie via urllib2 + cookielib
    cj = None
    urlopen = urllib2.urlopen
    Request = urllib2.Request
    cj = cookielib.LWPCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    url = 'http://www.bmv.com.mx/wb3/wb/BMV/BMV_busqueda_de_valores/_rid/222/_mto/3/_url/BMVAPP/componenteSelectorInput.jsf?st=1'
    req = Request(url)
    handle = urlopen(req)
    # extract the JSESSIONID by slicing the cookie jar's repr
    cj = str(cj)
    cookie = cj[cj.find('JSESSIONID'):cj.find(' for www.bmv.com.mx/>')]

    host = 'www.bmv.com.mx'
    url = '/wb3/wb/BMV/BMV_componente_selector_de_valores/_rid/199/_mto/3/_url/BMVAPP/componenteSelectorBusqueda.jsf?st=1'
    headers = { "Host": host
              , "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.9.0.3)Gecko/2008092417 Firefox/3.0.3"
              , "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
              , "Accept-Language": "fr"
              , "Accept-Encoding": "gzip,deflate"
              , "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7"
              , "Keep-Alive":300
              , "Connection": "keep-alive"
              , "Referer": "http://www.bmv.com.mx/wb3/wb/BMV/BMV_busqueda_de_valores/_rid/222/_mto/3/_url/BMVAPP/componenteSelectorInput.jsf?st=1"
              , "Cookie": cookie }
    # prime the JSF search page within the session
    conn = httplib.HTTPConnection(host,80)
    conn.request("GET",url,None,headers)
    response = conn.getresponse()
    #print response.status, response.reason

    url = '/wb3/wb/BMV/BMV_componente_selector_de_valores/_rid/199/_mto/3/_url/BMVAPP/componenteSelectorBusqueda.jsf'
    countname = 0       # number of company names seen
    countserie = 0      # number of ticker/series rows added
    # the result list is paginated over 28 pages (0..27)
    for page in range(28):
        indice = str(page)
        previouspage = str(page-1)
        endpage = '27'
        # page 0 submits the search form; later pages post the paginator state
        if page == 0:
            params='tab1%3AformaListaEmisoras%3AletraActual=&tab1%3AformaListaEmisoras%3AtipoActual=1&tab1%3AformaListaEmisoras%3AsectorActualKey=0%2C0%2C0%2C0&tab1%3AformaListaEmisoras%3AbotonSubmit=Buscar+un+valor&tab1%3AformaListaEmisoras=tab1%3AformaLista'
        else:
            params = 'tab1%3AformaListaPaginas=tab1%3AformaListaPaginas&indice='+indice+'&tab1%3AformaListaPaginas%3A_idcl=tab1%3AformaListaPaginas%3Apagina%3A'+previouspage+'%3A_id85'
        headers = { "Host": host
                  , "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.9.0.3)Gecko/2008092417 Firefox/3.0.3"
                  , "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
                  , "Accept-Language": "fr"
                  , "Accept-Encoding": "gzip,deflate"
                  , "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7"
                  , "Keep-Alive":300
                  , "Connection": "keep-alive"
                  , "Referer": "http://www.bmv.com.mx/wb3/wb/BMV/BMV_componente_selector_de_valores/_rid/199/_mto/3/_url/BMVAPP/componenteSelectorBusqueda.jsf?st=1"
                  , "Cookie": cookie
                  , "Content-Type": "application/x-www-form-urlencoded"
                  , "Content-Length": len(params) }
        conn = httplib.HTTPConnection(host,80)
        conn.request("POST",url,params,headers)
        response = conn.getresponse()
        #print response.status, response.reason
        if page > 0 :
            #Partial activation of the Progressbar
            x=x+0.07
            dlg.Update(x,'%s : %s / %s'%(market,indice,endpage))

        # table rows live between these two markers in the response HTML
        startch = '<tr class="Tabla1_Renglon_'
        endch = '</tr><input type="hidden"'

        # returns the data
        data = response.read()
        if data.find(startch):
            a= data.find(startch)
            dataline = data[a:data.index(endch,a)]
            # strip table markup so only the cell lines remain
            dataline = dataline.replace('</tr>','')
            dataline = dataline.replace('<tr>','')
            dataline = dataline.replace('<tbody>','')
            dataline = dataline.replace('</tbody>','')
            dataline = dataline.replace('</table>','')
            lines = splitLines(dataline)
            # markers identifying what each cell line carries
            lineticker ='text-align: left;">'
            linename = 'margin-right:5px;">'
            lineserie = 'text-valign:bottom;">'
            for line in lines:
                if lineticker in line:
                    # offset 19 == len(lineticker)
                    ticker = line[line.index(lineticker)+19:line.index('</span>')]
                if linename in line:
                    name = line[line.index(linename)+19:line.index('</span>')]
                    # normalise accented/entity characters in company names
                    name = name.replace('Ó','O') # Ó
                    name = name.replace('Ñ','N') # Ñ
                    name = name.replace('É','E') # É
                    name = name.replace('&','&')
                    name = name.replace(',','')
                    name = name.replace(' ',' ')
                    countname = countname + 1
                if lineserie in line:
                    # offset 21 == len(lineserie)
                    serie = line[line.index(lineserie)+21:line.index('</span>')]
                    if serie != 'Series':       # skip the column header row
                        newticker = ticker+serie
                        if serie == '*' :       # '*' series = bare ticker
                            newticker = ticker
                        newticker = newticker.replace('&','&')
                        countserie = countserie + 1
                        quotes.addQuote(isin='',name=name,ticker=newticker,market='MEXICO EXCHANGE',currency='MXN',place='MEX',country='MX')

    print 'Imported %d quotes with %d different tickers from MEXICO EXCHANGE data.' % (countname,countserie)

    response.close()
    handle.close()
    return True
class Import_yahoo(object):
    # History connector: downloads daily OHLCV history from Yahoo Finance
    # (CSV endpoint) and re-encodes it in the internal EBP format.

    def __init__(self):
        debug('Import_yahoo:__init__')
        #self.m_connection=ITradeConnection(proxy="172.30.0.3:8080")
        self.m_connection = ITradeConnection(cookies = None,
                                   proxy = itrade_config.proxyHostname,
                                   proxyAuth = itrade_config.proxyAuthentication,
                                   connectionTimeout = itrade_config.connectionTimeout
                                   )

    def name(self):
        # connector identifier
        return 'yahoo'

    def interval_year(self):
        # maximum slice of history (in years) fetched per request
        return 0.5

    def connect(self):
        return True

    def disconnect(self):
        pass

    def getstate(self):
        # no state
        return True

    def parseDate(self,d):
        # datetime.date -> (year, month, day) tuple
        return (d.year, d.month, d.day)

    def splitLines(self,buf):
        # split a text buffer into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    def getdata(self,quote,datedebut=None,datefin=None):
        # Fetch daily history for 'quote' between datedebut and datefin
        # (both default to today; Datation wrappers are unwrapped).
        # Returns the history as 'key;date;open;high;low;adjclose;volume\r\n'
        # lines, or None on connection failure / empty / invalid content.
        if not datefin:
            datefin = date.today()
        if not datedebut:
            datedebut = date.today()
        if isinstance(datedebut,Datation):
            datedebut = datedebut.date()
        if isinstance(datefin,Datation):
            datefin = datefin.date()
        d1 = self.parseDate(datedebut)
        d2 = self.parseDate(datefin)

        debug("Import_yahoo:getdata quote:%s begin:%s end:%s" % (quote,d1,d2))

        sname = yahooTicker(quote.ticker(),quote.market(),quote.place())

        # '^' (index tickers) must be URL-escaped
        if sname[0]=='^':
            ss = "%5E" + sname[1:]
        else:
            ss = sname

        # Yahoo CSV query: a/d are 0-based months, b/e days, c/f years
        query = (
            ('s', ss),
            ('a', '%02d' % (int(d1[1])-1)),
            ('b', d1[2]),
            ('c', d1[0]),
            ('d', '%02d' % (int(d2[1])-1)),
            ('e', d2[2]),
            ('f', d2[0]),
            ('y', '0'),
            ('g', 'd'),
            ('ignore', '.csv'),
        )
        query = map(lambda (var, val): '%s=%s' % (var, str(val)), query)
        query = string.join(query, '&')
        url = yahooUrl(quote.market(),live=False) + '?' + query

        debug("Import_yahoo:getdata: url=%s ",url)
        try:
            buf=self.m_connection.getDataFromUrl(url)
        except:
            debug('Import_yahoo:unable to connect :-(')
            return None

        # pull data
        lines = self.splitLines(buf)
        if len(lines)<=0:
            # empty content
            return None
        header = string.split(lines[0],',')
        data = ""

        if (header[0] != "Date"):
            # no valid content
            return None

        for eachLine in lines:
            sdata = string.split (eachLine, ',')
            sdate = sdata[0]
            if (sdate != "Date"):       # skip the header line
                if re_p3_1.match(sdate):
                    #print 'already good format ! ',sdate,sdata
                    pass
                else:
                    sdate = dd_mmm_yy2yyyymmdd(sdate)
                open = string.atof(sdata[1])
                high = string.atof(sdata[2])
                low = string.atof(sdata[3])
                value = string.atof(sdata[6])   #   Adj. Close*
                volume = string.atoi(sdata[5])

                if volume >= 0:
                    # encode in EBP format
                    # ISIN;DATE;OPEN;HIGH;LOW;CLOSE;VOLUME
                    line = (
                      quote.key(),
                      sdate,
                      open,
                      high,
                      low,
                      value,
                      volume
                    )
                    line = map(lambda (val): '%s' % str(val), line)
                    line = string.join(line, ';')
                    # append
                    data = data + line + '\r\n'
        return data
def Import_ListOfQuotes_OSLO(quotes,market='OSLO EXCHANGE',dlg=None,x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection=ITradeConnection(cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication) if market=='OSLO EXCHANGE': starturl = 'http://www.oslobors.no/markedsaktivitet/stockIsinList?newt_isinList-stock_exch=ose&newt_isinList-stock_sort=aLONG_NAME&newt_isinList-stock_page=' endurl = '&newt__menuCtx=1.12' else: return False def splitLines(buf): lines = string.split(buf, 'Overview?') lines = filter(lambda x:x, lines) def removeCarriage(s): if s[-1]=='\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines nlines = 0 endpage = 8 select_page = ['1','2','3','4','5','6','7','8'] for page in select_page: url = starturl + page + endurl try: data=connection.getDataFromUrl(url) except: debug('Import_ListOfQuotes_OSLO:unable to connect :-(') return False # returns the data lines = splitLines(data) #typical line #newt__ticker=TFSO" title="">24Seven Technology Group</a></td><td class="c2">NO0010279474</td><td class="c3 o l">TFSO</td></tr><tr id="manamind_isinList__stock_table_table_r2" class="r2"><td class="c0 f"><div title="Aksjer på Oslo Børs"><img src="http://ose.asp.manamind.com/ob/images/markedssymbol-XOSL-tiny.png" width="8" height="8" /></div></td><td class="c1 o"><a href= for line in lines: if line.find('newt__ticker=') != -1: #partial activation of Progressbar dlg.Update(x,'%s : %s / %s'%(market,page,endpage)) ticker = line[line.index('newt__ticker=')+13:line.index('" title="">')] if ticker == 'SAS+NOK' : ticker = 'SAS' name = line[line.index(' title="">')+10:line.index('</a></td><td')] name = name.replace('&','&') name = name.replace('ö','o') name = name.replace('æ','ae') name = name.replace('ø','o') isin = line[line.index('class="c2">')+11:line.index('</td><td class="c3 o l">')] #ok to proceed quotes.addQuote(isin=isin,name=name, \ ticker=ticker,market='OSLO 
EXCHANGE', \ currency='NOK',place='OSL',country='NO') nlines = nlines + 1 if itrade_config.verbose: print 'Imported %d lines from OSLO EXCHANGE' % (nlines) return True
class Currencies(object):
    # In-memory table of currency conversion rates with a CSV cache file and
    # a Yahoo Finance fetcher. Each entry maps key('TO','FROM') -> (used, rate)
    # where 'used' flags pairs actually referenced by the portfolio.

    def __init__(self):
        # url
        # Yahoo CSV endpoint: '%s%s=X' is the FROM/TO pair
        self.m_url = 'http://finance.yahoo.com/d/quotes.csv?s=%s%s=X&f=s4l1t1c1ghov&e=.csv'
        self.m_connection = None    # lazily created in get()
        # to-from
        self.m_currencies = {}      # 'TOFROM' -> (used:bool, rate:float)
        self.m_list = buildListOfSupportedCurrencies()
        # seed every supported pair at rate 1.0, not in use
        for eachCur in self.m_list:
            curTo,curFrom = eachCur
            self.update(curTo,curFrom,1.0)

    def list(self):
        return self.m_list

    # ---[ Load / Save cache file ] ---

    def update(self,curTo,curFrom,rate):
        # Store 'rate' for the pair, preserving its 'used' flag; identity
        # pairs and 'N/A' placeholders are ignored. Returns rate unchanged.
        if curFrom == 'N/A' or curTo == 'N/A':
            return rate
        if curTo != curFrom:
            key = self.key(curTo, curFrom)
            if key in self.m_currencies:
                used,oldrate = self.m_currencies[key]
            else:
                used = False
            self.m_currencies[key] = (used,rate)
        return rate

    def load(self,fn=None):
        # open and read the file to load these currencies information
        infile = itrade_csv.read(fn,os.path.join(itrade_config.dirCacheData,'currencies.txt'))
        if infile:
            # scan each line to read each rate
            for eachLine in infile:
                item = itrade_csv.parse(eachLine,3)
                if item:
                    # logging.debug('%s ::: %s' % (eachLine,item))
                    self.update(item[0],item[1],float(item[2]))

    def save(self,fn=None):
        # generate list of strings TO;FROM;RATE
        curs = []
        for eachCurrency in self.m_currencies:
            used,rate = self.m_currencies[eachCurrency]
            # key is 'TOFROM': first 3 chars = TO, rest = FROM
            curs.append("%s;%s;%.8f"%(eachCurrency[:3],eachCurrency[3:],rate))

        # open and write the file with these currencies information
        itrade_csv.write(fn,os.path.join(itrade_config.dirCacheData,'currencies.txt'),curs)

    # ---[ Convert ] ---

    def key(self,curTo,curFrom):
        # dictionary key for a pair, e.g. ('EUR','USD') -> 'EURUSD'
        return curTo.upper() + curFrom.upper()

    def rate(self,curTo,curFrom):
        # Conversion rate for the pair; 1.0 for identity, 'N/A' or unknown pairs.
        if curFrom == 'N/A' or curTo == 'N/A':
            return 1.0
        if curTo == curFrom:
            return 1.0
        key = self.key(curTo,curFrom)
        if key in self.m_currencies:
            used,rate = self.m_currencies[key]
            return rate
        else:
            return 1.0

    def convert(self,curTo,curFrom,Value):
        # Convert 'Value' from curFrom into curTo using the cached rate.
        rate = self.rate(curTo,curFrom)
        #print 'convert: value:%f from:%s to:%s rate=%f retval=%f' % (Value,curFrom,curTo,rate,Value*rate)
        return Value * rate

    # ---[ Currency in use or not ? ] ---

    def used(self,curTo,curFrom):
        # True when the pair is flagged as in use (identity pairs always are).
        if curFrom == 'N/A' or curTo == 'N/A':
            return False
        if curTo == curFrom:
            return True
        key = self.key(curTo,curFrom)
        if key in self.m_currencies:
            used,rate = self.m_currencies[key]
            #print 'used >>> currency : ',key,' inUse: ',used,' rate: ',rate
            return used
        else:
            return False

    def inuse(self,curTo,curFrom,bInUse):
        # Set the 'used' flag of an existing pair; no-op for identity / 'N/A'.
        if curFrom == 'N/A' or curTo == 'N/A':
            return
        if curTo == curFrom:
            return
        key = self.key(curTo,curFrom)
        if key in self.m_currencies:
            used,rate = self.m_currencies[key]
            self.m_currencies[key] = (bInUse,rate)
            #print 'inuse >>> currency : ',key,' inUse: ',bInUse,' rate: ',rate

    def reset(self):
        # Clear the 'used' flag on every pair (rates are kept).
        #print '>>> currency reset'
        for key in self.m_currencies:
            used,rate = self.m_currencies[key]
            self.m_currencies[key] = (False,rate)

    # ---[ Get Last Trade from network ] ---

    # GBX (pence sterling) is quoted via GBP with a x100 scale factor
    _s1 = { "GBX": "GBP", }
    _s2 = { "GBX": 100.0, }

    def get(self,curTo,curFrom):
        # Fetch the live rate for one pair from Yahoo and store it.
        # Returns the new rate, or None when offline / 'N/A' / fetch failure.
        if not itrade_config.isConnected():
            return None
        if curFrom == 'N/A' or curTo == 'N/A':
            return None
        if self.m_connection is None:
            # lazy connection creation on first use
            self.m_connection = ITradeConnection(cookies = None,
                                       proxy = itrade_config.proxyHostname,
                                       proxyAuth = itrade_config.proxyAuthentication,
                                       connectionTimeout = itrade_config.connectionTimeout
                                       )
            #print "**** Create Currency Connection"

        # pence
        if curFrom in self._s1.keys():
            a = self._s1[curFrom]
        else:
            a = curFrom
        if curTo in self._s1.keys():
            b = self._s1[curTo]
        else:
            b = curTo

        # get data
        url = self.m_url % (a,b)
        try:
            buf = self.m_connection.getDataFromUrl(url)
        except:
            return None

        # extract data
        #print url,buf
        sdata = string.split(buf, ',')
        f = float(sdata[1])     # second CSV field is the last rate

        # pence
        if curFrom in self._s2.keys():
            f = f / self._s2[curFrom]
        if curTo in self._s2.keys():
            f = f * self._s2[curTo]
        #print 'get: %s %s rate = %.4f' %(curTo,curFrom,float(sdata[1]))
        return self.update(curTo,curFrom,f)

    def getlasttrade(self,bAllEvenNotInUse=False):
        # Refresh rates from the network (all pairs, or only the in-use ones)
        # then persist the table to the cache file.
        if not itrade_config.isConnected():
            return
        for eachCurrency in self.m_currencies:
            curTo = eachCurrency[:3]
            curFrom = eachCurrency[3:]
            if bAllEvenNotInUse or self.used(curTo,curFrom):
                self.get(curTo,curFrom)
        self.save()
def Import_ListOfQuotes_SWX(quotes, market='SWISS EXCHANGE', dlg=None, x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection = ITradeConnection( cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication, connectionTimeout=itrade_config.connectionTimeout) if market == 'SWISS EXCHANGE': url = 'http://www.six-swiss-exchange.com/shares/companies/download/issuers_all_fr.csv' try: data = connection.getDataFromUrl(url) except: info('Import_ListOfQuotes_SWX_%s:unable to get file name :-(' % market) return False else: return False def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x: x, lines) def removeCarriage(s): if s[-1] == '\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines # returns the data lines = splitLines(data) n = 0 isin = '' for line in lines[1:]: line = line.replace('!', ' ') line = line.replace(',', ' ') line = line.replace('à', 'a') line = line.replace('ä', 'a') line = line.replace('â', 'a') line = line.replace('ö', 'o') line = line.replace('ü', 'u') line = line.replace('é', 'e') line = line.replace('è', 'e') line = line.replace('+', ' ') data = string.split(line, ';') # csv line name = data[0].strip() ticker = data[1].strip() country = data[3].strip() currency = data[4].strip() exchange = data[5].strip() quotes.addQuote(isin=isin,name=name, ticker=ticker,market='SWISS EXCHANGE',\ currency=currency,place=exchange,country=country) n = n + 1 if itrade_config.verbose: print 'Imported %d lines from %s' % (n, market) return True
def Import_ListOfQuotes_SWX(quotes,market='SWISS EXCHANGE',dlg=None,x=0):
    # Download the SWX shares reference-data file (CSV extension, actually
    # tab delimited) and register every share into 'quotes'. The header row
    # is used to locate the columns by title.
    #
    # Returns True on success, False on unknown market or connection failure.
    print 'Update %s list of symbols' % market
    connection = ITradeConnection(cookies = None,
                               proxy = itrade_config.proxyHostname,
                               proxyAuth = itrade_config.proxyAuthentication,
                               connectionTimeout = itrade_config.connectionTimeout
                               )
    if market=='SWISS EXCHANGE':
        url = "http://www.swx.com/data/market/statistics/swx_swiss_shares_reference_data.csv"
        # is actually tab delimited
    else:
        return False

    def splitLines(buf):
        # split a text buffer into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    info('Import_ListOfQuotes_SWX:connect to %s' % url)

    try:
        data=connection.getDataFromUrl(url)
    except:
        info('Import_ListOfQuotes_SWX:unable to connect :-(')
        return False

    # returns the data
    lines = splitLines(data)
    n = 0          # 0 = header row not yet processed; then counts rows
    indice = {}    # column title -> column index, filled from the header row

    for line in lines:
        item = itrade_csv.parse(line,7)
        if len(item)>2:
            if n==0:
                # first parsed row is the header: map title -> position
                i = 0
                for ind in item:
                    indice[ind] = i
                    i = i + 1
                iISIN = indice['ISIN']
                iName = indice['ShortName']
                iCurrency = indice['TradingBaseCurrency']
                iExchange = indice['Exchange']
                iCountry = indice['GeographicalAreaCode']
                iTicker = indice['ValorSymbol']
            else:
                # data row: commas would break the CSV cache format downstream
                quotes.addQuote(isin=item[iISIN],name=item[iName].replace(',',' '),
                            ticker=item[iTicker],market='SWISS EXCHANGE',\
                            currency=item[iCurrency],place=item[iExchange],country=item[iCountry])
            n = n + 1

    print 'Imported %d/%d lines from %s data.' % (n,len(lines),market)

    return True
class Import_yahoojp(object):
    # History connector for Yahoo Japan: scrapes the paginated HTML history
    # tables (no CSV endpoint) and re-encodes rows into the EBP format.

    def __init__(self):
        debug('Import_yahoojp:__init__')
        self.m_connection = ITradeConnection(
            cookies=None,
            proxy=itrade_config.proxyHostname,
            proxyAuth=itrade_config.proxyAuthentication,
            connectionTimeout=itrade_config.connectionTimeout)

    def name(self):
        # connector identifier
        return 'yahoojp'

    def interval_year(self):
        # maximum slice of history (in years) fetched per request
        return 0.5

    def connect(self):
        return True

    def disconnect(self):
        pass

    def getstate(self):
        return True

    def parseDate(self, d):
        # datetime.date -> (year, month, day) tuple
        return (d.year, d.month, d.day)

    def splitLines(self, buf):
        # split a text buffer into non-empty lines, stripping a trailing '\r'
        lines = string.split(buf, '\n')
        lines = filter(lambda x: x, lines)
        def removeCarriage(s):
            if s[-1] == '\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    def getdata(self, quote, datedebut=None, datefin=None):
        # Fetch daily history for 'quote' between datedebut and datefin by
        # walking Yahoo Japan's result pages (50 rows per page, cursor
        # 0..4600). Returns 'key;date;open;high;low;adjclose;volume\r\n'
        # lines, or None on connection failure / empty page.
        # specific numTradeYears
        itrade_config.numTradeYears = 2

        if not datefin:
            datefin = date.today()
        if not datedebut:
            datedebut = date.today()
        if isinstance(datedebut, Datation):
            datedebut = datedebut.date()
        if isinstance(datefin, Datation):
            datefin = datefin.date()
        d1 = self.parseDate(datedebut)
        d2 = self.parseDate(datefin)

        debug("Import_yahoojp:getdata quote:%s begin:%s end:%s" % (quote, d1, d2))

        sname = yahooTicker(quote.ticker(), quote.market(), quote.place())
        ss = sname

        # marker of a data row in the scraped HTML table
        ch = '<tr align=right bgcolor="#ffffff">'
        lines = []      # accumulated CSV rows: date,open,high,low,close,volume,adjclose

        # paginate: 'y' cursor advances 50 rows per page
        for cursor in range(0, 4650, 50):
            url = yahooUrlJapan(
                quote.market(), live=False
                ) + '?' + 'c=%s&a=%s&b=%s&f=%s&d=%s&e=%s&g=d&s=%s&y=%s&z=%s' % (
                d1[0], d1[1], d1[2], d2[0], d2[1], d2[2], ss, str(cursor), ss)
            #url = 'http://table.yahoo.co.jp/t?s=%s&a=1&b=1&c=2000&d=%s&e=%s&f=%s&g=d&q=t&y=%s&z=/b?p=tjfzqcvy4.ewcf7pt&x=.csv' % (ss,d2[1],d2[2],d2[0],str(cursor))

            debug("Import_yahoojp:getdata: url=%s ", url)
            try:
                buf = self.m_connection.getDataFromUrl(url)
            except:
                debug('Import_yahoojp:unable to connect :-(')
                return None

            # pull data
            linesjp = self.splitLines(buf)
            if len(linesjp) <= 0:
                # empty content
                return None

            #typical lines indices
            #<tr align=right bgcolor="#ffffff">
            #<td><small>2009126</small></td>  (DATE)
            #<td><small>772.59</small></td>   (OPEN)
            #<td><small>777.91</small></td>   (HIGH)
            #<td><small>767.82</small></td>   (LOW)
            #<td><small><b>768.28</b></small></td>  (LAST)

            #typical lines quotes
            #<tr align=right bgcolor="#ffffff">
            #<td><small>2009119</small></td>  (DATE)
            #<td><small>198</small></td>      (OPEN)
            #<td><small>200</small></td>      (HIGH)
            #<td><small>198</small></td>      (LOW)
            #<td><small><b>199</b></small></td>  (LAST)
            #<td><small>92,000</small></td>   (VOLUME)
            #<td><small>199</small></td>      (ADJUSTCLOSE)
            #</tr><tr align=right bgcolor="#ffffff">
            #<td><small>2009116</small></td>
            #<td><small>197</small></td>
            #<td><small>200</small></td>

            # state machine over the HTML lines:
            #   n = 1 while inside a data row, i = cell index within the row,
            #   q = 1 when this page yielded at least one data row
            n = 0
            i = 0
            q = 0
            #header = 'Date,Open,High,Low,Close,Volume,Adj Close'
            #filedata.write(header+'\n')
            for line in linesjp:
                if ch in line:
                    n = 1
                if n == 1:
                    q = 1
                    if '<td><small>' in line:
                        i = i + 1
                        # cell text between 'small>' and the next '</'
                        data = line[(line.find('small>') + 6):(line.find('</'))]
                        if i == 1:
                            date = data
                            # mojibake of the Japanese year/month/day markers
                            date = date.replace('ǯ', ' ')
                            date = date.replace('·î', ' ')
                            date = date.replace('Æü', '')
                            date = date.split()
                            # zero-pad month and day, then join as Y-M-D
                            if len(date[1]) == 1:
                                date[1] = '0' + date[1]
                            if len(date[2]) == 1:
                                date[2] = '0' + date[2]
                            date = '-'.join(date)
                        elif i == 2:
                            open = data
                            open = open.replace(',', '')
                        elif i == 3:
                            high = data
                            high = high.replace(',', '')
                        elif i == 4:
                            low = data
                            low = low.replace(',', '')
                        elif i == 5:
                            # close cell is wrapped in <b>: data starts '<b>'
                            close = data[3:]
                            close = close.replace(',', '')
                            # indices have no volume column: row ends at cell 5
                            if ss == '998405' or ss == '998407' or ss == '23337':
                                volume = '0'
                                open = open.replace(',', '')
                                high = high.replace(',', '')
                                low = low.replace(',', '')
                                close = close.replace(',', '')
                                adjustclose = close
                                i = 0
                                n = 0
                                ligne = ','.join([
                                    date, open, high, low, close, volume,
                                    adjustclose
                                ])
                                #print ligne
                                lines.append(ligne)
                        elif i == 6:
                            volume = data
                            volume = volume.replace(',', '')
                        elif i == 7:
                            # last cell of a stock row: emit and reset
                            i = 0
                            n = 0
                            adjustclose = data
                            adjustclose = adjustclose.replace(',', '')
                            ligne = ','.join([
                                date, open, high, low, close, volume,
                                adjustclose
                            ])
                            #print ligne
                            lines.append(ligne)
            # page with no data rows -> past the end of history, stop paging
            if q == 0:
                break

        # re-encode the accumulated CSV rows in EBP format
        data = ""
        for eachLine in lines:
            sdata = string.split(eachLine, ',')
            sdate = sdata[0]
            open = string.atof(sdata[1])
            high = string.atof(sdata[2])
            low = string.atof(sdata[3])
            value = string.atof(sdata[6])   # Adj. Close*
            volume = string.atoi(sdata[5])

            if volume >= 0:
                # encode in EBP format
                # ISIN;DATE;OPEN;HIGH;LOW;CLOSE;VOLUME
                line = (quote.key(), sdate, open, high, low, value, volume)
                line = map(lambda (val): '%s' % str(val), line)
                line = string.join(line, ';')
                # append
                data = data + line + '\r\n'
        return data
def Import_ListOfQuotes_KRX(quotes,market='KOREA STOCK EXCHANGE',dlg=None,x=0):
    # Scrape the Korea Exchange english site (session cookie + AJAX POST)
    # and register every listed company into 'quotes'.
    #
    #   market : 'KOREA STOCK EXCHANGE' (place KRX) or
    #            'KOREA KOSDAQ EXCHANGE' (place KOS); anything else -> False
    #
    # Returns True on success, False on unknown market / connection failure /
    # non-200 response.
    if itrade_config.verbose:
        print 'Update %s list of symbols' % market
    connection = ITradeConnection(cookies = None,
                               proxy = itrade_config.proxyHostname,
                               proxyAuth = itrade_config.proxyAuthentication,
                               connectionTimeout = itrade_config.connectionTimeout
                               )
    # POST payload selects the market segment (market_gubun 1 = KRX, 2 = KOSDAQ)
    if market=='KOREA STOCK EXCHANGE':
        params = "isu_cd=&gbn=1&market_gubun=1&isu_nm=&sort=&std_ind_cd=&std_ind_cd1=&par_pr=&cpta_scl=&sttl_trm=&lst_stk_vl=1&in_lst_stk_vl=&in_lst_stk_vl2=&cpt=1&in_cpt=&in_cpt2=&nat_tot_amt=1&in_nat_tot_amt=&in_nat_tot_amt2="
        place = 'KRX'
    elif market=='KOREA KOSDAQ EXCHANGE':
        params = "isu_cd=&gbn=2&market_gubun=2&isu_nm=&sort=&std_ind_cd=&std_ind_cd1=&par_pr=&cpta_scl=&sttl_trm=&lst_stk_vl=1&in_lst_stk_vl=&in_lst_stk_vl2=&cpt=1&in_cpt=&in_cpt2=&nat_tot_amt=1&in_nat_tot_amt=&in_nat_tot_amt2="
        place = 'KOS'
    else:
        return False

    def splitLines(buf):
        # one scraped company per chunk: rows end with '</td></tr>'
        lines = string.split(buf, '</td></tr>')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    url = 'http://eng.krx.co.kr'

    info('Import_ListOfQuotes_KRX_%s:connect to %s' % (market,url))

    # probe connectivity first (result is discarded)
    try:
        data = connection.getDataFromUrl(url)
    except:
        info('Import_ListOfQuotes_KRX_%s:unable to connect :-(' % market)
        return False

    # acquire a JSESSIONID cookie via urllib2 + cookielib
    cj = None
    urlopen = urllib2.urlopen
    Request = urllib2.Request
    cj = cookielib.LWPCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    req = Request(url)
    handle = urlopen(req)
    # extract the JSESSIONID by slicing the cookie jar's repr
    cj = str(cj)
    cookie = cj[cj.find('JSESSIONID'):cj.find(' for eng.krx.co.kr/>]>')]

    host = 'eng.krx.co.kr'
    url = "/por_eng/corelogic/process/ldr/lst_s_001.xhtml?data-only=true"
    headers = { "Host": host
              , "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; fr; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729)"
              , "Accept": "text/javascript, text/html, application/xml, text/xml, */*"
              , "Accept-Language": "fr"
              , "Accept-Encoding": "gzip,deflate"
              , "Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7"
              , "Keep-Alive":115
              , "Connection": "keep-alive"
              , "X-Requested-With": "XMLHttpRequest"
              , "X-Prototype-Version": "1.6.1"
              , "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8"
              , "Referer": "http://eng.krx.co.kr/m6/m6_1/m6_1_1/JHPENG06001_01.jsp"
              , "Content-Length": len(params)
              , "Cookie": cookie
              , "Pragma": "no-cache"
              , "Cache-Control": "no-cache" }

    try:
        conn = httplib.HTTPConnection(host,80)
        conn.request("POST",url,params,headers)
        response = conn.getresponse()
    except:
        debug('Import_ListOfQuotes_KRX unable to connect :-(')
        return False

    debug("status:%s reason:%s" %(response.status, response.reason))
    if response.status != 200:
        debug('Import_ListOfQuotes_KRX:status!=200')
        return False

    data = response.read()

    # returns the data
    lines = splitLines(data)
    n = 0
    isin = ''       # ISIN not provided by this endpoint

    print 'Import_ListOfQuotes_KRX_%s:' % market

    for line in lines:
        # ticker: chars 8.. up to the first cell separator
        ticker = line[8:line.index('</td><td>')]
        # company name: second table cell
        name = line[line.find('</td><td>')+9:]
        name = name[:name.find('</td><td>')]
        # strip characters that would break the CSV cache format
        name = name.replace(',','')
        name = name.replace(';','')
        name = name.replace('&',' & ')
        # hand-patched names for badly scraped entries
        if ticker == '035000':
            name = 'G'+'||'+'R'
        if ticker == '060380':
            name = 'DY S'+'-'+'TEC'
        # ok to proceed
        n = n + 1
        quotes.addQuote(isin = isin,name = name,ticker = ticker,\
            market = market,currency = 'KRW',place = place, country = 'KR')

    if itrade_config.verbose:
        print 'Imported %d lines from %s data.' % (n,market)

    return True
def Import_ListOfQuotes_OSLO(quotes,market='OSLO EXCHANGE',dlg=None,x=0):
    # Scrape the Oslo Bors ISIN list (8 paginated pages) and register every
    # listed share into 'quotes'.
    #
    # NOTE(review): dlg.Update() is called without an 'if dlg:' guard below,
    # so calling this importer with the default dlg=None raises — confirm
    # all callers pass a dialog.
    #
    # Returns True on success, False on unknown market or connection failure.
    if itrade_config.verbose:
        print 'Update %s list of symbols' % market
    connection=ITradeConnection(cookies=None,
                                proxy=itrade_config.proxyHostname,
                                proxyAuth=itrade_config.proxyAuthentication)

    if market=='OSLO EXCHANGE':
        starturl = 'http://www.oslobors.no/markedsaktivitet/stockIsinList?newt_isinList-stock_exch=ose&newt_isinList-stock_sort=aLONG_NAME&newt_isinList-stock_page='
        endurl = '&newt__menuCtx=1.12'
    else:
        return False

    def splitLines(buf):
        # one scraped security per chunk: the page separates rows with 'Overview?'
        lines = string.split(buf, 'Overview?')
        lines = filter(lambda x:x, lines)
        def removeCarriage(s):
            if s[-1]=='\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    nlines = 0
    endpage = 8
    select_page = ['1','2','3','4','5','6','7','8']

    for page in select_page:
        url = starturl + page + endurl
        try:
            data=connection.getDataFromUrl(url)
        except:
            debug('Import_ListOfQuotes_OSLO:unable to connect :-(')
            return False

        # returns the data
        lines = splitLines(data)

        #typical line
        #newt__ticker=TFSO" title="">24Seven Technology Group</a></td><td class="c2">NO0010279474</td><td class="c3 o l">TFSO</td></tr><tr id="manamind_isinList__stock_table_table_r2" class="r2"><td class="c0 f"><div title="Aksjer på Oslo Børs"><img src="http://ose.asp.manamind.com/ob/images/markedssymbol-XOSL-tiny.png" width="8" height="8" /></div></td><td class="c1 o"><a href=
        for line in lines:
            if line.find('newt__ticker=') != -1:
                #partial activation of Progressbar
                dlg.Update(x,'%s : %s / %s'%(market,page,endpage))
                # offset 13 == len('newt__ticker=')
                ticker = line[line.index('newt__ticker=')+13:line.index('" title="">')]
                if ticker == 'SAS+NOK' :
                    ticker = 'SAS'
                name = line[line.index(' title="">')+10:line.index('</a></td><td')]
                # normalise entities / Norwegian characters for the cache file
                name = name.replace('&','&')
                name = name.replace('ö','o')
                name = name.replace('æ','ae')
                name = name.replace('ø','o')
                # ISIN sits in the 'c2' table cell
                isin = line[line.index('class="c2">')+11:line.index('</td><td class="c3 o l">')]
                #ok to proceed
                quotes.addQuote(isin=isin, name=name,
                    ticker=ticker, market='OSLO EXCHANGE',
                    currency='NOK', place='OSL', country='NO')
                nlines = nlines + 1

    if itrade_config.verbose:
        print 'Imported %d lines from OSLO EXCHANGE' % (nlines)

    return True
class LiveUpdate_yahoo(object):
    """Live (15-minute delayed) quote connector backed by the Yahoo
    Finance CSV endpoint.

    Caches, per quote key: the raw decoded CSV fields (m_dcmpd), the
    market-place-local clock string (m_clock) and the quotation date
    (m_dateindice).  m_lastclock/m_lastdate track the most recent
    quotation seen across all quotes.
    """
    def __init__(self):
        debug('LiveUpdate_yahoo:__init__')
        self.m_connected = False
        # lock protecting concurrent use of this connector
        self.m_livelock = thread.allocate_lock()
        self.m_dateindice = {}
        self.m_clock = {}
        self.m_dcmpd = {}
        self.m_lastclock = 0
        self.m_lastdate = "20070101"
        self.m_connection = ITradeConnection(cookies = None,
                           proxy = itrade_config.proxyHostname,
                           proxyAuth = itrade_config.proxyAuthentication,
                           connectionTimeout = itrade_config.connectionTimeout
                           )

    # ---[ reentrant ] ---
    def acquire(self):
        self.m_livelock.acquire()

    def release(self):
        self.m_livelock.release()

    # ---[ properties ] ---
    def name(self):
        # name of the connector
        return 'yahoo'

    def delay(self):
        # delay in minuts to get a live data
        # put 0 if no delay (realtime)
        return 15

    def timezone(self):
        # timezone of the livedata (see pytz all_timezones)
        return "EST"

    # ---[ connexion ] ---
    def connect(self):
        return True

    def disconnect(self):
        pass

    def alive(self):
        return self.m_connected

    # ---[ state ] ---
    def getstate(self):
        # no state
        return True

    # ---[ code to get data ] ---
    def yahooDate (self,date):
        """Convert a quoted Yahoo date '"m/d/yyyy"' into 'yyyymmdd'."""
        # Date part is easy: strip the surrounding quotes, split on '/'
        sdate = string.split (date[1:-1], '/')
        month = string.atoi (sdate[0])
        day = string.atoi (sdate[1])
        year = string.atoi (sdate[2])
        return "%4d%02d%02d" % (year,month,day)

    def convertClock(self,place,clock,date):
        """Convert a Yahoo clock string like '4:05pm' (US Eastern) into an
        'H:MM' string expressed in the market place's timezone.

        Also refreshes m_lastclock/m_lastdate with the newest time seen.
        """
        # NOTE: 'min' shadows the builtin here - kept as-is (doc-only pass)
        clo = clock[:-2]
        min = clo[-2:]
        hour = clo[:-3]
        val = (int(hour)*60) + int(min)     # minutes since midnight (12h base)
        per = clock[-2:]                    # 'am' / 'pm' suffix
        if per=='pm':
            if int(hour) < 12:
                val = val + 12*60
        elif per == 'am':
            if int(hour) >= 12:
                val = val - 12*60
        # yahoo return EDT OR EST time: when daylight saving is active (EDT)
        # shift back one hour so val is expressed in EST (self.timezone())
        eastern = timezone('US/Eastern')    # pytz timezone(), not self.timezone()
        mdatetime = datetime(int(date[0:4]),int(date[4:6]),int(date[6:8]),val/60,val%60)
        loc_dt = eastern.localize(mdatetime)
        if str(loc_dt.strftime('%Z')) == 'EDT':
            val = val-60
            if val <= 0:
                # wrapped past midnight - clamp to 11:00
                val = (12*60)-60
        #print clock,clo,hour,min,val,per,date
        if val>self.m_lastclock and date>=self.m_lastdate:
            self.m_lastdate = date
            self.m_lastclock = val
        # convert from connector timezone to market place timezone
        mdatetime = datetime(int(date[0:4]),int(date[4:6]),int(date[6:8]),val/60,val%60)
        mdatetime = convertConnectorTimeToPlaceTime(mdatetime,self.timezone(),place)
        return "%d:%02d" % (mdatetime.hour,mdatetime.minute)

    def getdata(self,quote):
        """Fetch one live quotation for *quote* from Yahoo.

        Returns a ';'-joined string
        KEY;DATE;OPEN;HIGH;LOW;CLOSE;VOLUME;PERCENT;PREVIOUS
        or None on any connection/validation error.
        """
        debug("LiveUpdate_yahoo:getdata quote:%s " % quote)
        self.m_connected = False
        sname = yahooTicker(quote.ticker(),quote.market(),quote.place())
        # indices start with '^' which must be URL-escaped
        if sname[0]=='^':
            ss = "%5E" + sname[1:]
        else:
            ss = sname
        # f=sl1d1t1c1ohgv : symbol,last,date,time,change,open,high,low,volume
        query = (
            ('s', ss),
            ('f', 'sl1d1t1c1ohgv'),
            ('e', '.csv'),
        )
        query = map(lambda (var, val): '%s=%s' % (var, str(val)), query)
        query = string.join(query, '&')
        url = yahooUrl(quote.market(),live=True) + '?' + query
        debug("LiveUpdate_yahoo:getdata: url=%s",url)
        try:
            data=self.m_connection.getDataFromUrl(url)[:-2] # Get rid of CRLF
        except:
            debug('LiveUpdate_yahoo:unable to connect :-(')
            return None

        # pull data
        s400 = re.search(r"400 Bad Request", data, re.IGNORECASE|re.MULTILINE)
        if s400:
            if itrade_config.verbose:
                info('unknown %s quote (400 Bad Request) from Yahoo' % (quote.ticker()))
            return None

        sdata = string.split (data, ',')
        if len (sdata) < 9:
            if itrade_config.verbose:
                info('invalid data (bad answer length) for %s quote' % (quote.ticker()))
            return None
        #print sdata

        # connexion / clock
        self.m_connected = True

        # store for later use
        key = quote.key()

        sclock = sdata[3][1:-1]     # strip surrounding quotes from time field
        if sclock=="N/A" or sdata[2]=='"N/A"' or len(sclock)<5:
            if itrade_config.verbose:
                info('invalid datation for %s : %s %s' % (quote.ticker(),sclock,sdata[2]))
            #print sdata
            return None

        # start decoding
        symbol = sdata[0][1:-1]
        if symbol != sname:
            if itrade_config.verbose:
                info('invalid ticker : ask for %s and receive %s' % (sname,symbol))
            return None

        # date
        try:
            date = self.yahooDate(sdata[2])
            self.m_dcmpd[key] = sdata
            self.m_clock[key] = self.convertClock(quote.place(),sclock,date)
            self.m_dateindice[key] = sdata[2].replace('"','')
        except ValueError:
            if itrade_config.verbose:
                info('invalid datation for %s : %s %s' % (quote.ticker(),sclock,sdata[2]))
            return None

        # decode data
        value = string.atof (sdata[1])

        # any N/A numeric field invalidates the whole sample
        if (sdata[4]=='N/A'):
            debug('invalid change : N/A')
            change = 0.0
            return None
        else:
            change = string.atof (sdata[4])
        if (sdata[5]=='N/A'):
            debug('invalid open : N/A')
            open = 0.0
            return None
        else:
            open = string.atof (sdata[5])
        if (sdata[6]=='N/A'):
            debug('invalid high : N/A')
            high = 0.0
            return None
        else:
            high = string.atof (sdata[6])
        if (sdata[7]=='N/A'):
            debug('invalid low : N/A')
            low = 0.0
            return None
        else:
            low = string.atof (sdata[7])

        volume = string.atoi (sdata[8])
        if volume<0:
            debug('volume : invalid negative %d' % volume)
            return None
        # zero volume is only acceptable for indices
        if volume==0 and quote.list()!=QLIST_INDICES:
            debug('volume : invalid zero value %d' % volume)
            return None
        else:
            if value-change <= 0:
                return None
            else:
                percent = (change / (value - change))*100.0

        # ISIN;DATE;OPEN;HIGH;LOW;CLOSE;VOLUME;PERCENT;PREVIOUS
        data = (
            key,
            date,
            open,
            high,
            low,
            value,
            volume,
            percent,
            (value-change)
        )
        data = map(lambda (val): '%s' % str(val), data)
        data = string.join(data, ';')

        # temp: hunting an issue (SF bug 1848473)
        # if itrade_config.verbose:
        #     print data
        return data

    # ---[ cache management on data ] ---
    def getcacheddata(self,quote):
        # no cache
        return None

    def iscacheddataenoughfreshq(self):
        # no cache
        return False

    def cacheddatanotfresh(self):
        # no cache
        pass

    # ---[ notebook of order ] ---
    def hasNotebook(self):
        return True

    def currentNotebook(self,quote):
        # order book is not provided by this connector: always empty
        key = quote.key()
        if not self.m_dcmpd.has_key(key):
            # no data for this quote !
            return [],[]
        d = self.m_dcmpd[key]
        #buy = []
        #buy.append([0,0,d[9]])
        #sell = []
        #sell.append([0,0,d[10]])
        #return buy,sell
        return [],[]

    # ---[ status of quote ] ---
    def hasStatus(self):
        return itrade_config.isConnected()

    def currentStatus(self,quote):
        # returns (status, reopening-clock, high, low, clock)
        key = quote.key()
        if not self.m_dcmpd.has_key(key):
            # no data for this quote !
            return "UNKNOWN","::","0.00","0.00","::"
        d = self.m_dcmpd[key]
        st = 'OK'
        cl = '::'
        return st,cl,"-","-",self.m_clock[key]

    def currentClock(self,quote=None):
        """Clock of *quote*, or of the latest quotation seen when quote is None."""
        if quote==None:
            if self.m_lastclock == 0:
                return "::"
            # hh:mm
            return "%d:%02d" % (self.m_lastclock/60,self.m_lastclock%60)
        key = quote.key()
        if not self.m_clock.has_key(key):
            # no data for this quote !
            return "::"
        else:
            return self.m_clock[key]

    def currentDate(self,quote=None):
        key = quote.key()
        if not self.m_dateindice.has_key(key):
            # no date for this quote !
            return "----"
        else:
            # convert yahoo date (mm/dd/yyyy) to dd/mm/yyyy
            conv=time.strptime(self.m_dateindice[key],"%m/%d/%Y")
            return time.strftime("%d/%m/%Y",conv)

    def currentTrades(self,quote):
        # clock,volume,value - not provided by this connector
        return None

    def currentMeans(self,quote):
        # means: sell,buy,last - not provided by this connector
        return "-","-","-"
def Import_ListOfQuotes_WBO(quotes, market='WIENER BORSE', dlg=None, x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection = ITradeConnection(cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication) if market == 'WIENER BORSE': url = "http://en.wienerborse.at/marketplace_products/trading/auction/?query=&markets=A_G_D&market=all" else: return False def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x: x, lines) def removeCarriage(s): if s[-1] == '\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines info('Import_ListOfQuotes_WBO_%s:connect to %s' % (market, url)) try: data = connection.getDataFromUrl(url) except: debug('Import_ListOfQuotes_WBO:unable to connect :-(') return False # returns the data lines = splitLines(data) count = 0 n = 1 i = 0 for line in lines: #typical lines: #<td class="left">AT00000ATEC9</td> #<td class="left">ATEC</td> #<td class="left">A-TEC INDUSTRIES AG</td> #<td class="left">08:55</td> #<td class="left">12:00</td> #<td class="left">17:30</td> # extract data if '<th colspan="6"><b>Prime Market.at</b></th>' in line: n = 0 if n == 0: if '<td class="left">' in line: i = i + 1 ch = line[(line.find('>') + 1):(line.find('</td>'))] if i == 1: isin = ch elif i == 2: ticker = ch elif i == 3: name = ch name = name.replace('ä', 'a') #\xe4 name = name.replace('ö', 'o') #\xf6 name = name.replace('Ö', 'O') #\xd6 name = name.replace('ü', 'u') #\xfc name = name.replace('ß', '?') #\xdf elif i == 6: i = 0 #print isin, name, ticker # ok to proceed quotes.addQuote(isin = isin,name = name, \ ticker = ticker,market= market,currency = 'EUR', \ place = 'WBO',country = 'AT') count = count + 1 if itrade_config.verbose: print 'Imported %d lines from WIENER BORSE' % (count) return True
def Import_ListOfQuotes_SHG(quotes,market='SHANGHAI EXCHANGE',dlg=None,x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection=ITradeConnection(cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication) def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x:x, lines) def removeCarriage(s): if s[-1]=='\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines # Download SSE A SHARE urlA = 'http://www.sse.com.cn/sseportal/webapp/datapresent/queryindexcnpe?indexCode=000002&CURSOR=' ch = '<TD class=content bgColor=white><a href="/sseportal/webapp/datapresent/SSEQueryListCmpAct?reportName=QueryListCmpRpt&REPORTTYPE=GSZC&COMPANY_CODE=' cursor = 1 count = 0 for cursor in range(1,921,20): url = urlA+str(cursor) info('Import_ListOfQuotes_SSE A SHARE:connect to %s' %url) req = urllib2.Request(url) req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20041202 Firefox/1.0') try: f = urllib2.urlopen(req) data = f.read() f.close() #data=connection.getDataFromUrl(url) except: print 'Import_ListOfQuotes_SSE A SHARE:unable to connect to',url return False lines = splitLines(data) for line in lines: if ch in line: ticker = line[(line.find(ch)+len(ch)): line.find('&PRODUCTID=')] name = line[(line.find('">')+2): line.find('</a> </TD>')] if ticker == '600717': name = 'TIANJIN PORT CO. LTD.' 
name = name.replace('&','&') name = name.replace('&nbsp;','& ') name = name.replace(' ','') name = name.replace(',',' ') name = name.replace(';',' ') name = name.replace('£¦','&') name = name.replace('£','-') name = name.replace('£¬',' ') name = name.replace('£¨',' (') name = name.replace('£©',' )') name = name.replace('£®','') name = name.replace('£¬','') name = name.replace('¡¯','\'') name = name.replace('¡ª','') name = name.replace('¡¡',' ') name = name.replace(' ',' ') name = name.replace('..','.') name = name.upper() name = name.replace('COMPANY','CO.') name = name.replace('LIMITED','LTD') name = name.replace('CORPORATION','CORP.') name = name.replace('DEVELOPMENT','DEV.') count = count + 1 dlg.Update(x,'SSE A SHARE: %s / ~1000'%cursor) quotes.addQuote(isin='',name=name, ticker=ticker,market='SHANGHAI EXCHANGE', currency='CNY',place='SHG',country='CN') # Download SSE B SHARE url = 'http://www.sse.com.cn/sseportal/en_us/ps/bshare/lccl.shtml' info('Import_ListOfQuotes_SSE B SHARE:connect to %s' %url) try: data=connection.getDataFromUrl(url) except: debug('Import_ListOfQuotes_SSE B SHARE:unable to connect :-(') return False lines = splitLines(data) #typical lines # <td class="table3" bgcolor="#dbedf8" > SRB</td> # <td class="table3" bgcolor="#dbedf8" > 900907</td> # <td class="table3" bgcolor="#dbedf8" > Shanghai Rubber Belt Co., Ltd.</td> # <td class="table3" bgcolor="#dbedf8" > 19th Floor, 1600 Shiji Avenue, Shanghai</td> # <td class="table3" bgcolor="#dbedf8" > 200122</td> # </tr> # <tr> # <td class="table3" bgcolor="white" > SCAC B</td> # <td class="table3" bgcolor="white" > 900908</td> # <td class="table3" bgcolor="white" > Shanghai Chlor Alkali Chemical # Co., Ltd.</td> # <td class="table3" bgcolor="white" > 17th Floor, 1271 Pudong Nan Road, Shanghai</td> # <td class="table3" bgcolor="white" > 200122</td> n = 1 i = 0 dlg.Update(x,'SHANGHAI B SHARE') for line in lines: if '<td class="table_title2" bgcolor="#337fb2" >Post Code</td>' in line : n = 0 if n == 0 
: if ('<td class="table3" bgcolor="#dbedf8" > ' in line or '<td class="table3" bgcolor="white" > ' in line): i = i + 1 ch = line[(line.find('>')+2):(line.find ('</td>'))] if i == 2 : ticker = ch elif i == 3 : name = ch elif i == 5 : i = 0 name = name.replace(' ','') name = name.replace('&','&') name = name.replace(',','') count = count + 1 quotes.addQuote(isin='',name=name, ticker=ticker,market='SHANGHAI EXCHANGE', currency='CNY',place='SHG',country='CN') elif i == 3: name = name +' '+ line.strip() if name.find('</td>'): name = name[:-5] if itrade_config.verbose: print 'Imported %d lines from SHANGHAI EXCHANGE' %count return True
def Import_ListOfQuotes_SHE(quotes, market='SHENZHEN EXCHANGE', dlg=None, x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection = ITradeConnection(cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication) if market == 'SHENZHEN EXCHANGE': url = 'http://www.szse.cn/szseWeb/FrontController.szse?ACTIONID=8&CATALOGID=1693&TABKEY=tab1&ENCODE=1' else: return False def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x: x, lines) def removeCarriage(s): if s[-1] == '\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines info('Import_ListOfQuotes_SHE_%s:connect to %s' % (market, url)) try: data = connection.getDataFromUrl(url) except: info('Import_ListOfQuotes_SHE_%s:unable to connect :-(' % market) return False data = data.replace("style='mso-number-format:\@' align='center' >", '\n') # returns the data lines = splitLines(data) currency = 'CNY' nlines = 0 #print 'Import_ListOfQuotes_SHE_%s:' % market,'book',book,'sheet',sh,'nrows=',sh.nrows for line in lines[2:]: if line.find("</td><td class='cls-data-td' align='left' >"): ticker = line[:line.index('<')] name = line[51:line.index('<', 51)] name = name.replace(',', ' ') if name[-2:] == '-B': currency = 'HKD' name = name[:-2] else: currency = 'CNY' if ticker == '000517': name = 'RONGAN PROPERTY CO' if ticker == '000529': name = 'GUANGDONG MEIYA' if ticker == '000650': name = 'RHENE PHARMACY CO' quotes.addQuote(isin='', name=name, ticker=ticker, market='SHENZHEN EXCHANGE', currency=currency, place='SHE', country='CN') nlines = nlines + 1 if itrade_config.verbose: print 'Imported %d lines from %s data.' % (nlines, market) return True
class Login_boursorama(object): def __init__(self): debug("LiveUpdate_boursorama:__init__") self.m_default_host = "www.boursorama.fr" self.m_login_url = "https://www.boursorama.fr/logunique.phtml" self.m_logged = False self.m_cookies = ITradeCookies() # Manualy set the cookie that tell boursorama we are a cookie aware browser self.m_cookies.set("SUP_COOKIE=OUI") self.m_connection = ITradeConnection( cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication, connectionTimeout=itrade_config.connectionTimeout, ) debug("Boursorama login (%s) - ready to run" % self.m_default_host) # ---[ properties ] --- def name(self): return "boursorama" def desc(self): return message("login_boursorama_desc") # ---[ userinfo ] --- def saveUserInfo(self, u, p): f = open(os.path.join(itrade_config.dirUserData, "boursorama_userinfo.txt"), "w") s = u + "," + p f.write(s) f.close() def loadUserInfo(self): try: f = open(os.path.join(itrade_config.dirUserData, "boursorama_userinfo.txt"), "r") except IOError: return None, None s = f.read().strip() f.close() v = s.split(",") if len(v) == 2: return v[0].strip(), v[1].strip() return None, None # ---[ login ] --- def login(self, u=None, p=None): # load username / password (if required) if u == None or p == None: u, p = self.loadUserInfo() if u == None or p == None: print "login: userinfo are invalid - please reenter Access Information" return False try: param = { "org": "/index.phtml?", "redirect": "", "login": u, "password": p, "memo": "oui", "submit2": "Valider", } buf = self.m_connection.getDataFromUrl(self.m_login_url, data=param) except IOError, e: print "Exception occured while requesting Boursorama login page : %s" % e return False print "bourso login response :saved to bourso.html" file("bourso.html", "w").write(buf) return True
class Currencies(object):
    """Registry of exchange rates between supported currency pairs.

    Rates are keyed by the 6-letter concatenation TO+FROM (e.g. 'USDEUR')
    and stored as (in_use, rate) tuples.  Rates can be cached to disk
    (currencies.txt) and refreshed from Yahoo Finance.
    """

    def __init__(self):
        # url used to fetch one conversion rate from Yahoo Finance
        self.m_url = 'http://finance.yahoo.com/d/quotes.csv?s=%s%s=X&f=s4l1t1c1ghov&e=.csv'
        # connection is created lazily on the first get()
        self.m_connection = None
        # key 'TOFROM' -> (in_use, rate)
        self.m_currencies = {}
        self.m_list = buildListOfSupportedCurrencies()
        for curTo, curFrom in self.m_list:
            self.update(curTo, curFrom, 1.0)

    def list(self):
        return self.m_list

    # ---[ Load / Save cache file ] ---

    def update(self, curTo, curFrom, rate):
        """Record a new rate for the pair, preserving its in-use flag."""
        if curFrom == 'N/A' or curTo == 'N/A':
            return rate
        if curTo != curFrom:
            pair = self.key(curTo, curFrom)
            # keep the previous in-use flag when the pair is already known
            inuse = pair in self.m_currencies and self.m_currencies[pair][0]
            self.m_currencies[pair] = (inuse, rate)
        return rate

    def load(self, fn=None):
        """Read cached TO;FROM;RATE triples from the cache file."""
        infile = itrade_csv.read(fn, os.path.join(itrade_config.dirCacheData, 'currencies.txt'))
        if infile:
            # scan each line to read each rate
            for eachLine in infile:
                item = itrade_csv.parse(eachLine, 3)
                if item:
                    self.update(item[0], item[1], float(item[2]))

    def save(self, fn=None):
        """Write every known pair back to the cache file as TO;FROM;RATE."""
        curs = ["%s;%s;%.8f" % (pair[:3], pair[3:], entry[1])
                for pair, entry in self.m_currencies.items()]
        itrade_csv.write(fn, os.path.join(itrade_config.dirCacheData, 'currencies.txt'), curs)

    # ---[ Convert ] ---

    def key(self, curTo, curFrom):
        # canonical dictionary key for a pair
        return curTo.upper() + curFrom.upper()

    def rate(self, curTo, curFrom):
        """Stored conversion rate for the pair; 1.0 when unknown."""
        if curFrom == 'N/A' or curTo == 'N/A':
            return 1.0
        if curTo == curFrom:
            return 1.0
        entry = self.m_currencies.get(self.key(curTo, curFrom))
        if entry is None:
            return 1.0
        return entry[1]

    def convert(self, curTo, curFrom, Value):
        """Convert *Value* expressed in curFrom into curTo."""
        return Value * self.rate(curTo, curFrom)

    # ---[ Currency in use or not ? ] ---

    def used(self, curTo, curFrom):
        """In-use flag of the pair (identical currencies count as used)."""
        if curFrom == 'N/A' or curTo == 'N/A':
            return False
        if curTo == curFrom:
            return True
        entry = self.m_currencies.get(self.key(curTo, curFrom))
        if entry is None:
            return False
        return entry[0]

    def inuse(self, curTo, curFrom, bInUse):
        """Flip the in-use flag of a known pair without touching its rate."""
        if curFrom == 'N/A' or curTo == 'N/A':
            return
        if curTo == curFrom:
            return
        pair = self.key(curTo, curFrom)
        if pair in self.m_currencies:
            self.m_currencies[pair] = (bInUse, self.m_currencies[pair][1])

    def reset(self):
        """Mark every pair as not in use (rates are kept)."""
        for pair in self.m_currencies.keys():
            self.m_currencies[pair] = (False, self.m_currencies[pair][1])

    # ---[ Get Last Trade from network ] ---

    # pence quotes (GBX) are fetched via their base currency (GBP) ...
    _s1 = {
        "GBX": "GBP",
    }
    # ... then scaled by this factor
    _s2 = {
        "GBX": 100.0,
    }

    def get(self, curTo, curFrom):
        """Fetch the live rate for one pair from Yahoo and store it.

        Returns the new rate, or None when offline or on any error.
        """
        if not itrade_config.isConnected():
            return None
        if curFrom == 'N/A' or curTo == 'N/A':
            return None
        if self.m_connection is None:
            self.m_connection = ITradeConnection(cookies=None,
                                                 proxy=itrade_config.proxyHostname,
                                                 proxyAuth=itrade_config.proxyAuthentication,
                                                 connectionTimeout=itrade_config.connectionTimeout)
        # pence-style currencies are requested through their base currency
        a = self._s1.get(curFrom, curFrom)
        b = self._s1.get(curTo, curTo)
        # get data
        url = self.m_url % (a, b)
        try:
            buf = self.m_connection.getDataFromUrl(url)
        except:
            return None
        # extract the 'last' field from the CSV answer
        f = float(buf.split(',')[1])
        # scale back into pence where needed
        if curFrom in self._s2:
            f = f / self._s2[curFrom]
        if curTo in self._s2:
            f = f * self._s2[curTo]
        return self.update(curTo, curFrom, f)

    def getlasttrade(self, bAllEvenNotInUse=False):
        """Refresh every in-use pair (or all pairs) from Yahoo, then save."""
        if not itrade_config.isConnected():
            return
        for pair in self.m_currencies:
            curTo = pair[:3]
            curFrom = pair[3:]
            if bAllEvenNotInUse or self.used(curTo, curFrom):
                self.get(curTo, curFrom)
        self.save()
class Import_yahoo(object):
    """Historical quotation importer backed by the Yahoo Finance CSV
    download service."""

    def __init__(self):
        debug('Import_yahoo:__init__')
        #self.m_connection=ITradeConnection(proxy="172.30.0.3:8080")
        self.m_connection = ITradeConnection(cookies=None,
                                             proxy=itrade_config.proxyHostname,
                                             proxyAuth=itrade_config.proxyAuthentication,
                                             connectionTimeout=itrade_config.connectionTimeout)

    def name(self):
        return 'yahoo'

    def interval_year(self):
        # maximum history span (in years) fetched per request
        return 0.5

    def connect(self):
        return True

    def disconnect(self):
        pass

    def getstate(self):
        return True

    def parseDate(self, d):
        # (year, month, day) triple used to build the query string
        return (d.year, d.month, d.day)

    def splitLines(self, buf):
        # split on LF, drop empty lines, trim a single trailing CR
        chunks = [c for c in buf.split('\n') if c]
        return [c[:-1] if c[-1] == '\r' else c for c in chunks]

    def getdata(self, quote, datedebut=None, datefin=None):
        """Download daily history for *quote* between the two dates.

        Returns the history as CRLF-separated
        'KEY;DATE;OPEN;HIGH;LOW;CLOSE;VOLUME' lines, or None on error.
        """
        if not datefin:
            datefin = date.today()
        if not datedebut:
            datedebut = date.today()
        if isinstance(datedebut, Datation):
            datedebut = datedebut.date()
        if isinstance(datefin, Datation):
            datefin = datefin.date()
        first = self.parseDate(datedebut)
        last = self.parseDate(datefin)
        debug("Import_yahoo:getdata quote:%s begin:%s end:%s" % (quote, first, last))
        sname = yahooTicker(quote.ticker(), quote.market(), quote.place())
        # indices start with '^' which must be URL-escaped
        if sname[0] == '^':
            ss = "%5E" + sname[1:]
        else:
            ss = sname
        # Yahoo expects 0-based months (a/d); g=d selects daily data
        params = (
            ('s', ss),
            ('a', '%02d' % (int(first[1]) - 1)),
            ('b', first[2]),
            ('c', first[0]),
            ('d', '%02d' % (int(last[1]) - 1)),
            ('e', last[2]),
            ('f', last[0]),
            ('y', '0'),
            ('g', 'd'),
            ('ignore', '.csv'),
        )
        querystring = '&'.join(['%s=%s' % (var, str(val)) for (var, val) in params])
        url = yahooUrl(quote.market(), live=False) + '?' + querystring
        debug("Import_yahoo:getdata: url=%s ", url)
        try:
            buf = self.m_connection.getDataFromUrl(url)
        except:
            debug('Import_yahoo:unable to connect :-(')
            return None
        # pull data
        lines = self.splitLines(buf)
        if len(lines) <= 0:
            # empty content
            return None
        header = lines[0].split(',')
        data = ""
        if header[0] != "Date":
            # no valid content
            return None
        for eachLine in lines:
            sdata = eachLine.split(',')
            sdate = sdata[0]
            if sdate != "Date":
                # normalise the date to yyyymmdd when needed
                if not re_p3_1.match(sdate):
                    sdate = dd_mmm_yy2yyyymmdd(sdate)
                day_open = float(sdata[1])
                day_high = float(sdata[2])
                day_low = float(sdata[3])
                day_close = float(sdata[6])   # Adj. Close*
                day_volume = int(sdata[5])
                if day_volume >= 0:
                    # encode in EBP format
                    # ISIN;DATE;OPEN;HIGH;LOW;CLOSE;VOLUME
                    fields = (quote.key(), sdate, day_open, day_high,
                              day_low, day_close, day_volume)
                    # append
                    data = data + ';'.join([str(v) for v in fields]) + '\r\n'
        return data
def Import_ListOfQuotes_MADRID(quotes, market='MADRID EXCHANGE', dlg=None, x=0):
    """Import the list of quotes (ISIN / ticker / name) from the Bolsa de
    Madrid PDF listing and register them into *quotes*.

    The PDF is downloaded to a temporary local file, parsed page by page
    with pyPdf, then deleted.  Returns True on success, False on
    unsupported market or download error.
    """
    if itrade_config.verbose:
        print 'Update %s list of symbols' % market
    connection = ITradeConnection(
        cookies=None,
        proxy=itrade_config.proxyHostname,
        proxyAuth=itrade_config.proxyAuthentication,
        connectionTimeout=itrade_config.connectionTimeout)
    if market == 'MADRID EXCHANGE':
        url = 'http://www.bolsamadrid.es/docs/SBolsas/InformesSB/listadodevalores.pdf'
    else:
        return False

    def splitLines(buf):
        # NOTE(review): 'cr' is a closure over a variable assigned LATER,
        # inside the page loop below (the 8-char row separator extracted
        # from each page) - splitLines must only be called after cr is set
        lines = string.split(buf, cr)
        lines = filter(lambda x: x, lines)
        def removeCarriage(s):
            if s[-1] == '\r':
                return s[:-1]
            else:
                return s
        lines = [removeCarriage(l) for l in lines]
        return lines

    info('Import_ListOfQuotes_MADRID_%s:connect to %s' % (market, url))
    f = 'listadodevalores.pdf'
    n = 0
    try:
        urllib.urlretrieve(url, f)
    except:
        info('Import_ListOfQuotes_MADRID_%s:unable to connect :-(' % market)
        return False
    # returns the data
    source = open(f, 'rb')
    pdf = pyPdf.PdfFileReader(source)
    for page in pdf.pages:
        data = page.extractText()
        # skip the column headers; the table body starts after
        # 'DecimalsFixing' (+15 also drops the separator that follows)
        data = data[data.find('DecimalsFixing') + 15:]
        # first 8 chars of the body act as the row separator for this page
        cr = data[:8]
        lines = splitLines(data)
        for line in lines:
            if 'Sociedad de Bolsas' in line:
                # footer line - ignore
                pass
            else:
                # drop trailing markup and the price columns ('0,0...')
                line = line[:line.find('<')]
                line = line[:line.find('0,0')]
                # fixed-width row: 8-char ticker, 13-char ISIN, then name
                ticker = line[:8].strip()
                if 'BBVA' in ticker and len(ticker) == 5:
                    pass
                else:
                    ticker = ticker.replace('.', '-')
                isin = line[8:21]
                name = line[21:].strip()
                if not 'LYX' in name and not 'TRACKERS' in name:
                    # NOTE(review): encode to cp1252 before the byte-literal
                    # replaces below; ETF names (LYX/TRACKERS) are skipped -
                    # presumably they are already plain ASCII - TODO confirm
                    name = name.encode('cp1252')
                    name = name.replace(',', ' ')
                    name = name.replace('Ó', 'O')
                    name = name.replace('Ñ', 'N')
                    name = name.replace('Ç', 'C')
                #print isin,name,ticker,market
                quotes.addQuote(isin=isin, name=name, ticker=ticker,
                                market=market, currency='EUR',
                                place='MAD', country='ES')
                n = n + 1
    if itrade_config.verbose:
        print 'Imported %d lines from %s' % (n, market)
    source.close()
    # remove the temporary PDF file
    os.remove(f)
    return True
class News_Boursorama(object):
    """News feed connector scraping the Boursorama web pages.

    Four page kinds are supported (index into m_baseurl):
    0: company news, 1: press releases, 2: calendar, 3: advice.
    feedQuote() builds a _FeedEntry tree compatible with the other
    news connectors.
    """
    def __init__(self):
        debug('News_Boursorama:__init__')
        self.m_feed = None
        self.m_url = None
        self.m_quote = None
        self.m_baseurl = {}
        self.m_host = "www.boursorama.com"
        self.m_conn = None
        # page templates - %s receives the '1rP'-prefixed ticker
        self.m_baseurl[0] = "http://www.boursorama.com/infos/actualites/actu_societes_code.phtml?symbole=1rP%s"
        self.m_baseurl[1] = "http://www.boursorama.com/communique/communique_code.phtml?symbole=1rP%s"
        self.m_baseurl[2] = "http://www.boursorama.com/infos/calendrier_code.phtml?symbole=1rP%s"
        self.m_baseurl[3] = "http://www.boursorama.com/conseils/conseils_index_code.phtml?symbole=1rP%s"
        # printable view of one news item - %s receives the news id
        self.m_baselink = "http://www.boursorama.com/infos/imprimer_news.phtml?news=%s"
        self.m_connection = ITradeConnection(cookies = None,
                           proxy = itrade_config.proxyHostname,
                           proxyAuth = itrade_config.proxyAuthentication,
                           connectionTimeout = itrade_config.connectionTimeout
                           )

    # ---[ protected interface ] ---
    def getURL(self):
        return self.m_url

    def getFeed(self):
        return self.m_feed

    def getQuote(self):
        return self.m_quote

    def splitLines(self,buf):
        # one match per news row: 'dd/mm/yyyy</td> <td></td> ...</td>'
        p = re.compile("\d\d/\d\d/\d\d\d\d</td>[ \t\n\r]*<td></td>[ \t\n\r]*.*</td>",re.IGNORECASE|re.MULTILINE)
        return p.findall(buf)

    def getdata(self,url):
        """Fetch *url* and return its body decoded from iso-8859-1,
        or None on connection error."""
        debug("News_Boursorama:getdata: url=%s ",url)
        try:
            buf = self.m_connection.getDataFromUrl(url)
        except:
            debug('News_Boursorama:unable to connect :-(')
            return None
        buf = unicode(buf,'iso-8859-1','strict')
        return buf

    def feed(self,url):
        """Scrape *url* and build the feed (one entry per news row)."""
        self.m_url = url
        self.m_feed = _FeedEntry()
        self.m_feed.entries = []
        self.m_feed.feed = _FeedEntry()
        self.m_feed.feed.title = 'Boursorama: ' + self.m_quote.ticker()
        info('Boursorama News refresh %s',self.m_url)
        buf = self.getdata(url)
        # NOTE: shadows the builtin 'iter' - kept as-is (doc-only pass)
        iter = self.splitLines(buf)
        #print iter
        for eachLine in iter:
            # row starts with the date 'dd/mm/yyyy'
            sdate = time.strptime(eachLine[0:10], "%d/%m/%Y")
            #print '%s -> %s' % (eachLine[0:10],sdate)
            snum = re.search('news=\d*',eachLine,re.IGNORECASE|re.MULTILINE)
            if snum:
                # numeric news id after 'news='
                snum = snum.group()[5:]
                stitle = re.search('<a.*>.*</a>',eachLine,re.IGNORECASE|re.MULTILINE)
                if stitle:
                    stitle = stitle.group()
                    # keep only the anchor's inner text
                    stitle = re.search('>.*<',stitle,re.IGNORECASE|re.MULTILINE)
                    if stitle:
                        stitle = stitle.group()[1:-1]
                    entry = _FeedEntry()
                    # 'boursorama::' prefix routes goto() back to this connector
                    entry.link = ('boursorama::%s' % self.m_baselink) % snum
                    entry.title = stitle
                    entry.date = sdate
                    entry.summary = ""
                    entry.source = "boursorama"
                    self.m_feed.entries.append(entry)
        return self.m_feed

    def goto(self,html,url):
        """Fetch one news page and render it into *html* (or stdout when
        html is None)."""
        if html:
            html.paint0()
        info('goto %s',url)
        buf = self.getdata(url)
        #print buf
        if not buf:
            if html:
                html.paint_NC()
            else:
                print 'unable to connect'
            return
        # extract the article title row
        title = re.search('<tr>[ \t\n\r]+<td.*</font></td>[ \t\n\r]+</tr>',buf,re.IGNORECASE|re.MULTILINE|re.DOTALL)
        if title:
            title = title.group()
        else:
            title = ''
        # extract the article body (drop the trailing '</table>')
        buf = re.search('<tr>[ \t\n\r]*<td>.*</table>',buf,re.IGNORECASE|re.MULTILINE|re.DOTALL)
        if buf:
            buf = buf.group()[:-8]
            #print '----------------('
            #print buf
            #print ')----------------'
            page = '<html><meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"><body>' + "<br><a href=':back'>%s</a><H3>" % message('backtolist') + title + "</H3>" + buf + "<br><br><a href=':back'>%s</a>" % message('backtolist') + "</body></html>"
            if html:
                html.SetPageWithoutCache(page)
            else:
                print page
        else:
            if html:
                html.paint_NC()
            else:
                print 'empty'

    # ---[ public interface ] ---
    def feedQuote(self,quote,lang=None,page=0):
        """Feed for *quote*; *page* selects the m_baseurl page kind."""
        self.m_quote = quote
        if lang==None:
            lang = self.m_quote.country()
        return self.feed(self.m_baseurl[page] % self.m_quote.ticker())
def Import_ListOfQuotes_SHG(quotes, market='SHANGHAI EXCHANGE', dlg=None, x=0): if itrade_config.verbose: print 'Update %s list of symbols' % market connection = ITradeConnection(cookies=None, proxy=itrade_config.proxyHostname, proxyAuth=itrade_config.proxyAuthentication) def splitLines(buf): lines = string.split(buf, '\n') lines = filter(lambda x: x, lines) def removeCarriage(s): if s[-1] == '\r': return s[:-1] else: return s lines = [removeCarriage(l) for l in lines] return lines # Download SSE A SHARE urlA = 'http://www.sse.com.cn/sseportal/webapp/datapresent/queryindexcnpe?indexCode=000002&CURSOR=' ch = '<TD class=content bgColor=white><a href="/sseportal/webapp/datapresent/SSEQueryListCmpAct?reportName=QueryListCmpRpt&REPORTTYPE=GSZC&COMPANY_CODE=' cursor = 1 count = 0 for cursor in range(1, 921, 20): url = urlA + str(cursor) info('Import_ListOfQuotes_SSE A SHARE:connect to %s' % url) req = urllib2.Request(url) req.add_header( 'User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20041202 Firefox/1.0' ) try: f = urllib2.urlopen(req) data = f.read() f.close() #data=connection.getDataFromUrl(url) except: print 'Import_ListOfQuotes_SSE A SHARE:unable to connect to', url return False lines = splitLines(data) for line in lines: if ch in line: ticker = line[(line.find(ch) + len(ch)):line.find('&PRODUCTID=')] name = line[(line.find('">') + 2):line.find('</a> </TD>')] if ticker == '600717': name = 'TIANJIN PORT CO. LTD.' 
name = name.replace('&', '&') name = name.replace('&nbsp;', '& ') name = name.replace(' ', '') name = name.replace(',', ' ') name = name.replace(';', ' ') name = name.replace('£¦', '&') name = name.replace('£', '-') name = name.replace('£¬', ' ') name = name.replace('£¨', ' (') name = name.replace('£©', ' )') name = name.replace('£®', '') name = name.replace('£¬', '') name = name.replace('¡¯', '\'') name = name.replace('¡ª', '') name = name.replace('¡¡', ' ') name = name.replace(' ', ' ') name = name.replace('..', '.') name = name.upper() name = name.replace('COMPANY', 'CO.') name = name.replace('LIMITED', 'LTD') name = name.replace('CORPORATION', 'CORP.') name = name.replace('DEVELOPMENT', 'DEV.') count = count + 1 dlg.Update(x, 'SSE A SHARE: %s / ~1000' % cursor) quotes.addQuote(isin='', name=name, ticker=ticker, market='SHANGHAI EXCHANGE', currency='CNY', place='SHG', country='CN') # Download SSE B SHARE url = 'http://www.sse.com.cn/sseportal/en_us/ps/bshare/lccl.shtml' info('Import_ListOfQuotes_SSE B SHARE:connect to %s' % url) try: data = connection.getDataFromUrl(url) except: debug('Import_ListOfQuotes_SSE B SHARE:unable to connect :-(') return False lines = splitLines(data) #typical lines # <td class="table3" bgcolor="#dbedf8" > SRB</td> # <td class="table3" bgcolor="#dbedf8" > 900907</td> # <td class="table3" bgcolor="#dbedf8" > Shanghai Rubber Belt Co., Ltd.</td> # <td class="table3" bgcolor="#dbedf8" > 19th Floor, 1600 Shiji Avenue, Shanghai</td> # <td class="table3" bgcolor="#dbedf8" > 200122</td> # </tr> # <tr> # <td class="table3" bgcolor="white" > SCAC B</td> # <td class="table3" bgcolor="white" > 900908</td> # <td class="table3" bgcolor="white" > Shanghai Chlor Alkali Chemical # Co., Ltd.</td> # <td class="table3" bgcolor="white" > 17th Floor, 1271 Pudong Nan Road, Shanghai</td> # <td class="table3" bgcolor="white" > 200122</td> n = 1 i = 0 dlg.Update(x, 'SHANGHAI B SHARE') for line in lines: if '<td class="table_title2" bgcolor="#337fb2" >Post 
Code</td>' in line: n = 0 if n == 0: if ('<td class="table3" bgcolor="#dbedf8" > ' in line or '<td class="table3" bgcolor="white" > ' in line): i = i + 1 ch = line[(line.find('>') + 2):(line.find('</td>'))] if i == 2: ticker = ch elif i == 3: name = ch elif i == 5: i = 0 name = name.replace(' ', '') name = name.replace('&', '&') name = name.replace(',', '') count = count + 1 quotes.addQuote(isin='', name=name, ticker=ticker, market='SHANGHAI EXCHANGE', currency='CNY', place='SHG', country='CN') elif i == 3: name = name + ' ' + line.strip() if name.find('</td>'): name = name[:-5] if itrade_config.verbose: print 'Imported %d lines from SHANGHAI EXCHANGE' % count return True
class LiveUpdate_yahoojp(object):
    """Live-quote connector scraping Yahoo! Japan pages.

    Quotes are delayed by 15 minutes and expressed in the Asia/Tokyo
    timezone.  getdata() fetches one page per quote and returns a
    ';'-separated record (key;date;open;high;low;last;volume;percent;previous)
    or None when the data are unavailable or invalid.
    """

    def __init__(self, market='TOKYO EXCHANGE'):
        debug('LiveUpdate_yahoojp:__init__')
        self.m_connected = False
        self.m_livelock = thread.allocate_lock()
        # per-quote caches, keyed by quote.key()
        self.m_dateindice = {}
        self.m_clock = {}
        self.m_dcmpd = {}
        # most recent clock (minutes since midnight) / date seen on any quote
        self.m_lastclock = 0
        self.m_lastdate = "20070101"
        self.m_connection = ITradeConnection(
            cookies=None,
            proxy=itrade_config.proxyHostname,
            proxyAuth=itrade_config.proxyAuthentication,
            connectionTimeout=itrade_config.connectionTimeout
        )

    def splitLines(self, buf):
        """Split *buf* into non-empty lines, stripping any trailing CR."""
        lines = [l for l in buf.split('\n') if l]
        return [l[:-1] if l[-1] == '\r' else l for l in lines]

    # ---[ reentrant ] ---

    def acquire(self):
        self.m_livelock.acquire()

    def release(self):
        self.m_livelock.release()

    # ---[ properties ] ---

    def name(self):
        # name of the connector
        return 'yahoojp'

    def delay(self):
        # delay in minutes to get a live data
        # put 0 if no delay (realtime)
        return 15

    def timezone(self):
        # timezone of the livedata (see pytz all_timezones)
        return "Asia/Tokyo"

    # ---[ connexion ] ---

    def connect(self):
        return True

    def disconnect(self):
        pass

    def alive(self):
        return self.m_connected

    # ---[ state ] ---

    def getstate(self):
        # no state
        return True

    # ---[ code to get data ] ---

    def yahooDate(self, date):
        """Convert a quoted Yahoo date '"M/D/YYYY"' to 'YYYYMMDD'.

        Raises ValueError when a field is not numeric.
        """
        sdate = date[1:-1].split('/')
        month = int(sdate[0])
        day = int(sdate[1])
        year = int(sdate[2])
        return "%4d%02d%02d" % (year, month, day)

    def convertClock(self, place, clock, date):
        """Convert an 'H:MM' *clock* (connector tz) to the *place* timezone.

        *date* is 'YYYYMMDD'.  Also tracks the most recent clock/date seen
        in m_lastclock / m_lastdate.
        """
        minute = clock[-2:]
        hour = clock[:-3]
        val = (int(hour) * 60) + int(minute)
        if val > self.m_lastclock and date >= self.m_lastdate:
            self.m_lastdate = date
            self.m_lastclock = val
        # convert from connector timezone to market place timezone
        mdatetime = datetime(int(date[0:4]), int(date[4:6]), int(date[6:8]),
                             val // 60, val % 60)
        mdatetime = convertConnectorTimeToPlaceTime(mdatetime, self.timezone(), place)
        return "%d:%02d" % (mdatetime.hour, mdatetime.minute)

    def getdata(self, quote):
        """Fetch and decode live data for *quote* from Yahoo! Japan.

        Returns 'key;date;open;high;low;last;volume;percent;previous'
        or None on any fetch/parse/validation failure.
        """
        sname = yahooTicker(quote.ticker(), quote.market(), quote.place())
        ss = sname
        url = yahooUrlJapan(quote.market(), live=True) + '?' + 's=%s&d=v2' % (ss)
        debug("LiveUpdate_yahoojp:getdata: url=%s", url)
        try:
            data = self.m_connection.getDataFromUrl(url)
            # each </td> closes one cell of interest: turn cells into lines
            data = data.replace('</td>', '\n')
            lines = self.splitLines(data)

            # extract the year from the '... JST <year> -->' banner.
            # fix: keep the value once found (the old code reset it to
            # '0000' on every following line).
            year = '0000'
            for line in lines:
                if 'uncompressed' in line:
                    year = line[line.find('JST ') + 4:line.find(' -->')]
                    break

            ch = '<td nowrap align=center>'
            i = 0
            n = 0
            # typical cells (after the </td> -> newline substitution):
            # 1 datetime        <td nowrap align=center>1/21
            # 2 last            <td nowrap><b>226</b>
            # 3 change today    <td nowrap><font color=ff0020>-4</font>
            # 4 change percent  <td nowrap><font color=ff0020>-1.74%</font>
            # 5 volume          <td nowrap>1,705,200
            # 6 open            <td nowrap>221
            # 7 high            <td nowrap>230
            # 8 low             <td nowrap>221
            for line in lines:
                if '§<strong>' in line:
                    # mojibake markers of the Japanese date line (month/day
                    # kanji read as latin-1) -- extract MM and DD
                    local_date = line[line.find('§<strong>') + 9:line.find('Æü ')]
                    local_date = local_date.replace('·î ', ' ').split()
                    month = local_date[0]
                    if len(month) == 1:
                        month = '0' + month
                    day = local_date[1]
                    if len(day) == 1:
                        day = '0' + day
                    local_date = '"' + month + '/' + day + '/' + year + '"'
                if ch in line:
                    n = 1
                if n == 1:
                    i = i + 1
                    if i == 1:
                        date_time = line[len(ch):]
                        if date_time.find(':') != -1:
                            # last clock -- NOTE: assumes the date banner was
                            # seen before this cell, else local_date is unbound
                            sclock = '"' + date_time + '"'
                            date = local_date
                        else:
                            # date-only or '---': datation unusable either way
                            date = 'N/A'
                            sclock = "N/A"
                    if i == 2:
                        last = line[line.find('<b>') + 3:line.find('</b>')]
                        if last == '---':
                            last = '0.0'
                        last = last.replace(',', '')
                    if i == 3:
                        if '<td nowrap><font color=ff0020>' in line:
                            change = line[line.find('ff0020>') + 7:line.find('</font>')]
                        else:
                            change = line[line.find('<td nowrap>') + 11:]
                        if change == '---':
                            change = 'N/A'
                    if i == 4:
                        if '<td nowrap><font color=ff0020>' in line:
                            change_percent = line[line.find('ff0020>') + 7:line.find('</font>')]
                        else:
                            change_percent = line[line.find('<td nowrap>') + 11:]
                    if i == 5:
                        volume = line[line.find('<td nowrap>') + 11:]
                        volume = volume.replace(',', '')
                        if volume == '---':
                            volume = '0'
                    if i == 6:
                        open_ = line[line.find('<td nowrap>') + 11:]
                        if open_ == '---':
                            open_ = 'N/A'
                        open_ = open_.replace(',', '')
                    if i == 7:
                        high = line[line.find('<td nowrap>') + 11:]
                        if high == '---':
                            high = 'N/A'
                        high = high.replace(',', '')
                    if i == 8:
                        low = line[line.find('<td nowrap>') + 11:]
                        if low == '---':
                            low = 'N/A'
                        low = low.replace(',', '')
                        # one full record decoded: assemble it and stop
                        data = ','.join(['"' + ss + '"', last, date, sclock,
                                         change, open_, high, low, volume])
                        break

            sdata = data.split(',')
            if len(sdata) < 9:
                if itrade_config.verbose:
                    info('invalid data (bad answer length) for %s quote' % (quote.ticker()))
                return None

            # connexion / clock
            self.m_connected = True

            # store for later use
            key = quote.key()

            sclock = sdata[3][1:-1]
            if sclock == "N/A" or sdata[2] == '"N/A"' or len(sclock) < 5:
                if itrade_config.verbose:
                    info('invalid datation for %s : %s : %s' % (quote.ticker(), sclock, sdata[2]))
                return None

            # start decoding
            symbol = sdata[0][1:-1]
            if symbol != sname:
                if itrade_config.verbose:
                    info('invalid ticker : ask for %s and receive %s' % (sname, symbol))
                return None

            # date
            try:
                date = self.yahooDate(sdata[2])
                self.m_dcmpd[key] = sdata
                self.m_clock[key] = self.convertClock(quote.place(), sclock, date)
                self.m_dateindice[key] = sdata[2].replace('"', '')
            except ValueError:
                if itrade_config.verbose:
                    info('invalid datation for %s : %s : %s' % (quote.ticker(), sclock, sdata[2]))
                return None

            # decode data
            value = float(sdata[1])

            if sdata[4] == 'N/A':
                debug('invalid change : N/A')
                return None
            change = float(sdata[4])

            if sdata[5] == 'N/A':
                debug('invalid open : N/A')
                return None
            open_ = float(sdata[5])

            if sdata[6] == 'N/A':
                debug('invalid high : N/A')
                return None
            high = float(sdata[6])

            if sdata[7] == 'N/A':
                debug('invalid low : N/A')
                return None
            low = float(sdata[7])

            volume = int(sdata[8])
            if volume < 0:
                debug('volume : invalid negative %d' % volume)
                return None
            if volume == 0 and quote.list() != QLIST_INDICES:
                debug('volume : invalid zero value %d' % volume)
                return None

            # previous close must be strictly positive to compute a percent
            if value - change <= 0:
                return None
            percent = (change / (value - change)) * 100.0

            # KEY;DATE;OPEN;HIGH;LOW;CLOSE;VOLUME;PERCENT;PREVIOUS
            data = (key, date, open_, high, low, value, volume, percent,
                    (value - change))
            return ';'.join(str(val) for val in data)
        except Exception:
            # best effort: any network or parsing failure yields no data
            debug('LiveUpdate_yahoojapan:unable to connect :-(')
            return None

    # ---[ cache management on data ] ---

    def getcacheddata(self, quote):
        # no cache
        return None

    def iscacheddataenoughfreshq(self):
        # no cache
        return False

    def cacheddatanotfresh(self):
        # no cache
        pass

    # ---[ notebook of order ] ---

    def hasNotebook(self):
        return True

    def currentNotebook(self, quote):
        """Return (buy, sell) order lists for *quote* -- always empty here."""
        key = quote.key()
        if key not in self.m_dcmpd:
            # no data for this quote !
            return [], []
        buy = []
        sell = []
        return buy, sell

    # ---[ status of quote ] ---

    def hasStatus(self):
        return itrade_config.isConnected()

    def currentStatus(self, quote):
        """Return a 5-tuple status for *quote* (last field is its clock)."""
        key = quote.key()
        if key not in self.m_dcmpd:
            # no data for this quote !
            return "UNKNOWN", "::", "0.00", "0.00", "::"
        st = 'OK'
        cl = '::'
        return st, cl, "-", "-", self.m_clock[key]

    def currentClock(self, quote=None):
        """Return the 'H:MM' clock for *quote*, or the latest clock seen."""
        if quote is None:
            if self.m_lastclock == 0:
                return "::"
            # hh:mm
            return "%d:%02d" % (self.m_lastclock // 60, self.m_lastclock % 60)
        key = quote.key()
        if key not in self.m_clock:
            # no data for this quote !
            return "::"
        return self.m_clock[key]

    def currentDate(self, quote=None):
        """Return the date string cached for *quote*, or '-/-/-'."""
        key = quote.key()
        if key not in self.m_dateindice:
            # no date for this quote !
            return "-/-/-"
        return self.m_dateindice[key]

    def currentTrades(self, quote):
        # clock,volume,value
        return None

    def currentMeans(self, quote):
        # means: sell,buy,last
        return "-", "-", "-"