def msnMoneyTenYearSummary(symbol, local=False):
    """Scrape the MSN Money ten-year financial summary for *symbol*.

    Parameters:
        symbol: ticker symbol; an empty string returns None.
        local: forwarded to utils.openUrl (presumably selects a cached
            local copy of the page -- TODO confirm against utils).

    Returns:
        (income, balance) -- two dicts, one per "mnytbl" table on the
        page, mapping each row's first cell (epoch seconds when it
        parses as "%m/%y", otherwise the raw string) to a tuple of the
        remaining cell values; or None when *symbol* is empty.
    """
    import bs4
    import utils
    if not symbol:
        return None
    url = "http://investing.money.msn.com/investments/financial-statements?symbol=" + symbol
    url, page = utils.openUrl(url, local)
    print(url)
    soup = bs4.BeautifulSoup(page, "lxml")
    tables = soup.find_all("table", {"class": "mnytbl"})

    def _parse_table(table):
        # The income-statement and balance-sheet tables share the same
        # layout, so one parser serves both (the original duplicated
        # this loop verbatim for each table).
        parsed = {}
        for row in table.find_all("tr"):
            cols = row.find_all("td")
            if not cols:  # header rows carry no <td> cells
                continue
            record = ()
            for icol, col in enumerate(cols):
                entry = col.find_all(text=True)[1].strip()
                if icol == 0:
                    # First column is a date like "12/14"; keep the raw
                    # string when it does not parse.
                    try:
                        record = record + (utils.makeEpochTime(str(entry), "%m/%y"),)
                    except ValueError:
                        record = record + (str(entry),)
                else:
                    record = utils.extractData(entry, record)
            if record:  # guard: skip rows that yielded no data at all
                parsed[record[0]] = record[1:]
        return parsed

    income = _parse_table(tables[0])   # income statement
    balance = _parse_table(tables[1])  # balance sheet
    return income, balance
def msnMoneyQuote(symbol, local=False):
    """Scrape the MSN Money stock-price page for *symbol*.

    Parameters:
        symbol: ticker symbol; an empty string returns None.
        local: forwarded to utils.openUrl (presumably selects a cached
            local copy of the page -- TODO confirm against utils).

    Returns:
        (details, highlights) -- each a dict with a single key, the
        page's date stamp in epoch seconds, mapping to the tuple of
        values extracted from the "details" / "financial highlights"
        tables; or None when *symbol* is empty.
    """
    import bs4
    import utils
    if not symbol:
        return None
    url = 'http://investing.money.msn.com/investments/stock-price?symbol=' + symbol
    url, page = utils.openUrl(url, local)
    print(url)
    soup = bs4.BeautifulSoup(page, "lxml")
    # Date stamp sits in the footer span below the "details" table; the
    # third whitespace-separated token is the "%m/%d/%Y" date.
    footers = soup.find_all("span", {"class": "foot"})
    string = footers[0].find_all(text=True)[0].strip().split(' ')[2]
    date = utils.makeEpochTime(string, '%m/%d/%Y')
    tables = soup.find_all("table", {"class": "mnytbl"})
    # Parse "details" table: one value per row, always in the second
    # cell.  (The original kept a row counter here that was never read.)
    record = ()
    for row in tables[0].find_all("tr"):
        cells = row.find_all("td")
        if not cells:
            continue
        data = cells[1].find_all(text=True)[1].strip()
        record = utils.extractData(data, record)
    details = {date: record}
    # Parse "financial highlights" table.  Rows 2 and 3 wrap their value
    # in extra markup, so the useful text node sits at a different index.
    record = ()
    cntr = 0
    for row in tables[1].find_all("tr"):
        cells = row.find_all("td")
        if not cells:
            continue
        index = 2 if cntr in (2, 3) else 1
        data = cells[1].find_all(text=True)[index].strip()
        record = utils.extractData(data, record)
        cntr += 1
    highlights = {date: record}
    return details, highlights
def msnMoneyBalanceSheet(symbol, local=False):
    """Scrape the annual balance-sheet statement for *symbol* from MSN Money.

    Parameters:
        symbol: ticker symbol; an empty string returns None.
        local: forwarded to utils.openUrl (presumably selects a cached
            local copy of the page -- TODO confirm against utils).

    Returns:
        (titles, data) -- *titles* is the list of row labels from the
        first column; *data* maps each data column's first value (its
        epoch date) to the tuple of that column's remaining values.
        Returns None when *symbol* is empty.
    """
    import bs4
    import utils
    if not symbol:
        return None
    url = ('http://investing.money.msn.com/investments/stock-balance-sheet/?symbol='
           + symbol + '&stmtView=Ann')
    url, page = utils.openUrl(url, local)
    print(url)
    soup = bs4.BeautifulSoup(page, "lxml")
    rows = soup.find_all("tr")
    # Data-column count: cells in the last row, minus the label cell.
    ncols = len(rows[-1].find_all("td")) - 1
    titles = []
    columns = [() for _ in range(ncols)]
    # Which text node holds the value depends on how many text nodes the
    # cell contains; cells with any other count are decoration.
    text_index = {1: 0, 3: 1, 7: 4}
    # Rows 1, 2 and 4 carry dates; every other row is numeric data.
    date_formats = {1: '%Y', 2: '%m/%d/%Y', 4: '%m/%d/%Y'}
    for irow, row in enumerate(rows):
        for icol, col in enumerate(row.find_all("td")):
            entries = col.find_all(text=True)
            if len(entries) not in text_index:
                continue
            entry = entries[text_index[len(entries)]].strip().encode("utf-8")
            if irow in (7, 30):  # separator rows -- skip entirely
                continue
            if not len(entry):
                continue
            if icol == 0:
                # First column holds the row label.
                titles.append(str(entry))
            elif irow in date_formats:
                secs = utils.makeEpochTime(str(entry), date_formats[irow])
                columns[icol - 1] = columns[icol - 1] + (secs,)
            else:
                columns[icol - 1] = utils.extractData(entry, columns[icol - 1])
    # Key each column by its leading (date) entry.
    data = {column[0]: column[1:] for column in columns}
    return titles, data
def msnMoneyHistoricalPrices(symbol, local=False):
    """Scrape historical share prices and dividends for *symbol*.

    Parameters:
        symbol: ticker symbol; an empty string returns None.
        local: forwarded to utils.openUrl (presumably selects a cached
            local copy of the page -- TODO confirm against utils).

    Returns:
        (titles, prices, dividends) -- *titles* is the list of table
        headers; *prices* maps an epoch date to the tuple of that row's
        remaining values; *dividends* maps each ex-dividend epoch date
        to (dividend paid, share price taken from the following row).
        Returns None when *symbol* is empty.
    """
    import bs4
    import utils
    if not symbol:
        return None
    url = ("http://investing.money.msn.com/investments/equity-historical-price/"
           "?PT=7&D4=1&DD=1&D5=0&DCS=2&MA0=0&MA1=0&CF=0&nocookie=1&SZ=0&symbol="
           + symbol)
    url, page = utils.openUrl(url, local)
    print(url)
    soup = bs4.BeautifulSoup(page, "lxml")
    rows = soup.find_all("tr")
    titles = []
    prices = {}
    dividends = {}
    for irow, row in enumerate(rows):
        cols = row.find_all("td")
        # Table header cells supply the column titles.
        for header in row.find_all("th"):
            entry = header.find_all(text=True)[1].strip()
            if entry:
                titles.append(str(entry))
        if len(cols) == 3:
            # Dividend row: ex-dividend date, dividend paid, plus the
            # share price read from the *next* table row.
            date = 0
            div = 0.0
            price = 0.0
            try:
                entry = cols[0].find_all(text=True)[1].strip()
                if entry:
                    date = utils.makeEpochTime(str(entry), "%m/%d/%Y")
            except ValueError:
                date = 0
            try:
                entry = cols[1].find_all(text=True)[1].strip().split(" ")[0]
                if entry:
                    div = float(entry)
            except ValueError:
                div = 0.0
            try:
                # BUG FIX: the original tested "irow < len(rows)" -- which
                # is always true while enumerating rows -- and then read
                # rows[irow + 1], raising an IndexError (NOT caught by the
                # ValueError handler) when the dividend row is last.
                if irow + 1 < len(rows):
                    entry = rows[irow + 1].find_all("td")[4].find_all(text=True)[1].strip()
                    price = float(entry)
            except ValueError:
                price = 0.0
            if date != 0:
                dividends[date] = (div, price)
        elif len(cols) == 6:
            # Price row: first cell is the date, the rest are values.
            record = ()
            for col in cols:
                entry = col.find_all(text=True)[1].strip()
                if not entry:
                    continue
                try:
                    record = record + (utils.makeEpochTime(str(entry), "%m/%d/%Y"),)
                except ValueError:
                    record = utils.extractData(entry, record)
            if record:  # guard: skip rows that yielded no data at all
                prices[record[0]] = record[1:]
    return titles, prices, dividends