def BuildKeyStatEntries(self, stock):
	"""Scrape the Yahoo key-statistics page for *stock* into self.KeyStatsDict.

	Fills valuation, balance-sheet, share-statistics and dividend entries.
	Relies on the fixed order in which the tables appear on the page.
	"""
	pagesoup = bs(uo(self.generateKeyStatUrl(stock)))

	# The page holds several look-alike tables; fetch each family once and
	# pick individual tables by index (the original re-ran this filter).
	plaintables = pagesoup.findAll("table", {'width': "100%", 'cellpadding': "2", 'cellspacing': "1", 'border': "0"})
	modtables = pagesoup.findAll("table", {'class': "yfnc_datamodoutline1", 'width': "100%", 'cellpadding': "0", 'cellspacing': "0", 'border': "0"})

	# Table 1: Valuation Measures
	table1rows = plaintables[0].findAll("tr")
	self.KeyStatsDict['MarketCap'] = table1rows[0].findAll('td')[1].text
	self.KeyStatsDict['EnterpriseValue'] = table1rows[1].findAll('td')[1].text
	self.KeyStatsDict['TrailingPE'] = table1rows[2].findAll('td')[1].text
	self.KeyStatsDict['ForwardPE'] = table1rows[3].findAll('td')[1].text
	self.KeyStatsDict['PEGRatio'] = table1rows[4].findAll('td')[1].text
	# NOTE: key spellings ('Prices/...') kept as-is -- callers read these keys.
	self.KeyStatsDict['Prices/Sales'] = table1rows[5].findAll('td')[1].text
	self.KeyStatsDict['Prices/Book'] = table1rows[6].findAll('td')[1].text
	self.KeyStatsDict['Enterprise Value/Revenue'] = table1rows[7].findAll('td')[1].text
	self.KeyStatsDict['Enterprise Value/EBITDA'] = table1rows[8].findAll('td')[1].text

	# Table 6: Balance Sheet
	# BUG FIX: rows were previously taken from table1, so 'Debt/Equity'
	# silently received the PEG-ratio cell; read from table 6 instead.
	table6rows = plaintables[5].findAll("tr")
	self.KeyStatsDict['Debt/Equity'] = table6rows[4].findAll('td')[1].text

	# Table 9: Share Statistics
	table9rows = modtables[8].findAll("tr")
	self.KeyStatsDict['PercentInsiders'] = table9rows[6].findAll('td')[1].text
	self.KeyStatsDict['PercentInstitutions'] = table9rows[7].findAll('td')[1].text

	# Table 10: Dividends & Splits
	table10rows = modtables[9].findAll("tr")
	self.KeyStatsDict['FADY'] = table10rows[3].findAll('td')[1].text  # Forward Annual Dividend Yield
	self.KeyStatsDict['TADY'] = table10rows[5].findAll('td')[1].text  # Trailing Annual Dividend Yield
	def BuildProfileEntries(self, stock):
		pagesoup = bs(uo(self.generateProfileUrl(stock)))
		ProfileTable = pagesoup.findAll("table", {'class':"yfnc_datamodoutline1"})[0].findAll("table")[0]
		Entries = ProfileTable.findAll("tr")
		self.KeyStatsDict['Index'] = Entries[0].findAll('td')[1].text
		self.KeyStatsDict['Sector'] = Entries[1].findAll('td')[1].text
		self.KeyStatsDict['Industry'] = Entries[2].findAll('td')[1].text
		self.KeyStatsDict['Employees'] = Entries[3].findAll('td')[1].text
Esempio n. 3
0
    def rawData(self):
        """Fetch comma-separated readings from the device's /data/ endpoint.

        Fields that do not parse as integers are skipped.  Returns the
        values as a (transposed) numpy array; .T is a no-op on a 1-D array,
        kept for compatibility with existing callers.
        """
        data = uo(self.site + "/data/").read()
        values = []
        for field in data.split(","):
            # BUG FIX: the original caught bare Exception and performed a
            # dead assignment (off = "on"); only ValueError (non-numeric
            # field) is expected here, and the field is simply skipped.
            try:
                values.append(int(field))
            except ValueError:
                continue

        return array(values).T
Esempio n. 4
0
"""Demonstrate high quality docstrings.

Repeatedly search for and extract substrings that match a particular pattern.
Construct a well-formed regular expression to match and extract the link values
"""

from urllib import urlopen as uo
from re import findall as f

default = 'http://www.py4inf.com/book.html'
url = raw_input('Enter URL: ') or default
html = uo(url).read()
links = f('href="(http://.*?)"', html)
for link in links:
    print link
Esempio n. 5
0
def read():
    """Trigger a device read and return the payload between 'GO,' and ',ST'."""
    # Poke the write endpoint first, then give the device a moment to answer.
    uo(url + "/write/s0000").read()
    sleep(.1)
    raw = uo(url + "/read/").read()
    # Keep only the text after the last 'GO,' marker, up to the ',ST' trailer.
    payload = raw.split("GO,")[-1]
    return payload.split(",ST")[0]
Esempio n. 6
0
def write(ss):
    """Send *ss* to the device's write endpoint, discarding the response."""
    endpoint = url + "/write/" + ss
    uo(endpoint).read()
Esempio n. 7
0
"""Retrieve a non-text (or binary) file.
This program reads all of the data in at once across the network and stores it
in the variable img in the main memory of your computer, then opens the file
cover.jpg and writes the data out to your disk. This will work if the size of
the file is less than the size of the memory of your computer.

However if this is a large audio or video file, this program may crash or at
least run extremely slowly when your computer runs out of memory. In order to
avoid running out of memory, we retrieve the data in blocks (or buffers) and
then write each block to your disk before retrieving the next block. This way
the program can read any size file without using up all of the memory you have
in your computer.
"""

from urllib import urlopen as uo

default = 'http://www.py4inf.com/cover.jpg'
url = raw_input('Enter URL: ') or default
img = uo(url)
fhand = open('cover.jpg', 'w')
size = 0
while True:
    info = img.read(100000)
    if len(info) < 1:
        break
    size = size + len(info)
    fhand.write(info)
print size, 'characters copied.'
fhand.close()
"""Following Links in Python.

Start at: http://python-data.dr-chuck.net/known_by_Christopher.html

"""

from urllib import urlopen as uo
from BeautifulSoup import BeautifulSoup as bS
u = raw_input('Enter URL: ')
c = int(raw_input('Enter count: '))
p = int(raw_input('Enter position: '))
print "Starting  : ", u
for i in range(c):
    h = uo(u).read()
    s = bS(h)
    t = s('a')
    u = str((t[p - 1]).get('href', None))
    print "Retrieving: ", u
Esempio n. 9
0
 def getGainBaseline(self):
     """Download the gain baseline and flatten its nested JSON lists."""
     raw = uo(self.site + "/getGainBaseline").read()
     nested = json.loads(raw)
     flat = []
     for sub in nested:
         flat.extend(sub)
     return flat
Esempio n. 10
0
 def sendCommand(self,c):
     """Send command *c* to the device and return the raw response body."""
     # BUG FIX: 'site' was a bare (likely unbound) global here; the sibling
     # method getGainBaseline reads the endpoint from self.site.
     return uo(self.site+"/sendCmd/%s" % c).read()
Esempio n. 11
0
def write(x):
    """Send *x* to the site's write endpoint and display the HTTP response."""
    # NOTE(review): unlike the other helpers there is no '/' before "write";
    # presumably 'site' already ends with a slash -- confirm.
    endpoint = site + "write/" + x
    display(uo(endpoint))
def awrite(val):
    """Write *val* to the device, then pause half a second to let it settle."""
    endpoint = site + "/write/" + str(val)
    uo(endpoint).read()
    sleep(.5)
def aread():
    """Fetch and return the device's current reading as a raw string."""
    endpoint = site + "/read/"
    return uo(endpoint).read()
place_id and your Python code below.

Hint:
The first seven characters of the place_id are "ChIJz2i ..."

Make sure to retrieve the data from the URL specified above and not
the normal Google API. Your program should work with the Google API -
but the place_id may not match for this assignment.

"""

from urllib import urlopen as uo, urlencode as ue
from json import loads as ls  # , dumps as ds

# serviceurl = 'http://maps.googleapis.com/maps/api/geocode/json?'
serviceurl = 'http://python-data.dr-chuck.net/geojson?'
default = "AGH University of Science and Technology"
address = raw_input('Enter location: ') or default
url = serviceurl + ue(
    {
        'sensor': 'false',
        'address': address
    }
)
print 'Retrieving', url
uh = uo(url)
data = uh.read()
print 'Retrieved', len(data), 'characters'
js = ls(str(data))
print "Place id: ", js['results'][0]['place_id']
Esempio n. 15
0
"""Reading binary files using urllib.

Retrieve a non-text (or binary) file.
This program reads all of the data in at once across the network and stores it
in the variable img in the main memory of your computer, then opens the file
cover.jpg and writes the data out to your disk. This will work if the size of
the file is less than the size of the memory of your computer.

However if this is a large audio or video file, this program may crash or at
least run extremely slowly when your computer runs out of memory. In order to
avoid running out of memory, we retrieve the data in blocks (or buffers) and
then write each block to your disk before retrieving the next block. This way
the program can read any size file without using up all of the memory you have
in your computer.
"""

from urllib import urlopen as uo

default = 'http://www.py4inf.com/cover.jpg'
url = raw_input('Enter URL: ') or default
# Read the whole image into memory at once (fine for small files; see the
# module docstring for the chunked alternative for large files).
img = uo(url).read()
# BUG FIX: mode was 'w' (text); JPEG data is binary and text mode corrupts
# it on platforms that translate line endings.  'with' also guarantees the
# file is closed even if the write fails.
with open('cover.jpg', 'wb') as fhand:
    fhand.write(img)