Example #1
print('testing url_feeder')

from aquire import url_feeder

# Walk the first five (site, dic) pairs the feeder yields.
feeder = url_feeder('sp500.csv')
i = 0
for (site, dic) in feeder:
    i += 1
    if i > 5:
        break
    print(site)

print('testing url_grabber')

from aquire import url_grabber

# Fetch the page for each of the first five sites.
feeder = url_feeder('sp500.csv')
grabber = url_grabber()
i = 0
for (site, dic) in feeder:
    i += 1
    if i > 5:
        break
    html = grabber.retrieve(site)
    print(len(html))

print('testing data_extractor')

from prune import data_extractor

# Run the extractor on the last page retrieved in the loop above.
extractor = data_extractor(feeder)
extractor.extract(html[0])
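
Example #1 unpacks each item as a (site, dic) pair, so this version of url_feeder evidently yields each URL together with a per-row dictionary read from sp500.csv. The aquire module itself is not shown here, so the sketch below is a minimal, hypothetical stand-in (the 'url' column name is an assumption), not the project's actual implementation.

import csv

class url_feeder(object):
    # Hypothetical sketch of aquire.url_feeder: yield (url, row) pairs
    # from a CSV file. The real class may construct its URLs differently.
    def __init__(self, path):
        self.path = path

    def __iter__(self):
        with open(self.path) as f:
            for row in csv.DictReader(f):
                # Assumes a 'url' column; the actual layout of
                # sp500.csv is not shown in these examples.
                yield row['url'], row
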
Example #2
def __init__(self):
    # Wire the five pipeline stages together: feed URLs, grab pages,
    # extract the data, clean it, and write it to the database.
    self.feeder = url_feeder("sp500.csv")
    self.grabber = url_grabber()
    self.extractor = data_extractor(self.feeder)
    self.cleaner = data_cleanser()
    self.eater = database_feeder("pybros", "getIn2it", "rev3")
Example #3
def __init__(self):
    self.feeder = url_feeder('sp500.csv')
    self.grabber = url_grabber()
    self.extractor = data_extractor(self.feeder)
    self.cleaner = data_cleanser()
    self.eater = database_feeder('root', 'pybros', 'rev3')
Example #4
def __init__(self):
    self.feeder = url_feeder('sp500.csv')
    self.grabber = url_grabber()
    self.extractor = data_extractor(self.feeder)
    self.cleaner = data_cleanser()
    self.eater = database_feeder('pybros', 'getIn2it', 'rev3')
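
Examples #2 through #4 only wire the five stages together in __init__; none of them shows the pipeline running end to end. A driver method along the following lines would tie them together. It is a sketch only: retrieve() and extract() appear in the examples above, but run(), cleanse(), and feed() are assumed names.

def run(self, limit=5):
    # Hypothetical driver: pull each URL from the feeder, grab the page,
    # extract and clean the data, then hand it to the database feeder.
    # The (site, dic) unpacking follows Example #1; Example #5 iterates
    # bare site values instead.
    for i, (site, dic) in enumerate(self.feeder):
        if i >= limit:
            break
        html = self.grabber.retrieve(site)
        data = self.extractor.extract(html[0])
        clean = self.cleaner.cleanse(data)  # assumed method name
        self.eater.feed(clean)              # assumed method name
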
Example #5
print('testing url_feeder')

from aquire import url_feeder

# This version of url_feeder yields bare site values rather than
# (site, dic) pairs; print only the first one.
feeder = url_feeder('sp500.csv')
i = 0
for site in feeder:
    i += 1
    if i > 1:
        break
    print(site)

print('testing url_grabber')

from aquire import url_grabber

# Fetch the page for the first site only.
feeder = url_feeder('sp500.csv')
grabber = url_grabber()
i = 0
for site in feeder:
    i += 1
    if i > 1:
        break
    xml = grabber.retrieve(site)

print('testing data_extractor')

from prune import data_extractor

# Run the extractor on the page retrieved in the loop above.
extractor = data_extractor(feeder)
extractor.extract(xml[0])
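
Both Example #1 and Example #5 index the value returned by grabber.retrieve(site) with [0] before passing it to extract(), which suggests retrieve() returns a sequence (for instance a (body, headers) pair) rather than a bare string. The aquire module is not shown here, so the following stand-in, built on Python 3's urllib.request, is a hypothetical sketch only, not the project's actual implementation.

import urllib.request

class url_grabber(object):
    # Hypothetical stand-in for aquire.url_grabber; the real class
    # is not shown in these examples and may differ.
    def retrieve(self, url):
        with urllib.request.urlopen(url) as resp:
            body = resp.read().decode('utf-8', errors='replace')
            # Return a (body, headers) pair so that retrieve(...)[0]
            # yields the page text, matching the [0] indexing above.
            return body, resp.info()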