def get_content(url):
    """Return the raw response body fetched from Flickr for *url*.

    :param url: absolute HTTP(S) URL to fetch; must start with "http".
    :returns: the response body as returned by ``f.read()``.
    :raises AssertionError: if *url* does not start with "http".
    """
    assert url.startswith("http")
    f = Retrieve(Flickr.__name__).open(url)
    try:
        content = f.read()
    finally:
        # close in a finally block so the handle is released even when
        # read() raises; GzipFile lacks context-manager support in
        # Python 2.6, so an explicit close() is required here.
        f.close()
    return content
def get_content(url):
    """Return the raw response body fetched from Flickr for *url*.

    :param url: absolute HTTP(S) URL to fetch; must start with "http".
    :returns: the response body as returned by ``f.read()``.
    :raises AssertionError: if *url* does not start with "http".
    """
    assert (url.startswith("http"))
    f = Retrieve(Flickr.__name__).open(url)
    try:
        content = f.read()
    finally:
        # close in a finally block so the handle is released even when
        # read() raises; GzipFile lacks context-manager support in
        # Python 2.6, so an explicit close() is required here.
        f.close()
    return content
def testRetrievalTimeout(self):
    """Check that Retrieve propagates its default_timeout to the socket.

    A deliberately slow host is fetched with a 0.1 s timeout; the open/
    read sequence must abort with urllib2.URLError.
    """
    slow_url = "http://www.csse.uwa.edu.au/"
    with raises(urllib2.URLError):
        retriever = Retrieve(self.__class__.__name__, default_timeout=0.1)
        response = retriever.open(slow_url)
        content = response.read()
        response.close()
def _get_content(url):
    """Return the raw response body fetched from delicious for *url*.

    Sleeps one second after every fetch to stay polite toward the
    delicious servers.

    :param url: absolute HTTP(S) URL to fetch; must start with "http".
    :returns: the response body as returned by ``f.read()``.
    :raises AssertionError: if *url* does not start with "http".
    """
    assert (url.startswith("http"))
    f = Retrieve(Delicious.__name__).open(url)
    try:
        content = f.read()
    finally:
        # close in a finally block so the handle is released even when
        # read() raises; GzipFile lacks context-manager support in
        # Python 2.6, so an explicit close() is required here.
        f.close()
    sleep(1)
    return content
def _get_content(url):
    """Return the raw response body fetched from delicious for *url*.

    Sleeps one second after every fetch to stay polite toward the
    delicious servers.

    :param url: absolute HTTP(S) URL to fetch; must start with "http".
    :returns: the response body as returned by ``f.read()``.
    :raises AssertionError: if *url* does not start with "http".
    """
    assert (url.startswith("http"))
    f = Retrieve(Delicious.__name__).open(url)
    try:
        content = f.read()
    finally:
        # close in a finally block so the handle is released even when
        # read() raises; GzipFile lacks context-manager support in
        # Python 2.6, so an explicit close() is required here.
        f.close()
    sleep(1)
    return content
def t_retrieve(url):
    """Fetch *url* from the web and return the response body.

    Helper module function for the testMultiProcessing unit test.
    """
    handle = Retrieve(__name__).open(url)
    try:
        return handle.read()
    finally:
        # explicit close: GzipFile does not support the context
        # protocol in Python 2.6, so `with` cannot be used here
        handle.close()
def get_content(url):
    """Return the raw response body fetched from Technorati for *url*.

    Throttles access: if the previous request happened less than
    SLEEP_TIME seconds ago, sleeps SLEEP_TIME seconds before fetching,
    then records the access time on the Technorati class.

    :param url: absolute HTTP(S) URL to fetch; must start with "http".
    :returns: the response body as returned by ``f.read()``.
    :raises AssertionError: if *url* does not start with "http".
    """
    assert (url.startswith("http"))
    logger.debug('Fetching content for URL %s' % url)
    # honor the rate limit before touching the remote service
    if (time.time() - Technorati.last_access) < SLEEP_TIME:
        logger.debug('Sleeping %s seconds!' % SLEEP_TIME)
        time.sleep(SLEEP_TIME)
    Technorati.last_access = time.time()
    f = Retrieve("%s_new" % Technorati.__name__).open(url)
    try:
        content = f.read()
    finally:
        # close in a finally block so the handle is released even when
        # read() raises (GzipFile has no context-manager support in
        # Python 2.6)
        f.close()
    return content