def _retrieveFeed(self):
    """Fetch the remote feed, parse it, and cache the results on self.

    Records the attempt time, reads the feed via ``tool.read_data``
    (forced first, then retried unforced on ``urllib2.URLError``),
    parses it with ``parser.Parser`` and stores the view items.

    Returns:
        True on a successful load, False when both read attempts
        failed, and (as before) None when ``self.url`` is empty.

    Side effects:
        Sets ``self._loaded`` / ``self._failed`` status flags, plus
        ``self._last_update_time``, ``self._last_update_time_in_minutes``,
        ``self._parser``, ``self._title`` and ``self._items``.
    """
    url = self.url
    if url != '':
        # Stamp the attempt time in both representations the class keeps.
        self._last_update_time_in_minutes = time.time() / 60
        self._last_update_time = DateTime()
        try:
            data = tool.read_data(url, force=True)
        except urllib2.URLError:
            try:
                # Forced fetch failed; retry without force (presumably a
                # cached/softer read -- semantics of tool.read_data's
                # force flag are defined elsewhere).
                data = tool.read_data(url)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt
                # and SystemExit are no longer swallowed.
                # We tried at least, but have a failed load.
                self._loaded = True
                self._failed = True
                return False
        self._parser = parser.Parser()
        self._parser.parse(data)
        self._title = u'Events'
        self._items = self._model2view(self._parser.items)
        self._loaded = True
        self._failed = False
        return True
# ---- Example #2 (score: 0) ----
    def parse(self, data):
        """Hand *data* to the wrapped parser object (``self.p``)."""
        wrapped = self.p
        wrapped.Parse(data)


if __name__ == '__main__':
##    p = _StripHTMLParser()
##    result = p.strip(u"""<div>
##<img src="http://example.com/img.jpg"/>
##<h1>ORC</h1>
##<br />
##<span>0123456789</span><span>ABCDEFGHIJ</span></div>""", 10)
##    print result
##    sys.exit(0)

    data = tool.read_data('http://www.itsatrip.org/api/xmlfeed.ashx')
    p = Parser()
    p.parse(data)
    print u'Events count: %s' % len(p.events)
    #result = tool.search(p, [u'Art, History & Museums'])
    result = tool.free_events(p.items)
    print 'len(result):%s' % len(result)
    i = 0
    for item in result:
        id, name = item.id, item.name.encode('ascii', 'replace')
        print 'id:%s name:%s' % (id, name)
        desc = item.description.encode('ascii', 'replace')
        summary = item.summary.encode('ascii', 'replace')
        print 'description(%s):%s' % (len(desc), desc)
        print 'summary(%s):%s' % (len(summary), summary)
        i += 1