Example #1
 def __init__(self, title, url, description, rssFile=None, pickleFile=None,
              maxItems=20, **kwargs):
     RSS2.__init__(self, title, url, description, **kwargs)
     WebPageMetadata.__init__(self, url, pickleFile)
     self.maxItems = maxItems
     if not rssFile:
         rssFile = self.digest() + '.xml'
     self.rssFile = rssFile
     self.currentGuids = {}
Example #2
 def __init__(self,
              title,
              url,
              description,
              rssFile=None,
              pickleFile=None,
              maxItems=20,
              **kwargs):
     RSS2.__init__(self, title, url, description, **kwargs)
     WebPageMetadata.__init__(self, url, pickleFile)
     self.maxItems = maxItems
     if not rssFile:
         rssFile = self.digest() + '.xml'
     self.rssFile = rssFile
     self.currentGuids = {}
Example #3
def posts_feed():
    base_url = url_for('general.index', _external=True)
    items = []
    posts = Post.get_published(num=10).all()

    for post in posts:
        post_url = urljoin(base_url, post.url)

        # TODO: Add a real description
        item = RSSItem(title=post.title,
                       link=post_url,
                       description=post.body.split('\r\n', 1)[0],
                       author='{} ({})'.format(post.author.email,
                                               post.author.full_name),
                       categories=[tag.name for tag in post.tags],
                       guid=Guid(post_url),
                       pubDate=post.pub_date)
        items.append(item)

    feed_config = current_app.config['BLOG_POSTS_FEED']
    rss2_feed = RSS2(title=feed_config['title'],
                     link=base_url,
                     description=feed_config['description'],
                     language='en-us',
                     webMaster=feed_config['webmaster'],
                     lastBuildDate=posts[0].pub_date if posts else None,
                     ttl=1440,
                     items=items)
    return current_app.response_class(rss2_feed.to_xml(encoding='utf-8'),
                                      mimetype='application/rss+xml')
Example #4
    def failures(self):
        """CherryPy handler to make an RSS feed of failures."""

        # TODO: make limit configurable (in CherryPy App .ini file)
        events = self.store.get_fail_events(limit=20)

        # Attach output
        for fail in events:
            fail['stdout'] = None
            fail['stderr'] = None

            if fail['finishid'] is not None:
                (fail['stdout'], fail['stderr']) = self.store.get_job_output(
                    fail['finishid'], fail['host'], fail['user'], fail['id'],
                    fail['crabid'])

        rssitems = [self.event_to_rssitem(e) for e in events]

        rss = RSS2('Crab failures',
                   self.base + '/',
                   'List of recent cron job failures.',
                   lastBuildDate=datetime.now(),
                   ttl=30,
                   items=rssitems)
        return rss.to_xml()
Example #5
def update_rss(items):
    rss = RSS2(title="C[omp]ute",
               link=BASE_URL,
               description="Andrew Cooke's blog",
               lastBuildDate=datetime.now(),
               items=items)
    with open(FEED_FILE, 'w') as destn:
        rss.write_xml(destn)
Example #6
    def __init__(self,
                 title,
                 link,
                 description,

                 language = None,
                 copyright = None,
                 managingEditor = None,
                 webMaster = None,
                 pubDate = None,  # a datetime, *in* *GMT*
                 lastBuildDate = None, # a datetime
                 
                 categories = None, # list of strings or Category
                 generator = 'MunkiStagingRSSFeed 1.0',
                 docs = "https://docs.orchard.ox.ac.uk/rss",
                 cloud = None,    # a Cloud
                 ttl = None,      # integer number of minutes

                 image = None,     # an Image
                 rating = None,    # a string; I don't know how it's used
                 textInput = None, # a TextInput
                 skipHours = None, # a SkipHours with a list of integers
                 skipDays = None,  # a SkipDays with a list of strings

                 items = None,     # list of RSSItems
                 ):

        # Initialise base class .. 
        RSS2.__init__(self, title, link, description, language, copyright,
                 managingEditor, webMaster, pubDate, lastBuildDate,
                 categories, generator, docs, cloud, ttl, image,
                 rating, textInput, skipHours, skipDays, items)

        # Add media namespace (for item icons).
        # Note: rss_attrs is a class attribute, so this may have
        # unexpected side effects (but I don't particularly want to
        # re-engineer the class).
        self.rss_attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'
        self.rss_attrs['xmlns:dc']    = 'http://purl.org/dc/elements/1.1/'
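
A note on the caveat in the comment above: because rss_attrs is a class-level dict, item-assigning through self.rss_attrs modifies the dict shared by every RSS2 instance in the process. A minimal sketch of one way to avoid that, assuming rss_attrs is a plain class attribute as in PyRSS2Gen (the MediaRSS2 name is illustrative only):

class MediaRSS2(RSS2):
    def __init__(self, *args, **kwargs):
        RSS2.__init__(self, *args, **kwargs)
        # Copy the class-level dict onto this instance before adding keys,
        # so other RSS2 instances keep the default attributes.
        self.rss_attrs = dict(RSS2.rss_attrs)
        self.rss_attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'
        self.rss_attrs['xmlns:dc'] = 'http://purl.org/dc/elements/1.1/'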
Example #7
def create_rss():
    'Generate XML file'

    title = u'Dealabs, tous les deals hots - Filtrés'
    dest = "/var/www/example.org/lab/rss"
    url = "https://lab.example.org/rss/"
    filename = 'dealabs.xml'

    rss = RSS2(title=title.encode('utf-8'),
               link=os.path.join(url, filename),
               description=title.encode('utf-8'),
               lastBuildDate=datetime.now(),
               items=[RSSItem(**article) for article in filter_deals()])
    with open(os.path.join(dest, filename), "w") as feed_file:
        rss.write_xml(feed_file, encoding='utf-8')
Example #8
    def __init__(
            self,
            title,
            link,
            description,
            language=None,
            copyright=None,
            managingEditor=None,
            webMaster=None,
            pubDate=None,  # a datetime, *in* *GMT*
            lastBuildDate=None,  # a datetime
            categories=None,  # list of strings or Category
            generator='MunkiStagingRSSFeed 1.0',
            docs="https://docs.orchard.ox.ac.uk/rss",
            cloud=None,  # a Cloud
            ttl=None,  # integer number of minutes
            image=None,  # an Image
            rating=None,  # a string; I don't know how it's used
            textInput=None,  # a TextInput
            skipHours=None,  # a SkipHours with a list of integers
            skipDays=None,  # a SkipDays with a list of strings
            items=None,  # list of RSSItems
    ):

        # Initialise base class ..
        RSS2.__init__(self, title, link, description, language, copyright,
                      managingEditor, webMaster, pubDate, lastBuildDate,
                      categories, generator, docs, cloud, ttl, image, rating,
                      textInput, skipHours, skipDays, items)

        # Add media namespace (for item icons).
        # Note: rss_attrs is a class attribute, so this may have
        # unexpected side effects (but I don't particularly want to
        # re-engineer the class).
        self.rss_attrs['xmlns:media'] = 'http://search.yahoo.com/mrss/'
        self.rss_attrs['xmlns:dc'] = 'http://purl.org/dc/elements/1.1/'
Example #9
 def rss(self, language=None, *args, **kwargs):
     if language:
         l = models.get_language(language)
     conf = cherrypy.request.app.config['ligiloj']
     query = models.Link().select(models.Link, models.Language).join(models.Language)
     if language:
         query = query.where(models.Link.language == l)
     cherrypy.response.headers['Content-Type'] = 'application/xml'
     return RSS2(title=u'{0} - {1}'.format(conf['site_title'],
                                           language and l.name or conf['global_title_text']),
                 link=conf['rss_site_url'],
                 description=conf['rss_description'],
                 language=language or conf['rss_default_language'],
                 items=[RSSItem(title=language and link.title
                                or u"{0}: {1}".format(link.language.name, link.title),
                                link=link.url,
                                # pass the datetime itself so PyRSS2Gen emits an RFC 822 pubDate
                                pubDate=link.published,
                                guid=Guid(link.url, str(link.id)))
                        for link in query]).to_xml('utf-8')
Example #10
 def generate_rss(self):
     if 'rss_title' not in self.config or 'rss_description' not in self.config:
         return
     RSS2(title=self.config['rss_title'],
          link=self.root_url,
          description=self.config['rss_description'],
          lastBuildDate=datetime.datetime.now(),
          items=[
              RSSItem(title=entry['title'],
                      link=self.root_url + entry['link'],
                      description=entry['html'],
                      guid=Guid(self.root_url + entry['link']),
                      pubDate=datetime.datetime.strptime(
                          entry['date'][:10], '%Y-%m-%d'))
              for entry in self.entries
          ]).write_xml(open(self.out + 'feed.xml', 'wb'), encoding='utf-8')
Example #11
    def get(self, keywords):
        commits = DBQ.findByKeywords(keywords)

        feed = RSS2(title="Crypto.is Code Audit Feed",
                    description="Just a thing, right?",
                    link="https://crypto.is",
                    lastBuildDate=datetime.datetime.utcnow())

        for c in commits:
            feed.items.append(c.toRSSItem())

        self.set_header('Content-Type', 'application/rss+xml')

        xml = feed.to_xml()
        self.write(xml)
        return
Example #12
def compile_rss(posts, conf, outpath):
    """
    Compile a list of Posts to the specified outpath.
    """
    items = [
        RSSItem(title=p['title'],
                link=os.path.join(conf['SITE_URL'], p['category'], p['slug']),
                description=p['html'],
                pubDate=p['published_at']) for p in posts
    ]

    rss = RSS2(title=conf['SITE_NAME'],
               link=conf['SITE_URL'],
               description=conf['SITE_DESC'],
               lastBuildDate=datetime.now(),
               items=items)

    with open(outpath, 'w') as feed_file:
        rss.write_xml(feed_file)
Example #13
def main():
    fourtwenty = next420()
    place = random.choice(fourtwenty['places'])
    if len(fourtwenty['places']) > 1:
        title = "Get ready for #fourtwenty at {0} and other fine places.".format(
            place)
    else:
        title = "Get ready for #fourtwenty at {0}.".format(place)

    open('420.rss', 'w').write(
        RSS2(title='Global 4:20 clock',
             link='https://zzzen.com/420',
             description='Keep up with where #FourTwenty happens next',
             language='en',
             items=[
                 RSSItem(title=title,
                         link='https://zzzen.com/420#{0}'.format(
                             urllib2.quote(place)))
             ]).to_xml('utf-8'))
Example #14
def menu():
	resource = urllib2.urlopen("http://www.ufrgs.br/ufrgs/ru")
	page = BeautifulSoup(resource)
	items = []
	for ru in page.find_all("div", "ru"):
		ru_name = ru.h3.contents[0]
		desc = ', '.join([(item or '').strip() for item in ru.div.contents if not hasattr(item, 'contents')])
		items.append(RSSItem(
			title = '%s - %s' % (ru_name, date.today().strftime('%d/%m/%Y')),
			link='http://www.ufrgs.br/ufrgs/ru',
			description=desc,
			guid=Guid(ru_name+date.today().isoformat()),
		))
	feed = RSS2(
		title=u"Cardápio do RU-UFRGS - diário",
		link='http://www.ufrgs.br/ufrgs/ru',
		description=u"Cardápio do dia no Restaurante Universitário da UFRGS",
		pubDate=datetime.today(),
		items=items,
	)
	return feed.to_xml()
Example #15
items = []
for count, (uri, item) in enumerate(links):
    if uri in cached_data:
        cached_item = cached_data[uri]
        items.append(RSSItem(**cached_item))
    else:
        items.append(item)
        # we will only really create entries for new stuff; make sure
        # we have their pubDate at least one minute (some readers seem
        # to not consider seconds) more for each of the newer stuff,
        # so that the order of the comics will be correct
        item.pubDate += timedelta(0.0007 * (count + 1))
        cached_data[uri] = dict(title=str(item.title),
                                link=str(item.link),
                                description=str(item.description),
                                pubDate=item.pubDate)

# generate the RSS here
rss = RSS2(title='The Order of the Stick',
           link=uri,
           description='The Order of the Stick',
           lastBuildDate=datetime.now(),
           items=items)
data = rss.to_xml('utf-8')
open('ots-rss.xml', 'w').write(xml.sax.saxutils.unescape(data))

# update cache
cache_file = open('links.cache', 'w')
pickle.dump(cached_data, cache_file)
cache_file.close()
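
For completeness, the cached_data dict used above has to come from somewhere; a minimal sketch of how it could be loaded at the top of the script, assuming links.cache is the pickle written at the end of a previous run (opened in text mode to match the 'w' write above):

try:
    with open('links.cache') as cache_file:
        cached_data = pickle.load(cache_file)
except (IOError, EOFError):
    # no cache yet, or an unreadable one: start fresh
    cached_data = {}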
Example #16
 def __init__(self, **kwargs):
     RSS2.__init__(self, **kwargs)
     self.rss_attrs[
         'xmlns:content'] = 'http://purl.org/rss/1.0/modules/content/'
     self.rss_attrs['xmlns:dc'] = 'http://purl.org/dc/elements/1.1/'
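
Declaring the content module namespace above only matters if items actually emit content:encoded elements. A minimal sketch of how that could be done with PyRSS2Gen's publish_extensions hook (ContentItem and its content argument are illustrative, not part of the library):

class ContentItem(RSSItem):
    def __init__(self, content=None, **kwargs):
        RSSItem.__init__(self, **kwargs)
        self.content = content

    def publish_extensions(self, handler):
        # publish_extensions is the hook PyRSS2Gen calls for extra child
        # elements; the SAX handler escapes the text for us.
        if self.content is not None:
            handler.startElement('content:encoded', {})
            handler.characters(self.content)
            handler.endElement('content:encoded')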
Example #17
    print(title, link, author, description, category, comments, pubDate)
    item = None
    link = None
    description = None
    guid = None
    pubDate = None
    comments = None



rss = RSS2(
        title = "AO3 works of {}".format(USER),
        link = BASE_URL,
        description = "{}'s AO3 works".format(USER),
  #      lastBuildDate = now(),

        items = items
         #   RSSItem(
         #       title = title,
         #       link = link,
         #       description = description,
         #       guid = Guid(guid),
         #       pubDate = pubDate,
         #       # author = author, TODO
         #       # category = category, # TODO fails?
         #       comments = comments)]
        )

rss.write_xml(open("fool.xml", "w"))

Example #18
def getFeed():
    feed = RSS2(title="Crypto.is Code Audit Feed",
                description="Just a thing, right?",
                link="https://crypto.is",
                lastBuildDate=datetime.datetime.utcnow())
    return feed
Example #19
File: util.py Project: PBarmby/site
 def __init__(self, **kwargs):
     RSS2.__init__(self, **kwargs)
     self.rss_attrs['xmlns:content'] = 'http://purl.org/rss/1.0/modules/content/'
     self.rss_attrs['xmlns:dc'] = 'http://purl.org/dc/elements/1.1/'
Example #20
hit_list = []
for name in series:
    name = urllib.quote(name)
    feed = feedparser.parse('http://ezrss.it/search/index.php?show_name=%s&quality=720p&mode=rss' % name)
    if len(feed['items']) == 0:
        feed = feedparser.parse('http://ezrss.it/search/index.php?show_name=%s&mode=rss' % name)
    print feed.url
    hit_list.append(feed)

# get the feeds and join them in one big list
feeds = hit_list
print "Found",len(feeds),"feeds."

entries = []
for feed in feeds:
    entries.extend(feed['items'])

# this section is for sorting the entries
decorated = [(entry["date_parsed"], entry) for entry in entries]
decorated.sort()
decorated.reverse()
entries = [entry for (date,entry) in decorated]

items = [RSSItem(**item) for item in entries]
feeds = RSS2(title="My series feed",
             description="This feed is an aggregation of various feeds",
             link="",
             items=items)
f = open('feed.xml','w')
f.write(feeds.to_xml())
f.close()
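
A note on the sort in the last example: pairing each entry with its date and calling .sort() on the tuples compares the entry dicts whenever two dates are equal, which raises a TypeError under Python 3. Sorting with a key avoids the decorate/sort/undecorate step entirely; a minimal equivalent sketch:

entries.sort(key=lambda entry: entry['date_parsed'], reverse=True)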