Code Example #1
File: default_fix.py  Project: reingart/web2conf
def planet():
    #return ""
    import gluon.contrib.rss2 as rss2

    # store planet rss entries in disk (forever...)
    import portalocker
    import os, cPickle as pickle
    path = os.path.join(request.folder,'cache', "planet.rss")
    if not os.path.exists(path):
        f = open(path, "wb+")  # binary mode: the file holds pickled data
        rss = get_planet_rss(None)
        rss = [{'title': item.title, 'author': item.author,
                'pubDate': item.pubDate, 'link': item.link,
                'description': item.description} for item in rss.items]
    else:
        f = open(path, "rb+")
        rss = None
    portalocker.lock(f, portalocker.LOCK_EX)
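    # with the exclusive lock held, either load the cached entries
    # (the file already existed) or persist the freshly built ones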
    if not rss:
        rss = pickle.load(f)
    else:
        f.seek(0)
        pickle.dump(rss, f)
    portalocker.unlock(f)
    f.close()

    # .rss requests
    if request.extension == "rss":
        # return new rss feed xml
        response.headers['Content-Type']='application/rss+xml'
        return rss2.dumps(rss)

    # otherwise hand the rss entries to the view for rendering
    return response.render(dict(rss = rss, rss2 = rss2))
Code Example #2
File: news.py  Project: hsorby/web2py-appliances
def rss():
    response.headers["Content-Type"] = "application/rss+xml"
    import datetime
    import gluon.contrib.rss2 as rss2

    requested_groups = request.vars.groups or "1"
    try:
        requested_groups = tuple([int(i) for i in requested_groups.split(",")])
    except (ValueError, AttributeError):
        return ""
    entries = db(db.announcement.id == db.access.record_id)(db.access.table_name == "announcement")(
        db.access.persons_group.belongs(requested_groups)
    )(db.announcement.to_rss == True)(db.auth_user.id == db.announcement.owner).select(groupby=db.announcement.id)
    items = [
        rss2.RSSItem(
            title=entry.announcement.title,
            link=MAIN,
            author=entry.auth_user.email,
            description=entry.announcement.body,
            pubDate=entry.announcement.posted_on,
        )
        for entry in entries
    ]
    rss = rss2.RSS2(
        title="public rss for groups " + str(requested_groups),
        link=MAIN,
        description=str(requested_groups),
        lastBuildDate=datetime.datetime.now(),
        items=items,
    )
    return rss2.dumps(rss)
Code Example #3
def planet():
    FILTER = 'web2py'
    import datetime
    import re
    import gluon.contrib.rss2 as rss2
    import gluon.contrib.feedparser as feedparser

    # filter for general (not categorized) feeds
    regex = re.compile(FILTER, re.I)
    # select all feeds
    feeds = db(db.feed).select()
    entries = []
    for feed in feeds:
        # fetch and parse feeds
        d = feedparser.parse(feed.url)
        for entry in d.entries:
            # filter feed entries
            if not feed.general or regex.search(entry.description):
                # extract entry attributes
                entries.append({
                    'feed': {'author':feed.author,
                             'link':feed.link,
                             'url':feed.url,
                             'name':feed.name},
                    'title': entry.title,
                    'link': entry.link,
                    'description': entry.description,
                    'author': hasattr(entry, 'author_detail') \
                              and entry.author_detail.name \
                              or feed.author,
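                    # date_parsed is a UTC time.struct_time; its first six
                    # fields (year..second) rebuild a naive datetime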
                    'date': datetime.datetime(*entry.date_parsed[:6])
                })
    # sort entries by date, descending
    entries.sort(key=lambda x: x['date'], reverse=True)
    now = datetime.datetime.now()
    # aggregate rss2 feed with parsed entries
    rss = rss2.RSS2(
        title="Planet web2py",
        link=URL("planet").encode("utf8"),
        description="planet author",
        lastBuildDate=now,
        items=[
            rss2.RSSItem(
                title=entry['title'],
                link=entry['link'],
                description=entry['description'],
                author=entry['author'],
                # guid = rss2.Guid('unknown'),
                pubDate=entry['date']) for entry in entries
        ])

    # return new rss feed xml
    response.headers['Content-Type'] = 'application/rss+xml'
    return rss2.dumps(rss)
Code Example #4
File: simple_examples.py  Project: huiker/web2py
def rss_aggregator():
    import datetime
    import gluon.contrib.rss2 as rss2
    import gluon.contrib.feedparser as feedparser
    d = feedparser.parse('http://rss.slashdot.org/Slashdot/slashdot/to')
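    # d.channel carries the feed-level metadata; d.entries the posts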

    rss = rss2.RSS2(title=d.channel.title,
                    link=d.channel.link,
                    description=d.channel.description,
                    lastBuildDate=datetime.datetime.now(),
                    items=[rss2.RSSItem(title=entry.title,
                                        link=entry.link,
                                        description=entry.description,
                                        pubDate=datetime.datetime.now())
                           for entry in d.entries])
    response.headers['Content-Type'] = 'application/rss+xml'
    return rss2.dumps(rss)
Code Example #5
def view_collection_feed():
    """ 
    Creates an rss feed.  Creates items (based on stories) for this feed.  Items have audio enclosures (will these always be mp3?).

    """

    import gluon.contrib.rss2 as rss2

    collection_id = request.args(0)
    collection = db.collection[collection_id] or redirect(error_page)
    stories = db(db.story.collection.contains(collection_id)).select(orderby=db.story.title)
    # look up the collection author's details in a single query
    author = db(db.auth_user.id == collection.created_by).select(
        db.auth_user.email, db.auth_user.first_name,
        db.auth_user.last_name).first()
    email = author.email
    first_name = author.first_name
    last_name = author.last_name
    scheme = request.env.get('WSGI_URL_SCHEME', 'http').lower()
    # GeoRSSFeed / GeoRSSItem are assumed to be defined elsewhere in this
    # app as georss-aware variants of rss2.RSS2 / rss2.RSSItem
    rss = GeoRSSFeed(title=collection.title,
        link=scheme + '://' + request.env.http_host + request.env.path_info,
        description=collection.description,
        lastBuildDate=collection.modified_on,
        items=[
            GeoRSSItem(title=story.title,
                       author=email + ' (' + first_name + ' ' + last_name + ')',
                       link=story.url,
                       guid=scheme + '://' + request.env.http_host + '/publicradioroadtrip/default/view_story/' + str(story.id),
                       enclosure=rss2.Enclosure(story.audio_url, 0, 'audio/mpeg'),
                       description=story.description,
                       content='<p><a href="' + format_audio_url(story.id) + '">Listen here</a></p>',
                       point=story.latitude + ' ' + story.longitude,
                       pubDate=story.date) for story in stories])
            
    response.headers['Content-Type']='application/rss+xml'
    return rss2.dumps(rss)
Code Example #6
def rss():
    response.headers['Content-Type'] = 'application/rss+xml'
    import datetime
    import gluon.contrib.rss2 as rss2
    requested_groups = request.vars.groups or '1'
    try:
        requested_groups = tuple([int(i) for i in requested_groups.split(',')])
    except (ValueError, AttributeError):
        return ''
    entries = db(db.announcement.id == db.access.record_id)\
              (db.access.table_name == 'announcement')\
              (db.access.auth_users_group.belongs(requested_groups))\
              (db.announcement.to_rss == True)\
              (db.auth_user.id == db.announcement.owner)\
              .select(groupby=db.announcement.id)
    items = [rss2.RSSItem(title=entry.announcement.title,
                          link=MAIN,
                          author=entry.auth_user.email,
                          description=entry.announcement.body,
                          pubDate=entry.announcement.posted_on)
             for entry in entries]
    rss = rss2.RSS2(title='public rss for ' + str(requested_groups),
                    link=MAIN,
                    description=str(requested_groups),
                    lastBuildDate=datetime.datetime.now(),
                    items=items)
    return rss2.dumps(rss)
Code Example #7
File: defult_orig.py  Project: reingart/web2conf
def planet():
    #return ""
    import gluon.contrib.rss2 as rss2

    # store planet rss entries in disk (forever...)
    def get_rss_feeds():
        rss = get_planet_rss(None)
        rss = [{'title': item.title, 'author': item.author,
                'pubDate': item.pubDate, 'link': item.link,
                'description': item.description} for item in rss.items]
        return rss

    # cache.disk memoizes get_rss_feeds on disk under a request-path key
    # and rebuilds the entries every 15 minutes
    rss = cache.disk(request.env.path_info + ".planet",
                     get_rss_feeds,
                     time_expire=60*15)

    # .rss requests
    if request.extension == "rss":
        # return new rss feed xml
        response.headers['Content-Type']='application/rss+xml'
        return rss2.dumps(rss)

    # otherwise hand the rss entries to the view for rendering
    return response.render(dict(rss = rss, rss2 = rss2))
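
All of the examples above share the same core rss2 pattern. A minimal standalone sketch of it (the titles, links, and descriptions here are placeholder values, not taken from any of the projects above):

def minimal_feed():
    import datetime
    import gluon.contrib.rss2 as rss2
    # build one item, wrap it in a channel, and serialize to XML
    item = rss2.RSSItem(title='Hello',
                        link='http://example.com/1',
                        description='First entry',
                        pubDate=datetime.datetime.now())
    rss = rss2.RSS2(title='Example feed',
                    link='http://example.com/',
                    description='minimal rss2 usage',
                    lastBuildDate=datetime.datetime.now(),
                    items=[item])
    response.headers['Content-Type'] = 'application/rss+xml'
    return rss2.dumps(rss)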