Example #1
def rss():
    response.headers['Content-Type'] = 'application/rss+xml'
    import datetime
    import gluon.contrib.rss2 as rss2
    # comma-separated list of group ids, e.g. ?groups=1,3
    requested_groups = request.vars.groups or '1'
    try:
        requested_groups = tuple(int(i) for i in requested_groups.split(','))
    except ValueError:
        return ''
    entries = db(db.announcement.id == db.access.record_id)\
        (db.access.table_name == 'announcement')\
        (db.access.persons_group.belongs(requested_groups))\
        (db.announcement.to_rss == True)\
        (db.auth_user.id == db.announcement.owner)\
        .select(groupby=db.announcement.id)
    items = [
        rss2.RSSItem(title=entry.announcement.title,
                     link=MAIN,
                     author=entry.auth_user.email,
                     description=entry.announcement.body,
                     pubDate=entry.announcement.posted_on) for entry in entries
    ]
    rss = rss2.RSS2(title='public rss for groups ' + str(requested_groups),
                    link=MAIN,
                    description=str(requested_groups),
                    lastBuildDate=datetime.datetime.now(),
                    items=items)
    return rss2.dumps(rss)
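
The controller joins several tables that are defined elsewhere in the app's model. A minimal sketch of what that model might look like, reconstructed from the field names the query uses (table and field names come from the code above; the field types and the MAIN constant are assumptions):

# models/db.py -- hypothetical reconstruction of the tables rss() queries
db.define_table('announcement',
    Field('title'),
    Field('body', 'text'),
    Field('posted_on', 'datetime'),
    Field('to_rss', 'boolean', default=True),
    Field('owner', 'reference auth_user'))

# per-record ACL: which auth_group may see which record of which table
db.define_table('access',
    Field('table_name'),
    Field('record_id', 'integer'),
    Field('persons_group', 'reference auth_group'))

MAIN = 'http://example.com/'  # site-wide link used by the feed (assumed)
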
Example #2
def planet():
    FILTER = 'web2py'
    import datetime
    import re
    import gluon.contrib.rss2 as rss2
    import gluon.contrib.feedparser as feedparser

    # filter for general (not categorized) feeds
    regex = re.compile(FILTER, re.I)
    # select all feeds
    feeds = db(db.feed).select()
    entries = []
    for feed in feeds:
        # fetch and parse feeds
        d = feedparser.parse(feed.url)
        for entry in d.entries:
            # filter feed entries
            if not feed.general or regex.search(entry.description):
                # extract entry attributes
                entries.append({
                    'feed': {'author':feed.author,
                             'link':feed.link,
                             'url':feed.url,
                             'name':feed.name},
                    'title': entry.title,
                    'link': entry.link,
                    'description': entry.description,
                    'author': (entry.author_detail.name
                               if hasattr(entry, 'author_detail')
                               else feed.author),
                    'date': datetime.datetime(*entry.date_parsed[:6])
                })
    # sort entries by date, descending
    entries.sort(key=lambda x: x['date'], reverse=True)
    now = datetime.datetime.now()
    # aggregate rss2 feed with parsed entries
    rss = rss2.RSS2(
        title="Planet web2py",
        link=URL("planet").encode("utf8"),
        description="planet author",
        lastBuildDate=now,
        items=[
            rss2.RSSItem(
                title=entry['title'],
                link=entry['link'],
                description=entry['description'],
                author=entry['author'],
                # guid = rss2.Guid('unknown'),
                pubDate=entry['date']) for entry in entries
        ])

    # return new rss feed xml
    response.headers['Content-Type'] = 'application/rss+xml'
    return rss2.dumps(rss)
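
planet() iterates over a db.feed table that is defined elsewhere. A plausible minimal definition, inferred from the attributes the loop reads (field types and validators are assumptions):

# models/db.py -- hypothetical feed registry read by planet()
db.define_table('feed',
    Field('name'),
    Field('author'),
    Field('url', requires=IS_URL()),    # feed URL passed to feedparser
    Field('link', requires=IS_URL()),   # human-readable site link
    Field('general', 'boolean', default=True))  # True: filter entries by FILTER
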
Example #3
def rss_aggregator():
    import datetime
    import gluon.contrib.rss2 as rss2
    import gluon.contrib.feedparser as feedparser
    d = feedparser.parse('http://rss.slashdot.org/Slashdot/slashdot/to')

    rss = rss2.RSS2(title=d.channel.title, link=d.channel.link,
                    description=d.channel.description,
                    lastBuildDate=datetime.datetime.now(),
                    items=[rss2.RSSItem(title=entry.title,
                    link=entry.link, description=entry.description,
                    pubDate=datetime.datetime.now()) for entry in
                    d.entries])
    response.headers['Content-Type'] = 'application/rss+xml'
    return rss.to_xml(encoding='utf-8')
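
Because the action just returns an XML string, it can be smoke-tested by re-parsing its own output, e.g. from the web2py shell (the app/controller names in the invocation are assumptions):

# python web2py.py -S yourapp/default  (hypothetical app and controller)
import gluon.contrib.feedparser as feedparser
xml = rss_aggregator()
reparsed = feedparser.parse(xml)
print(reparsed.feed.title)
print(len(reparsed.entries))
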
Example #4
def rss(feed):
    import datetime
    import gluon.contrib.rss2 as rss2
    # feedparser-style dicts use 'entries'; plain RSS dicts may use 'items'
    if 'entries' not in feed and 'items' in feed:
        feed['entries'] = feed['items']

    def safestr(obj, key, default=''):
        # coerce a value to a utf-8 byte string, replacing bad characters
        return str(obj[key]).encode('utf-8', 'replace') if key in obj else default

    now = datetime.datetime.now()
    rss = rss2.RSS2(title=safestr(feed,'title'),
                    link=safestr(feed,'link'),
                    description=safestr(feed,'description'),
                    lastBuildDate=feed.get('created_on', now),
                    items=[rss2.RSSItem(
                           title=safestr(entry,'title','(notitle)'),
                           link=safestr(entry,'link'),
                           description=safestr(entry,'description'),
                           pubDate=entry.get('created_on', now)
                           ) for entry in feed.get('entries', [])])
    return rss.to_xml(encoding='utf-8')
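
A minimal call, with a dict that provides only the keys the function reads (the contents are illustrative):

demo = {
    'title': 'Demo feed',
    'link': 'http://example.com/',
    'description': 'demo',
    'entries': [
        {'title': 'First post',
         'link': 'http://example.com/1',
         'description': 'hello world'},
    ],
}
print(rss(demo))  # serialized RSS 2.0 XML
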
Example #5
def rss(feed):
    import datetime
    import gluon.contrib.rss2 as rss2
    if 'entries' not in feed and 'items' in feed:
        feed['entries'] = feed['items']
    now = datetime.datetime.now()
    # default missing links to '' rather than None: None.encode() would raise
    rss = rss2.RSS2(
        title=str(feed.get('title', '(notitle)').encode('utf-8', 'replace')),
        link=str(feed.get('link', '').encode('utf-8', 'replace')),
        description=str(
            feed.get('description', '').encode('utf-8', 'replace')),
        lastBuildDate=feed.get('created_on', now),
        items=[
            rss2.RSSItem(
                title=str(
                    entry.get('title', '(notitle)').encode('utf-8', 'replace')),
                link=str(entry.get('link', '').encode('utf-8', 'replace')),
                description=str(
                    entry.get('description', '').encode('utf-8', 'replace')),
                pubDate=entry.get('created_on', now))
            for entry in feed.get('entries', [])
        ])
    return rss.to_xml(encoding='utf-8')
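
Thanks to the .get() defaults, the function also tolerates sparse input; an entry with no keys at all falls back to '(notitle)', empty strings, and the current time:

print(rss({'title': 'Sparse feed', 'entries': [{}]}))
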
Example #6
def feed():
    # rebuild request.args from the raw path and verify the signed URL
    request.args = List(request.raw_args.split('/'))
    if not URL.verify(request, hmac_key=SIGN_KEY):
        raise HTTP(403, 'invalid signature')
    # the requested instance must be one the user's token allows
    rtn = recalc_token(user=request.get_vars.user)
    if request.args(0) not in rtn:
        raise HTTP(403, 'instance not valid')
    request.extension = 'rss'
    rargs = List(request.raw_args.split('/'))
    # cached feeds are keyed by the md5 of the full argument path
    fname = "%s.rss" % hashlib.md5('/'.join(rargs)).hexdigest()
    basepath = os.path.join(request.folder, 'private', 'temp_feeds')
    if not os.path.exists(basepath):
        os.makedirs(basepath)
    fpath = os.path.join(basepath, fname)
    lock_file = fpath + '.__lock'
    # serve from cache if another request is already rebuilding this feed
    # (lock file present) or if the cached copy is still fresh
    return_cached = os.path.isfile(lock_file)
    if not return_cached:
        return_cached = os.path.isfile(
            fpath) and time.time() - os.path.getmtime(fpath) < CACHE_FOR
    if return_cached:
        # wait up to ~60 seconds for the cached file to appear, then serve it;
        # if it never shows up, fall through and rebuild the feed ourselves
        x = 0
        while x < 60:
            x += 1
            if os.path.isfile(fpath):
                with open(fpath, 'rb') as g:
                    rtn = g.read()
                return rtn
            time.sleep(1)
    # take the lock so concurrent requests serve the cache while we rebuild
    with open(lock_file, 'w') as g:
        g.write('%s' % request.utcnow)
    # expected path: <instance>/folder/<f>/project/<p>/status/<s>/package/<pkg>
    folder = rargs(2) if rargs(1) == 'folder' else 'all'
    project = rargs(4) if rargs(3) == 'project' else 'all'
    status = rargs(6) if rargs(5) == 'status' else 'all'
    package = rargs(8) if rargs(7) == 'package' else 'all'
    # translate 'all' into catch-all LIKE patterns and a 0 status code
    folder_pattern = folder != 'all' and fixup_like_param(folder) or '%'
    project_name = project != 'all' and fixup_like_param(project) or '%'
    package_pattern = package != 'all' and fixup_like_param(package) or '%'
    status_ = status != 'all' and status or 'all'
    status = 0
    for k, v in STATUS_CODES.iteritems():
        if status_ == v:
            status = k
            break
    res = read_and_exec('package-list.sql',
                        placeholders=(HOURSPAN, folder_pattern, project_name,
                                      package_pattern, status),
                        as_dict=True)
    msgs = read_and_exec('package-messages.sql',
                         placeholders=(HOURSPAN, folder_pattern, project_name,
                                       package_pattern, status),
                         as_dict=True)
    res = massage_resultset(res)
    msgs = massage_resultset(msgs)
    msgs_dict = defaultdict(list)
    for msg in msgs:
        msgs_dict[msg.operation_id].append(msg)
    title = "SSIS Dashboard: Executions for %s" % request.args(0)
    title += folder != 'all' and ', folder %s' % folder or ''
    title += project != 'all' and ', project %s' % project or ''
    title += status != 'all' and ', status %s' % STATUS_CODES[status] or ''
    title += package != 'all' and ', package %s' % package or ''
    items = []
    for entry in res:
        now = request.utcnow.strftime('%Y%m%d%H%M%S')
        link = URL('console',
                   'overview',
                   host=True,
                   scheme='https',
                   extension='',
                   args=[
                       request.args(0), 'folder', entry.folder_name, 'project',
                       entry.project_name, 'status', 'all', 'package',
                       entry.package_name, 'execution', entry.execution_id
                   ])
        if entry.elapsed_time_min is None:
            # still running: change the guid on every build so readers refresh it
            guid = link + '@' + now
        else:
            # finished: keep the guid stable across rebuilds
            guid = link + '@now'
        detailed_status = [['Status', STATUS_CODES[entry.status]],
                           ['Elapsed (min)', entry.elapsed_time_min]]
        errors = None
        if entry.execution_id in msgs_dict:
            errors = 0
            warnings = 0
            for m in msgs_dict[entry.execution_id]:
                if m.event_name == 'OnWarning':
                    warnings += 1
                if m.event_name == 'OnError':
                    errors += 1
            detailed_status.extend([
                ['Errors', errors],
                ['Warnings', warnings],
            ])
            err_rows = msgs_dict[entry.execution_id]
            messages = [[
                TDNW('Event Name'),
                TDNW('Message Time (UTC)'),
                TDNW('Message'),
                TDNW('Package'),
                TDNW('Package Path'),
                TDNW('Subcomponent Name'),
                TDNW('Execution Path')
            ]]
            for msg in err_rows:
                messages.append([
                    TDNW(msg.event_name),
                    TDNW(msg.message_time),
                    TD(msg.message),
                    TDNW(msg.package_name),
                    TD(msg.package_path),
                    TDNW(msg.subcomponent_name),
                    TD(msg.execution_path)
                ])
            # each row is already a list of TD/TDNW helpers
            errors = TABLE([TR(row) for row in messages], _border=1)
        detailed_status = TABLE(
            [TR([TD(el) for el in row]) for row in detailed_status])
        if errors:
            detailed_status = detailed_status.xml() + '<hr />' + errors.xml()
        else:
            detailed_status = detailed_status.xml()
        pubdate = request.now
        if entry.end_time:
            pubdate = datetime.datetime.strptime(entry.end_time,
                                                 '%Y-%m-%d %H:%M:%S')
        items.append(
            CDATARSS2(
                title="%s - %s - (%s)" %
                (entry.execution_id, entry.package_name,
                 STATUS_CODES[entry.status]),
                link=link,
                author="%s/%s@%s" %
                (entry.folder_name, entry.project_name, request.args(0)),
                guid=guid,
                description=detailed_status,
                pubDate=pubdate,
            ))
    rss = rss2.RSS2(title=title,
                    link=URL(args=request.args, scheme='https', host=True),
                    description="Execution Packages",
                    ttl=CACHE_FOR,
                    lastBuildDate=request.utcnow,
                    items=items)
    rtn = rss.to_xml(encoding='utf-8')
    if not os.path.exists(os.path.dirname(fpath)):
        os.makedirs(os.path.dirname(fpath))
    with open(fpath, 'wb') as g:
        g.write(rtn)
    try:
        os.unlink(lock_file)
    except OSError:
        pass

    # housekeeping: prune cached feeds older than PRUNING seconds
    allfiles = glob.glob(os.path.join(basepath, '*.rss'))
    for fpath in allfiles:
        if os.path.isfile(
                fpath) and time.time() - os.path.getmtime(fpath) > PRUNING:
            try:
                os.unlink(fpath)
            except OSError:
                pass
    # remove stale lock files (older than 120 s); these should only survive
    # if a request crashed before releasing its lock
    allfiles = glob.glob(os.path.join(basepath, '*.__lock'))
    for fpath in allfiles:
        if os.path.isfile(
                fpath) and time.time() - os.path.getmtime(fpath) > 120:
            try:
                os.unlink(fpath)
            except OSError:
                pass
    return rtn
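
This controller relies on module-level imports (os, time, glob, hashlib, datetime, defaultdict) and constants (SIGN_KEY, CACHE_FOR, HOURSPAN, PRUNING, STATUS_CODES) defined elsewhere, plus two helpers that never appear above. Plausible minimal versions of those helpers, reconstructed from how they are used (both are sketches under stated assumptions, not the project's actual code):

import gluon.contrib.rss2 as rss2
from gluon.html import TD

def TDNW(*components, **attributes):
    # hypothetical helper: a TD whose content never wraps, inferred from
    # its use for short fixed-width cells in the message table
    attributes.setdefault('_style', 'white-space:nowrap')
    return TD(*components, **attributes)

class CDATARSS2(rss2.RSSItem):
    # hypothetical helper: emit the description as raw CDATA so the HTML
    # status table built above survives RSS serialization unescaped
    def __init__(self, **kwargs):
        self._cdata = kwargs.pop('description', '')
        kwargs['description'] = None  # suppress the default escaped element
        rss2.RSSItem.__init__(self, **kwargs)

    def publish_extensions(self, handler):
        # publish_extensions is PyRSS2Gen's per-item hook; handler._write
        # is an XMLGenerator implementation detail, so treat this as a sketch
        handler._write('<description><![CDATA[%s]]></description>'
                       % self._cdata)
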