Example #1
def serialize_category_atom(category, url, user, event_filter):
    """Export the events in a category to Atom

    :param category: The category to export
    :param url: The URL of the feed
    :param user: The user who needs to be able to access the events
    :param event_filter: A SQLAlchemy criterion to restrict which
                         events will be returned.  Usually something
                         involving the start/end date of the event.
    """
    query = (Event.query
             .filter(Event.category_chain.contains([int(category.getId())]),
                     ~Event.is_deleted,
                     event_filter)
             .options(load_only('id', 'start_dt', 'title', 'description', 'protection_mode'),
                      subqueryload('acl_entries'))
             .order_by(Event.start_dt))
    events = [e for e in query if e.can_access(user)]

    feed = AtomFeed(feed_url=url, title='Indico Feed [{}]'.format(to_unicode(category.getTitle())))
    for event in events:
        feed.add(title=event.title,
                 summary=unicode(event.description),  # get rid of RichMarkup
                 url=url_for('event.conferenceDisplay', confId=event.id, _external=True),
                 updated=event.start_dt)
    return BytesIO(feed.to_string().encode('utf-8'))
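Note: the docstring above leaves event_filter entirely to the caller. A minimal sketch of a call site, assuming category, url, and user are already in hand and using an illustrative SQLAlchemy date-range criterion on Event.start_dt:

from datetime import datetime

# Illustrative bounds; any SQLAlchemy criterion over Event columns fits here.
event_filter = Event.start_dt.between(datetime(2016, 1, 1), datetime(2017, 1, 1))
atom_bytes = serialize_category_atom(category, url, user, event_filter)
xml_bytes = atom_bytes.read()  # Atom XML, UTF-8 encoded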
Example #2
    def render(self, data, media_type, **options):
        title = u'Novi\u010dar'

        # I'm sure this could be done better...
        page_url = settings.LOCAL_URL + "#!/latest"

        if "query" in data:
            query = data["query"]
            if isinstance(query, unicode):
                query = query.encode("utf8")

            urlencoded_query = urllib.quote(query)
            page_url += "#" + urlencoded_query
            url = settings.LOCAL_URL + '/v1/news/latest/?q=' + urlencoded_query
            title = data["query"].strip() + " - " + title
        else:
            url = settings.LOCAL_URL + '/v1/news/latest/'

        # It would be nice to know time of last crawl, so that the "updated"
        # field for the feed could be set.
        feed = AtomFeed(title=title,
                        url=page_url,
                        feed_url=url)

        if 'results' in data:
            self.add_feed_items(feed, data['results'])

        return feed.to_string()
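Note: the comment above wishes for the time of the last crawl so that the feed-level "updated" field could be set. Other snippets in this collection (Example #10, for instance) pass updated= straight to the AtomFeed constructor, so a hedged sketch, assuming a hypothetical last_crawl_time datetime were available, might read:

# last_crawl_time is hypothetical; substitute however the crawler records it.
feed = AtomFeed(title=title,
                url=page_url,
                feed_url=url,
                updated=last_crawl_time)  # becomes the feed-level <updated> element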
Example #3
File: views.py Project: uastory/ecogwiki
 def get_sp_index(self, user, head):
     restype = self._get_restype()
     if restype == "default":
         pages = WikiPage.get_index(user)
         page_group = groupby(pages, lambda p: title_grouper(p.title))
         html = self._template("wiki_sp_index.html", {"page_group": page_group})
         self.response.headers["Content-Type"] = "text/html; charset=utf-8"
         self._set_response_body(html, head)
     elif restype == "atom":
         pages = WikiPage.get_index(None)
         config = WikiPage.yaml_by_title(".config")
         host = self.request.host_url
         url = "%s/sp.index?_type=atom" % host
         feed = AtomFeed(
             title="%s: title index" % config["service"]["title"],
             feed_url=url,
             url="%s/" % host,
             author=config["admin"]["email"],
         )
         for page in pages:
             feed.add(
                 title=page.title,
                 content_type="html",
                 author=page.modifier,
                 url="%s%s" % (host, page.absolute_url),
                 updated=page.updated_at,
             )
         self.response.headers["Content-Type"] = "text/xml; charset=utf-8"
         self._set_response_body(feed.to_string(), head)
     else:
         self.abort(400, "Unknown type: %s" % restype)
Example #4
def generate_atom_feed():
    striplist = sorted(get_striplist())

    last_five_strips = reversed(striplist[-5:])

    feed = AtomFeed(
        title="Gone with the Blastwave",
        subtitle="Unofficial feed for the GWTB comics.",
        feed_url="https://github.com/Bystroushaak/gwtb_atom_generator",
        url="http://www.blastwave-comic.com/",
        author="Bystroushaak")

    for comic_id, title in last_five_strips:
        image_link = image_link_from_comic_number(comic_id)
        date = get_date_from_image(image_link)

        feed.add(
            title=title,
            # content="Body of my post",
            # content_type="text",
            author='GWTB',
            url='http://www.blastwave-comic.com/index.php?p=comic&nro=%d' %
            comic_id,
            updated=date)

    return feed.to_string()
Example #5
File: feed.py Project: mozii/golb
def gen_feed(runtime):
    print "Generate feed.atom.."
    posts = runtime.posts
    conf = runtime.conf
    charset = runtime.charset
    feed = AtomFeed(
        title=conf["blog"]["name"],
        subtitle=conf["blog"]["description"],
        feed_url=conf["blog"]["url"]+"/feed.atom",
        url=conf["blog"]["url"],
        author=conf["author"]["name"]
    )

    # gen the first 10 posts
    for post in posts[:10]:
        feed.add(
            title=post.title,
            content=post.html,
            content_type="html",
            author=conf["author"]["name"],
            url=conf["blog"]["url"]+"/"+post.out,
            updated=post.update_at
        )

    open("feed.atom", "w").write(feed.to_string().encode(charset))
Example #6
File: views.py Project: namongk/ecogwiki
    def get_changes(self, user, head):
        restype = get_restype(self.request)
        rendered = None

        if restype == 'default':
            if rendered is None:
                pages = WikiPage.get_changes(user)
                rendered = template(self.request, 'wiki_sp_changes.html',
                                          {'pages': pages})
            self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
            set_response_body(self.response, rendered, head)
        elif restype == 'atom':
            if rendered is None:
                pages = WikiPage.get_changes(None, 3, include_body=True)
                config = WikiPage.get_config()
                host = self.request.host_url
                url = "%s/sp.changes?_type=atom" % host
                feed = AtomFeed(title="%s: changes" % config['service']['title'],
                                feed_url=url,
                                url="%s/" % host,
                                author=config['admin']['email'])
                for page in pages:
                    feed.add(title=page.title,
                             content_type="html",
                             content=page.rendered_body,
                             author=page.modifier,
                             url='%s%s' % (host, page.absolute_url),
                             updated=page.updated_at)
                rendered = feed.to_string()
            self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
            set_response_body(self.response, rendered, head)
        else:
            self.abort(400, 'Unknown type: %s' % restype)
Example #7
File: views.py Project: namongk/ecogwiki
    def get_changes(self, user, head):
        restype = get_restype(self.request)
        rendered = None

        if restype == 'default':
            if rendered is None:
                pages = WikiPage.get_changes(user)
                rendered = template(self.request, 'wiki_sp_changes.html',
                                    {'pages': pages})
            self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
            set_response_body(self.response, rendered, head)
        elif restype == 'atom':
            if rendered is None:
                pages = WikiPage.get_changes(None, 3, include_body=True)
                config = WikiPage.get_config()
                host = self.request.host_url
                url = "%s/sp.changes?_type=atom" % host
                feed = AtomFeed(title="%s: changes" %
                                config['service']['title'],
                                feed_url=url,
                                url="%s/" % host,
                                author=config['admin']['email'])
                for page in pages:
                    feed.add(title=page.title,
                             content_type="html",
                             content=page.rendered_body,
                             author=page.modifier,
                             url='%s%s' % (host, page.absolute_url),
                             updated=page.updated_at)
                rendered = feed.to_string()
            self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
            set_response_body(self.response, rendered, head)
        else:
            self.abort(400, 'Unknown type: %s' % restype)
Example #8
File: views.py Project: namongk/ecogwiki
 def get_index(self, user, head):
     restype = get_restype(self.request)
     if restype == 'default':
         pages = WikiPage.get_index(user)
         page_group = groupby(pages,
                              lambda p: title_grouper(p.title))
         html = template(self.request, 'wiki_sp_index.html',
                               {'page_group': page_group})
         self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
         set_response_body(self.response, html, head)
     elif restype == 'atom':
         pages = WikiPage.get_index(None)
         config = WikiPage.get_config()
         host = self.request.host_url
         url = "%s/sp.index?_type=atom" % host
         feed = AtomFeed(title="%s: title index" % config['service']['title'],
                         feed_url=url,
                         url="%s/" % host,
                         author=config['admin']['email'])
         for page in pages:
             feed.add(title=page.title,
                      content_type="html",
                      author=page.modifier,
                      url='%s%s' % (host, page.absolute_url),
                      updated=page.updated_at)
         self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
         set_response_body(self.response, feed.to_string(), head)
     else:
         self.abort(400, 'Unknown type: %s' % restype)
Example #9
File: views.py Project: namongk/ecogwiki
 def get_index(self, user, head):
     restype = get_restype(self.request)
     if restype == 'default':
         pages = WikiPage.get_index(user)
         page_group = groupby(pages, lambda p: title_grouper(p.title))
         html = template(self.request, 'wiki_sp_index.html',
                         {'page_group': page_group})
         self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
         set_response_body(self.response, html, head)
     elif restype == 'atom':
         pages = WikiPage.get_index(None)
         config = WikiPage.get_config()
         host = self.request.host_url
         url = "%s/sp.index?_type=atom" % host
         feed = AtomFeed(title="%s: title index" %
                         config['service']['title'],
                         feed_url=url,
                         url="%s/" % host,
                         author=config['admin']['email'])
         for page in pages:
             feed.add(title=page.title,
                      content_type="html",
                      author=page.modifier,
                      url='%s%s' % (host, page.absolute_url),
                      updated=page.updated_at)
         self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
         set_response_body(self.response, feed.to_string(), head)
     else:
         self.abort(400, 'Unknown type: %s' % restype)
Example #10
    def publish(self):
        feed = AtomFeed(title=self.title,
                        id=self.link,
                        url=self.link,
                        icon=self.icon,
                        updated=self.updated)

        for date, summary in sorted(self.new_entries, reverse=True):
            entry_link = self.link + '#' + date.strftime('%Y-%m-%d')
            updated = date + datetime.timedelta(hours=23, minutes=59, seconds=59)

            title = "Digest for " + date.strftime('%Y-%m-%d')

            if self.prefix:
                title = '[' + self.prefix + '] ' + title

            if self.suffix:
                title = title + ' [' + self.suffix + ']'

            feed.add(title=title,
                     id=entry_link,
                     content=summary,
                     content_type='html',
                     url=entry_link,
                     updated=updated)

        self.xml = feed.to_string()
Example #11
File: feeds.py Project: tribela/dju-postg
def notification_feed():
    url = 'http://office.dju.kr/postg/board/board1.htm'
    resp = session.get(url)
    tree = html.fromstring(resp.content.decode(resp.apparent_encoding))
    table = tree.xpath('//*/table//table//table//table//table[2]')[0]

    feed = AtomFeed(title='DJU postg notification',
                    url='http://localhost/',
                    author='Kjwon15')

    for tr in table.xpath('tr[position() mod 2 = 1 and position() != last()]'):
        number = tr.xpath('td[1]')[0].text_content().strip()
        title = tr.xpath('td[2]')[0].text_content().strip()
        is_new = bool(tr.xpath('td[2]')[0].xpath('img'))
        author = tr.xpath('td[3]')[0].text_content().strip()
        date = tr.xpath('td[4]')[0].text_content().strip()
        date = datetime.strptime(date, '%Y-%m-%d')
        link = url + tr.xpath('td[2]/a')[0].attrib['href']

        feed.add(title='{}{} {}'.format(number, ' [new]' if is_new else '',
                                        title),
                 author=author,
                 url=link,
                 updated=date)

    return feed.to_string()
Example #12
    def render_output(self):
        _feed_global = {
            'author': self.flourish.site_config['author'],
            'title': self.flourish.site_config['title'],
            'url': self.flourish.site_config['base_url'],
            'feed_url': '%s%s' % (
                self.flourish.site_config['base_url'],
                self.current_url,
            ),
        }
        _feed = AtomFeed(**_feed_global)

        for _object in self.source_objects:
            entry = {
                'title': _object.title,
                'content': _object.body,
                'content_type': 'html',
                'url': _object.absolute_url,
                'published': _object.published,
                'updated': _object.published,
                'author': self.flourish.site_config['author'],
            }
            if 'author' in _object:
                entry['author'] = _object.author
            if 'updated' in _object:
                entry['updated'] = _object.updated
            _feed.add(**entry)

        return _feed.to_string()
Example #13
 def _feed(self, *args, **kwargs):
     host = cherrypy.request.base
     atom = AtomFeed(title=self.blog_title, url=host,
             feed_url=cherrypy.url(),
             author=self.author)
     for post in self.listing():
         atom.add(title=post["title"],
                 url=host + post["path"],
                 author=self.author,
                 content_type="html",
                 content=post["html"],
                 updated=post["date"])
     return atom.to_string()
Example #14
def render_atom(req, title, path, pages, include_content=False, use_published_date=False):
    config = WikiPage.get_config()
    host = req.get_host()
    title = '%s: %s' % (config['service']['title'], title)
    url = "%s/%s?_type=atom" % (host, path)
    feed = AtomFeed(title=title, feed_url=url, url="%s/" % host, author=config['admin']['email'])
    for page in pages:
        feed.add(title=page.title,
                 content_type="html",
                 content=(page.rendered_body if include_content else ""),
                 author=page.modifier,
                 url='%s%s' % (host, page.absolute_url),
                 updated=(page.published_at if use_published_date else page.updated_at))
    return feed.to_string()
Example #15
File: markbox.py Project: one2Ter/markbox
 def _feed(self, *args, **kwargs):
     host = cherrypy.request.base
     atom = AtomFeed(title=self.blog_title,
                     url=host,
                     feed_url=cherrypy.url(),
                     author=self.author)
     for post in self.listing():
         atom.add(title=post["title"],
                  url=host + post["path"],
                  author=self.author,
                  content_type="html",
                  content=post["html"],
                  updated=post["date"])
     return atom.to_string()
Example #16
File: atom.py Project: vstitches/indico
    def _execute(self, fossils):
        results = fossils["results"]
        if type(results) != list:
            results = [results]

        feed = AtomFeed(title="Indico Feed", feed_url=fossils["url"])

        for fossil in results:
            feed.add(
                title=unicodeOrNone(fossil["title"]),
                summary=unicodeOrNone(fossil["description"]),
                url=fossil["url"],
                updated=fossil["startDate"],  # ugh, but that's better than creationDate
            )
        return feed.to_string()
Example #17
	def spider_opened(self, spider):
		self.feed = AtomFeed(
			title = "Hacker News >100",
			subtitle = "Hacker News over 100 points",
			feed_url = "http://feeds.dannysu.com/hackernews100.atom",
			url = "http://news.ycombinator.com/over?points=100"
		)
Example #18
File: atom.py Project: marcosmolla/indico
    def _execute(self, fossils):
        results = fossils['results']
        if type(results) != list:
            results = [results]

        feed = AtomFeed(title='Indico Feed', feed_url=fossils['url'])

        for fossil in results:
            feed.add(
                title=unicodeOrNone(fossil['title']),
                summary=unicodeOrNone(fossil['description']),
                url=fossil['url'],
                updated=fossil[
                    'startDate']  # ugh, but that's better than creationDate
            )
        return feed.to_string()
Example #19
File: atom.py Project: wasm-network/indico
    def _execute(self, fossils):
        results = fossils['results']
        if not isinstance(results, list):
            results = [results]

        feed = AtomFeed(title='Indico Feed', feed_url=fossils['url'])

        for fossil in results:
            feed.add(
                title=to_unicode(fossil['title']) or None,
                summary=to_unicode(fossil['description']) or None,
                url=fossil['url'],
                updated=_deserialize_date(
                    fossil['startDate']
                )  # ugh, but that's better than creationDate
            )
        return feed.to_string()
Example #20
File: anatolik.py Project: dzeban/anatolik
def output():
    print('\n [:::  Writing output :::]\n')
    out_dir = site.root['output']
    for post in site.posts.values():
        path = os.path.join(out_dir, post.Url)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w') as f:
            f.write(post.content)
        print(path)

    for f in site.files:
        path_split = f.split(site.root['content'])
        if len(path_split) == 1:  # Doesn't split
            continue

        file_path = path_split[-1]
        file_path = file_path[1:]  # Cut first '/'
        path = os.path.join(out_dir, file_path)
        print(path)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        if os.path.exists(path):
            os.remove(path)
        shutil.copy(f, path)

    # Generate feed
    feed = AtomFeed(title=site.info['title'],
                    feed_url=site.info['url'] + '/feed',
                    url=site.info['url'],
                    author=site.info['author'])

    for post in site.posts.values():
        if post.Layout == 'post':
            feed.add(title=post.Title,
                     content=post.html,
                     content_type="html",
                     author=post.Author,
                     url=post.Url,
                     updated=post.Date)

    with open(os.path.join(site.root['output'], 'feed'), 'w') as feed_file:
        feed_file.write(feed.to_string())

    # Update cache
    with open(os.path.join(site.root['output'], site.cache_name),
              'wb') as cache_file:
        pickle.dump(site.posts, cache_file)
Example #21
File: anatolik.py Project: dzeban/anatolik
def output():
    print('\n [:::  Writing output :::]\n')
    out_dir = site.root['output']
    for post in site.posts.values():
        path = os.path.join(out_dir, post.Url)
        os.makedirs(os.path.dirname(path), exist_ok = True)
        with open(path, 'w') as f:
            f.write(post.content)
        print(path)

    for f in site.files:
        path_split = f.split(site.root['content'])
        if len(path_split) == 1: # Doesn't split
            continue

        file_path = path_split[-1]
        file_path = file_path[1:] # Cut first '/'
        path = os.path.join(out_dir, file_path)
        print(path)
        os.makedirs(os.path.dirname(path), exist_ok = True)
        if os.path.exists(path):
            os.remove(path)
        shutil.copy(f, path)

    # Generate feed
    feed = AtomFeed(title    = site.info['title'],
                    feed_url = site.info['url'] + '/feed',
                    url      = site.info['url'],
                    author   = site.info['author'])

    for post in site.posts.values():
        if post.Layout == 'post':
            feed.add(title        = post.Title,
                     content      = post.html,
                     content_type = "html",
                     author       = post.Author,
                     url          = post.Url,
                     updated      = post.Date)

    with open(os.path.join( site.root['output'],'feed'), 'w') as feed_file:
        feed_file.write(feed.to_string())

    # Update cache
    with open(os.path.join(site.root['output'], site.cache_name), 'wb') as cache_file:
        pickle.dump(site.posts, cache_file)
Example #22
File: atom.py Project: NIIF/indico
    def _execute(self, fossils):
        results = fossils['results']
        if type(results) != list:
            results = [results]

        feed = AtomFeed(
            title='Indico Feed',
            feed_url=fossils['url']
        )

        for fossil in results:
            feed.add(
                title=unicodeOrNone(fossil['title']),
                summary=unicodeOrNone(fossil['description']),
                url=fossil['url'],
                updated=fossil['startDate']  # ugh, but that's better than creationDate
                )
        return feed.to_string()
Example #23
def viewCategoriesATOM():
    try:

        feed = AtomFeed('Categories',
                        feed_url=request.url, url=request.url_root)
        categories = session.query(Categories).order_by('name')

        for category in categories:
            feed.add(category.name, unicode(category.name),
                     content_type='html',
                     author=category.name,
                     url='',
                     updated=category.date_modified,
                     published=category.date_created)
        return feed.get_response()
    except:
        flash('Error')
        return redirect(url_for('showHomepage'))
Example #24
def index(req):
   forumurl = "writeme" # replace with e.g. "https://forums.factorio.com/viewforum.php?f=3"
   if forumurl == "writeme":
      req.status = mod_python.apache.HTTP_INTERNAL_SERVER_ERROR
      return "The admin for this script needs to manually set the 'forumurl' parameter in the source code"

   baseurl = get_baseurl(forumurl);
   soup = get_soup(forumurl)

   forumtitle = get_forumtitle(soup)

   #generate feed
   feed = AtomFeed(title=forumtitle,
                   url=forumurl,
                   icon=get_favicon(soup, baseurl))
   #Add forum topics
   for a in soup.findAll("a", { "class" : "topictitle" }):
      datestring = a.parent.contents[-1]
      datematch = re.match('^ » (.*?)\s*$', datestring, re.M)
      datestring_trimmed = datematch.group(1)
      published = updated = dateutil.parser.parse(datestring_trimmed)

      author_a = a.parent.find("a", { "class" : "username-coloured"})
      if author_a:
         author = author_a.string
      else:
         author = "(author not found)"

      #phpBB generates a unique new session id (sid) for each forum
      #download, and adds this to all urls. This will make feed
      #readers interpret each link as unique each time it polls. So we
      #need to remove the sid=...
      url = baseurl + "/" + a["href"]
      url = re.sub('&sid=[0-9a-f]+','', url)

      feed.add(title=a.string,
               url=url,
               published=published,
               updated=updated,
               author=author,
               )

   return feed.to_string()
Example #25
class HackernewsPipeline(object):
	output_filename = "hackernews100.atom"

	def __init__(self):
		dispatcher.connect(self.spider_opened, signal=signals.spider_opened)
		dispatcher.connect(self.spider_closed, signal=signals.spider_closed)
		self.existing_feed = feedparser.parse(self.output_filename)

	def spider_opened(self, spider):
		self.feed = AtomFeed(
			title = "Hacker News >100",
			subtitle = "Hacker News over 100 points",
			feed_url = "http://feeds.dannysu.com/hackernews100.atom",
			url = "http://news.ycombinator.com/over?points=100"
		)

	def spider_closed(self, spider):
		f = codecs.open(self.output_filename, 'w', 'utf-8')
		f.write(self.feed.to_string())

	def process_item(self, item, spider):
		found = False
		for entry in self.existing_feed['entries']:
			if entry.link == item['link']:
				item['body'] = entry.content[0].value
				found = True

		if not found:
			body = ""
			if not item['link'].endswith('.pdf'):
				html = urllib.urlopen(item['link']).read()
				body = Document(html).summary()
			item['body'] = '<a href="' + item['comment'] + '">HN Comments</a><br>' + body

		self.feed.add(
			url = item['link'],
			title = item['title'],
			content = item['body'],
			content_type = "html",
			updated=datetime.datetime.utcnow()
		)
		return item
Example #26
def hn100():
    feed = AtomFeed(
        title = "Hacker News >100",
        subtitle = "Hacker News over 100 points",
        feed_url = "http://feeds.dannysu.com/hackernews100.atom",
        url = "http://news.ycombinator.com/over?points=100"
    )

    entries = Entry.query.all()
    regex = re.compile(r"HN Comments.*", re.DOTALL)
    for entry in entries:
        feed.add(
            url = entry.link,
            title = entry.title,
            content = regex.sub("HN Comments</a></br></body></html>", entry.body),
            content_type = "html",
            updated=datetime.datetime.utcnow()
        )

    return Response(feed.to_string(), mimetype='application/atom+xml')
Example #27
def atom(request):
        """Display atom feed
        """
    
        feed = AtomFeed(title="Alternativebit",
                        subtitle="Alternativebit",
                        feed_url="http://www.alternativebit.fr/atom",
                        url="http://alternativebit.fr",
                        author="Ninja Trappeur")
        session = dbSession()
        query = session.query(Article).order_by(desc('id'))[0:14]
        session.close()
        for article in query:
                feed.add(title=article.title,
                        content=article.content,
                        content_type="html",
                        author=article.author,
                        url="http://www.alternativebit.fr/article/{0}".format(article.id),
                        updated=article.date)
        return Response(feed.to_string())
Example #28
def note_feed():
    from pyatom import AtomFeed
    site_url = '://'.join(request.urlparts[:2])
    author = app_config.get('feed.author')
    db_session = Session()
    notes = db_session.query(Note).order_by(Note.created_at.desc()).limit(10).all()
    feed = AtomFeed(title=app_config.get('feed.title'),
                    subtitle=app_config.get('feed.subtitle'),
                    feed_url=site_url + app.get_url('note-feed'),
                    url=site_url + app.get_url('note-list'),
                    author=author)
    for note in notes:
        feed.add(title=note.title,
                 content=strip_cut(note.text),
                 content_type="html",
                 author=author,
                 url=site_url + app.get_url('note-details', note_id=note.id),
                 updated=note.created_at)
    response.add_header('Content-Type', 'application/atom+xml')
    return feed.to_string()
Example #29
File: shot.py Project: Vostbur/blogshot
def rss(dmp):
    url = config['site'][:-1] if config['site'].endswith('/') else config['site']
    feed = AtomFeed(title=config['title'],
                feed_url=url + "/feed",
                url=url,
                author=config['author'])
    feed_dict = OrderedDict(sorted(dmp.archive.items(), reverse=True))
    #current_date = datetime.now().strftime('%Y-%m-%d')
    try:
        for x in xrange(10):
            feed.add(title=feed_dict.items()[x][1]['title'],
                    content=feed_dict.items()[x][1]['content'],
                    content_type="html",
                    author=config['author'],
                    url=url + feed_dict.items()[x][0],
                    updated=datetime.strptime(feed_dict.items()[x][1]['date'], '%Y-%m-%d')
                    )
    except IndexError:
        pass
    save(os.path.join(config['output'], config['feed']), feed.to_string())
Example #30
File: views.py Project: uastory/ecogwiki
    def _render_posts_atom(self, title, pages):
        host = self.request.host_url
        config = WikiPage.yaml_by_title(".config")
        if title is None:
            feed_title = "%s: posts" % config["service"]["title"]
            url = "%s/sp.posts?_type=atom" % host
        else:
            feed_title = title
            url = "%s/%s?_type=atom" % (WikiPage.title_to_path(title), host)

        feed = AtomFeed(title=feed_title, feed_url=url, url="%s/" % host, author=config["admin"]["email"])
        for page in pages:
            feed.add(
                title=page.title,
                content_type="html",
                content=page.rendered_body,
                author=page.modifier,
                url="%s%s" % (host, page.absolute_url),
                updated=page.published_at,
            )
        return feed.to_string()
Example #31
def atom(request):
    """Display atom feed
        """

    feed = AtomFeed(title="Alternativebit",
                    subtitle="Alternativebit",
                    feed_url="http://www.alternativebit.fr/atom",
                    url="http://alternativebit.fr",
                    author="Ninja Trappeur")
    session = dbSession()
    query = session.query(Article).order_by(desc('id'))[0:14]
    session.close()
    for article in query:
        feed.add(title=article.title,
                 content=article.content,
                 content_type="html",
                 author=article.author,
                 url="http://www.alternativebit.fr/article/{0}".format(
                     article.id),
                 updated=article.date)
    return Response(feed.to_string())
Example #32
def note_feed():
    from pyatom import AtomFeed
    site_url = '://'.join(request.urlparts[:2])
    author = app_config.get('feed.author')
    db_session = Session()
    notes = db_session.query(Note).order_by(
        Note.created_at.desc()).limit(10).all()
    feed = AtomFeed(title=app_config.get('feed.title'),
                    subtitle=app_config.get('feed.subtitle'),
                    feed_url=site_url + app.get_url('note-feed'),
                    url=site_url + app.get_url('note-list'),
                    author=author)
    for note in [n for n in notes if not n.is_draft]:
        feed.add(title=note.title,
                 content=strip_cut(note.text),
                 content_type="html",
                 author=author,
                 url=site_url + app.get_url('note-details', note_id=note.id),
                 updated=note.created_at)
    response.add_header('Content-Type', 'application/atom+xml')
    return feed.to_string()
Example #33
File: psorss.py Project: Sakaki/pso2rss
def getCurrentData():
    feed = AtomFeed(title=u"PSO2 予告緊急RSS",
                    subtitle=u"【非公式】PSO2の予告緊急を近い順に表示するよ",
                    feed_url=u"http://sakaki2700.dip.jp/pso2rss",
                    url=u"http://pso2.jp/players/news/?mode=event",
                    author=u"")

    conn = sqlite3.connect(db_path)
    sql = "select * from events where time > datetime('now', '+9 hours') order by time limit 10;".decode("utf-8")

    cursor = conn.cursor()
    cursor.execute(sql)
    for row in cursor:
        feed.add(title=u"【"+row[0]+u"】"+row[1],
                 content=row[2],
                 content_type=u"html",
                 author=u"pso2",
                 url=u"http://pso2.jp/players/news/?mode=event",
                 updated=datetime.utcnow())

    return feed.to_string().encode("utf-8")
Example #34
File: views.py Project: namongk/ecogwiki
def render_posts_atom(req, title, pages):
    host = req.host_url
    config = WikiPage.get_config()
    if title is None:
        feed_title = '%s: posts' % config['service']['title']
        url = "%s/sp.posts?_type=atom" % host
    else:
        feed_title = title
        url = "%s/%s?_type=atom" % (WikiPage.title_to_path(title), host)

    feed = AtomFeed(title=feed_title,
                    feed_url=url,
                    url="%s/" % host,
                    author=config['admin']['email'])
    for page in pages:
        feed.add(title=page.title,
                 content_type="html",
                 content=page.rendered_body,
                 author=page.modifier,
                 url='%s%s' % (host, page.absolute_url),
                 updated=page.published_at)
    return feed.to_string()
Example #35
File: blog.py Project: blakwurm/wurmforge
def __makeFeedsForOverview(forge, pagekey, posts, url):
    if forge.settingFor('address'):
        filename = 'atom.xml'
        relpath = url + '/' + filename
        feedurl = forge.settingFor('address') + '/' + relpath
        pagedef = forge.pageInfoFor(pagekey)
        feed = AtomFeed(title=pagedef['title'],
                        subtitle=pagedef['subtitle'],
                        feed_url=feedurl,
                        url=url,
                        author=pagedef['author'])
        for postdef in posts:
            feed.add(title=postdef['title'],
                     content=str(postdef['soup']),
                     content_type='xhtml',
                     author=postdef['author'],
                     url=forge.settingFor('address') +
                     makePostUrl(pagedef, postdef),
                     updated=datetime.fromisoformat(postdef['date']))
        return {relpath: feed.to_string()}
    else:
        return {}
Example #36
def feed(self):
	db.reset_queries()
	url = ''.join(['https://' if self.is_secure() else 'http://', self.get_host()])
	feed = AtomFeed(
		title = 'The VitalVas',
		url = url,
		feed_url = ''.join([url, reverse('feed')]),
		author = {
			'name': 'VitalVas',
			'email': '*****@*****.**'
		},
	)
	for item in Article.objects.filter(publish=True).filter(published__lt=datetime.now())[:5]:
		feed.add(
			title = item.title,
			content = cut_preview(item.html_compile),
			content_type = 'html',
			author = 'VitalVas',
			url = ''.join([url, item.get_absolute_url()]),
			updated = item.updated,
		)
	return HttpResponse(feed.to_string(), content_type='text/xml')
Example #37
File: views.py Project: namongk/ecogwiki
def render_posts_atom(req, title, pages):
    host = req.host_url
    config = WikiPage.get_config()
    if title is None:
        feed_title = '%s: posts' % config['service']['title']
        url = "%s/sp.posts?_type=atom" % host
    else:
        feed_title = title
        url = "%s/%s?_type=atom" % (WikiPage.title_to_path(title), host)

    feed = AtomFeed(title=feed_title,
                    feed_url=url,
                    url="%s/" % host,
                    author=config['admin']['email'])
    for page in pages:
        feed.add(title=page.title,
                 content_type="html",
                 content=page.rendered_body,
                 author=page.modifier,
                 url='%s%s' % (host, page.absolute_url),
                 updated=page.published_at)
    return feed.to_string()
Example #38
File: generator.py Project: jincheng/lilac
    def initialize(self, localhost):
        """Initialize config, blog, author, feed and jinja2 environment"""
        # read config to update the default
        try:
            conf = config.read()
        except ConfigSyntaxError as e:
            logger.error(e.__doc__)
            sys.exit(1)

        update_nested_dict(self.config, conf)
        # update blog and author according to configuration
        self.blog.__dict__.update(self.config['blog'])
        self.author.__dict__.update(self.config['author'])
        # reset root_path if not in localhost
        if not localhost:
            self.root_path = self.config["root_path"]
        # initialize feed
        self.feed.feed = AtomFeed(
            title=self.blog.name,
            subtitle=self.blog.description,
            feed_url=self.blog.url+"/feed.atom",
            url=self.blog.url,
            author=self.author.name
        )
        #
        # -------- initialize jinja2 --
        #
        # detect if there is a theme.toml
        theme_toml = join(self.blog.theme, "theme.toml")

        if exists(theme_toml):
            self.theme = toml.loads(open(theme_toml).read().decode(charset))
        # update theme_conf with config's theme section
        # user's configuation can reset theme's configuation
        config_theme_section = self.config.get("theme", {})
        update_nested_dict(self.theme, config_theme_section)

        # get templates directory
        templates = join(self.blog.theme, "templates")
        # set a render
        jinja_global_data = dict(
            root_path=self.root_path,
            blog=self.blog,
            author=self.author,
            config=self.config,
            theme=self.theme
        )
        renderer.initialize(templates, jinja_global_data)
        logger.success("Generator initialized, root_path = \"%s\"" % self.root_path)
        # send signal that generator was already initialized
        signals.initialized.send(self)
Example #39
def render_atom(req,
                title,
                path,
                pages,
                include_content=False,
                use_published_date=False):
    config = WikiPage.get_config()
    host = req.host_url
    title = '%s: %s' % (config['service']['title'], title)
    url = "%s/%s?_type=atom" % (host, path)
    feed = AtomFeed(title=title,
                    feed_url=url,
                    url="%s/" % host,
                    author=config['admin']['email'])
    for page in pages:
        feed.add(title=page.title,
                 content_type="html",
                 content=(page.rendered_body if include_content else ""),
                 author=page.modifier,
                 url='%s%s' % (host, page.absolute_url),
                 updated=(page.published_at
                          if use_published_date else page.updated_at))
    return feed.to_string()
Example #40
def atom_feed(request):
    feed = AtomFeed(
        title=request.registry.settings['site_name'],
        feed_url=request.route_url('atom_feed'),
        url=request.route_url('view_all'),
        author=request.registry.settings['site_name']  # will do for now
    )

    articles = DBSession.query(Article).order_by(
        Article.date_published.desc()).filter_by(
            is_published=True)  # TODO: limit x
    for article in articles:
        content = format_article(article)
        feed.add(content['title'],
                 content['body'],
                 url=article.get_url(request),
                 updated=article.updated,
                 published=article.date_published)

    return Response(
        body=feed.to_string(),
        content_type='application/atom+xml',
    )
Example #41
File: build.py Project: colons/words
def render_feed(articles):
    feed = AtomFeed(
        title='words from a colons',
        feed_url=DOMAIN + FEED_URL,
        url=DOMAIN + ROOT,
        author=AUTHOR,
    )

    feed_item_template = get_template('feed_item.html')

    for article in articles:
        context = Context({'article': article})

        feed.add(
            title=article.title,
            content=feed_item_template.render(context),
            content_type='html',
            author=AUTHOR,
            url=DOMAIN + article.absolute_url,
            updated=article.meta['date'],
        )

    return feed.to_string()
Example #42
def news_feed():
    feed = AtomFeed("Jazzband News Feed",
                    feed_url=request.url,
                    url=request.url_root,
                    generator=None)
    for page in news_pages:
        if page.path == "index":
            continue
        published = page.meta.get("published", None)
        updated = page.meta.get("updated", published)
        summary = page.meta.get("summary", None)
        feed.add(
            title=page.meta["title"],
            content=str(page.html),
            content_type="html",
            summary=summary,
            summary_type="text",
            author=page.meta.get("author", None),
            url=full_url(url_for("content.news", path=page.path)),
            updated=updated,
            published=published,
        )
    return Response(feed.to_string(), mimetype="application/atom+xml")
Example #43
    def get(self, username):
        developer = findDeveloperByUsername(username)
        if not developer:
            self.response.set_status(404)
            self.renderResponse('errors/404.html')
            return
        snippets = findSnipsByDeveloperId(developer['developer_id'])

        from pyatom import AtomFeed
        import datetime

        feed = AtomFeed(
            title='%s\'s code snippets' % developer['username'],
            subtitle='(most recent code snippets from this developer)',
            feed_url=buildUrl('dev-feed',
                              username=developer['username'],
                              _full=True),
            url=buildUrl('dev-username',
                         username=developer['username'],
                         _full=True),
            author=developer['username'])

        for snip in snippets:
            feed.add(
                title=snip['title'],
                content=snip['description'],
                content_type='html',
                author=developer['username'],
                url=buildUrl('snip-index',
                             lang=snip['lang'],
                             cat=snip['cat'],
                             title=snip['sanitized_title']),
                updated=snip['creation_date']  # datetime.datetime.utcnow()
            )

        self.response.headers['Content-type'] = 'text/xml;charset=utf-8'
        self.response.write(feed.to_string())
Example #44
File: views.py Project: uastory/ecogwiki
    def get_sp_changes(self, user, head):
        restype = self._get_restype()
        email = user.email() if user is not None else "None"
        rendered = None

        if restype == "default":
            if rendered is None:
                pages = WikiPage.get_changes(user)
                rendered = self._template("wiki_sp_changes.html", {"pages": pages})
            self.response.headers["Content-Type"] = "text/html; charset=utf-8"
            self._set_response_body(rendered, head)
        elif restype == "atom":
            if rendered is None:
                pages = WikiPage.get_changes(None, 3, include_body=True)
                config = WikiPage.yaml_by_title(".config")
                host = self.request.host_url
                url = "%s/sp.changes?_type=atom" % host
                feed = AtomFeed(
                    title="%s: changes" % config["service"]["title"],
                    feed_url=url,
                    url="%s/" % host,
                    author=config["admin"]["email"],
                )
                for page in pages:
                    feed.add(
                        title=page.title,
                        content_type="html",
                        content=page.rendered_body,
                        author=page.modifier,
                        url="%s%s" % (host, page.absolute_url),
                        updated=page.updated_at,
                    )
                rendered = feed.to_string()
            self.response.headers["Content-Type"] = "text/xml; charset=utf-8"
            self._set_response_body(rendered, head)
        else:
            self.abort(400, "Unknown type: %s" % restype)
Example #45
    def generate_feed(posts, subtitle='', url="https://sowingseasons.com/feed.atom"):
        logger.info('generate_feed(%s)' % url)

        feed = AtomFeed(
            title="SowingSeasons",
            title_type="text",
            subtitle="takes awhile to grow anything. %s" % subtitle,
            subtitle_type="text",
            feed_url=url,
            url="https://sowingseasons.com",
            author="Blake VandeMerwe",
            icon="/static/img/ico_black.png",
            logo="/static/img/logo.png",
            rights="MIT LICENSE",
            rights_type="text",
            generator=("PyAtom", "https://github.com/sramana/pyatom", "1.4")
        )

        for post in posts.results:
            post = DotDict(post)

            feed.add(
                title=post.title,
                title_type="text",
                content=fn_markdown(post.content),
                content_type="html",
                summary=post.summary,
                summary_type="text",
                url='https://sowingseasons.com' + document_slug(post),
                updated=post.modified,
                author="Blake VandeMerwe",
                published=post.modified,
                rights="MIT LICENSE",
                rights_type="text"
            )

        return feed.to_string()
Example #46
 def render_string(self):
     feed = AtomFeed(**self.doc)
     return feed.to_string()
Example #47
File: build_blog.py Project: rsaxvc/blagr
index_posts = posts[:POSTS_PER_PAGE]
archive_posts = posts[POSTS_PER_PAGE:]
write_posts( POST_PATH_BASE + "index.html", BLOG_TITLE, index_posts, archive_posts, end_text )

tags = globulate_tags( posts )
tags.sort()
for tag in tags:
	write_tag_html( tag, posts, end_text )

shutil.copytree( INPUT_CSS_PATH, CSS_PATH_BASE )

if pyatom_present:
	feed = AtomFeed(title=BLOG_TITLE,
		subtitle=BLOG_SUBTITLE,
		feed_url=BLOG_FEED_URL,
		url=BLOG_URL,
		author=BLOG_AUTHOR)

	for post in posts:
		feed.add(title=post.title,
			content=post.text,
			content_type="html",
			author=post.author,
			url=post.wobpath(),
			updated=post.cdt
			)

	f = my_open(ATOM_PATH, 'w', 'utf-8')
	f.write( feed.to_string() )
	f.close()
Example #48
# load google json for application
CLIENT_ID = json.loads(open('client_secrets.json',
                            'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog Item App"

# Connect to Database and create database session
engine = create_engine('sqlite:///itemcatalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()

# Create a feed for Atom
feed = AtomFeed(title="Catalog Item App",
                subtitle="All changes made to the catalog.",
                feed_url="localhost:8000/atomfeed/",
                url="localhost:8000",
                author="John McLellan")


@app.route('/login')
def showLogin():
    setState()
    return render_template('login.html', STATE=login_session['state'])


# Create anti-forgery state token
def setState():
    if 'state' not in login_session:
        state = ''.join(
            random.choice(string.ascii_uppercase + string.digits)
Example #49
  item = []
  lstIdToMarkRead = []
  cat = i[1]
  lang = "en"
  for la in langArray:
    if cat == la[0]:
      lang = la[1]

  if lang == "en":
    print cat + " -- Language: " + lang
    fname = string.lower(cat.replace(' ','-')) + ".rss"
    if not os.path.isfile(fname):
      open(fname, 'w').close() 
    feedNew = AtomFeed(title=cat,
                  subtitle="My Feeds",
                  feed_url=feedUrl+string.lower(cat.replace(' ','-')) + ".rss",
                  url=feedUrl.split("/")[0] + "//" + feedUrl.split("/")[2] + "/",
                  author="")
    if len(lstRss) > 0:
      print "You have %s feeds!" % (str(len(lstRss)))
      rssUrl = urlRoot + fname    
      feedOld = feedparser.parse(rssUrl) # load the old feed from the file

      for rss in lstRss:
        title = cleanStr(rss[2])
        link = rss[3].replace('"','')
        author = rss[8]
        dateUpdated = rss[6]
        content = rss[5]
        lstIdToMarkRead.append(str(rss[7]))
Example #50
from pyatom import AtomFeed
import datetime

feed = AtomFeed(title="My Blog",
                subtitle="My example blog for a feed test.",
                feed_url="http://example.org/feed",
                url="http://example.org",
                author="Me")

# Do this for each feed entry
feed.add(title="My Post",
         content="Body of my post",
         content_type="html",
         author="Me",
         url="http://example.org/entry1",
         updated=datetime.datetime.utcnow())

print(feed.to_string())
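Note: the minimal example above prints the feed XML. Several of the longer snippets here write it to a file instead (Example #5 encodes explicitly, Example #55 uses codecs.open); a small sketch of that variant, with an arbitrary output name and explicit UTF-8 encoding:

import io

# to_string() returns text, so open the file in text mode with an explicit encoding.
with io.open("feed.atom", "w", encoding="utf-8") as fp:
    fp.write(feed.to_string())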
Example #51
    item = []
    lstIdToMarkRead = []
    cat = i[1]
    lang = "en"
    for la in langArray:
        if cat == la[0]:
            lang = la[1]

    if not (lang == "en"):
        print cat + " -- Language: " + lang
        fname = string.lower(cat.replace(' ', '-')) + ".rss"
        if not os.path.isfile(fname):
            open(fname, 'w').close()
        feedNew = AtomFeed(
            title=cat,
            subtitle="My Feeds",
            feed_url=feedUrl + string.lower(cat.replace(' ', '-')) + ".rss",
            url=feedUrl.split("/")[0] + "//" + feedUrl.split("/")[2] + "/",
            author="")
        if len(lstRss) > 0:
            print "You have %s feeds!" % (str(len(lstRss)))
            rssUrl = urlRoot + fname
            feedOld = feedparser.parse(rssUrl)  # load the old feed from the file

            for rss in lstRss:
                title = cleanStr(rss[2])
                link = rss[3].replace('"', '')
                author = rss[8]
                dateUpdated = rss[6]
                content = rss[5]
                lstIdToMarkRead.append(str(rss[7]))
Example #52
def timeline_to_feed(config):
    api_kwargs = {}
    if config.get('cache_dir'):
        api_kwargs['cache'] = tweepy.FileCache('.cache', timeout=60 * 5)

    api = get_api(config, **api_kwargs)
    print('Created Twitter API connection')

    me = api.me()
    print("Begin creating feed for {}'s timeline".format(me.screen_name))

    feed = AtomFeed(
        title=config.get(
            'feed_title',
            'Links from the timeline of @{}'.format(me.screen_name)),
        url='https://twitter.com/{}'.format(me.screen_name),
        feed_url=config.get('feed_url'),
        generator=('Tauphi', 'https://github.com/tgecho/tauphi', None),
    )

    assert config.get('max_items') or config.get(
        'max_days'), 'Please specify at least one of max_items or max_days.'

    item_count = 0
    min_date = date.today() - timedelta(
        days=config['max_days']) if config.get('max_days') else None

    for tweet in tweepy.Cursor(api.home_timeline, count=200).items():
        if tweet.entities.get('urls'):
            author = tweet.author

            item_count += 1

            if item_count % 10 == 0:
                print('{} items found'.format(item_count))

            if config.get(
                    'max_items') and item_count > config.get('max_items'):
                print('Max items ({}) reached'.format(config['max_items']))
                break

            if min_date and tweet.created_at.date() < min_date:
                print('Max days ({}) reached'.format(config['max_days']))
                break

            tweet_url = 'https://twitter.com/{}/status/{}'.format(
                tweet.author.screen_name, tweet.id_str)

            title = tweet.text
            content = """
            <p>{}</p>
            <p>
                <a href="{}">@{}</a>
                (<a href="{}">original</a>)
            </p>
            """.format(tweet.text,
                       'https://twitter.com/{}'.format(author.screen_name),
                       author.screen_name, tweet_url)

            for url in tweet.entities['urls']:
                expanded = url['expanded_url']
                display = url['display_url']

                title = title.replace(url['url'], display)

                link = '<a href="{}" title="{}">{}</a>'.format(
                    url['url'],
                    expanded,
                    display,
                )
                content = content.replace(url['url'], link)

                if any(expanded.endswith(e) for e in IMAGE_EXTENSIONS):
                    content += '<p><img src="{}" /></p>'.format(expanded)

            if getattr(tweet, 'extended_entities', None):
                for embed in tweet.extended_entities['media']:
                    if embed == 'photo':
                        content += '<p><img src="{}" /></p>'.format(
                            embed.media_url_https)

            if len(tweet.entities['urls']) == 1:
                item_url = tweet.entities['urls'][0]['url']
            else:
                item_url = tweet_url

            feed.add(id=tweet_url,
                     url=item_url,
                     title=title,
                     content=content,
                     content_type='html',
                     author='{} (@{})'.format(author.name, author.screen_name),
                     published=tweet.created_at,
                     updated=tweet.created_at,
                     links=[{
                         'href': u['url']
                     } for u in tweet.entities['urls']])

    feed_str = unicode(feed)

    print('Feed generated with {} items'.format(item_count))
    return feed_str
Example #53
 cursor.execute(query)
 lstRss = cursor.fetchall()
 item = []
 lstIdToMarkRead = []
 cat = i[1]
 lang = "en"
 for la in langArray:
   if cat == la[0]:
     lang = la[1]
 print cat + " -- Language: " + lang
 fname = string.lower(cat.replace(' ','-')) + ".rss"
 if not os.path.isfile(fname):
   open(fname, 'w').close() 
 feedA = AtomFeed(title=cat,
               subtitle="My Feeds",
               feed_url="http://172.16.69.162/atom/"+string.lower(cat.replace(' ','-')) + ".rss",
               url="http://172.16.69.162/",
               author="XXX")
 if len(lstRss) > 0:
   print "You have %s feeds!" % (str(len(lstRss)))
   rssUrl = urlRoot + fname    
   feed = feedparser.parse(rssUrl)
   for rss in lstRss:
     title = cleanStr(rss[2]).decode('unicode-escape')
     link = rss[3].replace('"','')
     author = rss[8]
     dateUpdated = rss[6]
     lstIdToMarkRead.append(str(rss[7]))
     if not (lang == "en"):
       title = trans(title, lang[1], "en")
       link = "http://translate.google.com.vn/translate?sl=" + lang[1] + "&tl=en&prev=_t&hl=en&ie=UTF-8&eotf=1&u=" + urllib.quote_plus(link)
Example #54
                f.write(texto)
                f.close()
                generate_html(texto, item)

                # clean up
                rm_files = glob.glob(item + "*p?m")
                for i in rm_files:
                    os.remove(i)
                rm_files = glob.glob(item + "*txt")
                for i in rm_files:
                    os.remove(i)

# create RSS feed
feed = AtomFeed(title="Proyectos de Ley",
                subtitle=u'del Congreso de la República del Perú',
                feed_url="http://myurl.com/feed",
                url="http://myurl.com",
                author="Me")

html_files = glob.glob(os.path.join("pdf", "*html"))
data_file = codecs.open("proyectos_data.json", "r", "utf-8")
data = json.loads(data_file.read())
data_file.close()

for i in html_files:
    title = i.replace("pdf/", "")
    title = title.replace(".html", "")
    title = title.replace("_", "/")
    for json in data:
        if json['numero_proyecto'] == title:
            content = json['titulo'] + "<br />"
Example #55
    def generate(self):
        '''Generate the whole static site.

        Iterates through all existing s2 pages, rendering and writing
        them (and copying all common files along). 
        It also generates the toc, a sitemap, and the atom feed
        etc. (in the future it should handle tags and categories)

        '''
        if self._dirs['base'] == None or not self._tree_ready:
            #there's NO base here or up the chain
            raise ValueError  #cannot generate!

        # wipe www dir & recreate
        self._wipe_www_dir()  #copy common files
        #shutil.copytree(self.dirs['common'],
        #                os.path.join(self.dirs['www'],"common"))
        slist = glob.glob(os.path.join(self.dirs['common'], "*"))
        for fo in slist:
            rfn = os.path.split(fo)[1]
            if os.path.isdir(fo):
                shutil.copytree(fo, os.path.join(self.dirs['www'], rfn))
            else:
                shutil.copy(fo, self.dirs['www'])

        # init atom file
        title = self.site_config['site_title']
        if title == '':
            title = "<No title>"
        feed = AtomFeed(title=title,
                        subtitle=self.site_config['site_subtitle'],
                        feed_url=os.path.join(self.site_config['site_url'],
                                              "atom.xml"),
                        url=self.site_config['site_url'],
                        author=self.site_config['default_author'])

        themes_to_copy = []  # full paths!
        generated_page_info = []
        for slug in self._pages_to_generate(
        ):  #this list of pages is in reverse chrono order
            p = s2page.Page(self, slug, isslug=True)
            generated_page_info.append({
                'slug': p.slug,
                'title': p.title,
                'date': p.creation_date,
                'in_toc': p.in_toc
            })
            t = p.theme_path
            if not t in themes_to_copy:
                themes_to_copy.append(t)
            # wipe destination.
            self._wipe_www_page(slug)
            pg_content = p.generate()  #generate page
            # add atom entry
            try:
                cdd = datetime.strptime(
                    p.creation_date,
                    '%Y-%m-%d')  # feed.add needs the date in datetime format
            except:
                print "Wrong date format in page '%s'. It should be YYYY-MM-DD." % p.slug
                print "Site Generation stopped!!  correct the date and generate again."
                self._wipe_www_dir()
                sys.exit()
            feed.add(title=p.title,
                     content=pg_content,
                     content_type="html",
                     author=p.author,
                     url=os.path.join(self.site_config['site_url'],
                                      "atom.xml"),
                     updated=cdd)

        # copy themes
        wthemesdir = os.path.join(self.dirs['www'], "themes")
        os.mkdir(wthemesdir)
        for d in themes_to_copy:
            dname = os.path.split(d)[1]
            destpath = os.path.join(wthemesdir, dname)
            shutil.copytree(d, destpath)
            # delete tpl files
            ttr = glob.glob(os.path.join(destpath, "*tpl"))
            for f in ttr:
                os.remove(f)

        # write atom file
        atomfile = codecs.open(os.path.join(self.dirs['www'], "atom.xml"),
                               "w",
                               encoding="utf-8",
                               errors="xmlcharrefreplace")
        atomfile.write(feed.to_string())
        atomfile.close()

        # create front page/s
        #print "generated_page_info for gf ",generated_page_info
        ff = self.site_config['fixed_frontpage']
        if ff != None and ff != '':
            self._set_fixed_frontpage(ff)
        else:
            self.generate_front(generated_page_info)
        self._generate_site_map(generated_page_info)
Example #56
 def render_string(self):
     feed = AtomFeed(**self.doc)
     return feed.to_string()
Example #57
 cursor.execute(query)
 lstRss = cursor.fetchall()
 item = []
 lstIdToMarkRead = []
 cat = i[1]
 lang = "en"
 for la in langArray:
     if cat == la[0]:
         lang = la[1]
 print cat + " -- Language: " + lang
 fname = string.lower(cat.replace(' ', '-')) + ".rss"
 if not os.path.isfile(fname):
     open(fname, 'w').close()
 feedA = AtomFeed(title=cat,
                  subtitle="My Feeds",
                  feed_url="http://172.16.69.162/atom/" +
                  string.lower(cat.replace(' ', '-')) + ".rss",
                  url="http://172.16.69.162/",
                  author="XXX")
 if len(lstRss) > 0:
     print "You have %s feeds!" % (str(len(lstRss)))
     rssUrl = urlRoot + fname
     feed = feedparser.parse(rssUrl)
     for rss in lstRss:
         title = cleanStr(rss[2]).decode('unicode-escape')
         link = rss[3].replace('"', '')
         author = rss[8]
         dateUpdated = rss[6]
         lstIdToMarkRead.append(str(rss[7]))
         if not (lang == "en"):
             title = trans(title, lang[1], "en")
             link = "http://translate.google.com.vn/translate?sl=" + lang[