Example #1
def create_rss_object(all_titles_with_s3, pod):
    items_list = []

    for title in all_titles_with_s3:
        # The S3 MP3 URL doubles as both the item link and its GUID.
        media_id = title["data-media-id"]
        guid = f"https://{pod.bucket_name}.s3.amazonaws.com/{media_id}.mp3"
        url = guid

        item = Item(
            title=title["data-asset-title"],
            link=url,
            description=title["data-title-description"],
            author="France Culture",
            guid=Guid(guid),
            pubDate=datetime.datetime.fromtimestamp(
                int(title["data-asset-created-date"])),
        )
        items_list.append(item)

    feed = Feed(
        title=pod.podcast,
        link="https://www.franceculture.fr/emissions/" + pod.podcast,
        image=Image(pod.podcast_pic_url, pod.podcast, pod.podcast_url)
        if pod.podcast_pic_url is not None else None,
        description=pod.podcast,
        language="en-US",
        lastBuildDate=datetime.datetime.now(),
        items=items_list,
    )
    return feed
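
The excerpt above omits its imports and the shape of the `pod` argument. A minimal sketch of how it might be driven, assuming the rfeed package; the PodcastConfig class and the sample values below are made up for illustration and are not part of the original project.

import datetime
from dataclasses import dataclass

from rfeed import Feed, Guid, Image, Item


# Hypothetical stand-in for the `pod` object the function expects.
@dataclass
class PodcastConfig:
    bucket_name: str
    podcast: str
    podcast_url: str
    podcast_pic_url: str = None


all_titles_with_s3 = [{
    "data-media-id": "abc123",
    "data-asset-title": "Episode 1",
    "data-title-description": "First episode",
    "data-asset-created-date": "1609459200",
}]
pod = PodcastConfig(
    bucket_name="my-podcast-bucket",
    podcast="les-chemins-de-la-philosophie",
    podcast_url="https://www.franceculture.fr/emissions/les-chemins-de-la-philosophie",
)
print(create_rss_object(all_titles_with_s3, pod).rss())  # .rss() renders RSS 2.0 XML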
Example #2
def pop_blog(f, blog_dir, post_dict, post_template, main_template):
    """populates an html page from an org blog post

    Parameters
    ----------
    f : str
        basename for org file
    blog_dir : str
        location where org blog posts are located
    post_dict : dict
        dictionary of posts mapping titles to urls
    post_template : jinja2 Template instance
        template containing content for blog post
    main_template : jinja2 Template instance
        template for adding style to blog post

    Returns
    -------
    tuple
        containing updated post_dict and item to add to rss feed
    """

    # prep inputs
    bn, ext = os.path.splitext(f)
    comp = bn.split("-")
    date = "-".join(comp[:3])
    title = " ".join(comp[3:])
    full_title = "%s %s" % (date, title)
    full_path = os.path.join(blog_dir, f)
    html_f = os.path.join("posts", bn + ".html")
    url = "https://lbybee.github.io/%s" % html_f

    # build html
    cmd = "pandoc -r org -t html --mathjax %s -o /dev/stdout"

    html = spbash(cmd % full_path)
    html = post_template.render(date=date, pandoc_html=html)
    html = main_template.render(title=title, content_html=html)

    with open(html_f, "w") as fd:
        fd.write(html)

    # add to rss feed
    item = Item(title=full_title,
                link=url,
                author="Leland Bybee",
                description=full_title,
                guid=Guid(url),
                pubDate=parse(date))

    # update content dict
    post_dict[full_title] = url

    return post_dict, item
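
pop_blog returns the updated post_dict together with one rfeed Item, so a caller would loop over the .org files and collect the items into a Feed. A rough sketch of such a driver under that assumption; the build_blog name, the feed metadata, and the output path are not from the original project.

import os

from rfeed import Feed


def build_blog(blog_dir, post_template, main_template):
    post_dict, items = {}, []
    for f in sorted(os.listdir(blog_dir)):
        if f.endswith(".org"):
            post_dict, item = pop_blog(f, blog_dir, post_dict,
                                       post_template, main_template)
            items.append(item)

    # Assumed feed metadata; the posts themselves link to lbybee.github.io (see url above).
    feed = Feed(title="lbybee.github.io",
                link="https://lbybee.github.io/",
                description="Blog posts",
                items=items)
    with open("rss.xml", "w") as fd:
        fd.write(feed.rss())
    return post_dict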
Example #3
def build_item(post: Post, base_url: Url, base_image_url: Url) -> Item:
    link = post.link(base_url)

    return Item(
        title=post.title,
        link=link,
        description=post.metadata.description,
        author=post.metadata.author,
        guid=Guid(link),
        pubDate=post.metadata.date,
        categories=[Category(tag) for tag in post.metadata.tags],
        enclosure=PostWrapper(post, base_image_url),
    )
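
build_item only covers a single post; the surrounding feed assembly is not shown. A sketch of what it might look like, assuming an iterable of Post objects; the build_feed name and the feed metadata are illustrative only.

from rfeed import Feed


def build_feed(posts, base_url, base_image_url):
    # One rfeed Item per post, in whatever order `posts` is supplied.
    items = [build_item(post, base_url, base_image_url) for post in posts]
    return Feed(title="Example blog",          # assumed metadata
                link=base_url,
                description="Recent posts",
                items=items)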
Example #4
def proposals(space):
    # Get the proposal from the space
    graphql_query = """
        {
            proposals(
                orderBy: "created",
                orderDirection: desc,
                where:{space:"%s", state:"active"}
            ) {
                id
                title
                body
                created
                author
                state
            }
        }
    """ % space
    r = requests.post('%s/graphql' % SNAPSHOT_API_ENDPOINT,
                      json={'query': graphql_query})

    # Success response
    if r.ok:
        # Build list of items
        items = []
        for proposal in r.json()['data']['proposals']:
            items.append(
                Item(
                    title=proposal['title'],
                    link='%s/#/%s/proposal/%s' %
                    (SNAPSHOT_BASE_URL, space, proposal['id']),
                    description=proposal['body'],
                    author=proposal['author'],
                    guid=Guid(proposal['id'], False),
                    pubDate=datetime.fromtimestamp(int(proposal['created'])),
                ))

        feed = Feed(title='%s Proposals' % space,
                    link='%s/api/v1/spaces/%s/proposals' %
                    (RSS_FEED_BASE_URL, space),
                    description="Proposals for %s" % space,
                    language='en-US',
                    lastBuildDate=datetime.now(),
                    items=items)

        return feed.rss()
    else:
        return jsonify({'code': r.status_code, 'text': r.text})
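
proposals() returns either rendered RSS (feed.rss()) or a jsonify error payload, which suggests it backs a Flask route matching the feed link built above. A minimal wiring sketch under that assumption; the app object and route registration are not shown in the original.

from flask import Flask

app = Flask(__name__)


@app.route('/api/v1/spaces/<space>/proposals')
def proposals_feed(space):
    # Delegates to proposals(); returns XML on success, a JSON error payload otherwise.
    return proposals(space)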
Example #5
def generate_rss_feed(records):
    now_utc = datetime.now(timezone.utc)
    items = []
    for record in available_records_generator(records):
        description = f"""
        <h1>Vaccine available</h1>
        <table>
          <tr>
            <th>Provider</th>
            <th>Store Name</th>
            <th>Store Address</th>
            <th>Store City</th>
            <th>Appointment Link</th>
          </tr>
          <tr>
            <td>{record.provider}</td>
            <td>{record.store_name}</td>
            <td>{record.store_address}</td>
            <td>{record.store_city}</td>
            <td><a href="{record.link}">{record.link}</a></td>
          </tr>
        </table>
        """

        item = Item(
            title="Vaccine Available",
            description=description,
            guid=Guid(
                f"{record.store_name}{record.store_address}{record.store_city}{str(now_utc.date())}",
                isPermaLink=False,
            ),
            pubDate=now_utc,
        )
        items.append(item)

    feed = Feed(
        title="Covid Vaccine Finder Updates",
        link="https://kirkhansen.github.io/covid-vaccine-finder/",
        description=
        "Feed produces new records when available vaccine is detected.",
        language="en-US",
        lastBuildDate=now_utc,
        items=items,
    )

    return feed
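
generate_rss_feed returns the rfeed Feed object rather than XML; the caller is expected to serialize it. A short sketch of that step (the write_feed helper and output path are assumptions):

def write_feed(records, path="feed.xml"):
    # Render the Feed returned above to RSS 2.0 XML and write it out.
    feed = generate_rss_feed(records)
    with open(path, "w") as fh:
        fh.write(feed.rss())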
Example #6
File: app.py  Project: tanrax/wallaviso
def rss_view(id):
    # Get the user's saved search by id
    search = Search.query.get(id)
    # Fix prices
    min_price = search.min_price
    if min_price == 0:
        min_price = ''
    max_price = search.max_price
    if max_price == 0:
        max_price = ''
    # Get Data
    cookies = {
        'searchLat': str(search.lat),
        'searchLng': str(search.lng),
        'content': str(search.name),
        'hideCookieMessage': 'true',
        'userHasLogged': '%7B%22hasLogged%22%3Afalse%2C%22times%22%3A1%7D	'
    }
    urlSearch = f'https://es.wallapop.com/rest/items?dist={search.distance}&kws={search.name}&lat={search.lat}&lng={search.lng}&maxPrice={max_price}&minPrice={min_price}'
    results = requests.get(urlSearch, cookies=cookies).json()['items']
    # Generate RSS
    items = []
    for result in results:
        items.append(
            Item(
                title=
                f"{result['title']} - {result['salePrice']}{result['currency']['symbol']}",
                link=f"https://es.wallapop.com/item/{result['url']}",
                description=result['description'],
                author=result['sellerUser']['microName'],
                guid=Guid(result['itemId']),
                pubDate=datetime.utcfromtimestamp(
                    int(str(result['publishDate'])[:-3]))))
    lastBuildDate = datetime.now()
    if results:
        # publishDate is a millisecond timestamp; drop the last three digits for seconds
        lastBuildDate = datetime.utcfromtimestamp(
            int(str(results[0]['publishDate'])[:-3]))
    feed = Feed(
        title=f"{search.name} - Wallaviso RSS",
        link="http://www.wallaviso.com",
        description=
        "Se el primero en Wallapop. Programa tus busquedas y recibe una notificacion al instante.",
        language="es-ES",
        lastBuildDate=lastBuildDate,
        items=items)
    return feed.rss()
Example #7
def generate_feed(json_list):
    items = []
    for json_item in json_list:
        level = json_item.get('level') or 'INFO'
        item = Item(title=json_item.get('_line'),
                    link='https://app.logdna.com/',
                    pubDate=datetime.now(),
                    author=json_item.get('container'),
                    description=level)
        items.append(item)
    feed = Feed(title="LogDNA RSS Feed",
                link=os.environ.get('RSS_LINK'),
                description='LogDNA RSS Feed for Rainmeter widgets.',
                lastBuildDate=datetime.now(),
                items=items)

    return feed.rss()
Example #8
File: feed.py  Project: tho96/blog-platform
def summary():
    response = requests.get("http://localhost/articles/recent/meta/10")
    data = response.json()
    item = []
    for d in data:
        item1 = Item(title=d['title'],
                     author=d['author'],
                     pubDate=datetime.datetime(2014, 12, 29, 10, 00),
                     link="http://localhost/articles/recent/meta/10")
        item.append(item1)
    feed = Feed(
        title="A summary feed listing",
        link="http://localhost/rss",
        description=
        "a summary feed listing the title, author, date, and link for 10 most recent articles",
        language="en-US",
        lastBuildDate=datetime.datetime.now(),
        items=item)
    return feed.rss()
Example #9
File: feed.py  Project: tho96/blog-platform
def comment_feed():
    a_response = requests.get("http://localhost/articles/recent/10")
    a_data = a_response.json()
    comments = []
    for a in a_data:
        article_id = a['url'].split('/')[-1]
        c_response = requests.get("http://localhost/article/comments/" +
                                  str(a['article_id']))
        c_data = c_response.json()
        for c in c_data:
            # rfeed's Item takes `description`, not `comment`, for the comment body text
            item3 = Item(author=c['author'],
                         description=c['comment'],
                         pubDate=datetime.datetime(2014, 12, 29, 10, 00),
                         link="http://localhost/article/" +
                         str(c['article_id']))
            comments.append(item3)

    feed = Feed(title="Comment feed",
                link="http://localhost/rss/comments",
                description="A comment feed for each articles",
                language="en-US",
                lastBuildDate=datetime.datetime.now(),
                items=comments)
    return feed.rss()
Example #10
def getArticleSummary(article_link, headline):
    r2 = requests.get(article_link)
    soup2 = BeautifulSoup(r2.text, features="html.parser")
    i = 1
    publication_date = soup2.find('time')['datetime']
    data2 = soup2.findAll('p')[i]
    # The page sometimes contains empty or near-empty paragraphs (e.g. bare building names),
    # so start at i = 1 and keep advancing until a paragraph with real content is found.
    while (len(data2.text) < 30):
        i = i + 1
        data2 = soup2.findAll('p')[i]
    article_pub_date = datetime.datetime.strptime(publication_date,
                                                  "%Y-%m-%d %H:%M:%S")
    #append to articles_list in order to print as RSS compatible XML
    articles_list.append(
        Item(title=headline,
             link=article_link,
             description=data2.text,
             guid=Guid(article_link),
             pubDate=datetime.datetime(article_pub_date.year,
                                       article_pub_date.month,
                                       article_pub_date.day,
                                       article_pub_date.hour,
                                       article_pub_date.minute)))
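
getArticleSummary only appends Items to a module-level articles_list; wrapping that list in a Feed happens elsewhere. A rough sketch of that final step, with made-up feed metadata; the build_news_feed name is not from the original project.

import datetime

from rfeed import Feed

articles_list = []  # module-level list that getArticleSummary appends to


def build_news_feed():
    return Feed(title="Scraped headlines",      # assumed metadata
                link="https://example.com/news",
                description="Article summaries scraped from each headline's page",
                lastBuildDate=datetime.datetime.now(),
                items=articles_list).rss()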
Example #11
File: feed.py  Project: tho96/blog-platform
def full_feed():
    response = requests.get("http://localhost/articles/recent/10")
    data = response.json()
    articles = []

    for d in data:
        article_id = d['url'].split('/')[-1]
        item2 = Item(title=d['title'],
                     pubDate=datetime.datetime(2014, 12, 29, 10, 00),
                     link="http://localhost/article/" + str(d['article_id']))
        a_response = requests.get("http://localhost/article/" +
                                  str(d['article_id']))
        a_data = a_response.json()
        item2.title = a_data['title']
        item2.author = a_data['author']
        item2.description = a_data['body']

        c_response = requests.get("http://localhost/article/comments/count/" +
                                  str(d['article_id']))
        c_data = c_response.json()
        # The feed deliberately stores the comment count in the <comments> element
        item2.comments = c_data['count']

        t_response = requests.get("http://localhost/article/tags/" +
                                  str(d['article_id']))
        t_data = t_response.json()
        item2.categories = t_data['tags']
        articles.append(item2)

    feed = Feed(
        title="Full feed",
        link="http://localhost/rss/full_feed",
        description=
        "A full feed containing the full text for each article, its tags as RSS categories, and a comment count.",
        language="en-US",
        lastBuildDate=datetime.datetime.now(),
        items=articles)
    return feed.rss()
Example #12
    def get_xml(self: Serializer, response: Response) -> Tuple[str, int]:
        """
        Serialize the provided response data into RSS, version 2.0.

        Parameters
        ----------
        response : Response
            The search response data to be serialized.

        Returns
        -------
        data : str
            The serialized XML results.
        status
            The HTTP status code for the operation.

        """
        # Get the archive info from the first hit.  Is this OK?
        archive = response.hits[0]["primary_classification"]["archive"]
        archive_id = archive["id"]
        archive_name = archive["name"]
        feed = Feed(
            title=f"{archive_id} updates on arXiv.org",
            link="http://arxiv.org/",
            description=f"{archive_name} ({archive_id}) updates on the arXiv.org e-print archive",
            language="en-us",
            pubDate=datetime.now(),
            lastBuildDate=datetime.now(),
            managingEditor="*****@*****.**"
        )

        # Remove two elements added by the Rfeed package
        feed.generator = None
        feed.docs = None

        # Add extensions that will show up as attributes of the rss element
        feed.extensions.append(Content())
        feed.extensions.append(Taxonomy())
        feed.extensions.append(Syndication())
        feed.extensions.append(Admin())
        feed.image = Image(url="http://arxiv.org/icons/sfx.gif",
                           title="arXiv.org", link="http://arxiv.org")

        # Add each search result "hit" to the feed
        for hit in response:
            # Add links for each author and the abstract to the description element
            description = "<p>Authors: "
            first = True
            for author in hit['authors']:
                if first:
                    first = False
                else:
                    description += ", "
                name = f"{author['last_name']},+{author['initials'].replace(' ', '+')}"
                description += f"<a href='http://arxiv.org/search/?query={name}&searchtype=author'>"
                description += f"{author['full_name']}</a>"
            description += f"</p><p>{hit['abstract']}</p>"

            # Create the item element for the "hit"
            item = Item(
                title=hit['title'],
                link=url_for("abs_by_id", paper_id=hit['paper_id']),
                # link=f"http://arxiv.org/abs/{hit['paper_id']}",
                description=description,
                guid=Guid(f"oai:arXiv.org:{hit['paper_id']}", isPermaLink=False)
            )
            feed.items.append(item)

        # Print and return the feed content
        data = feed.rss()
        status_code = status.HTTP_200_OK
        return data, status_code