Example #1
def show_entries():
    """ The main view: presents author info and entries. """
    entries = []
    cur = g.db.execute(
        """
        SELECT location
        FROM entries
        ORDER BY published DESC
        """
    )

    for (row,) in cur.fetchall():
        if os.path.exists(row+".json"):
            entries.append(file_parser_json(row+".json"))

    try:
        entries = entries[:10]
    except IndexError:
        entries = None

    before = 1

    # try:
    #     for entry in entries:
    #         for i in entry['syndication'].split(','):
    #             if i.startswith('https://twitter.com/'):
    #                 vals = i.split('/')
    #                 twitter = {'id': vals[len(vals)-1], 'link': i}
    #                 entry['twitter'] = twitter
    #                 break
    # except AttributeError:
    #     pass

    return render_template('blog_entries.html', entries=entries, before=before)
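
These snippets are Flask view functions from a personal blog; the example extractor strips the imports and route decorators, so each one assumes some surrounding wiring. A minimal sketch of that wiring (the `app` object, the import list, and the URL rule here are assumptions, not taken from the project):

from threading import Timer
from flask import (Flask, g, abort, jsonify, make_response, redirect,
                   render_template, request, session)
import os

app = Flask(__name__)

@app.route('/')        # assumed URL rule
def show_entries():
    ...                # body as in the example above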
Example #2
def show_draft(name):
    if request.method == 'GET':
        if not session.get('logged_in'):
            abort(401)
        draft_location = 'drafts/' + name + ".json"
        entry = get_post_for_editing(draft_location)
        return render_template('edit_entry.html', entry=entry, type="draft")

    if request.method == 'POST':
        if not session.get('logged_in'):
            abort(401)
        data = post_from_request(request)

        if "Save" in request.form:  # if we're updating a draft
            file_name = "drafts/{0}".format(name)
            entry = file_parser_json(file_name + ".json")
            update_json_entry(data, entry, g=g, draft=True)
            return redirect("/drafts")

        if "Submit" in request.form:  # if we're publishing it now
            location = add_entry(request, draft=True)
            if os.path.isfile("drafts/" + name + ".json"):  # this won't always be the slug generated
                os.remove("drafts/" + name + ".json")
            return redirect(location)
Example #3
def bridgy_twitter(location):
    """send a twitter mention to brid.gy"""
    if location.startswith('/e/'):
        location = location[3:]

    url = 'https://' + DOMAIN_NAME + "/e/" + location

    r = send_mention(url,
                     'https://brid.gy/publish/twitter',
                     endpoint='https://brid.gy/publish/webmention')
    syndication = r.json()
    print("\n\n\n {0} \n\n\n".format(url))
    data = file_parser_json('data/' + location + ".json", md=False)
    old_entry = data
    if data['syndication']:
        print(syndication)
        if isinstance(data['syndication'], str):
            data['syndication'] = data['syndication'].split(',')
        data['syndication'].append(syndication['url'])
    else:
        try:
            data['syndication'] = [syndication['url']]
        except KeyError:
            raise KeyError("There was no url! {0}".format(syndication))
    data['twitter'] = {'url': syndication['url'], 'id': syndication['id']}
    update_json_entry(data=data, old_entry=old_entry, g=None)
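
This example leans on a send_mention(source, target, endpoint=...) helper that is not shown. Under the Webmention protocol this boils down to a form-encoded POST of the source and target URLs to the endpoint, with brid.gy answering in JSON (the 'url' and 'id' read back above). A rough sketch of such a helper, offered as an assumption about its internals rather than the project's actual code:

import requests

def send_mention(source, target, endpoint='https://brid.gy/publish/webmention'):
    # Webmention publish request: `source` is the post on this site,
    # `target` names the silo to syndicate to (e.g. https://brid.gy/publish/twitter).
    # brid.gy responds with JSON describing the syndicated copy.
    return requests.post(endpoint, data={'source': source, 'target': target})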
Example #4
def show_atom():
    """ The atom view: presents entries in atom form. """

    entries = []  # store the entries which will be presented
    cur = g.db.execute(  # grab in order of newest
        """
        SELECT location
        FROM entries
        ORDER BY published DESC
        """)
    updated = None
    for (row, ) in cur.fetchall():  # iterate over the results
        if os.path.exists(row + ".json"):  # if the file fetched exists, append the parsed details
            entries.append(file_parser_json(row + ".json"))

    try:
        entries = entries[:10]  # get the 10 newest
        updated = entries[0]['published']
    except IndexError:
        entries = None  # there are no entries

    template = render_template('atom.xml', entries=entries, updated=updated)
    response = make_response(template)
    response.headers['Content-Type'] = 'application/atom+xml'
    return response
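
Because show_atom() overwrites the response's Content-Type, feed readers treat the payload as Atom rather than HTML. A quick way to confirm that, assuming an `app` wired up as sketched under Example #1, an initialised g.db, and a hypothetical '/atom' URL rule:

with app.test_client() as client:
    resp = client.get('/atom')   # '/atom' is an assumed route, not taken from the project
    assert resp.headers['Content-Type'].startswith('application/atom+xml')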
Example #5
def show_drafts():
    if request.method == 'GET':
        drafts_location = "drafts/"
        entries = [
                drafts_location + f for f in os.listdir(drafts_location)
                if os.path.isfile(os.path.join(drafts_location, f))
                and f.endswith('.json')]
        entries = [file_parser_json(entry) for entry in entries]
        return render_template("drafts_list.html", entries=entries)
Example #6
def edit(year, month, day, name):
    """ The form for user-submission """
    app.logger.info(request)
    if request.method == "GET":
        try:
            file_name = "data/{year}/{month}/{day}/{name}".format(year=year, month=month, day=day, name=name)
            entry = file_parser_json(file_name + ".json", md=False)
            try:
                entry['category'] = ', '.join(entry['category'])
            except TypeError:
                entry['category'] = ''
            return render_template('edit_entry.html', entry=entry)
        except IOError:
            return redirect('/404')

    elif request.method == "POST":
        if not session.get('logged_in'):
            abort(401)
        app.logger.info(request.form)

        if "Submit" in request.form:
            data = post_from_request(request)
            if data['location'] is not None and data['location'].startswith("geo:"):
                (place_name, geo_id) = resolve_placename(data['location'])
                data['location_name'] = place_name
                data['location_id'] = geo_id

            location = "{year}/{month}/{day}/{name}".format(year=year, month=month, day=day, name=name)

            if request.form.get('twitter'):
                t = Timer(30, bridgy_twitter, [location])
                t.start()

            if request.form.get('facebook'):
                t = Timer(30, bridgy_facebook, [location])
                t.start()
            file_name = "data/{year}/{month}/{day}/{name}".format(year=year, month=month, day=day, name=name)
            entry = file_parser_json(file_name+".json")
            update_json_entry(data, entry, g=g)
            return redirect("/e/"+location)
        return redirect("/")
Example #7
def get_post_for_editing(draft_location, md=False):
    entry = file_parser_json(draft_location, md=False)
    if entry['category']:
        entry['category'] = ', '.join(entry['category'])

    if entry['published']:
        try:
            entry['published'] = entry['published'].strftime('%Y-%m-%d')
        except AttributeError:
            entry['published'] = None
    return entry
Example #8
def get_entries_by_date():
    entries = []
    cur = g.db.execute("""
        SELECT entries.location FROM entries
        ORDER BY entries.published DESC
        """)

    for (row, ) in cur.fetchall():
        if os.path.exists(row + ".json"):
            entries.append(file_parser_json(row + ".json"))

    return entries
Example #9
def show_drafts():
    if request.method == 'GET':
        if not session.get('logged_in'):
            abort(401)
        drafts_location = "drafts/"
        entries = [
            drafts_location + f for f in os.listdir(drafts_location)
            if os.path.isfile(os.path.join(drafts_location, f))
            and f.endswith('.json')
        ]
        entries = [file_parser_json(entry) for entry in entries]
        return render_template("drafts_list.html", entries=entries)
Example #10
def profile(year, month, day, name):
    """ Get a specific article """

    file_name = "data/{year}/{month}/{day}/{name}".format(year=year, month=month, day=day, name=name)
    if request.headers.get('Accept') == "application/ld+json":  # if someone else is consuming
        return action_stream_parser(file_name+".json")

    entry = file_parser_json(file_name+".json")

    if os.path.exists(file_name+".jpg"):
        entry['photo'] = file_name+".jpg"                   # get the actual file
    if os.path.exists(file_name+".mp4"):
        entry['video'] = file_name+".mp4"                   # get the actual file
    if os.path.exists(file_name+".mp3"):
        entry['audio'] = file_name+".mp3"                   # get the actual file

    mentions = get_mentions('http://' + DOMAIN_NAME + '/e/{year}/{month}/{day}/{name}'.
                            format(year=year, month=month, day=day, name=name))

    reply_to = []                                           # where we store our replies so we can fetch their info
    if entry['in_reply_to']:
        for i in entry['in_reply_to']:                          # for all the replies we have...
            if isinstance(i, dict):                             # which are not images on our site...
                reply_to.append(i)
            elif i.startswith('http://127.0.0.1:5000'):
                reply_to.append(file_parser_json(i.replace('http://127.0.0.1:5000/e/', 'data/', 1) + ".json"))
            elif i.startswith('http'):                          # which are not data resources on our site...
                reply_to.append(get_entry_content(i))
    # if entry['syndication']:
    #     for i in entry['syndication'].split(','):               # look at all the syndication links
    #         if i.startswith('https://twitter.com/'):                    # if there's twitter syndication
    #             twitter = dict()
    #             vals = i.split('/')
    #             twitter['id'] = vals[len(vals)-1]
    #             twitter['link'] = i
    #             entry['twitter'] = twitter
    #         if i.startswith('https://www.facebook.com/'):
    #             entry['facebook'] = {'link':i}

    return render_template('entry.html', entry=entry, mentions=mentions, reply_to=reply_to)
Example #11
def time_search_year(year):
    """ Gets all entries posted during a specific year """
    entries = []
    cur = g.db.execute("""
        SELECT entries.location FROM entries
        WHERE CAST(strftime('%Y',entries.published)AS INT) = {year}
        ORDER BY entries.published DESC
        """.format(year=int(year)))

    for (row, ) in cur.fetchall():
        if os.path.exists(row + ".json"):
            entries.append(file_parser_json(row + ".json"))
    return render_template('blog_entries.html', entries=entries)
Example #12
def show_draft(name):
    if request.method == 'GET':
        draft_location = 'drafts/' + name + ".json"
        entry = file_parser_json(draft_location, md=False)
        if entry['category']:
            entry['category'] = ', '.join(entry['category'])
        return render_template('edit_draft.html', entry=entry)

    if request.method == 'POST':
        if not session.get('logged_in'):
            abort(401)
        data = post_from_request(request)

        if "Save" in request.form:                          # if we're updating a draft
            file_name = "drafts/{0}".format(name)
            entry = file_parser_json(file_name+".json")
            location = update_json_entry(data, entry, g=g, draft=True)
            return redirect("/drafts")

        if "Submit" in request.form:                        # if we're publishing it now
            data['published'] = datetime.now()

            location = create_json_entry(data, g=g)
            if data['in_reply_to']:
                send_mention('http://' + DOMAIN_NAME +location, data['in_reply_to'])

            if request.form.get('twitter'):
                t = Timer(30, bridgy_twitter, [location])
                t.start()

            if request.form.get('facebook'):
                t = Timer(30, bridgy_facebook, [location])
                t.start()

            if os.path.isfile("drafts/"+name+".json"):           # this won't always be the slug generated
                os.remove("drafts/"+name+".json")

            return redirect(location)
Example #13
def search_by_tag(category):
    entries = []
    cur = g.db.execute("""
         SELECT entries.location FROM categories
         INNER JOIN entries ON
         entries.slug = categories.slug AND
         entries.published = categories.published
         WHERE categories.category = ?
         ORDER BY entries.published DESC
        """, [category])
    for (row, ) in cur.fetchall():
        if os.path.exists(row + ".json"):
            entries.append(file_parser_json(row + ".json"))
    return entries
Example #14
def time_search_year(year):
    """ Gets all entries posted during a specific year """
    entries = []
    cur = g.db.execute(
        """
        SELECT entries.location FROM entries
        WHERE CAST(strftime('%Y',entries.published)AS INT) = {year}
        ORDER BY entries.published DESC
        """.format(year=int(year)))

    for (row,) in cur.fetchall():
        if os.path.exists(row+".json"):
            entries.append(file_parser_json(row+".json"))
    return render_template('blog_entries.html', entries=entries)
Example #15
def show_json():
    """ The rss view: presents entries in json feed form. """

    entries = []  # store the entries which will be presented
    cur = g.db.execute(  # grab in order of newest
        """
        SELECT location
        FROM entries
        ORDER BY published DESC
        """)

    for (row, ) in cur.fetchall():  # iterate over the results
        if os.path.exists(row + ".json"):  # if the file fetched exists, append the parsed details
            entries.append(file_parser_json(row + ".json"))

    try:
        entries = entries[:10]  # get the 10 newest
    except IndexError:
        entries = None  # there are no entries

    feed_items = []

    for entry in entries:
        feed_item = {
            'id': entry['url'],
            'url': entry['url'],
            'content_text': entry['summary'] if entry['summary'] else entry['content'],
            'date_published': entry['published'],
            'author': {'name': 'Alex Kearney'}
        }
        feed_items.append(feed_item)

    feed_json = {
        'version': 'https://jsonfeed.org/version/1',
        'home_page_url': 'https://kongaloosh.com/',
        'feed_url': 'https://kongaloosh.com/json.feed',
        'title': 'kongaloosh',
        'items': feed_items
    }

    return jsonify(feed_json)
Example #16
def time_search(year, month, day):
    """ Gets all notes posted on a specific day """
    entries = []
    cur = g.db.execute("""
        SELECT entries.location FROM entries
        WHERE CAST(strftime('%Y',entries.published)AS INT) = {year}
        AND CAST(strftime('%m',entries.published)AS INT) = {month}
        AND CAST(strftime('%d',entries.published)AS INT) = {day}
        ORDER BY entries.published DESC
        """.format(year=int(year), month=int(month), day=int(day)))

    for (row, ) in cur.fetchall():
        if os.path.exists(row + ".json"):
            entries.append(file_parser_json(row + ".json"))
    return render_template('blog_entries.html', entries=entries)
Example #17
def articles():
    """ Gets all the articles """
    entries = []
    cur = g.db.execute("""
         SELECT entries.location FROM categories
         INNER JOIN entries ON
         entries.slug = categories.slug AND
         entries.published = categories.published
         WHERE categories.category='{category}'
         ORDER BY entries.published DESC
        """.format(category='article'))

    for (row, ) in cur.fetchall():
        if os.path.exists(row + ".json"):
            entries.append(file_parser_json(row + ".json"))
    return render_template('blog_entries.html', entries=entries)
Example #18
def time_search(year, month, day):
    """ Gets all notes posted on a specific day """
    entries = []
    cur = g.db.execute(
        """
        SELECT entries.location FROM entries
        WHERE CAST(strftime('%Y',entries.published)AS INT) = {year}
        AND CAST(strftime('%m',entries.published)AS INT) = {month}
        AND CAST(strftime('%d',entries.published)AS INT) = {day}
        ORDER BY entries.published DESC
        """.format(year=int(year), month=int(month), day=int(day)))

    for (row,) in cur.fetchall():
        if os.path.exists(row+".json"):
            entries.append(file_parser_json(row+".json"))
    return render_template('blog_entries.html', entries=entries)
Example #19
def tag_search(category):
    """ Get all entries with a specific tag """
    entries = []
    cur = g.db.execute(
        """
         SELECT entries.location FROM categories
         INNER JOIN entries ON
         entries.slug = categories.slug AND
         entries.published = categories.published
         WHERE categories.category = ?
         ORDER BY entries.published DESC
        """, [category])
    for (row,) in cur.fetchall():
        if os.path.exists(row+".json"):
            entries.append(file_parser_json(row+".json"))
    return render_template('blog_entries.html', entries=entries)
Example #20
def bridgy_facebook(location):
    """send a facebook mention to brid.gy"""
    # send the mention
    r = send_mention(
        'http://' + DOMAIN_NAME + location,
        'https://brid.gy/publish/facebook',
        endpoint='https://brid.gy/publish/webmention'
    )
    # get the response from the send
    syndication = r.json()
    data = file_parser_json('data/' + location.split('/e/')[1]+".json", md=False)
    app.logger.info(syndication)
    if data['syndication']:
        data['syndication'].append(syndication['url'])
    else:
        data['syndication'] = [syndication['url']]
    data['facebook'] = {'url': syndication['url']}
    create_json_entry(data, g=None,update=True)
Example #21
def bridgy_twitter(location):
    """send a twitter mention to brid.gy"""
    location = 'http://' + DOMAIN_NAME + location
    app.logger.info("bridgy sent to {0}".format(location))
    r = send_mention(
        location,
        'https://brid.gy/publish/twitter',
        endpoint='https://brid.gy/publish/webmention'
    )
    syndication = r.json()
    app.logger.info(syndication)
    app.logger.info("recieved {0} {1}".format(syndication['url'], syndication['id']))
    data = file_parser_json('data/' + location.split('/e/')[1]+".json", md=False)
    if data['syndication']:
        data['syndication'].append(syndication['url'])
    else:
        data['syndication'] = [syndication['url']]
    data['twitter'] = {'url': syndication['url'],
                       'id': syndication['id']}
    create_json_entry(data, g=None, update=True)
Example #22
def pagination(number):
    entries = []
    cur = g.db.execute(
        """
        SELECT entries.location FROM entries
        ORDER BY entries.published DESC
        """
    )

    for (row,) in cur.fetchall():
        if os.path.exists(row+".json"):
            entries.append(file_parser_json(row+".json"))

    try:
        start = int(number) * 10
        entries = entries[start:start+10]
    except IndexError:
        entries = None

    before = int(number)+1

    return render_template('blog_entries.html', entries=entries, before=before)
Example #23
def profile(year, month, day, name):
    """ Get a specific article """

    file_name = "data/{year}/{month}/{day}/{name}".format(year=year,
                                                          month=month,
                                                          day=day,
                                                          name=name)
    if request.headers.get('Accept') == "application/ld+json":  # if someone else is consuming
        return action_stream_parser(file_name + ".json")

    entry = file_parser_json(file_name + ".json")

    mentions, likes, reposts = get_mentions(
        'https://' + DOMAIN_NAME + '/e/{year}/{month}/{day}/{name}'.format(
            year=year, month=month, day=day, name=name))

    return render_template('entry.html',
                           entry=entry,
                           mentions=mentions,
                           likes=likes,
                           reposts=reposts)
Example #24
def show_entries():
    """ The main view: presents author info and entries. """

    if 'application/atom+xml' in request.headers.get('Accept'):
        # if the header is requesting an xml or atom feed, simply return it
        return show_atom()

    # getting the entries we want to display.
    entries = []  # store the entries which will be presented
    cur = g.db.execute(  # grab in order of newest
        """
        SELECT location
        FROM entries
        ORDER BY published DESC
        """)

    for (row, ) in cur.fetchall():  # iterate over the results
        if os.path.exists(row + ".json"):  # if the file fetched exists...
            entries.append(file_parser_json(
                row +
                ".json"))  # parse the json and add it to the list of entries.

    try:
        entries = entries[:10]  # get the 10 newest
    except IndexError:
        if len(entries) == 0:  # if there's an index error and the len is low..
            entries = None  # there are no entries
    # otherwise there are < 10 entries and we'll just display what we have ...
    before = 1  # holder which tells us which page we're on
    tags = get_most_popular_tags()[:10]

    display_articles = search_by_tag("article")[:3]

    return render_template('blog_entries.html',
                           entries=entries,
                           before=before,
                           popular_tags=tags[:10],
                           display_articles=display_articles)
Example #25
def update_entry(update_request, year, month, day, name, draft=False):
    data = post_from_request(update_request)
    if data['location'] is not None and data['location'].startswith("geo:"):
        # get the place name for the item in the data.
        (place_name, geo_id) = resolve_placename(data['location'])
        data['location_name'] = place_name
        data['location_id'] = geo_id

    location = "{year}/{month}/{day}/{name}".format(year=year,
                                                    month=month,
                                                    day=day,
                                                    name=name)
    data['content'] = run(data['content'], date=data['published'])

    file_name = "data/{year}/{month}/{day}/{name}".format(year=year,
                                                          month=month,
                                                          day=day,
                                                          name=name)
    entry = file_parser_json(file_name + ".json",
                             g=g)  # get the file which will be updated
    update_json_entry(data, entry, g=g, draft=draft)
    syndicate_from_form(update_request, location, data['in_reply_to'])
    return location
Example #26
def map():
    """"""
    geo_coords = []
    cur = g.db.execute(  # grab in order of newest
        """
        SELECT location
        FROM entries
        ORDER BY published DESC
        """)

    for (row, ) in cur.fetchall():  # iterate over the results
        if os.path.exists(row + ".json"):  # if the file exists, parse it for its location
            entry = file_parser_json(row + ".json")
            try:
                geo_coords.append(entry['location'][4:].split(';')[0])
            except (AttributeError, TypeError):
                pass

    app.logger.info(geo_coords)
    return render_template('map.html',
                           geo_coords=geo_coords,
                           key=GOOGLE_MAPS_KEY)