# Example 1
def html_daily_stats():
    """Return an HTML snippet with the total log-entry count followed by a
    per-day entry count for every day from the site epoch through today.
    """
    total = len(logview.get_log(None, None, None, None, None))
    first_day = datetime.date(2012, 8, 6)  # the beginning of time
    today = datetime.date.today()
    parts = ["<b>Daily count (" + str(total) + " total)</b>\n"]
    # Every day from first_day through today, inclusive.  (The original also
    # filtered on d <= today, which is always true for this range — dropped.)
    for offset in range((today - first_day).days + 1):
        day = first_day + datetime.timedelta(offset)
        entries = logview.get_log(None, str(day), None, None, None)
        parts.append("<br/>" + str(day) + " : " + str(len(entries)) + "\n")
    parts.append("<p/>")
    # join instead of repeated += — avoids quadratic string building.
    return "".join(parts)
# Example 2
def get_logger_gps():
    """Return [date, lat, lng] triples for every log row whose latitude and
    longitude are both present (non-None rows only).
    """
    rows = logview.get_log(None, None, None, None, None)
    return [
        [date, lat, lng]
        for date, lat, lng in (row[2:5] for row in rows)
        if lat is not None and lng is not None
    ]
# Example 3
    def index(self, keyword=None, date1=None, date2=None, location=None, limit=None):
        """Render the main page: log entries matching the given filters on
        the left; map, media strip and calendar on the right.

        When no keyword/date/location filter is given, defaults to today's
        entries and falls back to yesterday if today has none.
        """
        # Fix: the original body was tab-indented under a space-indented
        # `def` line, which raises TabError on Python 3; re-indented with
        # spaces only.  Logic is unchanged.
        if keyword is None and date1 is None and location is None:
            date1 = str(datetime.date.today())
            if len(logview.get_log(None, date1, None, None, limit)) == 0:
                date1 = str(datetime.date.today() - datetime.timedelta(1))
        log = logview.get_log(keyword, date1, date2, location, limit)
        media = logview.get_media(date1, date2)
        html = '<div id="left">'
        html += logview.html_log(log)
        html += '</div>'
        html += '<div id="right">'
        html += mapbox.html_get_map(log)
        html += logview.html_media(media)
        html += logview.html_calendar()
        html += '</div>'
        html = make_page(html, ['mapbox', 'lightbox'],
                         ['mapbox', 'jquery-1.7.2.min', 'lightbox'], True)
        return html
# Example 4
def _html_top_counts(label, counts, limit):
    """Render one "Unique <label>" column: a heading plus one keyword-linked
    list item per entry of ``counts.most_common(limit)``.
    """
    parts = ["<p/>Unique " + label + ": " + str(len(counts)) + "\n", "<ul>\n"]
    for name, hits in counts.most_common(limit):
        # Bug fix: the original never closed the <a> element.
        parts.append('<br/><li><a href="/?keyword=' + name + '">' + name
                     + " (" + str(hits) + ")</a></li>\n")
    parts.append("</ul>\n")
    return "".join(parts)


def get_all_text():
    """Build an HTML stats page over the whole log: total/unique token
    counts, plus top-N people (~name), tags (#tag) and locations (@place).
    """
    logs = logview.get_log(None, None, None, None, None)

    # Compile once; the original rebuilt all three patterns on every row.
    people_re = re.compile(r"~([A-Za-z0-9]+)")
    tag_re = re.compile(r"#([A-Za-z0-9]+)")
    loc_re = re.compile(r"@([A-Za-z0-9]+)")

    texts = []
    all_people = Counter()
    all_tags = Counter()
    all_locations = Counter()

    for log in logs:
        text = log[5]  # the free-text body of the log entry
        texts.append(text)
        lowered = text.lower()
        # Fix for the original TODO ("should not count double mentions in
        # single line"): each person counts at most once per entry.
        all_people.update(set(people_re.findall(text)))
        all_tags.update(tag_re.findall(lowered))
        all_locations.update(loc_re.findall(lowered))
        # (The original also built per-post lists here that were never
        # read — removed as dead code.)

    # Same "text + space" concatenation as before, built linearly.
    alltext = "".join(text + " " for text in texts)
    all_tokens = Counter(nltk.word_tokenize(alltext))
    totalwords = sum(all_tokens.values())

    # TOKENS
    html = "<br/>Total tokens: " + str(totalwords) + "\n"
    html += "<br/>Unique tokens: " + str(len(all_tokens)) + "\n"

    html += '<table border=0 width="90%"><tr>\n'
    html += "<td>\n"
    html += _html_top_counts("people", all_people, NUM_TOP_PEOPLE)
    html += "</td><td>\n"
    html += _html_top_counts("tags", all_tags, NUM_TOP_TAGS)
    html += "</td><td>\n"
    html += _html_top_counts("locations", all_locations, NUM_TOP_LOCATIONS)
    html += "</td></tr></table>\n"
    return html