def load(request):
    """Fetch current trends from Twitter and Google and store them as Trend entities.

    Side effects: inserts one ``Trend`` per trend name via ``get_or_insert``,
    so repeated runs do not create duplicates. Returns a plain HttpResponse
    acknowledging completion.

    NOTE(review): api.twitter.com/1 is the retired Twitter API v1 and the
    Google hottrends atom feed is also long gone -- these endpoints likely
    fail today; confirm before relying on this view.
    """
    ###load twitter ####
    url = "http://api.twitter.com/1/trends/2450022.json"
    req = urllib2.Request(url)
    # Some endpoints reject requests without a browser-like User-Agent.
    req.add_header('User-Agent', 'Safari 3.2')
    response = urllib2.urlopen(req)
    output_json = json.load(response)
    for x in output_json:
        for y in x['trends']:
            # Keyed on the trend name; the return value is not needed.
            Trend.get_or_insert(y['name'], title=y['name'], source="twitter")
    ###laod Google ####
    gurl = "http://www.google.com/trends/hottrends/atom/hourly"
    data = urllib2.urlopen(gurl)
    tree = ET.parse(data)
    root = tree.getroot()
    for child in root:
        # Iterate the element directly; getchildren() is deprecated in ElementTree.
        for grandchild in child:
            if grandchild.text:
                # The feed embeds an HTML <li> list of keywords in the text node.
                soup = BeautifulSoup(grandchild.text)
                for ht in soup('li'):
                    gkeyword = ht.a.string
                    Trend.get_or_insert(gkeyword, title=str(gkeyword), source="google")
    return HttpResponse("loaded twitter and google")
def get_all_trends(last_retrieved_trend_id):
    """
    Gets all the trend hashtags, paginated.

    *Parameters:*
        - *last_retrieved_trend_id (string)*: The id of the last retrieved
          trend (used to fetch more). Nullable.

    *Returns:*
        - *List of models.Trend objects*, or None when pagination runs past
          the end of the results.

    *Raises:*
        - *ValueError*: if the id is not parseable as an integer.
        - *TypeError*: propagated from pagination on malformed input.
    """
    if last_retrieved_trend_id is not None:
        try:
            last_retrieved_trend_id = int(last_retrieved_trend_id)
        except ValueError:
            raise
    database_trends = query_factory.get_all_trends()
    # Paginate the results: pages of 20, resuming after the given id.
    try:
        database_trends = paginate(dictionaries_list=database_trends,
                                   required_size=20,
                                   start_after_key='id',
                                   start_after_value=last_retrieved_trend_id)
    except TypeError as E:
        print(E)
        raise
    if database_trends is None:
        return None
    # Project each raw row onto the Trend model.
    return [Trend({'id': row['id'],
                   'text': row['text'],
                   'number_of_kweeks': row['number_of_kweeks']})
            for row in database_trends]
def index(request):
    """Render the home page with the titles of trends created since yesterday."""
    e = Employee(name="John", role="manager", email='*****@*****.**')
    e.hire_date = datetime.datetime.now().date()
    #e.put()
    # Only trends created within the last day, newest first.
    cutoff = datetime.date.today() - datetime.timedelta(days=1)
    query = Trend.all().filter('created >= ', cutoff).order('-created')
    trendslist = [entry.title for entry in query.fetch(100)]
    gtrendslist = []
    context = {
        'thispage': 'Home',
        'trends': trendslist,
        'gtrends': gtrendslist,
    }
    # Merge the page-local context into the site-wide global context.
    context = dict(context, **globvars.context)
    return render_to_response('index.html', context)
def get(self):
    '''
    Serve the page for the trends: monthly per-job counts expressed as a
    percentage of the month's total, rendered via the trends template.
    '''
    # Months covered by the data set, oldest first: 2008-01 .. 2012-11.
    # Generated instead of hand-maintaining a 59-entry literal.
    months = ["%d-%02d" % (year, month)
              for year in range(2008, 2013)
              for month in range(1, 13)
              if (year, month) <= (2012, 11)]
    #Get jobs for trends
    jobs = self.request.get_all("job")
    jobs = [j for j in jobs if len(j) > 0]
    logging.info("Trends for jobs: " + ','.join(jobs))
    #Also get the total counts for months (denominator for percentages)
    jobs.append('total')
    #Get the trends from the database
    split_trends = []
    total = None  # guards against a datastore missing the 'total' row
    if len(jobs) > 1:
        trends = Trend.all()
        trends.filter("job IN", jobs)
        for t in trends:
            nt = SplitTrend(t.job, t.monthly_count.split(';'))
            if nt.job == 'total':
                total = nt
                logging.info("Total - " + str(t))
            else:
                split_trends.append(nt)
                logging.info(t)
    trends_names = [t.job for t in split_trends]
    #Compute percentages of the monthly total; skipped (raw values kept)
    #if no 'total' row was found rather than crashing with a NameError.
    if total is not None:
        for t in split_trends:
            t.values = [val * 100.0 / total.values[idx]
                        for idx, val in enumerate(t.values)]
    #Generate the page
    template_values = {
        'jobs': TopJobs,
        'trends': split_trends,
        'trends_names': trends_names,
        'count': len(split_trends),
        'months': months
    }
    template = jinja_environment.get_template('templates/trends.html')
    self.response.out.write(template.render(template_values))
def trendinfo(request, trendid, trendname):
    """Render the detail page for a single trend: places, clusters, tendency."""
    places, worldwide = Place.get_trend_places(trendid)
    clusters = Clusters.get_trend_clusters(trendid)
    tendency, flag = Trend.get_tendency(trendid)
    context = {
        'trendname': trendname,
        'trendid': trendid,
        'tendency': json.dumps(tendency),
        'flag': flag,
        'clusters': clusters,
        'places': places,
        'worldwide': worldwide,
    }
    return render(request, 'geomap/trendinfo.html', context)
def search(request):
    """Handle the trend search form; a POST with a value runs the search."""
    trends = []
    value = ''
    post = False
    if request.method == 'POST':
        value = request.POST.get('value')
        # Only query when the user actually submitted a non-empty value.
        if value:
            trends = Trend.search_trends(value)
        post = True
    context = {'trends': trends, 'value': value, 'post': post}
    return render(request, 'geomap/search.html', context)
def test_get_all_trends():
    """Integration test: get_all_trends returns the seeded hashtags in order
    and raises ValueError for a non-numeric pagination id."""
    expected_trends = []
    # Look up the seeded hashtag ids and build the expected Trend models.
    for text, kweek_count in (('trend', 2), ('trend2', 0)):
        query = """
                SELECT ID FROM HASHTAG
                WHERE TEXT = '%s'
                """ % text
        trend_id = db_manager.execute_query(query)[0]['id']
        expected_trends.append(
            Trend({
                'id': trend_id,
                'text': text,
                'number_of_kweeks': kweek_count
            }))
    actual_trends = actions.get_all_trends(None)
    # Length check first: without it a short result list passes silently.
    assert len(actual_trends) == len(expected_trends)
    for index, trend in enumerate(actual_trends):
        assert expected_trends[index].to_json() == trend.to_json()
    # Invalid ID must raise ValueError.
    exception_caught = False
    try:
        actions.get_all_trends('invalid_id')
    except ValueError:
        exception_caught = True
    assert exception_caught
def load(request):
    """Pull the latest trends from Twitter and Google into the datastore."""
    ###load twitter ####
    # Location-scoped Twitter trends feed (v1 JSON endpoint).
    twitter_req = urllib2.Request("http://api.twitter.com/1/trends/2450022.json")
    twitter_req.add_header('User-Agent', 'Safari 3.2')
    output_json = json.load(urllib2.urlopen(twitter_req))
    for entry in output_json:
        for trend in entry['trends']:
            s = Trend.get_or_insert(trend['name'], title=trend['name'], source="twitter")
    ###laod Google ####
    # Hourly hot-trends atom feed; keywords are embedded as HTML <li> items.
    gurl = "http://www.google.com/trends/hottrends/atom/hourly"
    tree = ET.parse(urllib2.urlopen(gurl))
    root = tree.getroot()
    for child in root:
        for grandchild in child.getchildren():
            if grandchild.text:
                soup = BeautifulSoup(grandchild.text)
                hottrends = []
                for item in soup('li'):
                    gkeyword = item.a.string
                    s = Trend.get_or_insert(gkeyword, title=str(gkeyword), source="google")
    return HttpResponse("loaded twitter and google")
def get(self):
    '''
    Serve the page for the trends.
    '''
    # Data months in chronological order, 2008-01 through 2012-11.
    months = ["2008-01", "2008-02", "2008-03", "2008-04", "2008-05", "2008-06",
              "2008-07", "2008-08", "2008-09", "2008-10", "2008-11", "2008-12",
              "2009-01", "2009-02", "2009-03", "2009-04", "2009-05", "2009-06",
              "2009-07", "2009-08", "2009-09", "2009-10", "2009-11", "2009-12",
              "2010-01", "2010-02", "2010-03", "2010-04", "2010-05", "2010-06",
              "2010-07", "2010-08", "2010-09", "2010-10", "2010-11", "2010-12",
              "2011-01", "2011-02", "2011-03", "2011-04", "2011-05", "2011-06",
              "2011-07", "2011-08", "2011-09", "2011-10", "2011-11", "2011-12",
              "2012-01", "2012-02", "2012-03", "2012-04", "2012-05", "2012-06",
              "2012-07", "2012-08", "2012-09", "2012-10", "2012-11"]
    # Jobs requested via the query string; drop empty values.
    jobs = [job for job in self.request.get_all("job") if job]
    logging.info("Trends for jobs: " + ','.join(jobs))
    # The 'total' row supplies the denominators for the percentages.
    jobs.append('total')
    # Load the matching trend rows from the datastore.
    split_trends = []
    if len(jobs) > 1:
        query = Trend.all()
        query.filter("job IN", jobs)
        for record in query:
            split = SplitTrend(record.job, record.monthly_count.split(';'))
            if split.job == 'total':
                total = split
                logging.info("Total - " + str(record))
            else:
                split_trends.append(split)
                logging.info(record)
    trends_names = [split.job for split in split_trends]
    # Express each monthly count as a percentage of that month's total.
    for split in split_trends:
        split.values = [count * 100.0 / total.values[idx]
                        for idx, count in enumerate(split.values)]
    # Render the page.
    template_values = {
        'jobs': TopJobs,
        'trends': split_trends,
        'trends_names': trends_names,
        'count': len(split_trends),
        'months': months}
    template = jinja_environment.get_template('templates/trends.html')
    self.response.out.write(template.render(template_values))
def index(request):
    """Render the home page listing trend titles created within the last day.

    NOTE(review): the Employee entity below is constructed but never saved
    (put() is commented out) -- presumably leftover demo code; confirm.
    """
    e = Employee(name="John", role="manager", email='*****@*****.**')
    e.hire_date = datetime.datetime.now().date()
    #e.put()
    day1 = datetime.timedelta(days=1)
    trendslist = []
    gtrendslist = []
    yesterday = datetime.date.today() - day1
    # Up to 100 trends created since yesterday, newest first.
    query = Trend.all().filter('created >= ', yesterday).order('-created')
    results = query.fetch(100)
    for p in results:
        trendslist.append(p.title)
    gv = globvars
    context = {
        'thispage': 'Home',
        'trends': trendslist,
        'gtrends': gtrendslist,
    }
    context = dict(
        context,
        **gv.context)  #combines the 'local' context with the 'global' context
    return render_to_response('index.html', context)
def placehistory(request, place, woeid):
    """Render a week's worth of trend history for the place with this WOEID."""
    week_trends, another_name = Trend.get_weektrends(woeid)
    context = {
        'place': place,
        'another_name': another_name,
        'week_trends': week_trends,
    }
    return render(request, 'geomap/placehistory.html', context)