Example #1
def at_usage_graph(cursor):
    """Build a horizontal bar chart of the most-used @mentions in recent tweets.

    Pulls the recent tweets of every known user, accumulates @mention counts
    via the word_* helpers, and saves the top-20 histogram to
    /var/www/html/graphs/at_usage.png.

    Args:
        cursor: open database cursor passed through to the db_* helpers.
    """
    print("making @ graph")
    words_delete_all()  # reset the global word-count accumulator
    for user in db_get_all_users(cursor):
        for tweet in db_get_tweets_in_last_time(cursor, user):
            word_add_array_at(tweet)

    word_clean()
    names, values = words_ret_hist()
    # Keep only the 20 most frequent; reverse so the biggest bar sits on top.
    names = names[:20]
    values = values[:20]
    names.reverse()
    values.reverse()

    y_pos = np.arange(len(names))

    plt.figure()
    plt.barh(y_pos, values, align='center')
    plt.yticks(y_pos, names)
    plt.xlabel('Usage')
    plt.xticks(rotation='vertical')
    plt.savefig('/var/www/html/graphs/at_usage.png', bbox_inches='tight')
Example #2
def make_url_hist(cursor, party="", max_users=10):
    """Build a histogram of website addresses linked from recent tweets.

    Scans up to *max_users* users, keeps tweets newer than ``max_days`` days
    from users whose party field starts with *party*, strips a leading
    "www." from each linked address and counts occurrences.

    Args:
        cursor: open database cursor for the db_* helpers.
        party: only users whose job_type1 starts with this prefix are
            counted (the empty default matches everyone).
        max_users: how many users (from the front of the table) to scan
            (was a hard-coded 10; same default).

    Returns:
        (names, values) histogram as produced by words_ret_hist().
    """
    max_days = 365
    words_delete_all()
    users = db_get_cols_from_table(cursor, "user_names",
                                   ["user_id", "job_type1"])

    for user_id, job in users[:max_users]:
        print(user_id, job)
        if not job.startswith(party):
            continue
        cur_time = time.time()
        for date, url in db_get_cols_from_table(cursor, user_id,
                                                ["date", "url"]):
            if url == "None" or url == "error":
                continue
            if (cur_time - int(date)) / 60 / 60 / 24 < max_days:
                # NOTE(review): assumes stored urls have no scheme, so the
                # host is the first "/"-separated component — confirm.
                address = url.split("/")[0]
                if address.startswith("www."):
                    address = address[4:]
                word_add(address)

    word_clean()
    return words_ret_hist()
Example #3
def clas_stats(cursor):
    """Aggregate per-user tweet classification counts into user_names.clas.

    For each user, counts the values of the "clas" column of that user's
    tweet table and stores the histogram in the user_names table as a
    "name=count;name=count" string (no trailing separator).

    Args:
        cursor: open database cursor for the db_* helpers.

    Note: the original body ended with a stray identifier (`adas`) that
    raised a NameError after the loop finished; it has been removed.
    """
    for u in db_get_all_users(cursor):
        print(u)
        words_delete_all()  # per-user histogram, so reset each iteration
        for row in db_get_cols_from_table(cursor, u, ["clas"]):
            word_add(row[0])

        names, counts = words_ret_hist()
        # join avoids the quadratic += concatenation and the [:-1] trim.
        summary = ";".join(name + "=" + str(count)
                           for name, count in zip(names, counts))
        db_update_record(cursor, "user_names", "user_id", u, "clas", summary)
        db_commit()
        print(summary)
def hashtag_get_most_used(cursor, delta=172800 / 2):
    """Return the most-used hashtags across all users' recent tweets.

    Counts hashtags in each user's tweets from the last *delta* seconds,
    writes the ranked hashtag names to "word_usage.txt" (one per line) and
    returns the full histogram.

    Args:
        cursor: open database cursor for the db_* helpers.
        delta: look-back window in seconds (default 86400, i.e. one day).

    Returns:
        (names, values) histogram from words_ret_hist().
    """
    words_delete_all()
    for user in db_get_all_users(cursor):
        for tweet in db_get_tweets_in_last_time(cursor, user, delta=delta):
            word_add_array_hashtag(tweet)

    word_clean()
    names, values = words_ret_hist()

    # Context manager guarantees the file is closed even if a write fails.
    with open("word_usage.txt", "w") as out:
        for name in names:
            out.write(name + "\n")

    return names, values
Example #5
def re_tweet(cursor, delta=172800 / 2):
    """Count who gets retweeted and write the ranking to a stats file.

    Scans every user's tweets, extracts the "@name" handle from tweets that
    start with "RT @", accumulates counts, and writes one line per
    retweeted account ("@name count is_mp party") to
    /var/www/html/stats/retweets.txt.

    Args:
        cursor: open database cursor for the db_* helpers.
        delta: currently unused — the query below hard-codes delta=1e10
            (effectively all tweets). Kept for signature compatibility;
            TODO confirm whether it should be passed through.

    Note: the original body ended with a stray identifier (`aasdsad`) that
    raised a NameError after the file was written; it has been removed.
    """
    words_delete_all()

    users = db_get_all_users(cursor)
    for u in users:
        # NOTE(review): delta=1e10 deliberately ignores the delta parameter.
        for tweet in db_get_tweets_in_last_time(cursor, u, delta=1e10):
            if tweet.startswith("RT @"):
                # "RT @name: text" -> "@name"
                word_add_array_at(tweet.split(":")[0][3:])
        print(u)

    names, values = words_ret_hist(max_len=400)
    print(names)
    with open('/var/www/html/stats/retweets.txt', 'w') as f:
        for name, value in zip(names, values):
            handle = name[1:]  # drop the leading "@"
            ismp = users.count(handle)
            party = db_user_get_job1(cursor, handle)
            if party is None:
                party = "notmp"
            out = name + " " + str(value) + " " + str(ismp) + " " + party
            f.write(out + "\n")
Example #6
def noun_anal(cursor, update=False):
    """Plot a histogram of the most-used words in recent tweets.

    When *update* is True, rebuilds 'noun_hist.dat' from the last 100 days
    of every user's tweets; otherwise the existing file is reused. The top
    17 entries are then rendered as a bar chart (web layout or local
    layout, chosen by the internal ``for_web`` flag).

    Args:
        cursor: open database cursor for the db_* helpers.
        update: rebuild the noun histogram file before plotting
            (was a hard-coded local False; now a parameter, same default).
    """
    words_delete_all()

    if update:
        users = db_get_all_users(cursor)
        print(len(users))
        for i, u in enumerate(users):
            print(i, u)
            cur_time = time.time()
            for tweet, date in db_get_cols_from_table(cursor, u,
                                                      ["tweet", "date"]):
                # Keep only tweets from the last 100 days.
                if (cur_time - int(date)) / 60 / 60 / 24 < 100.0:
                    word_add_array(tweet)

        names, values = words_ret_hist()
        with open('noun_hist.dat', 'w') as out:
            for name, value in zip(names, values):
                out.write(name + " " + str(value) + "\n")

    # Read back the (word, count) pairs; keep at most the first 17 rows
    # (the original loop broke only after appending index 16).
    with open('noun_hist.dat') as src:
        lines = src.read().splitlines()
    words = []
    counts = []
    for i, line in enumerate(lines):
        fields = line.split()
        words.append(fields[0])
        counts.append(int(fields[1]))
        if i > 15:
            break

    words.reverse()
    counts.reverse()

    y_pos = np.arange(len(words))

    for_web = False
    if for_web:
        plt.figure(figsize=(25.0, 16.0), dpi=300)
        plt.bar(y_pos, counts, color="blue")

        plt.xticks(y_pos, words, fontsize=35)
        plt.legend(loc='best', fontsize=30)
        plt.ylabel('Usage (Tweets)', fontsize=30)
        plt.yticks(fontsize=30)
        plt.xticks(rotation=45, rotation_mode="anchor", ha="right")
        plt.tight_layout()
        plt.savefig('/var/www/html/graphs/nouns.png')
    else:
        plt.figure(figsize=(10.0, 10.0), dpi=300)

        ax = plt.subplot(111)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)

        plt.barh(y_pos, counts, color="#36845b")
        for tick in ax.xaxis.get_major_ticks():
            tick.label.set_fontsize(25)

        plt.yticks(y_pos, words, fontsize=25)
        plt.xticks(rotation=45, rotation_mode="anchor", ha="right")
        plt.legend(loc='best', fontsize=25)
        plt.xlabel('Usage (Tweets)', fontsize=25)
        plt.tight_layout()
        plt.savefig('nouns.png')
Example #7
def hashtag_flow(cursor):
    """Animate the top-10 hashtags used by MPs over the last 48 hours.

    Renders one bar-chart frame per 15-minute step (4-hour sliding window),
    assembles the frames into hashtag_flow.gif with ImageMagick, copies the
    gif under a random name into the thumbs dir and tweets a link to it.

    Args:
        cursor: open database cursor for the db_* helpers.
    """
    print("making hashtag flow graph")
    path = "/var/www/html/graphs/"
    thumbs = "/var/www/html/thumbs"
    if not os.path.isdir(thumbs):
        os.mkdir(thumbs)

    # Remove frames left over from any previous run.
    os.chdir("/var/www/html/graphs/")
    for f in glob.glob("hashtag_flow*.png"):
        os.remove(f)

    file_number = 0
    ago = 0.0
    pngs = ""
    plt.figure(figsize=(8.0, 4.0))
    loop = 0
    pop_hash_tags = ""

    while ago < 48:
        print("ago=", ago)
        words_delete_all()
        users = db_get_all_users(cursor)
        for u in users:
            tweets = db_get_tweets_in_time_frame(cursor, u, width=4,
                                                 time_ago=ago)
            for tweet in tweets:
                word_add_array_hashtag(tweet)

        word_clean()
        names, values = words_ret_hist()
        # Frames with fewer than 10 distinct hashtags are skipped entirely.
        if len(names) >= 10:
            names = names[:10]
            values = values[:10]
            names.reverse()
            values.reverse()

            if loop == 0:
                # First frame's tags become the tweet text suffix.
                pop_hash_tags = " ".join(names)

            # Derive each tag's color from hashes of its name so a tag
            # keeps the same color from frame to frame.
            color = []
            for i in range(0, len(names)):
                names[i] = names[i].strip().lower()
                r = float(hash(names[i] + "r") % 256) / 256
                g = float(hash(names[i] + "g") % 256) / 256
                b = float(hash(names[i] + "b") % 256) / 256
                color.append([r, g, b, 1.0])

            y_pos = np.arange(len(names))
            plt.cla()
            plt.barh(y_pos, values, align='center', color=color)
            plt.yticks(y_pos, names)
            ago_to_2dp = "%.2f" % ago
            plt.title("Number of tweets " + str(ago_to_2dp) + " hours ago from MPs")
            plt.xlabel('Tweets')
            plt.xlim([0, 40])
            plt.xticks(rotation='vertical')
            plt.subplots_adjust(left=0.35, right=0.95, top=0.9, bottom=0.2)
            plt.savefig('/var/www/html/graphs/hashtag_flow' + str(file_number) + '.png')
            pngs = pngs + " hashtag_flow" + str(file_number) + '.png'
            file_number = file_number + 1
        ago = ago + 0.25
        loop = loop + 1

    # HACK: shell command built by string concatenation. The file names are
    # generated locally above, but prefer subprocess.run([...], shell=False)
    # if this ever incorporates external input.
    os.system("convert -delay 30 -loop 0 -quality 50% " + pngs + " hashtag_flow.gif")
    m = hashlib.md5()
    m.update(str(time.time()).encode('utf-8'))
    random_file = m.hexdigest() + ".gif"
    shutil.copyfile(os.path.join(path, "hashtag_flow.gif"), os.path.join(thumbs, random_file))
    my_twitter_tweet("Top hashtags used by MPs in last 48 hours: http://mpstweets.com/flow.php?fname=" + random_file + " " + pop_hash_tags)