import time

import text_utils as txu
import twitter_utils as twu

'''
 Implements the second feature, which produces a running median for the tweets and writes it to the file ft2.txt.
'''
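
# For reference, a minimal sketch of a running-median helper in the spirit of
# txu.get_median_iterative; this is an assumption (the real implementation
# lives in text_utils), shown only to illustrate the median_dict protocol
# used in the loop below.
import bisect

def _median_iterative_sketch(tweet, median_dict):
	# keep a sorted list of per-tweet counts inside the dict
	# (assumed metric: number of unique words per tweet)
	counts = median_dict.setdefault('counts', [])
	bisect.insort(counts, len(set(tweet.split())))
	n = len(counts)
	median_dict['length'] = n
	mid = n // 2
	if n % 2:
		median_dict['median'] = float(counts[mid])
	else:
		# even count: average the two middle values
		median_dict['median'] = (counts[mid - 1] + counts[mid]) / 2.0
	return median_dict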

# Record the time when processing starts
start = time.clock()

# The input can be a plain text file or Twitter API JSON
inp, outp = txu.extract_arguments()
tweets = twu.get_input(inp)
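
# For illustration, a rough sketch of what twu.get_input might do; this is an
# assumption (the real helper lives in twitter_utils): plain-text mode yields
# one tweet per line, JSON mode pulls the "text" field out of line-delimited
# Twitter API records.
import json

def _get_input_sketch(path):
	with open(path) as f:
		lines = [line.strip() for line in f if line.strip()]
	try:
		# one JSON object per line, as produced by the streaming API
		return [json.loads(line)['text'] for line in lines]
	except (ValueError, KeyError):
		# not JSON: treat the file as plain text, one tweet per line
		return lines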

# initialize the median dictionary
median_dict = {'median': 0.0, 'length': 0}
with open(outp, 'w') as outfile:
	for tweet in tweets:
		median_dict = txu.get_median_iterative(tweet, median_dict)
		outfile.write("{0:.2f}".format(round(median_dict['median'], 2)) + "\n")

# Record the time when processing ends
end = time.clock()

# Print the total time taken
print "Total time taken in processing median: ", end - start

import csv
import time

import text_utils as txu
import twitter_utils as twu

'''
 Implements the following features:
		- extracting all URLs in the tweets
		- extracting all hashtags in the tweets
		- extracting all replyats in the tweets
		- getting sentiment for each tweet
		- getting worldwide trending hashtags

'''

def write_csv(data, filename):
	with open(filename, 'wb') as csvfile:
		writer = csv.writer(csvfile)
		writer.writerows(data)
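
# For reference, minimal regex-based sketches of the extraction helpers used
# below; these are assumptions, since the real get_urls and extract_hashtags
# implementations live in text_utils and may be more robust.
import re

def _get_urls_sketch(text):
	# anything starting with http:// or https:// up to the next whitespace
	return re.findall(r'https?://\S+', text)

def _extract_hashtags_sketch(text):
	# a '#' followed by one or more word characters
	return re.findall(r'#\w+', text)

def _extract_replyats_sketch(text):
	# an '@' followed by a Twitter handle (letters, digits, underscores)
	return re.findall(r'@\w+', text)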

if __name__ == '__main__':
	# get the input path and the paths for all output files
	inp, urls, tags, replyats, sentiment, trends = txu.extract_arguments(n=7)
	tweets = twu.get_input(inp)
	tweets_text = "\n".join(tweets)
	
	# extract all URLs in the tweets and write them to a file
	start = time.clock()
	txu.write_file('\n'.join(txu.get_urls(tweets_text)), urls)
	print "Time taken in extracting URLs: ", time.clock() - start

	# extract all hashtags in the tweets and write them to a file
	start = time.clock()
	txu.write_file('\n'.join(txu.extract_hashtags(tweets_text)), tags)
	print "Time taken in extracting hashtags: ", time.clock() - start

	# extract all replyats in the tweets and write them to a file
	start = time.clock()
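	# The original listing breaks off at this point; the two lines below sketch
	# the obvious completion of this step (extract_replyats is an assumed
	# helper name, mirroring extract_hashtags above).
	txu.write_file('\n'.join(txu.extract_replyats(tweets_text)), replyats)
	print "Time taken in extracting replyats: ", time.clock() - start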