def main(election_search=None):
    """Collect poll and tweet data for an election, score it, and graph the results.

    Args:
        election_search: Space-separated candidate names, alphabetical by
            convention, e.g. "Comstock Wexton" (two words per candidate).
            When None, the user is prompted interactively.

    Side effects: reads/writes 'DATA-POLL <election>.txt' and
    'DATA-TWEETS <election>.txt' in the working directory, prints progress,
    and produces graphs via the `graphing` module.
    """
    # INTRO
    # ------------------------------------------------------------------------
    if election_search is None:
        # NOTE(review): the original branched on OS to pick raw_input vs input,
        # presumably because the Mac boxes ran Python 2 — confirm. BUG FIX:
        # the original handled only Darwin and Windows, leaving
        # election_search unbound (NameError) on any other OS.
        if platform.system() == "Darwin":  # MAC (Python 2: raw_input)
            election_search = raw_input("What election: ")  # The two last names of the candidates i.e. Comstock Wexton
        else:  # WINDOWS / anything else (Python 3: input)
            election_search = input("What election: ")  # The two last names of the candidates i.e. Comstock Wexton

    # collection_of_tweets maps candidate -> tweet records
    # (candidate, date, tweet, likes, replies, sentiment)
    collection_of_tweets = {}
    # collection_of_polls maps date -> "score name"
    collection_of_polls = {}

    # Collecting candidates' names: pair up consecutive words
    # ("First Last First Last ..." -> ["First Last", ...]).
    list_of_names = election_search.split(" ")
    list_of_candidates = [
        list_of_names[2 * i] + " " + list_of_names[2 * i + 1]
        for i in range(len(list_of_names) // 2)
    ]
    print(list_of_candidates)

    # Twitter handles for candidates.
    candidates_handle = {}
    for candidate in list_of_candidates:
        candidates_handle[candidate] = collecting.Handle(candidate)

    # Collecting Poll Data
    # ------------------------------------------------------------------------
    poll_file = 'DATA-POLL ' + election_search + '.txt'
    if os.path.isfile(poll_file):
        with open(poll_file, 'r') as fp:
            collection_of_polls = json.load(fp)
    else:
        # collection_of_polls[date] = "score name"
        collection_of_polls = collecting.CollectPoll(election_search)

    # Iterate through the dates in the dict of polls and find the
    # corresponding tweets from that span.
    tweets_file = 'DATA-TWEETS ' + election_search + '.txt'
    if os.path.isfile(tweets_file):
        with open(tweets_file, 'r') as fp:
            collection_of_tweets = json.load(fp)
    else:
        # Collecting over the whole election: poll dates are ordered
        # newest-first, so the last entry is the earliest poll.
        poll_list = list(collection_of_polls)
        print(poll_list)

        StartDate = poll_list[-1]
        month = int(StartDate.split("/")[0])
        day = int(StartDate.split("/")[1])
        # start = datetime.datetime(2018, month, day)  # FIXME: re-enable parsed dates
        start = datetime.datetime(2018, 10, 1)

        EndDate = poll_list[0]
        month = int(EndDate.split("/")[0])
        # BUG FIX: original read index [0] (the month field) for the day.
        day = int(EndDate.split("/")[1])
        # end = datetime.datetime(2018, month, day)  # FIXME: re-enable parsed dates
        end = datetime.datetime(2018, 11, 6)

        # Collecting Tweets
        # --------------------------------------------------------------------
        for candidate in list_of_candidates:
            collection_of_tweets[str(candidate)] = collecting.Collect(
                candidates_handle[candidate], start, end)
        with open(tweets_file, 'w') as outfile:
            json.dump(collection_of_tweets, outfile)

    # Scoring Everything
    # ------------------------------------------------------------------------
    result, final_resultsX, final_resultsY = scoring.main_scoring(
        election_search)
    print("RESULTS: ", result)
    print("Y", final_resultsY)

    # Graphing
    # ------------------------------------------------------------------------
    graphing.MakeGraphs(tweets_file)
    graphing.Graph(final_resultsX, final_resultsY, "time", "Comparing")
final_x.append(tuple[0]) final_y.append(tuple[1]) return final_x, final_y x1, y1 = Order(x1, y1) x2, y2 = Order(x2, y2) #y1 = [(i)*100/3 for i in y1] #y2 = [(i)*100/6 for i in y2] #print(y1,y2) y = [(a_i - b_i) for a_i, b_i in zip(y1, y2)] #CORRECT GRAPH OF DIFFERENCE PER DAY graphing.Graph(x1, y, "Dates", "Comstock v. Wexton") graphing.GraphCompiled(x1[1:], y1[1:], x2[1:], y2[1:]) y_1 = [] y_2 = [] last = 0 for i in range(len(y1)): y_1.append(y1[i] + last) last += y1[i] last = 0 for i in range(len(y2)): y_2.append(y2[i] + last) last += y2[i] print("graphing") graphing.GraphCompiled(x1, y_1, x2, y_2) #CORRECT GRAPH OF DIFFERENCE OVER TIME
def main_scoring(candidates):
    """Score an election from its compiled tweet data against the final poll.

    Args:
        candidates: The election name, e.g. "Comstock Wexton", used to locate
            'DATA-TWEETS <candidates>.txt' and 'DATA-POLL <candidates>.txt'.

    Returns:
        (x, y, z): the last two outputs and the first output of
        matrix.run(array) — presumably (days, scores, overall result);
        confirm against scoringMatrix.run.
    """
    name_of_file = "DATA-TWEETS " + candidates + ".txt"
    final_result_file = "DATA-POLL " + candidates + ".txt"

    # The poll file's '11/6' entry holds the election-day result as
    # "<name>+<margin>".
    # BUG FIX: original bound this to the name `dict`, shadowing the builtin.
    with open(final_result_file, 'r') as file:
        poll_data = json.load(file)
        final_result = poll_data['11/6']
    print("FINAL--------", final_result)
    name = final_result.split('+')[0]
    final_result = float(final_result.split('+')[1])

    # Called for its side effect — presumably writes
    # "<name_of_file> compiled.txt" read below; TODO confirm. The original
    # assigned the return value to `dict` (shadowing the builtin) and never
    # used it.
    ConvertTweets(name_of_file)
    with open(name_of_file + " compiled.txt", 'r') as fin:
        b = json.load(fin)

    listArr = []
    result_list = []
    final_result_a = None  # guard: original was unbound if b had no candidates
    for i, candidate in enumerate(b.keys()):
        print("CANDIDIATE", candidate)
        sumarr = []
        if i == 0:
            # Split the winning margin around 50/50 depending on whether the
            # first candidate in the compiled file is the poll winner.
            print("DATA___________", name, candidate,
                  name.lower() in candidate.lower())
            if name.lower() in candidate.lower():
                final_result_a = 50 + final_result / 2
                final_result_b = 50 - final_result / 2
            else:
                final_result_a = 50 - final_result / 2
                final_result_b = 50 + final_result / 2
            result_list.append(final_result_a)
            result_list.append(final_result_b)
        for entry in b[candidate]:
            sumarr.append(numpy.array(entry))
        listArr.append(sumarr)

    # Pair the two candidates' per-day factor vectors.
    ls = numpy.array(list(zip(listArr[0], listArr[1])))
    array = []   # both candidates' factors concatenated
    array1 = []  # candidate A's factors, zero-padded on the right
    array2 = []  # candidate B's factors, zero-padded on the left
    for mat in ls:
        array.append(numpy.append(mat[0], mat[1], 0))
        array1.append(numpy.append(mat[0], numpy.zeros(5), 0))
        array2.append(numpy.append(numpy.zeros(5), mat[1], 0))

    Scoring("GREATEST_MATRIX_2.0.txt", array, final_result_a, 2001)  # CHANGED NUM
    print(candidates, final_result_a, result_list)

    matrix = scoringMatrix.scoringMatrixOverTime(
        num_of_factors=10, num_of_weights=2, learning_rate=0.01,
        method=doNothing)  # CONSTRUCTOR
    arr1, arr2 = loadMatrix("GREATEST_MATRIX_2.0.txt")
    matrix.create_weight_matrice(arr1, arr2)
    print("matrix1------", matrix.run(array1)[2])
    z, x, y = matrix.run(array)

    import graphing
    graphing.MakeGraphs('DATA-TWEETS ' + candidates + '.txt')
    graphing.Graph(x, y, "Days", "Donnelly's Favorability Over Time")
    return (x, y, z)
def main():
    """Legacy entry point: collect per-poll-week tweets, score, and graph.

    NOTE(review): this appears to be an older revision of the newer
    `main(election_search=None)`; loop nesting below is reconstructed from
    the per-date tweet keys (str(end)+letter) — confirm against history.
    """
    # INTRO
    # ------------------------------------------------------------------------
    # The convention is alphabetical, e.g. "Comstock Wexton".
    # BUG FIX: the original handled only Darwin and Windows, leaving
    # election_search unbound (NameError) on any other OS.
    if platform.system() == "Darwin":  # MAC (Python 2: raw_input)
        election_search = raw_input("What election: ")  # The two last names of the candidates i.e. Comstock Wexton
    else:  # WINDOWS / anything else (Python 3: input)
        election_search = input("What election: ")  # The two last names of the candidates i.e. Comstock Wexton

    # collection_of_tweets maps "<end date><letter>" -> tweet records
    # (candidate, date, tweet, likes, replies, sentiment)
    collection_of_tweets = {}
    # collection_of_polls maps date -> "score name"
    collection_of_polls = {}
    # Graph coordinates, comprised of [x, y] = [date, score].
    coordinates_of_tweets = {}
    coordinates_of_polls = {}

    # Twitter handles for candidates.
    list_of_canidates = election_search.split(" ")
    canidates_handle = {}
    for canidate in list_of_canidates:
        canidates_handle[canidate] = collecting.Handle(canidate)

    # Collecting Poll Data
    # ------------------------------------------------------------------------
    if os.path.isfile(election_search + " poll.txt"):
        with open(election_search + " poll.txt", 'r') as fp:
            collection_of_polls = json.load(fp)
    else:
        # collection_of_polls[date] = "score name"
        collection_of_polls = collecting.CollectPoll(election_search)

    # Iterate through the dates in the dict of polls and find the
    # corresponding tweets from that week.
    # BUG FIX: original tested os.path.isfile("tweets") but opened
    # election_search + " tweets.txt", so the cached file was never reused.
    if os.path.isfile(election_search + " tweets.txt"):
        with open(election_search + " tweets.txt", 'r') as fp:
            collection_of_tweets = json.load(fp)
    else:
        for date in collection_of_polls:
            date_list = date.split("/")
            month = int(date_list[0])
            day = int(date_list[1])
            end = datetime.datetime(2018, month, day)  # year, month, day
            start = end - timedelta(days=7)  # a week back from the end
            print('\n\n' + str(start) + " " + str(end) + '\n\n')

            # Collecting Tweets: one entry per candidate per poll week,
            # keyed by end date plus a letter suffix (65 is ASCII 'A').
            i = 65
            for canidate in list_of_canidates:
                collection_of_tweets[(str(end) + chr(i))] = collecting.Collect(
                    canidates_handle[canidate], start, end)
                i += 1

        with open(election_search + " tweets.txt", 'w') as outfile:
            json.dump(collection_of_tweets, outfile)

    # Scoring Everything
    # ------------------------------------------------------------------------
    for date in collection_of_tweets.keys():
        for j in range(len(list_of_canidates)):
            coordinates_of_tweets[date + chr(j + 65)] = scoring.Scoring(
                collection_of_tweets, date)

    # FIXME: coordinates_of_polls needs to be tuned — it is currently always
    # empty when passed to the grapher.
    graphing.Graph(coordinates_of_polls, coordinates_of_tweets)
import read_maze as rm
import search
import graphing as g
import time


def main():
    """Solve a maze image with DFS and write the found path back onto it.

    Loads 'Maze_4.png', converts it to a graph, searches start -> end with
    DFS, draws the decoded path on the image, and prints the elapsed time.
    """
    pic = 'Maze_4.png'

    start = time.perf_counter()

    maze = rm.Maze(pic)
    graph = g.Graph(maze.maze_to_array())
    graph.detect_nodes()

    srh = search.Search(graph.start_node, graph.end_node, graph.graph)
    result = graph.decode_node(srh.dfs())
    maze.write_on_maze(result)

    end = time.perf_counter()
    print("The time required is :", (end - start))


# IMPROVEMENT: guard the entry point so importing this module no longer
# runs the whole solve as a side effect.
if __name__ == "__main__":
    main()