def print_smallest_error():
    # Initialize smallest_error as infinity so the first real error always replaces it
    smallest_error = float('inf')
    # Initialize total_number_of_matches as the length of match_data_for_error_reference
    total_number_of_matches = len(match_data_for_error_reference.index)
    # alliance_position is 1, 2, or 3 for red alliance teams and 4, 5, or 6 for blue alliance teams
    alliance_position = 1
    # alliance_score_position is 7 for red alliance and 8 for blue alliance
    alliance_score_position = 7
    # For loop iterates two times, once for each alliance
    for x in range(2):
        # For loop that iterates through total_number_of_matches
        for a in range(total_number_of_matches):
            # Set error_checker as the difference between the sum of the predicted OPRs of the
            # teams in the match and the true score
            error_checker = find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position]) \
                + find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position + 1]) \
                + find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position + 2]) \
                - match_data_for_error_reference.iloc[a, alliance_score_position]
            # If error_checker is smaller in magnitude than smallest_error, record it
            if abs(error_checker) < abs(smallest_error):
                smallest_error = error_checker
        # Set alliance_position as 4 for blue alliance teams
        alliance_position = 4
        # Set alliance_score_position as 8 for blue alliance scores
        alliance_score_position = 8
    # Print smallest_error
    print(smallest_error)
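# A minimal sketch of where the OPR values returned by find_teams_data.find_opr could come
# from, assuming a 0/1 participation matrix (one row per alliance per match, one column per
# team) and a score vector built elsewhere (e.g. in create_matrices). The function and
# parameter names below are hypothetical and not part of this project; OPR is the
# least-squares solution of participation_matrix @ opr = score_vector.
def sketch_solve_opr(participation_matrix, score_vector):
    # Solve the overdetermined system participation_matrix @ opr = score_vector
    # in the least-squares sense; rcond=None opts into the current numpy behaviour
    opr_vector, residuals, rank, singular_values = np.linalg.lstsq(
        participation_matrix, score_vector, rcond=None)
    return opr_vector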
def print_average_error_per_match():
    # The running sum of errors
    error_sum = 0
    # total_number_of_matches is used for the for loop
    total_number_of_matches = len(match_data_for_error_reference.index)
    # alliance_position is 1, 2, or 3 for red alliance teams and 4, 5, or 6 for blue alliance teams
    alliance_position = 1
    # alliance_score_position is 7 for red alliance and 8 for blue alliance
    alliance_score_position = 7
    # For loop iterates two times, once for each alliance
    for x in range(2):
        # For loop that iterates through total_number_of_matches
        for a in range(total_number_of_matches):
            # Add to error_sum the difference between the sum of the predicted OPRs of the
            # teams in the match and the true score
            error_sum = error_sum \
                + find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position]) \
                + find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position + 1]) \
                + find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position + 2]) \
                - match_data_for_error_reference.iloc[a, alliance_score_position]
        # Set alliance_position as 4 for blue alliance teams
        alliance_position = 4
        # Set alliance_score_position as 8 for blue alliance scores
        alliance_score_position = 8
    # total_scores_from_error_reference is the total number of alliance scores (two per match)
    total_scores_from_error_reference = 2 * len(match_data_for_error_reference.index)
    # Divide the error sum by total_scores_from_error_reference and print
    print(error_sum / total_scores_from_error_reference)
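# Note that the signed errors summed above can cancel, so the printed value measures bias
# rather than the typical miss distance. A minimal sketch of a mean-absolute-error variant,
# assuming the same match_data_for_error_reference column layout; the function name below
# is hypothetical:
def sketch_print_mean_absolute_error():
    total_number_of_matches = len(match_data_for_error_reference.index)
    absolute_error_sum = 0
    # Column offsets: red teams start at column 1 with their score in column 7,
    # blue teams start at column 4 with their score in column 8
    for alliance_position, alliance_score_position in ((1, 7), (4, 8)):
        for a in range(total_number_of_matches):
            predicted = sum(
                find_teams_data.find_opr(
                    match_data_for_error_reference.iloc[a, alliance_position + i])
                for i in range(3))
            actual = match_data_for_error_reference.iloc[a, alliance_score_position]
            absolute_error_sum += abs(predicted - actual)
    # Average over the total number of alliance scores (two per match)
    print(absolute_error_sum / (2 * total_number_of_matches))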
def send_opr_to_csv():
    # Create an empty total_teams x 2 array with objects as the data type
    opr_data_for_csv = np.empty((create_matrices.total_teams, 2), dtype="object")
    # For loop that iterates through the number of total teams
    for x in range(create_matrices.total_teams):
        # Record team numbers into the opr_data_for_csv array
        opr_data_for_csv[x, 0] = create_matrices.team_numbers.iloc[0, x]
    # For loop that iterates through the number of total teams
    for y in range(create_matrices.total_teams):
        # Record each OPR, rounded to two decimal places, into the opr_data_for_csv array
        opr_data_for_csv[y, 1] = str(
            round(find_teams_data.find_opr(create_matrices.team_numbers.iloc[0, y]), 2))
    # Create a new pandas DataFrame from opr_data_for_csv with 'Team Number' and
    # 'Offensive Power Rating' as the column titles
    df = pd.DataFrame(opr_data_for_csv, columns=['Team Number', 'Offensive Power Rating'])
    # Write df to a csv file called 'opr_data.csv'
    df.to_csv('csv_data/opr_data.csv')
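# A minimal sketch of the same export built from a plain dict instead of a preallocated
# object array, assuming create_matrices.team_numbers holds the team ids in row 0. The
# function name is hypothetical; index=False drops pandas' default row-number column.
def sketch_send_opr_to_csv():
    teams = [create_matrices.team_numbers.iloc[0, x]
             for x in range(create_matrices.total_teams)]
    df = pd.DataFrame({
        'Team Number': teams,
        'Offensive Power Rating': [round(find_teams_data.find_opr(team), 2)
                                   for team in teams],
    })
    df.to_csv('csv_data/opr_data.csv', index=False)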
print("5. Print Average Error Per Match") print("6. Time to calculate opr matrix") print("7. Graph") print("8. Send OPR data to csv") print("9. Exit") # Prompt for uer input choice = input("Please select a choice:") # Calculate the OPR of one team if choice == '1': # Ask for user input for team choice, then print calculated OPR try: team_choice = input("Please input a team number:") print("The OPR for " + str(team_choice) + " is " + str(find_teams_data.find_opr('frc' + team_choice))) # Print error prompt if input is invalid except: print( "Please input only team numbers of teams who competed in the events" ) # Calculate the OPR of three team elif choice == '2': # Ask for three teams, then print the predicted score try: first_team_choice = input("Please input the first team number:") second_team_choice = input("Please input the second team number:") third_team_choice = input("Please input the third team number:") print("The predicted score is " + str( predicted_score('frc' + first_team_choice, 'frc' +
def graph_error():
    # Initialize total_number_of_matches as the length of match_data_for_error_reference
    total_number_of_matches = len(match_data_for_error_reference.index)
    # alliance_position is 1, 2, or 3 for red alliance teams and 4, 5, or 6 for blue alliance teams
    alliance_position = 1
    # alliance_score_position is 7 for red alliance and 8 for blue alliance
    alliance_score_position = 7
    # error_graph_array_counter is used to iterate through error_graph_array_y
    error_graph_array_counter = 0
    # For loop iterates two times, once for each alliance
    for x in range(2):
        # For loop that iterates through total_number_of_matches
        for a in range(total_number_of_matches):
            # Set the current position in error_graph_array_y as the difference between the sum
            # of the predicted OPRs of the teams in the match and the true score
            error_graph_array_y[error_graph_array_counter] = \
                find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position]) \
                + find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position + 1]) \
                + find_teams_data.find_opr(match_data_for_error_reference.iloc[a, alliance_position + 2]) \
                - match_data_for_error_reference.iloc[a, alliance_score_position]
            # Set the current position in error_graph_array_x as the value of the array counter.
            # This stores the "x-coordinate" of the error value.
            error_graph_array_x[error_graph_array_counter] = error_graph_array_counter
            # Increment error_graph_array_counter
            error_graph_array_counter = error_graph_array_counter + 1
        # Set alliance_position as 4 for blue alliance teams
        alliance_position = 4
        # Set alliance_score_position as 8 for blue alliance scores
        alliance_score_position = 8
    # Initialize fig and ax for the plot
    fig, ax = plt.subplots()
    # Create a histogram from error_graph_array_y with 14 bins, a yellow face colour, and a
    # gray edge colour; counts, bins, and patches receive the values from this histogram
    counts, bins, patches = ax.hist(error_graph_array_y,
                                    bins=14,
                                    facecolor='yellow',
                                    edgecolor='gray')
    # Set the ticks to be at the edges of the bins
    ax.set_xticks(bins)
    # Format the x axis tick labels with no decimal places
    ax.xaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
    # Optionally change the colours of the bars at the edges
    # twentyfifth, seventyfifth = np.percentile(error_graph_array_y, [25, 75])
    # for patch, rightside, leftside in zip(patches, bins[1:], bins[:-1]):
    #     if rightside < twentyfifth:
    #         patch.set_facecolor('green')
    #     elif leftside > seventyfifth:
    #         patch.set_facecolor('red')
    # Label the raw counts below the x-axis...
    # bin_centers = 0.5 * np.diff(bins) + bins[:-1]
    # for count, x in zip(counts, bin_centers):
    #     # Label the raw counts
    #     ax.annotate(str(int(count)), xy=(x, 0), xycoords=('data', 'axes fraction'),
    #                 xytext=(0, -18), textcoords='offset points', va='top', ha='center')
    #     # Label the percentages
    #     percent = '%0.0f%%' % (100 * float(count) / counts.sum())
    #     ax.annotate(percent, xy=(x, 0), xycoords=('data', 'axes fraction'),
    #                 xytext=(0, -32), textcoords='offset points', va='top', ha='center')
    # Create more room at the bottom of the plot
    plt.subplots_adjust(bottom=0.15)
    # Set the y label as 'Number In Bin'
    ax.set_ylabel('Number In Bin')
    # Label the x axis
    ax.set_xlabel("Error From Predicted Score and Real Score")
    # Set the number of y axis ticks for the histogram
    ax.locator_params(axis="y", nbins=10)
    # Fit a normal distribution to the data:
    # initialize mu as the mean and std as the standard deviation
    mu, std = norm.fit(error_graph_array_y)
    # Set xmin and xmax from the plot's x limits
    xmin, xmax = plt.xlim()
    # Set x as 100 evenly spaced points between xmin and xmax
    x = np.linspace(xmin, xmax, 100)
    # Evaluate the fitted normal PDF at x using mu and std
    p = norm.pdf(x, mu, std)
    # Generate a twin axes of ax called ax2
    ax2 = ax.twinx()
    # Create a y label for ax2 called 'Normal Distribution Scale', coloured blue
    ax2.set_ylabel('Normal Distribution Scale', color='tab:blue')
    # Plot the normal distribution on ax2
    ax2.plot(x, p, linewidth=2, color='tab:blue')
    # Set the ticks on the ax2 y axis as blue
    ax2.tick_params(axis='y', labelcolor='tab:blue')
    # Set the ax2 bottom limit as 0
    ax2.set_ylim(bottom=0)
    # Use a tight layout so the y label of ax2 is not clipped
    fig.tight_layout(pad=2)
    # Alternative title with the fit results:
    # title = "Fit results: mu = %.2f, std = %.2f" % (mu, std)
    title = "Histogram of Error Per Match Over Gaussian Distribution"
    plt.title(title)
    # Show the graph
    plt.show()
    # Print the fitted mean and standard deviation
    print(mu)
    print(std)
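# error_graph_array_x and error_graph_array_y are used by graph_error() but are not
# defined in this section. A minimal sketch of how they could be declared at module
# level, assuming one entry per alliance score (two per match); this initialization is
# an assumption, not the project's confirmed code, and the sketch_ names are hypothetical.
sketch_total_scores = 2 * len(match_data_for_error_reference.index)
sketch_error_graph_array_x = np.zeros(sketch_total_scores)
sketch_error_graph_array_y = np.zeros(sketch_total_scores)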