def graph(self):
    """Dispatch self.funct to the right plotting routine by its exact type.

    Functions and Equations go to the module-level graph(); anything else is
    treated as a parametric object and sent to graph_par().
    """
    # `type(x) is Cls` keeps the original exact-type semantics (no subclasses)
    # without constructing throwaway Function('')/Equation('') instances on
    # every call, as the original `type(x) == type(Function(''))` did.
    if type(self.funct) is Function:
        graph(self.funct)
    elif type(self.funct) is Equation:
        # NOTE(review): only element [1] of an Equation is plotted — presumably
        # the right-hand side; confirm against the Equation class.
        graph(self.funct[1])
    else:
        graph_par(self.funct)
def do_POST(self):
    """Handle a POST: save the request body to file_path, run the grapher on
    it, and stream the rendered output file back to the client.

    Relies on module-level `file_path`, `output_path`, and `grapher`.
    """
    # Local import: the original printed `time.now().time()`, but the stdlib
    # `time` module has no `now()` — that line would raise AttributeError.
    from datetime import datetime

    content_length = int(self.headers['Content-Length'])
    body = self.rfile.read(content_length)
    self.send_response(200)
    self.end_headers()
    # Persist the uploaded payload so the grapher can read it from disk.
    with open(file_path, "wb") as file:
        file.write(body)
    grapher.graph(file_path, output_path)
    # Send the rendered result back as the response body.
    with open(output_path, "rb") as file:
        self.wfile.write(file.read())
    print(datetime.now().time(), "-> Success at ip:", self.client_address[0])
def graph(xText, yText, regression, regBox):
    """Validate the point lists, fit the chosen regression, and display it.

    xText/yText: parallel lists of coordinate strings from the UI.
    regression: selection list; regression[0]+1 is the polynomial degree.
    regBox: widget whose label receives the fitted polynomial text.
    Shows a tk error dialog and returns early on any invalid input.
    """
    if len(regression) == 0:
        tk.messagebox.showerror("Error", "Pick a regression type")
        return
    if len(xText) != len(yText) or len(xText) == 0:
        # Fixed dialog title: was "Error," (stray comma), inconsistent with
        # every other error dialog in this function.
        tk.messagebox.showerror("Error", "Missing points")
        return
    print("Graphing " + str(len(xText)) + " data points")

    graph = grapher.graph(len(xText))
    graph.numberOfPoints = len(xText)
    # Single try per point: float(x) is assigned before float(y) is attempted,
    # matching the original two-block ordering exactly.
    for i, (x, y) in enumerate(zip(xText, yText)):
        try:
            graph.points[i, 0] = float(x)
            graph.points[i, 1] = float(y)
        except ValueError:
            tk.messagebox.showerror("Error", "All points must be numbers")
            return

    graph.regression = regression[0] + 1
    graph.coefficients = graph.polyFit(graph.regression)
    print(graph.points[:, 1])
    print(graph.rSquaredCalculate())
    regBox.configure(text=graph.polyLabel())
    graph.execute()
    return
def update_graph(name):
    """Rebuild the word-usage and message-timing figures for one person.

    Returns the two figures plus the first/last message info for both sides
    of the conversation, in the order the caller's outputs expect.
    """
    # Dict keys use underscores where the display name has spaces.
    key = name.replace(" ", "_")
    words_me, words_you = words_used(data[key], name_fix)
    (times_info,
     msgs_info_first_me,
     msgs_info_last_me,
     msgs_info_last_you,
     msgs_info_first_you) = ms.get_msgs_time(save[key])

    words_fig = graph(words_me, words_you, name)
    times_fig = ms.plot_overtime(times_info, 2)
    return (words_fig, times_fig,
            msgs_info_last_me, msgs_info_last_you,
            msgs_info_first_me, msgs_info_first_you)
def graph(request, city1, city2, field):
    """Render the two-city comparison graph page.

    Asks the grapher for a rendered resource for (city1, city2, field); if it
    produces nothing, redirects back to the graph picker.
    """
    template = loader.get_template('graph/graph.html')
    graph_html = grapher.graph(city1, city2, field)
    # `is None`, not `== None`: identity test for the no-data sentinel.
    if graph_html is None:
        return redirect('/graph/')
    # NOTE(review): urllib.urlopen is the Python 2 API; on Python 3 this would
    # be urllib.request.urlopen — confirm the project's interpreter version.
    graph_html = urllib.urlopen(graph_html).read()
    context = {
        "graph_html": graph_html,
        "city1": city1,
        "city2": city2,
        "field": field,
    }
    return HttpResponse(template.render(context, request))
def prepare_graph(regression_type):
    """Fit a polynomial regression to the stored data points.

    Loads all x/y values from the Data table, fits a polynomial of degree
    `regression_type`, and returns (html, coefficient string, r-squared).
    Raises ValueError when there are too few points for that degree.
    """
    xs = [x for x, in Data.query.with_entities(Data.xValues)]
    ys = [y for y, in Data.query.with_entities(Data.yValues)]

    # A degree-k fit needs more than k points.
    if len(xs) <= regression_type:
        print('error points regression_type')
        raise ValueError('Not enough points for regression')

    g = gr.graph(len(xs))
    g.numberOfPoints = len(xs)
    for idx, (xv, yv) in enumerate(zip(xs, ys)):
        g.points[idx, 0] = xv
        g.points[idx, 1] = yv

    g.regression = regression_type
    g.coefficients = g.polyFit(g.regression)
    function = g.getCoeff()
    r_squared = g.rSquaredCalculate()
    return g.graphToHtml(), function, r_squared
import math
import sys

import grapher

# Sort points by y ascending so the largest y is the last element.
points_input = sorted(points_input, key=lambda x: x[1])
# (max_x, max_y): re-sort by x for the largest x; last y-sorted entry for max y.
dimensions = (sorted(points_input, key=lambda x: x[0])[-1][0],
              points_input[-1][1])
point_vectors = []
precision = 2

grapher.graph(points_input)
print("")


def dist(point1, point2):
    """Euclidean distance between two 2-D points, rounded to `precision`."""
    return round(
        math.sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2),
        precision)


def heading(point1, point2, dist):
    """Heading in degrees from point1 to point2 given their distance.

    Returns 0 for zero distance; 0/180 for vertical, 90/270 for horizontal.
    """
    if not dist:
        return 0
    elif point2[0] == point1[0]:
        # Same x: straight up (0) or down (180).
        return int(not (point2[1] > point1[1])) * 180
    elif point2[1] == point1[1]:
        # Same y: right (90) or left (270).
        return int(not (point2[0] > point1[0])) * 180 + 90
    else:
        # General case — this branch was missing here, so the function silently
        # returned None for any non-axis-aligned pair; restored to match the
        # sibling copy of this snippet.
        # NOTE(review): math.cos of a ratio is suspicious — deriving an angle
        # from dx/dist would normally use math.acos (inverse cosine). Confirm.
        return round(math.degrees(math.cos((point2[0] - point1[0]) / dist)))
import classifier
import neuralpy
import grapher

net = neuralpy.Network(2, 8, 1)
uris = ["miller_xml/" + str(i) + ".xml" for i in range(1, 13)]

epochs = 200
learning_rate = 0.05
validation_percentage = .32

ps = []
classifier.stand = "L"

# Ten independent training runs from random initial parameters; keep every
# score so the best run can be reported and graphed afterwards.
for run in range(10):
    net.randomize_parameters()
    score = classifier.train(net, uris, epochs, learning_rate,
                             validation_percentage,
                             save_file='results/miller_' + str(run) + '.txt')
    neuralpy.output(score)
    ps.append(score)

best = ps.index(max(ps))
neuralpy.output("\n\n" + str(max(ps)) + " at " + str(best))
grapher.graph(filepath='results/miller_' + str(best) + '.txt')
points_input = [[3, 10], [8, 1], [3, 5], [1, 4], [8, 10], [4, 5]]

### Begin code
import math
import sys

import grapher

# Sort by y ascending so the largest y is the final element.
points_input = sorted(points_input, key=lambda x: x[1])
# (max_x, max_y): largest x from an x-sort, largest y from the y-sorted list.
dimensions = (sorted(points_input, key=lambda x: x[0])[-1][0],
              points_input[-1][1])
point_vectors = []
precision = 2

grapher.graph(points_input)
print("")


def dist(point1, point2):
    """Euclidean distance between two 2-D points, rounded to `precision`."""
    dx = point1[0] - point2[0]
    dy = point1[1] - point2[1]
    return round(math.sqrt(dx ** 2 + dy ** 2), precision)


def heading(point1, point2, dist):
    """Heading in degrees from point1 to point2 given their distance.

    0 for zero distance; 0/180 when vertically aligned; 90/270 when
    horizontally aligned; otherwise derived from the x-offset/distance ratio.
    """
    if not dist:
        return 0
    if point2[0] == point1[0]:
        # Same x: up (0) or down (180).
        return int(not (point2[1] > point1[1])) * 180
    if point2[1] == point1[1]:
        # Same y: right (90) or left (270).
        return int(not (point2[0] > point1[0])) * 180 + 90
    # NOTE(review): math.cos of a ratio is suspicious — an angle from dx/dist
    # would normally come from math.acos (inverse cosine). Confirm intent.
    return round(math.degrees(math.cos((point2[0] - point1[0]) / dist)))
import grapher

# Render a graph from the points stored in test.txt; the first argument
# (in-memory points) is deliberately None so the file is the only source.
grapher.graph(None, f="test.txt")
# Counts of matching vs non-matching predictions.
Accurate_Predicted_Total = compared["Match"].value_counts()

# Experimental accuracy: fraction of rows whose prediction matched, as a %.
# NOTE(review): indexing with the string "True" assumes value_counts produced
# string labels — confirm the "Match" column holds strings, not booleans.
actual_percentage = (Accurate_Predicted_Total["True"]
                     / compared["Index"].max()) * 100
Accurate_Predicted_Total["Actual Percentage"] = actual_percentage

# Theoretical accuracy comes straight from the model's score.
theoretical_percentage = score * 100
Accurate_Predicted_Total["Theoretical Percentage"] = theoretical_percentage

# Percent error = (theoretical - actual) / theoretical, reported as an
# absolute (positive) value even when actual exceeds theoretical.
percent_error = ((theoretical_percentage - actual_percentage)
                 / theoretical_percentage) * 100
Accurate_Predicted_Total["Percent Error"] = math.fabs(percent_error)

Accurate_Predicted_Total.to_csv(path_or_buf="data/ml/stats/Keystats.csv")

from grapher import graph
graph()

import pickle

# Persist the trained classifier; reload later with pickle.load on an
# 'rb'-opened handle.
pkl_filename = "pickle_model.pkl"
with open(pkl_filename, 'wb') as file:
    pickle.dump(clf, file)