def getLimits(self):
    """Collect the six user-defined limit values from the GUI spin boxes
    and push them to the DataHandler in a single dictionary.

    Reads the position limits (elevator, aileron, rudder) and rate limits
    (pitch, roll, yaw) from their QDoubleSpinBox widgets. No return value;
    the limits are stored via DataHandler.setManuallyDefinedLimts.
    """
    # Removed the debug print of yawRateLimit that was explicitly marked
    # "TODO: Delete after debugging".
    # FIXME: switch from @classmethod to standard method later
    # NOTE(review): "setManuallyDefinedLimts" is misspelled at its definition
    # site; the spelling is kept here so the call still resolves.
    DataHandler.setManuallyDefinedLimts({
        "elevatorPositionLimit": self.doubleSpinBox_elevatorPosition.value(),
        "aileronPositionLimit": self.doubleSpinBox_aileronPosition.value(),
        "rudderPositionLimit": self.doubleSpinBox_rudderPosition.value(),
        "pitchRateLimit": self.doubleSpinBox_pitchRate.value(),
        "rollRateLimit": self.doubleSpinBox_rollRate.value(),
        "yawRateLimit": self.doubleSpinBox_yawRate.value(),
    })
def startTracking(self):
    """Start (or continue) telemetry tracking for one update cycle.

    On the first call, latches the wall-clock start time and displays it;
    on every call, reads, validates, stores and displays one batch of
    incoming telemetry and refreshes the elapsed-time / PCC-time labels.
    Returns 1 in all cases (see review note below).
    """
    # Attempt to get time RFD starts tracking and begin counting elapsed time
    try:
        if (DataHandler.lockStartTime == False):
            DataHandler.timeStartClicked = datetime.datetime.now()  # Get datetime for displaying start time to screen
            DataHandler.startTime = time.time()  # Get time for counting elapsed time
            self.label_infoBar_startTime.setText(DataHandler.timeStartClicked.strftime("Start Time: %H:%M:%S"))
            # Latch so subsequent calls do not reset the start time
            DataHandler.lockStartTime = True
    except Exception as ex:
        print("Could not request time() and/or datetime(): ", ex)
    # Initiate telemetry tracking and call methods which process incoming data
    try:
        self.label_infoBar_status.setText("Status: Tracking")
        retrievePCCLog(DataHandler)  # Method in pccFileHandler.py which reads data in and validates segments
        PopulateDictionary(DataHandler)  # Method in dataTransformer.py which checks segment integrity and stores segment in a key/value dictionary for later use
        UpdateHUD(self, DataHandler)  # Method in dataValidator.py which updates the GUI with recently read-in values
        writeToLogFile(DataHandler)  # Method in pccfileHandler.py which writes data, and alarm states to log
        # Get elapsed time and PCC time
        DataHandler.elapsedTime = (time.time() - DataHandler.startTime)
        PCCTime = DataHandler.getRawTelemetryData()
        PCCTime = PCCTime["<Clock>[ms]"]
        # Update GUI labels to elapsed time and PCC time
        self.label_infoBar_elapsedTime.setText(time.strftime("Elapsed Time: %H:%M:%S", time.gmtime(DataHandler.elapsedTime)))
        self.label_infoBar_PCCTime.setText("PCC Time: " + str(PCCTime))
        return 1
    except Exception as ex:
        print("Error occurred while initializing tracking ", ex)
        # NOTE(review): the error path returns the same value (1) as the
        # success path, so callers cannot distinguish failure — confirm intent.
        return 1
def __init__(self, data_path, image_path, until_frame=None):
    """Build the main window: load gaze/AOI data, convert it to GL items,
    and assemble the 3D plot window plus the control panel layout.

    data_path   -- path handed to DataHandler for the .ass subtitle data
    image_path  -- folder containing the video frames (.jpg images)
    until_frame -- if given, truncate gaze data after this frame
                   (for faster testing)
    """
    super(MainWindow, self).__init__()
    # create a data reader object
    self.handler = DataHandler(data_path = data_path, image_path = image_path, image_spacing = -5, scaling_factor = .25, sigma = 3)
    """ Data loading and management """
    # read in the .ass subtitle data (gaze points and aois)
    self.handler.readAss()
    # throw away bunch of data (for faster testing)
    if until_frame is not None:
        self.handler.gaze_points = self.handler.gaze_points[:until_frame,:,:]
    # load the video frames (single .jpg images in folder) [frame size is obtained here]
    self.handler.loadFramesAsGLImageItems()
    # transform gaze points data into gaussians (ImageItems)
    self.handler.gazePointsToGaussians()
    # transform aois into GLMeshItems
    self.handler.aoisToGLMeshItems(option='translucent')
    # create 3d lines from gaze points:
    self.handler.gazePointsToLines(option='translucent')
    # create heatmaps:
    self.handler.gazePointsToHeatmaps()
    self.handler.aoisToGLLinePlotItems(option='opaque')
    """ Visualization and Plotting """
    # instantiate window for the data visualization
    self.plotWindow = PlotWindow(parent=None)
    # add surface under and behind the scene
    surf_color = [0.945, 0.9098, 0.6823, 1.0]
    self.surf1 = np.zeros([2,2])
    self.surf1 = gl.GLSurfacePlotItem(z=self.surf1, color = surf_color)
    # scale the floor surface 10% beyond the scene extents, then shift it
    # so the overhang is centred and it sits below the frames
    self.surf1.scale(self.handler.gaze_points.shape[0]*self.handler.image_spacing*1.1, self.handler.frame_size[1]*1.1, 1)
    self.surf1.translate(-self.handler.gaze_points.shape[0]*self.handler.image_spacing*0.05, -self.handler.frame_size[1]*0.05, -self.handler.frame_size[0]-100*self.handler.scaling_factor)
    self.plotWindow.addItem(self.surf1)
    # second surface: rotated 90° about x to stand behind the scene
    self.surf2 = np.zeros([2,2])
    self.surf2 = gl.GLSurfacePlotItem(z=self.surf2, color = surf_color)
    self.surf2.scale(self.handler.gaze_points.shape[0]*self.handler.image_spacing*1.1, self.handler.frame_size[0]*1.1, 1)
    self.surf2.rotate(90,1,0,0)
    self.surf2.translate(-0.05*self.handler.gaze_points.shape[0]*self.handler.image_spacing, -100 * self.handler.scaling_factor, -self.handler.frame_size[0]*1.05)
    self.plotWindow.addItem(self.surf2)
    ''' adding heatmaps '''
    self.plotWindow.addItem(self.handler.XYHeatmap)
    self.plotWindow.addItem(self.handler.XZHeatmap)
    # put video frames inside of visualization (hidden until selected)
    for frame in self.handler.frames:
        self.plotWindow.addItem(frame)
        frame.setVisible(False)
    # put gaussians on top of video frames (also hidden initially):
    for gaussian in self.handler.gaussians:
        self.plotWindow.addItem(gaussian)
        gaussian.setVisible(False)
    # add aoi meshes into visualization
    for mesh in self.handler.aoiMeshes:
        self.plotWindow.addItem(mesh)
        #mesh.setVisible(False)
    # show only the first frame at start-up
    self.handler.frames[0].setVisible(True)
    # add line plots
    for line in self.handler.gazePointsLinePlotItems:
        self.plotWindow.addItem(line)
    # add aoi frame lines
    for lines in self.handler.aoiLines:
        for line in lines:
            self.plotWindow.addItem(line)
    #self.horizontalLayout.addWidget(self.plotWindow)
    #self.plotWindow.show()
    # control panel drives which frame/gaussian is visible
    self.cw = ControlWindow(mainWindow=self, handler=self.handler,parent=None)
    self.cw.showFrameNum(0)
    self.cw.showGaussianNum(0)
    # layout: controls (weight 1) beside the 3D plot (weight 5)
    self.mainLayout = QHBoxLayout(self)
    self.mainLayout.addWidget(self.cw,1)
    self.mainLayout.addWidget(self.plotWindow,5)
    self.setGeometry(100,100,900,300)
    self.show()
# Classical-classifier experiment setup over bag-of-words features.
# NOTE(review): this chunk is truncated — the label list comprehension at the
# end is cut off mid-expression, and `np` is used without a visible import
# (presumably imported elsewhere in the file).
from sklearn.metrics import precision_score, recall_score, roc_auc_score, roc_curve, confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn import svm, preprocessing
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from dataHandler import DataHandler
from boW import EOS_token

# Experiment configuration
RSEED = 50          # random seed for reproducibility
SIZE = 10
NCLASS = 3          # number of target classes
TFIDF = False       # weight term vectors by tf-idf?
NORMALIZE = False   # L2-normalize the feature vectors?

np.random.seed(RSEED)

d = DataHandler()
d.createDictionary()

#prepare data
# drop the first element — presumably a header row; TODO confirm
data = d.readDataPreproc(pre=True)[1:]
# vectorize the text field (xi[1]) of each record via the dictionary
x = [d.askDictionary.seq2tensor(xi[1], tfidf=TFIDF) for xi in data]
x = np.array(x)
if NORMALIZE:
    x = preprocessing.normalize(x)
if NCLASS == 3:
    # map emotion labels to integers: alegria=0, neutro=1, anything else=2
    y = [ 0 if yi[2] == "alegria" else 1 if yi[2] == "neutro" else 2 for yi in data
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""Entry point: load the MNIST training set, build the network, and dump
its weights in a human-readable form."""
from dataHandler import DataHandler
from network import Network
import layer
import activationFunctions

if __name__ == '__main__':
    # Hyper-parameters
    EPOCHS = 100          # total training epochs
    ETA = 0.03            # learning rate
    MBS = 10              # mini-batch size
    PATH = "mnist/"       # location of the MNIST data files
    CKPT_DIR = "./ckpt/"  # checkpoint output directory

    handler = DataHandler(PATH)
    handler.load_training()

    # 784 input pixels -> two hidden layers of 16 -> 10 output digits
    layer_sizes = [784, 16, 16, 10]
    net = Network(layer_sizes, handler, EPOCHS / 10, CKPT_DIR,
                  activationFunctions.sigmoid)
    net.weights_for_humans("./")
    # net.SGD(handler, EPOCHS, MBS, ETA, True)
# Figure/plot defaults.
plt.style.use(['ggplot'])
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.13)
plt.gcf().subplots_adjust(left=0.13)
plt.rcParams["figure.figsize"] = (14, 12)
plt.ticklabel_format(style='plain', useOffset=False)

#%%
# Combined dataset (two subjects); keep only warning-free rows.
data = pd.read_csv('../tommi+diego_test_data.csv', sep=";", header=0)
data = data.loc[data["Warning_code"] == 0].reset_index(drop=True)

# Derive force / timing features, applied in this exact order.
for _transform in (DataHandler.calculateTotalForce,
                   DataHandler.calculateStepTime,
                   DataHandler.calculateForceValues,
                   DataHandler.calculatePhaseForceValues):
    data = _transform(data)

#%% Classifier testing, rbf kernel
plots = True
x_cols = DataColumns.getSelectedCols3()
y_cols = ["label"]

#Parameters
kern = "rbf"
avg_acc, real_label, pred_label = SvmClassifiers.testSvm(
    data, kern, x_cols, y_cols, plots)
class MyHandler(http.server.BaseHTTPRequestHandler):
    """HTTP request handler serving room content, images, pdfs and usage
    statistics backed by a shared DataHandler.

    Expected url form: 'https://ip/?room=number&image=something&pdf=something'.
    """

    # Shared across all handler instances (one instance per request/thread).
    datahandler = DataHandler()

    def do_GET(self):
        """Dispatch a GET request to the statistics / content / image / pdf
        senders, recording the response time on success."""
        # Ignore browser favicon probes.
        if self.path == '/favicon.ico' and str(self.path).find('?') == -1:
            return
        try:
            start = timer()
            # current_thread() replaces the deprecated currentThread() alias.
            print(threading.current_thread().getName())
            # print("\nGET {}".format(str(self.path)))
            # in the application the url must be 'https://ip/?room=number&image=something&pdf=something'
            keys = parse_qs(self.path[2:])
            if 'check' in keys:
                self.sendStatistic()
                print('STATISTICHE INVIATE')
            elif 'room' in keys:
                # parse_qs wraps values in lists; strip the "['...']" text form
                room = str(keys.get('room'))
                room = room.replace('[\'', '').replace('\']', '')
                if 'image' in keys:
                    image = str(keys.get('image'))
                    image = image.replace('[\'', '').replace('\']', '')
                    self.sendImage(room, image)
                    print('la room selezionata è :', room)
                    print('immagine richiesta:', image)
                else:
                    self.sendContent(room)
                    print('la room selezionata è :', room)
                    print('immagine richiesta: nessuna')
                end = timer()
                # Renamed from 'time' to avoid shadowing the time module.
                elapsed = float("{0:.8f}".format(end - start))
                self.datahandler.addResponseTime(elapsed)
            elif 'pdf' in keys:
                pdf = str(keys.get('pdf'))
                pdf = pdf.replace('[\'', '').replace('\']', '')
                self.sendPdf(pdf)
                print('pdf inviato: ', self.datahandler.pdf[pdf])
                end = timer()
                elapsed = float("{0:.8f}".format(end - start))
                self.datahandler.addResponseTime(elapsed)
            else:
                print('invalid url')
                self.send_error(404, 'File Not Found: %s' % self.path)
                end = timer()
                elapsed = float("{0:.8f}".format(end - start))
                print('response in:', elapsed)
        except FileNotFoundError:
            print('invalid url')
            self.send_error(404, 'File Not Found: %s' % self.path)
        except Exception as ex:
            # Was a bare 'except:' that silently swallowed every failure;
            # keep the error counter but make the cause visible.
            print('unhandled error while serving GET:', ex)
            self.datahandler.addError()

    def doHead(self, contentType='txt'):
        """Send a 200 response with the given Content-type header."""
        self.send_response(http.HTTPStatus.OK)  # it is a protocol
        contentType = contentType + ';charset=utf-8;'
        self.send_header('Content-type', contentType)
        self.end_headers()

    def sendContent(self, room='example'):
        """Send the room's title, description etc. as JSON."""
        self.doHead('json')
        # before sending, the message must be encoded
        self.wfile.write(
            self.datahandler.getContent(room).encode('UTF-8', 'replace'))
        self.wfile.flush()

    def sendPdf(self, pdf="0"):
        """Send the raw bytes of the requested pdf."""
        self.doHead('pdf')
        self.wfile.write(
            self.load(self.datahandler.datasPath
                      + self.datahandler.getPdfName(pdf)))
        self.wfile.flush()

    def sendImage(self, room='example', image=1):
        """Send the requested image of the given room."""
        self.doHead('image')
        self.wfile.write(self.load(self.datahandler.getImageName(room, image)))
        self.wfile.flush()

    def sendStatistic(self):
        """Send collected usage statistics as JSON."""
        self.doHead('json')
        self.wfile.write(self.datahandler.getStatistic().encode(
            'UTF-8', 'replace'))  # before sending, the message must be encoded
        self.wfile.flush()

    def load(self, file):
        """Read a file's raw bytes; 'with' guarantees the handle is closed
        even if the read raises (the original leaked it on error)."""
        with open(file, 'rb') as f:
            return f.read()

    def encode(self, file):
        """Encode a text payload as UTF-8 bytes."""
        return bytes(file, 'UTF-8')

    def do_POST(self):
        # POST is handled identically to GET.
        self.do_GET()
# NOTE(review): truncated fragment — the statements below belong to an
# evaluation loop whose enclosing method starts before this chunk.
        print('=', pair[1])
        output_words, attentions = self.evaluate(pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')

    def evaluateAndShowAttention(self, input_sentence):
        """Run the model on one sentence, print the input/output pair, and
        plot the attention matrix via showAttention."""
        output_words, attentions = self.evaluate(input_sentence)
        print('input =', input_sentence)
        print('output =', ' '.join(output_words))
        showAttention(input_sentence, output_words, attentions)


if __name__ == '__main__':
    d = DataHandler()
    d.createDictionary()
    pairs = d.readDataPreproc()
    s = Seq2Seq(d.askDictionary, d.ansDictionary)
    if isfile(PATH):
        # A trained model already exists: load it and plot its recorded losses.
        s = torch.load(PATH)
        showPlot(s.plot_losses, title="Loss", axis=["Milhares de iterações", "loss"])
    else:
        # No checkpoint yet: train, then save both the state_dict and the
        # full pickled model.
        s.trainIters(pairs, 10000, print_every=5000)
        torch.save(s.state_dict(), "disc_" + PATH)
        torch.save(s, PATH)
#!/usr/bin/env python import os import json from dataHandler import DataHandler from flask import Flask, request, send_from_directory, jsonify app = Flask(__name__) www = os.path.join(os.path.dirname(os.path.abspath(__file__)), "www") dh = DataHandler(max=20) #------------------------------------------------------------------------------- # test the data APIs with curl: # # post data: # curl -d "datum=321" http://localhost:5000/api/data # # get data: # curl http://localhost:5000/api/data # #------------------------------------------------------------------------------- @app.route('/api/data', methods=['POST']) def api_data_post(): datum = request.form['datum'] try: json.loads(datum) except:
from sklearn.metrics import roc_auc_score

# Figure/plot defaults.
plt.style.use(['ggplot'])
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.13)
plt.gcf().subplots_adjust(left=0.13)
plt.rcParams["figure.figsize"] = (14, 12)
plt.ticklabel_format(style='plain', useOffset=False)

#%%
# Load the combined test data and drop rows flagged with warnings.
data = pd.read_csv('../tommi+diego_test_data.csv', sep=";", header=0)
data = data.loc[data["Warning_code"] == 0].reset_index(drop=True)

tforce_DF = DataHandler.calculateTotalForce(data)
step_t_DF = DataHandler.calculateStepTime(data)

#%% Bagging test
avg_acc, real_label, pred_label = Ensemble.testBagging(step_t_DF)

# Encode labels numerically: Normal -> 0, Fall -> 1.
pred_label_df = pred_label.replace({"Normal": 0, "Fall": 1})
real_label_df = real_label.replace({"Normal": 0, "Fall": 1})
""" from PyQt5.QtCore import QThreadPool, QRunnable, pyqtSlot from dataTransformer import * from pccFileHandler import * from dataHandler import DataHandler from dataValidator import * from mainInterface import * """ No longer need to import limits dialog as manually defined limits by the user are no longer needed/used. Leaving the import here for now but commented out in case the feature for manual limits ever needs to be enabled again (unlikely). from limitsdialog import Ui_Dialog as Form, Ui_Dialog """ # Initialize a new DataHandler() class instance for storing/updating # important information used to maintain successful execution. DataHandler = DataHandler() def openPCCTelemetryFile(self): # Attempt to open user provided .tel file try: pccTelemetryFile = openFile(self, DataHandler) # Catch any I/O or File Not Found errors except Exception as ex: print("Could not open Telemetry File (check file type and try again).", ex) def Driver_openOutputLogFile(self): # Attempt to open or create the user specified file for RFD to log output try: FileHandler_OpenOutputLogFile(self, DataHandler)
def __init__(self, interface):
    """Bind the GUI interface and create this handler's DataHandler.

    interface -- the GUI object this handler drives; kept as a back-reference.
                 The DataHandler receives this handler so the two can
                 communicate in both directions.
    """
    self.interface = interface
    self.dh = DataHandler(self)
class EventHandler:
    """Mediates between the GUI (interface) and the data layer (DataHandler):
    file selection, regression setup, and moving variables between the
    Variables / Selected / Categorical / Output listboxes.
    """

    def __init__(self, interface):
        # Back-reference to the GUI; DataHandler gets this handler too.
        self.interface = interface
        self.dh = DataHandler(self)

    def function(self):
        # dummy function
        pass

    @staticmethod
    def _ordered_difference(all_items, removed):
        """Return all_items minus removed, preserving the original order.

        Replaces the previous list(set(a).difference(b)), whose hash-based
        ordering was arbitrary and made the listboxes reshuffle on refresh.
        """
        removed = set(removed)
        return [item for item in all_items if item not in removed]

    def askOpenFileName(self):
        """Ask the user for a file and store the choice on the interface."""
        selectedFile = askopenfilename()
        self.interface.setFileName(selectedFile)

    def linearRegression(self):
        """Validate the chosen CSV and open the Linear Regression screen."""
        if self.interface.checkRequirements():
            # Update Running Label
            self.interface.setRunningName("Linear Regression")
            # Load Column Names from CSV
            variables_list = self.dh.loadCSV(self.interface.getFileName())
            # Create Linear Regression GUI
            self.interface.createLinearRegression(variables_list)
        else:
            self.interface.promptAlertForCSV()

    def addSelectedVariables(self):
        """Move the highlighted Variables into the Selected listbox."""
        variables_lb = self.interface.getVariablesLB()
        variables_list = variables_lb.get(0, 'end')
        selected_list = [
            variables_lb.get(i) for i in variables_lb.curselection()
        ]
        nonselected_list = self._ordered_difference(variables_list, selected_list)
        # Add Selected to Selected List Box
        for selected in selected_list:
            self.interface.appendSelectedVariablesLB(selected)
        # Refresh Variables List Box
        self.interface.refreshVariablesLB(nonselected_list)

    def removeSelectedVariables(self):
        """Move the highlighted Selected variables back into Variables."""
        selected_lb = self.interface.getSelectedVariablesLB()
        selected_list = selected_lb.get(0, 'end')
        variables_list = [
            selected_lb.get(i) for i in selected_lb.curselection()
        ]
        nonselected_list = self._ordered_difference(selected_list, variables_list)
        # Add variables back to Variables List Box
        for variable in variables_list:
            self.interface.appendVariablesLB(variable)
        # Refresh Selected Variables List Box
        self.interface.refreshSelectedVariablesLB(nonselected_list)

    def addCategoricalVariables(self):
        """Move the highlighted Variables into the Categorical listbox."""
        variables_lb = self.interface.getVariablesLB()
        variables_list = variables_lb.get(0, 'end')
        categorical_list = [
            variables_lb.get(i) for i in variables_lb.curselection()
        ]
        noncategorical_list = self._ordered_difference(variables_list,
                                                       categorical_list)
        # Add Categorical to Categorical List Box
        for categorical in categorical_list:
            self.interface.appendCategoricalVariablesLB(categorical)
        # Refresh Variables List Box
        self.interface.refreshVariablesLB(noncategorical_list)

    def removeCategoricalVariables(self):
        """Move the highlighted Categorical variables back into Variables."""
        categorical_lb = self.interface.getCategoricalVariablesLB()
        categorical_list = categorical_lb.get(0, 'end')
        variables_list = [
            categorical_lb.get(i) for i in categorical_lb.curselection()
        ]
        noncategorical_list = self._ordered_difference(categorical_list,
                                                       variables_list)
        # Add variables back to Variables List Box
        for variable in variables_list:
            self.interface.appendVariablesLB(variable)
        # Refresh Categorical Variables List Box
        self.interface.refreshCategoricalVariablesLB(noncategorical_list)

    def removeOutputVariables(self, __singlar=False):
        """Move output variables back into the Variables listbox.

        __singlar -- when True, clear the whole Output box (it holds at most
                     one item) instead of only the current selection.
                     (parameter name kept as-is for caller compatibility)
        """
        output_lb = self.interface.getOutputVariablesLB()
        output_list = output_lb.get(0, 'end')
        if (__singlar):
            # Output List: remove the old item (should be one or empty)
            variables_list = output_list
        else:
            variables_list = [
                output_lb.get(i) for i in output_lb.curselection()
            ]
        nonoutput_list = self._ordered_difference(output_list, variables_list)
        # Add variables back to Variables List Box
        for variable in variables_list:
            self.interface.appendVariablesLB(variable)
        # Refresh Output Variables List Box
        self.interface.refreshOutputVariablesLB(nonoutput_list)

    def addOutputVariables(self, __singlar=False):
        """Move a highlighted Variable into the Output listbox.

        __singlar -- when True, keep only the first highlighted item so the
                     Output box never holds more than one variable.
        """
        variables_lb = self.interface.getVariablesLB()
        output_list = [
            variables_lb.get(i) for i in variables_lb.curselection()
        ]
        # take the first curselected only
        if ((__singlar) and (len(output_list) > 0)):
            output_list = [output_list[0]]
        # Clear the Output box first, returning its old item to Variables;
        # the Variables box must be re-read AFTER this, as it changes.
        self.removeOutputVariables(True)
        variables_list = variables_lb.get(0, 'end')
        # Get the nonoutputs to refresh variable list box
        nonoutput_list = self._ordered_difference(variables_list, output_list)
        # Add output to Output List Box
        for output in output_list:
            self.interface.appendOutputVariablesLB(output)
        # Refresh Variables List Box
        self.interface.refreshVariablesLB(nonoutput_list)
class MyHandler(http.server.BaseHTTPRequestHandler):
    """HTTP handler serving room content, images and pdfs from a shared
    DataHandler.

    Expected url form: 'https://ip/?room=number&image=something&pdf=something'.
    """

    # Shared across all handler instances (one instance per request).
    datahandler = DataHandler()

    def do_GET(self):
        """Dispatch a GET request to the pdf / image / content senders."""
        # Ignore browser favicon probes.
        if self.path == '/favicon.ico' and str(self.path).find('?') == -1:
            return
        start = timer()
        #print("\nGET {}".format(str(self.path)))
        #in the application the url must be 'https://ip/?room=number&image=something&pdf=something'
        keys = parse_qs(self.path[2:])
        try:
            # parse_qs wraps values in lists; strip the "['...']" text form.
            # Missing parameters raise KeyError and fall through to the
            # handler below.
            room = str(keys['room']).replace('[\'', '').replace('\']', '')
            image = str(keys['image']).replace('[\'', '').replace('\']', '')
            pdf = str(keys['pdf']).replace('[\'', '').replace('\']', '')
            if pdf != 'false':
                self.sendPdf(pdf)
                print('pdf inviato: ', self.datahandler.pdf[pdf])
            elif image.isdecimal():
                self.sendImage(room, image)
                print('la room selezionata è :', room)
                print('immagine richiesta:', image)
            else:
                self.sendContent(room)
                print('la room selezionata è :', room)
                print('immagine richiesta: nessuna')
            end = timer()
            print('response in:', end - start)
        except Exception as ex:
            # Was a bare 'except:' that hid the actual failure (typically a
            # KeyError for a missing query parameter); log the cause.
            print('error in sending response', ex)

    def doHead(self, contentType='txt'):
        """Send a 200 response with the given Content-type header."""
        self.send_response(http.HTTPStatus.OK)  # it is a protocol
        contentType = contentType + ';charset=utf-8;'
        self.send_header('Content-type', contentType)
        self.end_headers()

    def sendContent(self, room='example'):
        """Send the room's title, description etc. as JSON."""
        self.doHead('json')
        # before sending, the message must be encoded
        self.wfile.write(
            self.datahandler.getContent(room).encode('UTF-8', 'replace'))
        self.wfile.flush()

    def sendPdf(self, pdf="0"):
        """Send the raw bytes of the requested pdf."""
        self.doHead('pdf')
        self.wfile.write(
            self.load(self.datahandler.datasPath
                      + self.datahandler.getPdfName(pdf)))
        self.wfile.flush()

    def sendImage(self, room='example', image=1):
        """Send the requested image of the given room."""
        self.doHead('image')
        self.wfile.write(self.load(self.datahandler.getImageName(room, image)))
        self.wfile.flush()

    def load(self, file):
        """Read a file's raw bytes; 'with' guarantees the handle is closed
        even if the read raises (the original leaked it on error)."""
        with open(file, 'rb') as f:
            return f.read()

    def encode(self, file):
        """Encode a text payload as UTF-8 bytes."""
        return bytes(file, 'UTF-8')

    def do_POST(self):
        # POST is handled identically to GET.
        self.do_GET()
from columns import DataColumns
from dataHandler import DataHandler
from tfClassifiers import TfClassifiers
from collections import namedtuple

# Figure/plot defaults.
plt.style.use(['ggplot'])
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.13)
plt.gcf().subplots_adjust(left=0.13)
plt.rcParams["figure.figsize"] = (14, 12)
plt.ticklabel_format(style='plain', useOffset=False)

#%%
# Load test data and keep only warning-free rows.
data = pd.read_csv('../tommi_test_data.csv', sep=";", header=0)
data = data.loc[data["Warning_code"] == 0].reset_index(drop=True)
basedf = data

# Feature pipeline: total force, step times, then min/max standardization.
tforce_DF = DataHandler.calculateTotalForce(data)
step_t_DF = DataHandler.calculateStepTime(data)
standardized_data = DataHandler.minmaxStandardizeForces(step_t_DF)

#%%
plots = True
x_cols = DataColumns.getSelectedCols2()
y_cols = ["label"]

avg_acc, real_label, pred_label = TfClassifiers.testNn(standardized_data,
                                                       x_cols, y_cols, plots)
def main():
    """Program entry point.

    NOTE(review): truncated — only the DataHandler construction from
    'data.json' is visible in this chunk.
    """
    dataHandler = DataHandler("data.json")
# Figure/plot defaults.
plt.style.use(['ggplot'])
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.13)
plt.gcf().subplots_adjust(left=0.13)
plt.rcParams["figure.figsize"] = (14, 12)
plt.ticklabel_format(style='plain', useOffset=False)

#%%
#data = pd.read_csv('../tommi_test_data.csv', sep=";", header=0)
# Harder dataset with more varied stepping; drop rows flagged with warnings.
data = pd.read_csv('../tommi_test_data_more_diff_steps.csv', sep=";", header=0)
data = data.loc[data["Warning_code"] == 0].reset_index(drop=True)

tforce_DF = DataHandler.calculateTotalForce(data)
step_t_DF = DataHandler.calculateStepTime(data)

#%% Bagging test
avg_acc, real_label, pred_label = Ensemble.testBagging(step_t_DF)

# Encode labels numerically: Normal -> 0, Fall -> 1.
pred_label_df = pred_label.replace({"Normal": 0, "Fall": 1})
real_label_df = real_label.replace({"Normal": 0, "Fall": 1})
# Figure/plot defaults.
plt.style.use(['ggplot'])
plt.tight_layout()
plt.gcf().subplots_adjust(bottom=0.13)
plt.gcf().subplots_adjust(left=0.13)
plt.rcParams["figure.figsize"] = (14, 12)
plt.ticklabel_format(style='plain', useOffset=False)

#%% errors out from data
# harder data with various stepping styles
data = pd.read_csv('../tommi_test_data_more_diff_steps.csv', sep=";", header=0)
# Keep only warning-free rows.
data = data.loc[data["Warning_code"] == 0]
data = data.reset_index(drop=True)
basedf = data

# Feature pipeline: total force, step times, then min/max standardization.
tforce_DF = DataHandler.calculateTotalForce(data)
step_t_DF = DataHandler.calculateStepTime(data)
standardized_data = DataHandler.minmaxStandardizeForces(step_t_DF)

# generating datasets where data has been shuffled and last random x rows
# have been dropped and data is now balanced
dataset_amount = 1
drop_amount = 40
datasets = DataHandler.genRandomDatasets(data, dataset_amount, drop_amount)
data = datasets[0]
# NOTE(review): bare expression — a no-op in a plain script; in a
# Spyder/Jupyter '#%%' cell it displays basedf. Possibly a leftover.
basedf

#%% quick check on data
xy_cols = DataColumns.getSelectedCols2andY()
# Bare expression: displays the selected columns when run as a notebook cell.
data.loc[:, xy_cols]