def OnSave(self, e):
    """Write the current spectrum data to a user-chosen file.

    The menu item that fired the event selects the export conversion:
    id 201 -> photoabsorption, id 202 -> refractive index, anything
    else -> no conversion.

    Args:
        e: wx event whose ``Id`` selects the conversion mode.
    """
    # Map the triggering menu id to the export conversion mode.
    convert_to = None
    if e.Id == 201:
        convert_to = "photoabsorption"
    elif e.Id == 202:
        convert_to = "refractive_index"
    logger.info("Save")
    # Use the dialog as a context manager so the native dialog resource
    # is destroyed on every path — the original leaked it (no Destroy()),
    # including when the user cancelled.
    with wx.FileDialog(self, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as fd:
        if fd.ShowModal() == wx.ID_OK:
            metadata = {
                "Density": float(self.DensityText.GetValue()),
                "Molecular Formula": self.StoichiometryText.GetValue(),
                "Formula Mass": data.calculate_FormulaMass(self.Stoichiometry),
            }
            # Columns: energy, real (KK) spectrum, imaginary spectrum as ASF.
            data.export_data(
                fd.GetPath(),
                numpy.transpose(
                    numpy.vstack(
                        (
                            self.Full_E,
                            self.KK_Real_Spectrum,
                            data.coeffs_to_ASF(self.Full_E, self.Imaginary_Spectrum),
                        )
                    )
                ),
                header_info=metadata,
                convert_to=convert_to,
            )
def search_data(query, category, intermediate_value):
    """Search <intermediate_value> based on user-specified <query> and <category>.

    Args:
        query (str): A search query.
        category (str): A category ('all' / 'spam' / 'ham').
        intermediate_value (str): A string of data to process in JSON format.

    Returns:
        A 3-tuple of (matching data, status message, export anchor):
        on success the list of matches, "<n> items matched.", and an
        html.A download link for the exported CSV; on a parse error an
        empty list, the error message from the parser, and the anchor.
    """
    status_code, data = parse_json(intermediate_value, category)
    download_link = file_download_link('output.csv')
    # Export button is rendered on both the success and error paths.
    anchor = html.A(
        html.Button('Export Results', id='exportBtn', n_clicks=0),
        href=download_link,
        download=download_link,
    )
    if status_code == 1:
        # Display empty table, and show the error message
        return [], data, anchor
    data = get_data(query, data)
    # Persist the filtered results so the download link has content.
    output = export_data(data_dict=data)
    save_file('output.csv', output)
    return data, f"{len(data)} items matched.", anchor
def calc_yearly_diff(rc, rid, load_saved=(True, True), save_file=(False, False),
                     filename=('MaleRankCountDiffIndex.csv',
                               'MaleRankCountDiffCount.csv'),
                     verbose=False):
    """Compute year-over-year rank-position and count differences.

    For every year after the first, each entry's change in rank position
    and in count relative to the previous year is computed. An id absent
    from the previous year is treated as entering just past the last
    occupied rank (``lens[cur_yr]``) with a previous count of 0.

    Args:
        rc: DataFrame of counts (index = rank position, columns = years).
        rid: DataFrame of ids aligned with ``rc``.
        load_saved: pair of flags; try importing each result from ``filename``.
        save_file: pair of flags; export each result via ``data.export_data``.
        filename: pair of CSV names for (rank-diff, count-diff) results.
        verbose: print progress information.

    Returns:
        (rc_diffi, rc_diffc): DataFrames of rank-position diffs and
        count diffs, one column per year after the first.
    """
    # Try to load previously computed results first.
    isloaded = [False, False]
    if len(load_saved) == 2 and len(filename) == 2:
        if verbose:
            print("Trying to import from " + filename[0] + " and " + filename[1])
        if load_saved[0]:
            rc_diffi = data.import_csv(file=filename[0])
            if len(rc_diffi) > 0:
                isloaded[0] = True
        if load_saved[1]:
            rc_diffc = data.import_csv(file=filename[1])
            if len(rc_diffc) > 0:
                isloaded[1] = True
        if isloaded[0] and isloaded[1]:
            if verbose:
                print("Import successful")
            return rc_diffi, rc_diffc
    if not isloaded[0]:
        rc_diffi = pd.DataFrame(index=rc.index, columns=rc.columns[1:], dtype='int')
    if not isloaded[1]:
        rc_diffc = pd.DataFrame(index=rc.index, columns=rc.columns[1:], dtype='int')
    prev_yr = rc.columns[0]
    # First index of NaN values per column, so the loops below only visit
    # rows that actually hold data.
    lens = rc.count(axis=0)
    for cur_yr in rc.columns[1:]:
        for i in rc.index[:lens[cur_yr]]:
            prev_count = 0
            cur_id = rid[cur_yr][i]
            # Locate this id in the previous year's ranking.
            res = rid[prev_yr][rid[prev_yr] == cur_id].index
            if res.size:
                prev_count = rc[prev_yr][res[0]]
            else:
                # Not present last year: treat as entering past the last rank.
                res = [lens[cur_yr]]
            # .at avoids chained assignment, which silently fails under
            # pandas copy-on-write.
            rc_diffi.at[i, cur_yr] = i - res[0]
            rc_diffc.at[i, cur_yr] = rc[cur_yr][i] - prev_count
        if verbose:
            print("Year: " + str(cur_yr))
        prev_yr = cur_yr
    # Saving data. The original exported rc_diffi to BOTH files; the second
    # file must contain the count differences (rc_diffc).
    if len(save_file) == 2:
        if save_file[0]:
            data.export_data(df=rc_diffi, filename=filename[0], path='data')
        if save_file[1]:
            data.export_data(df=rc_diffc, filename=filename[1], path='data')
    return rc_diffi, rc_diffc
def save_dfs(dfs, filenames, save_file, verbose):
    """Export each DataFrame in *dfs* whose corresponding save flag is set.

    Nothing is exported unless *filenames* and *save_file* both match
    *dfs* in length. When *verbose* is true a progress line is printed.
    """
    expected = len(dfs)
    if len(save_file) != expected or len(filenames) != expected:
        return
    if verbose:
        print("\nSaving.")
    for df, name, wanted in zip(dfs, filenames, save_file):
        if wanted:
            data.export_data(df=df, filename=name, path='data')
import face_recognition
import cv2
import pyttsx3
import time
import data
import ai

# Load previously trained identities from the data store.
# NOTE(review): each record from data.export_data() appears to be a
# (name, face_encoding, ...) tuple — confirm against the data module.
print('loads trained data')
known_face_names = []
for person in data.export_data():
    # Keep a [name, encoding, placeholder] triple per known person.
    known_face_names.append([person[0], person[1], None])
known_face_encodings = []
for face_encoding in known_face_names:
    # Index 1 of each triple is the stored face encoding.
    known_face_encodings.append(face_encoding[1])
print('done loading')

# Text-to-speech engine (presumably used later to announce matches).
engine = pyttsx3.init()

# Open camera index 1 at 1920x1080; index 0 is usually the built-in
# webcam, so this assumes an external camera — TODO confirm.
cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

# Grab one frame up front so the user can pick a region of interest.
im = cap.read()[1]  # [1] drops the success flag returned by read()
r = cv2.selectROI(im)  # blocks until the user selects a ROI
process_this_frame = True
def import_file(n_clicks, filename, contents, chantier, type_doc, nom_capteur, date):
    """Import an uploaded measurement file and store it for the worksite.

    The uploaded content is parsed into a DataFrame and exported twice:
    once as the current ("actif") file and once as a dated "archive"
    copy, in the folder matching the document type.

    Args:
        n_clicks: import-button click count; 0 or less is a no-op.
        filename: original name of the uploaded file.
        contents: raw uploaded content (None when nothing was uploaded).
        chantier: worksite identifier used as the storage root.
        type_doc: 1=topographie, 2=inclinometrie, 3=piezometrie,
            4=tirant, 5=jauge; any other value imports nothing but still
            reports success (original behavior preserved).
        nom_capteur: sensor name, used as file base for types 2 and 3.
        date: date string embedded in the archive file name.

    Returns:
        A status message; "" when nothing was imported.
    """
    if n_clicks <= 0:
        return ""
    if contents is None:
        return ""
    df = read_data(contents, filename)
    # (base file name, storage folder) per document type. Types 2 and 3
    # are per-sensor, so their base is the sensor name.
    targets = {
        1: ("topo", "topographie"),
        2: (nom_capteur, "inclinometrie"),
        3: (nom_capteur, "piezometrie"),
        4: ("tirant", "tirant"),
        5: ("jauge", "jauge"),
    }
    if type_doc in targets:
        base, dossier = targets[type_doc]
        # Current copy plus a dated archive copy.
        export_data(df, chantier, "actif", dossier, f"{base}.csv")
        export_data(df, chantier, "archive", dossier, f"{base}_{date}.csv")
    return "Le fichier à bien été importé"