import pickle

from Capitulo_6.formato_con_delimitadores.ficheros_delimitados_csv_tsv import lectura_de_planetas

if __name__ == '__main__':
    planetas = list(lectura_de_planetas('planetas.csv'))
    with open('pickled_planetas', 'wb') as fw:
        pickler = pickle.Pickler(fw)
        pickler.dump(planetas)
    with open('pickled_planetas', 'rb') as fr:
        planetas_serializados = pickle.Unpickler(fr).load()
    print(f'Planetas serializados {planetas_serializados}')
    assert planetas == planetas_serializados
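# The snippet above round-trips a list through explicit Pickler/Unpickler objects.
# For a single object per file, the module-level shortcuts are equivalent; a minimal
# sketch (the 'planetas.pkl' filename and sample data here are illustrative, not
# from the original):
import pickle

datos = [('Mercurio', 0.39), ('Venus', 0.72), ('Tierra', 1.0)]
with open('planetas.pkl', 'wb') as fw:
    pickle.dump(datos, fw)    # same as pickle.Pickler(fw).dump(datos)
with open('planetas.pkl', 'rb') as fr:
    copia = pickle.load(fr)   # same as pickle.Unpickler(fr).load()
assert copia == datos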
import pickle

import numpy as np

qmin, qmax = -1.4, 1.4  # 1/A, limits for q vectors to be written to file
apply_APC = True  # True when aperture correction should be performed

# energies
dE = 1
E = np.arange(10, 40, dE) + dE
E = None  # automatically determine Erange (over full image)

# 1. get calibration object
edisp_name = './EDisp.pkl'
with open(edisp_name, 'rb') as FILE:  # pickle files must be opened in binary mode
    edisp = pickle.Unpickler(FILE).load()
e2y = edisp['e2x']
print('READ Energy Calibration: ' + edisp['descr'])

# 2. read undistorted E-q map
filename = './qseries_sum-rebinned64x11.pkl'
with open(filename, 'rb') as FILE:
    data = pickle.Unpickler(FILE).load()
yqmap = data['yqmap']
qaxis = data['qaxis']
dqx = qaxis[1] - qaxis[0]
assert np.allclose(dqx, np.diff(qaxis))  # require an evenly spaced q-point list
Ny, Nx = yqmap.shape
def lire_fichier(lieu):
    """Read a pickled object from a file more conveniently. Takes the file path as parameter."""
    with open(lieu, "rb") as mon_fichier:
        depickler = pickle.Unpickler(mon_fichier)
        objet = depickler.load()
    return objet
folder_path = "20190623_simulations_clonesig_cn_cancer_type/type5-perc_diploid20-nb_clones2-nb_mut300" start_time = 100 end_time = 234 """ print(folder_path) cancer_type = int(folder_path.split('/')[1].split('type')[1].split('-')[0]) perc_diploid = int( folder_path.split('/')[1].split('perc_diploid')[1].split('-')[0]) nb_clones = int( folder_path.split('/')[1].split('nb_clones')[1].split('-')[0]) nb_mut = int(folder_path.split('/')[1].split('nb_mut')[1].split('-')[0]) # get metrics from simulated data with open('{}/sim_data'.format(folder_path), 'rb') as sim_pickle_file: sim_pickle = pickle.Unpickler(sim_pickle_file) sim_data_obj = sim_pickle.load() dist_matrix = sp.spatial.distance.squareform( sp.spatial.distance.pdist(sim_data_obj.pi.dot(sim_data_obj.MU), 'cosine')) if len(dist_matrix[dist_matrix > 0]): min_dist = np.min(dist_matrix[dist_matrix > 0]) max_dist = np.max(dist_matrix[dist_matrix > 0]) avg_dist = np.mean(dist_matrix[dist_matrix > 0]) else: min_dist, max_dist, avg_dist = np.nan, np.nan, np.nan avg_major_cn = np.mean(sim_data_obj.C_tumor_tot - sim_data_obj.C_tumor_minor) avg_tot_cn = np.mean(sim_data_obj.C_tumor_tot) actual_perc_diploid = sum((sim_data_obj.C_tumor_tot == 2) & ( sim_data_obj.C_tumor_minor == 1)) / sim_data_obj.N
def get(filename):
    """Retrieve the object saved by the save function in the file filename."""
    with open(filename, 'rb') as f:
        get_record = pickle.Unpickler(f)
        return get_record.load()
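# The docstring above refers to a companion `save` function that is not shown.
# A plausible counterpart, assuming it simply pickles one object per file
# (hypothetical reconstruction, not the original code):
import pickle

def save(obj, filename):
    """Save obj to filename so that get(filename) can restore it."""
    with open(filename, 'wb') as f:
        pickle.Pickler(f).dump(obj)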
import lib.gen_GDT as gen_GDT
import lib.gen_BM as gen_BM
# End of the preliminary definitions & imports
#############################

#############################
# Importation of the generated sales data
filename_transaction = 'transaction_data_' + data_version + '.dat'
# ensure that the path to the data file is correct
script_dir = os.path.dirname(__file__)  # absolute dir the script is in
rel_path_transaction = "data/" + filename_transaction
abs_file_transaction = os.path.join(script_dir, rel_path_transaction)
with open(abs_file_transaction, 'rb') as sales:
    my_depickler = pickle.Unpickler(sales)
    Proba_product_train = my_depickler.load()
    Inventories_train = my_depickler.load()
    Proba_product_test = my_depickler.load()
    Inventories_test = my_depickler.load()
    Revenue = my_depickler.load()
    u = my_depickler.load()
    p = my_depickler.load()

# definitions preparing the future exports
# Names of the files to write, consistent with the file opened above
filename_choice_model_GDT = 'choice_model_GDT_' + str(data_version) + '.dat'
filename_choice_model_BM = 'choice_model_BM_' + str(data_version) + '.dat'
rel_path_choice_model_GDT = "data/" + filename_choice_model_GDT
abs_file_choice_model_GDT = os.path.join(script_dir, rel_path_choice_model_GDT)
def load_cachefile(self):
    cachesize = 0
    previous_progress = 0
    previous_percent = 0

    # Calculate the correct cachesize of all those cache files
    for cache_class in self.caches_array:
        cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
        with open(cachefile, "rb") as cachefile:
            cachesize += os.fstat(cachefile.fileno()).st_size

    bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)

    for cache_class in self.caches_array:
        cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
        with open(cachefile, "rb") as cachefile:
            pickled = pickle.Unpickler(cachefile)
            # Check cache version information
            try:
                cache_ver = pickled.load()
                bitbake_ver = pickled.load()
            except Exception:
                logger.info('Invalid cache, rebuilding...')
                return
            if cache_ver != __cache_version__:
                logger.info('Cache version mismatch, rebuilding...')
                return
            elif bitbake_ver != bb.__version__:
                logger.info('Bitbake version mismatch, rebuilding...')
                return

            # Load the rest of the cache file
            current_progress = 0
            while cachefile:
                try:
                    key = pickled.load()
                    value = pickled.load()
                except Exception:
                    break
                if not isinstance(key, str):
                    bb.warn("%s from extras cache is not a string?" % key)
                    break
                if not isinstance(value, RecipeInfoCommon):
                    bb.warn("%s from extras cache is not a RecipeInfoCommon class?" % value)
                    break

                if key in self.depends_cache:
                    self.depends_cache[key].append(value)
                else:
                    self.depends_cache[key] = [value]

                # only fire events on even percentage boundaries
                current_progress = cachefile.tell() + previous_progress
                current_percent = 100 * current_progress / cachesize
                if current_percent > previous_percent:
                    previous_percent = current_percent
                    bb.event.fire(
                        bb.event.CacheLoadProgress(current_progress, cachesize),
                        self.data)

            previous_progress += current_progress

    # Note: depends cache number is corresponding to the parsing file numbers.
    # The same file has several caches, still regarded as one item in the cache
    bb.event.fire(
        bb.event.CacheLoadCompleted(cachesize, len(self.depends_cache)),
        self.data)
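# load_cachefile streams a version header followed by alternating key/value
# records until EOF. A minimal standalone sketch of that record format
# (CACHE_VERSION and the entries dict are illustrative, not BitBake's):
import pickle

CACHE_VERSION = "1.0"

def write_cache(path, entries):
    with open(path, "wb") as f:
        p = pickle.Pickler(f, pickle.HIGHEST_PROTOCOL)
        p.dump(CACHE_VERSION)
        for key, value in entries.items():
            p.dump(key)
            p.dump(value)

def read_cache(path):
    entries = {}
    with open(path, "rb") as f:
        u = pickle.Unpickler(f)
        if u.load() != CACHE_VERSION:
            return None  # caller should rebuild, as load_cachefile does
        try:
            while True:
                key = u.load()
                entries[key] = u.load()
        except EOFError:
            pass
    return entries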
All classes inherit from these two, so the serialization can be customized.
"""
# pickle.Pickler(file[, protocol]) -> same meaning as for pickle.dump
# The Pickler object has a dump(obj) method.
# All objects are written to the file object held by the Pickler instance.
import pickle

p = pickle.Pickler(open("eine_datei.dat", "wb"), 2)
p.dump({"vorname": "Gyula", "nachname": "Orosz"})
p.dump([1, 2, 3, 4, 5, 6, 7])
p.dump("Das ist ein Test")
p.dump(("eins", "zwei", "Polizei"))

# pickle.Unpickler(file)
# The object has a load() method -> reads the next object from the file
u = pickle.Unpickler(open("eine_datei.dat", "rb"))
try:
    while True:
        print(u.load())
except EOFError:
    pass

# Before Python 3.0 there were two modules: pickle and cPickle.
# cPickle is an optimized implementation in C (faster, but not
# OS-independent).
# Since 3.0, pickle automatically falls back on a C implementation
# when one is available, otherwise on the standard implementation.
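# The EOFError loop above is the standard way to drain a multi-object pickle
# file; wrapped in a generator it reads a little more cleanly (a small sketch
# reading the same file as above):
def iter_pickles(dateiname):
    with open(dateiname, "rb") as f:
        u = pickle.Unpickler(f)
        while True:
            try:
                yield u.load()
            except EOFError:
                return

for obj in iter_pickles("eine_datei.dat"):
    print(obj)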
def creer_compte(nom_utilisateur=""):
    """Create a new user account."""
    cookies = {}
    # clear_screen()
    if nom_utilisateur == "":
        nom_utilisateur = input(
            "Choisissez un nom d'utilisateur (Q pour revenir au menu): ")
    if nom_utilisateur.capitalize() != "Q":
        if len(nom_utilisateur) < 6:
            print(
                "Nom utilisateur invalide. Il doit au moins avoir 6 caracteres, choisissez encore...\n"
            )
            input("<<< retour")
            creer_compte()
        else:
            with open("..//data//users//users", "rb") as fichier:
                lecteur = pickle.Unpickler(fichier)
                users = lecteur.load()
            if nom_utilisateur in users:
                clear_screen()
                print("Ce nom est deja pris")
                time.sleep(2)
                creer_compte()
            else:
                clear_screen()
                print("Nom utilisateur valide\n\n")
                classe = input(
                    "Maintenant dites nous votre classe(2nd, 1er ou tle): ")
                if classe in var.classes:
                    # Create the question cookies
                    with open("..//data//questions//%s//liste_lecons" % classe,
                              "rb") as file:
                        reader = pickle.Unpickler(file)
                        lecons = reader.load()
                    for lecon in lecons:
                        cookies[lecon] = []
                    # Save the new user
                    utilisateur = User.User(nom_utilisateur, classe, cookies)
                    with open("..//data//users//" + nom_utilisateur, "wb") as usr:
                        writer = pickle.Pickler(usr)
                        writer.dump(utilisateur)
                    users.append(nom_utilisateur)
                    with open("..//data//users//users", "wb") as fd:
                        writer = pickle.Pickler(fd)
                        writer.dump(users)
                    clear_screen()
                    print("Compte créé avec succes\n")
                    print("Bienvenue, %s" % nom_utilisateur.capitalize())
                    time.sleep(2)
                    connecter(nom_utilisateur)
                else:
                    clear_screen()
                    print("Classe invalide, choisissez encore\n")
                    input("<<< Retour ")
                    creer_compte(nom_utilisateur)
    else:
        pass
delay = int(delay) // 10  # number of 10-second ticks; the original `int(delay)//10 >= 1` produced a boolean, which looks like a bug
if employe != 0:
    for i in range(0, delay):
        start = time.time()
        invfer += 5 * employe

Time()
tuto = 'non'
print('Minage Simulator')
time.sleep(1)
print("CONSEIL: agrandissez votre console au maximum!")
time.sleep(1)
print("Assistant: Récupération de la sauvegarde en cours...")
chargefile = input("Charger la sauvegarde 1 ou 2?")
if chargefile == "1":
    with open("sauvegardes/save1.txt", "rb") as file:
        monPickle = pickle.Unpickler(file)
        data = monPickle.load()
    invcharbon = data["invcharbon"]
    invcuivre = data["invcuivre"]
    invfer = data["invfer"]
    invargent = data["invargent"]
    invor = data["invor"]
    invdiamant = data["invdiamant"]
    invplatine = data["invplatine"]
    minebucks = data["minebucks"]
    foreuse = data["foreuse"]
    essence = data["essence"]
    reservoir = data["reservoir"]
    machine = data["machine"]
    nommachine = data["nommachine"]
    detecteur = data["detecteur"]
def afficherNombrePremiers():
    with open("liste_nb_premiers", "rb") as f:
        print(pickle.Unpickler(f).load())
import numpy
import sys
import matplotlib.pyplot as P
import pickle

path = sys.argv[1]

with open(path + "/info.p", "rb") as f:
    pick = pickle.Unpickler(f)
    train_avg = pick.load()
    train_cost = pick.load()
    train_ranks = pick.load()
    train_percentiles = pick.load()
    train_dist = pick.load()
'''
with open(path + "/info.p", "rb") as f:
    pick = pickle.Unpickler(f)
    train_avg = numpy.concatenate((train_avg, pick.load()))
    train_cost = numpy.concatenate((train_cost, pick.load()))
    train_ranks = pick.load()
    train_percentiles = numpy.concatenate((train_percentiles, pick.load()))
    train_dist = numpy.concatenate((train_dist, pick.load()))
'''

P.figure(1)
def load(self):
    if os.path.exists(self.path):
        with open(self.path, 'rb') as money_rb:
            self.money = pickle.Unpickler(money_rb).load()
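# The load method above presumably has a matching writer somewhere in its class.
# A hypothetical counterpart (the Wallet class shell here is illustrative only,
# not the original code):
import os
import pickle

class Wallet:
    def __init__(self, path, money=0):
        self.path = path
        self.money = money

    def save(self):
        with open(self.path, 'wb') as money_wb:
            pickle.Pickler(money_wb).dump(self.money)

    def load(self):
        if os.path.exists(self.path):
            with open(self.path, 'rb') as money_rb:
                self.money = pickle.Unpickler(money_rb).load()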
def InitAlgebra(self):
    print("Algebra... ")
    HRho = H_Fermi.clone()
    MaxRho = 1
    Rho = RhosUpTo(MaxRho, 2)  # second argument is normalization of rho.
    FermiRho = Rho.ToFermi()
    # Temporarily only do ph->ph
    FermiRho.QPCreatePart()
    # If the files for the algebra exist, just read them in.
    if (os.path.isfile("./Terms/CIS") and os.path.isfile("./Terms/phphPterm")):
        print("Found Existing Terms, Unpickling them.")
        cf = open("./Terms/CIS", "rb")
        pf = open("./Terms/phphPterm", "rb")
        UnpickleCIS = pickle.Unpickler(cf)
        UnpicklePT = pickle.Unpickler(pf)
        self.CISTerms = UnpickleCIS.load()
        cf.close()
        self.PTerms = UnpicklePT.load()
        pf.close()
        self.PTerms.AssignBCFandTE()  # This caused pickling issues, so it is done after loading.
        print("Using Perturbative terms: ")
        for Term in self.PTerms:
            Term.Print()
        self.VectorShape = FermiRho.clone()
        self.ResidualShape = self.CISTerms.clone()
        return
    RhoH = FermiRho.clone()
    RhoH.NormalOrder(0)
    RhoH.NormalPart(0)
    RhoH.UnContractedPart()
    HRho.NormalOrder(0)
    HRho.NormalPart(0)
    HRho.UnContractedPart()
    LeftVac = RhoH.MyConjugate()
    # Since this is a DM expression we also have to project on the Right
    # and add the two resulting expressions.
    # (add or subtract.... !!!!!! - JAP 2011)
    RightVac = LeftVac.clone()
    RhoTemp = RhoH.clone()
    RhoH.Times(HRho)
    HRho.Times(RhoTemp)
    HRho.Subtract(RhoH)
    print(" NormalOrdering HRho - RhoH ... ")
    # Only terms which close against LeftVac are kept during this process.
    HRho.NormalOrder(0, LeftVac.ClassesIContain())
    LeftVac.Times(HRho)
    HRho.Times(RightVac)
    print("NormalOrdering LeftVac*(HRho-RhoH) ... ")
    LeftVac.NormalOrder(0, [[0, 0, 0, 0, 0, 0, 0]], False)
    HRho.NormalOrder(0, [[0, 0, 0, 0, 0, 0, 0]], False)
    # Note: adding instead of subtracting the right-vacuum part broke everything.
    LeftVac.Add(HRho)
    LeftVac.FullyContractedPart()
    print("*_*_*_*_*_*_*_*__*_*_*_*_*_*_*_*_*")
    print("Using this expression for LCIS: ")
    for Term in LeftVac:
        Term.Print()
    print("*_*_*_*_*_*_*_*__*_*_*_*_*_*_*_*_*")
    self.CISTerms = LeftVac.clone()
    OutFile = open("./Terms/CIS", "wb")  # pickle output must be opened in binary mode
    pickle.Pickler(OutFile, 0).dump(self.CISTerms)
    OutFile.close()
    # These are the critical things assumed by the numerical part.
    self.VectorShape = FermiRho.clone()
    self.ResidualShape = self.CISTerms.clone()
    return
if n < nnodes: r = get_summary(res_list[0],r) elif n >= nnodes and n < nnodes + nclients: print(cc,n) r2 = get_summary(res_list[0],r2) get_lstats(r) get_lstats(r2) with open(p_sfile,'w') as f: p = pickle.Pickler(f) p.dump(r) with open(p_cfile,'w') as f: p = pickle.Pickler(f) p.dump(r2) else: with open(p_sfile,'r') as f: p = pickle.Unpickler(f) r = p.load() opened = True with open(p_cfile,'r') as f: p = pickle.Unpickler(f) r2 = p.load() opened = True # merge_results(r,cfgs["NODE_CNT"],0) # merge_results(r2,cfgs["CLIENT_NODE_CNT"],0) try: print("Tput: {} / {} = {}".format(avg(r2["txn_cnt"]),avg(r2["total_runtime"]),sum(r2["txn_cnt"])/sum(r2["total_runtime"]))) except KeyError: print("") if s == {}: s = r
def jouer():
    """Run one game round."""
    i = 1
    on_play = True
    clear_screen()
    with open("..//data//questions//%s//liste_lecons" % var.utilisateur.classe,
              "rb") as file:
        reader = pickle.Unpickler(file)
        lecons = reader.load()
    print("Voici les lecons disponibles:\n")
    for lecon in lecons:
        print("%d: %s" % (i, lecon))
        i += 1
    print("\n")
    chx = input("Sur quelle lecon souhaitez-vous travaillez?: ")
    try:
        chx = int(chx)
        lecon = lecons[(chx - 1)]
    except ValueError:
        clear_screen()
        print("Veuillez choisir un numero\n")
        input("<<< Retour")
        jouer()
    else:
        try:
            var.questions = init_question(var.utilisateur.classe,
                                          lecons[(chx - 1)])
            lecon = lecons[(chx - 1)]
        except IndexError:
            clear_screen()
            print("Veuillez choisir un numero disponible")
            input("<<< Retour")
            jouer()
        while on_play:
            if len(var.questions) == 0:
                clear_screen()
                print(
                    "Vous avez repondu correctement à toutes les questions! %s!"
                    % (random.choice(var.congrats)))
                input("<<< Retour")
                on_play = False
                save()
            else:
                var.questions = poser_question(var.questions, lecon)
                chx = input("Continuer ou quitter (Q) ?: ")
                if chx.capitalize() == "Q":
                    on_play = False
                    save()
def sift_init():
    """
    init SIFT descriptors and keypoints for all images in query/
    :return: just pass them to sift_match
    """
    if LOAD_SIFT == 0:
        query_img_name = [[], [], []]
        query_img = [[], [], []]
        query_img_hog = [[], [], []]
        # kp = [[], [], []]
        des = [[], [], []]
        sift_pt = [[], [], []]
        for direct in range(FRONT, NONE):
            for base_path, folder_list, file_list in os.walk(
                    'query/' + direct_lower_str[direct]):
                for file_name in file_list:
                    filename = os.path.join(base_path, file_name)
                    if filename[-4:] != '.png' and filename[-4:] != '.jpg':
                        continue
                    if base_path == 'query/side':
                        query_img_name[direct].append(filename)
                        query_img[direct].append(
                            cv2.imread(filename, cv2.IMREAD_GRAYSCALE))
                        img_hog_temp = cv2.imread(filename)
                        img_hog_reverse = cv2.flip(
                            img_hog_temp, -1)  # flipped horizontally & vertically
                        query_img_hog[direct].append(
                            [img_hog_temp, img_hog_reverse])
                    else:
                        query_img_name[direct].append(filename)
                        query_img[direct].append(
                            cv2.imread(filename, cv2.IMREAD_GRAYSCALE))
                        query_img_hog[direct].append(cv2.imread(filename))
                # rotate query img in rotate directory for augmentation
                for folder_name in folder_list:
                    if folder_name != 'rotate':
                        continue
                    for base_path_rot, folder_list_rot, file_list_rot in os.walk(
                            'query/' + direct_lower_str[direct] + '/rotate'):
                        for file_name_rot in file_list_rot:
                            filename_rot = os.path.join(base_path_rot, file_name_rot)
                            if filename_rot[-4:] != '.png' and filename_rot[-4:] != '.jpg':
                                continue
                            query_img_name[direct].append(filename_rot)
                            query_img[direct].append(
                                cv2.imread(filename_rot, cv2.IMREAD_GRAYSCALE))
                            img_hog_temp = cv2.imread(filename_rot)
                            img_hog_aug = rotation.rotate(img_hog_temp)
                            query_img_hog[direct].append(img_hog_aug)
                break  # only traverse top level

        # re-sort the image lists together, in case of inconsistency
        img_file_map = dict()
        for direct in range(FRONT, NONE):
            for img_name_temp, img_temp, img_hog_temp in zip(
                    query_img_name[direct], query_img[direct], query_img_hog[direct]):
                img_file_map[img_name_temp] = [img_temp, img_hog_temp]
            query_img_name[direct].sort()
        query_img = [[], [], []]
        query_img_hog = [[], [], []]
        for direct in range(FRONT, NONE):
            for img_name_temp in query_img_name[direct]:
                query_img[direct].append(img_file_map[img_name_temp][0])
                query_img_hog[direct].append(img_file_map[img_name_temp][1])

        # Initiate HOG fd
        # fd = hog.hog_des(query_img_hog)
        fd = hog.load_fd()

        # Initiate ORB detector (used here as a free alternative to SIFT)
        orb = cv2.ORB_create()
        # find the keypoints and descriptors for each query image
        for direct in range(FRONT, NONE):
            for img_temp, img_name in zip(query_img[direct], query_img_name[direct]):
                kp_temp, des_temp = orb.detectAndCompute(img_temp, None)
                if des_temp is None:
                    if DUMP == 1:
                        print(img_name + ": SIFT cannot detect keypoints and descriptor")
                    # kp[direct].append(None)
                    des[direct].append(None)
                    sift_pt[direct].append(None)
                    continue
                # kp[direct].append(kp_temp)
                des[direct].append(des_temp)
                # cv2.KeyPoint objects are not picklable, so store their fields as tuples
                pt_list = []
                for pt_temp in kp_temp:
                    pt_list.append((pt_temp.pt, pt_temp.size, pt_temp.angle,
                                    pt_temp.response, pt_temp.octave, pt_temp.class_id))
                sift_pt[direct].append(pt_list)

        # load label json data
        img_json_map = dict()
        for direct in range(FRONT, NONE):
            for img_name_temp in query_img_name[direct]:
                img_json_map[img_name_temp] = json.load(
                    open(str(img_name_temp.split('.')[0]) + '.json'))

        # pack together
        sift_data = [
            query_img_name, query_img_hog, query_img_name, sift_pt, des, fd,
            img_json_map
        ]
        with open('match.dat', 'wb') as f:
            pickle.dump(sift_data, f)
        return sift_data
    else:
        target = 'match.dat'
        if os.path.getsize(target) > 0:
            with open(target, "rb") as f:
                unpickler = pickle.Unpickler(f)
                sift_data = unpickler.load()
            return sift_data
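# sift_init hand-rolls a compute-or-load cache around match.dat. The same
# pattern as a small reusable helper (a sketch; the names are illustrative,
# not from the original):
import os
import pickle

def cached(path, compute):
    """Load a pickled result from path, or compute and store it."""
    if os.path.exists(path) and os.path.getsize(path) > 0:
        with open(path, 'rb') as f:
            return pickle.Unpickler(f).load()
    result = compute()
    with open(path, 'wb') as f:
        pickle.dump(result, f)
    return result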
def mkcolorcolor(filt, catalog, starcatalog, cluster, magtype, save_file=None):
    import os
    import pickle
    import random
    import re
    from functools import cmp_to_key

    import pyfits
    import scipy
    import cutout_bpz

    print(filt)
    # locus_c = get_locus()
    locus_c = cutout_bpz.locus()
    with open('maglocus', 'rb') as f:  # binary mode for pickle
        locus_m = pickle.Unpickler(f).load()
    base = os.environ['sne'] + '/photoz/' + cluster + '/'
    f = open(base + 'stars.html', 'w')
    filt.sort(key=cmp_to_key(cutout_bpz.sort_filters))  # sort_filters is a cmp-style comparator
    print(filt)

    # group filters
    groups = {}
    for filter2 in filt:
        num = cutout_bpz.filt_num(filter2)
        if num not in groups:
            groups[num] = []
        groups[num].append(filter2)
    print(groups)

    print(catalog, starcatalog)
    p = pyfits.open(catalog)['OBJECTS'].data
    s = pyfits.open(starcatalog)
    indices = s['OBJECTS'].data.field('SeqNr')
    for index in indices:
        p.field('CLASS_STAR')[index - 1] = -999
    mask = p.field('CLASS_STAR') == -999
    p = p[mask]
    print(len(p))
    # mask = p.field('FWHM_WORLD')*3600. < 1.1
    # p = p[mask]

    filt_groups = []  # was bound to the name `list` in the original, shadowing the builtin
    for g in sorted(groups.keys()):
        filt_groups.append(groups[g])
    print(filt_groups)

    l_new = []
    l_fs = {}
    locus_dict_obj = {}
    for filt in filt_groups:
        for f2 in filt:
            a_short = f2.replace('+', '').replace('C', '')[-1]
            ok = True
            if 'MEGAPRIME' in f2:
                a_short = 'MP' + a_short.upper() + 'SUBARU'
            elif 'SUBARU' in f2:
                if "W-S-" in f2:
                    a_short = 'WS' + a_short.upper() + 'SUBARU'
                else:
                    a_short = a_short.upper() + 'JOHN'
            if "-1-" not in f2:
                ok = False
            if ok:
                l_new.append([filt, a_short])
                l_fs[a_short] = 'yes'
                print(a_short, filt)
                if a_short not in locus_dict_obj:
                    locus_dict_obj[a_short] = []
                locus_dict_obj[a_short].append(f2)

    good_fs = []
    for k1 in locus_c.keys():
        res = re.split('_', k1)
        if res[0] in l_fs and res[1] in l_fs:
            good_fs.append([res[0], res[1]])
    print(locus_dict_obj, good_fs)

    zps_dict_obj = {}
    # preferred filters kept from the original, currently unused:
    # ['MPUSUBARU', 'VJOHN', 'RJOHN', 'IJOHN', 'WSZSUBARU', 'WSISUBARU']
    filters_fs = []
    for f1A in l_fs.keys():
        zps_dict_obj[f1A] = 0
        for a in locus_dict_obj[f1A]:
            filters_fs.append([a, f1A])
    print(zps_dict_obj)

    zps_list_full = list(zps_dict_obj.keys())
    zps_list = zps_list_full[1:]
    zps = {}
    zps_rev = {}
    for i in range(len(zps_list)):
        zps[zps_list[i]] = i
        zps_rev[i] = zps_list[i]

    table = p
    loci = len(locus_m)
    stars = len(table.field('MAG_' + magtype + '-' + filters_fs[0][0]))
    locus_list = []
    for j in range(len(locus_m)):
        o = []
        for c in filters_fs:
            o.append(locus_m[j][c[1]])
        locus_list.append(o)
    print(locus_list[0])

    results = {}
    for iteration in ['full']:  # ,'bootstrap1','bootstrap2','bootstrap3','bootstrap4']:
        # make matrix with a locus for each star
        locus_matrix = scipy.array(stars * [locus_list])
        print(locus_matrix.shape)
        # assemble magnitude and error matrices
        A_band = scipy.swapaxes(scipy.swapaxes(scipy.array(
            loci * [[table.field('MAG_' + magtype + '-' + a[0]) for a in filters_fs]]), 0, 2), 1, 2)
        A_err = scipy.swapaxes(scipy.swapaxes(scipy.array(
            loci * [[table.field('MAGERR_' + magtype + '-' + a[0]) for a in filters_fs]]), 0, 2), 1, 2)
        A_band[A_err > 0.2] = -99
        A_err[A_err > 0.2] = 100000.

        # make matrix specifying good values
        good = scipy.ones(A_band.shape)
        good[A_band == -99] = 0
        good = good[:, 0, :]
        good_test = good.sum(axis=1)  # number of good measurements for any given star

        # figure out the cut-off
        cut_off = sorted(good_test)[-20] - 1
        A_band = A_band[good_test > cut_off]
        A_err = A_err[good_test > cut_off]
        locus_matrix = locus_matrix[good_test > cut_off]

        if True:  # 'bootstrap' in iteration:
            length = len(A_band)
            randvec = scipy.array([random.random() for ww in range(length)])
            fraction = 0.5
            mask = randvec < fraction
            A_band = A_band[mask]
            A_err = A_err[mask]
            locus_matrix = locus_matrix[mask]

        stars_good = len(locus_matrix)
        good = scipy.ones(A_band.shape)
        good[A_band == -99] = 0
        good_test = good[:, 0, :].sum(axis=1)
        good = good[good_test > 1]
        star_mag_num = good[:, 0, :].sum(axis=1)

        def errfunc(pars, residuals=False):
            A_zp = scipy.swapaxes(scipy.array(
                loci * [stars_good * [[assign_zp(a[1], pars, zps) for a in filters_fs]]]), 0, 1)
            ds_prelim = A_band - locus_matrix - A_zp
            numerator = (ds_prelim / A_err**2.).sum(axis=2)
            denominator = (1. / A_err**2.).sum(axis=2)
            average = numerator / denominator
            A_band_average = scipy.swapaxes(scipy.swapaxes(
                scipy.array(len(filters_fs) * [average]), 0, 2), 0, 1)
            ds_prelim = (A_band - A_band_average - locus_matrix - A_zp)**2.
            ds = (ds_prelim.sum(axis=2))**0.5
            # formula from High 2009
            dotprod = abs((A_band - A_band_average - locus_matrix - A_zp) * A_err)
            dotprod[good == 0] = 0.  # zero weight for poor measurements not in fit
            dotprod_sum = dotprod.sum(axis=2)
            sum_diff = ds**2. / dotprod_sum
            dist = ds.min(axis=1)
            select_diff = sum_diff.min(axis=1)  # see if it matches best to end stars
            stat_tot = select_diff.sum()
            print(pars, stat_tot)
            if residuals:
                return select_diff, dist
            return stat_tot

        import pylab
        # now rerun after cutting outliers
        if False:
            pinit = scipy.zeros(len(zps_list))
            from scipy import optimize
            out = optimize.fmin(errfunc, pinit, args=())
            residuals, dist = errfunc(pars=[0.] + out.tolist(), residuals=True)
            # out = [0., -0.16945683, -0.04595967, 0.06188451, 0.03366916]
            # first filter on distance
            locus_matrix = locus_matrix[dist < 1]
            good = good[dist < 1]
            residuals = residuals[dist < 1]
            # then filter on residuals
            locus_matrix = locus_matrix[residuals < 6]
            good = good[residuals < 6]
            stars_good = len(locus_matrix)
            star_mag_num = good[:, 0, :].sum(axis=1)

        pinit = scipy.zeros(len(zps_list))
        from scipy import optimize
        out = optimize.fmin(errfunc, pinit, args=())
        print(out)
        results[iteration] = dict(zip(zps_list_full, [0.] + out.tolist()))
        print(results)
        pylab.clf()

    errors = {}
    print('BOOTSTRAPPING ERRORS:')
    for key in zps_list_full:
        l = []
        for r in results.keys():
            if r != 'full':
                l.append(results[r][key])
        print(key + ':', scipy.std(l), 'mag')
        errors[key] = scipy.std(l)

    def save_results(save_file, results, errors):
        out_f = open(save_file, 'w')
        for key in results['full'].keys():
            out_f.write(key + ' ' + str(results['full'][key]) + ' +- ' +
                        str(errors[key]) + '\n')
        out_f.close()
        pickle_f = open(save_file + '.pickle', 'wb')  # binary mode for pickle
        pickle.Pickler(pickle_f).dump({'results': results, 'errors': errors})
        pickle_f.close()

    # the original tested `save_results is not None`, which is always true
    # since it names the function above; the intent was clearly `save_file`
    if 'full' in results and save_file is not None:
        save_results(save_file, results, errors)
    return results
def read_UserData():
    with open("UserData", "rb") as fichier:
        monPickler = pickle.Unpickler(fichier)
        DataBank = monPickler.load()
    return DataBank
def get_locus():
    import pickle
    with open('newlocus', 'rb') as f:  # binary mode; the original used 'r', which fails on Python 3
        locus = pickle.Unpickler(f).load()
    return locus
    print('ERROR TYPO freq')
    sys.exit(0)
if dt_type != '3comp' and dt_type != 'hori' and dt_type != 'vert':
    print('ERROR TYPE dt_type')
    sys.exit(0)

path_origin = os.getcwd()[:-6]
path = path_origin + '/Kumamoto/' + dossier
path_data = (path + '/' + dossier + '_vel_' + freq + 'Hz/' + dossier +
             '_vel_' + freq + 'Hz_' + dt_type + '_env_smooth_S_impulse')
path_results = path + '/' + dossier + '_results'
list_fich = os.listdir(path_data)

os.chdir(path)
with open(dossier + '_veldata', 'rb') as myfch:
    mydp = pickle.Unpickler(myfch)
    dict_vel = mydp.load()
dict_vel_used = dict_vel[1]

os.chdir(path_data)
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('Time')
stt = read(list_fich[0])
tarrival = stt[0].stats.starttime
t_start_ref = None
for cles in dict_vel[2].keys():
    if t_start_ref is None or t_start_ref > dict_vel[2][cles]:
import pickle

import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (10.0, 6.0)

from SunSpot import errors as err
from SunSpot import preprocessing as pre
from SunSpot import cusum_design_bb as chart
from SunSpot import alerts as plot
from SunSpot import svm_training as svm
from SunSpot import autocorrelations as bbl

### load data (loaded automatically with package)
data_path = pkg.resource_filename(pkg.Requirement.parse("SunSpot"), 'data')

with open(data_path + '/data_1981', 'rb') as file:
# with open('data/data_1981', 'rb') as file:  # local
    my_depickler = pickle.Unpickler(file)
    Ns = my_depickler.load()  # number of spots
    Ng = my_depickler.load()  # number of sunspot groups
    Nc = my_depickler.load()  # composite: Ns + 10*Ng
    station_names = my_depickler.load()  # codenames of the stations
    time = my_depickler.load()  # time

### compute the long-term errors
mu2 = err.long_term_error(Ns, period_rescaling=8, wdw=27)

### discard stations with no values
ind_nan = []
for i in range(mu2.shape[1]):
    if not np.all(np.isnan(mu2[:, i])):
        ind_nan.append(i)
mu2 = mu2[:, ind_nan]
pickle.load(file=file_)  # $ decodeInput=file_ decodeOutput=pickle.load(..) decodeFormat=pickle decodeMayExecuteInput

pickle.loads(payload)  # $ decodeInput=payload decodeOutput=pickle.loads(..) decodeFormat=pickle decodeMayExecuteInput

# using this keyword argument is disallowed from Python 3.9
pickle.loads(data=payload)  # $ decodeInput=payload decodeOutput=pickle.loads(..) decodeFormat=pickle decodeMayExecuteInput

# We don't really have a good way to model a decode happening over multiple statements
# like this. Since the important bit for `py/unsafe-deserialization` is the input, that
# is the main focus. We do a best effort to model the output though (but that will only
# work in local scope).
unpickler = pickle.Unpickler(file_)  # $ decodeInput=file_ decodeFormat=pickle decodeMayExecuteInput
unpickler.load()  # $ decodeOutput=unpickler.load()

unpickler = pickle.Unpickler(file=file_)  # $ decodeInput=file_ decodeFormat=pickle decodeMayExecuteInput

marshal.load(file_)  # $ decodeInput=file_ decodeOutput=marshal.load(..) decodeFormat=marshal decodeMayExecuteInput
marshal.loads(payload)  # $ decodeInput=payload decodeOutput=marshal.loads(..) decodeFormat=marshal decodeMayExecuteInput

# if the file opened has been controlled by an attacker, this can lead to code
# execution. (underlying file format is pickle)
shelve.open(
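# The annotations above flag Unpickler input as potentially executing code. The
# pickle documentation's recommended hardening is to override find_class and
# whitelist what may be instantiated; a minimal sketch of that pattern:
import builtins
import io
import pickle

SAFE_BUILTINS = {"range", "complex", "set", "frozenset", "slice"}

class RestrictedUnpickler(pickle.Unpickler):
    def find_class(self, module, name):
        # Only allow a handful of harmless builtins; refuse everything else.
        if module == "builtins" and name in SAFE_BUILTINS:
            return getattr(builtins, name)
        raise pickle.UnpicklingError(f"global '{module}.{name}' is forbidden")

def restricted_loads(payload: bytes):
    """Like pickle.loads, but refuses to resolve arbitrary globals."""
    return RestrictedUnpickler(io.BytesIO(payload)).load()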
#                 plt.text(G.nodes[u]["coordonnees"][0] - 0.15, G.nodes[v]["coordonnees"][1] + 0.25, data["label"], fontsize=8)
#             else:
#                 plt.text(G.nodes[u]["coordonnees"][0] - 0.15, G.nodes[u]["coordonnees"][1] + 0.25, data["label"], fontsize=8)
#         elif G.nodes[u]["coordonnees"][1] == G.nodes[v]["coordonnees"][1]:
#             if G.nodes[u]["coordonnees"][0] > G.nodes[v]["coordonnees"][0]:
#                 plt.text(G.nodes[v]["coordonnees"][0] + 0.25, G.nodes[u]["coordonnees"][1] - 0.15, data["label"], fontsize=8)
#             else:
#                 plt.text(G.nodes[u]["coordonnees"][0] + 0.25, G.nodes[u]["coordonnees"][1] - 0.15, data["label"], fontsize=8)
        seen[(u, v)] = rad
        ax.add_patch(e)
    return e


if __name__ == '__main__':
    with open("fichier_comp_grands_graphes_V2.pickle", 'rb') as fichier_graphe:
        mon_depickler = pickle.Unpickler(fichier_graphe)
        dico_graphe = mon_depickler.load()
    with open("fichiers_pickle/a-minor_test2.pickle", 'rb') as fichier_pickle:
        mon_depickler = pickle.Unpickler(fichier_pickle)
        tab_aminor = mon_depickler.load()
    with open("grands_graphes.pickle", 'rb') as fichier:
        mon_depickler = pickle.Unpickler(fichier)
        dico_graphes = mon_depickler.load()

    for comp in dico_graphe.keys():
        # comp = (('1FJG', 'A', 48, 8), ('5J5B', 'BA', 48, 23))
        fig, axs = plt.subplots(figsize=(10, 12), nrows=1, ncols=2)
        # fig = plt.figure()
        compteur = 0
        columns = 2
            'ALT': 'alt',
            'AST': 'ast',
            'Bilirubin': 'bilirubin',
            'Creatinine': 'creatinine',
            'INR': 'inr',
            'BMI': 'bmi',
            'Platelets': 'platelets',
            'Diabetes': 'diabetes'}
lower_features = list(features.values())
num_features = ['albumin', 'alp', 'alt', 'ast', 'bilirubin', 'creatinine',
                'inr', 'bmi', 'platelets']
data = data[list(features.keys()) + ['target']]
data.rename(columns=features, inplace=True)

with open(algdir + '/trained_models_imp_scl.pkl', 'rb') as file:
    unpickler = pickle.Unpickler(file)
    outputs = unpickler.load()
scl_info = outputs['scl_info']
SVM_model = outputs['svm_model']
RFC_model = outputs['rfc_model']
GBC_model = outputs['gbc_model']
LOG_model = outputs['log_model']
MLP_model = outputs['mlp_model']

# Impute and scale based on the training set distribution
null_cols = pd.DataFrame(data.isnull().sum(axis=0)).rename(columns={0: 'null_count'})
null_cols = null_cols.loc[null_cols['null_count'] != 0]
for col in null_cols.index:
    data[col] = np.where(data[col].isnull(), scl_info[col + '_mean'], data[col])
def load_dataset(FileName_PositiveInstancesDictionnary=default_FileName_PositiveInstancesDictionnary,
                 FileName_ListProt=default_FileName_ListProt,
                 FileName_ListMol=default_FileName_ListMol,
                 FileName_MolKernel=default_FileName_MolKernel,
                 FileName_DicoMolKernel_indice2instance=default_FileName_DicoMolKernel_indice2instance,
                 FileName_DicoMolKernel_instance2indice=default_FileName_DicoMolKernel_instance2indice):
    """
    Load the dataset and the molecule kernel.

    :param FileName_PositiveInstancesDictionnary: (string) tsv file name: each line corresponds to a molecule;
        1st column: DrugBank ID of the molecule
        2nd column: number of targets of the corresponding molecule
        other columns: UniprotIDs of the molecule targets (one per column)
    :param FileName_ListProt: (string) txt file name: each line gives the UniprotID of a protein of the dataset
    :param FileName_ListMol: (string) txt file name: each line gives the DrugBankID of a molecule of the dataset
    :param FileName_MolKernel: (string) pickle file name: contains the molecule kernel (np.array)
    :param FileName_DicoMolKernel_indice2instance: (string) pickle file name: dictionary linking indices of the
        molecule kernel to the corresponding molecule IDs
    :param FileName_DicoMolKernel_instance2indice: (string) pickle file name: dictionary linking molecule IDs
        to indices in the molecule kernel
    :return K_mol: (np.array: number_of_mol^2) molecule kernel
    :return DicoMolKernel_ind2mol: (dictionary) keys are indices of the molecule kernel (i.e. integers between
        0 and number_of_mol) and values are the DrugBankIDs of the corresponding molecules
    :return DicoMolKernel_mol2ind: (dictionary) keys are DrugBankIDs and values are their corresponding
        indices in the molecule kernel
    :return interaction_matrix: (np.array: number_of_mol*number_of_prot) array whose values are 1 if the
        molecule/protein couple interacts and 0 otherwise
    """
    ## loading molecule kernel and its associated dictionaries
    with open(FileName_MolKernel, 'rb') as fichier:
        pickler = pickle.Unpickler(fichier)
        K_mol = pickler.load().astype(np.float32)
    with open(FileName_DicoMolKernel_indice2instance, 'rb') as fichier:
        pickler = pickle.Unpickler(fichier)
        DicoMolKernel_ind2mol = pickler.load()
    with open(FileName_DicoMolKernel_instance2indice, 'rb') as fichier:
        pickler = pickle.Unpickler(fichier)
        DicoMolKernel_mol2ind = pickler.load()

    ## loading the protein list of the dataset
    list_prot_of_dataset = []
    f_in = open(FileName_ListProt, 'r')
    for line in f_in:
        list_prot_of_dataset.append(line.rstrip())
    f_in.close()

    ## loading the molecule list of the dataset
    list_mol_of_dataset = []
    f_in = open(FileName_ListMol, 'r')
    for line in f_in:
        list_mol_of_dataset.append(line.rstrip())
    f_in.close()

    ## loading the list of targets per molecule of the dataset
    # initialization
    dico_targets_per_mol = {}
    for mol in list_mol_of_dataset:
        dico_targets_per_mol[mol] = []
    # filling
    f_in = open(FileName_PositiveInstancesDictionnary, 'r')
    reader = csv.reader(f_in, delimiter='\t')
    for row in reader:
        nb_prot = int(row[1])
        for j in range(nb_prot):
            dico_targets_per_mol[row[0]].append(row[2 + j])
    del reader
    f_in.close()

    ## building the interaction matrix
    interaction_matrix = np.zeros((len(list_mol_of_dataset), len(list_prot_of_dataset)),
                                  dtype=np.float32)
    for i in range(len(list_mol_of_dataset)):
        list_of_targets = dico_targets_per_mol[list_mol_of_dataset[i]]
        nb = 0
        for j in range(len(list_prot_of_dataset)):
            if list_prot_of_dataset[j] in list_of_targets:
                interaction_matrix[i, j] = 1
                nb += 1
        ### FOR TESTING
        # if len(list_of_targets) != nb:
        #     print("alerte")
        #     exit(1)
    return K_mol, DicoMolKernel_ind2mol, DicoMolKernel_mol2ind, interaction_matrix
def main():
    stop_word_set = get_stop_words()
    f = open('/Users/calvin/Documents/Lehigh/English/Research/data/cap1.pkl', 'rb')
    unpickler = pkl.Unpickler(f)
    data_array = []
    count = 0
    x_sum = 0
    y_sum = 0
    # pull out the first 100000 tweets; note this is easy to change, but speed and space
    # concerns make this limited. I think that doing a random sample would be better
    for x in range(0, 100000):
        try:
            dd = pkl.load(f)
        except EOFError:
            break
        except Exception:
            print(count)
            count += 1
            unpickler.load()  # skip the unreadable record
            continue
        else:
            # right now we just take the first coordinate in the bounding box as the actual;
            # we could average to find the middle, but this seems good enough for now
            if dd['coordinates'] is None:
                if dd['place'] is None:
                    continue
                dd['coordinates'] = dd['place']['bounding_box']['coordinates'][0][0]
            else:
                # account for edge case where coordinates are wrapped
                dd['coordinates'] = dd['coordinates']['coordinates']
            # count how many samples we take
            count += 1
            # sum up the coordinate values
            x_sum += dd['coordinates'][0]
            y_sum += dd['coordinates'][1]
            # append the data point to the data array
            data_array.append(dd)
    # todo: average the bounding box instead of taking its first corner
    # take the mean of the x coordinates and y coordinates
    # x_mean = x_sum / count
    # y_mean = y_sum / count
    # text_list = []
    # print(rms(data_array, x_mean, y_mean, count))
    # for d in data_array:
    #     tok = d['text'].split()
    #     for w in tok:
    #         l = w.lower()
    #         if l in stop_word_set:
    #             continue
    #         text_list.append(l)
    #
    # counts = Counter(text_list)
    # print(counts.most_common(15))
    inputs = []
    for d in data_array:
        inputs.append(d['coordinates'])
    plot_squared_clustering_errors(inputs)
    cluster = KMeans(20)
    cluster.train(inputs)
    print(cluster.means)
def __init__(self):
    ## Other variables -----------------------
    global version
    self.colorText = ""
    if sys.platform == "win32":
        self.colorText = "color: #334d9b"
    if choicedBackgroundColor() == 1:
        self.colorText = "color: white;"
    self.version = version
    self.files_enregistrement = None
    self.connected_one = None
    self.ftp = Online()
    # self.ftp.downloadftp("comptes.spi")
    self.ftp.downloadftp("admin.spi")
    try:
        with open("./bin/comptes.spi", "rb") as file:
            depickle = pickle.Unpickler(file)
            self.files_enregistrement = depickle.load()
            Donnees.comptes = self.files_enregistrement
    except Exception:
        with open("./bin/comptes.spi", "wb") as file:
            pickler = pickle.Pickler(file)
            pickler.dump({"uadmin": "padmin"})
        # chained assignment; the original tuple-unpacked the dict, which raises
        Donnees.comptes = self.files_enregistrement = {"uadmin": "padmin"}

    ## APPLICATION ----------------------------
    self.app = QApplication(sys.argv)
    self.win = QWidget()
    x, y = 650, 320
    self.posx, self.posy = center(x, y)
    self.win.setGeometry(self.posx, self.posy, x, y)
    self.win.setWindowTitle("Page de Connexion")
    self.win.setWindowFlag(Qt.FramelessWindowHint)
    self.win.setWindowIcon(QIcon("./bin/icon1.png"))
    self.win.show()

    self.label0 = QLabel(self.win)
    self.label0.move(0, 0)
    self.label0.resize(x, y)
    self.label0.setStyleSheet(getBackgroundColor())
    self.label0.show()

    self.label1 = QLabel(self.win)
    self.label1.setText("Choix")
    self.label1.move(20, 10)
    self.label1.setFont(QFont('Mangal', 80))
    self.label1.setStyleSheet(self.colorText)
    self.label1.adjustSize()
    self.label1.show()

    self.label2 = QLabel(self.win)
    self.label2.setText("Mauvais identifiants, réessayez.")
    self.label2.move(260, 150)
    self.label2.setFont(QFont('Mangal', 11))
    self.label2.adjustSize()
    # self.label2.show()

    self.label3 = QLabel(self.win)
    self.label3.setText("Vérification de version en cours...")
    self.label3.move(20, 190)
    self.label3.setFont(QFont('Mangal', 11))
    self.label3.setStyleSheet(self.colorText)
    self.label3.adjustSize()
    self.label3.show()
    self.threadLabel3 = Thread(None, self.version_search)
    self.threadLabel3.start()

    self.champ1 = QLineEdit(self.win)
    self.champ1.move(20, 140)
    self.champ1.resize(220, 30)
    self.champ1.setFont(QFont('Mangal', 15))
    # self.champ1.show()

    self.champ2 = QLineEdit(self.win)
    self.champ2.setEchoMode(QLineEdit.Password)
    self.champ2.move(20, 180)
    self.champ2.setFont(QFont('Mangal', 15))
    self.champ2.resize(220, 30)
    # self.champ2.show()

    self.bouton1 = QPushButton(self.win)
    self.bouton1.setText(" Se connecter ")
    self.bouton1.move(20, 220)
    self.bouton1.setFont(QFont('Mangal', 20))
    self.bouton1.clicked.connect(self.connection)
    self.openAction = QAction("&ouvrir", self.win)
    self.openAction.setShortcut("Return")
    self.openAction.triggered.connect(self.connection)
    self.win.addAction(self.openAction)
    # self.bouton1.show()

    self.bouton2 = QPushButton(self.win)
    self.bouton2.setText(" S'enregistrer ")
    self.bouton2.move(220, 220)
    self.bouton2.setFont(QFont('Mangal', 20))
    self.bouton2.clicked.connect(self.register_window)
    # self.bouton2.show()

    self.bouton3 = QPushButton(self.win)
    self.bouton3.setText("Fermer")
    self.bouton3.move(20, 270)
    self.bouton3.setFont(QFont('Mangal', 11))
    self.bouton3.clicked.connect(self.quitterNet)
    self.bouton3.show()

    self.bouton4 = QPushButton(self.win)
    self.bouton4.setText("Télécharger ?")
    self.bouton4.move(400, 220)
    self.bouton4.setFont(QFont('Mangal', 20))
    self.bouton4.setStyleSheet(self.colorText)
    self.bouton4.clicked.connect(self.updateDownload)

    self.radio1 = QRadioButton(self.win)
    self.radio1.setText("En Ligne")
    self.radio1.move(120, 275)
    self.radio1.setStyleSheet(self.colorText)
    self.radio1.adjustSize()
    self.radio1.toggled.connect(self.onlineOrNot)
    self.radio1.show()

    self.radio2 = QRadioButton(self.win)
    self.radio2.setText("Hors Ligne")
    self.radio2.move(200, 275)
    self.radio2.setStyleSheet(self.colorText)
    self.radio2.adjustSize()
    self.radio2.toggled.connect(self.onlineOrNot)
    self.radio2.show()

    # --------------- Registration page --------------
    self.win2 = QWidget()
    x2, y2 = 270, 400
    self.posx2, self.posy2 = center(x2, y2)
    self.win2.setGeometry(self.posx2, self.posy2, x2, y2)
    self.win2.setWindowTitle("S'enregistrer")
    self.win2.setWindowIcon(QIcon("./bin/icon1.png"))

    self.labelWin21 = QLabel(self.win2)
    self.labelWin21.setText("S'enregistrer")
    self.labelWin21.move(20, 10)
    self.labelWin21.setFont(QFont('Mangal', 30))
    self.labelWin21.adjustSize()
    self.labelWin21.show()

    self.labelWin22 = QLabel(self.win2)
    self.labelWin22.setText("Nouveau nom d'utilisateur")
    self.labelWin22.move(20, 70)
    self.labelWin22.setFont(QFont('Mangal', 12))
    self.labelWin22.adjustSize()
    self.labelWin22.show()

    self.champWin21 = QLineEdit(self.win2)
    self.champWin21.setText("username")
    self.champWin21.move(20, 90)
    self.champWin21.resize(220, 30)
    self.champWin21.show()

    self.labelWin23 = QLabel(self.win2)
    self.labelWin23.setText("Mot de passe")
    self.labelWin23.move(20, 130)
    self.labelWin23.setFont(QFont('Mangal', 12))
    self.labelWin23.adjustSize()
    self.labelWin23.show()

    self.champWin22 = QLineEdit(self.win2)
    self.champWin22.setEchoMode(QLineEdit.Password)
    self.champWin22.move(20, 150)
    self.champWin22.resize(220, 30)
    self.champWin22.show()

    self.labelWin24 = QLabel(self.win2)
    self.labelWin24.setText("Retapez le mot de passe")
    self.labelWin24.move(20, 190)
    self.labelWin24.setFont(QFont('Mangal', 12))
    self.labelWin24.adjustSize()
    self.labelWin24.show()

    self.champWin23 = QLineEdit(self.win2)
    self.champWin23.setEchoMode(QLineEdit.Password)
    self.champWin23.move(20, 210)
    self.champWin23.resize(220, 30)
    self.champWin23.show()

    self.labelWin25 = QLabel(self.win2)
    self.labelWin25.setText("Retapez le mot de passe")
    self.labelWin25.move(20, 250)
    self.labelWin25.setFont(QFont('Mangal', 12))
    self.labelWin25.adjustSize()

    self.boutonWin21 = QPushButton(self.win2)
    self.boutonWin21.setText("S'enregistrer")
    self.boutonWin21.move(20, 300)
    self.boutonWin21.setFont(QFont('Mangal', 13))
    self.boutonWin21.clicked.connect(self.register)
    self.boutonWin21.show()

    self.app.exec_()
def __init__(self, conf_dict):
    self.demand_model_config = conf_dict["sim_general_conf"]
    self.sim_scenario_conf = conf_dict["sim_scenario_conf"]
    supply_model = conf_dict["supply_model_object"]
    self.demand_model_folder = conf_dict["demand_model_folder"]
    self.city = self.demand_model_config["city"]

    demand_model_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
        "demand_modelling", "demand_models",
        self.demand_model_config["city"], self.demand_model_folder)

    # demand modelling
    self.grid = pickle.Unpickler(
        open(os.path.join(demand_model_path, "grid.pickle"), "rb")).load()
    self.grid_matrix = pickle.Unpickler(
        open(os.path.join(demand_model_path, "grid_matrix.pickle"), "rb")).load()
    self.avg_out_flows_train = pickle.Unpickler(
        open(os.path.join(demand_model_path, "avg_out_flows_train.pickle"), "rb")).load()
    self.avg_in_flows_train = pickle.Unpickler(
        open(os.path.join(demand_model_path, "avg_in_flows_train.pickle"), "rb")).load()
    self.valid_zones = pickle.Unpickler(
        open(os.path.join(demand_model_path, "valid_zones.pickle"), "rb")).load()
    self.neighbors_dict = pickle.Unpickler(
        open(os.path.join(demand_model_path, "neighbors_dict.pickle"), "rb")).load()
    self.integers_dict = pickle.Unpickler(
        open(os.path.join(demand_model_path, "integers_dict.pickle"), "rb")).load()
    self.closest_valid_zone = pickle.Unpickler(
        open(os.path.join(demand_model_path, "closest_valid_zone.pickle"), "rb")).load()

    self.avg_request_rate = self.integers_dict["avg_request_rate"]
    self.n_vehicles_original = self.integers_dict["n_vehicles_original"]
    self.avg_speed_mean = self.integers_dict["avg_speed_mean"]
    self.avg_speed_std = self.integers_dict["avg_speed_std"]
    self.avg_speed_kmh_mean = self.integers_dict["avg_speed_kmh_mean"]
    self.avg_speed_kmh_std = self.integers_dict["avg_speed_kmh_std"]
    self.max_driving_distance = self.integers_dict["max_driving_distance"]
    self.max_in_flow = self.integers_dict["max_in_flow"]
    self.max_out_flow = self.integers_dict["max_out_flow"]

    if self.demand_model_config["sim_technique"] == "traceB":
        self.bookings = pickle.Unpickler(
            open(os.path.join(demand_model_path, "bookings_test.pickle"), "rb")).load()
        self.booking_requests_list = self.get_booking_requests_list()
    elif self.demand_model_config["sim_technique"] == "eventG":
        self.request_rates = pickle.Unpickler(
            open(os.path.join(demand_model_path, "request_rates.pickle"), "rb")).load()
        self.trip_kdes = pickle.Unpickler(
            open(os.path.join(demand_model_path, "trip_kdes.pickle"), "rb")).load()

    # supply model
    if "n_requests" in self.sim_scenario_conf.keys():
        # 30 => 1 month
        self.desired_avg_rate = self.sim_scenario_conf["n_requests"] / 30 / 24 / 3600
        self.rate_ratio = self.desired_avg_rate / self.avg_request_rate
        self.sim_scenario_conf["requests_rate_factor"] = self.rate_ratio

    if "n_vehicles" in self.sim_scenario_conf.keys():
        self.n_vehicles_sim = self.sim_scenario_conf["n_vehicles"]
    elif "n_vehicles_factor" in self.sim_scenario_conf.keys():
        self.n_vehicles_sim = int(
            self.n_vehicles_original * self.sim_scenario_conf["n_vehicles_factor"])
    elif "fleet_load_factor" in self.sim_scenario_conf.keys():
        self.n_vehicles_sim = int(
            self.sim_scenario_conf["n_requests"] /
            self.sim_scenario_conf["fleet_load_factor"])

    if "tot_n_charging_poles" in self.sim_scenario_conf.keys():
        self.tot_n_charging_poles = self.sim_scenario_conf["tot_n_charging_poles"]
    elif "n_poles_n_vehicles_factor" in self.sim_scenario_conf.keys():
        self.tot_n_charging_poles = abs(
            self.n_vehicles_sim * self.sim_scenario_conf["n_poles_n_vehicles_factor"])
    elif self.sim_scenario_conf["cps_placement_policy"] == "old_manual":
        self.tot_n_charging_poles = len(self.sim_scenario_conf["cps_zones"]) * 4

    if self.sim_scenario_conf["distributed_cps"]:
        if ("cps_zones_percentage" in self.sim_scenario_conf
                and self.sim_scenario_conf["cps_placement_policy"] != "real_positions"):
            self.n_charging_zones = int(
                self.sim_scenario_conf["cps_zones_percentage"] * len(self.valid_zones))
        elif ("n_charging_zones" in self.sim_scenario_conf
                and self.sim_scenario_conf["cps_placement_policy"] != "real_positions"):
            self.n_charging_zones = self.sim_scenario_conf["n_charging_zones"]
            self.sim_scenario_conf["cps_zones_percentage"] = 1 / len(self.valid_zones)
        elif ("cps_zones" in self.sim_scenario_conf
                and self.sim_scenario_conf["cps_placement_policy"] != "real_positions"):
            self.n_charging_zones = len(self.sim_scenario_conf["cps_zones"])
        elif self.sim_scenario_conf["cps_placement_policy"] == "real_positions":
            self.n_charging_zones = 0
    elif self.sim_scenario_conf["battery_swap"]:
        self.n_charging_zones = 0
        self.tot_n_charging_poles = 0

    self.n_charging_poles_by_zone = {}
    self.vehicles_soc_dict = {}
    self.vehicles_zones = {}
    self.start = None

    self.zones_cp_distances = pd.Series()
    self.closest_cp_zone = pd.Series()

    self.supply_model_conf = dict()
    self.supply_model_conf.update(self.sim_scenario_conf)
    self.supply_model_conf.update({
        "city": self.city,
        "data_source_id": self.demand_model_config['data_source_id'],
        "n_vehicles": self.n_vehicles_sim,
        "tot_n_charging_poles": self.tot_n_charging_poles,
        "n_charging_zones": self.n_charging_zones,
    })

    if supply_model is not None:
        # If a supply model is provided, it overrides everything; possibly raise
        # exceptions/warnings if the parameters are not compatible.
        self.supply_model = supply_model
        self.n_vehicles_sim = supply_model.n_vehicles_sim
    else:
        self.supply_model = SupplyModel(
            self.supply_model_conf, self.demand_model_config["year"],
            demand_model_folder=self.demand_model_folder)
def lir_fichier_b(nom):
    """Read a binary (pickled) file and return the stored object."""
    import pickle
    with open(f"{nom}", 'rb') as fic:
        rec = pickle.Unpickler(fic)
        ret = rec.load()
    return ret
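# A matching binary writer for lir_fichier_b, assuming one object per file
# (hypothetical counterpart, not in the original):
def ecrire_fichier_b(nom, objet):
    """Write an object to a binary (pickle) file."""
    import pickle
    with open(f"{nom}", 'wb') as fic:
        pickle.Pickler(fic).dump(objet)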