def save_variables_to_klepto_file(file_name, variables_dict):
    """Save every entry of *variables_dict* into a klepto file archive.

    Storing a key that already exists simply overwrites it, so this also
    serves as an update. All key names must be convertible to strings
    (klepto requirement). Values that cannot be pickled are dropped from
    the archive (their names are printed) instead of failing the save.

    Parameters
    ----------
    file_name : str
        Archive path without the '.txt' extension.
    variables_dict : dict
        Mapping of variable names to values.

    Returns
    -------
    file_archive
        The archive containing the successfully stored entries.
    """
    db = file_archive(file_name + '.txt')
    for k, v in variables_dict.items():
        # Dump after each insertion so an unpicklable value is detected
        # (and removed) individually rather than aborting the whole save.
        db[k] = v
        try:
            db.dump()
        except Exception:  # was a bare except; keep best-effort semantics
            print(k)
            db.pop(k)
    db.dump()
    # Delete all garbage files saved parasitically to the folder:
    garbage_files = search_file('I_*')
    for file_string in garbage_files:
        os.remove(file_string)
    return db
def __init__(self):
    """Lazily load the kanwa jisyo archive, thread-safely, at most once.

    Classic double-checked locking: the first (unlocked) test skips the
    lock on the common already-initialized path; the second test, taken
    under the lock, prevents a racing thread from loading twice.
    """
    if self._jisyo_table is None:
        with self._lock:
            if self._jisyo_table is None:
                # Resolve the on-disk path of the kanwa dictionary from config.
                dictpath = Configurations().dictpath(Configurations().jisyo_kanwa)
                self._jisyo_table = file_archive(dictpath, {}, serialized=True)
                self._jisyo_table.load()
def save(self, path, name=None):
    """Serialize the model to disk as three artifacts.

    Writes ``<path>/<name>.clr`` (klepto archive of every attribute),
    ``<path>/<name>2.clr`` (a pickle of the whole object) and
    ``<path>/<name>-cdm.npy`` (the centroid-distance matrix).

    Parameters
    ----------
    path: string
        Path where the file should be save.
    name: string, default = None
        Name of the file.
    """
    # TODO: centroids_distance should not be computed here
    self.centroids_distance = self._centroids_distance_matrix()

    base = os.path.join(path, name)
    print("Saving", name, "in", path)
    archive = file_archive(base + '.clr')
    for attr, value in self.__dict__.items():
        archive[attr] = value
    archive.dump()

    with open(base + '2.clr', 'wb') as f:
        pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL)

    print("Exporting", name + "-cdm", "in", path)
    np.save(
        os.path.join(path, name + "-cdm") + '.npy',
        self.centroids_distance)
def __init__(self):
    """Lazily load the itaiji dictionary, thread-safely, at most once.

    Double-checked locking keeps the common already-loaded path lock-free
    while guaranteeing a single load under concurrent construction.
    """
    if self._itaijidict is None:
        with self._lock:
            if self._itaijidict is None:
                itaijipath = Configurations().dictpath(Configurations().jisyo_itaiji)
                self._itaijidict = file_archive(itaijipath, {}, serialized=True)
                self._itaijidict.load()
                # The dictionary builder stores the longest key length under
                # this sentinel key; cache it for later lookups.
                self._itaijidict_len = self._itaijidict['_max_key_len_']
def load_variables_from_klepto_file(file_name):
    """Load all entries from a klepto file archive and return them.

    Bug fix: the original built ``exec(str(k) + '=' + str(v))`` per entry,
    but ``exec`` binds names only in its own transient namespace, so the
    caller never saw any variable (and exec on stored data is unsafe).
    Returning the mapping lets the caller bind names explicitly, e.g.
    ``globals().update(load_variables_from_klepto_file(name))``.

    Parameters
    ----------
    file_name : str
        Path of the archive file (klepto keys must be str-convertible).

    Returns
    -------
    dict
        Mapping of archived variable names to their values.
    """
    db = file_archive(file_name)
    db.load()
    return dict(db)
def update_klepto(file_name, variables_dict):
    """Write (or overwrite) each entry of *variables_dict* in the archive.

    Storing a key that already exists in the archive replaces its value,
    so a separate "update" code path is unnecessary. All key names must
    be convertible to strings (klepto requirement).
    """
    archive = file_archive(file_name + '.txt')
    for name, value in variables_dict.items():
        archive[name] = value
    archive.dump()
def remove_variables_from_klepto_file(file_name, variable_names_list):
    """Remove one or several variables from a klepto file archive.

    Parameters
    ----------
    file_name : str
        Archive path without the '.txt' extension.
    variable_names_list : str or list of str
        A single key, or a list of keys, to delete from the archive.
    """
    db = file_archive(file_name + '.txt')
    # Normalize the single-string case so one code path handles both;
    # isinstance() is the idiomatic type test (was `type(x) == str`).
    if isinstance(variable_names_list, str):
        variable_names_list = [variable_names_list]
    for k in variable_names_list:
        db.archive.pop(k)
def load_variables_from_klepto_file(file_name):
    """Build an executable assignment script from a klepto file archive.

    Way of using: ``exec(load_variables_from_klepto_file(name))`` binds
    every archived variable in the caller's scope.

    Bug fix: the old manual quoting (``'\\'' + v + '\\''``) produced broken
    source for strings containing quotes, backslashes or newlines. Using
    ``repr()`` escapes those correctly and yields the same literal text as
    ``str()`` did for non-string values (ints, floats, lists, ...).

    All klepto key names must be convertible to strings.

    Returns
    -------
    str
        Newline-separated ``name=value`` assignment statements (empty
        string when the archive is empty).
    """
    db = file_archive(file_name + '.txt')
    db.load()
    lines = []
    for k, v in db.items():
        # repr() produces a valid source-code literal for any basic type.
        lines.append(str(k) + '=' + repr(v))
    return '\n' + '\n'.join(lines) if lines else ''
def _test_hits(algorithm, maxsize=20, keymap=None, rangelimit=5, tries=1000, archived=False): @algorithm(maxsize=maxsize, keymap=keymap) def f(x, y): return 3*x+y if archived: f.archive(file_archive('cache.pkl',cached=False)) domain = list(range(rangelimit)) domain += [float(i) for i in domain] for i in range(tries): r = f(choice(domain), choice(domain)) f.dump() #print(f.info()) return f.info()
def _test_hits(algorithm, maxsize=20, keymap=None, rangelimit=5, tries=1000, archived=False): @algorithm(maxsize=maxsize, keymap=keymap, purge=True) def f(x, y): return 3*x+y if archived: f.archive(file_archive('cache.pkl',cached=False)) domain = list(range(rangelimit)) domain += [float(i) for i in domain] for i in range(tries): r = f(choice(domain), choice(domain)) f.dump() #print(f.info()) return f.info()
def mkdict(self, src: str, dst: str):
    """Compile the text dictionary at *src* into a klepto archive at *dst*.

    Each non-comment, non-blank line has the form "<value> <key>" (after
    escape decoding); the longest key length is stored under the
    '_max_key_len_' sentinel key for later longest-match lookups.
    """
    longest = 0
    entries = {}
    with open(src, "r", encoding="utf-8") as f:
        for raw in f:
            line = raw.strip()
            if line.startswith(';;'):  # comment line
                continue
            if re.match(r"^$", line):  # blank line
                continue
            try:
                value, key = self.decode_escapes(line).split(' ')
            except ValueError:
                raise Exception("Cannot process dictionary line: ", line)
            entries[key] = value
            longest = max(longest, len(key))
    d = file_archive(dst, entries, serialized=True)
    d['_max_key_len_'] = longest
    d.dump()
def maketrans(self, src, dst):
    """Build a str.translate()-style table from *src*, archive it at *dst*.

    Each non-comment, non-blank line of *src* maps "<value> <char>"
    (after escape decoding), keyed by the character's code point. The
    0xFE00-0xFE01 and 0xE0100-0xE01EE variation-selector code points are
    mapped to None so translate() removes them.

    Fix: the local was named ``dict``, shadowing the builtin; renamed.
    """
    table = {}
    with open(src, 'r', encoding='utf-8') as f:
        for raw in f:
            line = raw.strip()
            if line.startswith(';;'):  # skip comment
                continue
            if re.match(r"^$", line):
                continue
            try:
                (v, k) = self.decode_escapes(line).split(' ')
                table[ord(k)] = v
            except ValueError:
                raise Exception("Cannot process dictionary line: ", line)
    # Variation selectors are stripped (mapped to None) on translation.
    for i in range(0xFE00, 0xFE02):
        table[i] = None
    for i in range(0xE0100, 0xE01EF):
        table[i] = None
    d = file_archive(dst, table, serialized=True)
    d.dump()
def mkdict(self, src, dst):
    """Compile a "<value> <key>" dictionary file into a klepto archive.

    Lines are read as bytes and decoded as UTF-8; ``\\uXXXX`` escapes are
    expanded before splitting. The longest key length is stored under the
    '_max_key_len_' sentinel key.

    NOTE(review): this uses ``unichr``, which exists only in Python 2 —
    on Python 3 it raises NameError unless ``unichr`` is aliased
    elsewhere in the module. Confirm the target interpreter.
    """
    max_key_len = 0
    dic = {}
    with open(src, "rb") as f:
        for line in f:
            line = line.decode("utf-8").strip()
            if line.startswith(';;'):  # skip comment
                continue
            if re.match(r"^$", line):  # skip blank line
                continue
            try:
                # Expand \uXXXX escapes, then split into (value, key).
                (v, k) = (re.sub(r'\\u([0-9a-fA-F]{4})', lambda x: unichr(int(x.group(1), 16)), line)).split(' ')
                dic[k] = v
                max_key_len = max(max_key_len, len(k))
            except ValueError:
                raise Exception("Cannot process dictionary line: ", line)
    d = file_archive(dst, dic, serialized=True)
    d['_max_key_len_'] = max_key_len
    d.dump()
def mkdict(self, src: str, dst: str):
    """Compile the dictionary file *src* into a klepto archive at *dst*.

    Non-comment, non-blank lines are escape-decoded and split on
    DICT_SEPARATOR into (value, key); the longest key length is stored
    under the '_max_key_len_' sentinel key.

    Raises
    ------
    SyntaxError
        Naming the source file and 1-based line number of any line that
        does not split into exactly two fields.
    """
    entries = {}
    longest = 0
    i = 0
    with open(src, "r", encoding="utf-8") as f:
        for raw in f:
            i += 1
            line = raw.strip()
            # Skip comment and blank lines.
            if line.startswith(';;') or re.match(r"^$", line):
                continue
            try:
                value, key = self.decode_escapes(line).split(DICT_SEPARATOR)
            except ValueError:
                raise SyntaxError(f"error in dictionary {os.path.basename(src)} line #{i}: {line}")
            entries[key] = value
            longest = max(longest, len(key))
    d = file_archive(dst, entries, serialized=True)
    d['_max_key_len_'] = longest
    d.dump()
def maketrans(self, src, dst):
    """Build a str.translate()-style table from *src*, archive it at *dst*.

    Non-comment, non-blank lines are escape-decoded and split on
    DICT_SEPARATOR into (value, char); the table is keyed by code point.
    The 0xFE00-0xFE01 and 0xE0100-0xE01EE variation-selector code points
    are mapped to None so translate() removes them.

    Fix: the local was named ``dict``, shadowing the builtin; renamed.
    (The trailing loops also no longer reuse the line-counter variable.)

    Raises
    ------
    SyntaxError
        Naming the source file and 1-based line number of any malformed line.
    """
    table = {}
    i = 0
    with open(src, 'r', encoding='utf-8') as f:
        for raw in f:
            i += 1
            line = raw.strip()
            if line.startswith(';;'):  # skip comment
                continue
            if re.match(r"^$", line):
                continue
            try:
                (v, k) = self.decode_escapes(line).split(DICT_SEPARATOR)
                table[ord(k)] = v
            except ValueError:
                raise SyntaxError(f"error in dictionary {os.path.basename(src)} line #{i}: {line}")
    # Variation selectors are stripped (mapped to None) on translation.
    for cp in range(0xFE00, 0xFE02):
        table[cp] = None
    for cp in range(0xE0100, 0xE01EF):
        table[cp] = None
    d = file_archive(dst, table, serialized=True)
    d.dump()
# Configure the psychometric fit to use a Weibull sigmoid.
options['sigmoidName'] = 'Weibull'

# NOTE(review): this loop applies every available style in turn, so only
# the last one matters — and it is immediately overridden by the next
# line anyway. Presumably exploratory leftovers; confirm if still needed.
for style in plt.style.available:
    plt.style.use(style)
plt.style.use(plt.style.available[0])

# %%
# with fit(data,options) as res:
#     plotPsych(res,
#               dataColor = [255./255, 0, 0],
#               lineColor = [255./255, 0, 0])
#
# with fit(data2,options) as res:
#     plotPsych(res,
#               dataColor = [0, 0, 255./255],
#               lineColor = [0, 0, 255./255])
# plt.savefig("Weibull"+argv[1])
#
# %%
# Fit both conditions and collect the resulting model objects.
fittedmodels = []
with fit(data,options) as res:
    fittedmodels.append(res)
    #plotsModelfit(res)
    #plt.savefig("WeibullC2"+argv[1])
with fit(data2,options) as res:
    fittedmodels.append(res)
    #plotsModelfit(res)
    #plt.savefig("WeibullC3"+argv[1])

# Persist both fitted models in a per-user klepto archive
# (user id comes from the first command-line argument).
db = file_archive("User_"+argv[1]+"_Model.mde")
db['C1'] = fittedmodels[0]
db['C2'] = fittedmodels[1]
db.dump()
def save_data(self, path, index, data):
    """Persist *data* under key *index* in the klepto archive at *path*."""
    archive = file_archive(path)
    archive[index] = data
    archive.dump()
def read_data(self, path, index):
    """Load the klepto archive at *path* and return the entry at *index*."""
    archive = file_archive(path)
    archive.load()
    return archive[index]
for style in plt.style.available: plt.style.use(style) plt.style.use(plt.style.available[0]) #plotsModelfit(Model['C1']) #%% with open('PsychometricInfo.csv', 'w') as writeFile: writer = csv.writer(writeFile, delimiter=';', lineterminator='\n') header = [ 'User', '0.25 C2', '0.5 C2', '0.75 C2', 'Sensitivity C2', 'Alpha C2', 'Beta C2', '0.25 C3', '0.5 C3', '0.75 C3', 'Sensitivity C3', 'Alpha C3', 'Beta C3' ] writer.writerow(header) for user in range(13): Model = file_archive("User_" + str(user) + "_Model.mde") Model.load() UserInfo = [ user, format_decimal(getValue(0.25, Model['C1']), locale='sv_SE'), format_decimal(getValue(0.5, Model['C1']), locale='sv_SE'), format_decimal(getValue(0.75, Model['C1']), locale='sv_SE'), format_decimal(getValue(0.75, Model['C1']) - getValue(0.25, Model['C1']), locale='sv_SE'), format_decimal(Model['C1']['Fit'][0], locale='sv_SE'), format_decimal(Model['C1']['Fit'][1], locale='sv_SE'), format_decimal(getValue(0.25, Model['C2']), locale='sv_SE'), format_decimal(getValue(0.5, Model['C2']), locale='sv_SE'), format_decimal(getValue(0.75, Model['C2']), locale='sv_SE'), format_decimal(getValue(0.75, Model['C2']) -
self.name = name self.value = value company1 = Company('banana', 40) import dill import pickle dill.detect.trace(True) with open('company_dill.pkl', 'wb') as f: dill.dump(company1, f) pickle.dump(company1, f, pickle.HIGHEST_PROTOCOL) #KLEPTO: #(1). SAVING: from klepto.archives import file_archive db = file_archive('foo.txt') db['1'] = 1 db['max'] = 'bla' squared = lambda x: x**2 db['squared'] = squared def add(x,y): return x+y db['add'] = add class Foo(object): y = 1 def bar(self, x): return self.y + x db['Foo'] = Foo f = Foo()
def save_variables_to_klepto_file(file_name, variables_dict):
    """Save every (name, value) pair of *variables_dict* into a klepto archive.

    Bug fix: ``for k, v in variables_dict`` iterates the dict's KEYS, so
    it raised ValueError on unpacking (or silently unpacked 2-character
    keys); ``.items()`` yields the (key, value) pairs intended here.

    All klepto key names must be convertible to strings.

    Parameters
    ----------
    file_name : str
        Path of the archive file to create or update.
    variables_dict : dict
        Mapping of variable names to values.
    """
    db = file_archive(file_name)
    for k, v in variables_dict.items():
        db[k] = v
    db.dump()
def __init__(self, dictname):
    """Open and eagerly load the serialized klepto dictionary *dictname*.

    The on-disk path is resolved through the project Configurations.
    """
    self._dict = file_archive(Configurations().dictpath(dictname), {}, serialized=True)
    self._dict.load()
# %% from os import listdir from os.path import isfile, join, dirname, splitext, abspath from matplotlib import pyplot as plt import numpy as np import pandas as pd import psignifit as ps from matplotlib.ticker import ScalarFormatter from sys import getsizeof, argv from klepto.archives import file_archive # %% db = file_archive("User_0_Modellogistic.mde")
return instructions p_DataSet = [] c1 = 0 f3 = [] y1 = [] y2 = [] with jsonlines.open("train_dataset.jsonl") as reader: for _instructions in reader: #(type=dict, skip_invalid=True): p_DataSet.append(Features(_instructions)) y1.append(G_opt[_instructions['opt']]) y2.append(G_compiler[_instructions['compiler']]) #np.array([int(c) for c in ]) db = file_archive("DbMalware.txt") db['featuresnames'] = strinngfeatures db['featuresdata'] = f3 db['opt'] = y1 db['G_opt'] = G_opt db['compiler'] = y2 db['G_compiler'] = G_compiler db.dump() print('done') def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None,
def kanwaout(self, out):
    """Serialize the accumulated records into a klepto archive at *out*."""
    archive = file_archive(out, self.records, serialized=True)
    archive.dump()