def __init__(self, filename):
    self.__filename = os.path.abspath(filename)
    if self.__filename not in _LOCKS:
        _LOCKS[self.__filename] = RLock()
    self.__lock = _LOCKS[self.__filename]
    self.__db = LSM(self.__filename)
    self.__closed = False
class Index(object):
    def __init__(self, db_path):
        self.db = LSM(db_path)

    def has_changed(self, path):
        return self.db[path] != checksum(path)

    def update(self, files=()):
        with self.db.transaction() as txn:
            for path in files:
                self.db[path] = checksum(path)

    def __iter__(self):
        return iter(self.db.keys())

    @property
    def changed(self):
        return [path for path in self if self.has_changed(path)]
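The checksum helper above is defined elsewhere in that project. A minimal usage sketch, assuming it hashes file contents and returns bytes (the SHA-256 helper and the paths below are illustrative, not from the original source):

import hashlib

def checksum(path):
    # assumption: hash the file contents; the project's real helper may differ
    with open(path, 'rb') as f:
        return hashlib.sha256(f.read()).hexdigest().encode()

index = Index('index.ldb')               # hypothetical database path
index.update(['setup.py', 'README.rst'])
print(index.changed)                     # paths whose checksums no longer match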
class Db:
    def __init__(self):
        self.path = settings.DB_PATH
        self.db = LSM(self.path)

    def __getitem__(self, key):
        return self.db[key]

    def __setitem__(self, key, item):
        self.db[key] = item

    def __contains__(self, key):
        return key in self.db

    def reset(self):
        self.db.close()
        os.remove(self.path)
        self.db = LSM(self.path)
def database_open(root, recreate=False):
    root = root if isinstance(root, Path) else Path(root)
    db = root / ".mutation.okvslite"
    if recreate and db.exists():
        log.trace("Deleting existing database...")
        for file in root.glob(".mutation.okvslite*"):
            file.unlink()
    if not recreate and not db.exists():
        log.error("No database, can not proceed!")
        sys.exit(1)
    db = LSM(str(db))
    return db
def main():
    LIMIT = 10
    db = LSM('fuzzbuzz.ldb')
    if sys.argv[1] == 'index':
        with open(sys.argv[2]) as f:
            for index, line in enumerate(f):
                line = line.strip()
                if index % 10_000 == 0:
                    print(index, line)
                url, label = line.split('\t')
                if not all(x in ascii_lowercase for x in label):
                    continue
                if ' ' in label:
                    continue
                key = bbkh(label)
                db[pack((key, label))] = b'\x42'
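Each entry is keyed by pack((bbkh(label), label)), so labels that share a bbkh bucket sort next to each other on disk. A sketch of reading one bucket back with a prefix range scan, assuming pack, unpack, and strinc behave as FoundationDB-style tuple helpers (the imports in the query script further below suggest exactly that):

# sketch: scan every label stored under one bbkh bucket
key = bbkh('some label')
start = pack((key,))
for raw_key, value in db[start:strinc(start)]:
    _, label = unpack(raw_key)   # recover the label from the packed key
    print(label)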
def install_module_loader(uid):
    db = LSM(".mutation.okvslite")
    mutation_show(uid.hex)
    path, diff = lexode.unpack(db[lexode.pack([1, uid])])
    diff = zstd.decompress(diff).decode("utf8")
    with open(path) as f:
        source = f.read()
    patched = patch(diff, source)

    import imp

    components = path[:-3].split("/")
    module_path = None  # guard: otherwise the check below raises NameError when nothing matches
    while components:
        for pythonpath in sys.path:
            filepath = os.path.join(pythonpath, "/".join(components))
            filepath += ".py"
            ok = os.path.exists(filepath)
            if ok:
                module_path = ".".join(components)
                break
        else:
            components.pop()
            continue
        break
    if module_path is None:
        raise Exception("sys.path oops!")
    patched_module = imp.new_module(module_path)
    try:
        exec(patched, patched_module.__dict__)
    except Exception:
        # TODO: syntaxerror, do not produce those mutations
        exec("", patched_module.__dict__)
    sys.modules[module_path] = patched_module
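The imp module has been deprecated since Python 3.4; types.ModuleType is the documented replacement for imp.new_module, so the tail of this loader can be written without it. A sketch reusing the names from the function above:

import sys
import types

# equivalent to imp.new_module(module_path)
patched_module = types.ModuleType(module_path)
exec(patched, patched_module.__dict__)
sys.modules[module_path] = patched_module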
def __init__(self, db_path):
    self.db = LSM(db_path)
def get(cls, name):
    path = settings.DISK_CACHE_ROOT + name + '.ldb'
    return LSM(path)
class LSMEngine(object):
    # create an LSM key-value database to cache keys for updating data in another DB
    db = LSM(''.join([settings['LSM_PATH'], settings['LSM_DBNAME']]))
SMS_NOTIFICATIONS = dict([
    (10, "Day 10. Top up water and nutrients: Micro 12.5 ml, Grow 12.5 ml, Bloom 12.5 ml"),
    (20, "Day 20. Top up water and nutrients: Micro 25 ml, Grow 25 ml, Bloom 25 ml"),
    (30, "Day 30. Top up water and nutrients: Micro 25 ml, Grow 25 ml, Bloom 25 ml"),
    (45, "Day 45. Start a new cycle. Details: http://admin.vhnh.hort.io/today"),
])

# Setup IO
mcp = MCP.MCP23008()
for out in range(8):
    mcp.setup(out, GPIO.OUT)

# Setup DB
db = LSM(os.getenv("DB_FILE", "/data/hortio.ldb"))


def pca9548a_setup(pca9548a_channel):
    """Set i2c multiplexer (pca9548a) channel."""
    pca9548a = I2C.get_i2c_device(PCA9548A_ADDR)
    pca9548a.writeRaw8(pca9548a_channel)
    time.sleep(0.1)


def db_get(key):
    if key in DEFAULT_STATES:
        default_value = DEFAULT_STATES[key]
    else:
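The snippet breaks off inside db_get. For reference, lsm-db raises KeyError on missing keys, so a generic fallback read looks like the sketch below (a plausible shape only, not the original body):

# sketch of a read-with-fallback for lsm-db; not the original db_get
def db_get_default(db, key, default=None):
    try:
        return db[key]      # lsm-db raises KeyError when the key is absent
    except KeyError:
        return default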
def reset(self):
    self.db.close()
    os.remove(self.path)
    self.db = LSM(self.path)
def __init__(self, id, serialize=identity_fn, deserialize=identity_fn):
    self.dbfile = "%s.kvdb" % id
    self.db = LSM(self.dbfile)
    self.serialize = serialize
    self.deserialize = deserialize
class Base(MutableMapping):
    __slots__ = "__db", "__lock", "__closed", "__filename",

    def __init__(self, filename):
        self.__filename = os.path.abspath(filename)
        if self.__filename not in _LOCKS:
            _LOCKS[self.__filename] = RLock()
        self.__lock = _LOCKS[self.__filename]
        self.__db = LSM(self.__filename)
        self.__closed = False

    @property
    def _db(self):
        if self.__closed:
            raise RuntimeError("Database closed")
        return self.__db

    def __iter__(self):
        with self.__lock:
            for key in self._db.keys():
                yield self._decode_key(key)

    def __len__(self):
        # FIXME: It's so slow
        with self.__lock:
            return sum(1 for _ in self._db.keys())

    def __contains__(self, key):
        return self._encode_key(key) in self._db

    def get(self, key, default=None):
        try:
            return self[key]
        except KeyError:
            return default

    def __getitem__(self, key):
        _key = self._encode_key(key)
        return self._decode_value(self._db[_key])

    def __setitem__(self, key, value):
        _key = self._encode_key(key)
        _value = self._encode_value(value)
        with self.__lock:
            error = True
            while error:
                try:
                    self._db[_key] = _value
                    error = False
                except Exception as e:
                    if e.args[0] != "Busy":
                        raise
                    continue

    def __delitem__(self, key):
        return self.delete(key)

    def delete(self, key):
        with self.__lock:
            error = True
            while error:
                try:
                    self._db.delete(self._encode_key(key))
                    error = False
                except Exception as e:
                    if e.args[0] != "Busy":
                        raise
                    continue

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    @property
    def closed(self):
        return self.__closed

    @property
    def filename(self):
        return self.__filename

    def close(self):
        if self.__closed:
            return
        self.__closed = True
        with self.__lock:
            self.__db.close()

    def __del__(self):
        if self.filename in _LOCKS:
            del _LOCKS[self.filename]
        self.close()

    def sync(self):
        with self.__lock:
            self._db.flush()

    def __repr__(self):
        return "<LSMShelf: %r>" % self.filename

    @abc.abstractmethod
    def _encode_key(self, key):
        raise NotImplementedError

    @abc.abstractmethod
    def _decode_key(self, key):
        raise NotImplementedError

    @abc.abstractmethod
    def _encode_value(self, value):
        raise NotImplementedError

    @abc.abstractmethod
    def _decode_value(self, value):
        raise NotImplementedError
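Only the four codec methods are abstract, so a concrete shelf stays small. A minimal subclass sketch, assuming UTF-8 keys and JSON values (this JSONShelf is illustrative, not part of the original module):

import json

class JSONShelf(Base):
    # hypothetical subclass: UTF-8 keys, JSON-serialized values
    def _encode_key(self, key):
        return key.encode("utf-8")

    def _decode_key(self, key):
        return key.decode("utf-8")

    def _encode_value(self, value):
        return json.dumps(value).encode("utf-8")

    def _decode_value(self, value):
        return json.loads(value)

with JSONShelf("data.ldb") as shelf:
    shelf["user"] = {"name": "alice", "visits": 3}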
from lsm import LSM

API_TOKEN = ""  # token from BotFather
DB_FILE = "db.ldb"  # database file name
TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
EXIT_TIME_FORMAT = "{:02} hours {:02} minutes {:02} seconds"
TIME = 12  # 12 hours

db = LSM(DB_FILE)
if bytes("count", "ascii") not in db.keys():
    db["count"] = 0

strings = {
    "start": f"Hi! I will forward your message to all of my users. You can send only one "
             f"anonymous message every {TIME} hours",
    "send": "OK, your message will be sent to {} users!",
    "source": "The source code is published <a href='github.com/Sp3kE-hack/echoall'>here</a>",
    "stats": "Total users in the bot: {}",
    "wtf": "Unknown command!",
    "already": "You have already sent a message, come back in {}",
}
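lsm-db hands values back as bytes, so bumping the stored counter is a decode-and-rewrite. A sketch of the pattern (not taken from the bot's source):

# sketch: increment the "count" value; lsm-db returns bytes
count = int(db["count"])        # int() accepts integer literals as bytes
db["count"] = str(count + 1)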
import sys
from time import time
from collections import Counter

from rapidfuzz import fuzz

from fuzz import bbkh
from lsm import LSM
from lsm import SEEK_LE, SEEK_GE
from tuple import pack, unpack, strinc

f = open(sys.argv[1])

db = LSM('fuzzbuzz.ldb')

for index, line in enumerate(f):
    print(index)
    line = line.lower()
    wrong, goods = line.split('->')
    goods = [x.strip() for x in goods.split(',')]
    begin = time()
    limit = 100
    query = wrong.lower()
    key = bbkh(query)
    distances = Counter()
    start = pack((key,))
    with db.cursor() as cursor:
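The loop is cut off at the cursor. One plausible shape for the scan it opens, assuming the key layout from the indexer above and ranking candidates with rapidfuzz (a sketch, not the original body):

# sketch: walk keys at or after the query's bbkh bucket and score candidates
with db.cursor() as cursor:
    cursor.seek(start, SEEK_GE)        # position at the first key >= the bucket prefix
    for raw_key, _ in cursor:
        if raw_key >= strinc(start):   # past the bucket prefix, stop scanning
            break
        _, label = unpack(raw_key)
        distances[label] = fuzz.ratio(query, label)
best = [label for label, _ in distances.most_common(limit)]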
# def random_string_generator(n):
#     return ''.join(random.choice(string.digits) for _ in range(n))

db_name = "lsm_experiment.ldb"
put_range = 1000000  # 200000000
latest_data_num = 5000
random_get_range = 5000
data_list = list()
latest_data_list = list()

if os.path.exists(db_name):
    os.remove(db_name)
db = LSM(db_name)

now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("-------------------------------------")
print("Experiment start at: ", current_time)
print("[Put Data Number]: ", put_range)
# print("[Latest Data Number]:", latest_data_num)
print("[Random Get Number]:", random_get_range)

put_start = time.time()
for each in range(put_range):
    # k = random_string_generator(10).encode()
    # k = str(random.uniform(1, 10000)).encode()
    k = random.uniform(1, 10000)
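The put loop breaks off above. lsm-db keys must be str or bytes, so the float key needs stringifying; below is a compact sketch of the timing pattern the script sets up (bulk puts, then random point reads), not the original continuation:

# sketch: complete the bulk-put loop and time random gets
for each in range(put_range):
    k = str(random.uniform(1, 10000))
    db[k] = k                    # lsm-db wants str/bytes keys and values
    data_list.append(k)
print("[Put Time]:", time.time() - put_start)

get_start = time.time()
for k in random.sample(data_list, random_get_range):
    _ = db[k]
print("[Random Get Time]:", time.time() - get_start)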
def __init__(self, path_database_dir, database_name, database_method,
             input_type, output_type, encoding='utf-8', checks_flag=True):
    # input:
    # - path_database_dir : path of the directory of the database
    # - database_name : name of the database, without extension
    # - database_method : package/method used to construct the database
    # - input_type : format of the keys of the dictionary, fixed for the
    #   database (e.g. 'str', 'int', 'float', etc.)
    # - output_type : format of the values of the dictionary, fixed for the
    #   database (e.g. 'str', 'int', 'float', etc.)
    # - encoding : encoding for characters
    # - checks_flag : enable consistency checks
    self.path_database_dir = path_database_dir
    self.database_name = database_name
    self.database_method = database_method
    self.input_type = input_type
    self.output_type = output_type
    self.encoding = encoding
    self.checks_flag = checks_flag

    self.path_settings = os.path.join(
        path_database_dir,
        'settings_' + database_name + '_' + self.database_method + '.json')

    mkdir_if_not_exist(path_database_dir)

    self.settings_keys = [
        'database_method', 'input_type', 'output_type', 'encoding'
    ]
    self.settings_values = [
        self.database_method, self.input_type, self.output_type, self.encoding
    ]

    if os.path.isfile(self.path_settings):
        settings = dict_load_json(self.path_settings)
        if len(self.settings_keys) == len(settings['settings_keys']):
            for i in range(len(self.settings_keys)):
                if settings['settings_values'][i] != self.settings_values[i] or \
                        settings['settings_keys'][i] != self.settings_keys[i]:
                    raise ValueError(
                        'saved settings dictionary does not correspond to the settings passed for this database'
                    )
        else:
            raise ValueError(
                'saved settings dictionary does not correspond to the settings passed for this database'
            )
    else:
        self.settings = {}
        for i in range(len(self.settings_keys)):
            key = self.settings_keys[i]
            value = self.settings_values[i]
            self.settings[key] = value
        self.settings['settings_keys'] = self.settings_keys
        self.settings['settings_values'] = self.settings_values
        self.save_settings()

    if self.database_method == 'lsm':
        self.path_database = os.path.join(path_database_dir, database_name + '.ldb')
        self.db = LSM(self.path_database)
    elif self.database_method == 'json':
        # only allows data types that can be converted to a string
        self.path_database = os.path.join(path_database_dir, database_name + '.json')
        list_allowed_types = ['string', 'int', 'float']
        if self.input_type not in list_allowed_types:
            raise ValueError('input type not in allowed list', self.input_type)
        if self.output_type not in list_allowed_types:
            raise ValueError('output type not in allowed list', self.output_type)
        if os.path.isfile(self.path_database):
            print('load database at: ' + self.path_database)
            self.db = database_load_json(self.path_database, self.encoding)
        else:
            self.db = {}
    else:
        raise ValueError('database_method is not in options', self.database_method)
def simulate(self, process):
    # parallel processing on each setting value
    self.pid = os.getpid()
    self.neuron = Neuron(**self.parm[process + self.process_counter])
    self.neuron.parm_dict = self.parm[process + self.process_counter]
    self.progress_counter = self.now_cycle_multiproc * self.neuron.allsteps

    # record
    d = datetime.datetime.today()
    filename = "{0}_{1}_{2}_{3}_{4}_{5}_" \
               "Iext_amp{6}_Pmax_AMPA{7}_Pmax_NMDA{8}_LIF".format(
                   d.year, d.month, d.day, d.hour, d.minute, d.second,
                   self.neuron.Iext_amp, self.neuron.Pmax_AMPA, self.neuron.Pmax_NMDA)
    df = pd.DataFrame(columns=[filename])
    df.to_csv(save_path + '/' + filename + '.csv')
    df = pd.DataFrame()
    for k in range(numneu):
        df['T_{} [ms]'.format(k)] = ""
        df['V_{} [mV]'.format(k)] = ""
        df['fire_{}'.format(k)] = ""
        df['I_syn_{} [uA]'.format(k)] = ""
        df['I_AMPA_{} [uA]'.format(k)] = ""
        df['I_NMDA_{} [uA]'.format(k)] = ""
        df['Iext_{} [uA]'.format(k)] = ""
        df['I_noise_{} [uA]'.format(k)] = ""
    df.to_csv(save_path + '/' + filename + '.csv', mode='a')

    ####### MAIN PROCESS #######
    for j in range(num_lump):
        self.input_generator_sin()
        # self.input_generator_mackey_glass()

        ####### MAIN CYCLE #######
        for i in range(0, self.neuron.allsteps - 1):
            self.neuron.propagation()
            if self.progress_counter % 1000 == 0:
                self.log = 'process id : ' + str(self.pid) + ' : ' + \
                           str(self.progress_counter) + ' steps : ' + \
                           str(round(self.progress_counter * 100 / self.overall_steps, 1)) + "%"
                print(self.log)
            self.progress_counter += 1

        # record
        df = pd.DataFrame()
        for k in range(numneu):
            df['T_{} [ms]'.format(k)] = self.neuron.Tsteps
            df['V_{} [mV]'.format(k)] = self.neuron.V[k]
            df['fire_{}'.format(k)] = self.neuron.t_fire_list[k]
            df['I_syn_{} [uA]'.format(k)] = self.neuron.Isyn[k]
            df['I_AMPA_{} [uA]'.format(k)] = self.neuron.IAMPA[k]
            df['I_NMDA_{} [uA]'.format(k)] = self.neuron.INMDA[k]
            df['Iext_{} [uA]'.format(k)] = self.neuron.Iext[k]
            df['I_noise_{} [uA]'.format(k)] = self.neuron.Inoise[k]
        df = df[:-1]
        df.to_csv(save_path + '/' + filename + '.csv', mode='a', header=None)

        # Preparation for calculating the next lump
        self.neuron.Tsteps = self.neuron.Tsteps + lump
        self.neuron.V = np.fliplr(self.neuron.V)
        self.neuron.Isyn = np.fliplr(self.neuron.Isyn)
        self.neuron.Isyn[:, 1:] = 0
        self.neuron.IAMPA = np.fliplr(self.neuron.IAMPA)
        self.neuron.IAMPA[:, 1:] = 0
        self.neuron.INMDA = np.fliplr(self.neuron.INMDA)
        self.neuron.INMDA[:, 1:] = 0
        self.neuron.R_AMPA = np.flip(self.neuron.R_AMPA, axis=2)
        self.neuron.R_AMPA[:, :, 1:] = 0
        self.neuron.R_NMDA = np.flip(self.neuron.R_NMDA, axis=2)
        self.neuron.R_NMDA[:, :, 1:] = 0
        self.neuron.E_AMPA = np.flip(self.neuron.E_AMPA, axis=2)
        self.neuron.E_AMPA[:, :, 1:] = 0
        self.neuron.E_NMDA = np.flip(self.neuron.E_NMDA, axis=2)
        self.neuron.E_NMDA[:, :, 1:] = 0
        self.neuron.I_AMPA = np.flip(self.neuron.I_AMPA, axis=2)
        self.neuron.I_AMPA[:, :, 1:] = 0
        self.neuron.I_NMDA = np.flip(self.neuron.I_NMDA, axis=2)
        self.neuron.I_NMDA[:, :, 1:] = 0
        self.neuron.Iext = np.fliplr(self.neuron.Iext)
        self.neuron.t_fire_list = 0 * self.neuron.t_fire_list
        self.neuron.Inoise = np.fliplr(self.neuron.Inoise)
        self.neuron.dn = np.fliplr(self.neuron.dn)
        self.neuron.dWt = np.fliplr(self.neuron.dWt)
        self.neuron.curstep = 0
        self.lump_counter += 1
    ####### MAIN PROCESS END #######

    # Visualization of connection structure
    # graphviz must be installed
    if not os.path.isdir(save_path + '/dot'):
        os.mkdir(save_path + '/dot')
    if not os.path.isdir(save_path + '/structure'):
        os.mkdir(save_path + '/structure')
    dot_txt = 'digraph g{\n'
    dot_txt += 'graph [ dpi = 300, ratio = 1.0];\n'
    for i in range(numneu):
        dot_txt += '{} [label="{}", color=lightseagreen, fontcolor=white, style=filled]\n'.format(i, 'N' + str(i + 1))
    for i, j in itertools.product(range(numneu), range(numneu)):
        if self.neuron.Syn_weight[i, j] != 0:
            dot_txt += '{}->{}\n'.format(i, j)
    dot_txt += "}\n"
    with open(save_path + '/dot/' + filename + '.dot', 'w') as f:
        f.write(dot_txt)
    self.cmd = 'dot {} -T png -o {}'.format(save_path + '/dot/' + filename + '.dot',
                                            save_path + '/structure/' + filename + '.png')
    subprocess.run(self.cmd, shell=True)

    # Rastergram
    plt.rcParams["font.size"] = 28
    if not os.path.isdir(save_path + '/rastergram'):
        os.mkdir(save_path + '/rastergram')
    num_read_nodes = numneu
    raster_line_length = 1
    raster_line_width = 0.5
    read_cols = ['T_0 [ms]']
    ytick_list = []
    for i in range(num_read_nodes):
        ytick_list.append(i + 1)
        read_cols.append('fire_{}'.format(i))
    df = pd.read_csv(save_path + '/' + filename + '.csv',
                     usecols=read_cols, skiprows=1)[read_cols]
    fig = plt.figure(figsize=(20, 10))
    ax = fig.add_subplot(111)
    ax.set_ylim(0, num_read_nodes + 1)
    ax.set_yticks(ytick_list)
    ax.set_xlabel("Time [ms]")
    ax.set_ylabel("Neuron number")
    for i in range(num_read_nodes):
        for j in range(len(df.values[:, 0])):
            if df.values[j, i + 1] != 0:
                x = df.values[j, 0]
                ax.plot([x, x],
                        [i + 1 - (raster_line_length / 2), i + 1 + (raster_line_length / 2)],
                        linestyle="solid", linewidth=raster_line_width, color="black")
    plt.tight_layout()
    plt.savefig(save_path + '/rastergram/' + filename + '.png')
    plt.close(fig)

    ###### LEARNING AND PREDICTION PROCESS ######
    plt.rcParams["font.size"] = 14
    if not os.path.isdir(save_path + '/RC'):
        os.mkdir(save_path + '/RC')
    num_read_nodes = numneu
    read_cols = ['T_0 [ms]']
    for i in range(num_read_nodes):
        read_cols.append('V_{} [mV]'.format(i))
        read_cols.append('I_syn_{} [uA]'.format(i))
        read_cols.append('I_AMPA_{} [uA]'.format(i))
        read_cols.append('I_NMDA_{} [uA]'.format(i))
    read_cols.append('Iext_{} [uA]'.format(0))
    print(read_cols)
    df = pd.read_csv(save_path + '/' + filename + '.csv',
                     usecols=read_cols, skiprows=1)[read_cols]

    train_ratio = 0.5
    border = int(len(df.values[:, 0]) * train_ratio)

    # time
    times = df.values[:, 0].reshape((len(df.values[:, 0]), 1))
    times_bef = df.values[:border, 0].reshape((len(df.values[:border, 0]), 1))
    times_af = df.values[border:, 0].reshape((len(df.values[border:, 0]), 1))

    # Iext
    index_tmp = []
    index_tmp.append(int(4 * num_read_nodes + 1))
    input = df.values[:, index_tmp].reshape((len(df.values[:, index_tmp]), len(index_tmp)))
    target = input[:border]

    # V
    index_tmp = []
    for i in range(num_read_nodes):
        index_tmp.append(i * 4 + 1)
    output = df.values[:, index_tmp].reshape((len(df.values[:, index_tmp]), len(index_tmp)))
    output_train = df.values[:border, index_tmp].reshape((len(df.values[:border, index_tmp]), len(index_tmp)))
    output_predict = df.values[border:, index_tmp].reshape((len(df.values[border:, index_tmp]), len(index_tmp)))

    # Isyn, Iampa, Inmda
    index_tmp = []
    for i in range(num_read_nodes):
        index_tmp.append(i * 4 + 2)
    Isyn = df.values[:, index_tmp].reshape((len(df.values[:, index_tmp]), len(index_tmp)))
    index_tmp = []
    for i in range(num_read_nodes):
        index_tmp.append(i * 4 + 3)
    IAMPA = df.values[:, index_tmp].reshape((len(df.values[:, index_tmp]), len(index_tmp)))
    index_tmp = []
    for i in range(num_read_nodes):
        index_tmp.append(i * 4 + 4)
    INMDA = df.values[:, index_tmp].reshape((len(df.values[:, index_tmp]), len(index_tmp)))

    lsm = LSM()
    lsm.train(output_train, target)
    predict_res = (output_predict @ lsm.output_w).T

    # layout
    fig = plt.figure(figsize=(20, 15))
    fig.suptitle(filename)
    fig.subplots_adjust(left=0.075, bottom=0.05, right=0.95, top=0.95,
                        wspace=0.15, hspace=0.15)
    gs_master = GridSpec(nrows=num_read_nodes + 1, ncols=2)
    gs_rc = GridSpecFromSubplotSpec(nrows=1, ncols=2, subplot_spec=gs_master[0, 0:2])
    ax_rc = fig.add_subplot(gs_rc[:, :])
    gs_status = GridSpecFromSubplotSpec(nrows=num_read_nodes, ncols=2,
                                        subplot_spec=gs_master[1:, :],
                                        hspace=0.4, wspace=0.15)
    ax_status_v = []
    ax_status_i = []

    # Firing pattern of individual neurons
    for i in range(num_read_nodes):
        ax_status_v.append(fig.add_subplot(gs_status[i, 0]))
        ax_status_i.append(fig.add_subplot(gs_status[i, 1]))
        if i == 0:
            ax_rc.plot(times_bef, output_train[:, i], label="train_output_n{}".format(i))
            ax_rc.plot(times, input[:, 0], label="input(target)_Iext0")
            ax_rc.plot(times_af, predict_res[0], label="after training")
        ax_status_v[i].plot(times, output[:, i], label="output_n{}".format(i))
        ax_status_i[i].plot(times, Isyn[:, i], label="Isyn")
        ax_status_i[i].plot(times, IAMPA[:, i], label="IAMPA")
        ax_status_i[i].plot(times, INMDA[:, i], label="INMDA")
        ax_status_v[i].legend()
        ax_status_i[i].legend()

    print(times.shape)
    print(output_train.shape)
    print(target.shape)
    print(lsm.output_w.shape)
    print((output_train @ lsm.output_w).shape)
    print(output_predict.shape)
    print("W:{}".format(lsm.output_w))

    # plt.show()
    plt.savefig(save_path + '/RC/' + filename + '.png')
    plt.close(fig)
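The LSM in this example is the project's liquid state machine, not the lsm-db key-value store. Judging from train(output_train, target) and the later output_predict @ lsm.output_w, it fits a linear readout over reservoir states. A self-contained sketch of such a readout, assuming an ordinary least-squares fit (the original class may regularize differently):

import numpy as np

class LinearReadout:
    # hypothetical stand-in for the project's LSM readout class
    def train(self, states, target):
        # solve states @ output_w ~= target in the least-squares sense
        self.output_w, *_ = np.linalg.lstsq(states, target, rcond=None)

readout = LinearReadout()
states = np.random.rand(200, 3)         # 200 timesteps of 3 reservoir traces
target = np.random.rand(200, 1)         # teacher signal
readout.train(states, target)
prediction = states @ readout.output_w  # same shape as target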
def create_latent_semantic_model_runfiles():
    global rankings

    # LSI
    start = time.time()
    lsi = LSM('LSI', index)
    lsi.create_model()
    end = time.time()
    print("LSI model creation took {:.2f} seconds.".format(end - start))

    start = time.time()
    lsi.create_similarity_index()
    end = time.time()
    print("LSI similarity index creation took {:.2f} seconds.".format(end - start))

    start = time.time()
    lsi_reranking = lsm_reranking(ranked_queries=rankings['tfidf'], LSM_model=lsi)
    end = time.time()
    print("LSI reranking took {:.2f} seconds.".format(end - start))

    start = time.time()
    run_out_path = '{}.run'.format('LSI')
    with open('./lexical_results/{}'.format(run_out_path), 'w') as f_out:
        write_run(model_name='LSI', data=lsi_reranking, out_f=f_out,
                  max_objects_per_query=1000)
    end = time.time()
    print("LSI run file creation took {:.2f} seconds.".format(end - start))

    # LDA
    start = time.time()
    lda = LSM('LDA', index)
    lda.create_model()
    end = time.time()
    print("LDA model creation took {:.2f} seconds.".format(end - start))

    start = time.time()
    lda.create_similarity_index()
    end = time.time()
    print("LDA similarity index creation took {:.2f} seconds.".format(end - start))

    start = time.time()
    lda_reranking = lsm_reranking(ranked_queries=rankings['tfidf'], LSM_model=lda)
    end = time.time()
    print("LDA reranking took {:.2f} seconds.".format(end - start))

    start = time.time()
    run_out_path = '{}.run'.format('LDA')
    with open('./lexical_results/{}'.format(run_out_path), 'w') as f_out:
        write_run(model_name='LDA', data=lda_reranking, out_f=f_out,
                  max_objects_per_query=1000)
    end = time.time()
    print("LDA run file creation took {:.2f} seconds.".format(end - start))
def __init__(self):
    self.path = settings.DB_PATH
    self.db = LSM(self.path)