def begin_log(self):
    """Open a fresh key-record log for this state.

    The log title encodes the instantiation file, line number and state
    name so every KeyRecord instance gets a uniquely named log file.
    """
    super(KeyRecord, self).begin_log()
    source_base = os.path.basename(self._instantiation_filename)
    stem = os.path.splitext(source_base)[0]
    log_title = "keyrec_%s_%d_%s" % (stem,
                                     self._instantiation_lineno,
                                     self._name)
    self.__log_filename = self._exp.reserve_data_filename(log_title, "slog")
    self.__log_writer = LogWriter(self.__log_filename)
class Log(AutoFinalizeState):
    """Zero-duration state that writes a single record of the given items.

    Keyword arguments become the logged fields; a "time" column records
    the state's start time.
    """

    def __init__(self, parent=None, name=None, **kwargs):
        # init the parent class
        super(Log, self).__init__(parent=parent,
                                  name=name,
                                  duration=0.0,
                                  save_log=False)
        # NOTE(review): _enter reads self._log_items — presumably the state
        # machinery evaluates _init_log_items into _log_items; confirm
        # against the State base class.
        self._init_log_items = kwargs

    def begin_log(self):
        """Reserve the log file and open a writer with one column per item."""
        super(Log, self).begin_log()
        title = "log_%s_%d_%s" % (
            os.path.splitext(
                os.path.basename(self._instantiation_filename))[0],
            self._instantiation_lineno,
            self._name)
        self.__log_filename = self._exp.reserve_data_filename(title, "smlog")
        # list(...) is required on Python 3, where dict.keys() returns a
        # view that cannot be concatenated to a list with "+".
        self.__log_writer = LogWriter(
            self.__log_filename,
            ["time"] + list(self._init_log_items.keys()))

    def end_log(self, to_csv=False):
        """Close the writer; optionally convert the finished log to CSV."""
        super(Log, self).end_log(to_csv)
        if self.__log_writer is not None:
            self.__log_writer.close()
            self.__log_writer = None
        if to_csv:
            csv_filename = (os.path.splitext(self.__log_filename)[0] +
                            ".csv")
            log2csv(self.__log_filename, csv_filename)

    def _enter(self):
        # Snapshot the evaluated items plus the scheduled start time.
        record = self._log_items.copy()
        record["time"] = self._start_time
        self.__log_writer.write_record(record)
        clock.schedule(self.leave)
def __init__(self, sche_path, log_path='prj2.log', search_path='search.txt'):
    """Wire together the parser, recovery, logging and search components.

    sche_path: schedule input file; log_path: transaction log;
    search_path: file the search results are appended to.
    """
    self.search_path = search_path
    # Construction order preserved: some of these may share global state.
    self.db = Datasource()
    self.scheduler = ScheduleParser(sche_path)
    self.recovery = RecoveryManagement(log_path)
    self.log_writer = LogWriter(log_path)
    self.search = SearchEngine()
    self.generator = SearchEngineGenerator()
def simulate(self, name=None, resource_limit=None):
    """Build the models, then drain the execution queue up to the horizon.

    Prints wall-clock duration and flushes the accumulated log queue.
    """
    builder = ModelBuilder()
    self.models, self.rm, self.dm = builder.build_all(
        resource_limit=resource_limit)
    self._initialize_queue()
    started = time.time()
    while True:
        if self.execution_queue.is_empty():
            break
        entry = self.execution_queue.pop()
        # Stop once the next item would start beyond the simulation end.
        if entry.start > self.end:
            break
        self._simulate(entry)
    elapsed = time.time() - started
    print('Simulation time: ' + str(elapsed))
    LogWriter.write(self.log_queue, name=name)
class ScheduleSimulator(SingletonInstance):
    """Replays a schedule file, tracking per-transaction SQL, recovery
    points and search requests."""

    def __init__(self, sche_path, log_path='prj2.log', search_path='search.txt'):
        """Assemble the parser, recovery, logging and search components."""
        self.search_path = search_path
        self.db = Datasource()
        self.scheduler = ScheduleParser(sche_path)
        self.recovery = RecoveryManagement(log_path)
        self.log_writer = LogWriter(log_path)
        self.search = SearchEngine()
        self.generator = SearchEngineGenerator()

    def __call__(self):
        """Process every parsed schedule line in order."""
        transaction = {}
        for idx, parsed in enumerate(self.scheduler()):
            groups = parsed.groups()
            command_type = groups[0]
            if 'recover' in command_type:
                # Recovery resets in-flight transactions and checkpoints.
                transaction = {}
                self.recovery(idx)
                self.log_writer.set_checkpoint([])
                self.generator()
                continue
            self.log_writer(idx, parsed, transaction)
            if command_type.startswith("<T"):
                transaction.setdefault(command_type, [])
                if groups[1] in 'commit':
                    # Commit: release every SQL statement held so far.
                    for sql in transaction[command_type]:
                        self.db.free_sql(sql)
                    transaction[command_type] = []
                    self.generator()
                elif groups[1] in 'rollback':
                    # Rollback: simply drop the pending statements.
                    transaction[command_type] = []
                else:
                    # Plain statement: queue it under its transaction id.
                    transaction[command_type].append(groups[1])
            elif 'search' in command_type:
                query = groups[1].replace("\n", "").strip()
                with open(self.search_path, "a") as f:
                    f.write(f"search {idx+1}\n")
                    f.write(f"query {query}\n")
                    results = self.search(query)
                    for doc in results:
                        f.write(f"{self.search.result_formatting(doc)}\n")
def begin_log(self):
    """Open the keypress log with timestamp/key/state columns.

    The log title encodes the instantiation file, line number and state
    name so the file is unique per KeyRecord instance.
    """
    super(KeyRecord, self).begin_log()
    stem = os.path.splitext(
        os.path.basename(self._instantiation_filename))[0]
    log_title = "keyrec_%s_%d_%s" % (stem,
                                     self._instantiation_lineno,
                                     self._name)
    self.__log_filename = self._exp.reserve_data_filename(log_title, "smlog")
    self.__log_writer = LogWriter(self.__log_filename,
                                  ["timestamp", "key", "state"])
def setup_state_logger(self, state_class_name):
    """Return the log filename for a state class, creating the shared
    logger on first use (one log file per state class)."""
    try:
        # Cache hit: reuse the filename reserved earlier.
        filename, _logger = self._state_loggers[state_class_name]
    except KeyError:
        filename = self.reserve_data_filename("state_" + state_class_name,
                                              "slog")
        self._state_loggers[state_class_name] = filename, LogWriter(filename)
    return filename
def begin_log(self):
    """Reserve this Log state's data file and open its record writer.

    The title encodes the instantiation file, line number and state name
    so each Log state gets a uniquely named file.
    """
    super(Log, self).begin_log()
    title = "log_%s_%d_%s" % (
        os.path.splitext(
            os.path.basename(self._instantiation_filename))[0],
        self._instantiation_lineno,
        self._name)
    self.__log_filename = self._exp.reserve_data_filename(title, "smlog")
    # list(...) is required on Python 3, where dict.keys() returns a view
    # that cannot be concatenated to a list with "+".
    self.__log_writer = LogWriter(
        self.__log_filename,
        ["time"] + list(self._init_log_items.keys()))
def begin_log(self):
    """Reserve the record log file and open a writer with one column per
    tracked reference plus a trailing "timestamp" column."""
    super(Record, self).begin_log()
    title = "record_%s_%d_%s" % (
        os.path.splitext(
            os.path.basename(self._instantiation_filename))[0],
        self._instantiation_lineno,
        self._name)
    self.__log_filename = self._exp.reserve_data_filename(title, "smlog")
    # list(...) is required on Python 3, where dict.keys() returns a view
    # that cannot be concatenated to a list with "+".
    self.__log_writer = LogWriter(self.__log_filename,
                                  list(self.__refs.keys()) + ["timestamp"])
def setup_state_logger(self, state_class_name, field_names):
    """Register (or re-validate) the shared logger for a state class.

    Raises ValueError if the class was already registered with a
    different set of field names.
    """
    field_names = tuple(field_names)
    if state_class_name in self._state_loggers:
        # A state class must always log the same set of fields.
        if self._state_loggers[state_class_name][2] != field_names:
            raise ValueError("'field_names' changed for state class %r!" %
                             state_class_name)
        return
    filename = self.reserve_data_filename("state_" + state_class_name,
                                          "smlog")
    self._state_loggers[state_class_name] = (filename,
                                             LogWriter(filename, field_names),
                                             field_names)
def _write_sysinfo(self, filename=None):
    """Dump the collected system-info record as a single-record log file.

    Defaults to the experiment's reserved sysinfo log location.
    """
    target = self._sysinfo_slog if filename is None else filename
    writer = LogWriter(filename=target)
    writer.write_record(data=self._sysinfo)
    writer.close()
class KeyRecord(KeyState):
    """State that logs every key press/release with its event time."""

    def __init__(self, parent=None, duration=None, name=None):
        # NOTE(review): super() is anchored at KeyState, so KeyState's own
        # __init__ is bypassed and the grandparent initializer runs —
        # presumably deliberate; confirm against KeyState.
        super(KeyState, self).__init__(parent=parent,
                                       duration=duration,
                                       save_log=False,
                                       name=name)

    def begin_log(self):
        """Reserve a uniquely named log file and open the record writer."""
        super(KeyRecord, self).begin_log()
        stem = os.path.splitext(
            os.path.basename(self._instantiation_filename))[0]
        title = "keyrec_%s_%d_%s" % (stem,
                                     self._instantiation_lineno,
                                     self._name)
        self.__log_filename = self._exp.reserve_data_filename(title, "smlog")
        self.__log_writer = LogWriter(self.__log_filename,
                                      ["timestamp", "key", "state"])

    def end_log(self, to_csv=False):
        """Close the writer; optionally convert the finished log to CSV."""
        super(KeyRecord, self).end_log(to_csv)
        if self.__log_writer is not None:
            self.__log_writer.close()
            self.__log_writer = None
        if to_csv:
            csv_filename = os.path.splitext(self.__log_filename)[0] + ".csv"
            log2csv(self.__log_filename, csv_filename)

    def _record_event(self, event_time, keycode, direction):
        # One record per event: when, which key, down or up.
        self.__log_writer.write_record({"timestamp": event_time,
                                        "key": keycode[1].upper(),
                                        "state": direction})

    def _on_key_down(self, keycode, text, modifiers, event_time):
        self._record_event(event_time, keycode, "down")

    def _on_key_up(self, keycode, event_time):
        self._record_event(event_time, keycode, "up")
def begin_log(self):
    """Start a new key-record log, discarding any leftovers from a
    previous run of this state."""
    super(KeyRecord, self).begin_log()
    stem = os.path.splitext(
        os.path.basename(self._instantiation_filename))[0]
    title = "keyrec_%s_%d_%s" % (stem,
                                 self._instantiation_lineno,
                                 self._name)
    # A previous begin_log may have reserved a file; drop it first.
    if self.__log_filename is not None:
        os.remove(self.__log_filename)
    self.__log_filename = self._exp.reserve_data_filename(title, "slog")
    # Likewise close any writer left open from a previous run.
    if self.__log_writer is not None:
        self.__log_writer.close()
    self.__log_writer = LogWriter(self.__log_filename)
class KeyRecord(KeyState):
    """State that logs every key press and release, with event times,
    while it is active."""

    def __init__(self, parent=None, duration=None, name=None, blocking=True):
        # NOTE(review): super() is anchored at KeyState, so KeyState's
        # __init__ is bypassed and the grandparent initializer runs —
        # presumably deliberate; confirm against KeyState.
        super(KeyState, self).__init__(parent=parent,
                                       duration=duration,
                                       save_log=False,
                                       name=name,
                                       blocking=blocking)

    def begin_log(self):
        """Reserve a log file named after the instantiation site and open
        the record writer."""
        super(KeyRecord, self).begin_log()
        stem = os.path.splitext(
            os.path.basename(self._instantiation_filename))[0]
        title = "keyrec_%s_%d_%s" % (stem,
                                     self._instantiation_lineno,
                                     self._name)
        self.__log_filename = self._exp.reserve_data_filename(title, "smlog")
        self.__log_writer = LogWriter(self.__log_filename,
                                      ["timestamp", "key", "state"])

    def end_log(self, to_csv=False):
        """Close the writer; optionally convert the finished log to CSV."""
        super(KeyRecord, self).end_log(to_csv)
        writer = self.__log_writer
        if writer is not None:
            writer.close()
            self.__log_writer = None
        if to_csv:
            csv_filename = os.path.splitext(self.__log_filename)[0] + ".csv"
            log2csv(self.__log_filename, csv_filename)

    def _on_key_down(self, keycode, text, modifiers, event_time):
        self.__log_writer.write_record(dict(timestamp=event_time,
                                            key=keycode[1].upper(),
                                            state="down"))

    def _on_key_up(self, keycode, event_time):
        self.__log_writer.write_record(dict(timestamp=event_time,
                                            key=keycode[1].upper(),
                                            state="up"))
class KeyRecord(KeyState):
    """A state that records keypresses during a duration.

    Every key-down and key-up event, together with its event time, is
    written to a state-specific log file while this state is active.

    Parameters
    ----------
    duration : float (optional)
        How long to keep recording. If None, the state stays active until
        it is canceled or finalized by its parent.
    parent : ParentState (optional)
        The state you would like this state to be a child of. If not set,
        the *Experiment* will make it a child of a ParentState or the
        Experiment automatically.
    name : string (optional)
        The unique name of this state.
    blocking : boolean (optional, default = True)
        If True, this state will prevent a *Parallel* state from ending.
        If False, this state will be canceled if its Parallel Parent
        finishes running. Only relevant if within a *Parallel* Parent.

    Logged Attributes
    -----------------
    All parameters above and below are available to be accessed and
    manipulated within the experiment code, and will be automatically
    recorded in the state-specific log. Refer to State class docstring
    for additional logged parameters.
    """

    def __init__(self, parent=None, duration=None, name=None, blocking=True):
        # NOTE(review): super() is anchored at KeyState, so KeyState's
        # __init__ is bypassed and the grandparent initializer runs —
        # presumably deliberate; confirm against KeyState.
        super(KeyState, self).__init__(parent=parent,
                                       duration=duration,
                                       save_log=False,
                                       name=name,
                                       blocking=blocking)

    def begin_log(self):
        """Reserve a log file named after the instantiation site."""
        super(KeyRecord, self).begin_log()
        stem = os.path.splitext(
            os.path.basename(self._instantiation_filename))[0]
        title = "keyrec_%s_%d_%s" % (stem,
                                     self._instantiation_lineno,
                                     self._name)
        self.__log_filename = self._exp.reserve_data_filename(title, "slog")
        self.__log_writer = LogWriter(self.__log_filename)

    def end_log(self, to_csv=False):
        """Close the log writer; optionally convert the log to CSV."""
        super(KeyRecord, self).end_log(to_csv)
        if self.__log_writer is not None:
            self.__log_writer.close()
            self.__log_writer = None
        if to_csv:
            csv_filename = os.path.splitext(self.__log_filename)[0] + ".csv"
            log2csv(self.__log_filename, csv_filename)

    def _write_event(self, event_time, keycode, direction):
        # One record per event: when, which key, down or up.
        self.__log_writer.write_record({"timestamp": event_time,
                                        "key": keycode[1].upper(),
                                        "state": direction})

    def _on_key_down(self, keycode, text, modifiers, event_time):
        self._write_event(event_time, keycode, "down")

    def _on_key_up(self, keycode, event_time):
        self._write_event(event_time, keycode, "up")
def save_log(self, name, callback=None):
    """Write the current array snapshot to '<name>.log'.

    If a callback is supplied it is invoked afterwards with the current
    value from self.get().
    """
    writer = LogWriter("%s.log" % name)
    snapshot = self.get_arr()
    writer.write_one(snapshot)
    if callback:
        callback(self.get())
class KeyRecord(KeyState):
    """A state that records keypresses during a duration.

    Every key-down and key-up event, together with its event time, is
    written to a state-specific log file while this state is active.

    Parameters
    ----------
    duration : float (optional)
        How long to keep recording. If None, the state stays active until
        it is canceled or finalized by its parent.
    parent : ParentState (optional)
        The state you would like this state to be a child of. If not set,
        the *Experiment* will make it a child of a ParentState or the
        Experiment automatically.
    name : string (optional)
        The unique name of this state.
    blocking : boolean (optional, default = True)
        If True, this state will prevent a *Parallel* state from ending.
        If False, this state will be canceled if its Parallel Parent
        finishes running. Only relevant if within a *Parallel* Parent.

    Logged Attributes
    -----------------
    All parameters above and below are available to be accessed and
    manipulated within the experiment code, and will be automatically
    recorded in the state-specific log. Refer to State class docstring
    for additional logged parameters.
    """

    def __init__(self, parent=None, duration=None, name=None, blocking=True):
        # NOTE(review): super() is anchored at KeyState, so KeyState's
        # __init__ is bypassed and the grandparent initializer runs —
        # presumably deliberate; confirm against KeyState.
        super(KeyState, self).__init__(parent=parent,
                                       duration=duration,
                                       save_log=False,
                                       name=name,
                                       blocking=blocking)
        # No log file or writer exists until begin_log runs.
        self.__log_filename = None
        self.__log_writer = None

    def begin_log(self):
        """(Re)start logging: discard any previously reserved file and
        open a fresh writer."""
        super(KeyRecord, self).begin_log()
        source = os.path.basename(self._instantiation_filename)
        title = "keyrec_%s_%d_%s" % (os.path.splitext(source)[0],
                                     self._instantiation_lineno,
                                     self._name)
        if self.__log_filename is not None:
            # Remove the file reserved by a previous begin_log.
            os.remove(self.__log_filename)
        self.__log_filename = self._exp.reserve_data_filename(title, "slog")
        if self.__log_writer is not None:
            self.__log_writer.close()
        self.__log_writer = LogWriter(self.__log_filename)

    def end_log(self, to_csv=False):
        """Close the writer; optionally convert the finished log to CSV."""
        super(KeyRecord, self).end_log(to_csv)
        if self.__log_writer is not None:
            self.__log_writer.close()
            self.__log_writer = None
        if to_csv:
            csv_filename = os.path.splitext(self.__log_filename)[0] + ".csv"
            log2csv(self.__log_filename, csv_filename)

    def _on_key_down(self, keycode, text, modifiers, event_time):
        self.__log_writer.write_record({"timestamp": event_time,
                                        "key": keycode[1].upper(),
                                        "state": "down"})

    def _on_key_up(self, keycode, event_time):
        self.__log_writer.write_record({"timestamp": event_time,
                                        "key": keycode[1].upper(),
                                        "state": "up"})
def __init__(self, log_path, path='recovery.txt'):
    """Set up recovery over the given transaction log.

    The human-readable recovery report is appended to `path`.
    """
    self.path = path
    self.db = Datasource()
    self.log_parser = LogParser(log_path)
    self.log_writer = LogWriter(log_path)
    self.generator = SearchEngineGenerator()
class Record(State):
    """State that logs reference expressions whenever any of them changes.

    Keyword arguments are the references to track; every change writes a
    record containing the current value of each reference plus a
    "timestamp" column.
    """

    def __init__(self, duration=None, parent=None, name=None, blocking=True,
                 **kwargs):
        super(Record, self).__init__(parent=parent,
                                     duration=duration,
                                     save_log=False,
                                     name=name,
                                     blocking=blocking)
        # References to watch; evaluated via val() on each change.
        self.__refs = kwargs

    def begin_log(self):
        """Reserve the log file and open a writer with one column per ref."""
        super(Record, self).begin_log()
        title = "record_%s_%d_%s" % (
            os.path.splitext(
                os.path.basename(self._instantiation_filename))[0],
            self._instantiation_lineno,
            self._name)
        self.__log_filename = self._exp.reserve_data_filename(title, "smlog")
        # list(...) is required on Python 3, where dict.keys() returns a
        # view that cannot be concatenated to a list with "+".
        self.__log_writer = LogWriter(self.__log_filename,
                                      list(self.__refs.keys()) + ["timestamp"])

    def end_log(self, to_csv=False):
        """Close the writer; optionally convert the finished log to CSV."""
        super(Record, self).end_log(to_csv)
        if self.__log_writer is not None:
            self.__log_writer.close()
            self.__log_writer = None
        if to_csv:
            csv_filename = (os.path.splitext(self.__log_filename)[0] +
                            ".csv")
            log2csv(self.__log_filename, csv_filename)

    def _enter(self):
        clock.schedule(self.leave, event_time=self._start_time)
        if self._end_time is not None:
            clock.schedule(self.finalize, event_time=self._end_time)

    def _leave(self):
        # .items() replaces Python-2-only iteritems() (works on 2 and 3).
        for name, ref in self.__refs.items():
            # NOTE(review): record_change() runs once per ref here, so the
            # initial snapshot is written len(refs) times — confirm intended.
            self.record_change()
            ref.add_change_callback(self.record_change)

    def finalize(self):
        super(Record, self).finalize()
        for name, ref in self.__refs.items():
            ref.remove_change_callback(self.record_change)

    def record_change(self):
        """Write one record with the current value of every tracked ref."""
        record = val(self.__refs)
        record["timestamp"] = self._exp._app.event_time
        self.__log_writer.write_record(record)

    def cancel(self, cancel_time):
        """Cancel the state, rescheduling leave/finalize accordingly."""
        if self._active:
            if cancel_time < self._start_time:
                # Cancelled before starting: leave/finalize immediately.
                clock.unschedule(self.leave)
                clock.schedule(self.leave)
                clock.unschedule(self.finalize)
                clock.schedule(self.finalize)
                self._end_time = self._start_time
            elif self._end_time is None or cancel_time < self._end_time:
                # Cut the recording short at cancel_time.
                clock.unschedule(self.finalize)
                clock.schedule(self.finalize, event_time=cancel_time)
                self._end_time = cancel_time
class RecoveryManagement(object):
    """Log-based crash recovery: redo work forward from the last
    checkpoint, then undo unfinished transactions backward.

    NOTE(review): indentation was reconstructed from a collapsed source
    line; the nesting inside execute_recovery's delete branch and of the
    "<T" elif in __call__ is best-effort — verify against the original.
    """

    def __init__(self, log_path, path='recovery.txt'):
        # path: file the human-readable recovery report is appended to.
        self.path = path
        self.db = Datasource()
        self.log_parser = LogParser(log_path)
        self.log_writer = LogWriter(log_path)
        self.generator = SearchEngineGenerator()

    def __call__(self, recover_line):
        """Run redo/undo recovery and append a report for `recover_line`."""
        with open(self.path, "a") as f:
            save_redo_list = []
            # Resume from the last checkpoint; undo_list starts as the
            # transactions that were open at that checkpoint.
            n_line, undo_list = self.log_parser.find_checkpoint()
            for log in self.log_parser.forward(n_line=n_line):
                groups = log.groups()
                command_type = groups[0]
                if 'recover' in command_type:
                    break
                else:
                    if len(groups) == 2:
                        # Transaction lifecycle record: start/commit/abort.
                        if 'start' in groups[1]:
                            undo_list.append(command_type)
                        elif 'commit' in groups[1]:
                            undo_list.remove(command_type)
                            # transaction = self.log_parser.find_transaction(command_type)
                            # for t in transaction:
                            #     self.execute_recovery(command_type, t, 'new', 'redo')
                            save_redo_list.append(command_type)
                        elif 'abort' in groups[1]:
                            undo_list.remove(command_type)
                            # transaction = self.log_parser.find_transaction(command_type)
                            # for t in transaction:
                            #     self.execute_recovery(command_type, t, 'old', 'redo')
                            save_redo_list.append(command_type)
                    elif command_type.startswith('<T'):
                        # Data-change record: redo it with the new value.
                        self.execute_recovery(command_type, groups[1:],
                                              'new', 'redo')
            save_undo_list = undo_list.copy()
            # Walk the log backwards, undoing still-open transactions.
            for log in self.log_parser.backward():
                groups = log.groups()
                command_type = groups[0]
                if command_type in undo_list and 'start' in groups[1]:
                    # Reached the transaction's start: it is fully undone.
                    undo_list.remove(command_type)
                    # transaction = self.log_parser.find_transaction(command_type)
                    # for t in reversed(transaction):
                    #     self.execute_recovery(command_type, t, 'old', 'undo')
                    self.log_writer.free_write(f"{command_type} abort")
                elif command_type in undo_list and command_type.startswith(
                        '<T'):
                    self.execute_recovery(command_type, groups[1:],
                                          'old', 'undo')
            self.log_writer.recover(recover_line)
            self.log_writer.checkpoint()
            f.write(f"recover {recover_line + 1}\n")
            f.write(f"redo {', '.join(save_redo_list)}\n")
            f.write(f"undo {', '.join(save_undo_list)}\n")

    def execute_recovery(self, t_id, t, value_type, do_type):
        '''Apply one logged data change in redo or undo direction.

        t (tuple): the transaction record tuple
        value_type (enum): apply the old or the new value ['old', 'new']
        do_type (enum): undo or redo ['undo', 'redo']
        '''
        # Assertion messages are runtime strings and are kept verbatim.
        assert value_type in ['old', 'new'], "Value 타입이 잘못되었습니다."
        assert do_type in ['undo', 'redo'], "Do 타입이 잘못되었습니다."
        is_undo = True if do_type == 'undo' else False
        # Update record: (table, key_field, key, target_field, old, new).
        if len(t) == 6:
            table, key_field, key, target_field, old_value, new_value = t
            # value = new_value if value_type in 'new' else old_value
            value = new_value if do_type == 'redo' else old_value
            if is_undo:
                # Undo writes a compensating update record to the log.
                self.log_writer.free_write(
                    f"{t_id}, <{table}>.<id:{key}>.<{target_field}>, <{new_value}>, <{old_value}>"
                )
            else:
                self.log_writer.free_write(f"#redo {t_id}_{t}")
            self.db.update_table(table, key_field, key, target_field, value)
        # Insert/Delete record: (table, key_field, key, old_tuple, _).
        elif len(t) == 5:
            table, key_field, key, old_tuple, _ = t
            if isinstance(old_tuple, str):
                if old_tuple != 'None':
                    # Strip surrounding brackets and any trailing comma
                    # from the serialized tuple text.
                    old_tuple = old_tuple[1:-1]
                    if old_tuple.endswith(","):
                        old_tuple = old_tuple[:-1]
            if value_type == 'new':
                # Applying the "new" state: the row is absent afterwards.
                if is_undo:
                    self.log_writer.free_write(
                        f"{t_id}, <{table}>.<{key_field}:{key}>, <{old_tuple}>, <None>"
                    )
                else:
                    self.log_writer.free_write(f"#redo {t_id}_{t}")
                self.db.delete_table(table, key_field, key)
            else:
                # Applying the "old" state: re-insert unless it never existed.
                if old_tuple != 'None':
                    if is_undo:
                        self.log_writer.free_write(
                            f"{t_id}, <{table}>.<{key_field}:{key}>, <None>, <{old_tuple}>"
                        )
                    else:
                        self.log_writer.free_write(f"#redo {t_id}_{t}")
                    self.db.insert_table(table, old_tuple)
def learn_episode(self, verbose = True):
    """Alternate episode collection and network updates, logging both.

    Each unit plays episode_num_per_unit episode(s), appends explored
    transitions to the replay buffer, then — once the buffer holds at
    least one batch — samples mini-batches and updates the shared
    network, writing per-update metrics to learn.csv.

    NOTE(review): indentation below was reconstructed from a collapsed
    source line; the nesting of the final break (while-level vs. inside
    the batch-full branch) is best-effort — verify against the original.
    """
    episode_num_per_unit = 1
    learn_num_per_unit = 1
    dst_base_dir = Path(self.__cfg.dst_dir_path)
    log_writer = None
    # All unit-level learning metrics go to a single CSV.
    all_log_writer = LogWriter(dst_base_dir.joinpath("learn.csv"))
    for trial in range(self.__cfg.episode_unit_num):
        # Periodically snapshot the network weights.
        if (trial + 1) % 64 == 0:
            weight_path = dst_base_dir.joinpath("weight", "param{}.bin".format(trial))
            if not weight_path.parent.exists():
                weight_path.parent.mkdir(parents = True)
            self.__agent.shared_nn.save(weight_path)
        explore = True
        for episode_cnt in range(episode_num_per_unit):
            # One play-log CSV per (trial, episode) pair.
            log_path = dst_base_dir.joinpath("play", "{}_{}.csv".format(trial, episode_cnt))
            if log_writer is not None:
                del log_writer
            log_writer = LogWriter(log_path)
            if verbose:
                loop_fun = tqdm
            else:
                loop_fun = lambda x : x
            for step, (info, new_es) in loop_fun(enumerate(self.__evolve(explore))):
                if explore:
                    # Store newly generated transitions in the replay buffer.
                    for new_e in new_es:
                        self.__experiences.append(new_e)
                out_infos = {}
                out_infos["episode"] = episode_cnt
                out_infos["step"] = step
                out_infos["t"] = step * self.__env.dt
                out_infos.update(info)
                out_infos["explore"] = explore
                log_writer.write(out_infos)
        # after episode
        # Skip learning until the buffer can fill at least one batch.
        if len(self.__experiences) < self.__batch_size:
            continue
        # after episode unit
        learn_cnt_per_unit = 0
        state_shape = self.__env.state_shape
        # Mini-batch buffers; state_shape[0] presumably equals the batch
        # size — TODO confirm against the environment definition.
        s = jnp.zeros(state_shape, dtype = jnp.float32)
        a = jnp.zeros((state_shape[0], EnAction.num), dtype = jnp.float32)
        r = jnp.zeros((state_shape[0], 1), dtype = jnp.float32)
        n_s = jnp.zeros(state_shape, dtype = jnp.float32)
        n_fin = jnp.zeros((state_shape[0], 1), dtype = jnp.float32)
        gamma = self.__env.gamma
        val = 0
        total_loss_q = []
        total_loss_pi = []
        while 1:
            # Sample one experience uniformly at random.
            self.__rng, rng = jrandom.split(self.__rng)
            e_i = int(jrandom.randint(rng, (1,), 0, len(self.__experiences)))
            e = self.__experiences[e_i]
            if not e.finished:
                s = s.at[val,:].set(e.observation[0])
                a = a.at[val,:].set(e.action)
                r = r.at[val].set(float(e.reward))
                n_s = n_s.at[val,:].set(e.next_state[0])
                n_fin = n_fin.at[val,:].set(float(e.next_finished))
                val += 1
                if val >= state_shape[0]:
                    # Batch full: one gradient update on the shared network.
                    q_learn_cnt, p_learn_cnt, temperature, loss_val_qs, loss_val_pi, loss_balances = self.__agent.shared_nn.update(gamma, s, a, r, n_s, n_fin)
                    all_info = {}
                    all_info["trial"] = int(trial)
                    all_info["episode_num_per_unit"] = int(episode_num_per_unit)
                    all_info["episode"] = int(episode_cnt)
                    all_info["q_learn_cnt"] = int(q_learn_cnt)
                    #all_info["p_learn_cnt"] = int(p_learn_cnt)
                    all_info["temperature"] = float(temperature)
                    for _i, loss_val_q in enumerate(loss_val_qs):
                        all_info["loss_val_q{}".format(_i)] = float(loss_val_q)
                    all_info["loss_val_pi"] = float(loss_val_pi)
                    #for _i, loss_balance in enumerate(loss_balances):
                    #    all_info["loss_balance{}".format(_i)] = float(loss_balance)
                    all_log_writer.write(all_info)
                    if verbose:
                        for value in all_info.values():
                            if isinstance(value, float):
                                print("{:.3f}".format(value), end = ",")
                            else:
                                print(value, end = ",")
                        print()
                    # NOTE(review): loss_val_q is whatever value the
                    # enumerate loop above left behind (the last Q loss) —
                    # confirm this is the intended metric.
                    total_loss_q.append(loss_val_q)
                    total_loss_pi.append(loss_val_pi)
                    val = 0
                    learn_cnt_per_unit += 1
            # Stop after enough updates for this unit.
            if (learn_cnt_per_unit >= min(learn_num_per_unit, len(self.__experiences) // self.__batch_size)):
                break