def __init__(self, x_dim, h_dim, z_dim, pred=False):
    # Build a two-layer recurrent-GCN stack: x_dim -> h_dim -> z_dim.
    # Why do they insist on doing it this way. Fixing it
    args = SN(feats_per_node=x_dim, layer_1_feats=h_dim, layer_2_feats=z_dim)
    # RReLU is default in their experiments, keeping it here
    act = torch.nn.RReLU()
    super().__init__(args, act)
    # Doesn't do anything, but makes training this and VGRNN consistent
    self.pred = pred  # So my method signature works with copy/pasted code
    feats = [x_dim, h_dim, z_dim]  # Rewriting with updated GRCU layer
    self.GRCU_layers = []
    # NOTE(review): overwriting nn.Module's internal `_parameters` with a
    # ParameterList is fragile -- confirm state_dict()/optimizer still see
    # every layer's parameters.
    self._parameters = nn.ParameterList()
    for i in range(1, len(feats)):
        # One GRCU per consecutive (in_feats, out_feats) pair.
        GRCU_args = SN(in_feats=feats[i - 1],
                       out_feats=feats[i],
                       activation=act)
        grcu_i = Sparse_GRCU_H(GRCU_args)
        #print (i,'grcu_i', grcu_i)
        # assumes self.device was set by the parent __init__ -- TODO confirm
        self.GRCU_layers.append(grcu_i.to(self.device))
        self._parameters.extend(list(self.GRCU_layers[-1].parameters()))
def load_config():
    """Populate the module-level ``cfg`` with environment, agent, and LQR settings."""
    # Environment integration settings
    cfg.env_kwargs = SN(dt=0.01, max_t=40)

    # Tune the shared agent hyper-parameters, then clone them per agent type.
    agents.load_config()
    common = agents.cfg.CommonAgent
    common.memory_len = 4000
    common.batch_size = 2000
    common.train_epoch = 100
    common.train_start = 20
    common.train_period = 2
    agents.cfg.SQLAgent = SN(**vars(common))
    agents.cfg.KLMAgent = SN(**vars(common))

    # LQR weighting matrices and initial gain
    cfg.Q = np.diag([10, 10, 1, 10])
    cfg.R = np.diag([1000, 1])
    cfg.F = -1 * np.eye(2)
    cfg.Qb = np.diag([1, 1, 1, 10])
    cfg.Rb = np.diag([1000, 1])
    cfg.K_init = np.zeros((2, 4))

    # Test-run output files and the initial state perturbation
    cfg.test = SN(
        dataname_learnt="test-learnt-env.h5",
        dataname_lqr="test-lqr-env.h5",
        initial_perturb=np.vstack((3, 0.1, 0.1, 0.1)),
    )
def load_config():
    """Fill ``cfg`` with environment, agent, and LQR settings for this experiment."""
    cfg.env_kwargs = SN(dt=0.01, max_t=20)

    # Agent settings live under cfg.Agent; tune the shared block then clone it.
    agents.load_config()
    cfg.Agent = agents.cfg
    common = cfg.Agent.CommonAgent
    common.memory_len = 2000
    common.batch_size = 1000
    common.train_epoch = 20
    common.train_start = 10
    common.train_period = 3
    cfg.Agent.SQLAgent = SN(**vars(common))
    cfg.Agent.KLMAgent = SN(**vars(common))

    # LQR weights and initial gain
    cfg.Q = np.diag([10, 10, 1])
    cfg.R = np.diag([10])
    cfg.F = -1 * np.eye(1)
    cfg.Qb = np.diag([1, 1, 1])
    cfg.Rb = np.diag([1])
    cfg.K_init = -5 * np.ones((1, 3))
def test(trainpath, savepath):
    """Replay the last trained gain from *trainpath*, logging the run to *savepath*."""

    # NOTE(review): this callback is defined but never attached or called in
    # this function -- confirm whether it is dead code.
    def logger_callback(self, t):
        state_dict = self.observe_dict()
        plant_states = self.plant.observe_list()
        dx, du, xdot, u = self._calc(t, plant_states)
        return dict(t=t, dx=dx, du=du, xdot=xdot, u=u, **state_dict)

    # Overlay test-specific settings onto the base config; on key clashes the
    # dict-union keeps the cfg.test value.
    cfg.quad.init = SN(**vars(cfg.quad.init) | vars(cfg.test.init))
    cfg.env.kwargs = SN(**vars(cfg.env.kwargs) | vars(cfg.test.kwargs))

    env = Env()
    env.logger = fym.logging.Logger(savepath)

    # Use the final gain K from the training log.
    train_data = fym.logging.load(trainpath)
    env.controller = NoisyLQR(train_data["K"][-1], env.xtrim, env.utrim)
    env.logger.set_info(cfg=cfg)

    env.reset()
    done = False
    while not done:
        env.render()
        done = env.step()
    env.close()
def process(self):
    """Fetch the Proof-of-Humanity profile and cache its photo and video bytes."""
    # NOTE(review): no HTTP status checks -- a failed request will surface
    # later as a JSON or key error.
    profile_resp = requests.get('https://api.poh.dev/profiles/' + self.profile)
    response = json.loads(profile_resp.text)

    photo_url = response['photo']
    self.photo = SN(url=photo_url, data=requests.get(photo_url).content)

    video_url = response['video']
    self.video = SN(url=video_url, data=requests.get(video_url).content)
def copy(self):
    """Return an independent copy: scalars shared, mutable containers duplicated."""
    clone = HDF5Options()
    clone.compression_threshold = self.compression_threshold
    clone.libver = self.libver
    clone.driver = self.driver
    clone.encoding = self.encoding
    # Duplicate the dict and namespaces so the copy can diverge safely.
    clone.kwds = dict(**self.kwds)
    clone.compressed_dataset = SN(**vars(self.compressed_dataset))
    clone.dataset = SN(**vars(self.dataset))
    return clone
def load_config():
    """Define the shared agent hyper-parameters and clone them per agent type."""
    cfg.CommonAgent = SN(
        memory_len=2000,
        batch_size=400,
        train_epoch=100,
        train_start=6,
        train_period=3,
    )
    # Each concrete agent gets an independent copy of the common settings.
    cfg.SQLAgent = SN(**vars(cfg.CommonAgent))
    cfg.KLMAgent = SN(**vars(cfg.CommonAgent))
def load_config():
    """Populate ``cfg`` for the quadrotor MRAC / HMRAC / FECMRAC runs."""
    # Simulation setting
    cfg.dir = "data"
    cfg.final_time = 30
    cfg.multirotor_config = "quadrotor"

    # Reference model: decoupled stable first-order channels.
    cfg.xm_init = np.zeros((3, 1))
    cfg.Am = np.diag([-7, -7, -4])
    cfg.Bm = np.diag([7, 7, 4])
    cfg.Q_lyap = np.eye(3)
    cfg.R = 1 * np.eye(4)
    cfg.Rinv = np.linalg.inv(cfg.R)
    cfg.W_init = np.zeros((12, 4))

    # MRAC
    # (alternative integrator: solver="odeint", dt=5, ode_step_len=int(5/0.01))
    cfg.MRAC = SN()
    cfg.MRAC.env_kwargs = dict(solver="rk4", dt=1e-3, max_t=cfg.final_time)
    cfg.MRAC.Gamma = 1e1  # previously 8e1

    # H-Modification MRAC (same integrator settings as MRAC)
    cfg.HMRAC = SN()
    cfg.HMRAC.env_kwargs = dict(solver="rk4", dt=1e-3, max_t=cfg.final_time)
    cfg.HMRAC.Gamma = 1e1

    # FECMRAC
    cfg.FECMRAC = SN(
        Gamma=1e1,
        kL=0.1,
        kU=10,
        theta=0.1,
        tauf=1e-3,
        threshold=1e-10,
    )
def load_config():
    """Populate ``cfg`` for the MRAC / BECMRAC / FECMRAC comparison."""
    # Reference dynamics and input/command matrices
    cfg.Am = np.array([[0, 1, 0], [-15.8, -5.6, -17.3], [1, 0, 0]])
    cfg.B = np.array([[0, 1, 0]]).T
    cfg.Br = np.array([[0, 0, -1]]).T
    cfg.x_init = np.vstack((0.3, 0, 0))
    cfg.Q_lyap = np.eye(3)
    # Solve the Lyapunov equation Am^T P + P Am = -Q_lyap.
    cfg.P = scipy.linalg.solve_lyapunov(cfg.Am.T, -cfg.Q_lyap)
    cfg.final_time = 50

    # True (unknown) weight vector of the matched uncertainty
    cfg.Wcirc = np.vstack((-18.59521, 15.162375, -62.45153, 9.54708, 21.45291))

    # Sinusoidal residual term
    cfg.vareps = SN(freq=5, amp=2, offset=0)

    cfg.tauf = 1e-3
    cfg.Gamma1 = 1e4
    cfg.threshold = 1e-10
    cfg.bF = 5000
    cfg.bh = 1500
    cfg.LF_speed = 0.05
    cfg.Lh_speed = 0.05
    cfg.LF_init = 10
    cfg.Lh_init = 10
    cfg.dir = "data"

    # MRAC
    cfg.MRAC = SN()
    cfg.MRAC.env_kwargs = dict(solver="odeint", dt=20, max_t=cfg.final_time,
                               ode_step_len=int(20 / 0.01))

    # BECMRAC
    cfg.BECMRAC = SN()
    cfg.BECMRAC.env_kwargs = dict(solver="rk4", dt=0.01,
                                  max_t=cfg.final_time, ode_step_len=10)
    cfg.BECMRAC.Gamma2 = 1

    # FECMRAC
    cfg.FECMRAC = SN()
    cfg.FECMRAC.env_kwargs = dict(solver="rk4", dt=1e-2,
                                  max_t=cfg.final_time, ode_step_len=10)
    cfg.FECMRAC.Gamma2 = 500
    cfg.FECMRAC.kL = 0.1
    cfg.FECMRAC.kU = 10
    cfg.FECMRAC.theta = 0.1
def location_repository():
    """Build an in-memory location repository pre-loaded with three fixtures."""
    class MockLocationRepository(MemoryRepository):
        pass

    fixtures = {
        'L001': SN(id='L001', name='Campanario', country="Colombia"),
        'L002': SN(id='L002', name='Castellana', country="España"),
        'L003': SN(id='L003', name='Terraplaza', country="Colombia"),
    }
    return MockLocationRepository().load({'default': fixtures})
def __init__(self): self.compression_threshold = 300 # bytes self.libver = 'latest' self.driver = None self.kwds = dict() self.encoding = 'utf-8' self.compressed_dataset = SN(compression='gzip', chunks=True, fletcher32=True, shuffle=True, compression_opts=9) self.dataset = SN(compression=None, chunks=None, fletcher32=False, shuffle=False)
def __init__(self, scheme, groups, batch_size, max_seq_length, data=None,
             preprocess=None, device="cpu", out_device=None):
    """Episode-batch container.

    Holds transition-level and episode-level storage for *batch_size*
    episodes of up to *max_seq_length* steps, laid out according to
    *scheme* and *groups*.
    """
    self.scheme = scheme.copy()
    self.groups = groups
    self.batch_size = batch_size
    self.max_seq_length = max_seq_length
    self.preprocess = {} if preprocess is None else preprocess
    self.device = device
    # Outputs default to the storage device unless overridden.
    self.out_device = device if out_device is None else out_device

    if data is None:
        # Fresh storage: allocate per-field containers via _setup_data.
        self.data = SN(transition_data={}, episode_data={})
        self._setup_data(self.scheme, self.groups, batch_size,
                         max_seq_length, self.preprocess)
    else:
        # Wrap pre-existing storage as-is.
        self.data = data
def __init__(self, lags=None, nhidden=2, nepochs=500, batch_size=8, ninit=3,
             nval=4, nproc=1, cuda=False, seed=42, optimizer_opts=None,
             scale_range=(-1, 1)):
    """Ensemble variant of NN_ICM.

    Defaults mirror NN_ICM: *lags* defaults to ``[1, 2]`` and
    *optimizer_opts* to ``SN(optimizer='Adam', lr=1e-2, weight_decay=1e-8)``.
    """
    # Fix: the original used mutable default arguments (a list and an SN),
    # which are shared across all instances; build fresh defaults per call.
    if lags is None:
        lags = [1, 2]
    if optimizer_opts is None:
        optimizer_opts = SN(optimizer='Adam', lr=1e-2, weight_decay=1e-8)

    super(NN_ICM_ENSA, self).__init__(
        lags=lags, nhidden=nhidden, nepochs=nepochs, batch_size=batch_size,
        ninit=ninit, nval=nval, nproc=nproc, cuda=cuda, seed=seed,
        optimizer_opts=optimizer_opts, scale_range=scale_range)

    # Hyper-parameter grid for the linear-regression stage.
    self.linear_reg_kvargs = {
        'param_grid': {'alpha': list(np.linspace(0, 1, 11))},
        'cv': 10,
    }
    self.icm_vars = []
    # NOTE(review): "untis" looks like a typo for "units", but other code may
    # reference this attribute name, so it is kept unchanged.
    self.icm_untis = []
class WithReferenceAndDisplay(FhirAbstractBaseMixin, FhirBaseModelMixin):
    # Minimal backing "model": just the fields the ReferenceAttribute
    # descriptor reads (presumably a test fixture -- confirm usage).
    _model = SN(ref_id=12, _contained_names=[])
    # force_display=True: the rendered reference should include a display
    # value -- assumed from the flag name; verify against ReferenceAttribute.
    ref = ReferenceAttribute(ReferenceTarget, "ref_id", "ref", force_display=True)
def load_config():
    """Populate ``cfg`` for the Q-learning LQR experiment."""
    cfg.dir = "data"
    cfg.final_time = 40

    # System matrices (alternatives kept for reference):
    # cfg.A = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])
    # cfg.A = np.array([[2, 1, 0], [0, 1, 0], [1, 0, 3]])
    cfg.A = np.array([[0, 1, 0], [0, -2, -1], [1, 0, -1]])
    cfg.B = np.array([
        [0, 1],
        [1, 0],
        [0, 0],
    ])

    # Stable filter matrix F = Fp - Kf via an auxiliary LQR design.
    m = cfg.B.shape[1]
    Fp = np.array([[-1, 1], [0, 1]])
    Kf, *_ = LQR.clqr(Fp, np.eye(m), np.eye(m), np.eye(m))
    cfg.F = Fp - Kf
    # cfg.F = -1 * np.eye(2)

    # Cost weights and initial state
    cfg.Q = np.diag([1, 10, 10])
    cfg.R = np.diag([1, 10])
    cfg.x_init = np.vstack((0.3, 0, 0))

    # Q-learner settings
    # (alternative integrator: solver="odeint", dt=20, ode_step_len=int(20/0.01))
    cfg.QLearner = SN()
    cfg.QLearner.env_kwargs = dict(max_t=cfg.final_time, solver="rk4", dt=0.001)
    cfg.QLearner.memory_len = 10000
    cfg.QLearner.batch_size = 400
    cfg.QLearner.train_epoch = 10

    calc_config()
def _new_data_sn(): # Returns empty batch data new_data = SN() new_data.transition_data = {} new_data.episode_data = { } # ! This is only used if episodes are of constant length ! return new_data
def get_pretrained(modelpath, return_rep=True):
    """Load a trained ShrinkSchNet from *modelpath* and return its representation module.

    NOTE(review): *return_rep* is accepted but never consulted -- the
    representation is always returned; kept for interface compatibility.
    """
    # Fix: json.load(open(...)) leaked the file handle; use a context manager.
    with open(modelpath + '/args.json', 'r') as fh:
        args = SN(**json.load(fh))

    representation = ShrinkSchNet(args.features, args.features,
                                  args.interactions, args.cutoff,
                                  args.num_gaussians,
                                  shrink_distances=args.shrink_distances,
                                  edgeupdate_block=None)
    atomwise_output = SCReadout(args.features, args.features,
                                args.readout_layers, mean=None)

    device = torch.device('cuda')
    model = spk.atomistic.AtomisticModel(representation, atomwise_output).to(device)
    model.load_state_dict(torch.load(os.path.join(modelpath, 'best_model')))
    # First module yielded by modules() is the model itself.
    return next(model.modules()).representation
def runing(config, _log, game_name):
    """Entry point: sanity-check the config, set up logging/tensorboard, and
    launch sequential training for *game_name*.

    (The name "runing" [sic] is kept so external callers keep working.)
    """
    # Load and sanity-check the args from the config file.
    _config = args_sanity_check(config, _log)
    # Fix: the original built args from the unchecked `config`, discarding
    # the sanity-checked result (the sibling `run` uses the checked one).
    args = SN(**_config)
    args.device = "cuda" if args.use_cuda else "cpu"
    env_name = get_env_name(game_name)

    # Enable logging.
    logger = Logger(_log)

    # Unique run token used for tensorboard/file naming.
    unique_token = "{}__{}".format(
        args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    args.unique_token = unique_token  # fix: was assigned twice

    # Set up tensorboard if requested.
    if args.use_tensorboard:
        tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))),
                                     "results", "tb_logs/{}".format(game_name))
        tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
        logger.setup_tb(tb_exp_direc)

    # Start the experiment.
    run_sequential(args, logger, env_name)
class WithReferenceAndContained(FhirAbstractBaseMixin, FhirBaseModelMixin):
    # Minimal backing "model" for the ReferenceAttribute descriptor.
    # _contained_names lists attributes treated as contained resources;
    # _refcount/_contained_items presumably track contained-resource state --
    # confirm against the mixin implementations.
    _model = SN(ref_id=12, _contained_names=["ref"], _refcount=0, _contained_items=[])
    ref = ReferenceAttribute(ReferenceTarget, "ref_id", "ref")
class EmbeddedAttributeModel:
    # Backing namespace read by the EmbeddedAttribute descriptors below.
    _model = SN(emptyRelation=None, relation=relationMock, a_list=[relationMock, relationMock2])
    empty = EmbeddedAttribute('emptyRelation', type='fakeClass')
    first = EmbeddedAttribute('relation', type='fakeClass')
    many = EmbeddedAttribute('a_list', type='fakeClass')
    # Same backing field as `first`; the second positional argument
    # presumably names the settable target -- confirm against EmbeddedAttribute.
    settable = EmbeddedAttribute('relation', 'relation', type='fakeClass')
def get_info():
    """queue info - size, job ids."""
    # TODO: use a specific queue
    sq = Queue("", connection=CONN)
    info = SN(name="queue_info", size=len(sq), job_ids=sq.job_ids)
    # Return a plain dict so the result is JSON-serializable.
    return vars(info)
def parse_function(line):
    """Parse a ``def`` line into a namespace (name, args, lvl), or None.

    *lvl* is the count of leading whitespace characters; *args* are the raw
    parameter tokens (default values are split into separate tokens).
    """
    match = rx(r'^(\s*)def\s*([\w\d_]+)\s*\(([^\()]*)\)').match(line)
    if not match:
        return
    indent, func_name, arg_str = match.groups()
    # Split on whitespace, commas, and '=' so "a, b=1" -> ["a", "b", "1"].
    arg_tokens = rx(r'[\s\,=]+').split(arg_str)
    return SN(name=func_name, args=arg_tokens, lvl=len(indent))
def get_data(datadir):
    """Load the env/agent logs found in *datadir* into one namespace."""
    data = SN()
    # The env log also carries the run's cfg in its info section.
    env_path = list(datadir.glob("*env.h5"))[0]
    data.env, data.info = fym.logging.load(env_path, with_info=True)
    data.agent = fym.logging.load(list(datadir.glob("*agent.h5"))[0])
    data.style = dict(label=data.info["cfg"].label)
    return data
def get_data(datadir, cnt):
    """Load the *cnt*-prefixed env/agent logs from *datadir* into one namespace."""
    data = SN()
    data.env, data.info = fym.logging.load(Path(datadir, cnt + "-env.h5"),
                                           with_info=True)
    data.agent = fym.logging.load(Path(datadir, cnt + "-agent.h5"))
    data.style = dict(label=data.info["cfg"].label)
    return data
def run(_run, _config, _log):
    """Main experiment entry point, invoked by the sacred main function.

    Sets up logging and (optionally) tensorboard, runs sequential training,
    then joins leftover worker threads and force-exits the process.
    """
    # Apply defaults/fixes to the config (cuda availability, batch sizes,
    # etc.), then expose it as attribute-style args.
    _config = args_sanity_check(_config, _log)
    args = SN(**_config)
    args.device = "cuda" if args.use_cuda else "cpu"

    # Logging
    logger = Logger(_log)
    _log.info("打印实验参数: ")
    experiment_params = pprint.pformat(_config, indent=4, width=1)
    _log.info("\n\n" + experiment_params + "\n")

    # Unique run token, e.g. 'qmix_env=8_adam_td_lambda__2021-04-28_09-40-29'
    unique_token = "{}__{}".format(
        args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    args.unique_token = unique_token

    # Tensorboard (optional)
    if args.use_tensorboard:
        tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))),
                                     "results", "tb_logs")
        tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
        logger.setup_tb(tb_exp_direc)

    # Sacred manages the run logs by default.
    logger.setup_sacred(_run)

    # Run and train.
    run_sequential(args=args, logger=logger)

    # Clean up after finishing
    print("退出主程序")
    print("停止所有线程")
    for t in threading.enumerate():
        if t.name != "MainThread":
            print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
            t.join(timeout=1)
            print("Thread joined")
    print("退出 script")
    # Force the interpreter to exit even if non-daemon threads remain.
    os._exit(os.EX_OK)
class AttributeWithCallables:
    # Backing namespace read/written by the getter/setter below.
    # NOTE(review): this is a class attribute, so it is shared across all
    # instances -- confirm that is intended.
    _model = SN(name="my_name")

    def get_name(self):
        # Read the current name from the backing model.
        return self._model.name

    def set_name(self, value):
        # Write the new name through to the backing model.
        self._model.name = value

    # Attribute wired with explicit getter/setter callables rather than
    # attribute-name strings.
    name = Attribute(get_name, set_name)
def get_data(datadir):
    """Load the BECMRAC env/agent logs and build a label from its bound settings."""
    data = SN()
    env, info = fym.logging.load(Path(datadir, "becmrac-env.h5"),
                                 with_info=True)
    data.env = env
    data.info = info
    data.agent = fym.logging.load(Path(datadir, "becmrac-agent.h5"))
    # Plot label shows the b_h / b_F bounds used for this run.
    data.style = dict(
        label=rf"$b_h = {info['cfg'].bh}$, $b_F = {info['cfg'].bF}$")
    return data
def get_data(path, style=None, with_info=False):
    """Load a fym log at *path* into a namespace.

    Parameters
    ----------
    path : log file path handed to ``fym.logging.load``.
    style : optional base plot-style dict; when *with_info* is set, a cfg
        label is merged in. Defaults to an empty dict.
    with_info : also load the run's info section and attach it.
    """
    # Fix: `style=dict()` was a mutable default shared across calls -- and on
    # the with_info=False path that shared dict was handed out and could be
    # mutated by callers. Build a fresh dict per call instead.
    if style is None:
        style = dict()

    dataset = SN()
    if with_info:
        data, info = fym.logging.load(path, with_info=with_info)
        dataset.info = info
        # Dict-union keeps the cfg label on key clashes.
        dataset.style = style | dict(label=info["cfg"].label)
    else:
        data = fym.logging.load(path)
        dataset.style = style
    dataset.data = data
    return dataset
def __init__(self, lags=None, nhidden=2, nepochs=500, batch_size=8, ninit=3,
             nval=4, nproc=1, cuda=False, seed=42, optimizer_opts=None,
             scale_range=(-1, 1)):
    """Neural-network ICM base model.

    Parameters
    ----------
    lags : list of lag indices, or an int n meaning lags 1..n
        (default ``[1, 2]``).
    optimizer_opts : namespace with optimizer name and kwargs
        (default Adam, lr=1e-2, weight_decay=1e-8).

    Raises
    ------
    ValueError : for non-positive lags or negative nhidden.
    """
    super(NN_ICM, self).__init__()

    # Fix: the original used mutable default arguments (a list and an SN)
    # shared across all instances; build fresh per-instance defaults.
    # (Explicitly passing lags=None previously crashed at range(); it now
    # falls back to the default lags, a backward-compatible generalization.)
    if lags is None:
        lags = [1, 2]
    if optimizer_opts is None:
        optimizer_opts = SN(optimizer='Adam', lr=1e-2, weight_decay=1e-8)

    # Input validation
    if isinstance(lags, numbers.Number) and lags < 1:
        raise ValueError('number of lags should be > 0')
    elif isinstance(lags, list) and len(lags) == 0:
        raise ValueError('number of lags should be > 0')
    if nhidden is not None and nhidden < 0:
        raise ValueError('number of hidden units should be >= 0')

    self.lags = lags
    self.nhidden = nhidden
    self.nepochs = nepochs
    self.batch_size = batch_size
    self.nval = nval
    self.ninit = ninit
    self.nproc = nproc
    self.cuda = cuda
    self.seed = seed
    self.optimizer_opts = optimizer_opts
    self.scale_range = scale_range
    self.nworkers = 0

    # metric for cross validation; tensors are converted to CPU floats first
    fnc = partial(torch.as_tensor, dtype=torch.float,
                  device=torch.device('cpu'))
    self.metric = metrics.get_metric('mae', output_transform=fnc,
                                     target_transform=fnc)

    # Normalize lags to an explicit list of lag indices.
    if isinstance(lags, list):
        self.v_lags = lags
    else:
        self.v_lags = list(range(1, lags + 1))

    # Filled in later by fit/setup code.
    self.model = None
    self.criterion = None
    self.optimizer = None
    self._idx_coherent = None
    self._idx_noncoherent = None
def get_config(algorithm):
    """Load ``config/<algorithm>.yaml`` and return its contents as a namespace.

    Raises AssertionError if the YAML file cannot be parsed.
    """
    config_dir = '{0}/{1}'
    with open(config_dir.format('config', "{}.yaml".format(algorithm)), "r") as f:
        try:
            # Fix: yaml.load without an explicit Loader is deprecated and can
            # construct arbitrary objects; the config is plain data, so use
            # safe_load.
            config = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            # Fix: the old message blamed "default.yaml" regardless of which
            # file actually failed to parse.
            assert False, "{}.yaml error: {}".format(algorithm, exc)
    return SN(**config)