Code example #1
        def __init__(self, ds, device, *args, nz=5, nv=5, reduced_decoder_input_dim=2, decoder_opts=None,
                     inverse_decoder_opts=None, **kwargs):
            self.reduced_decoder_input_dim = reduced_decoder_input_dim
            self.x_extractor = torch.nn.Linear(ds.config.nx, self.reduced_decoder_input_dim).to(device=device,
                                                                                                dtype=torch.double)
            opts = {'h_units': (16, 32)}
            if decoder_opts:
                opts.update(decoder_opts)
            config = load_data.DataConfig()
            config.nx = self.reduced_decoder_input_dim
            config.ny = nv * ds.config.nx
            # h_\rho
            self.extracted_linear_decoder = model.DeterministicUser(
                make.make_sequential_network(config, **opts).to(device=device))

            opts = {'h_units': (16, 32)}
            if inverse_decoder_opts:
                opts.update(inverse_decoder_opts)
            config = load_data.DataConfig()
            config.nx = self.reduced_decoder_input_dim
            config.ny = nv * ds.config.nx
            # outputs a state-dependent linear transformation from v to dx (linear in v)
            # v C(x) = dx --> v = C(x)^{-1} dx, so the map works in both directions
            # h_\eta
            self.extracted_inverse_linear_decoder_producer = model.DeterministicUser(
                make.make_sequential_network(config, **opts).to(device=device))

            super().__init__(ds, device, *args, nz=nz, nv=nv, **kwargs)
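
The decoder networks above emit a flattened nv-by-nx matrix per sample. A minimal sketch of how such a matrix output could be reshaped and applied, using a stand-in module and hypothetical dimensions (none of these names come from the snippet):

import torch

nx, nv, B = 7, 5, 32  # hypothetical state dim, latent dim, batch size
x = torch.randn(B, nx, dtype=torch.double)
v = torch.randn(B, nv, dtype=torch.double)

# stand-in for the h_rho network: nv * nx outputs per sample, reshaped to C(x)
h_rho = torch.nn.Linear(nx, nv * nx).to(dtype=torch.double)
C = h_rho(x).view(B, nv, nx)

# v C(x) = dx, batched: (B, 1, nv) @ (B, nv, nx) -> (B, 1, nx)
dx = (v.unsqueeze(1) @ C).squeeze(1)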
Code example #2
        def __init__(self, ds, device, model_opts=None, nz=5, nv=5, **kwargs):
            if model_opts is None:
                model_opts = {}
            # default values, overridden by any user-supplied model_opts
            opts = {'h_units': (16, 32)}
            opts.update(model_opts)

            # v is (dx, dy, dyaw) in the body frame, plus d_along
            # input is x, output is yaw
            self.yaw_selector = torch.nn.Linear(ds.config.nx, 1, bias=False).to(device=device, dtype=torch.double)
            self.true_yaw_param = torch.zeros(ds.config.nx, device=device, dtype=torch.double)
            self.true_yaw_param[2] = 1
            self.true_yaw_param = self.true_yaw_param.view(1, -1)  # to be consistent with weights
            # try starting at the true parameters
            # self.yaw_selector.weight.data = self.true_yaw_param + torch.randn_like(self.true_yaw_param)
            # self.yaw_selector.weight.requires_grad = False

            # input to local model is z, output is v
            config = load_data.DataConfig()
            config.nx = nz
            config.ny = nv * nz  # matrix output
            self.linear_model_producer = model.DeterministicUser(
                make.make_sequential_network(config, **opts).to(device=device))
            name = kwargs.pop('name', '')
            LearnLinearDynamicsTransform.__init__(self, ds, nz, nv,
                                                  name='{}_{}'.format(self._name_prefix(), name),
                                                  **kwargs)
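
As a sanity check on the yaw selector: with its weights set to true_yaw_param it reduces to picking out component 2 of the state. A small sketch (nx is hypothetical):

import torch

nx = 5  # hypothetical state dimension
x = torch.randn(8, nx, dtype=torch.double)
yaw_selector = torch.nn.Linear(nx, 1, bias=False).to(dtype=torch.double)
# set the weights to true_yaw_param, i.e. a one-hot vector selecting yaw
yaw_selector.weight.data.zero_()
yaw_selector.weight.data[0, 2] = 1
assert torch.allclose(yaw_selector(x), x[:, 2:3])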
Code example #3
    @classmethod
    def learn_model(cls, use_tsf, seed=1, name="", train_epochs=500, batch_N=500, rep_name=None):
        d, env, config, ds = cls.free_space_env_init(seed)

        _, tsf_name, _ = update_ds_with_transform(env, ds, use_tsf, cls.pre_invariant_preprocessor, rep_name=rep_name)
        mw = TranslationNetworkWrapper(model.DeterministicUser(make.make_sequential_network(config).to(device=d)), ds,
                                       name="{}_{}{}_{}".format(cls.dynamics_prefix(), tsf_name, name, seed))
        mw.learn_model(train_epochs, batch_N=batch_N)
Code example #4
        def __init__(self, ds, device, nz=5, nv=5, mse_weight=0, reconstruction_weight=1, match_weight=1,
                     encoder_opts=None,
                     decoder_opts=None, dynamics_opts=None, **kwargs):
            self.mse_weight = mse_weight
            self.reconstruction_weight = reconstruction_weight
            self.match_weight = match_weight
            # TODO try penalizing mutual information between xu and z, and v and dx?
            # create encoder xu -> z
            opts = {'h_units': (16, 32)}
            if encoder_opts:
                opts.update(encoder_opts)
            config = load_data.DataConfig()
            config.nx = ds.config.nx + ds.config.nu
            config.ny = nz
            self.encoder = model.DeterministicUser(
                make.make_sequential_network(config, **opts).to(device=device))

            # TODO try extracting from x
            # create v,x -> dx
            opts = {'h_units': (16, 32)}
            if decoder_opts:
                opts.update(decoder_opts)
            config = load_data.DataConfig()
            config.nx = ds.config.nx
            config.ny = nv * ds.config.nx  # matrix output (original nx, ignore sincos)
            # outputs a state-dependent linear transformation from v to dx (linear in v)
            # v C(x) = dx --> v = C(x)^{-1} dx, so the map works in both directions
            self.linear_decoder_producer = model.DeterministicUser(
                make.make_sequential_network(config, **opts).to(device=device))

            # create the dynamics network (it shouldn't need high capacity, since dynamics in the transformed space should be simple)
            # z -> v
            opts = {'h_units': (16, 16)}
            if dynamics_opts:
                opts.update(dynamics_opts)
            config = load_data.DataConfig()
            config.nx = nz
            config.ny = nv
            self.dynamics = model.DeterministicUser(
                make.make_sequential_network(config, **opts).to(device=device))
            name = kwargs.pop('name', '')
            super().__init__(ds, nz=nz, nv=nv, name='{}_{}'.format(self._name_prefix(), name), **kwargs)
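
The three networks compose into a single forward pass, xu -> z -> v -> dx. A minimal sketch with stand-in linear modules in place of the DeterministicUser networks (dimensions follow the defaults above; the composition is inferred from the comments):

import torch

nx, nu, nz, nv, B = 5, 2, 5, 5, 16
x, u = torch.randn(B, nx), torch.randn(B, nu)

encoder = torch.nn.Linear(nx + nu, nz)           # stand-in for self.encoder
dynamics = torch.nn.Linear(nz, nv)               # stand-in for self.dynamics
decoder_producer = torch.nn.Linear(nx, nv * nx)  # stand-in for self.linear_decoder_producer

z = encoder(torch.cat((x, u), dim=1))            # xu -> z
v = dynamics(z)                                  # z -> v (simple latent dynamics)
C = decoder_producer(x).view(B, nv, nx)          # state-dependent linear map
dx = (v.unsqueeze(1) @ C).squeeze(1)             # v C(x) = dx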
Code example #5
 def __init__(self, ds, device, use_sincos_angle=False, nv=5, **kwargs):
     # replace the angle with its sin and cos
     self.use_sincos_angle = use_sincos_angle
     # input to the producer is x; output is the matrix that multiplies v to yield dx
     config = load_data.DataConfig()
     config.nx = ds.config.nx + (1 if use_sincos_angle else 0)
     config.ny = nv * ds.config.nx  # matrix output (original nx, ignore sincos)
     # outputs a state-dependent linear transformation from v to dx (linear in v)
     self.linear_decoder_producer = model.DeterministicUser(
         make.make_sequential_network(config, h_units=(16, 32)).to(device=device))
     super().__init__(ds, device, nv=nv, **kwargs)
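
A sketch of the sin/cos replacement that use_sincos_angle implies, assuming the angle sits at index 2 of the state (the index is hypothetical; the snippet only fixes the resulting input size at ds.config.nx + 1):

import torch

x = torch.randn(4, 5)  # hypothetical batch of states
i = 2                  # hypothetical angle index
x_sincos = torch.cat((x[:, :i], torch.sin(x[:, i:i + 1]),
                      torch.cos(x[:, i:i + 1]), x[:, i + 1:]), dim=1)
assert x_sincos.shape[1] == x.shape[1] + 1  # matches config.nx = ds.config.nx + 1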
Code example #6
        def __init__(self, ds, device, *args, nz=5, nv=5, reduced_decoder_input_dim=2, **kwargs):
            self.reduced_decoder_input_dim = reduced_decoder_input_dim
            self.x_extractor = torch.nn.Linear(ds.config.nx, self.reduced_decoder_input_dim).to(device=device,
                                                                                                dtype=torch.double)

            config = load_data.DataConfig()
            config.nx = self.reduced_decoder_input_dim
            config.ny = nv * ds.config.nx
            self.partial_decoder = model.DeterministicUser(
                make.make_sequential_network(config, h_units=(16, 32)).to(device=device))

            super().__init__(ds, device, *args, nz=nz, nv=nv, **kwargs)
Code example #7
 def __init__(self, ds, device, nv=5, inverse_decoder_opts=None, **kwargs):
     # create v,x -> dx
     opts = {'h_units': (16, 32)}
     if inverse_decoder_opts:
         opts.update(inverse_decoder_opts)
     config = load_data.DataConfig()
     config.nx = ds.config.nx
     config.ny = nv * ds.config.nx
     # outputs a state-dependent linear transformation from v to dx (linear in v)
     # v C(x) = dx --> v = C(x)^{-1} dx, so the map works in both directions
     self.inverse_linear_decoder_producer = model.DeterministicUser(
         make.make_sequential_network(config, **opts).to(device=device))
     super().__init__(ds, device, nv=nv, **kwargs)
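
The "both ways" in the comment can be realized with a batched linear solve whenever nv == nx, so that C(x) is square. A sketch with a stand-in, well-conditioned C:

import torch

B, n = 8, 5  # hypothetical batch size and nv == nx
C = torch.eye(n, dtype=torch.double) + 0.1 * torch.randn(B, n, n, dtype=torch.double)
dx = torch.randn(B, n, dtype=torch.double)

# v C(x) = dx  <=>  C(x)^T v^T = dx^T, solved per batch element
v = torch.linalg.solve(C.transpose(1, 2), dx.unsqueeze(2)).squeeze(2)
assert torch.allclose((v.unsqueeze(1) @ C).squeeze(1), dx)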
Code example #8
 def __init__(self, ds, device, dynamics_opts=None, **kwargs):
     # z = (x,u), v = dx
     opts = {'h_units': (32, 32)}
     if dynamics_opts:
         opts.update(dynamics_opts)
     config = load_data.DataConfig()
     nz = ds.config.input_dim()
     nv = ds.config.ny
     config.nx = nz
     config.ny = nv
     self.dynamics = model.DeterministicUser(make.make_sequential_network(config, **opts).to(device=device))
     name = kwargs.pop('name', '')
     super().__init__(ds, nz=nz, nv=nv, name='{}_{}'.format(self._name_prefix(), name), **kwargs)
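
Here the latent space is trivial: z is the raw (x, u) and v is dx, so all the capacity sits in the dynamics network. A stand-in to make the data flow explicit (dimensions hypothetical):

import torch

nx, nu = 5, 2
dynamics = torch.nn.Linear(nx + nu, nx)  # stand-in for the (32, 32) network
x, u = torch.randn(3, nx), torch.randn(3, nu)
dx = dynamics(torch.cat((x, u), dim=1))  # z = (x, u) -> v = dx directly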
Code example #9
    @classmethod
    def loaded_prior(cls, prior_class, ds, tsf_name, relearn_dynamics, seed=0):
        """Directly get loaded dynamics prior, training it if necessary on some datasource"""
        d = get_device()
        if prior_class is prior.NNPrior:
            mw = TranslationNetworkWrapper(
                model.DeterministicUser(make.make_sequential_network(ds.config).to(device=d)),
                ds, name="{}_{}_{}".format(cls.dynamics_prefix(), tsf_name, seed))

            train_epochs = 500
            pm = prior.NNPrior.from_data(mw, checkpoint=None if relearn_dynamics else mw.get_last_checkpoint(
                sort_by_time=False), train_epochs=train_epochs)
        elif prior_class is prior.PassthroughLatentDynamicsPrior:
            pm = prior.PassthroughLatentDynamicsPrior(ds)
        elif prior_class is prior.NoPrior:
            pm = prior.NoPrior()
        else:
            pm = prior_class.from_data(ds)
        return pm
Code example #10
File: gating_function.py, Project: UM-ARM-Lab/tampc
    def __init__(self, dss, retrain=False, model_opts=None, **kwargs):
        GatingFunction.__init__(self,
                                use_action=kwargs.pop('use_action', False),
                                input_slice=kwargs.pop('input_slice', None))
        self.num_components = len(dss)
        self.nominal_ds = dss[0]
        self.weights = None
        self.relative_weights = None
        self.component_scale = None

        # TODO tune acceptance probability to maximize f1
        self.component_scale = torch.ones(self.num_components,
                                          dtype=torch.double,
                                          device=self.nominal_ds.d).view(
                                              -1, 1)
        for s in range(1, self.num_components):
            self.component_scale[s] = 70

        # we do the softmax ourselves
        opts = {'h_units': (100, ), 'activation_factory': torch.nn.LeakyReLU}
        if model_opts is not None:
            opts.update(model_opts)

        config = copy.copy(self.nominal_ds.config)
        if self.input_slice:
            xu = self.nominal_ds.training_set()[0][:, self.input_slice]
            config.nx = xu.shape[1]
            config.n_input = xu.shape[1]
        config.ny = self.num_components

        self.model = make.make_sequential_network(
            config, **opts).to(device=self.nominal_ds.d)

        LearnableParameterizedModel.__init__(self, cfg.ROOT_DIR, **kwargs)
        self.name = "selector_{}_{}".format(self.name, config)

        if retrain or not self.load(self.get_last_checkpoint()):
            self.learn_model(dss)
        self.eval()
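
Since the comment says the softmax is done manually, here is one plausible reading of how the raw logits, component_scale, and relative_weights could fit together; the scaling step is an assumption based only on the attributes initialized above, not on code shown here:

import torch

num_components = 3
logits = torch.randn(10, num_components, dtype=torch.double)  # raw network output
component_scale = torch.ones(num_components, 1, dtype=torch.double)
component_scale[1:] = 70  # bias acceptance toward the non-nominal components

weights = torch.softmax(logits, dim=1)  # the softmax "we do ourselves"
scaled = weights * component_scale.t()
relative_weights = scaled / scaled.sum(dim=1, keepdim=True)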
Code example #11
    fill_dataset(new_data)
    logger.info("bootstrapping finished")

    # TODO converting the change in state directly into an angular representation is wrong
    preprocessor = preprocess.PytorchTransformer(
        preprocess.AngleToCosSinRepresentation(0),
        preprocess.AngleToCosSinRepresentation(0))
    untransformed_config = ds.update_preprocessor(preprocessor)

    # pm = prior.GMMPrior.from_data(ds)
    # pm = prior.LSQPrior.from_data(ds)
    mw = model.NetworkModelWrapper(
        model.DeterministicUser(
            make.make_sequential_network(config,
                                         activation_factory=torch.nn.Tanh,
                                         h_units=(16, 16)).to(device=d)), ds)
    pm = prior.NNPrior.from_data(mw, train_epochs=0)
    # linearizable_dynamics = online_model.OnlineDynamicsModel(0.1, pm, ds, sigreg=1e-10)
    online_dynamics = online_model.OnlineLinearizeMixing(
        0.1,
        pm,
        ds,
        compare_to_goal,
        local_mix_weight_scale=1,
        xu_characteristic_length=10,
        const_local_mix_weight=True,
        sigreg=1e-10,
        device=d)
    hybrid_dynamics = hybrid_model.HybridDynamicsModel(
        [ds],
Code example #12
rand.seed(0)
# training data for nominal model
N = 200
x = torch.rand(N) * 10
x, _ = torch.sort(x)
u = torch.zeros(x.shape[0], 0)
e = torch.randn(N) * 0.1
y = torch.sin(x) + e

config = load_data.DataConfig(predict_difference=False,
                              predict_all_dims=True,
                              y_in_x_space=False)
ds = PregeneratedDataset(x.view(-1, 1), u, y.view(-1, 1), config=config)
mw = model.NetworkModelWrapper(model.DeterministicUser(
    make.make_sequential_network(
        ds.config, h_units=(16, 16),
        activation_factory=torch.nn.Tanh).to(dtype=x.dtype)),
                               ds,
                               name="mix_nominal")

pm = prior.NNPrior.from_data(
    mw,
    checkpoint=None if relearn_dynamics else mw.get_last_checkpoint(),
    train_epochs=3000)

# make nominal predictions
yhat = pm.dyn_net.predict(x.view(-1, 1))

# plt.scatter(x.numpy(), y.numpy())
# plt.plot(x.numpy(), yhat.numpy())
# plt.xlabel('x')
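
To quantify the fit instead of plotting it, the script could end with a training-set error check (this assumes pm.dyn_net.predict returns a tensor shaped like y):

# mean squared error of the nominal model on its training inputs
mse = torch.mean((yhat.view(-1) - y) ** 2)
print(f"training MSE: {mse.item():.4f}")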