Example #1
    def mstep(self, p, x, u, **kwargs):
        # Prior hyperparameters for the regularized (Bayesian) linear regression.
        mu0 = kwargs.get('mu0', 0.)
        sigma0 = kwargs.get('sigma0', 1e64)
        psi0 = kwargs.get('psi0', 1.)
        nu0 = kwargs.get('nu0', self.obs_dim + 1)

        # Stack lagged observations and align actions, next observations
        # and state responsibilities with them.
        xr, ur, xn, w = [], [], [], []
        for _x, _u, _w in zip(x, u, p):
            xr.append(arstack(_x, self.nb_lags)[:-1])
            ur.append(_u[self.nb_lags - 1:-1])
            xn.append(_x[self.nb_lags:])
            w.append(_w[self.nb_lags:])
        xu = list(map(np.hstack, zip(xr, ur)))

        # One weighted regression per discrete state.
        _sigma = np.zeros((self.nb_states, self.obs_dim, self.obs_dim))
        for k in range(self.nb_states):
            coef, intercept, sigma = linear_regression(Xs=np.vstack(xu),
                                                       ys=np.vstack(xn),
                                                       weights=np.vstack(w)[:, k],
                                                       fit_intercept=True,
                                                       mu0=mu0, sigma0=sigma0,
                                                       psi0=psi0, nu0=nu0)

            self.A[k] = coef[:, :self.obs_dim * self.nb_lags]
            self.B[k] = coef[:, self.obs_dim * self.nb_lags:]
            self.c[k] = intercept
            _sigma[k] = sigma

        self.sigma = _sigma
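These snippets lean on an `arstack` helper that is not shown anywhere in the listing. The sketch below reconstructs its apparent behavior from the indexing used above; it is an inferred stand-in, not the library's implementation.

import numpy as np

def arstack(x, lags):
    # Assumed behavior: sliding-window stack of `lags` consecutive observations.
    # Row i is the concatenation of x[i], x[i+1], ..., x[i+lags-1], so the output
    # has shape (T - lags + 1, lags * obs_dim) for x of shape (T, obs_dim).
    x = np.asarray(x)
    if x.ndim == 1:
        x = x[:, None]
    T = x.shape[0]
    return np.hstack([x[i:T - lags + 1 + i] for i in range(lags)])

With this convention, arstack(_x, self.nb_lags)[:-1] pairs the window x[t - nb_lags:t] with the next observation x[t] collected in xn, while _u[self.nb_lags - 1:-1] picks the action applied at the last step of that window.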
Example #2
    def mstep(self, p, x, u, **kwargs):
        # Prior hyperparameters for the regularized (Bayesian) linear regression.
        mu0 = kwargs.get('mu0', 0.)
        sigma0 = kwargs.get('sigma0', 1e64)
        psi0 = kwargs.get('psi0', 1.)
        nu0 = kwargs.get('nu0', self.act_dim + 1)

        # Stack lagged observations, featurize them and align the actions
        # and state responsibilities.
        xr, ur, wr = [], [], []
        for _x, _u, _w in zip(x, u, p):
            xr.append(arstack(_x, self.nb_lags + 1))
            ur.append(_u[self.nb_lags:])
            wr.append(_w[self.nb_lags:])
        fr = list(map(self.featurize, xr))

        _sigma = np.zeros((self.nb_states, self.act_dim, self.act_dim))
        for k in range(self.nb_states):
            coef, intercept, sigma = linear_regression(
                Xs=np.vstack(fr),
                ys=np.vstack(ur),
                weights=np.vstack(wr)[:, k],
                fit_intercept=True,
                mu0=mu0,
                sigma0=sigma0,
                psi0=psi0,
                nu0=nu0)

            self.K[k] = coef
            self.kff[k] = intercept
            _sigma[k] = sigma

        self.sigma = _sigma
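The per-state fits above go through a `linear_regression` routine whose implementation is also outside these snippets. The following is a rough, self-contained stand-in that reproduces the call signature, assuming a weighted least-squares fit with an isotropic coefficient prior of variance sigma0 and a crudely regularized noise covariance; mu0 is accepted but ignored, and psi0/nu0 only enter as simple regularizers. It is a sketch of the interface, not the library's estimator.

import numpy as np

def linear_regression(Xs, ys, weights, fit_intercept=True,
                      mu0=0., sigma0=1e64, psi0=1., nu0=0.):
    # Hypothetical stand-in: weighted least squares with ridge-style
    # regularization (1 / sigma0 ~ 0 means essentially unregularized)
    # and a regularized empirical estimate of the noise covariance.
    N, _ = Xs.shape
    X = np.hstack((Xs, np.ones((N, 1)))) if fit_intercept else Xs
    Xw = X * weights[:, None]
    prec = X.T @ Xw + np.eye(X.shape[1]) / sigma0
    coef = np.linalg.solve(prec, Xw.T @ ys).T            # (out_dim, in_dim [+1])
    resid = ys - X @ coef.T
    dof = max(np.sum(weights) + nu0, 1.)
    sigma = (psi0 * np.eye(ys.shape[1]) + resid.T @ (resid * weights[:, None])) / dof
    if fit_intercept:
        return coef[:, :-1], coef[:, -1], sigma
    return coef, np.zeros(ys.shape[1]), sigma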
Example #3
    def mstep(self, p, x, u, **kwargs):
        xr, ur, wr = [], [], []
        for _x, _u, _w in zip(x, u, p):
            xr.append(arstack(_x, self.nb_lags + 1))
            ur.append(_u[self.nb_lags:])
            wr.append(_w[self.nb_lags:])
        fr = list(map(self.featurize, xr))

        fr, ur, wr = list(map(np.vstack, (fr, ur, wr)))
        self.em(fr, ur, wr, **kwargs)
Example #4
    def mstep(self, p, x, u, **kwargs):
        xr, ur, wr = [], [], []
        for _x, _u, _w in zip(x, u, p):
            xr.append(arstack(_x, self.nb_lags + 1))
            ur.append(_u[self.nb_lags:])
            wr.append(_w[self.nb_lags:])
        fr = list(map(self.featurize, xr))

        # Conjugate update: add the weighted sufficient statistics to the
        # prior's natural parameters and take the posterior mode.
        stats = self.likelihood.weighted_statistics(fr, ur, wr)
        self.posterior.nat_param = self.prior.nat_param + stats
        self.likelihood.params = self.posterior.mode()
Example #5
    def mstep(self, p, x, u, **kwargs):
        xr, ur, xn, w = [], [], [], []
        for _x, _u, _w in zip(x, u, p):
            xr.append(arstack(_x, self.nb_lags)[:-1])
            ur.append(_u[self.nb_lags - 1:-1])
            xn.append(_x[self.nb_lags:])
            w.append(_w[self.nb_lags:])
        xu = list(map(np.hstack, zip(xr, ur)))

        xu, xn, w = list(map(np.vstack, (xu, xn, w)))
        self.em(xu, xn, w, **kwargs)
Example #6
    def log_likelihood(self, x, u):
        if isinstance(x, np.ndarray) and isinstance(u, np.ndarray):
            xr = arstack(x, self.nb_lags + 1)
            ur = u[self.nb_lags:]
            fr = self.featurize(xr)
            return super().log_likelihood(fr, ur)
        else:
            # For lists of sequences, call the undecorated method on each one.
            def inner(x, u):
                return self.log_likelihood.__wrapped__(self, x, u)

            return list(map(inner, x, u))
Example #7
    def smooth(self, p, x, u):
        if all(isinstance(i, np.ndarray) for i in [p, x, u]):
            xr = arstack(x, self.nb_lags)[:-1]
            ur = u[self.nb_lags - 1:-1]
            pr = p[self.nb_lags:]

            mu = np.zeros((len(xr), self.nb_states, self.obs_dim))
            for k in range(self.nb_states):
                mu[:, k, :] = self.mean(k, xr, ur)
            return np.einsum('nk,nkl->nl', pr, mu)
        else:
            return list(map(self.smooth, p, x, u))
Example #8
    def log_likelihood(self, x, u):
        if isinstance(x, np.ndarray) and isinstance(u, np.ndarray):
            xr = arstack(x, self.nb_lags)[:-1]
            ur = u[self.nb_lags - 1:-1]
            xn = x[self.nb_lags:]
            xu = np.hstack((xr, ur))
            return super().log_likelihood(xu, xn)
        else:

            def inner(x, u):
                return self.log_likelihood.__wrapped__(self, x, u)

            return list(map(inner, x, u))
Example #9
    def mstep(self, p, x, u, **kwargs):
        xr, ur, xn, w = [], [], [], []
        for _x, _u, _w in zip(x, u, p):
            xr.append(arstack(_x, self.nb_lags)[:-1])
            ur.append(_u[self.nb_lags - 1:-1])
            xn.append(_x[self.nb_lags:])
            w.append(_w[self.nb_lags:])
        xu = list(map(np.hstack, zip(xr, ur)))

        stats = self.likelihood.weighted_statistics(xu, xn, w)
        self.posterior.nat_param = self.prior.nat_param + stats
        self.likelihood.params = self.posterior.mode()

        self.empirical_bayes(**kwargs)
Example #10
    def log_likelihood(self, x, u):
        if isinstance(x, np.ndarray) and isinstance(u, np.ndarray):
            xr = arstack(x, self.nb_lags + 1)
            ur = u[self.nb_lags:]

            log_lik = np.zeros((ur.shape[0], self.nb_states))
            for k in range(self.nb_states):
                log_lik[:, k] = lg_mvn(ur, self.mean(k, xr), self.sigma[k])
            return log_lik
        else:

            def inner(x, u):
                return self.log_likelihood.__wrapped__(self, x, u)

            return list(map(inner, x, u))
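`lg_mvn`, used above to evaluate the per-state log-likelihoods, is likewise external to the listing. A minimal version consistent with its use here (row-wise log-density of a Gaussian with per-row means and a shared covariance) could look as follows; name and behavior are inferred from the call sites, not taken from the library.

import numpy as np

def lg_mvn(x, mu, sigma):
    # Row-wise log-density of a multivariate Gaussian N(mu, sigma):
    # x and mu broadcast to (N, dim), sigma is a shared (dim, dim) covariance.
    resid = np.atleast_2d(x - mu)
    dim = sigma.shape[-1]
    chol = np.linalg.cholesky(sigma)
    z = np.linalg.solve(chol, resid.T).T
    log_det = 2. * np.sum(np.log(np.diag(chol)))
    return -0.5 * (dim * np.log(2. * np.pi) + log_det + np.sum(z * z, axis=-1))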
Example #11
    def log_likelihood(self, x, u):
        if isinstance(x, np.ndarray) and isinstance(u, np.ndarray):
            xr = arstack(x, self.nb_lags)[:-1]
            ur = u[self.nb_lags - 1:-1]
            xn = x[self.nb_lags:]

            log_lik = np.zeros((xr.shape[0], self.nb_states))
            for k in range(self.nb_states):
                mu = self.mean(k, xr, ur)
                log_lik[:, k] = lg_mvn(xn, mu, self.sigma[k])
            return log_lik
        else:

            def inner(x, u):
                return self.log_likelihood.__wrapped__(self, x, u)

            return list(map(inner, x, u))
Example #12
    def initialize(self, x, u, **kwargs):
        kmeans = kwargs.get('kmeans', True)

        # Stack lagged observations and align actions and next observations.
        xr, ur, xn = [], [], []
        for _x, _u in zip(x, u):
            xr.append(arstack(_x, self.nb_lags)[:-1])
            ur.append(_u[self.nb_lags - 1:-1])
            xn.append(_x[self.nb_lags:])
        xu = list(map(np.hstack, zip(xr, ur)))

        # Initial state labels: k-means clusters or uniform random draws.
        t = list(map(len, xu))
        if kmeans:
            from sklearn.cluster import KMeans
            km = KMeans(self.nb_states)
            km.fit(np.vstack(xu))
            z = np.split(km.labels_, np.cumsum(t)[:-1])
        else:
            z = list(map(partial(npr.choice, self.nb_states), t))

        z = list(map(partial(one_hot, self.nb_states), z))

        # Prior hyperparameters for the regularized (Bayesian) linear regression.
        mu0 = kwargs.get('mu0', 0.)
        sigma0 = kwargs.get('sigma0', 1e64)
        psi0 = kwargs.get('psi0', 1.)
        nu0 = kwargs.get('nu0', self.obs_dim + 1)

        # One weighted regression per discrete state.
        _sigma = np.zeros((self.nb_states, self.obs_dim, self.obs_dim))
        for k in range(self.nb_states):
            coef, intercept, sigma = linear_regression(Xs=np.vstack(xu),
                                                       ys=np.vstack(xn),
                                                       weights=np.vstack(z)[:, k],
                                                       fit_intercept=True,
                                                       mu0=mu0, sigma0=sigma0,
                                                       psi0=psi0, nu0=nu0)

            self.A[k] = coef[:, :self.obs_dim * self.nb_lags]
            self.B[k] = coef[:, self.obs_dim * self.nb_lags:]
            self.c[k] = intercept
            _sigma[k] = sigma

        self.sigma = _sigma
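The initialization examples encode the cluster labels with a `one_hot` helper, applied as partial(one_hot, self.nb_states), so the number of states comes first. A plausible definition, again an assumption rather than the library's code:

import numpy as np

def one_hot(K, z):
    # Encode integer labels z of length T as a (T, K) indicator matrix.
    z = np.atleast_1d(np.asarray(z, dtype=int))
    out = np.zeros((len(z), K))
    out[np.arange(len(z)), z] = 1.
    return out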
Example #13
    def initialize(self, x, u, **kwargs):
        kmeans = kwargs.get('kmeans', True)

        xr, ur, xn = [], [], []
        for _x, _u in zip(x, u):
            xr.append(arstack(_x, self.nb_lags)[:-1])
            ur.append(_u[self.nb_lags - 1:-1])
            xn.append(_x[self.nb_lags:])
        xu = list(map(np.hstack, zip(xr, ur)))

        t = list(map(len, xu))
        if kmeans:
            from sklearn.cluster import KMeans
            km = KMeans(self.nb_states)
            km.fit(np.vstack(xu))
            z = np.split(km.labels_, np.cumsum(t)[:-1])
        else:
            z = list(map(partial(npr.choice, self.nb_states), t))

        z = list(map(partial(one_hot, self.nb_states), z))

        xu, xn, z = list(map(np.vstack, (xu, xn, z)))
        self.em(xu, xn, z, method='direct', nb_iter=100, values='sample')
Example #14
    def initialize(self, x, u, **kwargs):
        kmeans = kwargs.get('kmeans', False)

        xr, ur = [], []
        for _x, _u in zip(x, u):
            xr.append(arstack(_x, self.nb_lags + 1))
            ur.append(_u[self.nb_lags:])
        fr = list(map(self.featurize, xr))

        t = list(map(len, fr))
        if kmeans:
            from sklearn.cluster import KMeans
            km = KMeans(self.nb_states)
            km.fit(np.vstack(fr))
            z = np.split(km.labels_, np.cumsum(t)[:-1])
        else:
            z = list(map(partial(npr.choice, self.nb_states), t))

        z = list(map(partial(one_hot, self.nb_states), z))

        stats = self.likelihood.weighted_statistics(fr, ur, z)
        self.posterior.nat_param = self.prior.nat_param + stats
        self.likelihood.params = self.posterior.rvs()
Example #15
    def sample(self, z, x, u=None, ar=False):
        xr = np.squeeze(arstack(x, self.nb_lags), axis=0) if ar else x
        xu = np.hstack((xr, u))
        xn = self.rvs(z, xu)
        return np.atleast_1d(xn)
Example #16
    def sample(self, z, x, u, ar=False):
        xr = np.squeeze(arstack(x, self.nb_lags), axis=0) if ar else x
        xu = np.hstack((xr, u))
        xn = self.likelihood.dists[z].rvs(xu)
        return np.atleast_1d(xn)
Example #17
    def mean(self, z, x, ar=False):
        xr = np.squeeze(arstack(x, self.nb_lags + 1), axis=0) if ar else x
        fr = self.featurize(xr)
        u = self.likelihood.dists[z].mean(fr)
        return np.atleast_1d(u)
Example #18
    def sample(self, z, x, ar=False):
        xr = np.squeeze(arstack(x, self.nb_lags + 1), axis=0) if ar else x
        fr = self.featurize(xr)
        u = self.rvs(z, fr)
        return np.atleast_1d(u)
Example #19
    def mean(self, z, x, u, ar=False):
        xr = np.squeeze(arstack(x, self.nb_lags), axis=0) if ar else x
        return np.einsum('kh,...h->...k', self.A[z], xr) \
            + np.einsum('kh,...h->...k', self.B[z], u) + self.c[z, :]
Example #20
    def mean(self, z, x, ar=False):
        xr = np.squeeze(arstack(x, self.nb_lags + 1), axis=0) if ar else x
        feat = self.featurize(xr)
        u = np.einsum('kh,...h->...k', self.K[z], feat) + self.kff[z]
        return np.atleast_1d(u)