Code Example #1
    def __init__(self,
                 sensor_models,
                 calibration_model,
                 lr=1e-4,
                 batch_size=20,
                 log_dir=None,
                 **kwargs):
        self.graph = T.core.Graph()
        self.log_dir = log_dir
        with self.graph.as_default():
            self.calibration_model = calibration_model
            self.board_ids = list(sensor_models.keys())
            self.board_map = {b: i for i, b in enumerate(self.board_ids)}
            self.sensor_map = sensor_models
            self.sensor_models = [
                sensor_models[board_id] for board_id in self.board_ids
            ]
            self.architecture = pickle.dumps(
                [sensor_models, calibration_model])
            self.batch_size = batch_size
            self.lr = lr

            self.learning_rate = T.placeholder(T.floatx(), [])
            self.sensors = T.placeholder(T.floatx(), [None, 3])
            self.env = T.placeholder(T.floatx(), [None, 3])
            self.board = T.placeholder(T.core.int32, [None])
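            # Pair each sample's board index with its position in the batch so that
            # gather_nd below can select, per sample, the output of that sample's
            # board-specific sensor model from the stacked model outputs.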
            self.boards = T.transpose(
                T.pack([self.board,
                        T.range(T.shape(self.board)[0])]))
            self.rep = T.gather_nd(
                T.pack([
                    sensor_model(self.sensors)
                    for sensor_model in self.sensor_models
                ]), self.boards)
            self.rep_ = T.placeholder(T.floatx(),
                                      [None, self.rep.get_shape()[-1]])
            rep_env = T.concat([self.rep, self.env], -1)
            rep_env_ = T.concat([self.rep_, self.env], -1)
            self.y_ = self.calibration_model(rep_env)
            self.y_rep = self.calibration_model(rep_env_)
            self.y = T.placeholder(T.floatx(), [None, 2])
            self.loss = T.mean((self.y - self.y_)**2)
            self.mae = T.mean(T.abs(self.y - self.y_))
            T.core.summary.scalar('MSE', self.loss)
            T.core.summary.scalar('MAE', self.mae)
            self.summary = T.core.summary.merge_all()
            self.train_op = T.core.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)

        self.session = T.interactive_session(graph=self.graph)
Code Example #2
File: nn.py  Project: sharadmv/metasense-transfer
    def __init__(self, features, model=None, batch_size=20, lr=1e-4):
        super(NeuralNetwork, self).__init__(features)
        self.graph = T.core.Graph()
        with self.graph.as_default():
            self.architecture = pickle.dumps(model)
            self.model = model #Relu(6, 200) >> Relu(200) >> Relu(200) >> Relu(200) >> Linear(2)
            self.batch_size = batch_size
            self.lr = lr

            self.X = T.placeholder(T.floatx(), [None, 6])
            self.y = T.placeholder(T.floatx(), [None, 2])
            self.y_ = self.model(self.X)
            self.loss = T.mean((self.y - self.y_) ** 2)
            self.train_op = T.core.train.AdamOptimizer(self.lr).minimize(self.loss)

        self.session = T.interactive_session(graph=self.graph)
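
A minimal construction sketch for reference: the layer composition mirrors the inline comment above, while `my_features` stands in for a hypothetical list of the six input feature names and is not part of the original file.

from deepx.nn import Relu, Linear

# Sketch only: build the MLP from the inline comment above and hand it to the
# NeuralNetwork wrapper; `my_features` is a hypothetical feature list.
model = Relu(6, 200) >> Relu(200) >> Relu(200) >> Relu(200) >> Linear(2)
net = NeuralNetwork(my_features, model=model, batch_size=20, lr=1e-4)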
Code Example #3
File: common.py  Project: zhangmarvin/deepx
def xavier(shape, constant=1):
    """ Xavier initialization of network weights"""
    fan_in, fan_out = get_fans(shape)
    low = -constant*np.sqrt(6.0/(fan_in + fan_out))
    high = constant*np.sqrt(6.0/(fan_in + fan_out))
    return T.random_uniform(shape,
                             minval=low, maxval=high,
                             dtype=T.floatx())
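
A brief usage sketch: the (784, 256) shape is illustrative, and get_fans is assumed to be defined alongside xavier() in the same module.

# Usage sketch: draw Xavier-scaled initial values for a 784x256 weight matrix
# and wrap them in a backend variable (the shape is illustrative only).
W = T.variable(xavier((784, 256)), name='W')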
Code Example #4
File: functional.py  Project: zhangmarvin/deepx
    def create_parameter(self, name, shape, initial_value=None):
        # Create the named parameter once and cache it; later calls reuse the cached variable.
        if name not in self.parameters:
            if initial_value is None:
                parameter = T.variable(
                    initialize_weights(self.initialization, shape),
                    name=name,
                )
            else:
                parameter = T.variable(
                    np.array(initial_value, dtype=T.floatx()),
                    name=name,
                )
            self.parameters[name] = parameter
Code Example #5
    idx = np.random.permutation(np.arange(70000))
    X = X[idx]
    labels = labels[idx].astype(np.int32)

    y = np.zeros((N, 10))
    for i in range(N):
        y[i, labels[i]] = 1

    split = int(0.9 * N)

    train_idx, test_idx = idx[:split], idx[split:]

    Xtrain, Xtest = X[train_idx], X[test_idx]
    ytrain, ytest = y[train_idx], y[test_idx]

    X_in = T.placeholder(T.floatx(), [None, 28, 28, 1])
    Y_in = T.placeholder(T.floatx(), [None, 10])

    conv_net = Conv((2, 2, 10)) >> Conv((2, 2, 20)) >> Flatten() >> Linear(10)
    logits = conv_net(X_in)
    predictions = T.argmax(logits, -1)
    # T.core is the backing TensorFlow module, so no separate `tf` import is needed.
    loss = T.mean(T.core.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y_in))

    train_op = T.core.train.AdamOptimizer(1e-3).minimize(loss)

    sess = T.interactive_session()

    def train(n_iter, batch_size=20):
        for i in range(n_iter):
            idx = np.random.permutation(Xtrain.shape[0])[:batch_size]
            result = sess.run([loss, train_op], { X_in : Xtrain[idx], Y_in : ytrain[idx] })
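
    # Continuation sketch, not part of the original snippet: run a short training
    # loop, then score held-out accuracy. Assumes Xtest is already shaped
    # [N, 28, 28, 1] to match the X_in placeholder.
    train(1000)
    test_preds = sess.run(predictions, {X_in: Xtest})
    print('test accuracy:', np.mean(test_preds == np.argmax(ytest, -1)))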
Code Example #6
File: gmm-svi.py  Project: sharadmv/vi-demos
    ellipse = 2. * np.dot(np.linalg.cholesky(cov), circle) + mean[:, None]
    if line:
        line.set_data(ellipse)
        line.set_alpha(alpha)
    else:
        ax.plot(ellipse[0], ellipse[1], linestyle='-', linewidth=2)


N = 1000
K = 5
D = 2

sigma = 0.5
sigma0 = 100
data = generate_data(N, D, K, sigma=sigma, sigma0=sigma0, seed=None)
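# Priors for the mixture: a symmetric Dirichlet over the K mixing weights and a
# normal-inverse-Wishart (NIW) over each component's mean and covariance.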
p_pi = Dirichlet(T.constant(10.0 * np.ones([K], dtype=T.floatx())))
p_theta = NIW(
    list(
        map(lambda x: T.constant(np.array(x).astype(T.floatx())),
            [np.eye(D) * sigma, np.zeros(D), 1, D + 1])))
prior = (p_pi, p_theta)

np.random.seed(None)

X = T.placeholder(T.floatx(), [None, D])
batch_size = T.shape(X)[0]

q_pi = make_variable(Dirichlet(np.ones([K], dtype=T.floatx())))
q_theta = make_variable(
    NIW(
        map(lambda x: np.array(x).astype(T.floatx()), [
Code Example #7
File: gmm-mf.py  Project: sharadmv/vi-demos
    ellipse = 2. * np.dot(np.linalg.cholesky(cov), circle) + mean[:, None]
    if line:
        line.set_data(ellipse)
        line.set_alpha(alpha)
    else:
        ax.plot(ellipse[0], ellipse[1], linestyle='-', linewidth=2)


N = 1000
K = 5
D = 2

sigma = 0.5
sigma0 = 100
X = generate_data(N, D, K, sigma=sigma, sigma0=sigma0, seed=None)
p_pi = Dirichlet(T.constant(10.0 * np.ones([K], dtype=T.floatx())))
p_theta = NIW(
    list(
        map(lambda x: T.constant(np.array(x).astype(T.floatx())),
            [np.eye(D) * sigma, np.zeros(D), 1, D + 1])))
prior = (p_pi, p_theta)

np.random.seed(None)

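# Variational factors: q(pi) starts as a flat Dirichlet and q(theta) as a broad
# NIW, so the mean-field updates begin from a diffuse approximate posterior.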
q_pi = make_variable(Dirichlet(np.ones([K], dtype=T.floatx())))
q_theta = make_variable(
    NIW(
        map(lambda x: np.array(x).astype(T.floatx()), [
            np.tile(np.eye(D)[None] * 100, [K, 1, 1]),
            np.random.multivariate_normal(
                mean=np.zeros([D]), cov=np.eye(D) * 20, size=[K]),
Code Example #8
        _, l = sess.run([train_op, loss], {X: X_data[idx], Y: Y_data[idx]})
        if i % 1000 == 0:
            print(l)


if __name__ == "__main__":
    args = parse_args()
    model_path = Path('results') / args.name / 'models' / 'model_latest.pkl'
    dataset1 = load(args.round1, args.location1, args.board1)
    dataset2 = load(args.round2, args.location2, args.board2)
    train = dataset1[0].join(dataset2[0], lsuffix='-left').dropna()
    test = dataset1[1].join(dataset2[1], lsuffix='-left').dropna()
    model = joblib.load(model_path)
    fixer_model = pickle.loads(model.architecture)[0][args.board2]

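    # Train a "fixer" network: regress the pretrained model's learned
    # representation (Y) from the left board's raw sensor readings (X), updating
    # only the fixer model's parameters.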
    X = T.placeholder(T.floatx(), [None, 3])
    Y = T.placeholder(T.floatx(), [None, 3])

    Y_ = fixer_model(X)
    loss = T.mean((Y - Y_)**2)
    train_op = T.core.train.AdamOptimizer(1e-4).minimize(
        loss, var_list=fixer_model.get_parameters())

    X_data_train = train[[s + '-left' for s in sensor_features]].as_matrix()
    Y_data_train = model.representation(train[sensor_features], train['board'])
    X_data_test = test[[s + '-left' for s in sensor_features]].as_matrix()
    Y_data_test = model.representation(test[sensor_features], test['board'])

    sess = T.interactive_session()
    fit_nn(X_data_train, Y_data_train, fixer_model, batch_size=64)
    train_preds = model.calibrate(sess.run(Y_, {X: X_data_train}),
Code Example #9
File: nlds.py  Project: sharadmv/nvmp
    X += np.random.normal(size=X.shape, scale=np.sqrt(noise))
    return X


data = generate_data(1000)
N = data.shape[0]
yt, yt1 = data[:, :-1], data[:, 1:]
yt, yt1 = yt.reshape([-1, D]), yt1.reshape([-1, D])

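# Two-hidden-layer tanh MLPs (500 units each) whose outputs parameterize a
# D-dimensional Gaussian: transition_net for y_t -> y_{t+1}, rec_net for recognition.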
transition_net = Tanh(D, 500) >> Tanh(500) >> nn.Gaussian(D)
transition_net.initialize()

rec_net = Tanh(D, 500) >> Tanh(500) >> nn.Gaussian(D)
rec_net.initialize()

Yt = T.placeholder(T.floatx(), [None, D])
Yt1 = T.placeholder(T.floatx(), [None, D])
batch_size = T.shape(Yt)[0]
num_batches = N / T.to_float(batch_size)

Yt_message = Gaussian.pack([
    T.tile(T.eye(D)[None] * noise, [batch_size, 1, 1]),
    T.einsum('ab,ib->ia',
             T.eye(D) * noise, Yt)
])
Yt1_message = Gaussian.pack([
    T.tile(T.eye(D)[None] * noise, [batch_size, 1, 1]),
    T.einsum('ab,ib->ia',
             T.eye(D) * noise, Yt1)
])
transition = Gaussian(transition_net(Yt)).expected_value()
Code Example #10
File: vae.py  Project: yuchen8807/parasol
    def initialize(self):
        self.graph = T.core.Graph()
        with self.graph.as_default():
            prior_params = self.prior_params.copy()
            prior_type = prior_params.pop('prior_type')
            self.prior = PRIOR_MAP[prior_type](self.ds, self.da, self.horizon, **prior_params)

            cost_params = self.cost_params.copy()
            cost_type = cost_params.pop('cost_type')
            self.cost = COST_MAP[cost_type](self.ds, self.da, **cost_params)

            self.O = T.placeholder(T.floatx(), [None, None, self.do])
            self.U = T.placeholder(T.floatx(), [None, None, self.du])
            self.C = T.placeholder(T.floatx(), [None, None])
            self.S = T.placeholder(T.floatx(), [None, None, self.ds])
            self.A = T.placeholder(T.floatx(), [None, None, self.da])

            self.t = T.placeholder(T.int32, [])
            self.state, self.action = T.placeholder(T.floatx(), [None, self.ds]), T.placeholder(T.floatx(), [None, self.da])
            if self.prior.has_dynamics():
                self.next_state = self.prior.next_state(self.state, self.action, self.t)
                self.prior_dynamics = self.prior.get_dynamics()

            self.num_data = T.scalar()
            self.beta = T.placeholder(T.floatx(), [])
            self.learning_rate = T.placeholder(T.floatx(), [])
            self.model_learning_rate = T.placeholder(T.floatx(), [])

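            # Map the encoders across time to obtain per-timestep Gaussian potentials
            # over latent states (from observations O) and actions (from controls U).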
            self.S_potentials = util.map_network(self.state_encoder)(self.O)
            self.A_potentials = util.map_network(self.action_encoder)(self.U)

            if self.prior.is_dynamics_prior():
                self.data_strength = T.placeholder(T.floatx(), [])
                self.max_iter = T.placeholder(T.int32, [])
                posterior_dynamics, (encodings, actions) = \
                        self.prior.posterior_dynamics(self.S_potentials, self.A_potentials,
                                                      data_strength=self.data_strength,
                                                      max_iter=self.max_iter)
                self.posterior_dynamics_ = posterior_dynamics, (encodings.expected_value(), actions.expected_value())

            if self.prior.is_filtering_prior():
                self.prior_dynamics_stats = self.prior.sufficient_statistics()
                self.dynamics_stats = (
                    T.placeholder(T.floatx(), [None, self.ds, self.ds]),
                    T.placeholder(T.floatx(), [None, self.ds, self.ds + self.da]),
                    T.placeholder(T.floatx(), [None, self.ds + self.da, self.ds + self.da]),
                    T.placeholder(T.floatx(), [None]),
                )
                S_natparam = self.S_potentials.get_parameters('natural')
                num_steps = T.shape(S_natparam)[1]

                self.padded_S = stats.Gaussian(T.core.pad(
                    self.S_potentials.get_parameters('natural'),
                    [[0, 0], [0, self.horizon - num_steps], [0, 0], [0, 0]]
                ), 'natural')
                self.padded_A = stats.GaussianScaleDiag([
                    T.core.pad(self.A_potentials.get_parameters('regular')[0],
                            [[0, 0], [0, self.horizon - num_steps], [0, 0]]),
                    T.core.pad(self.A_potentials.get_parameters('regular')[1],
                            [[0, 0], [0, self.horizon - num_steps], [0, 0]])
                ], 'regular')
                self.q_S_padded, self.q_A_padded = self.prior.encode(
                    self.padded_S, self.padded_A,
                    dynamics_stats=self.dynamics_stats
                )
                self.q_S_filter = self.q_S_padded.filter(max_steps=num_steps)
                self.q_A_filter = self.q_A_padded.__class__(
                    self.q_A_padded.get_parameters('natural')[:, :num_steps]
                , 'natural')
                self.e_q_S_filter = self.q_S_filter.expected_value()
                self.e_q_A_filter = self.q_A_filter.expected_value()

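            # Variational posteriors over states and actions, the KL term against the
            # prior, and the gradients the prior contributes for its own update.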
            (self.q_S, self.q_A), self.prior_kl, self.kl_grads, self.info = self.prior.posterior_kl_grads(
                self.S_potentials, self.A_potentials, self.num_data
            )

            self.q_S_sample = self.q_S.sample()[0]
            self.q_A_sample = self.q_A.sample()[0]

            self.q_O = util.map_network(self.state_decoder)(self.q_S_sample)
            self.q_U = util.map_network(self.action_decoder)(self.q_A_sample)
            self.q_O_sample = self.q_O.sample()[0]
            self.q_U_sample = self.q_U.sample()[0]

            self.q_O_ = util.map_network(self.state_decoder)(self.S)
            self.q_U_ = util.map_network(self.action_decoder)(self.A)
            self.q_O__sample = self.q_O_.sample()[0]
            self.q_U__sample = self.q_U_.sample()[0]

            self.cost_likelihood = self.cost.log_likelihood(self.q_S_sample, self.C)
            if self.cost.is_cost_function():
                self.evaluated_cost = self.cost.evaluate(self.S)
            self.log_likelihood = T.sum(self.q_O.log_likelihood(self.O), axis=1)

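            # ELBO = reconstruction log-likelihood + cost log-likelihood - prior KL;
            # training optimizes a beta-weighted variant of the same objective.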
            self.elbo = T.mean(self.log_likelihood + self.cost_likelihood - self.prior_kl)
            train_elbo = T.mean(self.log_likelihood + self.beta * (self.cost_likelihood - self.prior_kl))
            T.core.summary.scalar("encoder-stdev", T.mean(self.S_potentials.get_parameters('regular')[0]))
            T.core.summary.scalar("log-likelihood", T.mean(self.log_likelihood))
            T.core.summary.scalar("cost-likelihood", T.mean(self.cost_likelihood))
            T.core.summary.scalar("prior-kl", T.mean(self.prior_kl))
            T.core.summary.scalar("beta", self.beta)
            T.core.summary.scalar("elbo", self.elbo)
            T.core.summary.scalar("beta-elbo", train_elbo)
            for k, v in self.info.items():
                T.core.summary.scalar(k, T.mean(v))
            self.summary = T.core.summary.merge_all()
            neural_params = (
                self.state_encoder.get_parameters()
                + self.state_decoder.get_parameters()
                + self.action_encoder.get_parameters()
                + self.action_decoder.get_parameters()
            )
            cost_params = self.cost.get_parameters()
            if len(neural_params) > 0:
                optimizer = T.core.train.AdamOptimizer(self.learning_rate)
                gradients, variables = zip(*optimizer.compute_gradients(-train_elbo, var_list=neural_params))
                gradients, _ = T.core.clip_by_global_norm(gradients, 5.0)
                self.neural_op = optimizer.apply_gradients(zip(gradients, variables))
            else:
                self.neural_op = T.core.no_op()
            if len(cost_params) > 0:
                self.cost_op = T.core.train.AdamOptimizer(self.learning_rate).minimize(-self.elbo, var_list=cost_params)
            else:
                self.cost_op = T.core.no_op()
            if len(self.kl_grads) > 0:
                if self.prior.is_dynamics_prior():
                    # opt = lambda x: T.core.train.MomentumOptimizer(x, 0.5)
                    opt = lambda x: T.core.train.GradientDescentOptimizer(x)
                else:
                    opt = T.core.train.AdamOptimizer
                self.dynamics_op = opt(self.model_learning_rate).apply_gradients([
                    (b, a) for a, b in self.kl_grads
                ])
            else:
                self.dynamics_op = T.core.no_op()
            self.train_op = T.core.group(self.neural_op, self.dynamics_op, self.cost_op)
        self.session = T.interactive_session(graph=self.graph, allow_soft_placement=True, log_device_placement=False)
Code Example #11
import seaborn as sns
sns.set_style('white')
import numpy as np
from tqdm import trange

from sklearn.linear_model import LogisticRegression
from deepx import T
from deepx.nn import *
from deepx.stats import Gaussian, Dirichlet, NIW, Categorical, kl_divergence, Bernoulli
from activations import Gaussian as GaussianLayer
from activations import GaussianStats

N = 1000
D = 10

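# Zero-mean, identity-covariance Gaussian prior over the D logistic-regression weights.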
p_w = Gaussian([
    T.constant(np.eye(D).astype(T.floatx()))[None],
    T.constant(np.zeros(D).astype(T.floatx()))[None]
])


def logistic(x):
    return 1 / (1 + np.exp(-x))


# def generate_data(N, D):
# with T.session() as s:
# w = np.random.multivariate_normal(mean=np.zeros(D), cov=np.eye(D))


# X = np.random.normal(size=(N, D))
# p = logistic(np.einsum('ia,a->i', X, w))
Code Example #12
import numpy as np

from deepx import stats, T

N, H, ds, da = 1, 2, 4, 2

# random rotation for state-state transition
A = np.zeros([H - 1, ds, ds])
for t in range(H - 1):
    theta = 0.5 * np.pi * np.random.rand()
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta), np.cos(theta)]])
    out = np.zeros((ds, ds))
    out[:2, :2] = rot
    q = np.linalg.qr(np.random.randn(ds, ds))[0]
    A[t] = q.dot(out).dot(q.T)
A = T.constant(A, dtype=T.floatx())

B = T.constant(0.1 * np.random.randn(H - 1, ds, da), dtype=T.floatx())
Q = T.matrix_diag(
    np.random.uniform(low=0.9, high=1.1, size=[H - 1, ds]).astype(np.float32))

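# Initial-state prior, per-timestep Gaussian potentials (natural parameters plus
# their log-normalizer), and random actions, assembled into an LDS below.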
prior = stats.Gaussian([T.eye(ds), T.zeros(ds)])
p_S = stats.Gaussian([
    T.eye(ds, batch_shape=[N, H]),
    T.constant(np.random.randn(N, H, ds), dtype=T.floatx())
])
potentials = stats.Gaussian.unpack(
    p_S.get_parameters('natural')) + [p_S.log_z()]
actions = T.constant(np.random.randn(N, H, da), dtype=T.floatx())

lds = stats.LDS(((A, B, Q), prior, potentials, actions))