Example #1
    def _initialize_with_pca(self,
                             datas,
                             inputs=None,
                             masks=None,
                             tags=None,
                             num_iters=20):
        for data in datas:
            assert data.shape[1] == self.N

        N_offsets = np.cumsum(self.N_vec)[:-1]
        pcas = []

        split_datas = list(
            zip(*[np.split(data, N_offsets, axis=1) for data in datas]))
        split_masks = list(
            zip(*[np.split(mask, N_offsets, axis=1) for mask in masks]))
        assert len(split_masks) == len(split_datas) == self.P

        for em, dps, mps in zip(self.emissions_models, split_datas,
                                split_masks):
            pcas.append(em._initialize_with_pca(dps, inputs, mps, tags))

        # Combine the PCA objects
        from sklearn.decomposition import PCA
        pca = PCA(self.D)
        pca.components_ = block_diag(*[p.components_ for p in pcas])
        pca.mean_ = np.concatenate([p.mean_ for p in pcas])
        # Not super pleased with this, but it should work...
        pca.noise_variance_ = np.concatenate(
            [p.noise_variance_ * np.ones(n) for p, n in zip(pcas, self.N_vec)])
        return pca
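For reference, the combined components_ above is stitched together with scipy.linalg.block_diag; a small self-contained sketch of what that call produces (toy matrices, not the model's actual factors):

import numpy as np
from scipy.linalg import block_diag

A = np.ones((2, 3))        # components_ of the first per-group PCA (toy values)
B = 2 * np.ones((1, 2))    # components_ of the second per-group PCA (toy values)

C = block_diag(A, B)
print(C.shape)             # (3, 5): the blocks sit on the diagonal, zeros elsewhere
print(C)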
Example #2
 def _invert(self, data, input, mask, tag):
     assert data.shape[1] == self.N
     N_offsets = np.cumsum(self.N_vec)[:-1]
     states = []
     for em, dp, mp in zip(self.emissions_models,
                         np.split(data, N_offsets, axis=1),
                         np.split(mask, N_offsets, axis=1)):
         states.append(em._invert(dp, input, mp, tag))
     return np.column_stack(states)
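The emission snippets above share one idiom: convert per-block sizes into interior column offsets with np.cumsum(...)[:-1] and hand those offsets to np.split. A minimal sketch of the pattern with made-up sizes:

import numpy as np

N_vec = [2, 3, 1]                          # per-block column counts (hypothetical)
data = np.arange(4 * sum(N_vec)).reshape(4, sum(N_vec))

# cumsum gives [2, 5, 6]; dropping the last entry leaves the interior cut points.
N_offsets = np.cumsum(N_vec)[:-1]          # array([2, 5])

blocks = np.split(data, N_offsets, axis=1)
print([b.shape for b in blocks])           # [(4, 2), (4, 3), (4, 1)]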
Example #3
def dynamics_fn(t, coords):
    q, p = np.split(coords,2)
    D = dissipative_force(p, momentum=True)

    dcoords = autograd.grad(hamiltonian_fn)(coords)
    dqdt, dpdt = np.split(dcoords,2)
    dqdt -= D

    S = np.concatenate([dpdt, -dqdt], axis=-1)
    return S
Example #4
def lag_dynamics_fn(t, coords):
    q, dq = np.split(coords,2)
    dcoords = autograd.grad(lagrangian_fn)(coords)
    dLdq, dLddq = np.split(dcoords,2)

    I = inertia_matrix()
    D = dissipative_force(dq)

    # d/dt dLddq - dLdq = \tau
    dqdt = dLddq * I
    ddqdt = (D + dLdq) * 1./I
    # dqdt, ddqdt = dLddq, dLdq # dq = p = dLddq, d/dt dLddq = dp = dLdq
    # dqdt = dq
    
    S = np.concatenate([dqdt, ddqdt], axis=-1)
    return S
Example #5
def get_trajectory(t_span=[0, 3],
                   timescale=10,
                   radius=None,
                   y0=None,
                   noise_std=0.1,
                   **kwargs):
    t_eval = np.linspace(t_span[0], t_span[1],
                         int(timescale * (t_span[1] - t_span[0])))

    # get initial state
    if y0 is None:
        y0 = np.random.rand(2) * 2 - 1
    if radius is None:
        radius = np.random.rand() * 0.9 + 0.1  # sample a range of radii
    y0 = y0 / np.sqrt((y0**2).sum()) * radius  ## set the appropriate radius

    spring_ivp = solve_ivp(fun=dynamics_fn,
                           t_span=t_span,
                           y0=y0,
                           t_eval=t_eval,
                           rtol=1e-10,
                           **kwargs)
    q, p = spring_ivp['y'][0], spring_ivp['y'][1]
    dydt = [dynamics_fn(None, y) for y in spring_ivp['y'].T]
    dydt = np.stack(dydt).T
    dqdt, dpdt = np.split(dydt, 2)

    # add noise
    q += np.random.randn(*q.shape) * noise_std
    p += np.random.randn(*p.shape) * noise_std
    return q, p, dqdt, dpdt, t_eval
Example #6
 def forward(self, x, input, tag):
     assert x.shape[1] == self.D
     D_offsets = np.cumsum(self.D_vec)[:-1]
     datas = []
     for em, xp in zip(self.emissions_models, np.split(x, D_offsets, axis=1)):
         datas.append(em.forward(xp, input, tag))
     return np.concatenate(datas, axis=2)
Example #7
def get_one_trajectory(t_span=[0, 10],
                       y0=np.array([1, 0]),
                       n_points=50,
                       **kwargs):
    """
    Evaluate one GT trajectory
    """

    t_eval = np.linspace(t_span[0], t_span[1], n_points)

    # ODE solver
    pen_sol = solve_ivp(fun=dynamics_fn,
                        t_span=t_span,
                        y0=y0,
                        t_eval=t_eval,
                        rtol=1e-10,
                        **kwargs)

    q, p = pen_sol['y'][0], pen_sol['y'][1]

    dydt = [dynamics_fn(None, y) for y in pen_sol['y'].T]
    dydt = np.stack(dydt).T
    dqdt, dpdt = np.split(dydt, 2)

    return q, p, dqdt, dpdt, t_eval
Example #8
def neural_net_loss_jcb(p_values, xinputs, y, kwargs1):
    """Implements a deep neural network for classification.
       params is a list of (weights, bias) tuples.
       inputs is an (N x D) matrix.
       returns normalized class log-probabilities."""
    hidden = kwargs1['hidden']
    sizes_wb, shapes_wb = kwargs1['wb_sizes'], kwargs1['wb_shapes']
    new_sizes_sum = []
    sum_size_psns = 0
    wb_sizes_array = np.asarray(sizes_wb)
    for new_idx in range(len(wb_sizes_array)):
        sum_size_psns += wb_sizes_array[new_idx]
        new_sizes_sum.append(sum_size_psns)
    print(new_sizes_sum)
    print(shapes_wb)
    # construct loss function
    splitted_list_params = []
    split_all_params = np.split(p_values, new_sizes_sum)
    # print(split_all_params)
    for ijex in range(len(split_all_params) - 1):
        splitted_list_params.append(np.reshape(split_all_params[ijex], shapes_wb[ijex]))
    ws_parameters = splitted_list_params[0:][::2]
    bs_parameters = splitted_list_params[1:][::2]

    y_hat = xinputs
    for k_idx in range(len(hidden)):
        y_hat = sig_activation(np.dot(y_hat, ws_parameters[k_idx]) + bs_parameters[k_idx])
    y_hat = np.dot(y_hat, ws_parameters[-1]) + bs_parameters[-1]
    r = y - y_hat
    y_loss_vec = np.sum(np.sum(np.square(r), 1))
    return y_loss_vec
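The unflattening loop above follows a recurring pattern in these examples: cumulative per-layer sizes mark where to cut the flat parameter vector, and the stored shapes restore each weight matrix and bias. A self-contained sketch with a made-up two-layer network:

import numpy as np

# Hypothetical layer sizes: 3 inputs -> 4 hidden -> 2 outputs.
wb_shapes = [(3, 4), (4,), (4, 2), (2,)]
wb_sizes = [int(np.prod(s)) for s in wb_shapes]    # [12, 4, 8, 2]

flat = np.arange(sum(wb_sizes), dtype=float)       # stand-in for the parameter vector

# Splitting at the full cumulative sums leaves a trailing empty piece, hence [:-1].
pieces = np.split(flat, np.cumsum(wb_sizes))[:-1]
params = [p.reshape(shape) for p, shape in zip(pieces, wb_shapes)]

weights, biases = params[0::2], params[1::2]
print([w.shape for w in weights])                  # [(3, 4), (4, 2)]
print([b.shape for b in biases])                   # [(4,), (2,)]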
Example #9
 def unflatten(vector):
     split_ixs = np.cumsum(lengths)
     pieces = np.split(vector, split_ixs)
     return {key: unflattener(piece)
             for piece, unflattener, key in zip(pieces,
                                                unflatteners,
                                                keys)}
Example #10
    def k_fold_cv(self, X, Y, k=10):
        indx = np.array([i for i in range(self.dataXshape[0])])
        indx = np.random.permutation(indx)
        indx_subset = np.split(indx, k)

        ret_val = 0.
        for ki in range(k):
            temp_ind = [
                i for i in range(X.shape[0]) if i not in indx_subset[ki]
            ]
            temp_X = X[temp_ind]
            temp_Y = Y[temp_ind]

            w = self.post_mean
            lam = self.lam_memo
            alpha = self.prec_weight

            w, lam, alpha = laplace_approximation.maximized_approximate_log_marginal_likelihood(
                temp_Y, temp_X, mo_model, w, lam, alpha, warmup_num=0)
            train_ml = laplace_approximation.approximate_log_marginal_likelihood(
                temp_Y, temp_X, w, mo_model, lam, alpha)
            total_ml = laplace_approximation.approximate_log_marginal_likelihood(
                Y, X, w, mo_model, lam, alpha)
            ret_val += total_ml - train_ml
            print(ki, "ave_cv", ret_val / (ki + 1), "  temp_cv",
                  total_ml - train_ml)
        logger.log("k =", k)
        logger.log("PM_CV =", ret_val / (1. * k))
        return ret_val / (1. * k)
Example #11
def neural_net_loss(p, kwargs1):
    """Implements a deep neural network for classification.
       params is a list of (weights, bias) tuples.
       inputs is an (N x D) matrix.
       returns normalized class log-probabilities."""
    inputs, y = kwargs1['x_inputs'], kwargs1['y']
    wb_sizes_x, wb_shapes = kwargs1['wb_sizes'], kwargs1['wb_shapes']
    x_wb_size_new = []
    sum_wb_sizes = 0
    wb_sizes_array = np.asarray(wb_sizes_x)

    # Build cumulative split offsets from the per-layer sizes
    for k in range(len(wb_sizes_array)):
        sum_wb_sizes += wb_sizes_array[k]
        x_wb_size_new.append(sum_wb_sizes)

    # print(x_wb_size_new)
    # print(wb_shapes)
    split_params_lst = []
    split_params = np.split(p, x_wb_size_new)
    for i in range(len(split_params) - 1):
        split_params_lst.append(np.reshape(split_params[i], wb_shapes[i]))
    ws_classify = split_params_lst[0:][::2]
    bs_classify = split_params_lst[1:][::2]

    hidden = kwargs1['hidden']
    y_hat = inputs
    for k_idx in range(len(hidden)):
        y_hat = sig_activation(np.dot(y_hat, ws_classify[k_idx]) + bs_classify[k_idx]) 
    y_hat = np.dot(y_hat, ws_classify[-1]) + bs_classify[-1]
    r = y - y_hat
    loss = np.sum(np.sum(np.square(r), 1))
    return loss, r
Example #12
def get_trajectory(radius=None, y0=None, noise_std=0.1, **kwargs):
    time_stamps = kwargs["time_stamps"] if "time_stamps" in kwargs else 45
    t_span = [0, 3 / 44 * (time_stamps - 1)]
    t_eval = np.linspace(t_span[0], t_span[1], time_stamps)

    # get initial state
    if y0 is None:
        y0 = np.random.rand(2) * 2. - 1
    if radius is None:
        radius = np.random.rand() + 1.3  # sample a range of radii
    y0 = y0 / np.sqrt((y0**2).sum()) * radius  ## set the appropriate radius

    spring_ivp = solve_ivp(fun=dynamics_fn,
                           t_span=t_span,
                           y0=y0,
                           t_eval=t_eval,
                           rtol=1e-10,
                           **kwargs)
    q, p = spring_ivp['y'][0], spring_ivp['y'][1]
    dydt = [dynamics_fn(None, y) for y in spring_ivp['y'].T]
    dydt = np.stack(dydt).T
    dqdt, dpdt = np.split(dydt, 2)

    # add noise
    q += np.random.randn(*q.shape) * noise_std
    p += np.random.randn(*p.shape) * noise_std
    return q, p, dqdt, dpdt, t_eval
Example #13
    def initialize(self,
                   datas,
                   inputs=None,
                   masks=None,
                   tags=None,
                   init_method="random"):
        Ts = [data.shape[0] for data in datas]

        # Get initial discrete states
        if init_method.lower() == 'kmeans':
            # KMeans clustering
            from sklearn.cluster import KMeans
            km = KMeans(self.K)
            km.fit(np.vstack(datas))
            zs = np.split(km.labels_, np.cumsum(Ts)[:-1])

        elif init_method.lower() == 'random':
            # Random assignment
            zs = [npr.choice(self.K, size=T) for T in Ts]

        else:
            raise Exception(
                'Not an accepted initialization type: {}'.format(init_method))

        # Make a one-hot encoding of z and treat it as HMM expectations
        Ezs = [one_hot(z, self.K) for z in zs]
        expectations = [(Ez, None, None) for Ez in Ezs]

        # Set the variances all at once to use the setter
        self.m_step(expectations, datas, inputs, masks, tags)
Example #14
def apply_rotation(obj, coord_old, src_folder):

    coord_vec_ls = []
    for i in range(3):
        f = os.path.join(src_folder, 'coord{}_vec.npy'.format(i))
        coord_vec_ls.append(np.load(f))
    s = obj.shape
    coord0_vec, coord1_vec, coord2_vec = coord_vec_ls

    coord_old = np.tile(coord_old, [s[0], 1])
    coord1_old = coord_old[:, 0]
    coord2_old = coord_old[:, 1]
    coord_old = np.stack([coord0_vec, coord1_old, coord2_old], axis=1).transpose()
    # print(sess.run(coord_old))


    obj_channel_ls = np.split(obj, s[3], 3)
    obj_rot_channel_ls = []
    for channel in obj_channel_ls:
        channel_flat = channel.flatten()
        ind = coord_old[0] * (s[1] * s[2]) + coord_old[1] * s[2] + coord_old[2]
        ind = ind.astype('int')
        obj_chan_new_val = channel_flat[ind]
        obj_rot_channel_ls.append(np.reshape(obj_chan_new_val, s[:-1]))
    obj_rot = np.stack(obj_rot_channel_ls, axis=3)
    return obj_rot
Example #15
def func_predict(opt_p, x_in, y_out, wb_shapes, wb_sizes_x1, hidden1):
    # wb_sizes_x1, wb_shapes = kwargs1['wb_sizes'], kwargs1['wb_shapes']
    x_wb_size_new1 = []
    sum_wb_sizes1 = 0
    wb_sizes_array1 = np.asarray(wb_sizes_x1)
    # Build cumulative split offsets from the per-layer sizes
    for k in range(len(wb_sizes_array1)):
        sum_wb_sizes1 += wb_sizes_array1[k]
        x_wb_size_new1.append(sum_wb_sizes1)

    print(x_wb_size_new1)
    print(wb_shapes)
    split_params_lst1 = []
    split_params1 = np.split(opt_p, x_wb_size_new1)
    print(len(split_params1[1]))
    for i in range(len(split_params1) - 1):
        split_params_lst1.append(np.reshape(split_params1[i], wb_shapes[i]))
    ws2_classify1 = split_params_lst1[0:][::2]
    bs2_classify1 = split_params_lst1[1:][::2]
    # print(ws2_classify)
    # hidden1= kwargs1['hidden']
    y_hat1 = x_in
    for k_idx in range(len(hidden1)):
        y_hat1 = sig_activation(
            np.dot(y_hat1, ws2_classify1[k_idx]) + bs2_classify1[k_idx])
    y_hat1 = np.dot(y_hat1, ws2_classify1[-1]) + bs2_classify1[-1]
    return y_hat1
Example #16
def pca_with_imputation(D, datas, masks, num_iters=20):
    # Allow a single array (and mask) as well as a list of arrays.
    if not isinstance(datas, (list, tuple)):
        datas, masks = [datas], [masks]
    data = np.concatenate(datas)
    mask = np.concatenate(masks)

    if np.any(~mask):
        # Fill in missing data with mean to start
        fulldata = data.copy()
        for n in range(fulldata.shape[1]):
            fulldata[~mask[:, n], n] = fulldata[mask[:, n], n].mean()

        for itr in range(num_iters):
            # Run PCA on imputed data
            pca = PCA(D)
            x = pca.fit_transform(fulldata)

            # Fill in missing data with PCA predictions
            pred = pca.inverse_transform(x)
            fulldata[~mask] = pred[~mask]
    else:
        pca = PCA(D)
        x = pca.fit_transform(data)

    # Unpack xs
    xs = np.split(x, np.cumsum([len(data) for data in datas])[:-1])
    assert len(xs) == len(datas)
    assert all([x.shape[0] == data.shape[0] for x, data in zip(xs, datas)])

    return pca, xs
Example #17
def func_hess(all_parameters, inputs, y, kwargs1):
    #
    wb_sizes_x, wb_shapes = kwargs1['wb_sizes'], kwargs1['wb_shapes']
    x_wb_size_new = []
    sum_wb_sizes = 0
    wb_sizes_array = np.asarray(wb_sizes_x)
    # Build cumulative split offsets from the per-layer sizes
    for k in range(len(wb_sizes_array)):
        sum_wb_sizes += wb_sizes_array[k]
        x_wb_size_new.append(sum_wb_sizes)

    #print(x_wb_size_new)
    #print(wb_shapes)
    split_params_lst = []
    split_params = np.split(all_parameters, x_wb_size_new)
    for i in range(len(split_params) - 1):
        split_params_lst.append(np.reshape(split_params[i], wb_shapes[i]))
    ws2_classify = split_params_lst[0:][::2]
    bs2_classify = split_params_lst[1:][::2]
    # print(ws2_classify)
    hidden = kwargs1['hidden']
    y_hat = inputs
    for k_idx in range(len(hidden)):
        y_hat = sig_activation(
            np.dot(y_hat, ws2_classify[k_idx]) + bs2_classify[k_idx])
    y_hat = np.dot(y_hat, ws2_classify[-1]) + bs2_classify[-1]
    r = y - y_hat
    y_loss = np.sum(np.sum(np.square(r), 1))

    return y_loss
Example #18
    def initialize(self, x, u, **kwargs):
        localize = kwargs.get('localize', True)

        Ts = [_x.shape[0] for _x in x]
        if localize:
            from sklearn.cluster import KMeans
            km = KMeans(self.nb_states, random_state=1)
            km.fit((np.vstack(x)))
            zs = np.split(km.labels_, np.cumsum(Ts)[:-1])
            zs = [z[:-1] for z in zs]
        else:
            zs = [npr.choice(self.nb_states, size=T - 1) for T in Ts]

        _cov = np.zeros((self.nb_states, self.dm_obs, self.dm_obs))
        for k in range(self.nb_states):
            ts = [np.where(z == k)[0] for z in zs]
            xs = [
                np.hstack((_x[t, :], _u[t, :])) for t, _x, _u in zip(ts, x, u)
            ]
            ys = [_x[t + 1, :] for t, _x in zip(ts, x)]

            coef_, intercept_, sigma = linear_regression(xs, ys)
            self.A[k, ...] = coef_[:, :self.dm_obs]
            self.B[k, ...] = coef_[:, self.dm_obs:]
            self.c[k, :] = intercept_
            _cov[k, ...] = sigma

        self.cov = _cov
Example #19
 def unpack_gp_params(params):
     mean          = params[0]
     noise_scale   = np.exp(params[1]) + 0.001
     cov_params    = params[2:2+num_cov_params]
     pseudo_params = params[2+num_cov_params:]
     x0, y0 = np.split(pseudo_params,[num_pseudo_params*input_dimension])
     x0     = x0.reshape((num_pseudo_params,input_dimension))
     return mean, cov_params, noise_scale, x0, y0
Example #20
def dynamics_fn(t, coords):
    """
    Returns the time derivatives
    """
    dcoords = autograd.grad(hamiltonian_fn)(coords, pen_params)
    dqdt, dpdt = np.split(dcoords, 2)
    S = np.concatenate([dpdt, -dqdt], axis=-1)
    return S
Example #21
 def flow_det(z, params):
     """ log determinant of this flow """
     lparams, mparams = np.split(params, 2)
     diag = (1 - mask) * lfun(mask * z, lparams)
     if len(z.shape) > 1:
         return np.sum(diag, axis=1)
     else:
         return np.sum(diag)
Example #22
def pairwise_distance(vector, size):
    # Split the flat vector into `size` equal pieces.
    vectors = np.split(vector, size)

    # Sum the Euclidean distances between every pair of pieces.
    total = 0.0
    for i, vectori in enumerate(vectors):
        for j in range(i):
            total += np.linalg.norm(vectori - vectors[j])
    return total
    
def main():
    (parser, loss) = KLD(50)
    print(parser)
    print(parser.idxs_and_shapes)
    datum = {}
    datum['mu1']=np.zeros(50)
    datum['mu2']=np.ones(50)
    datum['sig1']=5
    datum['sig2']=6

    trial_vecs = []
    for _ in range(5):
        trial_vecs.append(np.random.rand(50))
    value_and_grad_fun = value_and_grad(pairwise_distance)
    value, grad = value_and_grad_fun(np.concatenate(trial_vecs), len(trial_vecs))
    

    print(trial_vecs)

    weights = parser.stack(datum)
    value_and_grad_fun = value_and_grad(loss)
    value, grad = value_and_grad_fun(weights)
    print(value)
    weights = weights - 10e-4*grad
    value, grad = value_and_grad_fun(weights)
    print(value)


    pass 

if __name__ == "__main__":
    main()
Example #23
 def unpack_gp_params(params):
     mean = params[0]
     noise_scale = np.exp(params[1]) + 0.001
     cov_params = params[2:2 + num_cov_params]
     pseudo_params = params[2 + num_cov_params:]
     x0, y0 = np.split(pseudo_params, [num_pseudo_params * input_dimension])
     x0 = x0.reshape((num_pseudo_params, input_dimension))
     return mean, cov_params, noise_scale, x0, y0
Example #24
def pend_dynamics(t, x0):
    theta, thetadot = np.split(x0, 2)

    dtheta = thetadot
    dthetadot = -g / L * np.sin(theta)

    dxdt = np.concatenate([dtheta, dthetadot], axis=-1)

    return dxdt
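A small usage sketch for the pendulum dynamics above; g and L are treated here as assumed module-level constants (g = 9.81, L = 1.0), and the function is repeated so the snippet runs on its own:

import numpy as np
from scipy.integrate import solve_ivp

g, L = 9.81, 1.0   # assumed constants used by pend_dynamics

def pend_dynamics(t, x0):
    theta, thetadot = np.split(x0, 2)
    dtheta = thetadot
    dthetadot = -g / L * np.sin(theta)
    return np.concatenate([dtheta, dthetadot], axis=-1)

# Integrate from theta = 0.1 rad at rest for 10 seconds.
sol = solve_ivp(pend_dynamics, t_span=[0, 10], y0=np.array([0.1, 0.0]),
                t_eval=np.linspace(0, 10, 200), rtol=1e-8)
theta, thetadot = sol.y
print(theta.shape, thetadot.shape)   # (200,) (200,)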
Example #25
def split_channel(var, backend='autograd'):
    if backend == 'autograd':
        var0, var1 = anp.split(var, var.shape[-1], axis=-1)
        slicer = [slice(None)] * (var.ndim - 1) + [0]
        return var0[tuple(slicer)], var1[tuple(slicer)]
    elif backend == 'pytorch':
        var0, var1 = tc.split(var, 1, dim=-1)
        slicer = [slice(None)] * (var.ndim - 1) + [0]
        return var0[tuple(slicer)], var1[tuple(slicer)]
Example #26
def split_channel(var, override_backend=None):
    bn = override_backend if override_backend is not None else global_settings.backend
    if bn == 'autograd':
        var0, var1 = anp.split(var, var.shape[-1], axis=-1)
        slicer = [slice(None)] * (var.ndim - 1) + [0]
        return var0[tuple(slicer)], var1[tuple(slicer)]
    elif bn == 'pytorch':
        var0, var1 = tc.split(var, 1, dim=-1)
        slicer = [slice(None)] * (var.ndim - 1) + [0]
        return var0[tuple(slicer)], var1[tuple(slicer)]
Example #27
 def update(self, gradients, params, learning_rate=0.1):
     self.i = self.i + 1
     step_size = learning_rate * self.decay_rate**(self.i /
                                                   self.decay_steps)
     self.m = (1 - self.b1) * gradients + self.b1 * self.m
     self.v = (1 - self.b2) * (gradients**2) + self.b2 * self.v
     mhat = self.m / (1 - self.b1**(self.i))
     vhat = self.v / (1 - self.b2**(self.i))
     params = params + step_size * mhat / (np.sqrt(vhat) + self.eps)
     return np.split(params, 2)
Example #28
    def apply(self, x):
        x = np.concatenate((x, self.h_t), axis=1)

        gates = x @ self.weights + self.bias

        i, j, f, o = np.split(gates, 4, axis=1)

        self.c_t = self.c_t * sigmoid(f) + sigmoid(i) * tanh(j)
        self.h_t = sigmoid(o) * tanh(self.c_t)

        return self.h_t
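The four-way gate split above assumes the stacked weight matrix has 4 * hidden_size output columns. A shape-only sketch with toy dimensions and a plain NumPy sigmoid:

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

batch, n_in, n_hidden = 5, 3, 4
x = np.random.randn(batch, n_in)
h_t = np.zeros((batch, n_hidden))
c_t = np.zeros((batch, n_hidden))

weights = 0.1 * np.random.randn(n_in + n_hidden, 4 * n_hidden)
bias = np.zeros(4 * n_hidden)

xh = np.concatenate((x, h_t), axis=1)
gates = xh @ weights + bias
i, j, f, o = np.split(gates, 4, axis=1)   # each gate is (batch, n_hidden)

c_t = c_t * sigmoid(f) + sigmoid(i) * np.tanh(j)
h_t = sigmoid(o) * np.tanh(c_t)
print(h_t.shape)                          # (5, 4)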
Example #29
def sample_qf_q_and_p(logprob, t, combined_params, k, num_samples, rs):
    add_dim_to_pair = lambda abc: (np.expand_dims(abc[0], 1),
                                   np.expand_dims(abc[1], 1),
                                   np.expand_dims(abc[2], 1))
    combined_qs_and_samples = [
        add_dim_to_pair(sample_q_and_p(logprob, t, params, num_samples, rs))
        for params in np.split(combined_params, k)
    ]
    combined_qs, combined_ps, combined_samples = zip(*combined_qs_and_samples)
    return np.concatenate(combined_qs, axis=1),\
           np.concatenate(combined_ps, axis=1),\
           np.concatenate(combined_samples, axis=1)  # should be NxK, and NxKxD
Example #30
 def unpack_gp_params(params):
     mean          = params[0]
     noise_scale   = np.exp(params[1]) + 0.001
     cov_params    = params[2:2+num_cov_params]
     pseudo_params = params[2+num_cov_params:]
     x0, y0 = np.split(pseudo_params,[num_pseudo_params*input_dimension])
     x0     = x0.reshape((num_pseudo_params,input_dimension))
     gp_details = {'mean': mean, 'noise_scale': noise_scale, 'cov_params': cov_params, 'x0': x0, 'y0': y0}
     #x0 = X
     #y0 = y
     return mean, cov_params, noise_scale, x0, y0
Example #31
    def function(self, x_mu_lmbda):
        """
        Compute the value of the augmented lagrangian relaxation defined as:

            L(x, mu, lambda) = 1/2 x^T Q x + q^T x + mu^T (A x - b) + lambda^T (G x - h)

        :param x_mu_lmbda: the primal-dual variable wrt evaluate the function
        :return: the function value wrt primal-dual variable
        """
        x, mu_lmbda = np.split(x_mu_lmbda, [self.primal.ndim])
        return self.primal.function(x) + mu_lmbda @ (self.AG @ x - self.bh)
Example #32
def lagrangian_fn(coords):
    ''' 
    change the data to (q, dq) 
    m = l = 1, g = 3
    dq = p / (m * l**2) = p
    '''
    q, dq = np.split(coords,2)
    # L(q, dq) = (m * l**2 * dq**2)/2 - m * g * l (1 - cos q)
    # L = dq**2/2. - 3*(1 - np.cos(q)) # pendulum lagrangian
    T = kinetic_energy(dq)
    U = potential_energy(q)
    L = T - U
    return L
Example #33
    def __call__(self, X):
        V = np.concatenate([X, self.Y, np.ones(1)])
        S = np.dot(V, self.W)

        z, i, f, o = np.split(S, 4)
        Z, I, F, O = sigmoid(z), np.tanh(i), sigmoid(f), sigmoid(o)

        self.c = Z * I + F * self.c

        C = np.tanh(self.c)
        self.Y = O * C

        return self.Y
Example #34
 def unflatten(vector):
     pieces = np.split(vector, split_indices)
     return constructor(unflatten(v) for unflatten, v in zip(unflatteners, pieces))
Example #35
def main():
    # Parse optional arguments
    parser=argparse.ArgumentParser()
    parser.add_argument("--epochs",help="Number of epochs to iterate over", type=int)
    parser.add_argument("--alpha",help="Step size for gradient descent", type=float)
    parser.add_argument("--func", help="Distribution choice for epsilon. \
                                        Can be norm for normal, log for logistic, \
                                        gumbel for gumbel, or r_gumbel for reverse_gumbel")
    args=parser.parse_args()
    # Set default epochs to 5
    if args.epochs:
        epoch = args.epochs
    else:
        epoch = 5

    # Set default alpha to 0.05
    if args.alpha:
        alpha = args.alpha
    else:
        alpha = 0.05

    # Set default distribution to normal
    if args.func in ['log','gumbel','r_gumbel']:
        if args.func == 'log':
            func = log_cdf
        elif args.func == 'gumbel':
            func = gumbel_cdf
        else:
            func = r_gumbel_cdf
    else:
        func = sci.stats.norm.cdf


    #Initialize the Spark Context
    sc=pyspark.SparkContext()
    df=pd.read_csv('../data/musicdata.small.csv',header=None)
    df.columns=['uid', 'aid', 'rating']

    # I and J are the number of users and artists, respectively
    I = df.uid.max() + 1
    J = df.aid.max() + 1

    # Take the first 2000 samples
    dftouse = df[['rating', 'uid', 'aid']].head(2000)

    # Adjust the indices
    dftouse['uid'] = dftouse['uid'] - 1
    dftouse['aid'] = dftouse['aid'] - 1

    # Take the ratings from 0-100 and transform them from 0-5
    dftouse.rating=np.around((dftouse.rating-1)/20)
    rating_vals = np.arange(1,dftouse.rating.max()+1)
    minR = dftouse.rating.min()
    dftouse['rating'] = dftouse['rating'] - minR
    # R is the number of rating values
    R = len(rating_vals)

    # create buckets as midpoints
    buckets = 0.5 * (rating_vals[1:] + rating_vals[:-1])

    # get length I, J
    I = dftouse.uid.max() + 1
    J = dftouse.aid.max() + 1

    # define some K
    K = 2

    # convert to numpy data matrix
    Xrat = np.array(dftouse)

    # initialize a theta vector with some random values
    theta = np.zeros((I + J) * K + I + J + 1)
    theta = np.random.normal(size=theta.shape[0], loc=0., scale=0.1)

    # define gradll as the gradient of the log likelihood
    gradrll = grad(rowloglikelihood)

    # Open up some files
    f1 = open("out.txt", "w")

    # Now we begin the parallelization!

    # set up parameters
    S=200


    # turn Xrat into an RDD
    xrat_rdd = sc.parallelize(Xrat)
    # Split the ratings into size S chunks
    split_xrat = np.split(Xrat, Xrat.shape[0] // S)
    #And then parallelize those chunks
    split_xrat = sc.parallelize(split_xrat)

    # then run the sgd!

    t=time.time()
    ptheta = split_xrat.map(lambda subX:n_row_sgd(theta, subX, buckets, I, J, K, R, alpha, epoch, gradrll, func)).mean()

    # then we predict (in parallel)
    y_preds = xrat_rdd.map(lambda row: parallel_predict(ptheta, row, buckets, I, J, K)).collect()

    print(Xrat[:20, 0])
    print(y_preds[:20])
    print(ptheta[:20])
    # Write things to file
    f1.write("Time (training):  "+ str(time.time()-t)+"\n")
    f1.write("Log likelihood:   "+ str(loglikelihood(ptheta, Xrat, buckets, I, J, K, R, func))+"\n")
    f1.write("Accuracy:         "+ str(accuracy(y_preds, Xrat))+"\n")
    f1.write("RMSE:             "+ str(rmse(y_preds, Xrat)))
    f1.close()
Example #36
def split_array(arr, length):
    truncate_to_multiple = lambda arr, k: arr[:k*(len(arr) // k)]
    return np.split(truncate_to_multiple(arr, length), len(arr) // length)
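A quick usage check for the helper above (re-declared here so the snippet runs on its own); elements that do not fill a complete chunk are dropped:

import numpy as np

def split_array(arr, length):
    truncate_to_multiple = lambda arr, k: arr[:k * (len(arr) // k)]
    return np.split(truncate_to_multiple(arr, length), len(arr) // length)

chunks = split_array(np.arange(10), 3)
print([c.tolist() for c in chunks])   # [[0, 1, 2], [3, 4, 5], [6, 7, 8]]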
Example #37
 def unflatten(vector):
     pieces = np.split(vector, split_indices)
     return {key: unflattener(piece)
             for piece, unflattener, key in zip(pieces, unflatteners, keys)}
Example #38
File: nnet.py Project: mattjj/svae
def gaussian_info(inputs):
    J_input, h = np.split(inputs, 2, axis=-1)
    J = -1./2 * log1pexp(J_input)
    return make_tuple(J, h)
Example #39
File: nnet.py Project: mattjj/svae
def gaussian_mean(inputs, sigmoid_mean=False):
    mu_input, sigmasq_input = np.split(inputs, 2, axis=-1)
    mu = sigmoid(mu_input) if sigmoid_mean else mu_input
    sigmasq = log1pexp(sigmasq_input)
    return make_tuple(mu, sigmasq)
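A runnable sketch of the mean/variance split above, with placeholder definitions of sigmoid and log1pexp and a plain tuple standing in for make_tuple:

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def log1pexp(a):
    return np.log1p(np.exp(a))        # softplus keeps the variance positive

def gaussian_mean(inputs, sigmoid_mean=False):
    mu_input, sigmasq_input = np.split(inputs, 2, axis=-1)
    mu = sigmoid(mu_input) if sigmoid_mean else mu_input
    sigmasq = log1pexp(sigmasq_input)
    return mu, sigmasq

outputs = np.random.randn(10, 6)      # network outputs: last axis is 2 * K
mu, sigmasq = gaussian_mean(outputs)
print(mu.shape, sigmasq.shape)        # (10, 3) (10, 3)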