Code Example #1
File: forwardAdditiveLKBA.py Project: hmgoforth/ppba
def compute_rm(M_feat, T_feat, P_mk_rel):
	'''
	Args:
		M_feat: Sequence of deep feature map images, 4D numpy array, k x Cf x Hf x Wf
		T_feat: Template images from I deep feature extractions, 4D numpy array, k x Cf x Hf x Wf
		P_mk_rel: Numpy array, warp parameters for each of the map images, k x 8 x 1

	Returns:
		rm: Numpy array, residuals of map images with templates, (num_frames - 1) (duplicated dim.) x k x (Cf x Hf x Wf) x 1
	'''

	k, map_c, map_h, map_w = M_feat.shape

	P_mk_rel_tens = torch.from_numpy(P_mk_rel).float()
	M_feat_tens = torch.from_numpy(M_feat).float()

	M_feat_warp_tens, M_feat_mask_tens = dlk.warp_hmg(M_feat_tens, P_mk_rel_tens)

	M_feat_warp = M_feat_warp_tens.numpy()
	M_feat_mask = M_feat_mask_tens.numpy()

	M_feat_mask_tile = np.tile(np.expand_dims(M_feat_mask, 1), (1, map_c, 1, 1))

	T_feat_mask = np.multiply(T_feat, M_feat_mask_tile)

	r_m = T_feat_mask - M_feat_warp

	r_m_rsh = r_m.reshape((k, map_c * map_h * map_w, 1))

	r_m_tl = np.tile(r_m_rsh, (num_frames - 1, 1, 1, 1))  # num_frames is a module-level global

	return r_m_tl
Code Example #2
File: emissions.py Project: cxrodgers/ssm
    def _initialize_with_pca(self,
                             datas,
                             inputs=None,
                             masks=None,
                             tags=None,
                             num_iters=20):
        Keff = 1 if self.single_subspace else self.K

        # First solve a linear regression for data given input
        if self.M > 0:
            from sklearn.linear_model import LinearRegression
            lr = LinearRegression(fit_intercept=False)
            lr.fit(np.vstack(inputs), np.vstack(datas))
            self.Fs = np.tile(lr.coef_[None, :, :], (Keff, 1, 1))

        # Compute residual after accounting for input
        resids = [
            data - np.dot(input, self.Fs[0].T)
            for data, input in zip(datas, inputs)
        ]

        # Run PCA to get a linear embedding of the data
        pca, xs, ll = pca_with_imputation(self.D,
                                          resids,
                                          masks,
                                          num_iters=num_iters)

        self.Cs = np.tile(pca.components_.T[None, :, :], (Keff, 1, 1))
        self.ds = np.tile(pca.mean_[None, :], (Keff, 1))

        return pca
Code Example #3
File: test_lds.py Project: pankajkarman/ssm
def test_solveh_banded_grad(T=10, D=4):
    """
    Test solveh_banded gradient
    """
    J_diag, J_lower_diag, J_full = make_block_tridiag(T, D)
    J_diag = np.tile(J_diag[None, :, :], (T, 1, 1))
    J_lower_diag = np.tile(J_lower_diag[None, :, :], (T - 1, 1, 1))
    b = npr.randn(T * D)

    J_banded = blocks_to_bands(J_diag, J_lower_diag, lower=True)
    check_grads(solveh_banded, argnum=0, modes=['rev'], order=1)(J_banded,
                                                                 b,
                                                                 lower=True)
    check_grads(solveh_banded, argnum=1, modes=['rev'], order=1)(J_banded,
                                                                 b,
                                                                 lower=True)

    J_banded = blocks_to_bands(J_diag,
                               np.swapaxes(J_lower_diag, -1, -2),
                               lower=False)
    check_grads(solveh_banded, argnum=0, modes=['rev'], order=1)(J_banded,
                                                                 b,
                                                                 lower=False)
    check_grads(solveh_banded, argnum=1, modes=['rev'], order=1)(J_banded,
                                                                 b,
                                                                 lower=False)
Code Example #4
    def concatenate_latents(self, sample_latents, region_latents,
                            type_latents):
        # make all combinations of vectors from the three input matrices
        n_samples, n_latents = sample_latents.shape
        n_regions = region_latents.shape[0]
        n_types = type_latents.shape[0]

        print("hello")
        # make possible combinations across samples and regions
        samples_regions = np.concatenate(
            (np.tile(sample_latents,
                     (1, n_regions)).reshape(n_regions * n_samples, n_latents),
             np.tile(region_latents, (n_samples, 1))),
            axis=1).reshape((n_samples, n_regions, 2 * n_latents))

        # make possible combinations across samples, regions and types
        samples_regions_types = np.concatenate(
            (np.tile(samples_regions, (1, n_types)).reshape(
                n_regions * n_samples * n_types, n_latents * 2),
             np.tile(type_latents, (n_samples * n_regions, 1))),
            axis=1)

        # simple check that everything was propagated correctly
        assert (all(
            samples_regions_types.reshape((
                n_samples, n_regions, n_types,
                3 * n_latents))[0, 1, 2] == np.concatenate((sample_latents[0],
                                                            region_latents[1],
                                                            type_latents[2]))))

        return samples_regions_types
Code Example #5
def get_k(stiffness, ke):
    # Constructs a sparse stiffness matrix, k, for use in the displace function.
    nely, nelx = stiffness.shape

    # get position of the nodes of each element in the stiffness matrix
    ely, elx = np.meshgrid(range(nely), range(nelx))  # x, y coords
    ely, elx = ely.reshape(-1, 1), elx.reshape(-1, 1)

    n1 = (nely + 1) * (elx + 0) + (ely + 0)
    n2 = (nely + 1) * (elx + 1) + (ely + 0)
    n3 = (nely + 1) * (elx + 1) + (ely + 1)
    n4 = (nely + 1) * (elx + 0) + (ely + 1)
    edof = np.array([
        2 * n1, 2 * n1 + 1, 2 * n2, 2 * n2 + 1, 2 * n3, 2 * n3 + 1, 2 * n4,
        2 * n4 + 1
    ])
    edof = edof.T[0]

    x_list = np.repeat(edof, 8)  # flat list pointer of each node in an element
    y_list = np.tile(edof, 8).flatten()  # flat list pointer of each node in an element

    # make the stiffness matrix
    kd = stiffness.T.reshape(nelx * nely, 1, 1)
    value_list = (kd * np.tile(ke, kd.shape)).flatten()
    return value_list, y_list, x_list
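
get_k only builds the (value, row, col) triplets; assembling them into a sparse matrix happens elsewhere (the comment mentions a displace function). A minimal assembly sketch, assuming scipy is available and using a placeholder ke, not a real element stiffness matrix:

import numpy as np
import scipy.sparse

# Assemble the global stiffness matrix in COO form. Duplicate (row, col)
# entries are summed by COO construction, which is exactly what
# finite-element assembly requires.
nely, nelx = 2, 2
stiffness = np.ones((nely, nelx))
ke = np.eye(8)  # placeholder 8x8 element matrix, for illustration only
values, rows, cols = get_k(stiffness, ke)
ndof = 2 * (nely + 1) * (nelx + 1)
K = scipy.sparse.coo_matrix((values, (rows, cols)), shape=(ndof, ndof)).tocsc()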
Code Example #6
def generate_convolutional_mog_data(n, im_side=17, autocorr_scale=5.):

    circ_cov_mat = conv_utils.generate_iostropic_circulant_cov_2d(
        im_side, autocorr_scale=autocorr_scale)
    circ_class_samples = conv_utils.rgb_gauss_random_samples(
        n, cov_or_covs=circ_cov_mat)

    white_noise_cov_mat = np.eye(im_side**2)
    noise_class_samples = conv_utils.rgb_gauss_random_samples(
        n, cov_or_covs=white_noise_cov_mat)

    # combine batch major samples from each class
    inputs = np.concatenate([circ_class_samples.T, noise_class_samples.T])

    # convert to images, then return to batch minor
    inputs = np.asarray(
        [conv_utils.to_im_rgb(inpt, im_side) for inpt in inputs]).T

    # generate one_hot and label vectors
    one_hots = np.hstack([
        np.tile(np.atleast_2d([1, 0]).T, [1, n]),
        np.tile(np.atleast_2d([0, 1]).T, [1, n])
    ])
    labels = np.argmax(one_hots, axis=0)

    return inputs, one_hots, labels
Code Example #7
File: redshift_utils.py Project: fagan2888/DESI-MCMC
def sinc_interp(new_samples, samples, fvals, left=None, right=None):
    """
    Interpolates x, sampled at "s" instants
    Output y is sampled at "u" instants ("u" for "upsampled")

    from Matlab:
    http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html        
    """
    if len(fvals) != len(samples):
        raise ValueError('function vals (fvals) and samples must be the same length')

    # Find the period
    T = (samples[1:] - samples[:-1]).max()

    # sinc resample
    sincM = np.tile(new_samples, (len(samples), 1)) - \
            np.tile(samples[:, np.newaxis], (1, len(new_samples)))
    y = np.dot(fvals, np.sinc(sincM / T))

    # set outside values to left/right inputs if given
    if left is not None:
        y[new_samples < samples[0]] = left
    if right is not None:
        y[new_samples > samples[-1]] = right
    return y
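
A usage sketch for sinc_interp (relying on the left/right fill fix above); the signal and rates here are made up for illustration:

import numpy as np

# Upsample a 5 Hz tone sampled at 50 Hz. Edge regions suffer truncation
# error, so only the interior is expected to match closely.
t = np.arange(0, 1, 1.0 / 50)
x = np.sin(2 * np.pi * 5 * t)
t_fine = np.linspace(0, 1, 500)
x_fine = sinc_interp(t_fine, t, x, left=x[0], right=x[-1])
interior = (t_fine > 0.1) & (t_fine < 0.9)
err = np.max(np.abs(x_fine[interior] - np.sin(2 * np.pi * 5 * t_fine[interior])))
print('max interior error:', err)  # small, but nonzero due to truncation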
Code Example #8
File: emissions.py Project: zshwuhan/ssm
 def _initialize_with_pca(self, datas, masks, num_iters=20):
     pca, xs = pca_with_imputation(self.D, datas, masks, num_iters=num_iters)
     Keff = 1 if self.single_subspace else self.K
     self.Cs = np.tile(pca.components_.T[None, :, :], (Keff, 1, 1))
     self.ds = np.tile(pca.mean_[None, :], (Keff, 1))
         
     return pca
Code Example #9
File: utils.py Project: abrahamnunes/fitr
def reduce_then_tile(X, f, axis=1):
    """ Computes some reduction function over an axis, then tiles that vector to create matrix of original size

    Arguments:

        X: `ndarray((n, m))`. Matrix.
        f: `function` that reduces data across some axis (e.g. `np.sum`, `np.max`)
        axis: `int` axis over which the data should be reduced (only 2-D arrays are supported for now)

    Returns:

        `ndarray((n, m))`

    Examples:

    Here is one way to compute a softmax function over the columns of `X`, for each row.

    ```
    import numpy as np
    X = np.random.normal(0, 1, size=(10, 3))**2
    max_x = reduce_then_tile(X, np.max, axis=1)
    exp_x = np.exp(X - max_x)
    sum_exp_x = reduce_then_tile(exp_x, np.sum, axis=1)
    y = exp_x/sum_exp_x
    ```

    """
    y = f(X, axis=axis)
    if axis == 1:
        y = np.tile(y.reshape(-1, 1), [1, X.shape[1]])
    elif axis == 0:
        y = np.tile(y.reshape(1, -1), [X.shape[0], 1])
    return y
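
A runnable version of the docstring example, plus a check that each row is a valid distribution. Modern NumPy would usually write this with keepdims=True and broadcasting; reduce_then_tile just makes the tiled intermediate explicit:

import numpy as np

# Row-wise softmax built from reduce_then_tile (defined above).
X = np.random.normal(0, 1, size=(10, 3))**2
max_x = reduce_then_tile(X, np.max, axis=1)
exp_x = np.exp(X - max_x)
sum_exp_x = reduce_then_tile(exp_x, np.sum, axis=1)
y = exp_x / sum_exp_x
assert np.allclose(y.sum(axis=1), 1.0)  # every row sums to one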
Code Example #10
    def log_transition_matrices(self, data, input, mask, tag):
        T, D = data.shape
        # Previous state effect
        log_Ps = np.tile(self.log_Ps[None, :, :], (T - 1, 1, 1))
        # Input effect
        log_Ps = log_Ps + np.dot(input[1:], self.Ws.T)[:, None, :]

        # Past observations effect

        # Off-diagonal elements of transition matrix (state switches), from past observations
        log_Ps_offdiag = np.tile(
            np.dot(data[:-1], self.Rs.T)[:, None, :], (1, self.K, 1))
        mult_offdiag = 1 - np.tile(
            np.identity(self.K)[None, :, :], (log_Ps_offdiag.shape[0], 1, 1))

        # Diagonal elements of transition matrix (stickiness), from past observations
        log_Ps_diag = np.tile(
            np.dot(data[:-1], self.Ss.T)[:, None, :], (1, self.K, 1))
        mult_diag = np.tile(
            np.identity(self.K)[None, :, :], (log_Ps_diag.shape[0], 1, 1))

        log_Ps = log_Ps + log_Ps_diag * mult_diag  # Diagonal elements (stickiness) from past observations
        log_Ps = log_Ps + np.identity(
            self.K) * self.s  # Diagonal elements (stickiness) bias
        log_Ps = log_Ps + log_Ps_offdiag * mult_offdiag  # Off-diagonal elements (state switching) from past observations
        log_Ps = log_Ps + (1 - np.identity(
            self.K)) * self.r  # Off-diagonal elements (state switching) bias

        return log_Ps - logsumexp(log_Ps, axis=2, keepdims=True)  # Normalize
Code Example #11
File: utilities.py Project: RobeeF/DDGMM
def compute_rho(eta, H, psi, mu_s, sigma_s, z_c, chsi):
    ''' Compute rho as defined in equation (8) of the DGMM paper 
    eta (list of nb_layers elements of shape (K_l x r_{l-1}, 1)): mu 
                                                    parameters for each layer    
    H (list of nb_layers elements of shape (K_l x r_{l-1}, r_l)): Lambda 
                                                    parameters for each layer
    psi (list of nb_layers elements of shape (K_l x r_{l-1}, r_{l-1})): Psi 
                                                    parameters for each layer
    mu_s (list of nd-arrays): The mean parameters of the paths starting at each layer
    sigma_s (list of nd-arrays): The covariance parameters of the paths starting at each layer
    z_c (list of nd-arrays): z^{(l)} - eta^{(l)} for each layer
    chsi (list of nd-arrays): The chsi parameters for each layer
    -----------------------------------------------------------------------
    returns (list of ndarrays): The rho parameters (covariance matrices) 
                                    for all paths starting at each layer
    '''
    
    # t, pinv and n_axis are module-level aliases for np.transpose, np.linalg.pinv and np.newaxis
    L = len(H)
    rho = [0 for i in range(L)]
    k = [len(h) for h in H]
    k_aug = k + [1] 

    for l in range(0, L):
        sigma_next_l = np.tile(sigma_s[l + 1], (k[l], 1, 1))
        mu_next_l = np.tile(mu_s[l + 1], (k[l], 1, 1))

        HxPsi_inv = t(H[l], (0, 2, 1)) @ pinv(psi[l])
        HxPsi_inv = np.repeat(HxPsi_inv, np.prod(k_aug[l + 1: ]), axis = 0)

        rho[l] = chsi[l][n_axis] @ (HxPsi_inv[n_axis] @ z_c[l][..., n_axis] \
                                    + (pinv(sigma_next_l) @ mu_next_l)[n_axis])
                
    return rho
Code Example #12
def make_gmm_blobs_d2(distance_factor=5.0):
    def rot2d_matrix(angle):
        r = np.array([[math.cos(angle), -math.sin(angle)],
                      [math.sin(angle), math.cos(angle)]])
        return r

    def rot2d_cov(angle, cov):
        R = rot2d_matrix(angle)
        return np.dot(np.dot(R, cov), R.T)

    means = np.array([[-1.0, 1], [1, 1], [-1, -1], [1, -1]]) * distance_factor
    base_cov = np.array([[5.0, 0], [0, 0.5]])

    # 4 copies each of the (rotated) anisotropic base covariance in 2d
    covr = np.tile(base_cov, [4, 1, 1])
    covq = np.tile(rot2d_cov(np.pi / 5.0, base_cov), [4, 1, 1])
    covp = np.tile(rot2d_cov(np.pi / 2.0, base_cov), [4, 1, 1])

    p = density.GaussianMixture(means, covp)
    q = density.GaussianMixture(means, covq)
    r = density.GaussianMixture(means, covr)

    modelp = model.ComposedModel(p=p)
    modelq = model.ComposedModel(p=q)
    ds = r.get_datasource()
    return modelp, modelq, ds
Code Example #13
def index2d(channel, stride, kshape, xshape):
    k_h, k_w = kshape
    x_h, x_w = xshape

    c_idx = np.repeat(np.arange(channel), k_h * k_w)
    c_idx = c_idx.reshape(-1, 1)

    res_h = int((x_h - k_h) / stride) + 1
    res_w = int((x_w - k_w) / stride) + 1

    size = channel * k_h * k_w

    h_idx = np.tile(np.repeat(stride * np.arange(res_h), res_w), size)
    h_idx = h_idx.reshape(size, -1)
    h_off = np.tile(np.repeat(np.arange(k_h), k_w), channel)
    h_off = h_off.reshape(size, -1)
    h_idx = h_idx + h_off

    w_idx = np.tile(np.tile(stride * np.arange(res_w), res_h), size)
    w_idx = w_idx.reshape(size, -1)
    w_off = np.tile(np.arange(k_w), channel * k_h)
    w_off = w_off.reshape(size, -1)
    w_idx = w_idx + w_off

    return c_idx, h_idx, w_idx
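
A sketch of how these index arrays are typically consumed: fancy-indexing an input of shape (channel, x_h, x_w) into im2col-style patch columns. The input x is made up for illustration:

import numpy as np

# Gather 2x2 patches from a 3-channel 4x4 input with stride 1.
channel, k_h, k_w, stride = 3, 2, 2, 1
x = np.arange(channel * 4 * 4).reshape(channel, 4, 4)
c_idx, h_idx, w_idx = index2d(channel, stride, (k_h, k_w), (4, 4))
cols = x[c_idx, h_idx, w_idx]  # broadcasts to (channel*k_h*k_w, res_h*res_w)
assert cols.shape == (channel * k_h * k_w, 3 * 3)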
Code Example #14
    def fit(self):
        training_data_2 = np.array(list(training_data))
        l = len(training_data_2)
        l -= l % self.batch_size

        training_data_2 = training_data_2[:l]
        training_data_2 = np.array(list(training_data_2))
        training_data_2 = training_data_2.reshape((self.batch_size, -1))

        # sanity check: initially this should be roughly the alphabet size
        print('validation perplexity:')
        print(self.perplexity(validation_data))
        best_validation_perplexity = 100
        for epoch in range(self.num_of_epochs):
            print("epoch number: " + str(epoch + 1))
            self.C = np.zeros(self.h_size)
            self.h = np.zeros(self.h_size)
            hprev = np.tile(self.h, (self.batch_size, 1))
            Cprev = np.tile(self.C, (self.batch_size, 1))
            for i in tqdm(
                    range((training_data_2.shape[1] - 1) //
                          self.number_of_steps)):
                inputs = training_data_2[:, i * self.number_of_steps:(i + 1) *
                                         self.number_of_steps]
                targets = training_data_2[:, i * self.number_of_steps +
                                          1:(i + 1) * self.number_of_steps + 1]

                delta_w = self._d_cost_batched(inputs, targets, hprev, Cprev,
                                               self.weights)
                clipped_delta_w = [np.clip(d, -5, 5) for d in delta_w]
                # print(any([cdw != dw for cdw, dw in zip(clipped_delta_w, delta_w)]))  doesn't work!
                delta_w = clipped_delta_w
                for w, d in zip(self.weights, delta_w):
                    w -= d * self.learning_rate

                hprev, Cprev = self._update_hidden_state_batched(
                    inputs, hprev, Cprev, self.weights)

            print('validation perplexity:')  # should the state be reset? it has seen 10 books by now
            validation_perplexity = self.perplexity(validation_data)
            print(validation_perplexity)

            if validation_perplexity < best_validation_perplexity:
                best_validation_perplexity = validation_perplexity
            else:
                self.learning_rate /= 2

            prefix = 'Jam jest Jacek'
            self._update_hidden_state(
                prefix[:-1], self.weights
            )  # feed the prefix in first, ignoring the outputs; don't start reading them right after the initial 'J'
            sample = self.sample(prefix[-1], 200)
            print(prefix + sample)

        print('test perplexity:')  # should the state be reset? it has seen 11 books plus 'Jam jest Jacek'...
        print(self.perplexity(test_data))
Code Example #15
    def __init__(self,
                 latent_dim,
                 noise_dim,
                 model_directory,
                 latent=None,
                 full=False,
                 config_fname='op_conditions.ini'):

        self.latent = latent
        self.latent_dim = latent_dim
        self.noise_dim = noise_dim
        if noise_dim == 0:
            full = False
        self.full = full

        if (not full) and (self.latent is None):
            self.dim = self.latent_dim
            self.bounds = np.array([[0., 1.]])
            self.bounds = np.tile(self.bounds, [self.dim, 1])
        else:
            self.dim = self.latent_dim + self.noise_dim
            if self.latent is not None:
                assert len(self.latent) == self.latent_dim
                latent_bounds = np.vstack((latent - 0.1, latent + 0.1)).T
            else:
                latent_bounds = np.array([0., 1.])
                latent_bounds = np.tile(latent_bounds, [self.latent_dim, 1])
            noise_bounds = np.array([-0.5, 0.5])
            noise_bounds = np.tile(noise_bounds, [self.noise_dim, 1])
            self.bounds = np.vstack((latent_bounds, noise_bounds))

        # Expand bounds by 20%
        b = self.bounds
        r = np.max(b, axis=1) - np.min(b, axis=1)
        self.bounds = np.zeros_like(b)
        self.bounds[:, 0] = b[:, 0] - 0.2 * r
        self.bounds[:, 1] = b[:, 1] + 0.2 * r

        self.y = None
        self.config_fname = config_fname

        self.gan = GAN(self.latent_dim, self.noise_dim, 192, 31, (0., 1.))
        self.gan.restore(model_directory)

        n_points = self.gan.X_shape[0]
        x_synth = self.gan.x_fake_test
        x_synth_ = tf.squeeze(x_synth)
        self.x_target = tf.placeholder(tf.float32, shape=[n_points, 2])
        self.e = tf.reduce_mean(
            tf.reduce_sum(tf.square(x_synth_ - self.x_target), axis=1))
        if self.full:
            self.grad_e = tf.concat(tf.gradients(self.e,
                                                 [self.gan.c, self.gan.z]),
                                    axis=1)
        else:
            self.grad_e = tf.gradients(self.e, self.gan.c)
Code Example #16
def test_blocks_to_banded_grad(T=25, D=4):
    """
    Test blocks_to_banded gradient
    """
    J_diag, J_lower_diag, J_full = make_block_tridiag(T, D)
    J_diag = np.tile(J_diag[None, :, :], (T, 1, 1))
    J_lower_diag = np.tile(J_lower_diag[None, :, :], (T-1, 1, 1))

    check_grads(blocks_to_bands, argnum=0, modes=['rev'], order=1)(J_diag, J_lower_diag)
    check_grads(blocks_to_bands, argnum=1, modes=['rev'], order=1)(J_diag, J_lower_diag)
Code Example #17
def test_transpose_banded_grad(T=25, D=4):
    """
    Test transpose_banded gradient
    """
    J_diag, J_lower_diag, J_full = make_block_tridiag(T, D)
    J_diag = np.tile(J_diag[None, :, :], (T, 1, 1))
    J_lower_diag = np.tile(J_lower_diag[None, :, :], (T-1, 1, 1))
    J_banded = blocks_to_bands(J_diag, J_lower_diag, lower=True)

    check_grads(transpose_banded, argnum=1, modes=['rev'], order=1)((2*D-1, 0), J_banded)
Code Example #18
def gamma_h_boosted(epsilon, u, alpha):
    """
    Reparameterization for gamma rejection sampler with shape augmentation.
    """
    B = u.shape[1]
    K = alpha.shape[0]
    alpha_vec = np.tile(alpha, (B, 1)).T + np.tile(np.arange(B), (K, 1))
    u_pow = np.power(u, 1. / alpha_vec)

    return np.prod(u_pow, axis=1) * gamma_h(epsilon, alpha + B)
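
gamma_h is defined elsewhere in this project; in rejection-sampler reparameterization code it is usually the Marsaglia-Tsang transform, so here is a sketch under that assumption:

import numpy as np

# Assumed definition of gamma_h (Marsaglia-Tsang transform); the real
# project may differ in details.
def gamma_h(epsilon, alpha):
    # z = (alpha - 1/3) * (1 + epsilon / sqrt(9*alpha - 3))**3 ~ Gamma(alpha, 1)
    return (alpha - 1. / 3.) * (1. + epsilon / np.sqrt(9. * alpha - 3.))**3

# gamma_h_boosted then converts a Gamma(alpha + B) draw into a Gamma(alpha)
# draw by multiplying with prod_i u_i**(1 / (alpha + i)) (shape augmentation).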
Code Example #19
File: util.py Project: mdw771/beyond_dof
def save_rotation_lookup(array_size, n_theta, dest_folder=None):

    image_center = [np.floor(x / 2) for x in array_size]

    coord0 = np.arange(array_size[0])
    coord1 = np.arange(array_size[1])
    coord2 = np.arange(array_size[2])

    coord2_vec = np.tile(coord2, array_size[1])

    coord1_vec = np.tile(coord1, array_size[2])
    coord1_vec = np.reshape(coord1_vec, [array_size[1], array_size[2]])
    coord1_vec = np.reshape(np.transpose(coord1_vec), [-1])

    coord0_vec = np.tile(coord0, [array_size[1] * array_size[2]])
    coord0_vec = np.reshape(coord0_vec, [array_size[1] * array_size[2], array_size[0]])
    coord0_vec = np.reshape(np.transpose(coord0_vec), [-1])

    # move origin to image center
    coord1_vec = coord1_vec - image_center[1]
    coord2_vec = coord2_vec - image_center[2]

    # create matrix of coordinates
    coord_new = np.stack([coord1_vec, coord2_vec]).astype(np.float32)

    # create rotation matrix
    theta_ls = np.linspace(0, 2 * np.pi, n_theta)
    coord_old_ls = []
    for theta in theta_ls:
        m_rot = np.array([[np.cos(theta),  -np.sin(theta)],
                          [np.sin(theta), np.cos(theta)]])
        coord_old = np.matmul(m_rot, coord_new)
        coord1_old = np.round(coord_old[0, :] + image_center[1]).astype(int)
        coord2_old = np.round(coord_old[1, :] + image_center[2]).astype(int)
        # clip coordinates
        coord1_old = np.clip(coord1_old, 0, array_size[1]-1)
        coord2_old = np.clip(coord2_old, 0, array_size[2]-1)
        coord_old = np.stack([coord1_old, coord2_old], axis=1)
        coord_old_ls.append(coord_old)
    if dest_folder is None:
        dest_folder = 'arrsize_{}_{}_{}_ntheta_{}'.format(array_size[0], array_size[1], array_size[2], n_theta)
    if not os.path.exists(dest_folder):
        os.mkdir(dest_folder)
    for i, arr in enumerate(coord_old_ls):
        np.save(os.path.join(dest_folder, '{:04}'.format(i)), arr)

    coord1_vec = coord1_vec + image_center[1]
    coord1_vec = np.tile(coord1_vec, array_size[0])
    coord2_vec = coord2_vec + image_center[2]
    coord2_vec = np.tile(coord2_vec, array_size[0])
    for i, coord in enumerate([coord0_vec, coord1_vec, coord2_vec]):
        np.save(os.path.join(dest_folder, 'coord{}_vec'.format(i)), coord)

    return coord_old_ls
Code Example #20
def ll(x, num_peds, ess, robot_mu_x, robot_mu_y, ped_mu_x, ped_mu_y, \
       cov_robot_x, cov_robot_y, inv_cov_robot_x, inv_cov_robot_y, \
       cov_ped_x, cov_ped_y, inv_cov_ped_x, inv_cov_ped_y, \
       one_over_cov_sum_x, one_over_cov_sum_y, normalize):
    T = np.size(robot_mu_x)

    quad_robot_mu_x = np.dot((x[:T]-robot_mu_x).T, np.dot(inv_cov_robot_x, \
                                                                x[:T]-robot_mu_x))
    quad_robot_mu_y = np.dot((x[T:2*T]-robot_mu_y).T, np.dot(inv_cov_robot_y, \
                                                             x[T:2*T]-robot_mu_y))
    llambda = -0.5 * quad_robot_mu_x - 0.5 * quad_robot_mu_y

    n = 2
    for ped in range(ess):
        quad_ped_mu_x = np.dot((x[n*T:(n+1)*T]-ped_mu_x[ped]).T, np.dot(\
                                inv_cov_ped_x[ped], x[n*T:(n+1)*T]-ped_mu_x[ped]))
        quad_ped_mu_y = np.dot((x[(n+1)*T:(n+2)*T]-ped_mu_y[ped]).T, np.dot(\
                            inv_cov_ped_y[ped], x[(n+1)*T:(n+2)*T]-ped_mu_y[ped]))
        llambda = llambda - 0.5 * quad_ped_mu_x - 0.5 * quad_ped_mu_y
        n = n + 2

    n = 2
    for ped in range(ess):
        # if normalize == True:
        #   # normalize_x = np.multiply(np.power(2*np.pi,-0.5), \
        # one_over_std_sum_x[ped])
        #   # normalize_y = np.multiply(np.power(2*np.pi,-0.5), \
        # one_over_std_sum_y[ped])
        # else:
        normalize_x = 1.
        normalize_y = 1.

        vel_x = np.tile(x[:T], (T, 1)).T - np.tile(x[n * T:(n + 1) * T],
                                                   (T, 1))
        vel_y = np.tile(x[T:2 * T],
                        (T, 1)).T - np.tile(x[(n + 1) * T:(n + 2) * T], (T, 1))
        n = n + 2

        vel_x_2 = np.power(vel_x, 2)
        vel_y_2 = np.power(vel_y, 2)

        quad_robot_ped_x = np.multiply(vel_x_2, one_over_cov_sum_x[ped])
        quad_robot_ped_y = np.multiply(vel_y_2, one_over_cov_sum_y[ped])

        Z_x = np.multiply(normalize_x, np.exp(-0.5 * quad_robot_ped_x))
        Z_y = np.multiply(normalize_y, np.exp(-0.5 * quad_robot_ped_y))

        Z = np.multiply(Z_x, Z_y)

        log_znot_norm = np.sum(np.log1p(-Z))

        llambda = llambda + log_znot_norm
    return -1. * llambda
Code Example #21
def compute_path_params(eta, H, psi):
    ''' Compute the gaussian parameters for each path
    H (list of nb_layers elements of shape (K_l x r_{l-1}, r_l)): Lambda 
                                                    parameters for each layer
    psi (list of nb_layers elements of shape (K_l x r_{l-1}, r_{l-1})): Psi 
                                                    parameters for each layer
    eta (list of nb_layers elements of shape (K_l x r_{l-1}, 1)): mu 
                                                    parameters for each layer
    ------------------------------------------------------------------------------------------------
    returns (tuple of len 2): The updated parameters mu_s and sigma for all s in Omega
    '''

    #=====================================================================
    # Retrieving model parameters
    #=====================================================================

    # t and n_axis are module-level aliases for np.transpose and np.newaxis
    L = len(H)
    k = [len(h) for h in H]
    k_aug = k + [
        1
    ]  # Integrating the number of components of the last layer i.e 1

    r1 = H[0].shape[1]
    r2_L = [h.shape[2] for h in H]  # r[2:L]
    r = [r1] + r2_L  # r augmented

    #=====================================================================
    # Initiating the parameters for all layers
    #=====================================================================

    mu_s = [0 for i in range(L + 1)]
    sigma_s = [0 for i in range(L + 1)]

    # Initialization with the parameters of the last layer
    mu_s[-1] = np.zeros((1, r[-1], 1))  # swap k and r later
    sigma_s[-1] = np.eye(r[-1])[n_axis]

    #==================================================================================
    # Compute Gaussian parameters from top to bottom for each path
    #==================================================================================

    for l in reversed(range(0, L)):
        H_repeat = np.repeat(H[l], np.prod(k_aug[l + 1:]), axis=0)
        eta_repeat = np.repeat(eta[l], np.prod(k_aug[l + 1:]), axis=0)
        psi_repeat = np.repeat(psi[l], np.prod(k_aug[l + 1:]), axis=0)

        mu_s[l] = eta_repeat + H_repeat @ np.tile(mu_s[l + 1], (k[l], 1, 1))

        sigma_s[l] = H_repeat @ np.tile(sigma_s[l + 1], (k[l], 1, 1)) @ t(H_repeat, (0, 2, 1)) \
            + psi_repeat

    return mu_s, sigma_s
Code Example #22
def gamma_grad_h_boosted(epsilon, u, alpha):
    """
    Gradient of reparameterization with shape augmentation.
    """
    B = u.shape[1]
    K = alpha.shape[0]
    h_val = gamma_h(epsilon, alpha+B)
    h_der = gamma_grad_h(epsilon, alpha+B)
    alpha_vec = np.tile(alpha, (B, 1)).T + np.tile(np.arange(B), (K, 1))
    u_pow = np.power(u, 1. / alpha_vec)
    u_der = -np.log(u) / alpha_vec**2

    return np.prod(u_pow, axis=1) * h_val * (h_der / h_val + np.sum(u_der, axis=1))
Code Example #23
File: models.py Project: AndyZhang17/CAM_4th_yr_proj
def pseudo_data_gen(dim_t, gp_size, dim_s=1):
    '''
    Pseudodata uniformly spaced between -1 and +1
    :param dim_t: number of output dimensions for t (also overrides dim_s below)
    :param gp_size: number of grid points
    :return: (s, t) pseudo-input arrays
    '''
    dim_s = dim_t  # note: the dim_s argument is immediately overridden
    s = np.linspace(-1.0, +1.0, num=gp_size)
    t = np.reshape(np.tile(s, dim_t), (dim_t, gp_size)).T
    if dim_s != 1:
        s = np.reshape(np.tile(s, dim_s), (dim_s, gp_size)).T
    return s, t
Code Example #24
    def translate_and_rotate(self, X):
        if not self.reorient:
            return X
        else:
            nchrom = self.lengths.shape[0]
            if not self.fix_homo:
                nchrom *= 2

            if self.translate and self.rotate:
                translations = X[:nchrom * 3].reshape(-1, 3)
                rotations = X[nchrom * 3:].reshape(-1, 4)
            elif self.translate:
                translations = X.reshape(-1, 3)
                rotations = ag_np.zeros((nchrom, 4))
            elif self.rotate:
                rotations = X.reshape(-1, 4)
                translations = ag_np.zeros((nchrom, 3))
            else:
                raise ValueError(
                    'Must select translate=True and/or rotate=True when finding ideal rotation and/or translation'
                )

            lengths = np.tile(self.lengths, self.ploidy)
            if self.fix_homo:
                translations = ag_np.tile(translations, (self.ploidy, 1))
                rotations = ag_np.tile(rotations, (self.ploidy, 1))

            new_structures = []
            for init_structure in self.init_structures:
                new_structure = []
                begin = end = 0
                for i in range(lengths.shape[0]):
                    length = lengths[i]
                    end += length
                    if self.rotate:
                        new_structure.append(
                            ag_np.dot(
                                init_structure[begin:end, :] +
                                translations[i, :],
                                _quat_to_rotation_matrix(rotations[i, :])))
                    else:
                        new_structure.append(init_structure[begin:end, :] +
                                             translations[i, :])
                    begin = end

                new_structure = ag_np.concatenate(new_structure)
                new_structures.append(new_structure)

            return new_structures
Code Example #25
File: gaussian.py Project: andreas-koukorinis/KCEF
    def _hessian_bloc_dim(self, sigma, Y_i, Y_j, K, i, j):
        n = Y_i.shape[0]
        Y_ii = np.reshape(Y_i, [1, -1])
        Y_jj = np.reshape(Y_j, [1, -1])
        diff_i = np.tile(Y_ii, [n, 1])
        diff_i = diff_i.T - diff_i
        diff_j = np.tile(Y_jj, [n, 1])
        diff_j = diff_j.T - diff_j

        if i == j:
            return (np.multiply(K, (2. * (sigma) - 4. *
                                    (sigma**2) * np.multiply(diff_i, diff_j))))
        else:
            return -4. * (sigma**2) * (np.multiply(
                K, np.multiply(diff_i, diff_j)))
Code Example #26
    def build_sequential_ds(self, inputs):
        DiffX = lambda X: np.tile(X, (2, 1)) - np.transpose(np.tile(X, (2, 1)))
        DiffY = lambda Y: np.tile(Y, (2, 1)) - np.transpose(np.tile(Y, (2, 1)))
        dx = lambda X, Y: (-X + (self.A - X**2 - Y**2) * X - self.w * Y
                           + self.G * np.sum(self.C * DiffX(X), axis=1)
                           + self.dsig * np.random.randn(2))
        dy = lambda X, Y: (-Y + (self.A - X**2 - Y**2) * Y + self.w * X
                           + self.G * np.sum(self.C * DiffY(Y), axis=1)
                           + self.dsig * np.random.randn(2))

        def fun(X):
            x, y = X[:2], X[2:]
            return np.sum(np.abs((dx(x,y), dy(x,y))))

        return fun
Code Example #27
    def build_jacobian_fun(self, inputs):
        DiffX = lambda X: np.tile(X, (2, 1)) - np.transpose(np.tile(X, (2, 1)))
        DiffY = lambda Y: np.tile(Y, (2, 1)) - np.transpose(np.tile(Y, (2, 1)))
        dx = lambda X, Y: (-X + (self.A - X**2 - Y**2) * X - self.w * Y
                           + self.G * np.sum(self.C * DiffX(X), axis=1)
                           + self.dsig * np.random.randn(2))
        dy = lambda X, Y: (-Y + (self.A - X**2 - Y**2) * Y + self.w * X
                           + self.G * np.sum(self.C * DiffY(Y), axis=1)
                           + self.dsig * np.random.randn(2))

        def fun(X):
            x, y = X[:2], X[2:]
            return np.sum((dx(x,y), dy(x,y)))
        jac_fun = nd.Jacobian(fun)
        return jac_fun
Code Example #28
File: tm.py Project: simonkamronn/autohmm
    def _set_startprob(self, startprob):

        if startprob is None:
            startprob = np.tile(1.0 / self.n_components, self.n_components)
        else:
            startprob = np.asarray(startprob, dtype=float)

            if not np.all(startprob <= 1.0):
                normalize(startprob)

            if len(startprob) != self.n_components:
                if len(startprob) == self.n_unique:
                    startprob_split = np.copy(startprob) / (1.0+self.n_tied)
                    startprob = np.zeros(self.n_components)
                    for u in range(self.n_unique):
                        for t in range(self.n_chain):
                            startprob[u*(self.n_chain)+t] = \
                                startprob_split[u].copy()
                else:
                    raise ValueError("cannot match shape of startprob")

        if not np.allclose(np.sum(startprob), 1.0):
            raise ValueError('startprob must sum to 1.0')

        self._log_startprob = np.log(np.asarray(startprob).copy())
Code Example #29
File: kernels.py Project: fw0/domain_adapt
def get_dxopt_delta_p(lin_solver, df_dx, d_dp_df_dx, d_dx_df_dx, A, b, xopt, p, delta_p_direction):
    
    # f(x, p) should be convex
    x_len = A.shape[1]

    # get tight constraints
    A_tight, b_tight = get_tight_constraints(A, b, xopt)
    num_tight = A_tight.shape[0]

    # get d
    p_dim = len(delta_p_direction.shape)
    delta_p_direction_broadcasted = np.tile(delta_p_direction, tuple([x_len] + [1 for i in range(p_dim)]))
    d_top = -np.sum(d_dp_df_dx(p, xopt) * delta_p_direction_broadcasted, axis=tuple(range(1,1+p_dim)))
    d_bottom = np.zeros(num_tight)
    d = np.hstack((d_top,d_bottom))

    # get C
    C = np.vstack((np.hstack((d_dx_df_dx(xopt, p), -A_tight.T)), np.hstack((A_tight, np.zeros((num_tight, num_tight))))))

    # get deriv
    deriv = lin_solver(C, d)
    
#    print('solver error:', np.linalg.norm(np.dot(C, deriv) - d))

    return deriv
Code Example #30
File: demo_relax.py Project: raeidsaqur/SeqGAN-RELAX
 def mc_objective_and_var(combined_params, t):
     params, est_params = combined_params
     params_rep = np.tile(params, (num_samples, 1))
     rs = npr.RandomState(t)
     noise_u = rs.rand(num_samples, D)
     noise_v = rs.rand(num_samples, D)
     return relax_all(params_rep, est_params, noise_u, noise_v, objective)
Code Example #31
    def predict_cumulative_hazard(self, df, times=None):
        """
        Return the cumulative hazard rate of subjects in X at time points.

        Parameters
        ----------
        df: numpy array or DataFrame
            a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
            can be in any order. If a numpy array, columns must be in the
            same order as the training data.
        times: iterable, optional
            an iterable of increasing times to predict the cumulative hazard at. Default
            is the set of all durations (observed and unobserved). Uses a linear interpolation if
            points in time are not in the index.

        Returns
        -------
        cumulative_hazard_ : DataFrame
            the cumulative hazard of individuals over the timeline
        """
        times = np.asarray(
            coalesce(times, self.timeline, np.unique(self.durations)))
        n = times.shape[0]
        times = times.reshape((n, 1))

        lambdas_ = self._prep_inputs_for_prediction_and_return_parameters(df)

        bp = self.breakpoints
        M = np.minimum(np.tile(bp, (n, 1)), times)
        M = np.hstack([M[:, [0]], np.diff(M, axis=1)])

        return pd.DataFrame(np.dot(M, (1 / lambdas_)),
                            columns=_get_index(df),
                            index=times[:, 0])
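
To see what the M construction does, here is a tiny worked example with made-up breakpoints: M[i, j] is the time that horizon times[i] spends in piecewise interval j, so np.dot(M, 1 / lambdas_) accumulates the cumulative hazard interval by interval:

import numpy as np

bp = np.array([5., 10., np.inf])   # hypothetical breakpoints
times = np.array([[3.], [7.]])     # two prediction horizons
M = np.minimum(np.tile(bp, (2, 1)), times)
M = np.hstack([M[:, [0]], np.diff(M, axis=1)])
# M == [[3., 0., 0.],   t=3 spends 3 units in [0, 5)
#       [5., 2., 0.]]   t=7 spends 5 units in [0, 5) and 2 in [5, 10)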
Code Example #32
File: variational.py Project: pankajkarman/ssm
    def _initialize_continuous_state_params(self, data, input, mask, tag):
        T = data.shape[0]
        D = self.D

        # Initialize the linear terms
        h_ini = np.zeros(D)
        h_dyn_1 = np.zeros((T - 1, D))
        h_dyn_2 = np.zeros((T - 1, D))

        # Set the posterior mean based on the emission model, if possible.
        try:
            h_obs = (1.0 / self.initial_variance) * self.model.emissions. \
                invert(data, input=input, mask=mask, tag=tag)
        except Exception:
            warn("We can only initialize the continuous states if the emissions support "
                 "\"inverting\" the observations by mapping them to an estimate of the "
                 "latent states. Defaulting to a random initialization instead.")
            h_obs = (1.0 / self.initial_variance) * np.random.randn(data.shape[0], self.D)

        # Initialize the posterior variance to self.initial_variance * I
        J_ini = np.zeros((D, D))
        J_dyn_11 = np.zeros((T - 1, D, D))
        J_dyn_21 = np.zeros((T - 1, D, D))
        J_dyn_22 = np.zeros((T - 1, D, D))
        J_obs = np.tile(1 / self.initial_variance * np.eye(D)[None, :, :], (T, 1, 1))

        return dict(J_ini=J_ini,
                    h_ini=h_ini,
                    J_dyn_11=J_dyn_11,
                    J_dyn_21=J_dyn_21,
                    J_dyn_22=J_dyn_22,
                    h_dyn_1=h_dyn_1,
                    h_dyn_2=h_dyn_2,
                    J_obs=J_obs,
                    h_obs=h_obs)
Code Example #33
 def get_error_and_ll(w, v_prior, X, y, K, location, scale):
     v_noise = np.exp(parser.get(w, 'log_v_noise')[0, 0]) * scale**2
     q = get_parameters_q(w, v_prior)
     samples_q = draw_samples(q, K)
     outputs = predict(samples_q, X) * scale + location
     log_factor = -0.5 * np.log(2 * math.pi * v_noise) - 0.5 * (np.tile(y, (1, K)) - np.array(outputs))**2 / v_noise
     ll = np.mean(logsumexp(log_factor - np.log(K), 1))
     error = np.sqrt(np.mean((y - np.mean(outputs, 1, keepdims=True))**2))
     return error, ll
Code Example #34
File: forward_models.py Project: andymiller/svae
def linear_decode(z, phi):
    C, d = phi
    z = z if z.ndim == 3 else z[:,None,:]  # ensure z.shape == (T, K, n)

    mu = np.dot(z, C.T)
    log_sigmasq = np.tile(d[None,None,:], mu.shape[:2] + (1,))

    shape = z.shape[:-1] + (-1,)
    return np.reshape(mu, shape), np.reshape(log_sigmasq, shape)
Code Example #35
File: glm-diagonal.py Project: onenoc/lfvbae
def generate_data(beta,tau,n,num_times):
    num_features = len(beta)-1
    X = np.random.uniform(-2,2,(n,num_times,num_features))
    alpha = np.random.normal(0,tau,n)
    alpha = np.reshape(np.tile(alpha,num_times),(num_times,n))
    alpha = np.transpose(alpha)
    P = logistic(beta[0] + np.dot(X, beta[1:]))  # + alpha (random-effect term disabled)
    y = np.random.binomial(1,P)
    return X,y
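
logistic is defined elsewhere in this project; presumably the standard sigmoid. A sketch under that assumption:

import numpy as np

def logistic(x):
    # standard sigmoid; assumed definition, not shown in the snippet above
    return 1. / (1. + np.exp(-x))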
Code Example #36
File: utils.py Project: juliaprocess/chieh_libs
def projectSimplex(mat):
    """ project each row vector to the simplex
    """
    nPoints, nVars = mat.shape
    mu = np.fliplr(np.sort(mat, axis=1))
    sum_hist = np.cumsum(mu, axis=1)
    flag = (mu - 1./np.tile(np.arange(1,nVars+1),(nPoints,1))*(sum_hist-1) > 0)
    
    f_flag = lambda flagPoint: len(flagPoint) - 1 - \
            flagPoint[::-1].argmax()
    lastTrue = list(map(f_flag, flag))  # list() needed under Python 3 for the fancy indexing below
    
    sm_row = sum_hist[np.arange(nPoints), lastTrue]
    
    theta = (sm_row - 1)*1./(np.array(lastTrue)+1.)
    
    w = np.maximum(mat - np.tile(theta, (nVars,1)).T, 0.)
    
    return w
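
A quick sanity check of the projection (relying on the Python 3 fix above): every projected row should be nonnegative and sum to one:

import numpy as np

mat = np.random.randn(5, 4)
w = projectSimplex(mat)
assert np.all(w >= 0) and np.allclose(w.sum(axis=1), 1.0)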
Code Example #37
File: glmm.py Project: onenoc/lfvbae
def likelihood_individual(beta,y,X,alpha):
    N = len(alpha)
    t = len(y)
    #get success probabilities
    p = get_pi(beta,X,alpha)
    #do bernoulli to get observation probabilities
    y = np.tile(y, len(p) // len(y))
    likelihood = bernoulli(p,y)
    #handle products (based on number of time steps, multiply every t elements together)
    likelihood = np.reshape(likelihood, (t, len(likelihood) // t))
    likelihood = np.prod(likelihood,0)
    #handle sums (over particles)
    likelihood = np.sum(likelihood)/N
    return likelihood
Code Example #38
File: autograd_wrapper.py Project: matt-graham/hmc
    def jacobian_and_value(fun, x):
        """
        Returns a function that returns both the Jacobian and value of a
        function.

        Assumes that the function `fun` broadcasts along the first dimension of
        the input being differentiated with respect to such that a batch of
        outputs can be computed concurrently for a batch of inputs.
        """
        val = fun(x)
        v_vspace = vspace(val)
        x_vspace = vspace(x)
        x_rep = np.tile(x, (v_vspace.size,) + (1,) * x_vspace.ndim)
        vjp_rep, _ = make_vjp(fun, x_rep)
        jacobian_shape = v_vspace.shape + x_vspace.shape
        basis_vectors = np.array([b for b in v_vspace.standard_basis()])
        jacobian = vjp_rep(basis_vectors)
        return np.reshape(jacobian, jacobian_shape), val
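
A usage sketch, assuming the usual autograd imports (make_vjp from autograd, vspace from autograd.extend): the Jacobian of an elementwise square is diagonal.

import autograd.numpy as np
from autograd import make_vjp
from autograd.extend import vspace

# fun broadcasts over rows, as jacobian_and_value requires.
jac, val = jacobian_and_value(lambda x: x**2, np.array([1., 2., 3.]))
assert np.allclose(jac, np.diag([2., 4., 6.]))
assert np.allclose(val, np.array([1., 4., 9.]))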
Code Example #39
File: autograd_wrapper.py Project: matt-graham/hmc
    def hessian_grad_and_value(fun, x):
        """
        Returns a function that returns the Hessian, gradient and value of a
        function.

        Assumes that the function `fun` broadcasts along the first dimension of
        the input being differentiated with respect to such that a batch of
        outputs can be computed concurrently for a batch of inputs.
        """
        def grad_fun(x):
            vjp, val = make_vjp(fun, x)
            return vjp(vspace(val).ones()), val
        x_vspace = vspace(x)
        x_rep = np.tile(x, (x_vspace.size,) + (1,) * x_vspace.ndim)
        vjp_grad, (grad, val) = make_vjp(lambda x: atuple(grad_fun(x)), x_rep)
        hessian_shape = x_vspace.shape + x_vspace.shape
        basis_vectors = np.array([b for b in x_vspace.standard_basis()])
        hessian = vjp_grad((basis_vectors, vspace(val).zeros()))
        return np.reshape(hessian, hessian_shape), grad[0], val[0]
Code Example #40
File: tm.py Project: simonkamronn/autohmm
 def _set_precision_prior(self, precision_prior):
     if precision_prior is None:
         self._precision_prior_ = \
             np.zeros((self.n_components, self.n_features, self.n_features))
     else:
         precision_prior = np.asarray(precision_prior)
         if len(precision_prior) == 1:
             self._precision_prior_ = np.tile(
                 precision_prior,
                 (self.n_components, self.n_features, self.n_features))
         elif precision_prior.reshape(self.n_unique, self.n_features,
                                      self.n_features).shape == \
                 (self.n_unique, self.n_features, self.n_features):
             self._precision_prior_ = \
                 np.zeros((self.n_components, self.n_features, self.n_features))
             for u in range(self.n_unique):
                 for t in range(self.n_chain):
                     self._precision_prior_[u * self.n_chain + t] = \
                         precision_prior[u].copy()
         else:
             raise ValueError("cannot match shape of precision_prior")
Code Example #41
File: tm.py Project: sarah-strauss/autohmm
    def _set_transmat(self, transmat_val):
        if transmat_val is None:
            transmat = np.tile(1.0 / self.n_components,
                               (self.n_components, self.n_components))
        else:
            transmat_val[np.isnan(transmat_val)] = 0.0
            normalize(transmat_val, axis=1)

            if (np.asarray(transmat_val).shape == (self.n_components,
                                                   self.n_components)):
                transmat = np.copy(transmat_val)
            elif transmat_val.shape[0] == self.n_unique:
                transmat = self._ntied_transmat(transmat_val)
            else:
                raise ValueError("cannot match shape of transmat")

        if not np.allclose(np.sum(transmat, axis=1), 1.0):
            raise ValueError('Rows of transmat must sum to 1.0')
        self._log_transmat = np.log(np.asarray(transmat).copy())
        underflow_idx = np.isnan(self._log_transmat)
        self._log_transmat[underflow_idx] = NEGINF
Code Example #42
File: utils.py Project: juliaprocess/chieh_libs
def genConstraints(prng, label, alpha, beta, num_ML, num_CL, start_expert = 0, \
        flag_same=False):
    """ This function generates pairwise constraints (ML/CL) using groud-truth
    cluster label and noise parameters
    Parameters
    ----------
    label: shape(n_sample, )
        cluster label of all the samples
    alpha: shape(n_expert, )
        sensitivity parameters of experts
    beta: shape(n_expert, )
        specificity parameters of experts
    num_ML: int
    num_CL: int
    flag_same: True if different experts provide constraints for the same set
    of sample pairs, False if different experts provide constraints for
    different set of sample pairs
    
    Returns
    -------
    S: shape(n_con, 4)
        The first column -> expert id
        The second and third column -> (row, column) indices of two samples
        The fourth column -> constraint values (1 for ML and 0 for CL)
    """
    n_sample = len(label)
    tp = np.tile(label, (n_sample,1))
    label_mat = (tp == tp.T).astype(int)
    
    ML_set = []
    CL_set = []
    # get indices of upper-triangle matrix
    [row, col] = np.triu_indices(n_sample, k=1)
    # n_sample * (n_sample-1)/2
    for idx in range(len(row)):
        if label_mat[row[idx],col[idx]] == 1:
            ML_set.append([row[idx], col[idx]])
        elif label_mat[row[idx],col[idx]] == 0:
            CL_set.append([row[idx], col[idx]])
        else:
            print "Invalid matrix entry values"

    ML_set = np.array(ML_set)
    CL_set = np.array(CL_set)

    assert num_ML < ML_set.shape[0]
    assert num_CL < CL_set.shape[0]
    
    # generate noisy constraints for each expert
    assert len(alpha) == len(beta)
    n_expert = len(alpha)
    
    # initialize the constraint matrix
    S = np.zeros((0, 4))
    
    # different experts provide constraint for the same set of sample pairs
    if flag_same == True:
        idx_ML = prng.choice(ML_set.shape[0], num_ML, replace=False)
        idx_CL = prng.choice(CL_set.shape[0], num_CL, replace=False)
        ML = ML_set[idx_ML, :]
        CL = CL_set[idx_CL, :]
        for m in range(n_expert):
            val_ML = prng.binomial(1, alpha[m], num_ML)
            val_CL = prng.binomial(1, 1-beta[m], num_CL)
            Sm_ML = np.hstack((np.ones((num_ML,1))*(m+start_expert), ML, \
                    val_ML.reshape(val_ML.size,1) ))
            Sm_CL = np.hstack((np.ones((num_CL,1))*(m+start_expert), CL, \
                    val_CL.reshape(val_CL.size,1) ))
            S = np.vstack((S, Sm_ML, Sm_CL)).astype(int)
    # different experts provide constraints for different sets of sample pairs
    else:
        for m in range(n_expert):
            prng = np.random.RandomState(1000 + m)
            idx_ML = prng.choice(ML_set.shape[0], num_ML, replace=False)
            idx_CL = prng.choice(CL_set.shape[0], num_CL, replace=False)
            ML = ML_set[idx_ML, :]
            CL = CL_set[idx_CL, :]
            val_ML = prng.binomial(1, alpha[m], num_ML)
            val_CL = prng.binomial(1, 1-beta[m], num_CL)
            Sm_ML = np.hstack((np.ones((num_ML,1))*(m+start_expert), ML, \
                    val_ML.reshape(val_ML.size,1) ))
            Sm_CL = np.hstack((np.ones((num_CL,1))*(m+start_expert), CL, \
                    val_CL.reshape(val_CL.size,1) ))
            S = np.vstack((S, Sm_ML, Sm_CL)).astype(int)

    return S
Code Example #43
 def log_likelihood_factor(samples_q, v_noise, X, y):
     outputs = predict(samples_q, X)
     return -0.5 * np.log(2 * math.pi * v_noise) - 0.5 * (np.tile(y, (1, samples_q.shape[ 0 ])) - outputs)**2 / v_noise
Code Example #44
File: glmm.py Project: onenoc/lfvbae
def get_pi(beta,X,alpha):
    linear_pred = np.dot(X,beta[1:])
    linear_pred = np.tile(linear_pred,len(alpha))
    alpha = np.repeat(alpha,len(np.dot(X,beta[1:])))
    return logistic(beta[0]+linear_pred+0*alpha)
Code Example #45
File: pylqr.py Project: navigator8972/pylqr
    def build_lqr_system(self, x_array, u_array):
        dfdx_array = []
        dfdu_array = []
        dldx_array = []
        dldu_array = []
        dldxx_array = []
        dldux_array = []
        dlduu_array = []

        for t, (x, u) in enumerate(zip(x_array, u_array)):
            #refresh all the points for potential finite difference
            x1 = None
            x2 = None
            u1 = None
            u2 = None

            #for fx
            if self.plant_dyn_dx is not None:
                #use defined derivative
                dfdx_array.append(self.plant_dyn_dx(x, u, t, self.aux))
            else:
                #use finite difference
                if x1 is None or x2 is None:
                    x1 = np.tile(x, (len(x), 1)) + np.eye(len(x)) * self.finite_diff_eps
                    x2 = np.tile(x, (len(x), 1)) - np.eye(len(x)) * self.finite_diff_eps
                fx1 = np.array([self.plant_dyn(x1_dim, u, t, self.aux) for x1_dim in x1])
                fx2 = np.array([self.plant_dyn(x2_dim, u, t, self.aux) for x2_dim in x2])
                dfdx_array.append((fx1-fx2).T/2./self.finite_diff_eps)

            #for fu
            if self.plant_dyn_du is not None:
                #use defined derivative
                dfdu_array.append(self.plant_dyn_du(x, u, t, self.aux))
            else:
                #use finite difference
                if u1 is None or u2 is None:
                    u1 = np.tile(u, (len(u), 1)) + np.eye(len(u)) * self.finite_diff_eps
                    u2 = np.tile(u, (len(u), 1)) - np.eye(len(u)) * self.finite_diff_eps
                fu1 = np.array([self.plant_dyn(x, u1_dim, t, self.aux) for u1_dim in u1])
                fu2 = np.array([self.plant_dyn(x, u2_dim, t, self.aux) for u2_dim in u2])
                dfdu_array.append((fu1-fu2).T/2./self.finite_diff_eps)

            #for lx
            if self.cost_dx is not None:
                #use defined derivative
                dldx_array.append(self.cost_dx(x, u, t, self.aux))
            else:
                #use finite difference
                if x1 is None or x2 is None:
                    x1 = np.tile(x, (len(x), 1)) + np.eye(len(x)) * self.finite_diff_eps
                    x2 = np.tile(x, (len(x), 1)) - np.eye(len(x)) * self.finite_diff_eps
                cx1 = np.array([self.cost(x1_dim, u, t, self.aux) for x1_dim in x1])
                cx2 = np.array([self.cost(x2_dim, u, t, self.aux) for x2_dim in x2])
                dldx_array.append((cx1-cx2).T/2./self.finite_diff_eps)

            #for lu
            if self.cost_du is not None:
                #use defined derivative
                dldu_array.append(self.cost_du(x, u, t, self.aux))
            else:
                #use finite difference
                if u1 is None or u2 is None:
                    u1 = np.tile(u, (len(u), 1)) + np.eye(len(u)) * self.finite_diff_eps
                    u2 = np.tile(u, (len(u), 1)) - np.eye(len(u)) * self.finite_diff_eps
                cu1 = np.array([self.cost(x, u1_dim, t, self.aux) for u1_dim in u1])
                cu2 = np.array([self.cost(x, u2_dim, t, self.aux) for u2_dim in u2])
                dldu_array.append((cu1-cu2).T/2./self.finite_diff_eps)

            #for lxx
            if self.cost_dxx is not None:
                #use defined derivative
                dldxx_array.append(self.cost_dxx(x, u, t, self.aux))
            else:
                #use finite difference
                # l = self.cost(x, u, t, self.aux)
                # dldxx_array.append(np.array([[(cx1_dim + cx2_dim - 2*l)/(self.finite_diff_eps**2) for cx2_dim in cx2] for cx1_dim in cx1]))
                dldxx_array.append(
                    self.finite_difference_second_order_(
                        lambda x_arg: self.cost(x_arg, u, t, self.aux),
                        x))

            #for luu
            if self.cost_duu is not None:
                #use defined derivative
                dlduu_array.append(self.cost_duu(x, u, t, self.aux))
            else:
                #use finite difference
                # l = self.cost(x, u, t, self.aux)
                # dlduu_array.append(np.array([[(cu1_dim + cu2_dim - 2*l)/(self.finite_diff_eps**2) for cu2_dim in cu2] for cu1_dim in cu1])) 
                dlduu_array.append(
                    self.finite_difference_second_order_(
                        lambda u_arg: self.cost(x, u_arg, t, self.aux),
                        u))
            #for lux
            if self.cost_dux is not None:
                #use defined derivative
                dldux_array.append(self.cost_dux(x, u, t, self.aux))
            else:
                #use finite difference
                l = self.cost(x, u, t, self.aux)
                cux1 = np.array([[self.cost(x1_dim, u1_dim, t, self.aux) for x1_dim in x1] for u1_dim in u1])
                cux2 = np.array([[self.cost(x2_dim, u2_dim, t, self.aux) for x2_dim in x2] for u2_dim in u2])
                #partial derivative - a simplified approximation, see wiki on finite difference
                dldux = cux1 + cux2 + \
                        2 * np.tile(l, (len(x), len(u))).T - \
                        np.tile(cx1, (len(u), 1)) - np.tile(cx2, (len(u), 1)) - \
                        np.tile(cu1, (len(x), 1)).T - np.tile(cu2, (len(x), 1)).T

                dldux_array.append(dldux/(2*self.finite_diff_eps**2)) 
            # print dfdx_array[-1], dfdu_array[-1], dldx_array[-1], dldu_array[-1]
            # print dldxx_array[-1], dlduu_array[-1], dldux_array[-1]
            # raw_input()
            
            #need to do something similar for constraints if they were there
            #to incorporate with the cost functions. Ignore them for now
        
        lqr_sys = {
            'dfdx':dfdx_array,
            'dfdu':dfdu_array,
            'dldx':dldx_array,
            'dldu':dldu_array,
            'dldxx':dldxx_array,
            'dlduu':dlduu_array,
            'dldux':dldux_array
            }

        return lqr_sys
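
The tile-plus-eye perturbation pattern used throughout this routine generalizes to a compact central-difference gradient; a self-contained demo:

import numpy as np

def central_diff_grad(f, x, eps=1e-6):
    # Row i of x1/x2 is x with coordinate i perturbed by +/- eps.
    x1 = np.tile(x, (len(x), 1)) + np.eye(len(x)) * eps
    x2 = np.tile(x, (len(x), 1)) - np.eye(len(x)) * eps
    f1 = np.array([f(row) for row in x1])
    f2 = np.array([f(row) for row in x2])
    return (f1 - f2) / (2. * eps)

x = np.array([1., 2., 3.])
assert np.allclose(central_diff_grad(lambda v: np.sum(v**2), x), 2 * x, atol=1e-4)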