Code example #1
 def matmul_last_axis(self, mat, axes=1):
     # Contract the trailing `axes` dimensions of self.liks with the
     # leading `axes` dimensions of mat via one flat matrix product.
     reshaped_liks = np.reshape(self.liks, [-1] + [np.prod(
         self.liks.shape[-axes:])])
     reshaped_mat = np.reshape(mat, [np.prod(
         mat.shape[:axes], dtype=int)] + [-1])
     reshaped_liks = np.dot(reshaped_liks, reshaped_mat)
     self.liks = np.reshape(reshaped_liks,
                            list(self.liks.shape[:-axes]) + list(mat.shape[axes:]))
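A quick way to sanity-check this reshape-dot-reshape pattern is against np.tensordot, which performs the same contraction directly. A minimal standalone sketch (the liks array here is invented; only the reshaping logic comes from the snippet above):

import numpy as np

def contract_last_axes(liks, mat, axes=1):
    # flatten the trailing `axes` dims of liks and the leading `axes` dims of mat
    flat_liks = np.reshape(liks, (-1, np.prod(liks.shape[-axes:], dtype=int)))
    flat_mat = np.reshape(mat, (np.prod(mat.shape[:axes], dtype=int), -1))
    out = np.dot(flat_liks, flat_mat)
    return np.reshape(out, list(liks.shape[:-axes]) + list(mat.shape[axes:]))

liks = np.random.rand(2, 3, 4, 5)
mat = np.random.rand(4, 5, 6)
assert np.allclose(contract_last_axes(liks, mat, axes=2),
                   np.tensordot(liks, mat, axes=2))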
Code example #2
File: simplex_patterns.py Project: rgiordan/paragami
 def __init__(self,
              simplex_size,
              array_shape,
              default_validate=True,
              free_default=None):
     """
     Parameters
     ------------
     simplex_size: `int`
         The length of the simplexes.
     array_shape: `tuple` of `int`
         The size of the array of simplexes (not including the simplexes
         themselves).
     default_validate: `bool`, optional
         Whether or not to check for legal (i.e., positive and normalized)
         folded values by default.
     free_default: `bool`, optional
         The default value for free.
     """
     self.__simplex_size = int(simplex_size)
     if self.__simplex_size <= 1:
         raise ValueError('simplex_size must be >= 2.')
     self.__array_shape = array_shape
     self.__shape = self.__array_shape + (self.__simplex_size, )
     self.__free_shape = self.__array_shape + (self.__simplex_size - 1, )
     self.default_validate = default_validate
     super().__init__(np.prod(self.__shape),
                      np.prod(self.__free_shape),
                      free_default=free_default)
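The two np.prod calls in the super().__init__ line encode the parameter counting: each simplex of length k has only k - 1 free parameters because its entries must sum to one. A standalone illustration of that counting (no paragami needed):

import numpy as np

simplex_size, array_shape = 4, (2, 3)
shape = array_shape + (simplex_size,)
free_shape = array_shape + (simplex_size - 1,)
print(np.prod(shape))       # 24 folded values
print(np.prod(free_shape))  # 18 free values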
Code example #3
def RBF_eKK(mu, sigma, X, lengthscales=None, kernel_variance=1):
    """
    x ~ N(mu, sigma), Dx1
    X is DxM
    Return E_x [k(X, x) * k(x, X) ], an M x M array
    """
    if lengthscales is None:
        lengthscales = np.ones((mu.shape[0], 1))

    kXX_scaled = RBF(
        x=X,
        x2=X,
        lengthscales=np.sqrt(2 * (lengthscales**2)),
        kernel_variance=kernel_variance *
        np.sqrt(np.prod(lengthscales**2) / np.prod(2 * (lengthscales**2))))

    X_pairwise_sums = X[:, :, None] + X[:, :, None].swapaxes(1, 2)

    kXpX_mu = RBF(
        x=np.reshape(X_pairwise_sums / 2, (mu.shape[0], -1), order='F'),
        x2=mu,
        lengthscales=np.sqrt((lengthscales**2) / 2 + sigma),
        kernel_variance=kernel_variance * np.sqrt(
            np.prod(lengthscales**2) / np.prod((lengthscales**2) / 2 + sigma)))

    return kXX_scaled * np.reshape(kXpX_mu, (X.shape[1], X.shape[1]),
                                   order='F')
Code example #4
def sample_latent_pi(aa, bb, n_samples):
    # Stick-breaking: turn stick proportions v into mixture weights.
    v_samples = sample_latent_sb(aa, bb, n_samples)
    v = v_samples
    vm = 1 - v_samples

    # Weight i is v_i times the unused stick prod_{j<i} (1 - v_j).
    vs = [
        v[:, i][:, None] * np.prod(vm[:, :i], axis=1, keepdims=True)
        for i in range(1, v_samples.shape[1])
    ]

    # The last weight is the leftover stick mass.
    vl = np.prod(vm, axis=1, keepdims=True)

    w_vectors = [v_samples[:, 0][:, None]] + vs + [vl]
    weights = np.concatenate(w_vectors, axis=1)

    return weights
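Since weight i gets v_i times the unused stick and the final weight gets the leftover product, each row of weights must sum to one. A minimal check with Beta draws standing in for sample_latent_sb (which is not shown in this listing):

import numpy as np

rng = np.random.default_rng(0)
v = rng.beta(1.0, 3.0, size=(5, 9))   # stand-in for sample_latent_sb(aa, bb, n_samples)
vm = 1 - v
vs = [v[:, i][:, None] * np.prod(vm[:, :i], axis=1, keepdims=True)
      for i in range(1, v.shape[1])]
vl = np.prod(vm, axis=1, keepdims=True)
weights = np.concatenate([v[:, 0][:, None]] + vs + [vl], axis=1)
assert np.allclose(weights.sum(axis=1), 1.0)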
Code example #5
File: box.py Project: cunni/pyspherepack
    def tex_best(cls,
                 filenames=None,
                 texname=None,
                 scaled_rad=None,
                 clamp_edge=None):
        filenames = filenames if filenames is not None else [
            'data/mb_50_2x1.pkl', 'data/mb_50_3x1.pkl'
        ]
        texname = texname if texname is not None else 'data/aggregated_results'
        # set up pylatex doc
        geometry_options = {"margin": "1in"}
        doc = pylatex.Document(texname, geometry_options=geometry_options)
        dapne = lambda s: doc.append(pylatex.NoEscape(s))
        with doc.create(pylatex.Section('Introduction')):
            doc.append(
                'Each section that follows shows an optimized layout for a given number of circles and an approximate aspect ratio of the sheet. Throughout, the following parameters are assumed: clamp edge of 10.0mm, circle diameter of 20mm, spacing between circles of 0.50mm.'
            )
        for fn in filenames:
            mb = cls.load(filename=fn)
            b = mb.best_box['box']
            b.plot(clamp_edge=clamp_edge, scaled_rad=scaled_rad)
            # pylatex to put this in tex
            #matplotlib.use('Agg')
            with doc.create(
                    pylatex.Section(pylatex.NoEscape(
                        r'{} circles, box aspect ratio of roughly ${}\times{}$'
                        .format(b.n_balls, b.box[0], b.box[1])),
                                    label=fn)):
                with doc.create(pylatex.Figure(position='htbp')) as plot:
                    plot.add_plot(width=pylatex.NoEscape(r'0.8\textwidth'))
                    #plot.add_caption('Optimized circle packing for this sheet size.')

            x = b.box_warp(b.logits)
            rad = b.ball_radius(x)
            clamp_edge = clamp_edge if clamp_edge is not None else 0.0
            scaled_rad = scaled_rad if scaled_rad is not None else rad
            scaled_box = scaled_rad / rad * (b.box + 2 * rad)
            scaled_x = scaled_rad / rad * (x + rad)
            #doc.append(pylatex.NoEscape('\noindent Density %:'))
            dapne(r'\noindent Density \%: {:04.2f}\% \\'.format(b.density()))
            dapne(r'Waste \%: {:04.2f}\% \\'.format(100 - b.density()))
            dapne(r'Density with clamp edge \%: {:04.2f}\% \\'.format(
                (b.density() * np.prod(scaled_box) /
                 (scaled_box[1] * (scaled_box[0] + 2 * clamp_edge)))))
            dapne(r'Waste with clamp edge \%: {:04.2f}\% \\'.format(
                100 - (b.density() * np.prod(scaled_box) /
                       (scaled_box[1] * (scaled_box[0] + 2 * clamp_edge)))))

            dapne(r'Circle center coordinates: \\')
            for i in range(b.n_balls):
                #dapne(r'$c_{{{}}}$: {}\\'.format(i+1,scaled_x[i,:]))
                dapne(r'$[{}~~{}]$ \\'.format(scaled_x[i, 0], scaled_x[i, 1]))
            dapne(r'\clearpage')

        doc.generate_tex()
Code example #6
def compute_path_params(eta, H, psi):
    ''' Compute the Gaussian parameters for each path
    H (list of nb_layers elements of shape (K_l x r_{l-1}, r_l)): Lambda 
                                                    parameters for each layer
    psi (list of nb_layers elements of shape (K_l x r_{l-1}, r_{l-1})): Psi 
                                                    parameters for each layer
    eta (list of nb_layers elements of shape (K_l x r_{l-1}, 1)): mu 
                                                    parameters for each layer
    ------------------------------------------------------------------------------------------------
    returns (tuple of len 2): The updated parameters mu_s and sigma for all s in Omega
    '''

    #=====================================================================
    # Retrieving model parameters
    #=====================================================================

    L = len(H)
    k = [len(h) for h in H]
    k_aug = k + [1]  # integrating the number of components of the last layer, i.e. 1

    r1 = H[0].shape[1]
    r2_L = [h.shape[2] for h in H]  # r[2:L]
    r = [r1] + r2_L  # r augmented

    #=====================================================================
    # Initiating the parameters for all layers
    #=====================================================================

    mu_s = [0 for i in range(L + 1)]
    sigma_s = [0 for i in range(L + 1)]

    # Initialization with the parameters of the last layer
    # (n_axis and t are module-level aliases, presumably np.newaxis and np.transpose)
    mu_s[-1] = np.zeros((1, r[-1], 1))  # swap k and r later
    sigma_s[-1] = np.eye(r[-1])[n_axis]

    #==================================================================================
    # Compute Gaussian parameters from top to bottom for each path
    #==================================================================================

    for l in reversed(range(0, L)):
        H_repeat = np.repeat(H[l], np.prod(k_aug[l + 1:]), axis=0)
        eta_repeat = np.repeat(eta[l], np.prod(k_aug[l + 1:]), axis=0)
        psi_repeat = np.repeat(psi[l], np.prod(k_aug[l + 1:]), axis=0)

        mu_s[l] = eta_repeat + H_repeat @ np.tile(mu_s[l + 1], (k[l], 1, 1))

        sigma_s[l] = H_repeat @ np.tile(sigma_s[l + 1], (k[l], 1, 1)) @ t(H_repeat, (0, 2, 1)) \
            + psi_repeat

    return mu_s, sigma_s
Code example #7
def sample_sticks(a, b, n_samples=1):
    # Stick-breaking: convert Kumaraswamy stick proportions into mixture weights.
    v_samples = sample_kumaraswamy(a, b)  # [nz-1]
    v = v_samples[None, :]
    vm = 1 - v
    vs = [v[:, i][:, None] * np.prod(vm[:, :i], axis=1, keepdims=True)
          for i in range(1, v.shape[1])]
    vl = np.prod(vm, axis=1, keepdims=True)  # leftover stick mass
    pi_vectors = [v[:, 0][:, None]] + vs + [vl]
    pis = np.concatenate(pi_vectors, axis=1)

    return pis
Code example #8
def sample_latent_pi(aa, bb, n_samples):
    v_samples = sample_latent_sb(aa, bb, n_samples)
    v = v_samples
    vm = 1 - v_samples

    vs = [v[:, i][:, None] * np.prod(vm[:, :i], axis=1, keepdims=True) for i in range(1, v_samples.shape[1])]

    vl = np.prod(vm, axis=1, keepdims=True)

    w_vectors = [v_samples[:, 0][:, None]] + vs + [vl]
    weights = np.concatenate(w_vectors, axis=1)

    return weights
Code example #9
def compute_S_1L(L_1L, k_1L, k):
    ''' Compute the number of paths starting from each head and tail of the 
    network.
    L_1L (dict): The number of layers where the lists include the heads and the tail layers
    k_1L (list of int): The number of components on each layer including the common layers
    k (dict): The original number of components on each layer
    --------------------------------------------------------------------------
    returns (dict): The number of paths starting from each head and tail       
    '''
    # Paths of both (heads+tail) and tail
    S1cL = [np.prod(k_1L['c'][l:]) for l in range(L_1L['c'] + 1)]
    S1dL = [np.prod(k_1L['d'][l:]) for l in range(L_1L['d'])]
    St = [np.prod(k['t'][l:]) for l in range(L_1L['t'])]
    return {'c': S1cL, 'd': S1dL, 't': St}
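The suffix products here count paths: with k_l components at layer l, the number of distinct component paths from layer l down to the output is the product of the remaining k's. A tiny illustration with made-up layer sizes:

import numpy as np

k_layers = [3, 2, 2]  # components per layer
S = [np.prod(k_layers[l:]) for l in range(len(k_layers) + 1)]
print(S)  # [12, 4, 2, 1.0] -- np.prod of an empty slice is 1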
Code example #10
def RBF_eK(mu, sigma, X, lengthscales=None, kernel_variance=1):
    """
    x ~ N(mu, sigma), Dx1
    X is DxM
    Return E_x [ k(x, X)], a 1 x M array
    """
    if lengthscales is None:
        lengthscales = np.ones((mu.shape[0], 1))
    return RBF(
        x=mu,
        x2=X,
        lengthscales=np.sqrt(lengthscales**2 + sigma),
        kernel_variance=kernel_variance *
        np.sqrt(np.prod(lengthscales**2) / np.prod(lengthscales**2 + sigma)))
Code example #11
def student(theta, df, prod=True):
    """Implementation of the student t distribution with df degrees of freedom

    Parameters
    ----------
    theta : type
        Description of parameter `theta`.
    df : type
        Description of parameter `df`.
    prod : bool
        If true return the density of the sample
        If False, return the joint distribution

    Returns
    -------
    type
        float if prod
        np.ndarray if not prod
    """
    individual = gamma((df+1.)/2.)*(1+theta**2 / df)**(-(df+1)/2) \
                /(gamma(df/2.)*np.sqrt(df*pi))
    if prod:
        return np.prod(individual)
    else:
        return individual
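With prod=False the formula should reproduce scipy's Student t density, which makes for an easy check. A self-contained sketch (assuming, as the snippet suggests, that gamma is scipy.special.gamma and pi is the usual constant):

import numpy as np
from numpy import pi
from scipy import stats
from scipy.special import gamma

def student(theta, df, prod=True):
    individual = gamma((df + 1.) / 2.) * (1 + theta**2 / df)**(-(df + 1) / 2) \
                / (gamma(df / 2.) * np.sqrt(df * pi))
    return np.prod(individual) if prod else individual

theta = np.linspace(-3, 3, 7)
assert np.allclose(student(theta, 5.0, prod=False), stats.t.pdf(theta, df=5.0))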
Code example #12
File: parser.py Project: MarkKobs/Socrates
def parse_model(spec):
    shape = np.array(ast.literal_eval(read(spec['shape'])))
    lower, upper = parse_bounds(np.prod(shape), spec['bounds'])
    layers = parse_layers(spec['layers']) if 'layers' in spec else None
    path = spec['path'] if 'path' in spec else None

    return Model(shape, lower, upper, layers, path)
Code example #13
File: convnet.py Project: CamZHU/autograd
 def build_weights_dict(self, input_shape):
     # Input shape is anything (all flattened)
     input_size = np.prod(input_shape, dtype=int)
     self.parser = WeightsParser()
     self.parser.add_weights('params', (input_size, self.size))
     self.parser.add_weights('biases', (self.size,))
     return self.parser.N, (self.size,)
Code example #14
 def split_prob(self, X, params):
     p = [self.single(cov, params[0, i], params[1, i])
             for i, cov in enumerate(X)]
     # joint probability across covariates
     return np.prod(p)
Code example #15
File: convnet.py Project: LiuFang816/SALSTM_py_data
 def build_weights_dict(self, input_shape):
     # Input shape is anything (all flattened)
     input_size = np.prod(input_shape, dtype=int)
     self.parser = WeightsParser()
     self.parser.add_weights('params', (input_size, self.size))
     self.parser.add_weights('biases', (self.size,))
     return self.parser.N, (self.size,)
Code example #16
File: utils.py Project: ecat/adbs
def jacobian_pkl(fun, x):
    vjp, ans = _make_vjp(fun, x)
    ans_vspace = vspace(ans)
    jacobian_shape = ans_vspace.shape + vspace(x).shape
    grads = map(vjp, ans_vspace.standard_basis())

    grads_out = np.stack(grads)
    if (np.prod(jacobian_shape) == np.prod(grads_out.shape)):
        return np.reshape(grads_out, jacobian_shape)
    else:
        my_jacobian_shape = ans_vspace.shape + vspace(x).shape + (
            2, )  # 2 to support real/im
        re_im_grads = np.squeeze(np.reshape(grads_out, my_jacobian_shape))
        out = re_im_grads[..., 0] + 1j * re_im_grads[..., 1]

        return out
Code example #17
def learn_maxpl(imgs):
    """Learn the weights and bias for the Hopfield network by maximizing the pseudo log-likelihood."""
    img_size = np.prod(imgs[0].shape)

    fake_weights = np.random.normal(0, 0.1, (img_size, img_size))
    bias = np.random.normal(0, 0.1, (img_size))
    diag_mask = np.ones((img_size, img_size)) - np.identity(img_size)

    def objective(params, iter):
        fake_weights, bias = params
        weights = np.multiply((fake_weights + fake_weights.T) / 2, diag_mask)
        pll = 0
        for i in range(len(imgs)):
            img = np.reshape(imgs[i], -1)
            activations = np.matmul(weights, img) + bias
            output = sigmoid(activations)
            eps = 1e-10
            img[img < 0] = 0
            pll += np.sum(np.multiply(img, np.log(output+eps)) + np.multiply(1-img, np.log(1-output+eps)))
        if iter % 100 == 0: print(-pll)
        return -pll

    g = grad(objective)

    fake_weights, bias = sgd(g, (fake_weights, bias), num_iters=300, step_size=0.001)
    weights = np.multiply((fake_weights + fake_weights.T) / 2, diag_mask)

    plt.imsave('weights_mpl.jpg', weights)
    return weights, bias
Code example #18
File: utilities.py Project: RobeeF/DDGMM
def compute_rho(eta, H, psi, mu_s, sigma_s, z_c, chsi):
    ''' Compute rho as defined in equation (8) of the DGMM paper 
    eta (list of nb_layers elements of shape (K_l x r_{l-1}, 1)): mu 
                                                    parameters for each layer    
    H (list of nb_layers elements of shape (K_l x r_{l-1}, r_l)): Lambda 
                                                    parameters for each layer
    psi (list of nb_layers elements of shape (K_l x r_{l-1}, r_{l-1})): Psi 
                                                    parameters for each layer
    z_c (list of nd-arrays) z^{(l)} - eta^{(l)} for each layer. 
    chsi (list of nd-arrays): The chsi parameters for each layer
    -----------------------------------------------------------------------
    returns (list of ndarrays): The rho parameters (covariance matrices) 
                                    for all paths starting at each layer
    '''
    
    L = len(H)    
    rho = [0 for i in range(L)]
    k = [len(h) for h in H]
    k_aug = k + [1] 

    for l in range(0, L):
        sigma_next_l = np.tile(sigma_s[l + 1], (k[l], 1, 1))
        mu_next_l = np.tile(mu_s[l + 1], (k[l], 1, 1))

        HxPsi_inv = t(H[l], (0, 2, 1)) @ pinv(psi[l])
        HxPsi_inv = np.repeat(HxPsi_inv, np.prod(k_aug[l + 1:]), axis=0)

        rho[l] = chsi[l][n_axis] @ (HxPsi_inv[n_axis] @ z_c[l][..., n_axis] \
                                    + (pinv(sigma_next_l) @ mu_next_l)[n_axis])
                
    return rho
Code example #19
def gamma_(theta, alpha, beta, prod=True):
    """ gamma distribution with parameter alpha, beta

    Parameters
    ----------
    theta : np.ndarray
    alpha : float
        shape of the gamma distribution, > 0
    beta : float
        rate of the distribution, > 0
    prod : bool
        If True, return the joint density of the sample
        (the product of the individual densities).
        If False, return the individual densities.

    Returns
    -------
    type
        float if prod
        np.ndarray if not prod

    """
    x = indicator_positive(theta)
    individual = beta**alpha * theta**(alpha - 1) * np.exp(
        -beta * theta) / gamma(alpha) * x

    if prod:
        return np.prod(individual)
    else:
        return individual
Code example #20
File: observation.py Project: Runjing-Liu120/scarlet
    def get_loss(self, model):
        """Computes the loss/fidelity of a given model wrt to the observation

        Parameters
        ----------
        model: array
            The model from `Blend`

        Returns
        -------
        result: array
            Scalar tensor with the likelihood of the model
            given the image data
        """

        model_ = self.render(model)
        images_ = self.images[self.slices]
        weights_ = self.weights[self.slices]

        # normalization of the single-pixel likelihood:
        # 1 / [(2pi)^1/2 (sigma^2)^1/2]
        # with inverse variance weights: sigma^2 = 1/weight
        # full likelihood is sum over all data samples: pixel in images
        # NOTE: this assumes that all pixels are used in likelihood!
        log_sigma = np.zeros(self.weights.shape, dtype=self.weights.dtype)
        cuts = self.weights > 0
        log_sigma[cuts] = np.log(1 / self.weights[cuts])
        log_norm = (np.prod(images_.shape) / 2 * np.log(2 * np.pi) +
                    np.sum(log_sigma) / 2)

        return log_norm + np.sum(weights_ * (model_ - images_)**2) / 2
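The normalization works because -log N(x | m, sigma^2) = (1/2)log(2 pi) + (1/2)log sigma^2 + (x - m)^2 / (2 sigma^2), so summing over pixels splits into exactly log_norm plus the weighted squared error. A standalone check against scipy with toy arrays:

import numpy as np
from scipy import stats

rng = np.random.default_rng(1)
images = rng.normal(size=(4, 4))
model = rng.normal(size=(4, 4))
weights = np.full(images.shape, 2.0)  # inverse-variance weights: sigma^2 = 1/2

log_sigma = np.log(1 / weights)
log_norm = np.prod(images.shape) / 2 * np.log(2 * np.pi) + np.sum(log_sigma) / 2
loss = log_norm + np.sum(weights * (model - images)**2) / 2

neg_loglik = -np.sum(stats.norm.logpdf(images, loc=model, scale=np.sqrt(1 / weights)))
assert np.allclose(loss, neg_loglik)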
Code example #21
def normal(theta, mean, var, prod=True):
    """ normal distribution with same variance for all the components

    Parameters
    ----------
    theta : float or np.ndarray
    mean : float or np.ndarray
        mean of the distribution
         (if float, assumes same mean for all thetas)
    var : float or np.ndarray
        variance of the distribution
        (if float, assumes same variance for all thetas)
    prod : bool
        If True, return the joint density of the sample
        (the product of the individual densities).
        If False, return the individual densities.

    Returns
    -------
    type
        float if prod
        np.ndarray if not prod
    """
    if np.min(var) < 0:
        raise ValueError("invalid variance given in normal in func_stats.py")

    # var is the variance, so the exponent uses 2*var and the
    # normalizer is sqrt(2*pi*var)
    individual = np.exp(-(theta - mean)**2 /
                        (2 * var)) / np.sqrt(2 * pi * var)

    if prod:
        return np.prod(individual)
    else:
        return individual
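Reading var as the variance, prod=False should agree with scipy's normal density; a quick standalone check:

import numpy as np
from numpy import pi
from scipy import stats

theta = np.array([-1.0, 0.0, 2.0])
mean, var = 0.5, 1.5
individual = np.exp(-(theta - mean)**2 / (2 * var)) / np.sqrt(2 * pi * var)
assert np.allclose(individual, stats.norm.pdf(theta, loc=mean, scale=np.sqrt(var)))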
Code example #22
def exponential(theta, lambda_, prod=True):
    """exponential distribution, assume a positive input

    Parameters
    ----------
    theta : float or np.ndarray
    lambda_ : float
    prod : bool
        If True, return the joint density of the sample
        (the product of the individual densities).
        If False, return the individual densities.

    Returns
    -------
    type
        float if prod
        np.ndarray if not prod
    """

    x = indicator_positive(theta)
    individual = np.exp(-lambda_ * theta) * lambda_ * x

    if prod:
        return np.prod(individual)
    else:
        return individual
Code example #23
    def __init__(self, faces, step, src, fov=None, f=None):
        self.src = src
        self.grid = Grid(img=src.shape, step=step)
        self.step = step

        self.fov = fov
        self.f = f

        self.uniform_grid = self.grid.grid
        self.sigm = self.sigmoid()
        self.stereo_grid = self.grid.get_stereo_proj(fov=fov, f=f).grid
        self.weights = prepare_weights(faces, self.uniform_grid)

        self.bounds = []  # grid cannot go beyond picture borders
        for i in range(int(len(self.uniform_grid.ravel()) / 2)):
            self.bounds.append((0, src.shape[1]))
            self.bounds.append((0, src.shape[0]))
        for i in range(self.weights.shape[3]):
            for j in range(4):
                self.bounds.append((-np.inf, np.inf))

        self.init = self.uniform_grid.ravel()
        for i in range(self.weights.shape[3]):
            self.init = np.append(self.init, [1, 1, 0, 0])

        self.grid_len = np.prod(self.stereo_grid.shape)
Code example #24
def Poly(x: np.ndarray, deg: int, type: tp.Optional[str] = "full"):
    inp_size = x.shape[1]
    # start with zero columns; polynomial features are appended below
    X_p = np.empty((x.shape[0], 0))
    # arrange every pair of input vectors
    indices_n_wise = combinations(range(inp_size), 2)
    if deg < 2:
        for idx in indices_n_wise:
            X_p = np.append(X_p, x[:, idx], axis=1)
    else:
        for idx in indices_n_wise:
            # full-degree polynomial terms up to `deg`
            indices_to_mul = sum(
                [
                    list(combinations_with_replacement(idx, i))
                    for i in range(2, deg + 1)
                ],
                [],
            )
            if type == "partial":
                # partial-degree polynomial: keep only mixed terms
                indices_to_mul = filter(lambda x: len(set(x)) > 1, indices_to_mul)
            for ind in indices_to_mul:
                pf = np.prod(x[:, ind], axis=1)[:, np.newaxis]
                X_p = np.append(X_p, pf, axis=1)
    return X_p
Code example #25
File: ann.py Project: siju-samuel/distributedML
def gen_data():
    category_paths = [f for f in listdir('101_ObjectCategories/')]
    image_paths = [
        f for f in listdir('101_ObjectCategories/menorah/')
        if isfile(join('101_ObjectCategories/menorah/', f))
    ]

    images = []
    output_labels = []
    # Include all categories with mappings to the integer representing the category
    categories_dict = {}

    category = 0
    for category_path in category_paths:
        image_paths = [
            f for f in listdir('101_ObjectCategories/' + category_path + '/')
        ]
        for image_path in image_paths:
            im = standarizeImage(
                imread('101_ObjectCategories/' + category_path + '/' +
                       image_path))
            if im.shape == (64, 64, 3):
                images.append(im)
                output_labels.append(category)
        categories_dict[category] = category_path
        category = category + 1

    images = np.array(images)
    partial_flatten = lambda x: np.reshape(x,
                                           (x.shape[0], np.prod(x.shape[1:])))
    images = partial_flatten(images)

    np.save('images(64).npy', images)
    np.save('output_labels(64).npy', output_labels)
Code example #26
File: tm.py Project: simonkamronn/autohmm
    def _do_mstep_grad(self, puc, data):
        wrt = [str(p) for p in self.wrt if str(p) in self.params]
        for update_idx in range(self.n_iter_update):
            for p in wrt:
                if p == 'm':
                    optim_x0 = self.mu_
                    wrt_arg = 0
                elif p == 'p':
                    optim_x0 = self.precision_
                    wrt_arg = 1
                else:
                    raise ValueError('unknown parameter')

                optim_bounds = [self.wrt_bounds[p] for k in
                                range(np.prod(self.wrt_dims[p]))]
                result = minimize(fun=self._optim_wrap, jac=True,
                                  x0=np.array(optim_x0).reshape(-1),
                                  args=(p,
                                        {'wrt': wrt_arg,
                                         'p': self.precision_,
                                         'm': self.mu_,
                                         'xn': data['obs'],
                                         'gn': puc  # post. uni. concat.
                                        }),
                                  bounds=optim_bounds,
                                  method='TNC')

                newv = result.x.reshape(self.wrt_dims[p])
                if p == 'm':
                    self.mu_ = newv
                elif p == 'p':
                    self.precision_ = newv
                else:
                    raise ValueError('unknown parameter')
Code example #27
def location_mixture_logpdf(samps, locations, location_weights, distr_at_origin, contr_var = False, variant = 1):
#    lpdfs = zeroprop.logpdf()
    diff = samps - locations[:, np.newaxis, :]
    lpdfs = distr_at_origin.logpdf(diff.reshape([np.prod(diff.shape[:2]), diff.shape[-1]])).reshape(diff.shape[:2])
    logprop_weights = log(location_weights/location_weights.sum())[:, np.newaxis]
    if not contr_var: 
        return logsumexp(lpdfs + logprop_weights, 0)
    #time_m1 = np.hstack([time0[:,:-1],time0[:,-1:]])
    else:
        time0 = lpdfs + logprop_weights + log(len(location_weights))
        
        if variant == 1:
            time1 = np.hstack([time0[:,1:],time0[:,:1]])
            cov = np.mean(time0**2-time0*time1)
            var = np.mean((time0-time1)**2)
            lpdfs = lpdfs - cov / var * (time0 - time1)
            return logsumexp(lpdfs - log(len(location_weights)), 0)
        elif variant == 2:
            cvar = (time0[:,:,np.newaxis] - 
                    np.dstack([np.hstack([time0[:, 1:], time0[:, :1]]),
                               np.hstack([time0[:,-1:], time0[:,:-1]])]))

            
            ## self-covariance matrix of control variates
            K_cvar = np.diag(np.mean(cvar**2, (0, 1)))
            #add off diagonal
            K_cvar = K_cvar + (1.-np.eye(2)) * np.mean(cvar[:,:,0]*cvar[:,:,1])
            
            ## covariance of control variates with random variable
            cov = np.mean(time0[:,:,np.newaxis] * cvar, 0).mean(0)
            
            optimal_comb = np.linalg.inv(K_cvar) @ cov
            lpdfs = lpdfs - cvar @ optimal_comb
            return logsumexp(lpdfs - log(len(location_weights)), 0)
Code example #28
File: utilities.py Project: RobeeF/DDGMM
def compute_chsi(H, psi, mu_s, sigma_s):
    ''' Compute chsi as defined in equation (8) of the DGMM paper 
    H (list of nb_layers elements of shape (K_l x r_l-1, r_l)): Lambda 
                                                    parameters for each layer
    psi (list of nb_layers elements of shape (K_l x r_l-1, r_l-1)): Psi 
                                                    parameters for each layer
    mu_s (list of nd-arrays): The means of the Gaussians starting at each layer
    sigma_s (list of nd-arrays): The covariance matrices of the Gaussians 
                                                    starting at each layer
    ------------------------------------------------------------------------------------------------
    returns (list of ndarray): The chsi parameters for all paths starting at each layer
    '''
    L = len(H)
    k = [len(h) for h in H]
    
    #=====================================================================
    # Initiating the parameters for all layers
    #=====================================================================
    
    # Initialization with the parameters of the last layer    
    chsi = [0 for i in range(L)]
    chsi[-1] = pinv(pinv(sigma_s[-1]) + t(H[-1], (0, 2, 1)) @ pinv(psi[-1]) @ H[-1]) 

    #==================================================================================
    # Compute chsi from top to bottom 
    #==================================================================================
        
    for l in range(L - 1):
        Ht_psi_H = t(H[l], (0, 2, 1)) @ pinv(psi[l]) @ H[l]
        Ht_psi_H = np.repeat(Ht_psi_H, np.prod(k[l + 1:]), axis=0)
        
        sigma_next_l = np.tile(sigma_s[l + 1], (k[l], 1, 1))
        chsi[l] = pinv(pinv(sigma_next_l) + Ht_psi_H)
            
    return chsi
Code example #29
File: ar.py Project: danwenxuan123/autohmm
    def _do_optim(self, p, optim_x0, gn, data, entries='all'):
        optim_bounds = [
            self.wrt_bounds[p] for k in range(np.prod(self.wrt_dims[p]))
        ]

        result = minimize(
            fun=self._optim_wrap,
            jac=True,
            x0=np.array(optim_x0).reshape(-1),
            args=(
                p,
                {
                    'wrt': p,
                    'p': self.precision_,
                    'm': self.mu_,
                    'a': self.alpha_,
                    'xn': data['obs'],
                    'xln': data['lagged'],
                    'gn': gn,  # post. uni. concat.
                    'entries': entries
                }),
            bounds=optim_bounds,
            method='TNC')
        new_value = result.x.reshape(self.wrt_dims[p])
        return new_value
Code example #30
    def _setup_input(self, X, y=None):
        if not isinstance(X, np.ndarray):
            X = np.array(X)

        if X.size == 0:
            raise ValueError("Number of features must be > 0")

        if X.ndim == 1:
            self.n_samples, self.n_features = 1, X.shape[0]
        else:
            self.n_samples, self.n_features = X.shape[0], np.prod(X.shape[1:])

        self.X = X

        if self.y_required:
            if y is None:
                raise ValueError("Missed required argument y")

            if not isinstance(y, np.ndarray):
                y = np.array(y)

            if y.size == 0:
                raise ValueError("Number of targets must be > 0")

        self.y = y
Code example #31
File: interface.py Project: abhiagwl/vistan
    def constrained_array_to_dict(self, samples):
        """ A function to convert the constrained parameter
            output to dictionary format compatible with
            PyStan's StanModelFit4.extract() format.

        Args:
            samples (np.ndarray): Constrained params

        Returns:
            dict:
                A dictionary that consists of the parameters arranged in
                the same format as PyStan's StanModelFit4.extract()
        """
        assert samples.ndim == 2
        N = samples.shape[0]

        constrained_param_shapes = self.get_constrained_param_shapes()
        params = collections.OrderedDict()
        idx = 0
        for param_name, param_shape in constrained_param_shapes.items():
            nparam = int(np.prod(param_shape))
            params[param_name] = np.reshape(samples[:, idx:idx + nparam],
                                            (N, *param_shape),
                                            order="F")
            idx += nparam
        assert idx == samples.shape[1]
        return params
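As the docstring says, order="F" keeps the layout compatible with PyStan's column-major flattening. A toy run of the same unpacking loop, with invented parameter shapes in place of get_constrained_param_shapes():

import collections
import numpy as np

constrained_param_shapes = collections.OrderedDict([('mu', (2,)), ('Sigma', (2, 2))])
N = 3
samples = np.arange(N * 6, dtype=float).reshape(N, 6)  # 6 = 2 + 4 flat values per draw

params, idx = {}, 0
for param_name, param_shape in constrained_param_shapes.items():
    nparam = int(np.prod(param_shape))
    params[param_name] = np.reshape(samples[:, idx:idx + nparam],
                                    (N, *param_shape), order="F")
    idx += nparam
print(params['mu'].shape, params['Sigma'].shape)  # (3, 2) (3, 2, 2)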
Code example #32
 def forward_pass(self, inputs, param_vector):
     params = self.parser.get(param_vector, 'params')
     biases = self.parser.get(param_vector, 'biases')
     if inputs.ndim > 2:
         inputs = inputs.reshape(
             (inputs.shape[0], np.prod(inputs.shape[1:])))
     return self.nonlinearity(np.dot(inputs[:, :], params) + biases)
Code example #33
    def compute_sample_LLs(self, NNs, zs, w_m, w_v, z_m, z_v, e_v, x, y, N):

        # Compute likelihood factors
        f_ws, f_zs = self.compute_LL_factors(NNs, zs, w_m, w_v, z_m, z_v, N)
        # Calculate likelihoods for every data-pair in the batch
        lls = []
        denom = 2 * e_v
        for k in range(0, self.K):
            # Append random features z to x
            x_z = np.concatenate((x, zs[k]), axis=1)
            out = NNs[k].execute(x_z)
            nom = np.square(y - out)
            ll = np.exp(-nom / denom) / (np.sqrt(2 * np.pi * e_v)) + 1e-10
            # Multiply multi-dimensional output if applicable
            ll = np.prod(ll, axis=1, keepdims=True)
            if (ll == 0).any():
                print('Warning: A likelihood is zero.', np.argmin(ll))
                self.errormsg.append(('one ll is zero.', np.argmin(ll)))
            # Include alpha and divide by likelihood factors
            factored_ll = (ll**self.alpha / (f_ws[k] * f_zs[k]))
            if (factored_ll == 0).any():
                print('Warning: A factored likelihood is zero.')
                self.errormsg.append('one f_ll is zero.')
            lls.append(factored_ll)
        return lls
Code example #34
    def sample(self, n, seed=872):
        """
        Rejection sampling.
        """
        d = len(self.freqs)
        sigma2 = self.sigma2
        freqs = self.freqs
        with util.NumpySeedContext(seed=seed):
            # rejection sampling
            sam = np.zeros((n, d))
            # sample block_size*d at a time.
            block_size = 500
            from_ind = 0
            while from_ind < n:
                # The proposal q is N(0, sigma2*I)
                X = np.random.randn(block_size, d) * np.sqrt(sigma2)
                q_un = np.exp(-np.sum(X**2, 1) / (2.0 * sigma2))
                # unnormalized density p
                p_un = q_un * (1 + np.prod(np.cos(X * freqs), 1))
                c = 2.0
                I = stats.uniform.rvs(size=block_size) < p_un / (c * q_un)

                # accept
                accepted_count = np.sum(I)
                to_take = min(n - from_ind, accepted_count)
                end_ind = from_ind + to_take

                AX = X[I, :]
                X_take = AX[:to_take, :]
                sam[from_ind:end_ind, :] = X_take
                from_ind = end_ind
        return Data(sam)
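The envelope constant c = 2.0 is valid because the cosine product lies in [-1, 1], so 1 + prod(cos) is in [0, 2] and p_un <= 2 * q_un everywhere, which is exactly what rejection sampling requires. A stripped-down version of one accept step (the frequencies here are invented):

import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
d, sigma2, block_size = 2, 1.0, 500
freqs = np.array([3.0, 5.0])

X = rng.standard_normal((block_size, d)) * np.sqrt(sigma2)   # proposal N(0, sigma2*I)
q_un = np.exp(-np.sum(X**2, 1) / (2.0 * sigma2))
p_un = q_un * (1 + np.prod(np.cos(X * freqs), 1))
accept = stats.uniform.rvs(size=block_size, random_state=rng) < p_un / (2.0 * q_un)
print(accept.mean())  # empirical acceptance rate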
Code example #35
File: agnn.py Project: hal3/aglols
 def add_shape(self, name, shape):
     if name in self.idxs_and_shapes:
         if shape != self.idxs_and_shapes[name][1]:
             raise Exception("re-adding shape with same name (%s) with different shape (%s vs %s)" % (name, shape, self.idxs_and_shapes[name][1]))
         return
     start = self.num_weights
     self.num_weights += np.prod(shape)
     self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)
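This slice bookkeeping is the usual trick for packing all parameters into one flat vector; a matching get method (used in example #32 as self.parser.get(param_vector, 'params')) slices and reshapes. A minimal self-contained sketch of the whole parser, with the get method inferred from that usage rather than taken from this snippet:

import numpy as np

class WeightsParser:
    def __init__(self):
        self.idxs_and_shapes = {}
        self.num_weights = 0

    def add_shape(self, name, shape):
        start = self.num_weights
        self.num_weights += int(np.prod(shape))
        self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)

    def get(self, vect, name):
        idxs, shape = self.idxs_and_shapes[name]
        return np.reshape(vect[idxs], shape)

parser = WeightsParser()
parser.add_shape('W', (3, 4))
parser.add_shape('b', (4,))
vect = np.arange(parser.num_weights, dtype=float)
print(parser.get(vect, 'W').shape, parser.get(vect, 'b').shape)  # (3, 4) (4,)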
Code example #36
 def __init__(self, mu, var):
     self.norm_const = - 0.5*np.log(2*np.pi)
     self.mu = np.atleast_1d(mu).flatten()
     self.var = np.atleast_1d(var).flatten() 
     self.dim = np.prod(self.var.shape)
     assert(self.mu.shape == self.var.shape)
     self.std = np.sqrt(var)
     self.logstd = np.log(self.std)
Code example #37
def _glorot_fan(shape):
    assert len(shape) >= 2

    if len(shape) == 4:
        receptive_field_size = np.prod(shape[2:])
        fan_in = shape[1] * receptive_field_size
        fan_out = shape[0] * receptive_field_size
    else:
        fan_in, fan_out = shape[:2]
    return float(fan_in), float(fan_out)
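The fan values are typically plugged into a Glorot/Xavier scale such as sqrt(2 / (fan_in + fan_out)). A hypothetical usage sketch, assuming the 4-D convention (out_channels, in_channels, height, width) implied by the fan_in/fan_out lines above:

import numpy as np

def _glorot_fan(shape):
    assert len(shape) >= 2
    if len(shape) == 4:
        receptive_field_size = np.prod(shape[2:])
        fan_in = shape[1] * receptive_field_size
        fan_out = shape[0] * receptive_field_size
    else:
        fan_in, fan_out = shape[:2]
    return float(fan_in), float(fan_out)

shape = (16, 3, 5, 5)
fan_in, fan_out = _glorot_fan(shape)      # 75.0, 400.0
scale = np.sqrt(2.0 / (fan_in + fan_out))
W = np.random.normal(0.0, scale, shape)
print(W.std())  # roughly equal to `scale`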
Code example #38
File: data.py Project: davidweichiang/autograd
def load_mnist():
    partial_flatten = lambda x : np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, k: np.array(x[:,None] == np.arange(k)[None, :], dtype=int)
    train_images, train_labels, test_images, test_labels = data_mnist.mnist()
    train_images = partial_flatten(train_images) / 255.0
    test_images  = partial_flatten(test_images)  / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]

    return N_data, train_images, train_labels, test_images, test_labels
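The two lambdas are compact but dense: partial_flatten collapses everything except the leading batch axis, and one_hot broadcasts a column of labels against a row of class indices. A standalone demo of both:

import numpy as np

partial_flatten = lambda x: np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
one_hot = lambda x, k: np.array(x[:, None] == np.arange(k)[None, :], dtype=int)

images = np.zeros((5, 28, 28))
labels = np.array([0, 2, 1, 2, 0])
print(partial_flatten(images).shape)  # (5, 784)
print(one_hot(labels, 3))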
Code example #39
File: data.py Project: AugustLONG/autograd
def load_mnist():
    partial_flatten = lambda x : np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, k: np.array(x[:,None] == np.arange(k)[None, :], dtype=int)
    source, _ = urllib.urlretrieve(
        'https://raw.githubusercontent.com/HIPS/Kayak/master/examples/data.py')
    data = imp.load_source('data', source).mnist()
    train_images, train_labels, test_images, test_labels = data
    train_images = partial_flatten(train_images) / 255.0
    test_images  = partial_flatten(test_images)  / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]

    return N_data, train_images, train_labels, test_images, test_labels
Code example #40
File: glmm.py Project: onenoc/lfvbae
def likelihood_individual(beta,y,X,alpha):
    N = len(alpha)
    t = len(y)
    #get success probabilities
    p = get_pi(beta,X,alpha)
    #do bernoulli to get observation probabilities
    y = np.tile(y, len(p) // len(y))
    likelihood = bernoulli(p, y)
    #handle products (based on number of time steps, multiply every t elements together)
    likelihood = np.reshape(likelihood, (t, len(likelihood) // t))
    likelihood = np.prod(likelihood, 0)
    #handle sums (over particles)
    likelihood = np.sum(likelihood)/N
    return likelihood
Code example #41
File: test_jacobian.py Project: xindaya/autograd
def test_jacobian_against_stacked_grads():
    scalar_funs = [
        lambda x: np.sum(x ** 3),
        lambda x: np.prod(np.sin(x) + np.sin(x)),
        lambda x: grad(lambda y: np.exp(y) * np.tanh(x[0]))(x[1]),
    ]

    vector_fun = lambda x: np.array([f(x) for f in scalar_funs])

    x = npr.randn(5)
    jac = jacobian(vector_fun)(x)
    grads = [grad(f)(x) for f in scalar_funs]

    assert np.allclose(jac, np.vstack(grads))
Code example #42
 def __init__(self, mu, K, Ki = None, logdet_K = None, L = None): 
     mu = np.atleast_1d(mu).flatten()
     K = np.atleast_2d(K) 
     assert(np.prod(mu.shape) == K.shape[0] )
     assert(K.shape[0] == K.shape[1])
     
     self.mu = mu
     self.K = K
     (val, vec) = np.linalg.eigh(K)
     idx = np.arange(mu.size-1,-1,-1)
     (self.eigval, self.eigvec) = (np.diag(val[idx]), vec[:,idx])
     self.eig = self.eigvec.dot(np.sqrt(self.eigval))
     self.dim = K.shape[0]
     #(self.Ki, self.logdet) = (np.linalg.inv(K), np.linalg.slogdet(K)[1])
     (self.Ki, self.L, self.Li, self.logdet) = pdinv(K)
     
     self.lpdf_const = -0.5 * float(self.dim * np.log(2 * np.pi)
                                    + self.logdet)
Code example #43
 def __init__(self, mu, K, df, Ki = None, logdet_K = None, L = None):
     mu = np.atleast_1d(mu).flatten()
     K = np.atleast_2d(K)
     assert(np.prod(mu.shape) == K.shape[0] )
     assert(K.shape[0] == K.shape[1])
     self.mu = mu
     self.K = K
     self.df = df
     self._freeze_chi2 = stats.chi2(df)
     self.dim = K.shape[0]
     self._df_dim = self.df + self.dim
     #(self.Ki,  self.logdet) = (np.linalg.inv(K), np.linalg.slogdet(K)[1])
     (self.Ki, self.L, self.Li, self.logdet) = pdinv(K)
     
     
     self.lpdf_const = float(gammaln((self.df + self.dim) / 2)
                             - (gammaln(self.df / 2)
                                + (log(self.df) + log(np.pi)) * self.dim * 0.5
                                + self.logdet * 0.5)
                             )
Code example #44
File: ar.py Project: sarah-strauss/autohmm
    def _do_optim(self, p, optim_x0, gn, data, entries='all'):
        optim_bounds = [self.wrt_bounds[p] for k in
                        range(np.prod(self.wrt_dims[p]))]

        result = minimize(fun=self._optim_wrap,jac=True,
                          x0=np.array(optim_x0).reshape(-1),
                          args=(p,
                                {'wrt': p,
                                 'p': self.precision_,
                                 'm': self.mu_,
                                 'a': self.alpha_,
                                 'xn': data['obs'],
                                 'xln': data['lagged'],
                                 'gn': gn,  # post. uni. concat.
                                 'entries': entries
                                }),
                          bounds=optim_bounds,
                          method='TNC')
        new_value = result.x.reshape(self.wrt_dims[p])
        return new_value
Code example #45
 def _generate_layers(self, weights):
     used = 0
     for shape in self._shapes:
         size = np.prod(shape)
         yield weights[used:used+size].reshape(shape), weights[used+size], weights[used+size+1] + 1
         used += size + 2
Code example #46
File: gmm.py Project: xindaya/autograd
 def add_shape(self, name, shape):
     start = self.num_weights
     self.num_weights += np.prod(shape)
     self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)
Code example #47
File: test_numpy.py Project: ASAPPinc/autograd
 def fun(x): return to_scalar(np.prod(x))
 d_fun = lambda x : to_scalar(grad(fun)(x))
Code example #48
File: test_numpy.py Project: ASAPPinc/autograd
 def fun(x): return to_scalar(np.prod(x, axis=0, keepdims=True))
 d_fun = lambda x : to_scalar(grad(fun)(x))
Code example #49
File: neural_net.py Project: agibsonccc/autograd
if __name__ == '__main__':
    # Network parameters
    layer_sizes = [784, 200, 100, 10]
    L2_reg = 1.0

    # Training parameters
    param_scale = 0.1
    learning_rate = 1e-3
    momentum = 0.9
    batch_size = 256
    num_epochs = 50
    
    # Load and process MNIST data (borrowing from Kayak)
    print "Loading training data..."
    import imp, urllib
    partial_flatten = lambda x : np.reshape(x, (x.shape[0], np.prod(x.shape[1:])))
    one_hot = lambda x, K : np.array(x[:,None] == np.arange(K)[None, :], dtype=int)
    source, _ = urllib.urlretrieve(
        'https://raw.githubusercontent.com/HIPS/Kayak/master/examples/data.py')
    data = imp.load_source('data', source).mnist()
    train_images, train_labels, test_images, test_labels = data
    train_images = partial_flatten(train_images) / 255.0
    test_images  = partial_flatten(test_images)  / 255.0
    train_labels = one_hot(train_labels, 10)
    test_labels = one_hot(test_labels, 10)
    N_data = train_images.shape[0]

    # Make neural net functions
    N_weights, pred_fun, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)
    loss_grad = grad(loss_fun)
Code example #50
def build_checker_dataset(n_data=6, noise_std=0.1):
    rs = npr.RandomState(0)
    inputs = np.array([np.array([x, y]) for x in np.linspace(-1, 1, n_data)
                       for y in np.linspace(-1, 1, n_data)])
    # label is the sign of x*y (checker pattern), plus Gaussian noise
    targets = np.sign([np.prod(inp) for inp in inputs]) + rs.randn(n_data**2) * noise_std
    return inputs, targets
Code example #51
File: build_rnn.py Project: SunLinJie/RNN4MNIST
 def add_shape(self, name, shape):
     start = self.num_weights
     self.num_weights += np.prod(shape)  # np.prod: product of all shape entries
     self.idxs_and_shapes[name] = (slice(start, self.num_weights), shape)
     self.weights_name.append(name)
Code example #52
 def shape(self, x_shape):
     return x_shape[0], np.prod(x_shape[1:])
Code example #53
 def n_params(self):
     return sum([np.prod(self._params[x].shape) for x in self._params.keys()])
Code example #54
File: convnet.py Project: CamZHU/autograd
 def forward_pass(self, inputs, param_vector):
     params = self.parser.get(param_vector, 'params')
     biases = self.parser.get(param_vector, 'biases')
     if inputs.ndim > 2:
         inputs = inputs.reshape((inputs.shape[0], np.prod(inputs.shape[1:])))
     return self.nonlinearity(np.dot(inputs[:, :], params) + biases)
Code example #55
File: convnet.py Project: CamZHU/autograd
 def add_weights(self, name, shape):
     start = self.N
     self.N += np.prod(shape)
     self.idxs_and_shapes[name] = (slice(start, self.N), shape)
Code example #56
 def __init__(self, layers_sizes, batch_size=32, dropout=0.1, init_scale=0.05):
     self._shapes = np.array([layers_sizes[:-1], layers_sizes[1:]]).T
     weights_size = np.sum(np.prod(self._shapes, 1)) + 2*len(self._shapes)
     self._weights = np.random.uniform(-init_scale, init_scale, weights_size)
     self.dropout = dropout
     self.batch_size = batch_size