Example #1
def make_surface(n, length_scale):
    # BEWARE: memory consumption is O(n**4)!
    from numpy import linspace, meshgrid, ravel, subtract, exp, zeros_like, reshape, random
    from scipy import ndimage

    # set up arrays of x and y coordinates defining a grid
    t = linspace(-1.0, 1.0, n)
    x, y = meshgrid(t, t)

    x = ravel(x)
    y = ravel(y)

    # compute the square Euclidean distance between each pair of grid points
    delta_x = subtract.outer(x, x)
    delta_y = subtract.outer(y, y)
    dist_squared = delta_x ** 2 + delta_y ** 2

    # sample a Gaussian process over the grid using a squared-exponential covariance function
    cov = exp(-dist_squared/(2.0*(length_scale**2)))
    mean = zeros_like(x)
    surface = random.multivariate_normal(mean, cov)

    # reshape the sampled values into a 2d array (matching the grid layout)
    surface_2d = reshape(surface, (n, n))

    # resize the sampled values using interpolation (cheap way of faking higher resolution)
    surface_2d_interp = ndimage.zoom(surface_2d, 5)
    return surface_2d_interp
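A quick smoke test for the function above (parameter values are illustrative; remember the covariance matrix is (n*n) x (n*n), so keep n small):

surface = make_surface(30, 0.5)   # 30x30 grid -> 900x900 covariance matrix
print(surface.shape)              # (150, 150) after the 5x interpolation zoom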
Example #2
 def assign_amides(self, x, y, tol=(0.4, 0.04), n=3):
 
     from numpy import subtract, fabs, argmin, array, compress, nonzero
 
     ## assign based on closest Mahalanobis distance
 
     tol = array(tol)
     
     d1 = fabs(subtract.outer(x[:,0],y[:,0])) / tol[0]
     d2 = fabs(subtract.outer(x[:,1],y[:,1])) / tol[1]
 
     labels = argmin(d1**2 + d2**2,0)
 
     ## throw out wrong assignments (i.e. assignments that are
     ## more than n*tol away from cluster center)
 
     newidx = 0 * labels - 1 
 
     for i in range(len(y)):
 
         d = fabs(x[labels[i]]-y[i])
 
         ## correct assignment
 
         if (d < n*tol).all():
             newidx[i] = labels[i]
 
     labels = newidx
 
     ## collect wrongly assigned data points and cluster them without a reference
     ## (i.e. unsupervised clustering)
 
     invalid = labels==-1
 
     clusters_invalid, idx_invalid = self.cluster_points(compress(invalid,y,0), tol)
 
     labels[nonzero(invalid)[0]] = idx_invalid + len(x)
 
     ## calculate new cluster centers 
 
     centers = []
     for i in range(labels.max()+1):
         z = compress(labels==i,y,0)
         if len(z):
             centers.append(z.mean(0))
         else:
             centers.append(None)
     
     return labels, centers
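The core of the method is the per-dimension scaled subtract.outer distance; a standalone sketch of that assignment step (toy data, no class context):

from numpy import array, fabs, subtract, argmin

# reference positions (x) and observations (y); the two dimensions have very
# different scales, hence the per-dimension tolerances
x = array([[8.2, 120.0], [7.5, 118.5]])
y = array([[8.19, 119.95], [7.52, 118.54], [9.0, 125.0]])
tol = array([0.4, 0.04])

d1 = fabs(subtract.outer(x[:, 0], y[:, 0])) / tol[0]
d2 = fabs(subtract.outer(x[:, 1], y[:, 1])) / tol[1]
labels = argmin(d1**2 + d2**2, 0)  # closest reference for each observation
print(labels)                      # [0 1 0]; the last point is a poor match for either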
Example #3
    def full_grid_interp_new(self, p, fs, X, fg_cache):
        # accept X as a list of arrays
        first = True
        for i, e in reversed(list(enumerate(p))):
            if (i, e) in fg_cache:
                one_over_x_m_xi, den_factor = fg_cache[(i, e)]
            else:
                Xs = self.xroot_cache[i][e]
                Ws = self.cheb_weights_cache[e]
                diff = subtract.outer(X[i], Xs)
                mask = (diff == 0).any(axis=-1)
                if any(mask):
                    w = where(diff == 0)
                    one_over_x_m_xi = zeros_like(diff)
                    one_over_x_m_xi[w] = 1
                    one_over_x_m_xi[~mask] = Ws/(diff[~mask])
                else:
                    one_over_x_m_xi = Ws/diff
                den_factor = sum(one_over_x_m_xi, axis=1)
                fg_cache[(i, e)] = one_over_x_m_xi, den_factor

            if first:
                fs_r = fs.reshape((-1,one_over_x_m_xi.shape[1]))
                fs = dot(one_over_x_m_xi, fs_r.T)
                first = False
            else:
                fs_r = fs.reshape((X[0].shape[0], fs.shape[1] // one_over_x_m_xi.shape[1], one_over_x_m_xi.shape[1]))
                fs = (fs_r * one_over_x_m_xi[:,newaxis,:]).sum(axis=-1)
            fs /= den_factor[:,newaxis]

        return fs[:,0]
Example #4
 def find_ambiguous_mapping(self, res, aa,
                            non_ambiguous_mapping,
                            previous=False):
     from munkres import Munkres
     from numpy import fabs, sum, delete
     
     total_costs = None
     mapping = []
     ambiguous_keys = []
     ambiguous_shifts = []
     res_shifts, res_keys = res.get_carbons(previous)
     aa_shifts, aa_keys = aa.get_carbons()
     
     for i, j in non_ambiguous_mapping:
         if j in aa_keys:
             k = list(aa_keys).index(j)
             aa_shifts = delete(aa_shifts, k)
             aa_keys = delete(aa_keys, k)
     for i, key in enumerate(res_keys):
         if self.ambiguous(key, previous):
             ambiguous_keys.append(key)
             ambiguous_shifts.append(res_shifts[i])
             
     if len(aa_keys) > 0 and len(ambiguous_shifts) > 0:
         costs = fabs(subtract.outer(ambiguous_shifts, aa_shifts))
         munkres = Munkres()
         result = munkres.compute(costs * 1.)
         for i, j in result:
             mapping.append((ambiguous_keys[i], aa_keys[j]))
         
     return mapping
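A minimal standalone illustration of the Munkres pairing used above, with made-up shift values (the munkres package's compute() returns a list of (row, column) index pairs):

from munkres import Munkres
from numpy import fabs, subtract

res_shifts = [30.1, 41.8]        # hypothetical ambiguous residue shifts
aa_shifts = [42.0, 29.9, 55.3]   # hypothetical candidate amino-acid shifts

costs = fabs(subtract.outer(res_shifts, aa_shifts))   # pairwise |difference|
assignment = Munkres().compute(costs.tolist())
print(assignment)   # [(0, 1), (1, 0)]: each residue paired with its nearest shift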
Example #5
 def compute_cost_matrix(self, residue1, residue2, tolerance):
     '''
     Computes a cost matrix C for Munkres for a predecessor/successor pair
     i := residue1
     j := residue2
      C_ij = 0, if the deviation between the i-shifts of residue1 and the
               [i-1]-shifts of residue2 is within tolerance
      C_ij = 1, otherwise
     
     @todo: Double check the filling of cost matrix
     
     @param residue1: Possible predecessor of residue2
     @type residue1: Residue.PastaResidue
     @param residue2: Possible successor of residue1
     @type residue2: Residue.PastaResidue
     @return: C
     @rtype: numpy.ndarray
     '''
     
     shifts_i, keys_i = residue1.get_carbons(previous=False)
     shifts_im1, keys_im1 = residue2.get_carbons(previous=True)
     
     delta = fabs(subtract.outer(shifts_i, shifts_im1))
     C = 1 - (delta <= tolerance).astype('i')
     
     return C
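A worked example of the thresholded cost matrix with toy shifts (values are illustrative):

from numpy import fabs, subtract

shifts_i = [55.0, 30.2]      # carbon shifts of residue1
shifts_im1 = [55.1, 48.0]    # [i-1]-shifts of residue2
tolerance = 0.5

delta = fabs(subtract.outer(shifts_i, shifts_im1))
C = 1 - (delta <= tolerance).astype('i')
print(C)   # [[0 1]
           #  [1 1]] -- only the 55.0/55.1 pair matches within tolerance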
Example #6
    def full_grid_interp_new(self, p, fs, X, fg_cache):
        # accept X as a list of arrays
        first = True
        for i, e in reversed(list(enumerate(p))):
            if (i, e) in fg_cache:
                one_over_x_m_xi, den_factor = fg_cache[(i, e)]
            else:
                Xs = self.xroot_cache[i][e]
                Ws = self.cheb_weights_cache[e]
                diff = subtract.outer(X[i], Xs)
                mask = (diff == 0).any(axis=-1)
                if any(mask):
                    w = where(diff == 0)
                    one_over_x_m_xi = zeros_like(diff)
                    one_over_x_m_xi[w] = 1
                    one_over_x_m_xi[~mask] = Ws / (diff[~mask])
                else:
                    one_over_x_m_xi = Ws / diff
                den_factor = sum(one_over_x_m_xi, axis=1)
                fg_cache[(i, e)] = one_over_x_m_xi, den_factor

            if first:
                fs_r = fs.reshape((-1, one_over_x_m_xi.shape[1]))
                fs = dot(one_over_x_m_xi, fs_r.T)
                first = False
            else:
                fs_r = fs.reshape(
                    (X[0].shape[0], fs.shape[1] // one_over_x_m_xi.shape[1],
                     one_over_x_m_xi.shape[1]))
                fs = (fs_r * one_over_x_m_xi[:, newaxis, :]).sum(axis=-1)
            fs /= den_factor[:, newaxis]

        return fs[:, 0]
Example #7
def dFiniteBasisV(x, c, dc, s):
    """
  Finite support radial basis functions derivative
  
  TYPICAL USAGE
  =============

    >>> from numpy import array,eye,sum
    >>> eps = 1e-6
    >>> x = array([[0.1,0.1],[0.0,0.0],[1.0,0.0]])
    >>> c = array([[0.0,0.0],[1.0,0.0],[0.0,0.5],[-1.0,-1.0]])
    >>> dc = array([[1.0,0.0],[0.0,0.0],[0.0,1.0],[0.1,0.1]])
    >>> s = array([0.9,1.5,1.0,5.0])
    >>> V1 = array([FiniteBasisV(x+i,c,dc,s) for i in eps*eye(2)])
    >>> V0 = FiniteBasisV(x,c,dc,s)
    >>> dV = (V1-V0)/eps
    >>> dv = dFiniteBasisV(x,c,dc,s)
    >>> sum((dV-dv)**2)<100*eps
    True
  
  
  """
    D = x.shape[1]
    R = array([subtract.outer(i, j) for i, j in zip(x.T, c.T)]) / s
    r = sqrt(sum(R * R, 0))
    rho = (r * r * r * r - 2 * r * r + 1) * (r < 1.0)
    drho = (4 * r * r * r - 4 * r) * (r < 1.0)
    r[r == 0] = 1.0  # Fix normalization after basis calculation
    return (
        drho * (sum(R.transpose(1, 2, 0) * dc, -1)) * (R / (r * s))
        + (tile(rho, (D, 1, 1)).transpose(1, 2, 0) * dc).transpose(2, 0, 1) / s
    )
Example #8
def gp_samples():
    from numpy import linspace, zeros, exp, subtract
    from numpy.random import default_rng

    N = 5
    x = linspace(1, N, N)
    mean = zeros(N)
    covariance = exp(-0.1 * subtract.outer(x, x) ** 2)

    samples = default_rng(1234).multivariate_normal(mean, covariance, size=100)
    return [samples[:, i] for i in range(N)]
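For reference, the covariance built above is a squared-exponential kernel on the 1-D grid; subtract.outer supplies the pairwise differences:

from numpy import linspace, subtract, exp

x = linspace(1, 5, 5)
cov = exp(-0.1 * subtract.outer(x, x) ** 2)
print(cov.shape)   # (5, 5), with cov[i, j] == exp(-0.1 * (x[i] - x[j])**2)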
Example #9
def _ewald_correction(x, alpha=2.0):
    """
    calculate the Ewald-correction forces, i.e. the force due to all the images
    of a point particle, EXCLUDING the nearest one.

    Particle at 0,0,0, in the periodic unit cube (x,y,z in [-0.5,0.5])

    See also Hernquist, Bouchet & Suto 1991
    """
    from numpy import cumprod, empty, empty_like, mgrid, float64, subtract, exp, flatnonzero, dot, square, sqrt, pi, sin
    from scipy.special import erfc
    old_shape = x.shape
    n = cumprod(old_shape[:-1])[-1]
    x2 = x.reshape(n, 3)

    force = empty_like(x2)
    r2 = square(x2).sum(1)
    mult = 1.0 / (r2 * sqrt(r2))
    for i in range(3):
        force[:, i] = x2[:, i] * mult

    N = (mgrid[:9, :9, :9] - 4).reshape((3, 9 * 9 * 9))

    vec = empty((n, N.shape[1], 3), dtype=float64)
    for i in range(3):
        vec[:, :, i] = subtract.outer(x2[:, i], N[i])

    r = sqrt(square(vec).sum(2))
    mult = (erfc(alpha * r) +
            (2 * alpha / sqrt(pi)) * r * exp(-alpha * alpha * r * r)) / (r * r * r)

    for i in range(3):
        force[:, i] -= (vec[:, :, i] * mult).sum(1)

    N2 = square(N).sum(0).reshape(1, N.shape[1])
    idx = flatnonzero(N2 > 0)
    N = N[:, idx]
    N2 = N2[:, idx]

    N_x = dot(x2, N)

    N = N.reshape(3, 1, N.shape[1])
    mult = (2.0 / N2) * exp(-pi * pi * N2 /
                            (alpha * alpha)) * sin(2 * pi * N_x)

    for i in range(3):
        force[:, i] -= (N[i] * mult).sum(1)

    # make the x=0 point zero, if it exists
    idx = flatnonzero(r2 == 0)
    force[idx, :] = 0.0

    force = force.reshape(old_shape)

    return force
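A minimal smoke test (positions drawn inside the periodic unit cube described in the docstring; the seed is arbitrary):

from numpy.random import default_rng

pos = default_rng(0).uniform(-0.5, 0.5, size=(10, 3))
forces = _ewald_correction(pos)
print(forces.shape)   # (10, 3): one correction-force vector per particle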
Example #10
 def _create_and_rotate_coordinate_arrays(self, x, y, orientation):
     """
     Create pattern matrices from x and y vectors, and rotate
     them to the specified orientation.
     """
     # Using this two-liner requires that x increase from left to
     # right and y decrease from left to right; I don't think it
     # can be rewritten in so little code otherwise - but please
     # prove me wrong.
     pattern_y = subtract.outer(cos(orientation)*y, sin(orientation)*x)
     pattern_x = add.outer(sin(orientation)*y, cos(orientation)*x)
     return pattern_x, pattern_y
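A quick sanity check of the two-liner outside the class: at orientation 0 the outer operations simply broadcast x along each row and y down each column (the vectors below are illustrative):

from numpy import add, subtract, cos, sin, linspace, allclose, tile

x = linspace(-1.0, 1.0, 5)
y = linspace(1.0, -1.0, 5)   # y decreases from left to right, as the comment requires
orientation = 0.0

pattern_y = subtract.outer(cos(orientation) * y, sin(orientation) * x)
pattern_x = add.outer(sin(orientation) * y, cos(orientation) * x)

assert allclose(pattern_x, tile(x, (5, 1)))     # every row is a copy of x
assert allclose(pattern_y, tile(y, (5, 1)).T)   # every column is a copy of y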
Example #11
def test_trace_plot():
    from numpy import linspace, zeros, exp, subtract
    from numpy.random import default_rng
    from inference.plotting import trace_plot  # assumed to live alongside matrix_plot

    N = 11
    x = linspace(1, N, N)
    mean = zeros(N)
    covariance = exp(-0.1 * subtract.outer(x, x) ** 2)

    samples = default_rng(1234).multivariate_normal(mean, covariance, size=100)
    samples = [samples[:, i] for i in range(N)]
    labels = ["test {}".format(i) for i in range(len(samples))]

    fig = trace_plot(samples, labels=labels, show=False)

    assert len(fig.get_axes()) == N
Example #12
def tri(N, M=None, k=0, dtype=None):
    """ returns a N-by-M matrix where all the diagonals starting from
        lower left corner up to the k-th are all ones.
    """
    if M is None: M = N
    if type(M) == type('d'):
        #pearu: any objections to remove this feature?
        #       As tri(N,'d') is equivalent to tri(N,dtype='d')
        dtype = M
        M = N
    m = greater_equal(subtract.outer(arange(N), arange(M)),-k)
    if dtype is None:
        return m
    else:
        return m.astype(dtype)
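The mask built from subtract.outer reproduces numpy's own tri; a quick equivalence check:

import numpy as np
from numpy import greater_equal, subtract, arange

N, M, k = 3, 5, 1
m = greater_equal(subtract.outer(arange(N), arange(M)), -k)
assert (m == np.tri(N, M, k, dtype=bool)).all()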
Example #13
def tri(N, M=None, k=0, dtype=None):
    """Construct (N, M) matrix filled with ones at and below the k-th diagonal.

    The matrix has A[i, j] == 1 for j <= i + k

    Parameters
    ----------
    N : integer
    M : integer
        Size of the matrix. If M is None, M == N is assumed.
    k : integer
        Index of the diagonal at and below which the matrix is filled with ones.
        k == 0 is the main diagonal, k < 0 is below it, and k > 0 is above it.
    dtype : dtype
        Data type of the matrix.

    Returns
    -------
    A : array, shape (N, M)

    Examples
    --------
    >>> from scipy.linalg import tri
    >>> tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])
    >>> tri(3, 5, -1, dtype=int)
    array([[0, 0, 0, 0, 0],
           [1, 0, 0, 0, 0],
           [1, 1, 0, 0, 0]])

    """
    if M is None:
        M = N
    if type(M) == type("d"):
        # pearu: any objections to remove this feature?
        #       As tri(N,'d') is equivalent to tri(N,dtype='d')
        dtype = M
        M = N
    m = greater_equal(subtract.outer(arange(N), arange(M)), -k)
    if dtype is None:
        return m
    else:
        return m.astype(dtype)
Example #14
 def interp_at(self, x):
     """Barycentric interpolation"""
     scalar_x = isscalar(x)
     if have_Cython:
         y = bary_interp(self.Xs, self.Ys, self.weights,
                         asfarray(atleast_1d(x)))
         if scalar_x:
             y = y[0]
     else:
         # split x if necessary to avoid overflow
         max_size = 100000
         if scalar_x:
             x_parts = [atleast_1d(x)]
         elif x.size <= max_size:
             x_parts = [x]
         else:
             n_parts = (x.size * len(self.Xs) + max_size - 1) // max_size
             x_parts = array_split(x, n_parts)
         # now interpolate each part of x
         results = []
         for x_p in x_parts:
             xdiff = subtract.outer(x_p, self.Xs)
             ind = where(xdiff == 0)
             xdiff[ind] = 1
             temp = self.weights / xdiff
             num = dot(temp, self.Ys)
             den = temp.sum(axis=-1)
             # Tricky case which can occur when ends of the interval are
             # almost equal.  xdiff can be close to but nonzero, but the sum
             # in the denominator can be exactly zero.
             if (den == 0).any():
                 num[den == 0] = self.Ys[abs(
                     xdiff[den == 0]).argmin(axis=-1)]
                 den[den == 0] = 1
             ret = array(num / den)
             if len(ind[0]) > 0:
                 ret[ind[:-1]] = self.Ys[ind[-1]]
             results.append(ret)
         # concatenate results
         if scalar_x:
             y = squeeze(results)
         else:
             y = concatenate(results)
     return y
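A self-contained sketch of the same barycentric formula on Chebyshev points of the second kind, whose weights are alternating +-1 (halved at the endpoints); this mirrors the numerator/denominator structure above without the class machinery:

from numpy import cos, pi, arange, subtract, dot, sin

n = 16                               # polynomial degree
Xs = cos(pi * arange(n + 1) / n)     # Chebyshev points of the second kind
weights = (-1.0) ** arange(n + 1)
weights[0] *= 0.5
weights[-1] *= 0.5
Ys = sin(3 * Xs)                     # function values at the nodes

xq = arange(-0.95, 1.0, 0.1)         # query points, none exactly on a node
temp = weights / subtract.outer(xq, Xs)
y = dot(temp, Ys) / temp.sum(axis=-1)
print(abs(y - sin(3 * xq)).max())    # interpolation error is tiny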
Example #15
    def full_grid_interp(self, p, fs, X, fg_cache):
        # accept X as a list of arrays
        den = 1
        if have_Cython:
            one_over_x_m_xi_list = []  # C
        else:
            one_over_x_m_xi_grid = None
        for i, e in enumerate(p):
            if (i, e) in fg_cache:
                one_over_x_m_xi, den_factor = fg_cache[(i, e)]
            else:
                Xs = self.xroot_cache[i][e]
                Ws = self.cheb_weights_cache[e]
                diff = subtract.outer(X[i], Xs)
                mask = (diff == 0).any(axis=-1)
                if any(mask):
                    w = where(diff == 0)
                    one_over_x_m_xi = zeros_like(diff)
                    one_over_x_m_xi[w] = 1
                    one_over_x_m_xi[~mask] = Ws / (diff[~mask])
                else:
                    one_over_x_m_xi = Ws / diff
                den_factor = sum(one_over_x_m_xi, axis=1)
                fg_cache[(i, e)] = one_over_x_m_xi, den_factor

            den *= den_factor
            if have_Cython:
                one_over_x_m_xi_list.append(one_over_x_m_xi)  # C
            else:
                if one_over_x_m_xi_grid is None:
                    one_over_x_m_xi_grid = one_over_x_m_xi
                else:
                    newshape = (one_over_x_m_xi.shape[0], ) + (1, ) + (
                        one_over_x_m_xi.shape[-1], )
                    one_over_x_m_xi_grid = one_over_x_m_xi_grid[
                        ..., newaxis] * one_over_x_m_xi.reshape(newshape)
                one_over_x_m_xi_grid = one_over_x_m_xi_grid.reshape(
                    (X[0].shape[0], -1))

        if have_Cython:
            num = c_dense_grid_interp(one_over_x_m_xi_list, fs)
        else:
            num = dot(one_over_x_m_xi_grid, fs)
        return num / den
Example #16
def tri(N, M=None, k=0, dtype=None):
    """Construct (N, M) matrix filled with ones at and below the k-th diagonal.

    The matrix has A[i, j] == 1 for j <= i + k

    Parameters
    ----------
    N : integer
    M : integer
        Size of the matrix. If M is None, M == N is assumed.
    k : integer
        Index of the diagonal at and below which the matrix is filled with ones.
        k == 0 is the main diagonal, k < 0 is below it, and k > 0 is above it.
    dtype : dtype
        Data type of the matrix.

    Returns
    -------
    A : array, shape (N, M)

    Examples
    --------
    >>> from scipy.linalg import tri
    >>> tri(3, 5, 2, dtype=int)
    array([[1, 1, 1, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 1, 1, 1, 1]])
    >>> tri(3, 5, -1, dtype=int)
    array([[0, 0, 0, 0, 0],
           [1, 0, 0, 0, 0],
           [1, 1, 0, 0, 0]])

    """
    if M is None: M = N
    if type(M) == type('d'):
        #pearu: any objections to remove this feature?
        #       As tri(N,'d') is equivalent to tri(N,dtype='d')
        dtype = M
        M = N
    m = greater_equal(subtract.outer(arange(N), arange(M)), -k)
    if dtype is None:
        return m
    else:
        return m.astype(dtype)
Example #17
 def interp_at(self, x):
     """Barycentric interpolation"""
     scalar_x = isscalar(x)
     if have_Cython:
         y = bary_interp(self.Xs, self.Ys, self.weights, asfarray(atleast_1d(x)))
         if scalar_x:
             y = y[0]
     else:
         # split x if necessary to avoid overflow
         max_size = 100000
         if scalar_x:
             x_parts = [atleast_1d(x)]
         elif x.size <= max_size:
             x_parts = [x]
         else:
             n_parts = (x.size * len(self.Xs) + max_size - 1) // max_size
             x_parts = array_split(x, n_parts)
         # now interpolate each part of x
         results = []
         for x_p in x_parts:
             xdiff = subtract.outer(x_p, self.Xs)
             ind = where(xdiff == 0)
             xdiff[ind] = 1
             temp = self.weights / xdiff
             num = dot(temp, self.Ys)
             den = temp.sum(axis=-1)
             # Tricky case which can occur when ends of the interval are
             # almost equal.  xdiff can be close to but nonzero, but the sum
             # in the denominator can be exactly zero.
             if (den == 0).any():
                 num[den == 0] = self.Ys[abs(xdiff[den == 0]).argmin(axis=-1)]
                 den[den == 0] = 1
             ret = array(num / den)
             if len(ind[0]) > 0:
                 ret[ind[:-1]] = self.Ys[ind[-1]]
             results.append(ret)
         # concatenate results
         if scalar_x:
             y = squeeze(results)
         else:
             y = concatenate(results)
     return y
Example #18
    def full_grid_interp(self, p, fs, X, fg_cache):
        # accept X as a list of arrays
        den = 1
        if have_Cython:
            one_over_x_m_xi_list = [] # C
        else:
            one_over_x_m_xi_grid = None
        for i, e in enumerate(p):
            if (i, e) in fg_cache:
                one_over_x_m_xi, den_factor = fg_cache[(i, e)]
            else:
                Xs = self.xroot_cache[i][e]
                Ws = self.cheb_weights_cache[e]
                diff = subtract.outer(X[i], Xs)
                mask = (diff == 0).any(axis=-1)
                if any(mask):
                    w = where(diff == 0)
                    one_over_x_m_xi = zeros_like(diff)
                    one_over_x_m_xi[w] = 1
                    one_over_x_m_xi[~mask] = Ws/(diff[~mask])
                else:
                    one_over_x_m_xi = Ws/diff
                den_factor = sum(one_over_x_m_xi, axis=1)
                fg_cache[(i, e)] = one_over_x_m_xi, den_factor

            den *= den_factor
            if have_Cython:
                one_over_x_m_xi_list.append(one_over_x_m_xi) # C
            else:
                if one_over_x_m_xi_grid is None:
                    one_over_x_m_xi_grid = one_over_x_m_xi
                else:
                    newshape = (one_over_x_m_xi.shape[0],) + (1,) + (one_over_x_m_xi.shape[-1],)
                    one_over_x_m_xi_grid = one_over_x_m_xi_grid[...,newaxis] * one_over_x_m_xi.reshape(newshape)
                one_over_x_m_xi_grid = one_over_x_m_xi_grid.reshape((X[0].shape[0], -1))

        if have_Cython:
            num = c_dense_grid_interp(one_over_x_m_xi_list, fs)
        else:
            num = dot(one_over_x_m_xi_grid, fs)
        return num / den
Example #19
def histogram2D(x, nbins=100, axes=None, nbatch=1000, normalize=True):
    """
    Non-greedy two-dimensional histogram.

    @param x: input array of rank two
    @type x: numpy array
    @param nbins: number of bins
    @type nbins: integer
    @param axes: x- and y-axes used for binning the data (if provided this will be used instead of <nbins>)
    @type axes: tuple of two one-dimensional numpy arrays
    @param nbatch: size of batch that is used to sort the data into the 2D grid
    @type nbatch: integer
    @param normalize: specifies whether histogram should be normalized
    @type normalize: boolean

    @return: 2-rank array storing histogram, tuple of x- and y-axis
    """
    from numpy import linspace, zeros, argmin, fabs, subtract, transpose
    
    if axes is None:
        
        lower, upper = x.min(0), x.max(0)
        axes = [linspace(lower[i], upper[i], nbins) for i in range(lower.shape[0])]

    H = zeros((len(axes[0]), len(axes[1])))

    while len(x):

        y = x[:nbatch]
        x = x[nbatch:]

        I = transpose([argmin(fabs(subtract.outer(y[:, i], axes[i])), 1) for i in range(2)])

        for i, j in I: H[i, j] += 1

    if normalize:
        H = H / H.sum() / (axes[0][1] - axes[0][0]) / (axes[1][1] - axes[1][0])

    return H, axes
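A short usage sketch for histogram2D with synthetic Gaussian data (parameters are arbitrary):

from numpy.random import default_rng

data = default_rng(42).normal(size=(5000, 2))
H, (ax_x, ax_y) = histogram2D(data, nbins=50)
print(H.shape)   # (50, 50)
print(H.sum() * (ax_x[1] - ax_x[0]) * (ax_y[1] - ax_y[0]))   # ~1.0 when normalized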
Example #21
def FiniteBasisV(x, c, dc, s):
    """
  Finite support radial basis functions
  
  
  TYPICAL USAGE
  =============

    >>> from numpy import array
    >>> x = array([[0.1,0.1],[0.0,0.0],[1.0,0.0]])
    >>> c = array([[0.0,0.0],[1.0,0.0],[0.0,0.5],[-1.0,-1.0]])
    >>> dc = array([[1.0,0.0],[0.0,0.0],[0.0,1.0],[0.1,0.1]])
    >>> s = array([0.9,1.5,1.0,5.0])
    >>> FiniteBasisV(x,c,dc,s)
    array([[ 0.10569188,  0.        , -0.27556   ,  0.03589389],
           [ 0.        ,  0.        , -0.28125   ,  0.033856  ],
           [ 0.        ,  0.        , -0.        ,  0.0384    ]])
       
  """
    R = array([subtract.outer(i, j) for i, j in zip(x.T, c.T)]) / s
    r = sqrt(sum(R * R, 0))
    rho = (r * r * r * r - 2 * r * r + 1) * (r < 1.0)
    return rho * (sum(R.transpose(1, 2, 0) * dc, -1))
Example #22
 def define_spin_system(self,peaks, tol=0.8):
 
     from numpy import mean, subtract, fabs, argmin, array, nonzero, argmax, sum, std, \
          argsort, concatenate, compress, take
 
     ## clusters CA shifts
 
     ## TODO: CA dimension hard coded
 
     clusters, labels = self.cluster_points(peaks[:,:1],tol=[tol])
     ## two distinct CA resonances (regular case)
     
     leftover_peaks = None
     if len(clusters) == 2:
 
         ## detect CA-CA peak
 
         ca_shifts = array(list(map(mean, clusters)))
         ca_peaks = peaks[argmin(fabs(subtract.outer(ca_shifts,peaks[:,1])),1)]
 
         ## more intense peak is CA(i)
 
         if fabs(ca_peaks[0][-1]) > fabs(ca_peaks[1][-1]):
 
             shifts_i = peaks[nonzero(labels==0)[0]]
             shifts_im= peaks[nonzero(labels==1)[0]]
 
         else:
             
             shifts_i = peaks[nonzero(labels==1)[0]]
             shifts_im= peaks[nonzero(labels==0)[0]]
 
     ## only one CA resonance (related to prolines?)
 
     elif len(clusters) == 1:
 
         ## TODO: discuss with Vincent whether this is correct
 
         shifts_im= peaks[nonzero(labels==0)[0]]
         shifts_i = None
 
     ## more than two CA resonances (i.e. maybe the spin-system is contaminated with
     ## a resonance from another spin-system)
     ## keep maximally populated clusters
             
     else:
         print('more than two CA-carbons')
         
         index  = argmax(list(map(len, clusters)))
         shifts = compress(labels==index,peaks,0)
         mean_nh= take(shifts,(2,3),1).mean(0)
         std_nh = std(take(shifts,(2,3),1),0)
         
         ## select peaks with amide shifts that are closest to reference
 
         distances = []
         for index in range(len(clusters)):
             shifts = compress(labels==index,peaks,0)
             distances.append(sum((mean_nh-take(shifts,(2,3),1).mean(0))**2/std_nh**2)**0.5)
 
         index1, index2 = argsort(distances)[:2]
         
         ca_shifts = array([mean(clusters[i],0)[0] for i in [index1, index2]])
         ca_peaks = peaks[argmin(fabs(subtract.outer(ca_shifts,peaks[:,1])),1)]
 
         ## more intense peak is CA(i)
 
         if fabs(ca_peaks[0][-1]) > fabs(ca_peaks[1][-1]):
 
             shifts_i = peaks[nonzero(labels==index1)[0]]
             shifts_im= peaks[nonzero(labels==index2)[0]]
 
         else:
             
             shifts_i = peaks[nonzero(labels==index2)[0]]
             shifts_im= peaks[nonzero(labels==index1)[0]]
 
         indices = list(range(len(clusters)))
         indices.remove(index1)
         indices.remove(index2)
 
         leftover_peaks = concatenate([compress(labels==index,peaks,0) for index in indices],0)
     
     return shifts_im, shifts_i, leftover_peaks
Example #23
 def __call__(self,x,lda,thetas,wfunc,nw=1.0,xsub=None,eps0=1e-10):
   if xsub is None:
     if thetas.ndim==1:
       tau = tile(thetas,(x.shape[0],1))
     else:
       tau = thetas
     v = (
       tile(x,(self.c0.shape[0],1,1)).transpose(1,0,2)
       -
       tile(self.c0,(x.shape[0],1,1))
     )
     w = (
       tile(x,(self.c1.shape[0],1,1)).transpose(1,0,2)
       - 
       tile(self.c1,(x.shape[0],1,1))
     )
     p = sum([1,-1]*self.n[:,[1,0]]*v,-1)/self.u
     mu = sum(self.n*v,-1)/(self.u*self.u)
     d = nan*ones_like(mu)
     if sum(mu<=0)>0:
       d[mu<=0] = sqrt(sum(v[mu<=0]**2,-1))
     if sum(mu>=1)>0:
       d[mu>=1] = sqrt(sum(w[mu>=1]**2,-1))
     if sum(logical_and(mu<1,mu>0))>0:
       d[logical_and(mu<1,mu>0)] = sqrt(
         sum(v[logical_and(mu<1,mu>0)]*v[logical_and(mu<1,mu>0)],-1)
         -
         (mu*self.u)[logical_and(mu<1,mu>0)]**2
       )
     angs = subtract.outer(tau,arctan2(*self.n.T)).transpose(1,0,2)
     W0 = exp(-d*d/(2*lda*lda))
     W1 = wfunc(angs,p)
     WW = W1*W0
     W = (1.0/exp(exp(nw)))+sum(WW,-1).T
     return(W)
   else:
     W = None
     for i,j in zip(range(0,x.shape[0]+xsub,xsub)[:-1],range(0,x.shape[0]+xsub,xsub)[1:]):
       xs = x[i:j]
       if thetas.ndim==1:
         tau = tile(thetas,(xs.shape[0],1))
       else:
         tau = thetas[i:j]
       v = (
         tile(xs,(self.c0.shape[0],1,1)).transpose(1,0,2)
         -
         tile(self.c0,(xs.shape[0],1,1))
       )
       w = (
         tile(xs,(self.c1.shape[0],1,1)).transpose(1,0,2)
         - 
         tile(self.c1,(xs.shape[0],1,1))
       )
       p = sum([1,-1]*self.n[:,[1,0]]*v,-1)/self.u
       mu = sum(self.n*v,-1)/(self.u*self.u)
       d = nan*ones_like(mu)
       if sum(mu<=0)>0:
         d[mu<=0] = sqrt(sum(v[mu<=0]**2,-1))
       if sum(mu>=1)>0:
         d[mu>=1] = sqrt(sum(w[mu>=1]**2,-1))
       if sum(logical_and(mu<1,mu>0))>0:
         kai = (
           sum(v[logical_and(mu<1,mu>0)]*v[logical_and(mu<1,mu>0)],-1)
           -
           (mu*self.u)[logical_and(mu<1,mu>0)]**2
         )
         kai[abs(kai)<eps0] = 0.0
         d[logical_and(mu<1,mu>0)] = sqrt(kai)
       angs = subtract.outer(tau,arctan2(*self.n.T)).transpose(1,0,2)
       W0 = exp(-d*d/(2*lda*lda))
       W1 = wfunc(angs,p)
       WW = W1*W0
       Ws = (1.0/exp(exp(nw)))+sum(WW,-1).T
       if W is None:
         W = Ws
       else:
         W = vstack([W,Ws])
     return(W)
Example #24
 def compute_cost_matrix(self, shifts_i, shifts_j, previous=False):
     C = fabs(subtract.outer(shifts_i, shifts_j))
     return C
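This one-liner is the generic pairwise-|difference| pattern that recurs throughout these examples, e.g.:

from numpy import fabs, subtract

a = [1.0, 4.0]
b = [0.5, 2.0, 5.0]
print(fabs(subtract.outer(a, b)))
# [[0.5 1.  4. ]
#  [3.5 2.  1. ]]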
Example #25
from numpy import linspace, zeros, subtract, exp
from numpy.random import multivariate_normal

# Create a spatial axis and use it to define a Gaussian process
N = 8
x = linspace(1, N, N)
mean = zeros(N)
covariance = exp(-0.1 * subtract.outer(x, x)**2)

# sample from the Gaussian process
samples = multivariate_normal(mean, covariance, size=20000)
samples = [samples[:, i] for i in range(N)]

# use matrix_plot to visualise the sample data
from inference.plotting import matrix_plot
matrix_plot(samples, filename='matrix_plot_example.png')