Example #1
def test_deconvolution_kernel():
    """ Check C vs python deconvolution kernel """
    from os.path import join, dirname
    from numpy import add, exp
    from numpy.testing import assert_allclose
    from purify import kernels as create_kernels, read_visibility, \
        __file__ as path

    path = join(dirname(path), "data", "images", "Coverages", "cont_sim4.vis")
    visibility = read_visibility(path)

    dims = 256, 256
    oversampling = 2, 2
    interpolation = 4, 4
    kernels = create_kernels(visibility, dims, oversampling, interpolation)

    def axis(image, oversampling, interpolation):
        from numpy import arange, pi
        scale = interpolation * pi / float(oversampling * image * image * 2)
        return (arange(0, image) - oversampling) * scale

    xaxis = axis(dims[0], oversampling[0], interpolation[0])
    yaxis = axis(dims[1], oversampling[1], interpolation[1])

    expected = exp(0.5 * add.outer(xaxis * xaxis, yaxis * yaxis))

    assert_allclose(expected, kernels.deconvolution)

    shifts = exp(-1j * (
        visibility['u'] * dims[0] * 0.5 + visibility['v'] * dims[1] * 0.5
    ))
    assert_allclose(shifts, kernels.shifts)
Example #2
def expectation(model, instance):
    '''
    Perform the forward-backward algorithm to calculate the second
    component of the derivative (the expected feature counts).
    '''
    # get the cached score

    L = len(instance)
    T = model.nr_tags
    A = model.nr_attrs
    g0, g = build_score_cache(model.w, L, T, A, instance)

    a = forward(g0, g, L, T)
    b = backward(g, L, T)

    logZ = logsumexp(a[L-1,:])

    E = defaultdict(float)
    f = instance.features_table

    c = exp(g0 + b[0,:] - logZ).clip(0., 1.)
    for j in xrange(T):
        for k in f[0,None,j]:
            E[k] += c[j]

    for i in xrange(1, L):
        c = exp(add.outer(a[i-1,:], b[i,:]) + g[i,:,:] - logZ).clip(0.,1.)
        for j in range(T):
            for k in range(T):
                for e in f[i,j,k]:
                    E[e] += c[j,k]

    return E
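The add.outer call above is what turns the two length-T message vectors into the T-by-T table of pairwise marginals. A minimal, self-contained sketch of that step with made-up numbers (a_prev, b_cur, g_cur and logZ are placeholders, not values produced by the model above):

from numpy import add, array, exp

a_prev = array([-1.0, -2.0])     # forward log-messages at position i-1
b_cur = array([-0.5, -1.5])      # backward log-messages at position i
g_cur = array([[0.0, -1.0],
               [-1.0, 0.0]])     # transition log-potentials at position i
logZ = 0.0                       # placeholder log-normalizer
# c[j, k] is proportional to p(y_{i-1} = j, y_i = k | x)
c = exp(add.outer(a_prev, b_cur) + g_cur - logZ)
assert c.shape == (2, 2)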
Example #3
def expectation(model, instance):
    '''
    Perform the forward-backward algorithm to calculate the second
    component of the derivative (the expected feature counts).
    '''
    # get the cached score

    L = len(instance)
    T = model.nr_tags
    A = model.nr_attrs
    g0, g = build_score_cache(model.w, L, T, A, instance)

    a = forward(g0, g, L, T)
    b = backward(g, L, T)

    logZ = logsumexp(a[L - 1, :])

    E = defaultdict(float)
    f = instance.features_table

    c = exp(g0 + b[0, :] - logZ).clip(0., 1.)
    for j in xrange(T):
        for k in f[0, None, j]:
            E[k] += c[j]

    for i in xrange(1, L):
        c = exp(add.outer(a[i - 1, :], b[i, :]) + g[i, :, :] - logZ).clip(
            0., 1.)
        for j in range(T):
            for k in range(T):
                for e in f[i, j, k]:
                    E[e] += c[j, k]

    return E
Example #4
def GenerateTwoScreens(nfft, r0):
    """

      Generate phase screens with a Kolmogorov spectrum of atmospheric
      disturbances [c.f. Tatarski 1961,1971], such that the phase structure
      function is given by

           D(r) = <[phi(r')-phi(r'+r)]**2>
                = 6.88*(r/r0)**(5/3)

       where r0 is the Fried parameter.

       This version returns two screens, because it's easier to do it that
       way.
    """
    C = sqrt(0.0229*(float(nfft)/r0)**(5.0/3.0))
    # Generate a 2-d array populated with rsquared=xsquared+ysquared
    r = arange(nfft)
    r[nfft // 2:] = nfft - r[nfft // 2:]
    rsq = r**2
    rsq = add.outer(rsq,rsq)
    rsq[0, 0] = 1.0 # To solve pole at origin problem
    sample = random.normal(size=(nfft, nfft))+1j*random.normal(size=(nfft, nfft))
    sample *= C*rsq**(-11.0/12.0)
    result = fft.fft2(sample)
    return (result.real, result.imag)
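A usage sketch, assuming GenerateTwoScreens and its numpy imports (sqrt, arange, add, random, fft) are in scope; the two screens are simply the real and imaginary parts of a single complex FFT, which is why generating them in pairs is the easy way:

screen_a, screen_b = GenerateTwoScreens(nfft=64, r0=8.0)
assert screen_a.shape == screen_b.shape == (64, 64)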
Example #5
    def take(self, indices):
        """Flatten the shape of the RTs, then take only dimensions indexed in 'indices' and remove the remaining dimensions.

        Input:
            indices: a numpy.array of dimensions to be kept.
        Output:
            B: the BasicStats obtained from the remaining dimensions.
        """
        d1 = len(indices)

        size = 0
        for i in xrange(self.T):
            size += d1 ** self.orders[i]
        theindices = zeros(size, "int32")
        ofs = 0
        for i in xrange(self.T):
            if self.orders[i] == 0:
                a = array([ofs])
            else:
                a = indices
                for j in xrange(self.orders[i] - 1):
                    a = add.outer(a * self.d, indices).ravel()
            sz = d1 ** self.orders[i]
            theindices[ofs : ofs + sz] = a + self.ofs[i]
            ofs += sz

        return self._new_dA((d1,), self.A.take(theindices, 1))
Example #6
def test_sara_analysisop_hardening():
    """ Hardens sopt_sara_analysisop bindings against code changes """
    from numpy import add, exp, arange, mean, max, min
    from numpy.testing import assert_allclose
    from purify import SparsityOperator

    image_size = 256, 256
    nlevels = 4
    types = ["DB1", "DB2", "DB10"]
    functions = SparsityOperator(image_size, nlevels, types)

    sigma = 10.0, 8.0
    xaxis = (arange(image_size[0]) / float(image_size[0]) - 0.5) / sigma[0]
    yaxis = (arange(image_size[1]) / float(image_size[1]) - 0.5) / sigma[1]

    gaussian = exp(-add.outer(xaxis * xaxis, yaxis * yaxis))

    actual = functions.analyze(gaussian)

    assert_allclose(
        mean(mean(actual, -1), -1),
        [0.036007226135801075, 0.036007541133741106, 0.03600715848225703] 
    )
    assert_allclose(
        min(min(actual, -1), -1), 
        [-0.00211575115187038, -0.0016413049107621763, -0.000927119110859153]
    )
    assert_allclose(
        max(max(actual, -1), -1), 
        [9.237324386040596, 9.23745215540613, 9.237604022698143]
    )
Example #7
def make_k_values(boxsize, ngrid):
    """ 
    build the grid of |k| for the given box size, in a vaguely memory efficient way
    returns k (n,) array, inv_k2 (n,n,n) array, with 0 at i=j=k=0
    """
    # 1d component of k
    k1 = arange(ngrid)
    k1[1 + ngrid // 2:] -= ngrid
    k1 = k1 * (2 * pi / float(boxsize))
    k2 = square(k1)
    kmag = add.outer(add.outer(k2, k2), k2)
    inv_k2 = kmag.copy()
    kmag = sqrt(kmag)
    inv_k2[0, 0, 0] = 1.0
    inv_k2 = 1.0 / inv_k2
    inv_k2[0, 0, 0] = 0.0
    return k1, kmag, inv_k2
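The k1 construction reproduces the standard FFT frequency ordering, except that the Nyquist mode is kept positive; that is harmless here because only the square of k is used. A quick cross-check against numpy.fft.fftfreq:

from numpy import arange, pi, allclose
from numpy.fft import fftfreq

ngrid, boxsize = 4, 1.0
k1 = arange(ngrid)
k1[1 + ngrid // 2:] -= ngrid     # FFT ordering: 0, 1, 2, -1
k1 = k1 * (2 * pi / boxsize)
# fftfreq puts -2 in the Nyquist slot, so compare the squares
assert allclose(k1 ** 2, (2 * pi * fftfreq(ngrid, d=boxsize / ngrid)) ** 2)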
Example #8
def _choose_brackets(y):
    """Return the indices between which we should search for maxima of `y`."""
    dsd = diff(sign(diff(y)))
    indices = flatnonzero(dsd < 0)
    left = reshape(add.outer(indices, [0, 1]), -1)
    left = _remove_adjacent_duplicates(left)
    right = left + 1
    return left, right
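The sign-of-slope trick in isolation; a minimal sketch (independent of the private _remove_adjacent_duplicates helper) showing how diff(sign(diff(y))) < 0 flags interior maxima:

from numpy import array, diff, sign, flatnonzero

y = array([0., 1., 0., 2., 5., 1.])
dsd = diff(sign(diff(y)))         # negative wherever the slope flips from + to -
peaks = flatnonzero(dsd < 0) + 1  # +1 because dsd[i] describes y[i + 1]
assert (peaks == array([1, 4])).all()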
Example #9
    def sample_weights(self):
        """
        Create a random set of expansion coefficients.
        """
        from numpy import add
        from numpy.random import standard_normal

        k = self._periodicities()
        k = add.outer(k ** 2, k ** 2)
        self.set([standard_normal(k.shape) for i in range(4)])   
        self.normalize(True)
Example #10
    def sample_weights(self):
        """
        Create a random set of expansion coefficients.
        """
        from numpy import add
        from numpy.random import standard_normal

        k = self._periodicities()
        k = add.outer(k**2, k**2)
        self.set([standard_normal(k.shape) for i in range(4)])
        self.normalize(True)
Example #11
 def getELBO():
     '''
     Calculate ELBO following Blei et al, equation (21)
     '''
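     # x, m, s2, sigma2 and phi are assumed to be defined in the enclosing scope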
     t1 = log(s2) - m / sigma2
     t1 = t1.sum()
     t2 = -0.5 * add.outer(x**2, s2 + m**2)
     t2 += outer(x, m)
     t2 -= log(phi)
     t2 *= phi
     t2 = t2.sum()
     return t1 + t2  #log_p_x + log_p_mu + log_p_sigma - log_q_mu - log_q_sigma
Example #12
 def _create_and_rotate_coordinate_arrays(self, x, y, orientation):
     """
     Create pattern matrices from x and y vectors, and rotate
     them to the specified orientation.
     """
     # Using this two-liner requires that x increase from left to
     # right and y decrease from left to right; I don't think it
     # can be rewritten in so little code otherwise - but please
     # prove me wrong.
     pattern_y = subtract.outer(cos(orientation)*y, sin(orientation)*x)
     pattern_x = add.outer(sin(orientation)*y, cos(orientation)*x)
     return pattern_x, pattern_y
Example #13
def powerspec_bins(ngrid, boxsize):
    """
    find power spectrum bins for a cubic grid of ngrid^3 Fourier modes.
    Assumes the FFT convention of 0, ..., n/2, -n/2+1, ..., -1
    ngrid   - num cells on side of cube
    boxsize - size of the box in real space

    returns kmin, kmax, kbins, kvol
    kmin  - the lower bound of the bin
    kmax  - the upper bound of the bin
    kbins - index (0, ..., m) of the bin of each cell
    kvol  - the volume in k space of all modes in that bin
    """

    mid = ngrid // 2
    # find the magnitude of the indices (i.e. ix**2+iy**2+iz**2 in the FFT convention)
    n1 = arange(ngrid)
    n1[1 + mid:] -= ngrid
    n2 = square(n1)
    nmag = sqrt(add.outer(add.outer(n2, n2), n2)).ravel()

    nbins = (-1, ) + tuple(arange(mid - 1) + 1.5) + (ngrid * 2, )
    #print 'nbins', nbins
    kbins = digitize(nmag, nbins) - 1
    assert (kbins.min() == 0)
    assert (kbins.max() == len(nbins) - 2)

    # multiplier to go to k-space
    dk = 2.0 * pi / boxsize

    kmin = (array(nbins) * dk)[:-1]
    kmin[0] = 0

    kmax = (array(nbins) * dk)[1:]
    kmax[-1] = mid * dk * sqrt(3.0)

    kvol = bincount(kbins) * (dk * dk * dk)
    return kmin, kmax, kbins, kvol
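A usage sketch, assuming powerspec_bins and its numpy imports (arange, square, add, sqrt, digitize, array, bincount, pi) are in scope:

kmin, kmax, kbins, kvol = powerspec_bins(ngrid=8, boxsize=1.0)
assert kbins.shape == (8 ** 3,)   # one bin index per Fourier cell
assert kmin[0] == 0 and (kmax > kmin).all()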
Example #14
def FastFloyd(w):
    d = w + 0
    N = len(d)
    oldd = d + 0
    for k in range(N):
        print k,
        newd = add.outer(oldd[:, k], oldd[k])
        m = greater(newd, 700)
        newd = (1 - m) * newd + m * oldd
        mask = less(newd, oldd)
        mmask = 1 - mask
        g = mask * newd + mmask * oldd
        oldd = g + 0
    return g
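A small sanity check, assuming FastFloyd and its numpy imports (array, add, greater, less) are in scope; entries above 700 are treated as unreachable by the overflow guard:

W = array([[0, 1, 4],
           [1, 0, 2],
           [4, 2, 0]])
D = FastFloyd(W)
# the 0 -> 2 path relaxes through node 1: 1 + 2 = 3
assert (D == array([[0, 1, 3],
                    [1, 0, 2],
                    [3, 2, 0]])).all()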
Example #15
def _features_from_shape(M,N,appearance):
    """Generate all rectangular Haar-like features, of a specific 'appearance'.
    All rectangles are bounded by a M-by-N image patch.

    Input:
        M, N: number of rows and columns of an image patch
        appearance: an 'int' 2D numpy.array describing the appearance. E.g. array([[1,-1],[-1,1]]).
    Output:
        A: a 1D numpy.array of projection coefficients, in vectorized integral-image form
        ind: a 2D numpy.array of indices of each feature, in vectorized integral-image form
        ind2: a 2D numpy.array of 4 corners of each feature in the patch
    """
    (ny,nx) = appearance.shape
    A = zeros([ny+1,nx+1],'int')
    sum1 = appearance.sum()

    a1 = appearance*(ny*nx-sum1)
    a0 = (1-appearance)*sum1
    aa = a1-a0
    
    A[0:-1,0:-1] += aa
    A[1:,1:] += aa
    A[0:-1,1:] -= aa
    A[1:,0:-1] -= aa
    
    mask = A.astype('bool')
    A = A.ravel()[mask.ravel()]
    
    cny = 0
    for y1 in xrange(M): cny += (M-y1-1)/ny
    cnx = 0
    for x1 in xrange(N): cnx += (N-x1-1)/nx
    
    cnt = cnx*cny

    ind = zeros([cnt,len(A)],'int')
    ind2 = zeros([cnt,4],'int')
    
    i = 0
    for y1 in xrange(M):
        for y2 in xrange(y1+ny,M,ny):
            y = arange(y1,y2+1,(y2-y1)/ny)*N
            for x1 in xrange(N):
                for x2 in xrange(x1+nx,N,nx):
                    x = arange(x1,x2+1,(x2-x1)/nx)
                    ind[i] = add.outer(y,x)[mask]
                    ind2[i] = [y1,x1,y2,x2]
                    i += 1

    return (A, ind, ind2)
Example #16
def FastFloyd( w ):
    d = w + 0
    N = len( d )
    oldd = d + 0
    for k in range( N ):
        print k,
        newd = add.outer( oldd[:,k], oldd[k] )
        m = greater( newd, 700 )
        newd = (1-m)*newd + m * oldd
        mask = less( newd, oldd )
        mmask = 1-mask
        g = mask*newd + mmask * oldd
        oldd = g + 0
    return g
Example #17
def _dlikelihood(w, instance, model):
    '''
    Calculate the gradient of an instance

    - param[in] w           The weight vector
    - param[in] instance    The instance
    - param[in] model       The model
    '''
    grad = zeros(w.shape[0], dtype=float)

    L = len(instance)
    T = model.nr_tags
    A = model.nr_attrs

    build_instance(model.attrs, model.tags, instance, True)
    g0, g = build_score_cache(w, L, T, A, instance)

    F = instance.correct_features
    for k, v in F.iteritems():
        grad[k] += v

    a = forward(g0, g, L, T)  # forward
    b = backward(g, L, T)  # backward

    logZ = logsumexp(a[L - 1, :])

    U = instance.unigram_features_table
    B = instance.bigram_features_table

    c = exp(g0 + b[0, :] - logZ).clip(0., 1.)
    for j in xrange(T):
        grad[U[0, j]] -= c[j]

    for i in xrange(1, L):
        c = exp(add.outer(a[i - 1, :], b[i, :]) + g[i, :, :] - logZ).clip(
            0., 1.)
        # The following code is equivalent to this:
        #for j in range(T):
        #    for k in range(T):
        #        grad[U[i,k]] -= c[j,k]
        #        grad[B[j,k]] -= c[j,k]
        for k in range(T):
            grad[U[i, k]] -= c[:, k].sum()
        grad[range(A * T, (A + T) * T)] -= c.flatten()

    return grad
Example #18
def expectation(N, K, log_M):
    """
	Expectation of the sufficient statistics given ``x`` and current
	parameter settings.
	"""
    g0 = log_M[0, 0]
    g = log_M[1:]
    a = forward(g0, g, N, K)
    b = backward(g, N, K)
    print "Forward:"
    print a
    print "Backward:"
    print b
    # log-normalizing constant
    logZ = misc.logsumexp(a[N - 1, :])

    E = defaultdict(float)

    # The first factor needs to be special case'd
    # E[ f( y_0 ) ] = p(y_0 | y_[1:N], x) * f(y_0)
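    # NOTE: ``f`` (the feature table) is assumed to be defined in the
    # enclosing scope; it is not a parameter of this function.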
    c = exp(g0 + b[0, :] - logZ).clip(0.0, 1.0)
    for y in xrange(K):
        p = c[y]
        if p < 1e-40:
            continue  # skip really small updates.
        for k in f[0, None, y]:
            E[k] += p

    for t in xrange(1, N):
        # vectorized computation of the marginal for this transition factor
        c = exp((add.outer(a[t - 1, :], b[t, :]) + g[t - 1, :, :] - logZ)).clip(0.0, 1.0)

        for yp in xrange(K):
            for y in xrange(K):
                # we can also use the following to compute ``p``, but it's
                # quite a bit slower than the vectorized computation of ``c``.
                # p = exp(a[t-1,yp] + g[t-1,yp,y] + b[t,y] - logZ).clip(0.0, 1.0)
                p = c[yp, y]
                if p < 1e-40:
                    continue  # skip really small updates.
                # expectation of this factor is p*f(t, yp, y)
                for k in f[t, yp, y]:
                    E[k] += p

    return E
Example #19
def expectation(N, K, log_M):
    """
	Expectation of the sufficient statistics given ``x`` and current
	parameter settings.
	"""
    g0 = log_M[0, 0]
    g = log_M[1:]
    a = forward(g0, g, N, K)
    b = backward(g, N, K)
    print "Forward:"
    print a
    print "Backward:"
    print b
    # log-normalizing constant
    logZ = misc.logsumexp(a[N - 1, :])

    E = defaultdict(float)

    # The first factor needs to be special case'd
    # E[ f( y_0 ) ] = p(y_0 | y_[1:N], x) * f(y_0)
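    # NOTE: ``f`` (the feature table) is assumed to be defined in the
    # enclosing scope; it is not a parameter of this function.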
    c = exp(g0 + b[0, :] - logZ).clip(0.0, 1.0)
    for y in xrange(K):
        p = c[y]
        if p < 1e-40: continue  # skip really small updates.
        for k in f[0, None, y]:
            E[k] += p

    for t in xrange(1, N):
        # vectorized computation of the marginal for this transition factor
        c = exp(
            (add.outer(a[t - 1, :], b[t, :]) + g[t - 1, :, :] - logZ)).clip(
                0.0, 1.0)

        for yp in xrange(K):
            for y in xrange(K):
                # we can also use the following to compute ``p``, but it's
                # quite a bit slower than the vectorized computation of ``c``.
                #p = exp(a[t-1,yp] + g[t-1,yp,y] + b[t,y] - logZ).clip(0.0, 1.0)
                p = c[yp, y]
                if p < 1e-40: continue  # skip really small updates.
                # expectation of this factor is p*f(t, yp, y)
                for k in f[t, yp, y]:
                    E[k] += p

    return E
Example #20
def _dlikelihood(w, instance, model):
    '''
    Calculate the gradient of an instance

    - param[in] w           The weight vector
    - param[in] instance    The instance
    - param[in] model       The model
    '''
    grad = zeros(w.shape[0], dtype=float)

    L = len(instance)
    T = model.nr_tags
    A = model.nr_attrs

    build_instance(model.attrs, model.tags, instance, True)
    g0, g = build_score_cache(w, L, T, A, instance)

    F = instance.correct_features
    for k, v in F.iteritems():
        grad[k] += v

    a = forward(g0, g, L, T)    # forward
    b = backward(g, L, T)       # backward

    logZ = logsumexp(a[L-1,:])

    U = instance.unigram_features_table
    B = instance.bigram_features_table

    c = exp(g0 + b[0,:] - logZ).clip(0., 1.)
    for j in xrange(T):
        grad[U[0,j]] -= c[j]

    for i in xrange(1, L):
        c = exp(add.outer(a[i-1,:], b[i,:]) + g[i,:,:] - logZ).clip(0.,1.)
        # The following code is equivalent to this:
        #for j in range(T):
        #    for k in range(T):
        #        grad[U[i,k]] -= c[j,k]
        #        grad[B[j,k]] -= c[j,k]
        for k in range(T):
            grad[U[i,k]] -= c[:,k].sum()
        grad[range(A*T, (A+T)*T)] -= c.flatten()

    return grad
Example #21
    def _mod_van_der_walls_mixing(self, mole_fraction, P, T):
        alpha = self._Twu91(T)
        attraction = 0.45724 * alpha * (self.R * self.Tc).pow(2)/(self.Pc * 100000)
        cohesion = 0.07780 * self.R * self.Tc / (self.Pc * 100000)
        # aij = sqrt(ai*aj)*(1 - kij) = aji
        mixture = outer(attraction, attraction)
        mixture = power(mixture,0.5)
        mixture = multiply(mixture, subtract(1,self.interaction_params))
        a_ij = mixture * P * 100000 / (self.R * T) ** 2
        b = cohesion * P * 100000 / (self.R * T)

        bi = numpy.asarray(b.pow(3/4))
        b_ij = (add.outer(bi,bi)/2)
        b_ij = b_ij**(4/3)

        A = npsum(npsum(multiply(a_ij, outer(mole_fraction, mole_fraction))))
        B = npsum(npsum(multiply(b_ij, outer(mole_fraction, mole_fraction))))
        return A, B, a_ij, b
Example #22
    def expectation(self, x):
        """
        Expectation of the sufficient statistics given ``x`` and current
        parameter settings.
        """
        N = x.N
        K = self.K
        f = x.feature_table
        (g0, g) = self.log_potentials(x)

        a = self.forward(g0, g, N, K)
        b = self.backward(g, N, K)

        # log-normalizing constant
        logZ = logsumexp(a[N - 1, :])

        E = defaultdict(float)

        # The first factor needs to be special case'd
        # E[ f( y_0 ) ] = p(y_0 | y_[1:N], x) * f(y_0)
        c = exp(g0 + b[0, :] - logZ).clip(0.0, 1.0)
        for y in xrange(K):
            p = c[y]
            for k in f[0, None, y]:
                E[k] += p

        for t in xrange(1, N):
            # vectorized computation of the marginal for this transition factor
            c = exp((add.outer(a[t - 1, :], b[t, :]) + g[t - 1, :, :] -
                     logZ)).clip(0.0, 1.0)

            for yp in xrange(K):
                for y in xrange(K):
                    # we can also use the following to compute ``p``, but it's
                    # quite a bit slower than the vectorized computation of ``c``.
                    #p = exp(a[t-1,yp] + g[t-1,yp,y] + b[t,y] - logZ).clip(0.0, 1.0)
                    p = c[yp, y]
                    # expectation of this factor is p*f(t, yp, y)
                    for k in f[t, yp, y]:
                        E[k] += p

        return E
Example #23
    def expectation(self, x):
        """
        Expectation of the sufficient statistics given ``x`` and current
        parameter settings.
        """
        N = x.N
        K = self.K
        f = x.feature_table
        (g0, g) = self.log_potentials(x)

        a = self.forward(g0, g, N, K)
        b = self.backward(g, N, K)

        # log-normalizing constant
        logZ = logsumexp(a[N - 1, :])

        E = defaultdict(float)

        # The first factor needs to be special case'd
        # E[ f( y_0 ) ] = p(y_0 | y_[1:N], x) * f(y_0)
        c = exp(g0 + b[0, :] - logZ).clip(0.0, 1.0)
        for y in xrange(K):
            p = c[y]
            for k in f[0, None, y]:
                E[k] += p

        for t in xrange(1, N):
            # vectorized computation of the marginal for this transition factor
            c = exp((add.outer(a[t - 1, :], b[t, :]) + g[t - 1, :, :] - logZ)).clip(0.0, 1.0)

            for yp in xrange(K):
                for y in xrange(K):
                    # we can also use the following to compute ``p``, but it's
                    # quite a bit slower than the vectorized computation of ``c``.
                    # p = exp(a[t-1,yp] + g[t-1,yp,y] + b[t,y] - logZ).clip(0.0, 1.0)
                    p = c[yp, y]
                    # expectation of this factor is p*f(t, yp, y)
                    for k in f[t, yp, y]:
                        E[k] += p

        return E
Example #24
    def MHV1(self,mole_fraction,P,T):
        g_res = self.unifac_model.gibbs_res(mole_fraction = mole_fraction,T = T, DebugPrint = False)
        
        alpha = self._Twu91(T)
        attraction = 0.45724 * alpha * (self.R * self.Tc).pow(2)/(self.Pc * 100000)
        cohesion = 0.07780 * self.R * self.Tc / (self.Pc * 100000)
        # aij = sqrt(ai*aj)*(1 - kij) = aji
        mixture = outer(attraction, attraction)
        mixture = power(mixture,0.5)
        mixture = multiply(mixture, subtract(1,self.interaction_params))
        a_ij = mixture * P * 100000 / (self.R * T) ** 2
        b = cohesion * P * 100000 / (self.R * T)
        c = self.s * cohesion

        bi = numpy.asarray(b.pow(3/4))
        b_ij = (add.outer(bi,bi)/2)
        b_ij = b_ij**(4/3)
        
        B = npsum(npsum(multiply(b_ij, outer(mole_fraction, mole_fraction))))
        A = B* (npsum(attraction/(cohesion * self.R *T) * mole_fraction)- g_res/0.53087)
        C = npsum(c*mole_fraction)
        return A, B, a_ij, b, C
Example #25
    def Exp(self, x):
        """expectation of x"""
        N = len(x.seq)
        K = self.K

        g0, g = self.currentField(x)

        a = self.forward(g0, g, N, K)
        b = self.backward(g, N, K)
        ### scale Z
        logZ = logsumexp(a[N - 1, :])

        ans = dict()
        # here e = exp(a + b - logZ)
        # at step 0, a == g0
        e0 = exp(g0 + b[0, :] - logZ).clip(0., 1.)
        for y in range(K):
            prob = float(e0[y])
            for k in x[0, None, y]:
                # the k-pos logit from step -1 transfer to label y
                ans[k] = ans.get(k, 0.) + prob

        for t in range(1, N):
            """
            Outer: broadcast: not element-wise operation
            """
            # e_i = {(forward[t-1] + backward[t])*P(y|x)_t-1}/ logZ	    logit of time i
            # or exp(a[t-1,yp] + g[t-1,yp,y] + b[t,y] - logZ)
            ei = exp((add.outer(a[t - 1, :], b[t, :]) + g[t - 1, :, :] -
                      logZ)).clip(0., 1.)
            #ei = exp(a[t-1,:] + g[t-1,:,:] + b[t,:] - logZ).clip(0., 1.)
            #print(ei.shape)
            for yp in range(K):
                for y in range(K):
                    prob = float(ei[yp, y])
                    for k in x[t, yp, y]:
                        ans[k] = ans.get(k, 0.) + prob

        return ans
Example #26
    def expectation(self, x):
        """
        calculate the expectation of the sufficient statistics given ``x`` and the current parameter settings
        :param x: input sequence
        :return:
        """
        assert isinstance(x, Sequence)
        N = x.N
        N_S = self.N_S
        f = x.feature_table

        g0, g = self.log_potentials(x)
        # forward probabilities
        alpha = self.forward(g0, g, N, N_S)
        # backward probabilities
        beta = self.backward(g, N, N_S)
        # normalization factor
        logZ = logexpsum(alpha[N-1, :])

        E = defaultdict(float)

        c = exp(g0 + beta[0, :] - logZ).clip(0.0, 1.0)

        for y in xrange(N_S):
            p = c[y]
            for k in f[0, None, y]:
                E[k] += p

        for t in xrange(1, N):
            # vectorized computation of the marginal for this transition factor
            c = exp((add.outer(alpha[t-1, :], beta[t, :]) + g[t-1, :, :] - logZ)).clip(0.0, 1.0)

            for yt_1 in xrange(N_S):
                for yt in xrange(N_S):
                    p = c[yt_1, yt]
                    # expectation of this factor is p * f(yt_1, yt, xt, t), f is feature function
                    for k in f[t, yt_1, yt]:
                        E[k] += p
        return E
Example #27
def test_sara_synthesisop_hardening():
    """ Hardens sopt_sara_synthesisop bindings against code changes """
    from numpy import add, exp, arange, mean, max, min
    from numpy.testing import assert_allclose
    from purify import SparsityOperator

    image_size = 256, 256
    nlevels = 4
    types = ["DB1", "DB2", "DB10"]
    functions = SparsityOperator(image_size, nlevels, types)

    sigma = 10.0, 8.0
    xaxis = (arange(image_size[0]) / float(image_size[0]) - 0.5) / sigma[0]
    yaxis = (arange(image_size[1]) / float(image_size[1]) - 0.5) / sigma[1]

    gaussian = exp(-add.outer(xaxis * xaxis, yaxis * yaxis))

    analysis = functions.analyze(gaussian)
    actual = functions.synthesize(analysis)

    assert_allclose(
        mean(mean(analysis, -1), -1),
        [0.5761192143816274, 0.5761192143816207, 0.5761192143816083]
    )
    assert_allclose(
        min(min(analysis, -1), -1), 
        [0.5736634410081112, 0.5736634410081048, 0.5736634410080951] 
    )
    assert_allclose(
        max(max(analysis, -1), -1), 
        [0.5773502691896323, 0.5773502691896251, 0.5773502691896143]
    )

    relative = lambda x, y: abs(x - y) / (abs(x) + abs(y))
    assert relative(mean(actual), 0.9978677505256317+0j) < 1e-8
    assert relative(max(actual), 0.99999999999999689+0j) < 1e-8
    assert relative(min(actual), 0.99361422627082718+0j) < 1e-8
Example #28
def distance_matrix(X, Y=None):
    """
    Calculates a matrix of pairwise distances

    @param X: m x n input vector
    @type X: numpy array

    @param Y: k x n input vector or None, which defaults to Y=X
    @type Y: numpy array

    @return: m x k distance matrix
    @rtype: numpy array
    """
    from numpy import add, clip, sqrt, dot, transpose, sum

    if Y is None: Y = X

    if X.ndim < 2: X = X.reshape((1, -1))
    if Y.ndim < 2: Y = Y.reshape((1, -1))

    C = dot(X, transpose(Y))
    S = add.outer(sum(X**2, 1), sum(Y**2, 1))

    return sqrt(clip(S - 2 * C, 0., 1e300))
Example #29
def distance_matrix(X, Y=None):
    """
    Calculates a matrix of pairwise distances

    @param X: m x n input vector
    @type X: numpy array

    @param Y: k x n input vector or None, which defaults to Y=X
    @type Y: numpy array

    @return: m x k distance matrix
    @rtype: numpy array
    """
    from numpy import add, clip, sqrt, dot, transpose, sum

    if Y is None: Y = X

    if X.ndim < 2: X = X.reshape((1, -1))
    if Y.ndim < 2: Y = Y.reshape((1, -1))

    C = dot(X, transpose(Y))
    S = add.outer(sum(X ** 2, 1), sum(Y ** 2, 1))

    return sqrt(clip(S - 2 * C, 0., 1e300))
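A quick check of distance_matrix on a small case, assuming the function above is in scope; the add.outer term supplies the ||x||^2 + ||y||^2 part of the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2*x.y:

from numpy import array, allclose

X = array([[0., 0.], [3., 4.]])
Y = array([[0., 4.]])
D = distance_matrix(X, Y)   # D[i, j] is the Euclidean distance between X[i] and Y[j]
assert allclose(D, [[4.], [3.]])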
Example #30
def test_cycle_fftw():
    """ Apply forward and backward fft """
    from purify.fftw import Fourier2D
    from numpy import add, exp, arange, product
    from numpy.testing import assert_allclose

    image_size = 256, 256
    oversampling = 2, 2
    shape = image_size[0] * oversampling[0], image_size[1] * oversampling[1]

    forward = Fourier2D(image_size, oversampling, "forward")
    backward = Fourier2D(image_size, oversampling, "backward")

    sigma = 10.0, 8.0
    xaxis = (arange(shape[0]) / float(shape[0]) - 0.5) / sigma[0]
    yaxis = (arange(shape[1]) / float(shape[1]) - 0.5) / sigma[1]

    expected = exp(-add.outer(xaxis * xaxis, yaxis * yaxis))
    forward.data = expected
    forward.execute()
    backward.data = forward.data
    backward.execute()

    assert_allclose(backward.data, expected * product(forward.data.shape))
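For comparison, the same round trip with numpy.fft, whose inverse transform is normalized (the purify backward transform evidently is not, hence the product(shape) factor in the test above):

from numpy import add, exp, arange, allclose
from numpy.fft import fft2, ifft2

x = exp(-add.outer(arange(8.0) ** 2, arange(8.0) ** 2) / 16.0)
assert allclose(ifft2(fft2(x)), x)   # numpy's ifft2 divides by the array size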
Example #31
def get_mat(n):
    data = arange(n)
    data = add.outer(data, data)
    return data
Example #32
def get_mat(n):
    data = arange(n)
    data = add.outer(data, data)
    return data
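For reference, add.outer(a, b) forms the matrix a[i] + b[j] for every index pair, so get_mat(n) returns an n-by-n matrix whose (i, j) entry is i + j:

from numpy import add, arange

data = arange(3)
print(add.outer(data, data))
# [[0 1 2]
#  [1 2 3]
#  [2 3 4]]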