Example No. 1
 def density_2s(self, n1, n2):
     """Returns a reduced density matrix for a pair of sites.
     
     Parameters
     ----------
     n1 : int
         The site number of the first site.
     n2 : int
         The site number of the second site (must be > n1).        
     """
     rho = sp.empty((self.q[n1] * self.q[n2], self.q[n1] * self.q[n2]), dtype=sp.complex128)
     r_n2 = sp.empty_like(self.r[n2 - 1])
     r_n1 = sp.empty_like(self.r[n1 - 1])
     
     for s2 in xrange(self.q[n2]):
         for t2 in xrange(self.q[n2]):
             r_n2 = m.mmul(self.A[n2][t2], self.r[n2], m.H(self.A[n2][s2]))
             
             r_n = r_n2
             for n in reversed(xrange(n1 + 1, n2)):
                 r_n = self.eps_r(n, r_n)        
                 
             for s1 in xrange(self.q[n1]):
                 for t1 in xrange(self.q[n1]):
                     r_n1 = m.mmul(self.A[n1][t1], r_n, m.H(self.A[n1][s1]))
                     tmp = m.mmul(self.l[n1 - 1], r_n1)
                     rho[s1 * self.q[n1] + s2, t1 * self.q[n1] + t2] = tmp.trace()
     return rho
Example No. 2
    def density_2s(self, n1, n2):
        """Returns a reduced density matrix for a pair of sites.
        
        Currently only supports sites in the nonuniform window.

        Parameters
        ----------
        n1 : int
            The site number of the first site.
        n2 : int
            The site number of the second site (must be > n1).
        """
        rho = sp.empty((self.q[n1] * self.q[n2], self.q[n1] * self.q[n2]), dtype=sp.complex128)
        r_n2 = sp.empty_like(self.r[n2 - 1])
        r_n1 = sp.empty_like(self.r[n1 - 1])
        ln1m1 = self.get_l(n1 - 1)

        for s2 in range(self.q[n2]):
            for t2 in range(self.q[n2]):
                r_n2 = mm.mmul(self.A[n2][t2], self.r[n2], mm.H(self.A[n2][s2]))

                r_n = r_n2
                for n in reversed(range(n1 + 1, n2)):
                    r_n = tm.eps_r_noop(r_n, self.A[n], self.A[n])

                for s1 in range(self.q[n1]):
                    for t1 in range(self.q[n1]):
                        r_n1 = mm.mmul(self.A[n1][t1], r_n, mm.H(self.A[n1][s1]))
                        tmp = mm.adot(ln1m1, r_n1)
                        rho[s1 * self.q[n1] + s2, t1 * self.q[n1] + t2] = tmp
        return rho
Example No. 3
    def density_2s(self, n1, n2):
        """Returns a reduced density matrix for a pair of sites.
        
        Currently only supports sites in the nonuniform window.

        Parameters
        ----------
        n1 : int
            The site number of the first site.
        n2 : int
            The site number of the second site (must be > n1).
        """
        rho = sp.empty((self.q[n1] * self.q[n2], self.q[n1] * self.q[n2]), dtype=sp.complex128)
        r_n2 = sp.empty_like(self.r[n2 - 1])
        r_n1 = sp.empty_like(self.r[n1 - 1])
        ln1m1 = self.get_l(n1 - 1)

        for s2 in xrange(self.q[n2]):
            for t2 in xrange(self.q[n2]):
                r_n2 = mm.mmul(self.A[n2][t2], self.r[n2], mm.H(self.A[n2][s2]))

                r_n = r_n2
                for n in reversed(xrange(n1 + 1, n2)):
                    r_n = tm.eps_r_noop(r_n, self.A[n], self.A[n])

                for s1 in xrange(self.q[n1]):
                    for t1 in xrange(self.q[n1]):
                        r_n1 = mm.mmul(self.A[n1][t1], r_n, mm.H(self.A[n1][s1]))
                        tmp = mm.adot(ln1m1, r_n1)
                        rho[s1 * self.q[n1] + s2, t1 * self.q[n1] + t2] = tmp
        return rho
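Once the pair density matrix is in hand, two-site observables follow by contracting it with an operator written in the same product basis. A minimal numpy sketch (the Pauli-X operator and local dimension q are illustrative placeholders, and the trace convention assumes rho is indexed as rho[s1*q + s2, t1*q + t2], matching np.kron ordering):

import numpy as np

q = 2                                  # local dimension (placeholder)
rho = np.eye(q * q) / (q * q)          # stand-in for density_2s(n1, n2)
X = np.array([[0., 1.], [1., 0.]])     # example single-site operator

# <X (x) X> as a single trace over the pair basis
expval = np.trace(rho.dot(np.kron(X, X)))
print(expval)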
Example No. 4
 def calc_x(self, n, Vsh, sqrt_l, sqrt_r, sqrt_l_inv, sqrt_r_inv):
     """Calculate the parameter matrix x* giving the desired B.
     
     This is equivalent to eqn. (49) of arXiv:1103.0936v2 [cond-mat.str-el] except 
     that, here, norm-preservation is not enforced, such that the optimal 
     parameter matrices x*_n (for the parametrization of B) are given by the 
     derivative w.r.t. x_n of <Phi[B, A]|Ĥ|Psi[A]>, rather than 
     <Phi[B, A]|Ĥ - H|Psi[A]> (with H = <Psi|Ĥ|Psi>).
     
     An additional sum was added for the single-site hamiltonian.
     
     Some multiplications have been pulled outside of the sums for efficiency.
     
     Direct dependencies: 
         - A[n - 1], A[n], A[n + 1]
         - r[n], r[n + 1], l[n - 2], l[n - 1]
         - C[n], C[n - 1]
         - K[n + 1]
         - V[n]
     """
     x = sp.zeros((self.D[n - 1], self.q[n] * self.D[n] - self.D[n - 1]), dtype=self.typ, order=self.odr)
     x_part = sp.empty_like(x)
     x_subpart = sp.empty_like(self.A[n][0])
     x_subsubpart = sp.empty_like(self.A[n][0])
     
     x_part.fill(0)
     for s in xrange(self.q[n]):
         x_subpart.fill(0)    
         
         if n < self.N:
             x_subsubpart.fill(0)
             for t in xrange(self.q[n + 1]):
                 x_subsubpart += m.mmul(self.C[n][s,t], self.r[n + 1], m.H(self.A[n + 1][t])) #~1st line
                 
             x_subsubpart += m.mmul(self.A[n][s], self.K[n + 1]) #~3rd line               
             
             x_subpart += m.mmul(x_subsubpart, sqrt_r_inv)
         
         if not self.h_ext is None:
             x_subsubpart.fill(0)
             for t in xrange(self.q[n]):                         #Extra term to take care of h_ext..
                 x_subsubpart += self.h_ext(n, s, t) * self.A[n][t] #it may be more effecient to squeeze this into the nn term...
             x_subpart += m.mmul(x_subsubpart, sqrt_r)
         
         x_part += m.mmul(x_subpart, Vsh[s])
             
     x += m.mmul(sqrt_l, x_part)
         
     if n > 1:
         x_part.fill(0)
         for s in xrange(self.q[n]):     #~2nd line
             x_subsubpart.fill(0)
             for t in xrange(self.q[n - 1]):  # t ranges over q[n - 1] to match C[n - 1][t, s]
                 x_subsubpart += m.mmul(m.H(self.A[n - 1][t]), self.l[n - 2], self.C[n - 1][t, s])
             x_part += m.mmul(x_subsubpart, sqrt_r, Vsh[s])
         x += m.mmul(sqrt_l_inv, x_part)
             
     return x
Example No. 5
    def loglike(self, x, T=None):
        """
        Compute the log likelihood given a set of samples.

        :param x: the sample matrix of size n x d, where n is the number
            of samples and d is the number of variables.
        """
        flag = False
        ## Get some parameters
        n = x.shape[0]

        ## Compute the membership function
        K = self.predict(x, out='ki')

        ## Compute the log-likelihood
        K *= (-0.5)
        Km = K.max(axis=1).reshape(n, 1)
        LL = (sp.log(sp.exp(K - Km).sum(axis=1)).reshape(n, 1) +
              Km).sum()  # logsumexp trick

        ## Compute the posterior
        if T is None:
            flag = True
            T = sp.empty_like(K)

        with sp.errstate(over='ignore'):
            for i in xrange(K.shape[1]):
                T[:, i] = 1 / sp.exp(K - K[:, i][:, sp.newaxis]).sum(axis=1)

        if flag:
            return LL, T
        else:
            return LL
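The "logsumexp trick" used above deserves spelling out: subtracting the row maximum before exponentiating prevents overflow without changing the result, since log sum_j exp(K_ij) = Km_i + log sum_j exp(K_ij - Km_i). A self-contained sketch:

import numpy as np

K = np.array([[1000., 999.],           # would overflow np.exp directly
              [3., 4.]])
Km = K.max(axis=1, keepdims=True)
LL = (np.log(np.exp(K - Km).sum(axis=1, keepdims=True)) + Km).sum()
print(LL)                              # finite, no overflow warnings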
Example No. 6
    def predict_proba(self, X):
        """
        Predict the membership probabilities for the data samples
        in X using trained model.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        proba : array, shape (n_samples, n_clusters)
        """
        X = check_array(X, copy=False, order='C', dtype=sp.float64)
        K = self.score_samples(X)
        T = sp.empty_like(K)

        # Compute the log-likelihood
        K *= (0.5)

        # Compute the posterior
        with sp.errstate(over='ignore'):
            for c in xrange(self.C):
                T[:, c] = 1 / sp.exp(K - K[:, c][:, sp.newaxis]).sum(axis=1)

        return T
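The posterior loop above is a numerically stable softmax in disguise: T[:, c] = exp(K_c) / sum_j exp(K_j), evaluated as 1 / sum_j exp(K_j - K_c) so the largest score never overflows. A minimal numpy equivalent of both forms:

import numpy as np

K = np.array([[2.0, 0.5, -1.0]])       # per-class scores for one sample

# loop form, as in predict_proba above
T = np.empty_like(K)
with np.errstate(over='ignore'):
    for c in range(K.shape[1]):
        T[:, c] = 1.0 / np.exp(K - K[:, c][:, np.newaxis]).sum(axis=1)

# equivalent vectorized softmax with max-shift
T2 = np.exp(K - K.max(axis=1, keepdims=True))
T2 /= T2.sum(axis=1, keepdims=True)
assert np.allclose(T, T2)              # rows of both sum to 1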
Example No. 7
 def density_1s(self, n):
     """Returns a reduced density matrix for a single site.
     
     The site number basis is used: rho[s, t] 
     with 0 <= s, t < q[n].
     
     The state must be up-to-date -- see self.update()!
     
     Parameters
     ----------
     n : int
         The site number.
         
     Returns
     -------
     rho : ndarray
         Reduced density matrix in the number basis.
     """
     rho = sp.empty((self.q[n], self.q[n]), dtype=sp.complex128)
                 
     r_n = self.r[n]
     r_nm1 = sp.empty_like(self.r[n - 1])
     for s in xrange(self.q[n]):
         for t in xrange(self.q[n]):
             r_nm1 = m.mmul(self.A[n][t], r_n, m.H(self.A[n][s]))                
             rho[s, t] = m.adot(self.l[n - 1], r_nm1)
     return rho
Example No. 8
File: hdda.py Project: mfauvel/HDDA
    def predict_proba(self, X):
        """
        Predict the membership probabilities for the data samples
        in X using trained model.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        proba : array, shape (n_samples, n_clusters)
        """
        X = check_array(X, copy=False, order='C', dtype=sp.float64)
        K = self.score_samples(X)
        T = sp.empty_like(K)

        # Compute the log-likelihood
        K *= (0.5)

        # Compute the posterior
        with sp.errstate(over='ignore'):
            for c in xrange(self.C):
                T[:, c] = 1 / sp.exp(K-K[:, c][:, sp.newaxis]).sum(axis=1)

        return T
Example No. 9
 def scale(self,x,M=None,m=None):  # TODO:  DO IN PLACE SCALING
     """!@brief Function that standardize the data
     
         Input:
             x: the data
             M: the Max vector
             m: the Min vector
         Output:
             x: the standardize data
             M: the Max vector
             m: the Min vector
     """
     [n,d]=x.shape
     if not sp.issubdtype(x.dtype,float):
         x=x.astype('float')
 
     # Initialization of the output
     xs = sp.empty_like(x)
 
     # get the parameters of the scaling
     if M is None:
         M,m = sp.amax(x,axis=0),sp.amin(x,axis=0)
         
     den = M-m
     for i in range(d):
         if den[i] != 0:
             xs[:,i] = 2*(x[:,i]-m[i])/den[i]-1
         else:
             xs[:,i]=x[:,i]
 
     return xs
Example No. 10
    def scale(self, x, M=None, m=None):  # TODO:  DO IN PLACE SCALING
        """!@brief Function that standardize the data
        
            Input:
                x: the data
                M: the Max vector
                m: the Min vector
            Output:
                x: the standardize data
                M: the Max vector
                m: the Min vector
        """
        [n, d] = x.shape
        if not sp.issubdtype(x.dtype, float):
            x = x.astype('float')

        # Initialization of the output
        xs = sp.empty_like(x)

        # get the parameters of the scaling
        if M is None:
            M, m = sp.amax(x, axis=0), sp.amin(x, axis=0)

        den = M - m
        for i in range(d):
            if den[i] != 0:
                xs[:, i] = 2 * (x[:, i] - m[i]) / den[i] - 1
            else:
                xs[:, i] = x[:, i]

        return xs
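For reference, the per-column loop in scale can be collapsed with numpy broadcasting; a sketch under the same semantics (constant columns are passed through unchanged, as in the else branch above):

import numpy as np

x = np.random.rand(5, 3)
x[:, 2] = 7.0                          # a constant column with zero range
M, m = x.max(axis=0), x.min(axis=0)
den = M - m
safe = np.where(den != 0, den, 1)      # avoid division by zero
xs = np.where(den != 0, 2 * (x - m) / safe - 1, x)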
Example No. 11
    def density_1s(self, n):
        """Returns a reduced density matrix for a single site.
        
        The site number basis is used: rho[s, t] 
        with 0 <= s, t < q[n].
        
        The state must be up-to-date -- see self.update()!
        
        Parameters
        ----------
        n : int
            The site number.
            
        Returns
        -------
        rho : ndarray
            Reduced density matrix in the number basis.
        """
        rho = sp.empty((self.q[n], self.q[n]), dtype=sp.complex128)

        r_n = self.r[n]
        r_nm1 = sp.empty_like(self.r[n - 1])
        for s in xrange(self.q[n]):
            for t in xrange(self.q[n]):
                r_nm1 = m.mmul(self.A[n][t], r_n, m.H(self.A[n][s]))
                rho[s, t] = m.adot(self.l[n - 1], r_nm1)
        return rho
Example No. 12
def shifted_matrix_sub(data, sub, tau, pad_val=0.0):
    """Subtracts the multi-channeled vector (rows are channels) y from
    the vector x with a certain offset. x and y can due to the offset be only
    partly overlapping.

    REM: from matlab

    :type data: ndarray
    :param data: data array to apply the subtractor to
    :type sub: ndarray
    :param sub: subtractor array
    :type tau: int
    :param tau: offset of :sub: w.r.t. start of :data:
    :type pad_val: float
    :param pad_val: value to use for the padding
        Default=0.0
    :return: ndarray - data minus sub at offset, len(data)
    """

    ns_data, nc_data = data.shape
    ns_sub, nc_sub = sub.shape
    if nc_data != nc_sub:
        raise ValueError('nc_data and nc_sub must agree!')
    tau = int(tau)
    data_sub = sp.empty_like(data)
    data_sub[:] = pad_val
    data_sub[max(0, tau):tau + ns_sub] = sub[max(0, -tau):ns_data - tau]
    return data - data_sub
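A quick usage sketch of shifted_matrix_sub with toy arrays (assuming the function above with its scipy import): with tau = 1 the subtractor is shifted one sample forward, so rows 1..3 of data receive the subtraction while the rows outside the overlap keep pad_val (zero) subtracted, i.e. stay unchanged:

import numpy as np

data = np.ones((5, 2))                 # 5 samples, 2 channels
sub = np.full((3, 2), 0.5)
res = shifted_matrix_sub(data, sub, tau=1)
# res rows 1..3 are 0.5, rows 0 and 4 remain 1.0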
Example No. 13
File: RCWA.py Project: LeiDai/EMpy
def dispersion_relation_extraordinary(kx, ky, k, nO, nE, c):
    """Dispersion relation for the extraordinary wave.

    NOTE
    See eq. 16 in Glytsis, "Three-dimensional (vector) rigorous
    coupled-wave analysis of anisotropic grating diffraction",
    JOSA A, 7(8), 1990. Always gives the positive real or the negative
    imaginary root.
    """

    if kx.shape != ky.shape or c.size != 3:
        raise ValueError('kx and ky must have the same length and c must have 3 components')

    kz = S.empty_like(kx)

    for ii in xrange(0, kx.size):

        alpha = nE**2 - nO**2
        beta = kx[ii]/k * c[0] + ky[ii]/k * c[1]

        # coeffs
        C = S.array([nO**2 + c[2]**2 * alpha, \
                     2. * c[2] * beta * alpha, \
                     nO**2 * (kx[ii]**2 + ky[ii]**2) / k**2 + alpha * beta**2 - nO**2 * nE**2])

        # two solutions of type +x or -x, purely real or purely imag
        tmp_kz = k * S.roots(C)

        # get the negative imaginary part or the positive real one
        if S.any(S.isreal(tmp_kz)):
            kz[ii] = S.absolute(tmp_kz[0])
        else:
            kz[ii] = -1j * S.absolute(tmp_kz[0])

    return kz
Example No. 14
    def calc_B(self, n, set_eta=True):
        """Generates the B[n] tangent vector corresponding to physical evolution of the state.

        In other words, this returns B[n][x*] (equiv. eqn. (47) of
        arXiv:1103.0936v2 [cond-mat.str-el])
        with x* the parameter matrices satisfying the Euler-Lagrange equations
        as closely as possible.
        
        In the case of Bc, use the general Bc generated in calc_B_centre().
        """
        if n == self.N_centre:
            B, eta_sq_c = self.calc_B_centre()
            if set_eta:
                self.eta_sq[self.N_centre] = eta_sq_c
        else:
            l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv = self.calc_l_r_roots(n)
            
            if n > self.N_centre:
                Vsh = tm.calc_Vsh(self.A[n], r_sqrt, sanity_checks=self.sanity_checks)
                x = self.calc_x(n, Vsh, l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv, right=True)
                
                B = sp.empty_like(self.A[n])
                for s in range(self.q[n]):
                    B[s] = mm.mmul(l_sqrt_inv, x, mm.H(Vsh[s]), r_sqrt_inv)
                    
                if self.sanity_checks:
                    M = tm.eps_r_noop(self.r[n], B, self.A[n])
                    if not sp.allclose(M, 0):
                        print("Sanity Fail in calc_B!: B_%u does not satisfy GFC!" % n)
            else:
                Vsh = tm.calc_Vsh_l(self.A[n], l_sqrt, sanity_checks=self.sanity_checks)
                x = self.calc_x(n, Vsh, l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv, right=False)
                
                B = sp.empty_like(self.A[n])
                for s in range(self.q[n]):
                    B[s] = mm.mmul(l_sqrt_inv, mm.H(Vsh[s]), x, r_sqrt_inv)
                    
                if self.sanity_checks:
                    M = tm.eps_l_noop(self.l[n - 1], B, self.A[n])
                    if not sp.allclose(M, 0):
                        print("Sanity Fail in calc_B!: B_%u does not satisfy GFC!" % n)
            
            if set_eta:
                self.eta_sq[n] = mm.adot(x, x)


        return B
Example No. 15
 def calc_B1(self):
     """Calculate the optimal B1 given right gauge-fixing on B2..N and
     no gauge-fixing on B1.
     
     We use the non-norm-preserving K's, since the norm-preservation
     is not needed elsewhere. It is cleaner to subtract the relevant
     norm-changing terms from the K's here than to generate all K's
     with norm-preservation.
     """
     B1 = sp.empty_like(self.A[1])
     
     try:
         r1_i = self.r[1].inv()
     except AttributeError:
         r1_i = mm.invmh(self.r[1])
         
     try:
         l0_i = self.l[0].inv()
     except AttributeError:
         l0_i = mm.invmh(self.l[0])
     
     A0 = self.A[0]
     A1 = self.A[1]
     A2 = self.A[2]
     r1 = self.r[1]
     r2 = self.r[2]
     l0 = self.l[0]
     
     KLh = mm.H(self.u_gnd_l.K_left - l0 * mm.adot(self.u_gnd_l.K_left, self.r[0]))
     K2 = self.K[2] - r1 * mm.adot(self.l[1], self.K[2])
     
     C1 = self.C[1] - self.h_expect[1] * self.AA1
     C0 = self.C[0] - self.h_expect[0] * self.AA0
     
     for s in xrange(self.q[1]):
         try:
             B1[s] = A1[s].dot(r1_i.dot_left(K2))
         except AttributeError:
             B1[s] = A1[s].dot(K2.dot(r1_i))
         
         for t in xrange(self.q[2]):
             try:
                 B1[s] += C1[s, t].dot(r2.dot(r1_i.dot_left(mm.H(A2[t]))))
             except AttributeError:
                 B1[s] += C1[s, t].dot(r2.dot(mm.H(A2[t]).dot(r1_i)))                    
             
         B1sbit = KLh.dot(A1[s])
                         
         for t in xrange(self.q[0]):
             B1sbit += mm.H(A0[t]).dot(l0.dot(C0[t,s]))
             
         B1[s] += l0_i.dot(B1sbit)
        
     rb = sp.zeros_like(self.r[0])
     for s in xrange(self.q[1]):
         rb += B1[s].dot(r1.dot(mm.H(B1[s])))
     eta = sp.sqrt(mm.adot(l0, rb))
             
     return B1, eta
Example No. 16
def calculate_slow_phi_0s(phi_0s, p_values):
    slow_phi_0s = scipy.empty_like(phi_0s)
    for i, phi_0 in enumerate(phi_0s):
        phi_0_unfolded = unfold(phi_0)
        x = arange(len(phi_0_unfolded))
        model = polyfit(x, phi_0_unfolded, 3, w=p_values[i])
        slow_phi_0s[i] = polyval(model, x)
    return slow_phi_0s
Example No. 17
def calculate_slow_phi_0s(phi_0s, p_values):
    slow_phi_0s = scipy.empty_like(phi_0s)
    for i, phi_0 in enumerate(phi_0s):
        phi_0_unfolded = unfold(phi_0)
        x = arange(len(phi_0_unfolded))
        model = polyfit(x, phi_0_unfolded, 3, w=p_values[i])
        slow_phi_0s[i] = polyval(model, x)
    return slow_phi_0s
Example No. 18
    def calc_C(self, n_low=-1, n_high=-1):
        """Generates the C matrices used to calculate the K's and ultimately the B's

        These are to be used on one side of the super-operator when applying the
        nearest-neighbour Hamiltonian, similarly to C in eqn. (44) of
        arXiv:1103.0936v2 [cond-mat.str-el], except being for the non-norm-preserving case.

        Makes use only of the nearest-neighbour hamiltonian, and of the A's.

        C[n] depends on A[n] and A[n + 1].
        
        This calculation can be significantly faster if a matrix form for h_nn
        is available. See gen_h_matrix().

        """
        if self.h_nn is None:
            return 0

        if n_low < 1:
            n_low = 0
        if n_high < 1:
            n_high = self.N + 1
        
        if self.h_nn_mat is None:
            for n in xrange(n_low, n_high):
                self.C[n].fill(0)
                for u in xrange(self.q[n]):
                    for v in xrange(self.q[n + 1]):
                        AA = mm.mmul(self.A[n][u], self.A[n + 1][v]) #only do this once for each
                        for s in xrange(self.q[n]):
                            for t in xrange(self.q[n + 1]):
                                h_nn_stuv = self.h_nn(n, s, t, u, v)
                                if h_nn_stuv != 0:
                                    self.C[n][s, t] += h_nn_stuv * AA
        else:
            dot = sp.dot
            for n in xrange(n_low, n_high):
                An = self.A[n]
                Anp1 = self.A[n + 1]
                
                AA = sp.empty_like(self.C[n])
                for u in xrange(self.q[n]):
                    for v in xrange(self.q[n + 1]):
                        AA[u, v] = dot(An[u], Anp1[v])
                        
                if n == 0: #FIXME: Temp. hack
                    self.AA0 = AA
                elif n == 1:
                    self.AA1 = AA
                
                res = sp.tensordot(AA, self.h_nn_mat[n], ((0, 1), (2, 3)))
                res = sp.rollaxis(res, 3)
                res = sp.rollaxis(res, 3)
                
                self.C[n][:] = res
Example No. 19
def map_cell_field(src, mesh_map):
    # Copy to contiguous arrays and cast as needed,
    # then send pointer
    srcc = sp.ascontiguousarray(src)
    srcc = srcc.astype(sp.float64, casting="same_kind", copy=False)
    dest = sp.empty_like(srcc)
    libgridmap.map_cell_field_c(
        mesh_map, 1, len(srcc), 0.,
        srcc.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
        dest.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
    return dest
Example No. 20
def oppwalker_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)
    out = sp.empty_like(arr)

    # red-green
    out[:, :, 0] = arr[:, :, 0] - arr[:, :, 1]
    # blue-yellow
    out[:, :, 1] = arr[:, :, 2] - arr[:, :, [0, 1]].min(2)
    # intensity
    out[:, :, 2] = arr.max(2)

    return out
Example No. 21
def oppwalker_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)
    out = sp.empty_like(arr)

    # red-green
    out[:,:,0] = arr[:,:,0] - arr[:,:,1]
    # blue-yellow
    out[:,:,1] = arr[:,:,2] - arr[:,:,[0,1]].min(2)
    # intensity
    out[:,:,2] = arr.max(2)

    return out
Example No. 22
def addPoisson(data, nFactor=1):
    """
	This method is used to generate random noise on a spectrum, image or cube
	by using the numpy.random.poisson library.
		
	Parameters:
	@param data: it can be a 1D, 2D or 3D numpy-array.
		
	*args:
	nFactor: float number that is used to turn the poisson noise a float by multiplying the
	poisson distribution by it and dividing the random number resulted from the method
	by it.
		
	It returns a data with the same type that the one inputted.
	
	"""

    if (data.ndim == 1):
        tmp = empty_like(data)
        for k in range(data.shape[0]):
            tmp[k] = random.poisson(nFactor * data[k]) / nFactor

    elif (data.ndim == 2):
        tmp = empty_like(data)
        for i in range(data.shape[0]):
            for j in range(data.shape[1]):
                tmp[i][j] = random.poisson(nFactor * data[i][j]) / nFactor

    elif (data.ndim == 3):
        tmp = empty_like(data)
        for k in range(data.shape[0]):
            for i in range(data.shape[1]):
                for j in range(data.shape[2]):
                    tmp[k][i][j] = random.poisson(
                        nFactor * data[k][i][j]) / nFactor
    else:
        errorMessage = "Error @  illusion.datamanip.addPoissonNoise: \n"
        errorMessage += "Data with more than 3 dimensions are not supported"
        raise TypeError(errorMessage)
    return tmp
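Since numpy.random.poisson accepts an array of rates and samples elementwise, the three dimensional cases above collapse to a single expression; a sketch of the equivalent (same nFactor semantics, hypothetical name add_poisson_np):

import numpy as np

def add_poisson_np(data, n_factor=1.0):
    # poisson() broadcasts over arrays of any dimensionality
    return np.random.poisson(n_factor * np.asarray(data)) / n_factor

noisy = add_poisson_np(np.full((4, 4, 4), 100.0), n_factor=2.0)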
Example No. 23
def BiotLineIntegral(vect_arr, r_p, current=1.0):
    """
    Calculates the magnetic flux density from a list of points
    (vect_arr) that represents a discretization of a conductor.
    The magnetic flux density is calculated for r_p positions.
    
    Parameters
    ----------
    vect_arr: array([[x_0,y_0,z_0], ... , [x_n,y_n,z_n]])
    r_p: array([[x_0,y_0,z_0], ... , [x_n,y_n,z_n]])

    All coordinates are in mm

    Returns
    ----------
    Magnetic flux density Bfield, same shape as r_p
    
    """
    bfield = scipy.empty_like(r_p, scipy.float64)
    code = """
    #include <iostream>
    #include <math.h>
    #include <assert.h>
    for(int ir_p = 0; ir_p < size_r_p; ir_p++) {
        vec3 wire_pre = { vect_arr(0,0), vect_arr(0,1), vect_arr(0,2) };
        vec3 bfield_vec = { 0.0, 0.0, 0.0 };
        vec3 vec3r_p = { r_p(ir_p, 0), r_p(ir_p, 1), r_p(ir_p, 2) };  
        for(int i_v  = 1; i_v < size_vect_arr; i_v++) {
            vec3 vec3_arr = { vect_arr(i_v,0), vect_arr(i_v,1), vect_arr(i_v,2) };
            vec3 dl = vec3_diff( vec3_arr, wire_pre);
            vec3 rs = vec3_arr;
            vec3 r  = vec3_diff( vec3r_p, rs );
            double r_length = vec3_abs(r);
            bfield_vec      =  vec3_add( vec3_scale( vec3_cross( dl, r), 1.0 / pow( r_length, 3)), bfield_vec );
            wire_pre        = vec3_arr;
        }
        bfield(ir_p, 0) = bfield_vec.x ;
        bfield(ir_p, 1) = bfield_vec.y ;
        bfield(ir_p, 2) = bfield_vec.z ;
    }
    return_val = 1;
    """
    size_r_p = r_p[:, 0].size
    size_vect_arr = vect_arr[:, 0].size
    os.path.realpath(__file__)
    support_code = open(os.path.dirname(__file__) + "/biot_blitz_support.cpp")
    scipy.weave.inline(
        code, ["r_p", "size_r_p", "bfield", "vect_arr", "size_vect_arr"],
        type_converters=converters.blitz,
        support_code=support_code.read(),
        compiler='gcc')
    return bfield * mu_0 * 1000.0 * 1.0 / (4.0 * pi)
Example No. 24
def BiotLineIntegral(vect_arr, r_p, current=1.0):
    """
    Calculates the magnetic flux density from a list of points
    (vect_arr) that represents a discretization of a conductor.
    The magnetic flux density is calculated for r_p positions.
    
    Parameters
    ----------
    vect_arr: array([[x_0,y_0,z_0], ... , [x_n,y_n,z_n]])
    r_p: array([[x_0,y_0,z_0], ... , [x_n,y_n,z_n]])

    All coordinates are in mm

    Returns
    ----------
    Magnetic flux density Bfield, same shape as r_p
    
    """
    bfield = scipy.empty_like(r_p, scipy.float64)
    code = """
    #include <iostream>
    #include <math.h>
    #include <assert.h>
    for(int ir_p = 0; ir_p < size_r_p; ir_p++) {
        vec3 wire_pre = { vect_arr(0,0), vect_arr(0,1), vect_arr(0,2) };
        vec3 bfield_vec = { 0.0, 0.0, 0.0 };
        vec3 vec3r_p = { r_p(ir_p, 0), r_p(ir_p, 1), r_p(ir_p, 2) };  
        for(int i_v  = 1; i_v < size_vect_arr; i_v++) {
            vec3 vec3_arr = { vect_arr(i_v,0), vect_arr(i_v,1), vect_arr(i_v,2) };
            vec3 dl = vec3_diff( vec3_arr, wire_pre);
            vec3 rs = vec3_arr;
            vec3 r  = vec3_diff( vec3r_p, rs );
            double r_length = vec3_abs(r);
            bfield_vec      =  vec3_add( vec3_scale( vec3_cross( dl, r), 1.0 / pow( r_length, 3)), bfield_vec );
            wire_pre        = vec3_arr;
        }
        bfield(ir_p, 0) = bfield_vec.x ;
        bfield(ir_p, 1) = bfield_vec.y ;
        bfield(ir_p, 2) = bfield_vec.z ;
    }
    return_val = 1;
    """
    size_r_p          = r_p[:, 0].size
    size_vect_arr     = vect_arr[:, 0].size
    os.path.realpath(__file__)
    support_code = open( os.path.dirname(__file__) + "/biot_blitz_support.cpp" )
    scipy.weave.inline(code,
                       ["r_p", "size_r_p", "bfield", "vect_arr", "size_vect_arr"],
                       type_converters=converters.blitz,
                       support_code=support_code.read(),
                       compiler='gcc' )
    return bfield * mu_0 * 1000.0 * 1.0/(4.0 * pi)
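scipy.weave was removed after scipy 0.19, but the same discretized Biot-Savart sum translates directly to numpy broadcasting. A sketch, not a drop-in replacement, following the segment convention of the C kernel (each segment's field is evaluated from its end point; like the original, the current argument is left out of the final scaling):

import numpy as np

mu_0 = 4e-7 * np.pi

def biot_line_integral_np(vect_arr, r_p):
    dl = vect_arr[1:] - vect_arr[:-1]        # segment vectors, (n_seg, 3)
    rs = vect_arr[1:]                        # segment end points, as in the C loop
    r = r_p[:, None, :] - rs[None, :, :]     # (n_obs, n_seg, 3)
    r_len = np.linalg.norm(r, axis=2)
    cross = np.cross(dl[None, :, :], r)      # dl x r per observation point
    bfield = (cross / r_len[:, :, None] ** 3).sum(axis=1)
    return bfield * mu_0 * 1000.0 / (4.0 * np.pi)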
Example No. 25
def oppsande_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    r = arr[:, :, 0]
    g = arr[:, :, 1]
    b = arr[:, :, 2]

    out = sp.empty_like(arr)
    out[:, :, 0] = (r - g) / sp.sqrt(2.)
    out[:, :, 1] = (r + g - 2. * b) / sp.sqrt(6.)
    out[:, :, 2] = (r + g + b) / sp.sqrt(3.)

    return out
Example No. 26
def oppsande_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    r = arr[:,:,0]
    g = arr[:,:,1]
    b = arr[:,:,2]
    
    out = sp.empty_like(arr)
    out[:,:,0] = (r-g) / sp.sqrt(2.)
    out[:,:,1] = (r+g-2.*b) / sp.sqrt(6.)
    out[:,:,2] = (r+g+b) / sp.sqrt(3.)

    return out
Example No. 27
def calc_x_l(Km1, C, Cm1, rp1, lm2, Am1, A, Ap1, lm1_s, lm1_si, r_s, r_si,
             Vsh):
    D = A.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]

    x = sp.zeros((q * Dm1 - D, D), dtype=A.dtype)
    x_part = sp.empty_like(x, order='C')
    x_subpart = sp.empty_like(A[0], order='C')

    if not C is None:
        x_part.fill(0)
        for s in xrange(q):
            x_subpart = eps_r_noop_inplace(rp1, C[s], Ap1,
                                           x_subpart)  #~1st line
            x_part += Vsh[s].dot(lm1_s.dot(x_subpart))

        try:
            x += r_si.dot_left(x_part)
        except AttributeError:
            x += x_part.dot(r_si)

    x_part.fill(0)
    for s in xrange(q):  #~2nd line
        x_subpart.fill(0)

        if not lm2 is None:
            x_subpart = eps_l_noop_inplace(lm2, Am1, Cm1[:, s], x_subpart)

        if not Km1 is None:
            x_subpart += Km1.dot(A[s])  #~3rd line

        x_part += Vsh[s].dot(lm1_si.dot(x_subpart))
    try:
        x += r_s.dot_left(x_part)
    except AttributeError:
        x += x_part.dot(r_s)

    return x
Example No. 28
def solver_3(a, b, np):
    """
      solver optimized for "3x3 matrices" and dr discretization points
    """
    # prepare the array for the resulting function
    h = empty_like(a)

    # calculate the determinant
    a_det = a[0, 0] * (a[1, 1] * a[2, 2] - a[2, 1] * a[1, 2]) - a[0, 1] * (
        a[1, 0] * a[2, 2] - a[2, 0] * a[1, 2]) + a[0, 2] * (a[1, 0] * a[2, 1] -
                                                            a[2, 0] * a[1, 1])
    if (a_det == 0.0).any():
        # report the first discretization point with a zero determinant
        print(
            "singular matrix, cannot invert: determinant of (1-C) matrix is zero for dr=%u"
            % (a_det == 0.0).argmax())
        exit(1)

    # perform the calculation for every discretization point
    for dr in range(np):
        # calculate the inverse
        a_inv = ones((3, 3)) / a_det[dr]
        # the indexes in a_inv already correspond to transposed matrix!
        a_inv[0, 0] *= (a[1, 1, dr] * a[2, 2, dr] - a[2, 1, dr] * a[1, 2, dr])
        a_inv[1, 0] *= -1.0 * (a[1, 0, dr] * a[2, 2, dr] -
                               a[2, 0, dr] * a[1, 2, dr])
        a_inv[2, 0] *= (a[1, 0, dr] * a[2, 1, dr] - a[2, 0, dr] * a[1, 1, dr])
        a_inv[0, 1] *= -1.0 * (a[0, 1, dr] * a[2, 2, dr] -
                               a[2, 1, dr] * a[0, 2, dr])
        a_inv[1, 1] *= (a[0, 0, dr] * a[2, 2, dr] - a[2, 0, dr] * a[0, 2, dr])
        a_inv[2, 1] *= -1.0 * (a[0, 0, dr] * a[2, 1, dr] -
                               a[2, 0, dr] * a[0, 1, dr])
        a_inv[0, 2] *= (a[0, 1, dr] * a[1, 2, dr] - a[1, 1, dr] * a[0, 2, dr])
        a_inv[1, 2] *= -1.0 * (a[0, 0, dr] * a[1, 2, dr] -
                               a[1, 0, dr] * a[0, 2, dr])
        a_inv[2, 2] *= (a[0, 0, dr] * a[1, 1, dr] - a[1, 0, dr] * a[0, 1, dr])

        # do the calculation
        # using matrix algebra from scipy/numpy
        h[:, :, dr] = (mat(a_inv) * mat(b[:, :, dr]))
        # explicitly - might be faster, but is not!
        #h[0,0,dr] = (a_inv[0,0]*b[0,0,dr] + a_inv[0,1]*b[1,0,dr] + a_inv[0,2]*b[2,0,dr]) / dens_factor[0,0]
        #h[0,1,dr] = (a_inv[0,0]*b[0,1,dr] + a_inv[0,1]*b[1,1,dr] + a_inv[0,2]*b[2,1,dr]) / dens_factor[0,1]
        #h[0,2,dr] = (a_inv[0,0]*b[0,2,dr] + a_inv[0,1]*b[1,2,dr] + a_inv[0,2]*b[2,2,dr]) / dens_factor[0,2]
        #h[1,0,dr] = (a_inv[1,0]*b[0,0,dr] + a_inv[1,1]*b[1,0,dr] + a_inv[1,2]*b[2,0,dr]) / dens_factor[1,0]
        #h[1,1,dr] = (a_inv[1,0]*b[0,1,dr] + a_inv[1,1]*b[1,1,dr] + a_inv[1,2]*b[2,1,dr]) / dens_factor[1,1]
        #h[1,2,dr] = (a_inv[1,0]*b[0,2,dr] + a_inv[1,1]*b[1,2,dr] + a_inv[1,2]*b[2,2,dr]) / dens_factor[1,2]
        #h[2,0,dr] = (a_inv[2,0]*b[0,0,dr] + a_inv[2,1]*b[1,0,dr] + a_inv[2,2]*b[2,0,dr]) / dens_factor[2,0]
        #h[2,1,dr] = (a_inv[2,0]*b[0,1,dr] + a_inv[2,1]*b[1,1,dr] + a_inv[2,2]*b[2,1,dr]) / dens_factor[2,1]
        #h[2,2,dr] = (a_inv[2,0]*b[0,2,dr] + a_inv[2,1]*b[1,2,dr] + a_inv[2,2]*b[2,2,dr]) / dens_factor[2,2]

    return (h)
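numpy.linalg.solve broadcasts over leading axes, so the hand-rolled 3x3 adjugate inverse above (and solver_n's per-point loop) can be replaced by one batched call once the discretization axis is moved to the front. A sketch:

import numpy as np

def solver_batched(a, b):
    """a and b have shape (3, 3, n_points); solve all points at once."""
    a_t = np.moveaxis(a, -1, 0)              # (n_points, 3, 3)
    b_t = np.moveaxis(b, -1, 0)
    h_t = np.linalg.solve(a_t, b_t)          # LinAlgError on singular input
    return np.moveaxis(h_t, 0, -1)           # back to (3, 3, n_points)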
Example No. 29
def calc_x_l(Km1, C, Cm1, rp1, lm2, Am1, A, Ap1, lm1_s, lm1_si, r_s, r_si, Vsh):
    D = A.shape[2]
    Dm1 = A.shape[1]
    q = A.shape[0]
    
    x = sp.zeros((q * Dm1 - D, D), dtype=A.dtype)
    x_part = sp.empty_like(x, order='C')
    x_subpart = sp.empty_like(A[0], order='C')
    
    if not C is None:
        x_part.fill(0)
        for s in xrange(q):
            x_subpart = eps_r_noop_inplace(rp1, C[s], Ap1, x_subpart) #~1st line
            x_part += Vsh[s].dot(lm1_s.dot(x_subpart))
            
        try:
            x += r_si.dot_left(x_part)
        except AttributeError:
            x += x_part.dot(r_si)

    
    x_part.fill(0)
    for s in xrange(q):     #~2nd line
        x_subpart.fill(0)

        if not lm2 is None:
            x_subpart = eps_l_noop_inplace(lm2, Am1, Cm1[:, s], x_subpart)
        
        if not Km1 is None:
            x_subpart += Km1.dot(A[s]) #~3rd line
        
        x_part += Vsh[s].dot(lm1_si.dot(x_subpart))
    try:
        x += r_s.dot_left(x_part)
    except AttributeError:
        x += x_part.dot(r_s)

    return x
Example No. 30
def invE_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    red = arr[:, :, 0]
    green = arr[:, :, 1]
    blue = arr[:, :, 2]

    out = sp.empty_like(arr)

    out[:, :, 0] = (red + green + blue) / 3.
    out[:, :, 1] = (red + green - 2. * blue) / 4.
    out[:, :, 2] = (red - 2. * green + blue) / 4.

    return out
Example No. 31
def fightSizeDistributionPlot(samples,
                              color='k',
                              plotConfInt=True,
                              plotErrorBars=False,
                              log=False,
                              alpha=0.4,
                              makePlot=True,
                              confIntP=0.95,
                              removeZeros=False,
                              removeOnes=False,
                              multiple=1,
                              verbose=True,
                              maxSize=None,
                              **kwargs):
    """
    multiple (1)            : Multiply probabilities by this 
                              factor.  Useful for plotting expected
                              number rather than probability.
    """

    #ell = len(samples[0])
    dist,confIntList = fightSizeDistribution(samples,               \
        confIntP=confIntP,removeZeros=removeZeros,removeOnes=removeOnes,
        maxSize=maxSize)
    ell = len(dist)

    dist, confIntList = multiple * dist, multiple * confIntList

    if makePlot:
        if plotConfInt:
            #for confInt in confIntList:
            #    if confInt[0] == 0.: confInt[0] = zeroEquiv
            #firstZero = pylab.find(dist==0)[2]
            firstZero = len(dist)
            pylab.fill_between(range(1,firstZero),                       \
                confIntList[:,0][1:firstZero],                           \
                confIntList[:,1][1:firstZero],color=color,alpha=alpha)
        if plotErrorBars:
            yerr = scipy.empty_like(confIntList.T)
            yerr[0] = dist - confIntList[:, 1]
            yerr[1] = confIntList[:, 0] - dist
            pylab.errorbar(range(ell), dist, yerr=yerr, color=color)

        pylab.plot(range(ell), dist, color=color, **kwargs)

        if log: pylab.yscale('log')

    if verbose:
        print("sum(dist) =", sum(dist))
    return dist
Example No. 32
def invE_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)
    
    red = arr[:,:,0]
    green = arr[:,:,1]
    blue = arr[:,:,2]

    out = sp.empty_like(arr)

    out[:,:,0] = (red + green + blue) / 3.
    out[:,:,1] = (red + green - 2.*blue) / 4.
    out[:,:,2] = (red - 2.*green + blue) / 4.

    return out
Example No. 33
def van_rossum_dist(trains, tau=1.0 * pq.s, kernel=None, sort=True):
    """ Calculates the van Rossum distance.

    It is defined as Euclidean distance of the spike trains convolved with a
    causal decaying exponential smoothing filter. A detailed description can be
    found in *Rossum, M. C. W. (2001). A novel spike distance. Neural
    Computation, 13(4), 751-763.* This implementation is normalized to yield
    a distance of 1.0 for the distance between an empty spike train and a spike
    train with a single spike. Divide the result by sqrt(2.0) to get the
    normalization used in the cited paper.

    Given :math:`N` spike trains with :math:`n` spikes on average the run-time
    complexity of this function is :math:`O(N^2 n^2)`. An implementation in
    :math:`O(N^2 n)` would be possible but has a high constant factor rendering
    it slower in practical cases.

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of
        which the van Rossum distance will be calculated pairwise.
    :param tau: Decay rate of the exponential function as time scalar. Controls
        for which time scale the metric will be sensitive. This parameter will
        be ignored if `kernel` is not `None`. May also be :const:`scipy.inf`
        which will lead to only measuring differences in spike count.
    :type tau: Quantity scalar
    :param kernel: Kernel to use in the calculation of the distance. This is not
        the smoothing filter, but its autocorrelation. If `kernel` is `None`, an
        unnormalized Laplacian kernel with a size of `tau` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Spike trains with sorted spike times might be needed for
        the calculation. You can set `sort` to `False` if you know that your
        spike trains are already sorted to decrease calculation time.
    :returns: Matrix containing the van Rossum distances for all pairs of spike
        trains.
    :rtype: 2-D array
    """

    if kernel is None:
        if tau == sp.inf:
            spike_counts = [st.size for st in trains]
            return (spike_counts - sp.atleast_2d(spike_counts).T) ** 2
        kernel = sigproc.LaplacianKernel(tau, normalize=False)

    k_dist = kernel.summed_dist_matrix(
        [st.view(type=pq.Quantity) for st in trains], not sort)
    vr_dist = sp.empty_like(k_dist)
    for i, j in sp.ndindex(*k_dist.shape):
        vr_dist[i, j] = (
            k_dist[i, i] + k_dist[j, j] - k_dist[i, j] - k_dist[j, i])
    return sp.sqrt(vr_dist)
Example No. 34
def van_rossum_dist(trains, tau=1.0 * pq.s, kernel=None, sort=True):
    """ Calculates the van Rossum distance.

    It is defined as Euclidean distance of the spike trains convolved with a
    causal decaying exponential smoothing filter. A detailed description can be
    found in *Rossum, M. C. W. (2001). A novel spike distance. Neural
    Computation, 13(4), 751-763.* This implementation is normalized to yield
    a distance of 1.0 for the distance between an empty spike train and a spike
    train with a single spike. Divide the result by sqrt(2.0) to get the
    normalization used in the cited paper.

    Given :math:`N` spike trains with :math:`n` spikes on average the run-time
    complexity of this function is :math:`O(N^2 n^2)`. An implementation in
    :math:`O(N^2 n)` would be possible but has a high constant factor rendering
    it slower in practical cases.

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of
        which the van Rossum distance will be calculated pairwise.
    :param tau: Decay rate of the exponential function as time scalar. Controls
        for which time scale the metric will be sensitive. This parameter will
        be ignored if `kernel` is not `None`. May also be :const:`scipy.inf`
        which will lead to only measuring differences in spike count.
    :type tau: Quantity scalar
    :param kernel: Kernel to use in the calculation of the distance. This is not
        the smoothing filter, but its autocorrelation. If `kernel` is `None`, an
        unnormalized Laplacian kernel with a size of `tau` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Spike trains with sorted spike times might be needed for
        the calculation. You can set `sort` to `False` if you know that your
        spike trains are already sorted to decrease calculation time.
    :returns: Matrix containing the van Rossum distances for all pairs of spike
        trains.
    :rtype: 2-D array
    """

    if kernel is None:
        if tau == sp.inf:
            spike_counts = [st.size for st in trains]
            return (spike_counts - sp.atleast_2d(spike_counts).T)**2
        kernel = sigproc.LaplacianKernel(tau, normalize=False)

    k_dist = kernel.summed_dist_matrix(
        [st.view(type=pq.Quantity) for st in trains], not sort)
    vr_dist = sp.empty_like(k_dist)
    for i, j in sp.ndindex(*k_dist.shape):
        vr_dist[i, j] = (k_dist[i, i] + k_dist[j, j] - k_dist[i, j] -
                         k_dist[j, i])
    return sp.sqrt(vr_dist)
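For intuition, the kernel identity used above, vr_dist[i, j]**2 = k_ii + k_jj - k_ij - k_ji, can be evaluated directly for plain arrays of spike times with the unnormalized Laplacian kernel exp(-|t - t'|/tau); a minimal sketch needing neither neo nor quantities:

import numpy as np

def van_rossum_pair(t1, t2, tau=1.0):
    def k(a, b):
        # summed Laplacian kernel between two spike-time arrays
        return np.exp(-np.abs(a[:, None] - b[None, :]) / tau).sum()
    return np.sqrt(k(t1, t1) + k(t2, t2) - 2.0 * k(t1, t2))

# distance 1.0 between an empty train and a single-spike train,
# matching the normalization described in the docstring
print(van_rossum_pair(np.array([]), np.array([0.3])))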
Example No. 35
def solver_n(a, b, np):
    """
      solver optimized for nxn matrices and dr discretization points
    """

    # create the function that will be returned
    h = empty_like(a)

    for dr in range(np):
        # solve the matrix problem for all dr
        # and divide by the density prefactor
        # remember that the zero elements in syst['dens']['ij'] were replaced by 1.0
        # in order to avoid numerical problem in further division by this value
        h[:, :, dr] = linalg.solve(a[:, :, dr], b[:, :, dr])

    return (h)
Example No. 36
 def _calc_B_r_diss(self, op, K, C, n, set_eta=True):
     if self.q[n] * self.D[n] - self.D[n - 1] > 0:
         l_sqrt, l_sqrt_inv, r_sqrt, r_sqrt_inv = tm.calc_l_r_roots(self.l[n - 1], 
                                                                self.r[n],
                                                                sanity_checks=self.sanity_checks,
                                                                sc_data=("site", n))
         Vsh = tm.calc_Vsh(self.A[n], r_sqrt, sanity_checks=self.sanity_checks)
         x = self.calc_x(n, Vsh, l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv)
         if set_eta:
             self.eta[n] = sp.sqrt(mm.adot(x, x))
 
         B = sp.empty_like(self.A[n])
         for s in xrange(self.q[n]):
             B[s] = mm.mmul(l_sqrt_inv, x, mm.H(Vsh[s]), r_sqrt_inv)
         return B
     else:
         return None
Example No. 37
 def density_1s(self, n):
     """Returns a reduced density matrix for a single site.
     
     Parameters
     ----------
     n : int
         The site number.
     """
     rho = sp.empty((self.q[n], self.q[n]), dtype=sp.complex128)
                 
     r_n = self.r[n]
     r_nm1 = sp.empty_like(self.r[n - 1])
     for s in xrange(self.q[n]):
         for t in xrange(self.q[n]):
             r_nm1 = m.mmul(self.A[n][t], r_n, m.H(self.A[n][s]))                
             rho[s, t] = m.mmul(self.l[n - 1], r_nm1).trace()
     return rho
Example No. 38
def convolveSpectrum(cube, function):
    """
	Convolve the spectrum of a data cube by a given function.
	
	@param cube: Cube to be convolved.
	@param function: this must be a python instance with one arg and one return numbers.
	
	@return scube: The spectral convolved cube.
	"""
    width = cube.shape[1]
    height = cube.shape[2]
    scube = empty_like(cube)

    for i in range(width):
        for j in range(height):
            scube[:, i, j] = convolve1d(cube[:, i, j], function)

    return scube
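If convolve1d here is scipy.ndimage.convolve1d, the pixel loops are unnecessary: that function takes an axis argument and filters every spectrum in one call. A sketch under that assumption:

import numpy as np
from scipy.ndimage import convolve1d

cube = np.random.rand(32, 8, 8)            # (spectral, width, height)
kernel = np.array([0.25, 0.5, 0.25])
scube = convolve1d(cube, kernel, axis=0)   # convolve along the spectral axis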
Example No. 39
def dispersion_relation_extraordinary(kx, ky, k, nO, nE, c):
    """Dispersion relation for the extraordinary wave.

    NOTE
    See eq. 16 in Glytsis, "Three-dimensional (vector) rigorous
    coupled-wave analysis of anisotropic grating diffraction",
    JOSA A, 7(8), 1990. Always gives the positive real or the negative
    imaginary root.
    """

    if kx.shape != ky.shape or c.size != 3:
        raise ValueError(
            "kx and ky must have the same length and c must have 3 components"
        )

    kz = S.empty_like(kx)

    for ii in range(0, kx.size):

        alpha = nE ** 2 - nO ** 2
        beta = kx[ii] / k * c[0] + ky[ii] / k * c[1]

        # coeffs
        C = S.array(
            [
                nO ** 2 + c[2] ** 2 * alpha,
                2.0 * c[2] * beta * alpha,
                nO ** 2 * (kx[ii] ** 2 + ky[ii] ** 2) / k ** 2
                + alpha * beta ** 2
                - nO ** 2 * nE ** 2,
            ]
        )

        # two solutions of type +x or -x, purely real or purely imag
        tmp_kz = k * S.roots(C)

        # get the negative imaginary part or the positive real one
        if S.any(S.isreal(tmp_kz)):
            kz[ii] = S.absolute(tmp_kz[0])
        else:
            kz[ii] = -1j * S.absolute(tmp_kz[0])

    return kz
Example No. 40
def rg2_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    out = sp.empty_like(arr[:, :, [0, 1]])

    red = arr[:, :, 0]
    green = arr[:, :, 1]
    blue = arr[:, :, 2]
    intensity = arr.mean(2)

    lowi = intensity < 0.1 * intensity.max()
    arr[lowi] = 0

    denom = arr.sum(2)
    denom[denom == 0] = 1
    out[:, :, 0] = red / denom
    out[:, :, 1] = green / denom

    return out
Example No. 41
def small_droplet_proxy(scene,
                        cloud_mask,
                        not_day_land_mask,
                        not_day_mask,
                        show_plots=False,
                        plot_path="",
                        createPlots=False):
    array = np.ma.array(scene["IR_039"].data)

    if createPlots:
        plot2dArray(array,
                    title="03.9 array",
                    show=show_plots,
                    outputPath=plot_path + "sdp_array.png")
    mask = cloud_mask & not_day_land_mask
    if createPlots:
        plot2dArray(mask,
                    title="sdp mask",
                    show=show_plots,
                    outputPath=plot_path + "sdp_land_cloud_mask.png")
    array.mask = mask

    blocks = block_by_size_generator(array.shape, array.shape[1],
                                     array.shape[0], 0)

    result_blocks = find_block_threshold(array, blocks)
    #print result_blocks
    threshold_array = scipy.empty_like(array)
    blocks_into_threshold_array(result_blocks, threshold_array)
    if createPlots:
        plot2dArray(threshold_array,
                    title="sdp thresholds",
                    show=show_plots,
                    outputPath=plot_path + "sdp_thresholds.png")

    array.mask = 0
    result = (array > threshold_array) & cloud_mask & ~not_day_mask
    if createPlots:
        plot2dArray(result,
                    title="sdp result",
                    show=show_plots,
                    outputPath=plot_path + "sdp_result.png")
    return result
Example No. 42
def rg2_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    out = sp.empty_like(arr[:,:,[0,1]])

    red = arr[:,:,0]
    green = arr[:,:,1]
    blue = arr[:,:,2]
    intensity = arr.mean(2)

    lowi = intensity < 0.1*intensity.max()
    arr[lowi] = 0

    denom = arr.sum(2)
    denom[denom==0] = 1
    out[:,:,0] = red / denom
    out[:,:,1] = green / denom
    
    return out
Example No. 43
    def _calc_B_r_diss(self, op, K, C, n, set_eta=True):
        if self.q[n] * self.D[n] - self.D[n - 1] > 0:
            l_sqrt, l_sqrt_inv, r_sqrt, r_sqrt_inv = tm.calc_l_r_roots(
                self.l[n - 1],
                self.r[n],
                sanity_checks=self.sanity_checks,
                sc_data=("site", n))
            Vsh = tm.calc_Vsh(self.A[n],
                              r_sqrt,
                              sanity_checks=self.sanity_checks)
            x = self.calc_x(n, Vsh, l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv)
            if set_eta:
                self.eta[n] = sp.sqrt(mm.adot(x, x))

            B = sp.empty_like(self.A[n])
            for s in xrange(self.q[n]):
                B[s] = mm.mmul(l_sqrt_inv, x, mm.H(Vsh[s]), r_sqrt_inv)
            return B
        else:
            return None
Example No. 44
def chrom_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    opp = opp_convert(arr)
    out = sp.empty_like(opp[:, :, [0, 1]])

    rg = opp[:, :, 0]
    by = opp[:, :, 1]
    intensity = opp[:, :, 2]

    lowi = intensity < 0.1 * intensity.max()
    rg[lowi] = 0
    by[lowi] = 0

    denom = intensity
    denom[denom == 0] = 1
    out[:, :, 0] = rg / denom
    out[:, :, 1] = by / denom

    return out
Example No. 45
def chrom_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    opp = opp_convert(arr)
    out = sp.empty_like(opp[:,:,[0,1]])

    rg = opp[:,:,0]
    by = opp[:,:,1]
    intensity = opp[:,:,2]

    lowi = intensity < 0.1*intensity.max()
    rg[lowi] = 0
    by[lowi] = 0

    denom = intensity
    denom[denom==0] = 1
    out[:,:,0] = rg / denom
    out[:,:,1] = by / denom

    return out
Example No. 46
 def calc_B(self, n):
     """Generates the B[n] tangent vector corresponding to physical evolution of the state.
     
     In other words, this returns B[n][x*] (equiv. eqn. (47) of 
     arXiv:1103.0936v2 [cond-mat.str-el]) 
     with x* the parameter matrices satisfying the Euler-Lagrange equations
     as closely as possible.
     """
     if self.q[n] * self.D[n] - self.D[n - 1] > 0:
         l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv = self.calc_l_r_roots(n)
         
         Vsh = self.calc_Vsh(n, r_sqrt)
         
         x = self.calc_x(n, Vsh, l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv)
 
         B = sp.empty_like(self.A[n])
         for s in xrange(self.q[n]):
             B[s] = m.mmul(l_sqrt_inv, x, m.H(Vsh[s]), r_sqrt_inv)
         return B
     else:
         return None
Example No. 47
 def _calc_B_l(self, n, set_eta=True):
     if self.q[n] * self.D[n - 1] - self.D[n] > 0:
         l_sqrt, l_sqrt_inv, r_sqrt, r_sqrt_inv = tm.calc_l_r_roots(self.l[n - 1], 
                                                                self.r[n], 
                                                                zero_tol=self.zero_tol,
                                                                sanity_checks=self.sanity_checks,
                                                                sc_data=('site', n))
         
         Vsh = tm.calc_Vsh_l(self.A[n], l_sqrt, sanity_checks=self.sanity_checks)
         
         x = self.calc_x_l(n, Vsh, l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv)
         
         if set_eta:
             self.eta[n] = sp.sqrt(m.adot(x, x))
 
         B = sp.empty_like(self.A[n])
         for s in xrange(self.q[n]):
             B[s] = m.mmul(l_sqrt_inv, m.H(Vsh[s]), x, r_sqrt_inv)
         return B
     else:
         return None
Example No. 48
    def calc_B(self, n, set_eta=True):
        """Generates the B[n] tangent vector corresponding to physical evolution of the state.

        In other words, this returns B[n][x*] (equiv. eqn. (47) of
        arXiv:1103.0936v2 [cond-mat.str-el])
        with x* the parameter matrices satisfying the Euler-Lagrange equations
        as closely as possible.
        
        In the case of B1, use the general B1 generated in calc_B1().
        """
        if self.q[n] * self.D[n] - self.D[n - 1] > 0:
            if n == 1:
                B, eta1 = self.calc_B1()
                if set_eta:
                    self.eta[1] = eta1
            else:
                l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv = self.calc_l_r_roots(n)
    
                Vsh = self.calc_Vsh(n, r_sqrt)
    
                x = self.calc_opt_x(n, Vsh, l_sqrt, r_sqrt, l_sqrt_inv, r_sqrt_inv)
                
                if set_eta:
                    self.eta[n] = sp.sqrt(mm.adot(x, x))
    
                B = sp.empty_like(self.A[n])
                for s in xrange(self.q[n]):
                    B[s] = mm.mmul(l_sqrt_inv, x, mm.H(Vsh[s]), r_sqrt_inv)

            if self.sanity_checks:
                M = sp.zeros_like(self.r[n - 1])
                for s in xrange(self.q[n]):
                    M += mm.mmul(B[s], self.r[n], mm.H(self.A[n][s]))

                if not sp.allclose(M, 0):
                    print "Sanity Fail in calc_B!: B_%u does not satisfy GFC!" % n

            return B
        else:
            return None, 0
Example No. 49
 def _calc_B_l_n(self, n, set_eta=True, l_s_m1=None, l_si_m1=None, r_s=None, r_si=None, Vlh=None):
     if self.q[n] * self.D[n - 1] - self.D[n] > 0:
         if l_s_m1 is None:
             l_s_m1, l_si_m1, r_s, r_si = tm.calc_l_r_roots(self.l[n - 1], self.r[n], 
                                                        zero_tol=self.zero_tol,
                                                        sanity_checks=self.sanity_checks,
                                                        sc_data=('site', n))
         
         if Vlh is None:
             Vlh = tm.calc_Vsh_l(self.A[n], l_s_m1, sanity_checks=self.sanity_checks)
         
         x = self.calc_x_l(n, Vlh, l_s_m1, r_s, l_si_m1, r_si)
         
         if set_eta:
             self.eta_sq[n] = m.adot(x, x)
 
         B = sp.empty_like(self.A[n])
         for s in xrange(self.q[n]):
             B[s] = m.mmul(l_si_m1, m.H(Vlh[s]), x, r_si)
         return B
     else:
         return None
Example No. 50
def cubeXframe(cube, image):
    """
    2D x 3D product
        
    This method takes each cube's frame and performs a simple multiplication of this frame 
    with the flat image inputted. On this multiplication the pixels are multiplied one by one.
        The cube and the frame have to have the same width and heigh.
    
    @param 	cube: a 3D numpy.array representing the data cube.
    @param 	image: a 2D numpy.array representing the image frame.
    
    @return 	result: a 3D numpy.array representing the multiplied cube.
    
    """
    # .: Let us multiply :.
    result = empty_like(cube)
    #print image.shape, cube.shape
    for k in range(cube.shape[0]):
        for i in range(cube.shape[1]):
            for j in range(cube.shape[2]):
                result[k, i, j] = cube[k, i, j] * image[i, j]
    return result
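The triple loop is a broadcast multiply in disguise, since the image aligns with the cube's trailing two axes; a one-line numpy equivalent:

import numpy as np

cube = np.random.rand(10, 4, 5)
image = np.random.rand(4, 5)
result = cube * image[None, :, :]          # broadcasts over the first axis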
Example No. 51
def irg_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    r = arr[:,:,0]
    g = arr[:,:,1]
    b = arr[:,:,2]

    intensity = arr.mean(2)
    lowi = intensity < 0.1*intensity.max()
    
    r[lowi] = 0
    g[lowi] = 0

    denom = intensity.copy()
    denom[denom==0] = 1

    out = sp.empty_like(arr)

    out[:,:,0] = intensity
    out[:,:,1] = r / denom
    out[:,:,2] = g / denom
    
    return out
Example No. 52
def standardize_col(dat,meanonly=False):
    '''
    Mean-impute each column of an array.
    '''
    colmean=st.nanmean(dat)
    if not meanonly:    # note: "~meanonly" would be a bitwise NOT and always truthy
        colstd=st.nanstd(dat)
    else:
        colstd=None
    ncol=dat.shape[1]           
    nmissing=sp.zeros((ncol))    
    datimp=sp.empty_like(dat); datimp[:]=dat
    for c in sp.arange(0,ncol):        
        datimp[sp.isnan(datimp[:,c]),c]=colmean[c] 
        datimp[:,c]=datimp[:,c]-colmean[c]        
        if not meanonly:
            if colstd[c]>1e-6:
                datimp[:,c]=datimp[:,c]/colstd[c]
            else:
                print "warning: colstd=" + colstd[c] + " during normalization"
        nmissing[c]=float(sp.isnan(dat[:,c]).sum())
    fracmissing=nmissing/dat.shape[0]         
    return datimp,fracmissing
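A quick usage sketch of standardize_col (assuming the function above, with scipy imported as sp and scipy.stats as st providing the old nanmean/nanstd; NaNs mark missing entries):

import scipy as sp

dat = sp.array([[1.0, 2.0],
                [sp.nan, 4.0],
                [3.0, 6.0]])
datimp, fracmissing = standardize_col(dat)
print(fracmissing)     # fraction missing per column: [1/3, 0]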
Example No. 53
def irg_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    r = arr[:, :, 0]
    g = arr[:, :, 1]
    b = arr[:, :, 2]

    intensity = arr.mean(2)
    lowi = intensity < 0.1 * intensity.max()

    r[lowi] = 0
    g[lowi] = 0

    denom = intensity.copy()
    denom[denom == 0] = 1

    out = sp.empty_like(arr)

    out[:, :, 0] = intensity
    out[:, :, 1] = r / denom
    out[:, :, 2] = g / denom

    return out
Example No. 54
def solver_2(a, b, np):
    """
      solver optimized for 2x2 matrices and dr discretization points
    """
    # prepare the array for the resulting function
    h = empty_like(a)

    # calculate the determinant
    a_det = a[0, 0] * a[1, 1] - a[1, 0] * a[0, 1]
    if (a_det == 0.0).any():
        # report the first discretization point with a zero determinant
        print(
            "singular matrix, cannot invert: determinant of (1-C) matrix is zero for dr=%u"
            % (a_det == 0.0).argmax())
        exit(1)

    # perform the calculation for every discretization point
    for dr in range(np):
        # calculate the inverse
        a_inv = ones((
            2,
            2,
        )) / a_det[dr]
        a_inv[0, 0] *= a[1, 1, dr]
        a_inv[0, 1] *= -1.0 * a[0, 1, dr]
        a_inv[1, 0] *= -1.0 * a[1, 0, dr]
        a_inv[1, 1] *= a[0, 0, dr]

        # do the calculation
        # using matrix algebra from scipy/numpy
        #h[:,:,dr] = (mat(a_inv[dr]) * mat(b[:,:,dr])) / dens_factor
        # explicitly - is faster
        h[0, 0, dr] = (a_inv[0, 0] * b[0, 0, dr] + a_inv[0, 1] * b[1, 0, dr])
        h[0, 1, dr] = (a_inv[0, 0] * b[0, 1, dr] + a_inv[0, 1] * b[1, 1, dr])
        h[1, 0, dr] = (a_inv[1, 0] * b[0, 0, dr] + a_inv[1, 1] * b[1, 0, dr])
        h[1, 1, dr] = (a_inv[1, 0] * b[0, 1, dr] + a_inv[1, 1] * b[1, 1, dr])

    return (h)
Example No. 55
def rgbwhiten_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    r = arr[:,:,0]
    rmean = r.mean()
    rstd = r.std()
    if rstd == 0: rstd = 1

    g = arr[:,:,1]
    gmean = g.mean()
    gstd = g.std()
    if gstd == 0: gstd = 1

    b = arr[:,:,2]
    bmean = b.mean()
    bstd = b.std()
    if bstd == 0: bstd = 1

    out = sp.empty_like(arr)
    out[:,:,0] = (r - rmean) / rstd
    out[:,:,1] = (g - gmean) / gstd
    out[:,:,2] = (b - bmean) / bstd

    return out
Example No. 56
def rgbwhiten_convert(arr):
    #assert(arr.min()>=0 and arr.max()<=1)

    r = arr[:, :, 0]
    rmean = r.mean()
    rstd = r.std()
    if rstd == 0: rstd = 1

    g = arr[:, :, 1]
    gmean = g.mean()
    gstd = g.std()
    if gstd == 0: gstd = 1

    b = arr[:, :, 2]
    bmean = b.mean()
    bstd = b.std()
    if bstd == 0: bstd = 1

    out = sp.empty_like(arr)
    out[:, :, 0] = (r - rmean) / rstd
    out[:, :, 1] = (g - gmean) / gstd
    out[:, :, 2] = (b - bmean) / bstd

    return out
Example No. 57
def invW_convert(arr):
    
    #assert(arr.min()>=0 and arr.max()<=1)
    
    invE = invE_convert(arr)

    out = sp.empty_like(arr)
    
    intensity = invE[:,:,0]
    rg = invE[:,:,1]
    yb = invE[:,:,2]

    lowi = intensity < 0.1*intensity.max()
    rg[lowi] = 0
    yb[lowi] = 0

    denom = intensity.copy()
    denom[denom==0] = 1

    out[:,:,0] = intensity
    out[:,:,1] = rg / denom
    out[:,:,2] = yb / denom

    return out
Example No. 58
def invW_convert(arr):

    #assert(arr.min()>=0 and arr.max()<=1)

    invE = invE_convert(arr)

    out = sp.empty_like(arr)

    intensity = invE[:, :, 0]
    rg = invE[:, :, 1]
    yb = invE[:, :, 2]

    lowi = intensity < 0.1 * intensity.max()
    rg[lowi] = 0
    yb[lowi] = 0

    denom = intensity.copy()
    denom[denom == 0] = 1

    out[:, :, 0] = intensity
    out[:, :, 1] = rg / denom
    out[:, :, 2] = yb / denom

    return out