Example #1
0
File: misc.py Project: MMaus/mutils
def mpow2(A, n):
    """ 
    Returns the n-th power of A.
    Here, this is computed using eigenvalue decomposition.
    
    ==========
    Parameter:
    ==========
    A : *array*
        the square matrix whose n-th power is to be computed
    n : *integer*
        the power

    ========
    Returns:
    ========
    B : *array*
        B = A^n

    """
     
    D, L = eig(A)
    if isreal(A).all():
        return reduce(dot, [L, diag(D**n), inv(L)]).real
    else:
        return reduce(dot, [L, diag(D**n), inv(L)])
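The identity behind mpow2 is A = L·diag(D)·inv(L), hence A^n = L·diag(D**n)·inv(L). A minimal self-contained check of that identity with plain NumPy (the matrix below is made up for illustration, not from the project):

import numpy as np

A = np.array([[2.0, 1.0],
              [0.0, 3.0]])
D, L = np.linalg.eig(A)                     # A = L @ diag(D) @ inv(L)
A3 = L @ np.diag(D**3) @ np.linalg.inv(L)   # third power via the eigenbasis
print(np.allclose(A3.real, np.linalg.matrix_power(A, 3)))  # True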
Example #3
0
def fProximity(A, B=None, zeroDiag=True):
    ''' Return the proximity (similarity x correlation) as:
    - a 2D nparray of scalars between the 2D nparray vectors of A (diagonal and
      symmetric terms filled with zeros), when B is None
    - a 1D nparray of scalars between a 1D nparray vector and 2D nparray vectors
    - a 2D nparray of scalars between two sets of 2D nparray vectors
    '''
    sA = A.shape
    sB = B.shape if B is not None else None

    if B is None:
        corr = zeros((sA[0], sA[0]))
        for i in range(sA[0]):
            corr[i, i + 1:] = fProximity(A[i], A[i + 1:])
        return corr

    elif A.ndim == 1:
        dif = 1. - abs(A - B).sum(axis=-1) / (1. * sA[0])
        sim = B.dot(A) / (A**2).sum(axis=-1)**(0.5) / (B**
                                                       2).sum(axis=-1)**(0.5)
        return where(isfinite(sim), dif * sim, dif)

    elif B.ndim == 1:
        dif = 1. - abs(A - B).sum(axis=-1) / (1. * sB[0])
        sim = A.dot(B) / (A**2).sum(axis=-1)**(0.5) / (B**
                                                       2).sum(axis=-1)**(0.5)
        return where(isfinite(sim), dif * sim, dif)

    else:
        corr = zeros((sA[0], sB[0]))
        for i in range(sA[0]):
            corr[i] = fProximity(A[i], B)
        return corr - zeroDiag * diag(diag(corr))
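For reference, the 1-D branch above combines two factors: (1 - mean absolute difference) and the cosine similarity of the two vectors. A small plain-NumPy illustration with made-up inputs:

import numpy as np

A = np.array([1.0, 0.0, 1.0])
B = np.array([1.0, 1.0, 0.0])

dif = 1.0 - np.abs(A - B).sum() / len(A)                  # 1 - mean |A - B| = 1/3
sim = A.dot(B) / (np.linalg.norm(A) * np.linalg.norm(B))  # cosine similarity = 0.5
print(dif * sim)                                          # proximity ~ 0.1667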
Example #4
0
    def computeMomentumEvolution(self):
        self.momentumPhase = py.exp(-1j * 2 * py.pi * py.diag(
            self.x) @ py.ones([len(self.x), len(self.k)]) @ py.diag(self.k))
        self.momentumEvolution = self.timeEvolution @ self.momentumPhase
        # normalisation of momentum
        self.momentumEvolution *= self.dx / py.sqrt(2 * py.pi)
        self.momentumDensity = abs(self.momentumEvolution)**2
        X = py.array(py.sum(self.momentumDensity, axis=1)**(-1))[:, py.newaxis]
        # normalisation of momentum density
        self.momentumDensity = self.momentumDensity * \
            py.concatenate(len(self.k) * [X], axis=1) / self.dk
Example #5
0
def find_factors(idat, odat, k = None):
    """
    A routine to compute the main predictors (linear combinations of
    coordinates) in idat to predict odat.

    *Parameters*
        idat: d x n data matrix,
            with n measurements, each of dimension d
        odat: q x n data matrix
             with n measurements, each of dimension q

    *Returns*
        **Depending on whether or not** *k* **is provided, the returned
        value is different**

      * if k is given, compute the first k regressors and return an orthogonal
        matrix that contains the regressors in its columns,
        i.e. reg[:, 0] is the first regressor

      * if k is not given or None, return a d-dimensional vector v(k) whose
        k-th entry gives the fraction of the total predictable variance that
        can be explained using only k regressors.

    **NOTE**


    #. idat and odat must have zero mean
    #. To interpret the regressors, it is advisable for the columns of idat
       to have the same variance
    """
    # transform into z-scores
    u, s, v = svd(idat, full_matrices = False)
    su = dot(diag(1./s), u.T)

    z = dot(su,idat)
    # ! Note that the covariance of z is *NOT* 1, but 1/n; z*z.T = 1 !

    # least-squares regression:
    A = dot(odat, pinv(z))

    uA, sigma_A, vA = svd(A, full_matrices = False)
    if k is None:
        vk = cumsum(sigma_A**2) / sum(sigma_A**2)
        return vk

    else:
    # choose k predictors
        sigma_A1 = sigma_A.copy()
        sigma_A1[k:] = 0
        A1 = reduce(dot, [uA, diag(sigma_A1), vA])
        B = dot(A1, su)
        uB, sigma_B, vB = svd(B, full_matrices = False)
        regs = vB[:k,:].T
        return regs
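A hedged, self-contained walk-through of the k=None branch on synthetic data (sizes and random seed are made up; odat is built to be exactly predictable from idat through a rank-1 map, so the first regressor should explain essentially all of the predictable variance):

import numpy as np
from numpy.linalg import svd, pinv

rng = np.random.default_rng(0)
idat = rng.standard_normal((5, 400))             # d=5 coordinates, n=400 measurements
idat -= idat.mean(axis=1, keepdims=True)         # zero mean, as required by the NOTE
odat = np.outer(rng.standard_normal(2), rng.standard_normal(5)) @ idat  # rank-1 map

u, s, v = svd(idat, full_matrices=False)
z = np.dot(np.diag(1. / s), u.T).dot(idat)       # z-scores, as in the function above
A = odat.dot(pinv(z))
sigma_A = svd(A, compute_uv=False)
vk = np.cumsum(sigma_A**2) / np.sum(sigma_A**2)
print(vk)                                        # first entry ~ 1.0: one regressor suffices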
Example #6
0
    def computeEigenFunctions(self):
        # generate matrix with M x-rows
        XX = (py.ones([len(self.x), self.M]).T @ py.diag(self.x)).T
        argument = 1j * XX @ py.diag(2 * py.pi * self.m)
        eigenfuncs = []
        for l in range(self.M):
            eigenf = py.sum(py.exp(argument) @ py.diag(self.Cq[:, l]),
                            axis=1).T
            eigenf /= py.sqrt(self.Ns)
            eigenfuncs.append(eigenf)
        self.eigenfuncs = eigenfuncs
Example #7
0
    def test_subspace_det_algo1_mimo(self):
        """
        Subspace deterministic algorithm (MIMO).
        """
        ss2 = sysid.StateSpaceDiscreteLinear(A=pl.matrix([[0, 0.1, 0.2],
                                                          [0.2, 0.3, 0.4],
                                                          [0.4, 0.3, 0.2]]),
                                             B=pl.matrix([[1, 0], [0, 1],
                                                          [0, -1]]),
                                             C=pl.matrix([[1, 0, 0], [0, 1,
                                                                      0]]),
                                             D=pl.matrix([[0, 0], [0, 0]]),
                                             Q=pl.diag([0.01, 0.01, 0.01]),
                                             R=pl.diag([0.01, 0.01]),
                                             dt=0.1)
        pl.seed(1234)
        prbs1 = sysid.prbs(1000)
        prbs2 = sysid.prbs(1000)

        def f_prbs_2d(t, x, i):
            "input function"
            #pylint: disable=unused-argument
            i = i % 1000
            return 2 * pl.matrix([prbs1[i] - 0.5, prbs2[i] - 0.5]).T

        tf = 8
        data = ss2.simulate(f_u=f_prbs_2d, x0=pl.matrix([0, 0, 0]).T, tf=tf)
        ss2_id = sysid.subspace_det_algo1(y=data.y,
                                          u=data.u,
                                          f=5,
                                          p=5,
                                          s_tol=0.1,
                                          dt=ss2.dt)
        data_id = ss2_id.simulate(f_u=f_prbs_2d,
                                  x0=pl.matrix(pl.zeros(ss2_id.A.shape[0])).T,
                                  tf=tf)
        nrms = sysid.nrms(data_id.y, data.y)
        self.assertGreater(nrms, 0.9)

        if ENABLE_PLOTTING:
            for i in range(2):
                pl.figure()
                pl.plot(data_id.t.T,
                        data_id.y[i, :].T,
                        label='$y_{:d}$ id'.format(i))
                pl.plot(data.t.T,
                        data.y[i, :].T,
                        label='$y_{:d}$ true'.format(i))
                pl.legend()
                pl.grid()
Example #8
0
def reduceDim(fullmat, n=1):
    """
    reduces the dimension of a d x d - matrix to a (d-n)x(d-n) matrix, 
    keeping the largest eigenvalues unchanged.
    """
    u, s, v = svd(fullmat)
    return dot(u[:-n, :-n], dot(diag(s[:-n]), v[:-n, :-n]))
Example #9
0
File: misc.py Project: MMaus/mutils
def fBM_nd(dims, H, return_mat = False, use_eig_ev = True):
    """
    creates fractional Brownian motion
    parameters: dims is a tuple of the shape of the sample path (nxd); 
                H: Hurst exponent
    this is the slow version of fBM. It might, however, be more precise than
    fBM, however - sometimes, the matrix square root has a problem, which might
    induce inaccuracy    
    use_eig_ev: use eigenvalue decomposition for matrix square root computation
    (faster)
    """
    n = dims[0]
    d = dims[1]
    Gamma = zeros((n,n))
    print ('building ...\n')
    for t in arange(n):
        for s in arange(n):
            Gamma[t,s] = .5*((s+1)**(2.*H) + (t+1)**(2.*H) - abs(t-s)**(2.*H))
    print('rooting ...\n')    
    if use_eig_ev:
        ev,ew = eig(Gamma.real)
        Sigma = dot(ew, dot(diag(sqrt(ev)),ew.T) )
    else:
        Sigma = sqrtm(Gamma)
    if return_mat:
        return Sigma
    v = randn(n,d)
    return dot(Sigma,v)
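Gamma above is the fBM covariance Cov(B_H(t), B_H(s)) = 0.5*(t**(2H) + s**(2H) - |t-s|**(2H)); for H = 0.5 this reduces to min(t, s), i.e. ordinary Brownian motion. A short vectorised sanity check of that special case (sizes made up):

import numpy as np

n, H = 5, 0.5
t = np.arange(1, n + 1)[:, None]       # times 1..n, matching the loop indices above
s = np.arange(1, n + 1)[None, :]
Gamma = 0.5 * (s**(2. * H) + t**(2. * H) - np.abs(t - s)**(2. * H))
print(np.allclose(Gamma, np.minimum(t, s)))   # True: H=0.5 gives standard Brownian motion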
Example #11
0
    def computeBlochFunctions(self):
        # compute the Bloch functions at the center of the Brillouin zone
        q = 0
        # construct the Bloch Hamiltonian
        H1 = py.diag((q - self.m)**2)
        H2 = py.diag((self.M - 1) * [self.Vq], 1)
        H3 = py.diag((self.M - 1) * [self.Vq], -1)
        H = H1 + H2 + H3
        # compute the eigenfunctions and eigenenergies
        Eq, Cq = py.eigh(H)
        # normalize eigenenergies with respect to nu_L
        Eq = (Eq + self.s / 2) * self.nu_L / 4
        self.Eq = Eq
        self.Cq = Cq
        # the omegas are
        self.omegas = (Eq * 2 * py.pi * self.Texp)[:, py.newaxis]
Example #13
0
    def computeTimeEvolution(self):
        self.computePhaseFactor()
        self.timePhase = py.exp(-1j * self.t.T @ self.omegas.T)
        self.timeEvolution = self.timePhase \
            @ py.diag(self.phaseFactor) \
            @ self.eigenfuncs
        self.density = abs(self.timeEvolution)**2
Example #14
0
    def sample(self, model, evidence):
        z = evidence['z']
        T, surfaces, sigma_g, sigma_h = [evidence[var] for var in ['T', 'surfaces', 'sigma_g', 'sigma_h']]
        mu_h, phi, sigma_z_g, sigma_z_h = [model.known_params[var] for var in ['mu_h', 'phi', 'sigma_z_g', 'sigma_z_h']]
        prior_mu_g, prior_cov_g = [model.hyper_params[var] for var in ['prior_mu_g', 'prior_cov_g']]
        prior_mu_h, prior_cov_h = [model.hyper_params[var] for var in ['prior_mu_h', 'prior_cov_h']]
        n = len(g)

        y = ma.asarray(ones((n, 2))*nan)
        if sum(T==1) > 0:
            y[T==1, 0] = z[T==1]
        if sum(T==2) > 0:
            y[T==2, 1] = z[T==2]
        y[isnan(y)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean=[prior_mu_g[0], prior_mu_h[0]]
        kalman.initial_state_covariance=diag([prior_cov_g[0,0], prior_cov_h[0,0]])
        kalman.transition_matrices=[[1, 0], [0, phi]]
        kalman.transition_offsets =ones((n, 2))*[0, mu_h*(1-phi)]
        kalman.transition_covariance=[[sigma_g**2, 0], [0, sigma_h**2]]
        kalman.observation_matrices=[[1, 0], [1, 1]]
        kalman.observation_covariance=[[sigma_z_g**2, 0], [0, sigma_z_h**2]]
        sampled_surfaces = forward_filter_backward_sample(kalman, y)

        return sampled_surfaces
Example #15
0
    def computeBandStructure(self):
        bandsIndex = range(self.M)
        bands = {}
        for i in bandsIndex:
            bands[i] = py.zeros((len(self.q)))

        H2 = py.diag((self.M - 1) * [self.Vq], 1)
        H3 = py.diag((self.M - 1) * [self.Vq], -1)

        for i, q in enumerate(self.q):
            H1 = py.diag((q - self.m)**2)
            H = H1 + H2 + H3
            Eq, Cq = py.eigh(H)
            Eq = (Eq + self.s / 2) * self.nu_L / 4
            for j in bandsIndex:
                bands[j][i] = Eq[j]
        self.bands = bands
Example #16
0
    def computePhaseFactor(self):
        # phase picked up by the Fourier coefficients due to a translation along the x-axis
        phase = py.diag(py.exp(-1j * self.angle * 2 * py.pi / 180 * self.m))
        phaseFactor = []
        for l in range(self.M):
            # dot product between Bloch function l and the shifted one
            phaseFactor.append(self.Cq[:, l].T @ phase @ self.Cq[:, 0])
        self.phaseFactor = phaseFactor
Example #18
0
def my_calibration2(sz):
    """
    Calibration function for the camera (iPhone4) used in this example.
    """
    # row,col = sz
    row, col = sz
    fx = 3538*col/4032
    fy = 3605*row/2268
    K = pylab.diag([fx,fy,1])
    K[0,2] = 0.5*col
    K[1,2] = 0.5*row
    # K[0, 2] = 0.5 * row
    # K[1, 2] = 0.5 * col
    return K
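K here is the usual pinhole intrinsic matrix: diag(fx, fy, 1) with the principal point placed at the image centre. A hedged usage sketch, assuming the function above (and its pylab import) is in scope; the image size is simply the reference resolution appearing in the scaling factors (4032 x 2268):

sz = (2268, 4032)        # (rows, cols), matching the constants used above
K = my_calibration2(sz)
print(K)
# approximately:
# [[3538.    0. 2016.]
#  [   0. 3605. 1134.]
#  [   0.    0.    1.]]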
Example #19
0
def fit_quality(time, parameters, noise, repetitions):
    """
    Apply the fitting routine a number of times, as given by
    `repetitions`, and return information about the fit performance.
    """
    results = []
    errors = []

    from numpy.random import seed

    alpha_psp = AlphaPSP()

    for _ in range(repetitions):
        seed()

        value = noisy_psp(time=time, noise=noise, **parameters)
        fit_result = fit(alpha_psp,
                         time,
                         value,
                         noise,
                         fail_on_negative_cov=[True, True, True, False, False])
        if fit_result is not None:
            result, error, chi2, success = fit_result
            if chi2 < 1.5 and success:
                print(chi2, result)
                results.append(result)
                errors.append(error)
        else:
            print("fit failed:", end=' ')
            print(fit_result)

    keys = alpha_psp.parameter_names()

    result_dict = dict(((key, []) for key in keys))
    error_dict = dict(((key, []) for key in keys))

    for result in results:
        for r, key in zip(result, keys):
            result_dict[key].append(r)

    for error in errors:
        for r, key in zip(p.diag(error), keys):
            error_dict[key].append(p.sqrt(r))
            if p.isnan(p.sqrt(r)):
                print("+++++++", r)

    return ([p.mean(result_dict[key])
             for key in keys], [p.std(result_dict[key]) for key in keys],
            len(results), keys, [result_dict[key] for key in keys],
            [error_dict[key] for key in keys])
Example #21
0
File: misc.py Project: MMaus/mutils
def pseudoSpect(A, npts=200, s=2., gridPointSelect=100, verbose=True,
                lstSqSolve=True):
    """ 
    original code from http://www.cs.ox.ac.uk/projects/pseudospectra/psa.m
    % psa.m - Simple code for 2-norm pseudospectra of given matrix A.
    %         Typically about N/4 times faster than the obvious SVD method.
    %         Comes with no guarantees!   - L. N. Trefethen, March 1999.
    
    parameter: A: the matrix to analyze
               npts: number of points at the grid
               s: axis limits (-s ... +s)
               gridPointSelect: ???
               verbose: prints progress messages
               lstSqSolve: if true, use least squares in algorithm where
                  solve could be used (probably) instead. (replacement for
                  ldivide in MatLab)
    """
    
    from scipy.linalg import schur, triu
    from pylab import (meshgrid, norm, dot, zeros, eye, diag, find,  linspace,                       
                       arange, isreal, inf, ones, lstsq, solve, sqrt, randn,
                       eig, all)

    ldiv = ((lambda M1, M2: lstsq(M1, M2)[0]) if lstSqSolve
            else (lambda M1, M2: solve(M1, M2)))

    def planerot(x):
        '''
        return (G,y)
        with a matrix G such that y = G*x with y[1] = 0    
        '''
        G = zeros((2,2))
        xn = x / norm(x)
        G[0,0] = xn[0]
        G[1,0] = -xn[1]
        G[0,1] = xn[1]
        G[1,1] = xn[0]
        return G, dot(G,x)

    xmin = -s
    xmax = s
    ymin = -s
    ymax = s;  
    x = linspace(xmin,xmax,npts,endpoint=False)
    y = linspace(ymin,ymax,npts,endpoint=False)
    xx,yy = meshgrid(x,y)
    zz = xx + 1j*yy
     
    #% Compute Schur form and plot eigenvalues:
    T,Z = schur(A,output='complex');
        
    T = triu(T)
    eigA = diag(T)
    
    # Reorder Schur decomposition and compress to interesting subspace:
    select = find( eigA.real > -250)           # % <- ALTER SUBSPACE SELECTION
    n = len(select)
    for i in arange(n):
        for k in arange(select[i]-1,i,-1): #:-1:i
            G = planerot([T[k,k+1],T[k,k]-T[k+1,k+1]] )[0].T[::-1,::-1]
            J = slice(k,k+2)
            T[:,J] = dot(T[:,J],G)
            T[J,:] = dot(G.T,T[J,:])
          
    T = triu(T[:n,:n])
    I = eye(n);
    
    # Compute resolvent norms by inverse Lanczos iteration and plot contours:
    sigmin = inf*ones((len(y),len(x)));
    #A = eye(5)
    niter = 0
    for i in arange(len(y)): # 1:length(y)        
        if all(isreal(A)) and (ymax == -ymin) and (i > len(y)/2):
            sigmin[i,:] = sigmin[len(y) - i,:]
        else:
            for jj in arange(len(x)):
                z = zz[i,jj]
                T1 = z * I - T 
                T2 = T1.conj().T
                if z.real < gridPointSelect:    # <- ALTER GRID POINT SELECTION
                    sigold = 0
                    qold = zeros((n,1))
                    beta = 0
                    H = zeros((100,100))                
                    q = randn(n,1) + 1j*randn(n,1)                
                    while norm(q) < 1e-8:
                        q = randn(n,1) + 1j*randn(n,1)                
                    q = q/norm(q)
                    for k in arange(99):
                        v = ldiv(T1,(ldiv(T2,q))) - dot(beta,qold)
                        #stop
                        alpha = dot(q.conj().T, v).real
                        v = v - alpha*q
                        beta = norm(v)
                        qold = q
                        q = v/beta
                        H[k+1,k] = beta
                        H[k,k+1] = beta
                        H[k,k] = alpha
                        if (alpha > 1e100):
                            sig = alpha 
                        else:
                            sig = max(abs(eig(H[:k+1,:k+1])[0]))
                        if (abs(sigold/sig-1) < .001) or (sig < 3 and k > 2):
                            break
                        sigold = sig
                        niter += 1
                        #print 'niter = ', niter
                
                  #%text(x(jj),y(i),num2str(k))         % <- SHOW ITERATION COUNTS
                    sigmin[i,jj] = 1./sqrt(sig);
                #end
                #  end
        if verbose:
            print('finished line ', str(i), ' out of ', str(len(y)))
    
    return x,y,sigmin
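What sigmin stores at each grid point z is (approximately) the smallest singular value of zI - A, i.e. the reciprocal of the resolvent norm used for 2-norm pseudospectra; the inverse-Lanczos loop is just a fast way to compute it. A tiny cross-check against the "obvious SVD method" mentioned in the header comment (matrix and z made up):

import numpy as np

A = np.array([[0.0, 1.0],
              [-2.0, -0.3]])
z = 0.5 + 0.5j
smin = np.linalg.svd(z * np.eye(2) - A, compute_uv=False).min()
print(smin)   # sigma_min(zI - A) = 1 / ||(zI - A)^{-1}||_2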
Example #22
0
py.errorbar(T, L, dL, dT, '.', markersize='3')


## fit function
def retta(x, a, b):
    return a * x + b


## find I_th with a fit
m = []
dm = []
q = []
dq = []
out = fit_curve(retta, T, L, dy=1, p0=[1, 1], absolute_sigma=True)
par = out.par
cov = out.cov
err = py.sqrt(py.diag(cov))
m_fit = par[0]
q_fit = par[1]
dm_fit = err[0]
dq_fit = err[1]
print('m = %s  q = %s' % (xe(m_fit, dm_fit), xe(q_fit, dq_fit)))
x = py.linspace(T[0] - 1, T[len(T) - 1] + 1, 100)
py.plot(x,
        retta(x, *par),
        linewidth=1,
        label='m = 0.22(2) nm/°C \n q = 775.4(5) nm')
py.legend(fontsize='large')
py.ylabel(r'$\lambda$ [nm]')
py.xlabel('T [°C]')
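fit_curve above is a local helper (not shown here); the one-sigma parameter errors are read off the covariance matrix in the usual way, err = sqrt(diag(cov)). A hedged equivalent using scipy.optimize.curve_fit on synthetic data:

import numpy as np
from scipy.optimize import curve_fit

def retta(x, a, b):
    return a * x + b

T = np.array([20., 25., 30., 35., 40.])
L = retta(T, 0.22, 775.4) + np.random.default_rng(1).normal(0., 1., T.size)

par, cov = curve_fit(retta, T, L, p0=[1, 1], sigma=np.ones_like(T), absolute_sigma=True)
err = np.sqrt(np.diag(cov))   # one-sigma errors on (m, q), as in err = py.sqrt(py.diag(cov)) above
print(par, err)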
Example #23
0
def fit(psp_shape,
        time,
        voltage,
        error_estimate,
        maxcall=1000,
        maximal_red_chi2=2.0,
        fail_on_negative_cov=None):
    """
    psp_shape : object
        PSPShape instance

    time : numpy.ndarray of floats
        numpy array of data acquisition times

    voltage : numpy.ndarray
        numpy array of voltage values

    error_estimate : float
        estimate for the standard deviation of an individual data point.

    maxcall : int
        maximal number of calls to the fit routine

    fail_on_negative_cov : list of bool
        per-parameter flags; a negative covariance diagonal entry for a
        flagged parameter marks the fit as failed

    returns : tuple
        (fit_results
         error_estimates
         chi2_per_dof
         success)
    """
    assert len(time) == len(voltage)

    initial_values = psp_shape.initial_fit_values(time, voltage)

    result = optimize.leastsq(
        lambda param: (psp_shape(time, *param) - voltage),
        [initial_values[key] for key in psp_shape.parameter_names()],
        full_output=1,
        maxfev=maxcall)

    resultparams, cov_x, _, _, ier = result

    ndof = len(time) - len(psp_shape.parameter_names())
    fit_voltage = psp_shape(time, *result[0])
    red_chi2 = sum(((fit_voltage - voltage)) ** 2) \
               / (error_estimate ** 2 * ndof)

    fail_neg = p.any(p.diag(cov_x) < 0)
    if fail_on_negative_cov is not None:
        fail_neg = p.any(p.logical_and(
            p.diag(cov_x) < 0, fail_on_negative_cov))

    cov_x *= error_estimate**2

    success = ((not fail_neg) and (ier in [1, 2, 3, 4])
               and (red_chi2 <= maximal_red_chi2))

    processed, processed_cov = psp_shape.process_fit_results(
        resultparams, cov_x)

    return processed, processed_cov, red_chi2, success
Example #24
0
def subspace_det_algo1(y, u, f, p, s_tol, dt):
    """
    Subspace Identification for deterministic systems
    deterministic algorithm 1 from (1)

    assuming a system of the form:

    x(k+1) = A x(k) + B u(k)
    y(k)   = C x(k) + D u(k)

    and given y and u.

    Find A, B, C, D

    See page 52 of (1)

    (1) Subspace Identification for Linear
    Systems, by Van Overschee and Moor. 1996
    """
    #pylint: disable=too-many-arguments, too-many-locals
    # for this algorithm, we need future and past
    # to be more than 1
    assert f > 1
    assert p > 1

    # setup matrices
    y = pl.matrix(y)
    n_y = y.shape[0]
    u = pl.matrix(u)
    n_u = u.shape[0]
    w = pl.vstack([y, u])
    n_w = w.shape[0]

    # make sure the input is column vectors
    assert y.shape[0] < y.shape[1]
    assert u.shape[0] < u.shape[1]

    W = block_hankel(w, f + p)
    U = block_hankel(u, f + p)
    Y = block_hankel(y, f + p)

    W_p = W[:n_w*p, :]
    W_pp = W[:n_w*(p+1), :]

    Y_f = Y[n_y*f:, :]
    U_f = U[n_y*f:, :]

    Y_fm = Y[n_y*(f+1):, :]
    U_fm = U[n_u*(f+1):, :]

    # step 1, calculate the oblique projections
    #------------------------------------------
    # Y_p = G_i Xd_p + Hd_i U_p
    # After the oblique projection, U_p component is eliminated,
    # without changing the Xd_p component:
    # Proj_perp_(U_p) Y_p = W1 O_i W2 = G_i Xd_p
    O_i = Y_f*project_oblique(U_f, W_p)
    O_im = Y_fm*project_oblique(U_fm, W_pp)

    # step 2, calculate the SVD of the weighted oblique projection
    #------------------------------------------
    # given: W1 O_i W2 = G_i Xd_p
    # want to solve for G_i, but know product, and not Xd_p
    # so can only find Xd_p up to a similarity transformation
    W1 = pl.matrix(pl.eye(O_i.shape[0]))
    W2 = pl.matrix(pl.eye(O_i.shape[1]))
    U0, s0, VT0 = pl.svd(W1*O_i*W2)  #pylint: disable=unused-variable

    # step 3, determine the order by inspecting the singular
    #------------------------------------------
    # values in S and partition the SVD accordingly to obtain U1, S1
    #print s0
    n_x = pl.find(s0/s0.max() > s_tol)[-1] + 1
    U1 = U0[:, :n_x]
    # S1 = pl.matrix(pl.diag(s0[:n_x]))
    # VT1 = VT0[:n_x, :n_x]

    # step 4, determine Gi and Gim
    #------------------------------------------
    G_i = W1.I*U1*pl.matrix(pl.diag(pl.sqrt(s0[:n_x])))
    G_im = G_i[:-n_y, :]

    # step 5, determine Xd_ip and Xd_p
    #------------------------------------------
    # only know Xd up to a similarity transformation
    Xd_i = G_i.I*O_i
    Xd_ip = G_im.I*O_im

    # step 6, solve the set of linear eqs
    # for A, B, C, D
    #------------------------------------------
    Y_ii = Y[n_y*p:n_y*(p+1), :]
    U_ii = U[n_u*p:n_u*(p+1), :]

    a_mat = pl.matrix(pl.vstack([Xd_ip, Y_ii]))
    b_mat = pl.matrix(pl.vstack([Xd_i, U_ii]))
    ss_mat = a_mat*b_mat.I
    A_id = ss_mat[:n_x, :n_x]
    B_id = ss_mat[:n_x, n_x:]
    assert B_id.shape[0] == n_x
    assert B_id.shape[1] == n_u
    C_id = ss_mat[n_x:, :n_x]
    assert C_id.shape[0] == n_y
    assert C_id.shape[1] == n_x
    D_id = ss_mat[n_x:, n_x:]
    assert D_id.shape[0] == n_y
    assert D_id.shape[1] == n_u

    if pl.matrix_rank(C_id) == n_x:
        T = C_id.I # try to make C identity, want it to look like state feedback
    else:
        T = pl.matrix(pl.eye(n_x))

    Q_id = pl.zeros((n_x, n_x))
    R_id = pl.zeros((n_y, n_y))
    sys = ss.StateSpaceDiscreteLinear(
        A=T.I*A_id*T, B=T.I*B_id, C=C_id*T, D=D_id,
        Q=Q_id, R=R_id, dt=dt)
    return sys
Example #25
0
    def fit_exponential(
            self, 
            tstart       = 0.0, 
            tend         = None, 
            guess        = dict( l0=5.0, a0=1.0, b=0.0 ), 
            num_exp      = None,
            verbose      = True,
            deconvolve   = False,
            fixed_params = [None], 
            curve_num=0 ):
        """
        fit a function of exponentials to a single curve of the file
        (my files only have one curve at this point anyway,
        curve 0). 
        The parameter num_exp (default is 1) defines the number of
        exponentials in the function to be fitted.
        num_exp=1 yields:
        f(t) = a0*exp(-t/l0) + b
        where l0 is the lifetime and a0 and b are constants,
        and we fit over the range from tstart to tend.
        You don't have to pass this parameter anymore; just pass an initial guess and
        the number of parameters passed will determine the type of model used.
        
        If tend==None, we fit until the end of the curve.
        
        If num_exp > 1, you will need to modify the initial
        parameters for the fit (i.e. pass the method an explicit `guess`
        parameter) because the default has only three parameters
        but you will need two additional parameters for each additional
        exponential (another lifetime and another amplitude) to describe
        a multi-exponential fit. 
        For num_exp=2:
        f(t) = a1*exp(-t/l1) + a0*exp(-t/l0) + b
        
        and for num_exp=3:
        f(t) = a2*exp(-t/l2) + a1*exp(-t/l1) + a0*exp(-t/l0) + b
        
        verbose=True (default) results in printing of fitting results to terminal.
        
        """
        self.fitstart = tstart
        self.deconvolved = deconvolve
        tpulse = 1.0e9/self.curveheaders[0]['InpRate0'] # avg. time between pulses, in ns

        if num_exp is None:
            num_exp = (1 + int(guess.has_key('l1')) +
                     int(guess.has_key('l2')) +
                     int(guess.has_key('l3')) +
                     int(guess.has_key('l4')))
            num_a   = (1 + int(guess.has_key('a1')) +
                     int(guess.has_key('a2')) +
                     int(guess.has_key('a3')) +
                     int(guess.has_key('a4')))
            if num_exp != num_a:
                raise ValueError("Missing a parameter! Unequal number of lifetimes and amplitudes.")

        keylist = [ "l0", "a0", "b" ]
        errlist = [ "l0_err", "a0_err" ]
        if num_exp == 2:
            keylist = [ "l1", "a1", "l0", "a0", "b" ]
            errlist = [ "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 3 and not guess.has_key('t_ag') and not guess.has_key('t_d3'):
            keylist = [ "l2", "a2", "l1", "a1", "l0", "a0", "b" ]
            errlist = [ "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 3 and guess.has_key('t_ag'):
            keylist = [ "l2", "a2", "l1", "a1", "l0", "a0", "t_ag" ]
            errlist = [ "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 3 and guess.has_key('t_d3'):
            keylist = [ "l2", "a2", "l1", "a1", "l0", "a0", "t_d3" ]
            errlist = [ "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 4:
            keylist = [ "l3", "a3", "l2", "a2", "l1", "a1", "l0", "a0", "b" ]
            errlist = [ "l3_err", "a3_err", "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
        elif num_exp == 5:
            keylist = [ "l4", "a4", "l3", "a3", "l2", "a2", "l1", "a1", "l0", "a0", "b" ]
            errlist = [ "l4_err", "a4_err", "l3_err", "a3_err", "l2_err", "a2_err", "l1_err", "a1_err", "l0_err", "a0_err" ]
                    
                    
        if deconvolve==False:
            params = [ guess[key] for key in keylist ]
            free_params = [ i for i,key in enumerate(keylist) if not key in fixed_params ]
            initparams = [ guess[key] for key in keylist if not key in fixed_params ]
            def f(t, *args ):
                for i,arg in enumerate(args): params[ free_params[i] ] = arg
                local_params = params[:]
                b = local_params.pop(-1)
                result = pylab.zeros(len(t))
                for l,a in zip(params[::2],params[1::2]):
                    result += abs(a)*pylab.exp(-(t-tstart)/abs(l))
                return result+b

        else:
            raise NameError("Deconvolution with this module is not kept current. Use FastFit module from fit directory instead.")
            if self.irf == None: raise AttributeError("No detector trace!!! Use self.set_detector() method.")
            t0 = tstart
            tstart = 0.0
            keylist.append( "tshift" )
            params = [ guess[key] for key in keylist ]
            free_params = [ i for i,key in enumerate(keylist) if not key in fixed_params ]
            initparams = [ guess[key] for key in keylist if not key in fixed_params ]
            def f( t, *args ):
                for i,arg in enumerate(args): params[ free_params[i] ] = arg
                tshift = params[-1]
                ideal = fmodel( t, *args )
                irf = cspline1d_eval( self.irf_generator, t-tshift, dx=self.irf_dt, x0=self.irf_t0 )
                convoluted = pylab.real(pylab.ifft( pylab.fft(ideal)*pylab.fft(irf) )) # very small imaginary anyway
                return convoluted

            def fmodel( t, *args ):
                for i,arg in enumerate(args): params[ free_params[i] ] = arg
                local_params = params[:]
                tshift = local_params.pop(-1)
                if guess.has_key('t_ag'):
                    t_ag = abs(local_params.pop(-1))
                elif guess.has_key('t_d3'):
                    t_d3 = abs(local_params.pop(-1))
                elif guess.has_key('a_fix'):
                    scale = local_params.pop(-1)
                else:
                    b = local_params.pop(-1)
                    
                result = pylab.zeros(len(t))
                for l,a in zip(local_params[::2],local_params[1::2]):
                    if guess.has_key('t_ag'): l = 1.0/(1.0/l + 1.0/t_ag)
                    if guess.has_key('t_d3'): l *= t_d3
                    if guess.has_key('a_fix'): a *= scale
                    result += abs(a)*pylab.exp(-t/abs(l))/(1.0-pylab.exp(-tpulse/abs(l)))
                return result
                

        istart = pylab.find( self.t[curve_num] >= tstart )[0]
        if tend is not None:
            iend = pylab.find( self.t[curve_num] <= tend )[-1]
        else:
            iend = len(self.t[curve_num])

        # sigma (std dev.) is equal to sqrt of intensity, see
        # Lakowicz, principles of fluorescence spectroscopy (2006)
        # sigma gets inverted to find a weight for leastsq, so avoid zero
        # and imaginary weight doesn't make sense.
        trace_scaling = self.curves[0].max()/self.raw_curves[0].max()
        sigma = pylab.sqrt(self.raw_curves[curve_num][istart:iend]*trace_scaling) # use raw curves for actual noise, scale properly
        self.bestparams, self.pcov = curve_fit( f, self.t[curve_num][istart:iend],
                                        self.curves[curve_num][istart:iend],
                                        p0=initparams,
                                        sigma=sigma)

        if pylab.size(self.pcov) > 1 and len(pylab.find(self.pcov == pylab.inf))==0:
            self.stderr = pylab.sqrt( pylab.diag(self.pcov) ) # is this true?
        else:
            self.stderr = [pylab.inf]*len(guess)
            
        stderr = [numpy.NaN]*len(params)
        for i,p in enumerate(self.bestparams):
            params[ free_params[i] ] = p
            stderr[ free_params[i] ] = self.stderr[i]
        self.stderr = stderr

        self.fitresults = dict()
        keys = keylist[:]
        stderr = stderr[:]
        p = params[:]
        if deconvolve:
            tshift = p.pop(-1)
            self.fitresults['tshift'] = tshift
            tshift_err = stderr.pop(-1)
            self.fitresults['tshift_err'] = tshift_err
            keys.pop(-1)
            self.fitresults['irf_dispersion'] = self.irf_dispersion

        b = p.pop(-1)
        self.fitresults['b'] = b
        b_err = stderr.pop(-1)
        self.fitresults['b_err'] = b_err
        keys.pop(-1)
        self.lifetime = [ abs(l) for l in p[::2] ]
        for l,a,lkey,akey in zip(p[::2],p[1::2],keys[::2],keys[1::2]):
            if guess.has_key('t_ag'): l = 1.0/(1.0/l + 1.0/b)
            if guess.has_key('t_d3'): l *= b
            if guess.has_key('a_fix'): a *= b
            self.fitresults[lkey] = abs(l)
            self.fitresults[akey] = abs(a)
        for l,a,lkey,akey in zip(stderr[::2],stderr[1::2],errlist[::2],errlist[1::2]):
            self.fitresults[lkey] = l
            self.fitresults[akey] = a
        self.fitresults['l0_int'] = self.fitresults['l0']*self.fitresults['a0']

        if num_exp > 1: self.fitresults['l1_int'] = self.fitresults['l1']*self.fitresults['a1']
        if num_exp > 2: self.fitresults['l2_int'] = self.fitresults['l2']*self.fitresults['a2']
        if num_exp > 3: self.fitresults['l3_int'] = self.fitresults['l3']*self.fitresults['a3']
        if num_exp > 4: self.fitresults['l4_int'] = self.fitresults['l4']*self.fitresults['a4']

        self.bestfit = f( self.t[curve_num][istart:iend], *self.bestparams )
        if deconvolve: self.model = fmodel( self.t[curve_num][istart:iend], *self.bestparams )
        
        Chi2 = pylab.sum( (self.bestfit - self.curves[0][istart:iend])**2 / sigma**2 )
        #Chi2 *= self.raw_curves[0].max()/self.curves[0].max() # undo any scaling
        mean_squares = pylab.mean( (self.bestfit - self.curves[0][istart:iend])**2 )
        degrees_of_freedom = len(self.bestfit) - len(free_params)
        self.fitresults['MSE'] = mean_squares/degrees_of_freedom
        self.fitresults['ReducedChi2'] = Chi2/degrees_of_freedom

        if verbose:
            print "Fit results: (Reduced Chi2 = %.3E)" % (self.fitresults['ReducedChi2'])
            print "             (MSE = %.3E)" % (self.fitresults['MSE'])
            print "  Offset/t_ag/scale = %.3f +-%.3e" % (self.fitresults['b'], self.fitresults['b_err'])
            print "  l0=%.3f +-%.3f ns, a0=%.3e +-%.3e" % (self.fitresults['l0'],
                                                            self.fitresults['l0_err'],
                                                            self.fitresults['a0'],
                                                            self.fitresults['a0_err'])
            if num_exp > 1:
                print "  l1=%.3f +-%.3f ns, a1=%.3e +-%.3e" % (self.fitresults['l1'],
                                                            self.fitresults['l1_err'],
                                                            self.fitresults['a1'],
                                                            self.fitresults['a1_err'])
            if num_exp > 2:
                print "  l2=%.3f +-%.3f ns, a2=%.3e +-%.3e" % (self.fitresults['l2'],
                                                            self.fitresults['l2_err'],
                                                            self.fitresults['a2'],
                                                            self.fitresults['a2_err'])
            if num_exp > 3:
                print "  l3=%.3f +-%.3f ns, a3=%.3e +-%.3e" % (self.fitresults['l3'],
                                                            self.fitresults['l3_err'],
                                                            self.fitresults['a3'],
                                                            self.fitresults['a3_err'])
            if num_exp > 4:
                print "  l4=%.3f +-%.3f ns, a4=%.3e +-%.3e" % (self.fitresults['l4'],
                                                            self.fitresults['l4_err'],
                                                            self.fitresults['a4'],
                                                            self.fitresults['a4_err'])
            print " "

        self.has_fit = True
Example #26
0
def subspace_det_algo1(y, u, f, p, s_tol, dt):
    """
    Subspace Identification for deterministic systems
    algorithm 1 from (1)

    assuming a system of the form:

    x(k+1) = A x(k) + B u(k)
    y(k)   = C x(k) + D u(k)

    and given y and u.

    Find A, B, C, D

    See page 52 of (1)

    (1) Subspace Identification for Linear
    Systems, by Van Overschee and Moor. 1996
    """
    # pylint: disable=too-many-arguments, too-many-locals
    # for this algorithm, we need future and past
    # to be more than 1
    assert f > 1
    assert p > 1

    # setup matrices
    y = np.matrix(y)
    n_y = y.shape[0]
    u = np.matrix(u)
    n_u = u.shape[0]
    w = pl.vstack([y, u])
    n_w = w.shape[0]

    # make sure the input is column vectors
    assert y.shape[0] < y.shape[1]
    assert u.shape[0] < u.shape[1]

    W = block_hankel(w, f + p)
    U = block_hankel(u, f + p)
    Y = block_hankel(y, f + p)

    W_p = W[:n_w*p, :]
    W_pp = W[:n_w*(p+1), :]

    Y_f = Y[n_y*f:, :]
    U_f = U[n_y*f:, :]

    Y_fm = Y[n_y*(f+1):, :]
    U_fm = U[n_u*(f+1):, :]

    # step 1, calculate the oblique projections
    # ------------------------------------------
    # Y_p = G_i Xd_p + Hd_i U_p
    # After the oblique projection, U_p component is eliminated,
    # without changing the Xd_p component:
    # Proj_perp_(U_p) Y_p = W1 O_i W2 = G_i Xd_p
    O_i = Y_f*project_oblique(U_f, W_p)
    O_im = Y_fm*project_oblique(U_fm, W_pp)

    # step 2, calculate the SVD of the weighted oblique projection
    # ------------------------------------------
    # given: W1 O_i W2 = G_i Xd_p
    # want to solve for G_i, but know product, and not Xd_p
    # so can only find Xd_p up to a similarity transformation
    W1 = np.matrix(pl.eye(O_i.shape[0]))
    W2 = np.matrix(pl.eye(O_i.shape[1]))
    U0, s0, VT0 = pl.svd(W1*O_i*W2)  # pylint: disable=unused-variable

    # step 3, determine the order by inspecting the singular
    # ------------------------------------------
    # values in S and partition the SVD accordingly to obtain U1, S1
    # print s0
    n_x = pl.where(s0/s0.max() > s_tol)[0][-1] + 1
    U1 = U0[:, :n_x]
    # S1 = np.matrix(pl.diag(s0[:n_x]))
    # VT1 = VT0[:n_x, :n_x]

    # step 4, determine Gi and Gim
    # ------------------------------------------
    G_i = W1.I*U1*np.matrix(pl.diag(pl.sqrt(s0[:n_x])))
    G_im = G_i[:-n_y, :]  # check

    # step 5, determine Xd_ip and Xd_p
    # ------------------------------------------
    # only know Xd up to a similarity transformation
    Xd_i = G_i.I*O_i
    Xd_ip = G_im.I*O_im

    # step 6, solve the set of linear eqs
    # for A, B, C, D
    # ------------------------------------------
    Y_ii = Y[n_y*p:n_y*(p+1), :]
    U_ii = U[n_u*p:n_u*(p+1), :]

    a_mat = np.matrix(pl.vstack([Xd_ip, Y_ii]))
    b_mat = np.matrix(pl.vstack([Xd_i, U_ii]))
    ss_mat = a_mat*b_mat.I
    A_id = ss_mat[:n_x, :n_x]
    B_id = ss_mat[:n_x, n_x:]
    assert B_id.shape[0] == n_x
    assert B_id.shape[1] == n_u
    C_id = ss_mat[n_x:, :n_x]
    assert C_id.shape[0] == n_y
    assert C_id.shape[1] == n_x
    D_id = ss_mat[n_x:, n_x:]
    assert D_id.shape[0] == n_y
    assert D_id.shape[1] == n_u

    if np.linalg.matrix_rank(C_id) == n_x:
        T = C_id.I  # try to make C identity, want it to look like state feedback
    else:
        T = np.matrix(pl.eye(n_x))

    Q_id = pl.zeros((n_x, n_x))
    R_id = pl.zeros((n_y, n_y))
    sys = ss.StateSpaceDiscreteLinear(
        A=T.I*A_id*T, B=T.I*B_id, C=C_id*T, D=D_id,
        Q=Q_id, R=R_id, dt=dt)
    return sys
Example #27
0
    def calculate_fit_quality_fixed_seed(self,
                                         seed_val,
                                         debug_plot=False,
                                         max_dev=4.):
        """
        Assert the quality of the fit by comparing the parameters used
        to generate test data to the fit result. The deviation should
        be smaller than `max_dev` times the error estimate reported by
        the fit routine. (e.g., using max_dev=3 leads to a statistical
        failure probability of 0.3% assuming a normal distribution on
        the results.)

        Only the height and tau_1/tau_2 are tested for a correct error
        estimate.
        """

        noise = .1

        seed(seed_val)
        times = p.arange(0, 100, .1)

        height = 1.
        tau_1 = 10.
        tau_2 = 5.
        start = 30.
        offset = 50.

        voltage = noisy_psp(height, tau_1, tau_2, start, offset, times, noise)

        fitres, cov, red_chi2, success = fit(
            AlphaPSP(),
            times,
            voltage,
            noise,
            fail_on_negative_cov=[True, True, True, False, False])

        if debug_plot:
            p.figure()
            p.plot(times, AlphaPSP()(times, *fitres), 'r-')
            p.errorbar(times, voltage, yerr=noise, fmt='bx')
            p.xlabel("time / AU")
            p.ylabel("voltage / AU")
            p.title("fit result")
            fname = "/tmp/fit_quality_plot_{0}.pdf".format(seed_val)
            p.savefig(fname)
            print "Plot saved to:", fname

        err = p.sqrt(p.diag(cov))

        print "seed:", seed_val

        self.assertTrue(success)
        self.assertLess(abs(fitres[0] - height), max_dev * err[0])
        self.assertLess(abs(fitres[1] - tau_1), max_dev * err[1])
        self.assertLess(abs(fitres[2] - tau_2), max_dev * err[2])

        # NOTE: only testing height and time constants for correct
        #       error estimate
        # self.assertLess(abs(fitres[3] - start), max_dev * err[3])
        # self.assertLess(abs(fitres[4] - offset),
        #                 max_dev * err[4])

        self.assertLess(red_chi2, 1.5)
        print(red_chi2, abs(fitres[1] - tau_1) / err[1],
              abs(fitres[2] - tau_2) / err[2])
Example #29
0
def main():
    lam = 505
    thetain = -30 / 180 * pl.pi
    nIn = 2.8
    nMaterial1 = 5.9
    nMaterial2 = 4.2
    #nMaterial2 = nIn   #When nMaterial2 == nIn, the method works
    nOut = 4.3  #If nOut<nIn, we can have values in the S matrix exceeding 1
    nOut = 2.1
    #nOut = nMaterial2
    pol = 'Ey'
    nelx = 80

    ##Reference model (one circle + one burried circle)
    Mres = model(lx=lam, lz=2 * lam, nelx=nelx)
    Pres = physics(Mres, lam, nIn, nOut, thetaIn=thetain, pol=pol)
    if 1:  #Put to zero to avoid re-simulation and use last saved results
        materialRes1 = Pres.newMaterial(nMaterial1)
        materialRes2 = Pres.newMaterial(nMaterial2)
        materialResOut = Pres.newMaterial(nOut)

        xres = Mres.generateEmptyDesign()
        makeSlab(xres, Mres, 0, 0.5 * lam, materialResOut)
        makeSlab(xres, Mres, 0.5 * lam, 1.5 * lam, materialRes2)
        makeCircle(xres, Mres, 250, 200, materialRes1)
        makeCircle(xres, Mres, 250 + lam, 200, materialRes1)

        if 0:  #Show design
            pl.imshow(xres.T, interpolation='none', vmin=0, vmax=4)
            pl.colorbar()
            pl.savefig("full_structure.png", bbox_inches='tight')
            pl.show()
            exit()

        res = FE(Mres, Pres, xres)
        saveDicts("SmatrixRIInternalReference.h5", res, 'w')
    else:
        res = loadDicts("SmatrixRIInternalReference.h5")[0]
    mR, thetaR, R, mT, thetaT, T = calcRT(Mres, Pres, res["r"], res["t"])
    Rres = R
    Tres = T

    ##Scattering matrix model 1 (one burried circle)
    M = model(lx=lam, lz=lam, nelx=nelx)
    #Notice how nOut here is nMaterial2
    P = physics(M, lam, nIn, nMaterial2, thetaIn=thetain, pol=pol)
    material1 = P.newMaterial(nMaterial1)
    material2 = P.newMaterial(nMaterial2)
    x = M.generateEmptyDesign()
    makeSlab(x, M, 0, .5 * lam, material2)
    makeCircle(x, M, 250, 200, material1)
    if 0:  #Show design
        pl.imshow(x.T, interpolation='none', vmin=0, vmax=4)
        pl.colorbar()
        pl.savefig("upper_half.png", bbox_inches='tight')
        pl.show()
        exit()
    res = calcScatteringMatrix(M, P, x)

    S1 = RTtoS(res["RIn"], res["TIn"], res["TOut"], res["ROut"])
    matL = pl.bmat([[pl.diag(pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(pl.sqrt(P.chiOut))]])
    matR = pl.bmat([[pl.diag(1 / pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(1 / pl.sqrt(P.chiOut))]])
    S1real = matL * S1 * matR

    ##Scattering matrix model 2 (one burried circle)
    M = model(lx=lam, lz=lam, nelx=nelx)
    thetainNew = pl.arcsin(P.nIn / P.nOut * pl.sin(thetain))
    P = physics(M, lam, nMaterial2, nOut, thetaIn=thetainNew, pol=pol)
    material1 = P.newMaterial(nMaterial1)
    material2 = P.newMaterial(nMaterial2)
    materialOut = P.newMaterial(nOut)
    x = M.generateEmptyDesign()
    makeSlab(x, M, .5 * lam, lam, material2)
    makeSlab(x, M, 0, .5 * lam, materialOut)
    makeCircle(x, M, 250, 200, material1)
    if 0:  #Show design
        pl.imshow(x.T, interpolation='none', vmin=0, vmax=4)
        pl.colorbar()
        pl.savefig("lower_half.png", bbox_inches='tight')
        pl.show()
        exit()
    res = calcScatteringMatrix(M, P, x)
    S2 = RTtoS(res["RIn"], res["TIn"], res["TOut"], res["ROut"])
    matL = pl.bmat([[pl.diag(pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(pl.sqrt(P.chiOut))]])
    matR = pl.bmat([[pl.diag(1 / pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(1 / pl.sqrt(P.chiOut))]])
    S2real = matL * S2 * matR

    if 0:
        #Define incident wave as a plane wave
        einc = pl.zeros((2 * M.NM, 1), dtype='complex').view(pl.matrix)
        einc[M.NM // 2, 0] = 1.

        tmp = S2 * einc
        rnew = tmp[:M.NM].view(pl.ndarray).flatten()
        tnew = tmp[M.NM:].view(pl.ndarray).flatten()
        mR, thetaR, Rnew, mT, thetaT, Tnew = calcRT(M, P, rnew, tnew)

        tmp = S2real * einc
        r = tmp[:M.NM].view(pl.ndarray).flatten()
        t = tmp[M.NM:].view(pl.ndarray).flatten()
        idx = P.propModesIn
        R = abs(r[idx])**2
        idx = P.propModesOut
        T = abs(t[idx])**2
        print "-" * 50
        print R
        print Rnew
        print T
        print Tnew
        print abs(R - Rnew).max()
        print abs(T - Tnew).max()
        exit()

    #Stack the two scattering elements
    Stot = stackElements(S1, S2)

    #Define incident wave as a plane wave
    einc = pl.zeros((2 * M.NM, 1), dtype='complex').view(pl.matrix)
    einc[M.NM // 2, 0] = 1.
    tmp = Stot * einc
    rnew = tmp[:M.NM].view(pl.ndarray).flatten()
    tnew = tmp[M.NM:].view(pl.ndarray).flatten()
    mR, thetaR, Rnew, mT, thetaT, Tnew = calcRT(Mres, Pres, rnew, tnew)
    print("-" * 32)
    print(Rres)
    print(Rnew)
    print("-" * 10)
    print(Tres)
    print(Tnew)
    print("-" * 32)
    print("Error in reflection  : {:.2e}".format(abs(Rnew - Rres).max()))
    print("Error in transmission: {:.2e}".format(abs(Tnew - Tres).max()))
    #print("(note: since the significant numbers are in general between 0.01 and 1")
    #print(" we required a much lower error (1e-6 or better) to confirm that it works)")

    #Define incident wave as a plane wave
    einc = pl.zeros((2 * M.NM, 1), dtype='complex').view(pl.matrix)
    einc[M.NM // 2, 0] = 1.

    Stotreal = stackElements(S1real, S2real)
    #Stotreal = S1real
    if 1:
        pl.imshow(abs(S1real), interpolation='none', vmin=0, vmax=1)
        print(abs(S1real).max())
        pl.colorbar()
        pl.show()
        pl.imshow(abs(S2real), interpolation='none', vmin=0, vmax=1)
        print(abs(S2real).max())
        pl.colorbar()
        pl.show()

    tmp = Stotreal * einc
    r = tmp[:M.NM].view(pl.ndarray).flatten()
    t = tmp[M.NM:].view(pl.ndarray).flatten()

    idx = Pres.propModesIn
    R = abs(r[idx])**2

    idx = Pres.propModesOut
    T = abs(t[idx])**2

    print "-" * 50
    print Rres
    print Rnew
    print R
    print("Error in reflection  : {:.2e}".format(abs(R - Rres).max()))
    print("Error in transmission: {:.2e}".format(abs(T - Tres).max()))

fdfs = pd.read_pickle("./Freq_small_df.pkl")
Edata = fdfs["snr"]

E_y,E_x,E_=hist(Edata,100,alpha=.3,label='On-Pulse')
E_x = (E_x[1:]+E_x[:-1])/2
f = 0.5

mu1, sigma1, A1, mu2, sigma2, A2 = (0.96,1.1,4000,7.3,2.7,4000)
expected = (0.5,0.96,1.1,4000,2,7.3,2.7,4000,2)
#alpha1 = 2
#alpha2 = 2
E_params,E_cov=curve_fit(model,E_x,E_y,expected)
E_sigma=np.sqrt(diag(E_cov))
E_output = model(E_x,*E_params)

print("Curve-fit parameters are: f={:.4f}, mu1={:.4f}, sigma1={:.4f}, A1={:.4f}, alpha1={:.4f}, mu2={:.4f}, sigma2={:.4f}, A2={:.4f}, alpha2={:.4f}".format(E_params[0], E_params[1], E_params[2], E_params[3], E_params[4], E_params[5], E_params[6], E_params[7], E_params[8]))
print("With errors: f=+/-{:.4f}, mu1=+/-{:.4f}, sigma1=+/-{:.4f}, A1=+/-{:.4f}, mu2=+/-{:.4f}, sigma2=+/-{:.4f}, A2=+/-{:.4f}".format(np.sqrt(E_cov[0,0]),np.sqrt(E_cov[1,1]),np.sqrt(E_cov[2,2]),np.sqrt(E_cov[3,3]),np.sqrt(E_cov[4,4]),np.sqrt(E_cov[5,5]),np.sqrt(E_cov[6,6]),np.sqrt(E_cov[7,7]),np.sqrt(E_cov[8,8])))
#for x in E_x:
#Eanswer = model(E_x,f,mu1,sigma1,A1,alpha1,mu2,sigma2,A2,alpha2)
#output2 = model2(E_x,f,mu1,sigma1,A1,mu2,sigma2,A2)
#resultbin.append(result)
plt.plot(E_x,E_output,label="Total Num Int")
#plt.plot(E_x,gauss1(E_x,E_params[0],E_params[1],E_params[2],E_params[3],E_params[4]),label="Weak Gauss")
#plt.plot(E_x,gauss2(E_x,E_params[0],E_params[5],E_params[6],E_params[7],E_params[8]),label="Strong Gauss")
plt.legend()
plt.show()
#result = model(E_x,f,mu1,sigma1,A1,alpha1,mu2,sigma2,A2,alpha2)
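# model() used in the curve_fit above is defined elsewhere in the script. A
# hypothetical stand-in with the same 9-parameter signature, shown only so the
# fit can be reproduced in isolation (NOT the original model): a two-component
# mixture of generalised Gaussians, where alpha = 2 recovers an ordinary
# Gaussian, consistent with the initial guesses alpha1 = alpha2 = 2 above.
def model_sketch(E, f, mu1, sigma1, A1, alpha1, mu2, sigma2, A2, alpha2):
    gg = lambda E, mu, s, A, a: A * np.exp(-0.5 * (np.abs(E - mu) / s)**a)
    return f * gg(E, mu1, sigma1, A1, alpha1) + (1 - f) * gg(E, mu2, sigma2, A2, alpha2)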
Example #31
0
def win(board, letter):
    wins = logical_or(board == letter, board == 'T')
    return any(all(wins, 0)) or any(all(wins, 1)) or all(diag(wins)) or \
      all(diag(rot90(wins)))
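# A minimal usage sketch for win(), assuming board is a 3x3 numpy array of
# single-character strings and 'T' marks a wildcard square (as the logical_or
# with 'T' suggests):
from numpy import array

board = array([['X', 'O', 'O'],
               ['O', 'X', 'T'],
               ['O', 'O', 'X']])
print(win(board, 'X'))   # True: the main diagonal is all 'X'
print(win(board, 'O'))   # False: no row, column or diagonal is all 'O'/'T'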
Example #32
0
from SBDet import *
import pylab as P

data = zload("./cor-graph-360.pkz")
npcor = data["npcor"]
nd = P.diag(npcor)
valid_idx = np.nonzero(nd == 1)[0]
npcor = npcor[valid_idx, :][:, valid_idx]
P.pcolor(npcor, cmap="Greys", vmin=0, vmax=1)
P.colorbar()
n = npcor.shape[0]
P.axis([0, n, 0, n])
P.savefig("cor-graph-pcolor.pdf")
P.show()
# import ipdb;ipdb.set_trace()
def calcdp(rep_no, thetainput):
    rep_no = rep_no
    wav = 505
    lam = 400
    thetain = thetainput / 180 * pl.pi
    nAir = 1.0
    nBac = 1.38
    nMed = 1.34
    pol = 'Ey'
    nelx = 40
    z_uc = 2.0 * math.sqrt(3)

    ##Reference model
    FE_start_time = time.time()
    Mres = model(lx=2 * lam, lz=(1 + rep_no) * (z_uc) * lam, nelx=nelx)
    Pres = physics(Mres, wav, nAir, nBac, thetaIn=thetain, pol=pol)
    if 1:  #Set to 0 to skip re-simulation and reuse the last saved results
        materialResAir = Pres.newMaterial(nAir)
        materialResBac = Pres.newMaterial(nBac)
        materialResMed = Pres.newMaterial(nMed)

        xres = Mres.generateEmptyDesign()
        makeSlab(xres, Mres, (rep_no) * (z_uc) * lam,
                 (1 + rep_no) * (z_uc) * lam, materialResAir)
        makeSlab(xres, Mres, 0 * (z_uc) * lam, (rep_no) * (z_uc) * lam,
                 materialResMed)

        for x in range(0, rep_no):
            makeCircle(xres, Mres, lam, x * (z_uc) * lam, lam, materialResBac)
            makeCircle(xres, Mres, 0, (x + 1 / 2) * (z_uc) * lam, lam,
                       materialResBac)
            makeCircle(xres, Mres, 2 * lam, (x + 1 / 2) * (z_uc) * lam, lam,
                       materialResBac)
        makeCircle(xres, Mres, lam, (rep_no) * (z_uc) * lam, lam,
                   materialResBac)

        if 0:  #Show design
            pl.imshow(xres.T, interpolation='none', vmin=0, vmax=4)
            pl.colorbar()
            pl.savefig("full_structure.png", bbox_inches='tight')
            pl.show()
            exit()

        res = FE(Mres, Pres, xres)
        #saveDicts("SmatrixRIInternalReference.h5",res,'a')
    #else:
    #res = loadDicts("SmatrixRIInternalReference.h5")[0]
    mR, thetaR, R, mT, thetaT, T = calcRT(Mres, Pres, res["r"], res["t"])
    Rres = R
    Tres = T
    FE_time = time.time() - FE_start_time

    CSM_start_time = time.time()

    ##Scattering matrix model air-material boundary
    M = model(lx=2.0 * lam, lz=1 * (z_uc) * lam, nelx=nelx)
    P = physics(M, wav, nAir, nBac, thetaIn=thetain, pol=pol)
    materialAir = P.newMaterial(nAir)
    materialBac = P.newMaterial(nBac)
    materialMed = P.newMaterial(nMed)
    x = M.generateEmptyDesign()
    makeSlab(x, M, 0, 1.0 * (z_uc) * lam, materialAir)
    makeCircle(x, M, lam, 0, lam, materialBac)
    if 0:  #Show design
        pl.imshow(x.T, interpolation='none', vmin=0, vmax=4)
        pl.colorbar()
        pl.savefig("upper_half.png", bbox_inches='tight')
        pl.show()
        exit()
    res = calcScatteringMatrix(M, P, x)

    S1 = RTtoS(res["RIn"], res["TIn"], res["TOut"], res["ROut"])
    matL = pl.bmat([[pl.diag(pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(pl.sqrt(P.chiOut))]])
    matR = pl.bmat([[pl.diag(1 / pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(1 / pl.sqrt(P.chiOut))]])
    S1real = matL * S1 * matR

    Stot = S1
    Stotreal = S1real
    thetainNew = thetain

    ##Scattering matrix model unit cell
    M = model(lx=2.0 * lam, lz=(z_uc) * lam, nelx=nelx)
    thetainNew = pl.arcsin(P.nIn / P.nOut * pl.sin(thetainNew))
    P = physics(M, wav, nBac, nBac, thetaIn=thetainNew, pol=pol)  #
    materialAir = P.newMaterial(nAir)
    materialBac = P.newMaterial(nBac)
    materialMed = P.newMaterial(nMed)
    x = M.generateEmptyDesign()
    makeSlab(x, M, 0, 1.0 * (z_uc) * lam, materialMed)
    makeCircle(x, M, lam, 0, lam, materialBac)
    makeCircle(x, M, lam, 1.0 * (z_uc) * lam, lam, materialBac)
    makeCircle(x, M, 0, (1 / 2) * (z_uc) * lam, lam, materialBac)
    makeCircle(x, M, 2 * lam, (1 / 2) * (z_uc) * lam, lam, materialBac)
    if 0:  #Show design
        pl.imshow(x.T, interpolation='none', vmin=0, vmax=4)
        pl.colorbar()
        pl.savefig("lower_half.png", bbox_inches='tight')
        pl.show()
        exit()
    res = calcScatteringMatrix(M, P, x)
    S2 = RTtoS(res["RIn"], res["TIn"], res["TOut"], res["ROut"])
    matL = pl.bmat([[pl.diag(pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(pl.sqrt(P.chiOut))]])
    matR = pl.bmat([[pl.diag(1 / pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(1 / pl.sqrt(P.chiOut))]])
    S2real = matL * S2 * matR
    for x in range(1, rep_no + 1):
        Stot = stackElements(Stot, S2)
        Stotreal = stackElements(Stotreal, S2real)
    CSM_time = time.time() - CSM_start_time

    #Stack the two scattering elements
    #Stot = stackElements(S1,S2)

    RIn, TIn, TOut, ROut = StoRT(Stot)
    results = {}
    results["RIn"] = RIn
    results["TIn"] = TIn
    results["ROut"] = ROut
    results["TOut"] = TOut
    results.update(M.getParameters())
    results.update(P.getParameters())
    #saveDicts("SmatrixRICalculations.h5",results,'a')

    #Define incident wave as a plane wave
    einc = pl.zeros((2 * M.NM, 1), dtype='complex').view(pl.matrix)
    einc[M.NM // 2, 0] = 1.
    tmp = Stot * einc
    rnew = tmp[:M.NM].view(pl.ndarray).flatten()
    tnew = tmp[M.NM:].view(pl.ndarray).flatten()
    mR, thetaR, Rnew, mT, thetaT, Tnew = calcRT(Mres, Pres, rnew, tnew)
    title = "speed_wrt_rep_no_FE.csv"
    txtdata = open(title, "a")
    txtdata.write("{:f}, {:.8f}\n".format(rep_no, FE_time))
    title = "speed_wrt_rep_no_CSM.csv"
    txtdata = open(title, "a")
    txtdata.write("{:f}, {:.8f}\n".format(rep_no, CSM_time))
    print("rep_no={:f}, FE_time={:.8f}, CSM_time={:.8f}".format(
        rep_no, FE_time, CSM_time))
    txtdata.close()

    #Define incident wave as a plane wave
    einc = pl.zeros((2 * M.NM, 1), dtype='complex').view(pl.matrix)
    einc[M.NM // 2, 0] = 1.
    #Stotreal = S1real
    if 0:
        pl.imshow(abs(S1real), interpolation='none', vmin=0, vmax=1)
        print(abs(S1real).max())
        pl.colorbar()
        pl.show()
        pl.imshow(abs(S2real), interpolation='none', vmin=0, vmax=1)
        print(abs(S2real).max())
        pl.colorbar()
        pl.show()

    tmp = Stotreal * einc
    r = tmp[:M.NM].view(pl.ndarray).flatten()
    t = tmp[M.NM:].view(pl.ndarray).flatten()

    idx = Pres.propModesIn
    R = abs(r[idx])**2

    idx = Pres.propModesOut
    T = abs(t[idx])**2

    return
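# A minimal driver sketch (hypothetical, not part of the original script):
# sweep the number of unit-cell repetitions at normal incidence and let
# calcdp() append the FE and cascaded-scattering-matrix timings to its two
# speed_wrt_rep_no_*.csv files.
if __name__ == "__main__":
    for rep_no in range(1, 11):   # assumed sweep range
        calcdp(rep_no, 0.0)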
Example #34
0
def robust_combined_algo(y, u, f, p, s_tol, dt):
    """
    Subspace Identification for stochastic systems with input
    Robust combined algorithm from chapter 4 of (1)

    assuming a system of the form:

    x(k+1) = A x(k) + B u(k) + w(k)
    y(k)   = C x(k) + D u(k) + v(k)
    E[(w_p; v_p) (w_q^T v_q^T)] = (Q S; S^T R) delta_pq

    and given y and u.

    Find the order of the system and A, B, C, D, Q, S, R

    See page 131, and generally chapter 4, of (1)
    A different implementation of the algorithm is presented in 6.1 of (1)

    (1) Subspace Identification for Linear
    Systems, by Van Overschee and Moor. 1996
    """
    #pylint: disable=too-many-arguments, too-many-locals
    # for this algorithm, we need future and past
    # to be more than 1
    assert f > 1
    assert p > 1

    # setup matrices
    y = pl.matrix(y)
    n_y = y.shape[0]
    u = pl.matrix(u)
    n_u = u.shape[0]
    w = pl.vstack([y, u])
    n_w = w.shape[0]

    # make sure the input is column vectors
    assert y.shape[0] < y.shape[1]
    assert u.shape[0] < u.shape[1]

    W = block_hankel(w, f + p)
    U = block_hankel(u, f + p)
    Y = block_hankel(y, f + p)

    W_p = W[:n_w*p, :]
    W_pp = W[:n_w*(p+1), :]

    Y_f = Y[n_y*f:, :]
    U_f = U[n_u*f:, :]

    Y_fm = Y[n_y*(f+1):, :]
    U_fm = U[n_u*(f+1):, :]

    # step 1, calculate the oblique and orthogonal projections
    #------------------------------------------
    #TODO fix explanation
    # Y_p = G_i Xd_p + Hd_i U_p
    # After the oblique projection, U_p component is eliminated,
    # without changing the Xd_p component:
    # Proj_perp_(U_p) Y_p = W1 O_i W2 = G_i Xd_p
    O_i  = Y_f*project_oblique(U_f, W_p)
    Z_i  = Y_f*project(pl.vstack([W_p, U_f]))
    Z_ip = Y_fm*project(pl.vstack([W_pp, U_fm]))

    #TODO fix explanation
    # step 2, calculate the SVD of the weighted oblique projection
    #------------------------------------------
    # given: W1 O_i W2 = G_i Xd_p
    # want to solve for G_i, but know product, and not Xd_p
    # so can only find Xd_p up to a similarity transformation
    U0, s0, VT0 = pl.svd(O_i*project_perp(U_f))  #pylint: disable=unused-variable

    # step 3, determine the order by inspecting the singular
    #------------------------------------------
    # values in S and partition the SVD accordingly to obtain U1, S1
    #print s0
    n_x = pl.find(s0/s0.max() > s_tol)[-1] + 1
    U1 = U0[:, :n_x]
    S1 = pl.matrix(pl.diag(s0[:n_x]))
    # VT1 = VT0[:n_x, :n_x]

    # step 4, determine Gi and Gim
    #------------------------------------------
    G_i = U1*pl.matrix(pl.diag(pl.sqrt(s0[:n_x])))
    G_im = G_i[:-n_y, :]

    # step 5, solve the linear equations for A and C
    #------------------------------------------
    # Recompute G_i and G_im from A and C
    #TODO express K (which contains B and D) and the residuals rho_w, rho_v in
    # terms of known quantities; once they are available, A and C follow from a
    # linear least-squares problem along the lines of:
    #   AC_stack = (pl.vstack([G_im.I*Z_ip, Y_f[:n_y, :]])
    #               - K*U_f - pl.vstack([rho_w, rho_v])) * (G_i.I*Z_i).I
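# block_hankel() is referenced above but not defined here. A minimal sketch,
# assuming it stacks i shifted block rows of the signal matrix w (n_w x N)
# into a block Hankel matrix with N - i + 1 columns, in the spirit of
# Van Overschee and De Moor (1996):
import pylab as pl

def block_hankel_sketch(w, i):
    w = pl.matrix(w)
    n_w, N = w.shape
    j = N - i + 1                              # number of columns
    H = pl.matrix(pl.zeros((n_w * i, j)))
    for k in range(i):
        H[k * n_w:(k + 1) * n_w, :] = w[:, k:k + j]
    return H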
Example #35
0
 def train(self):
     self.K = self._K() + diag(self.trainNoise)
Example #36
0
 def train(self):
     self.K = self._K() + diag(self.trainNoise)
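# A standalone sketch of the pattern above (not the class's actual _K): adding
# the per-sample noise variances on the kernel diagonal both models observation
# noise and keeps the Gram matrix well conditioned for the solve step in GP
# regression. The RBF kernel and the noise values here are assumptions.
import numpy as np

def rbf_kernel(X, lengthscale=1.0):
    d2 = ((X[:, None, :] - X[None, :, :])**2).sum(axis=-1)
    return np.exp(-0.5 * d2 / lengthscale**2)

X = np.random.rand(6, 2)                   # 6 training points in 2D
trainNoise = 1e-3 * np.ones(len(X))        # assumed per-point noise variances
K = rbf_kernel(X) + np.diag(trainNoise)    # mirrors self._K() + diag(self.trainNoise)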
Example #37
0
    def get_edge_errors(self):
        """
    Calculates the error estimates of the expression projected into the mesh.
    
    :rtype: Dolfin edge function containing the edge errors of the mesh

    """
        mesh = self.mesh
        coord = mesh.coordinates()

        V = FunctionSpace(mesh, "CG", 1)
        Hxx = TrialFunction(V)
        Hxy = TrialFunction(V)
        Hyy = TrialFunction(V)
        phi = TestFunction(V)

        edge_errors = EdgeFunction('double', mesh)

        U = project(self.U_ex, V)
        a_xx = Hxx * phi * dx
        L_xx = -U.dx(0) * phi.dx(0) * dx

        a_xy = Hxy * phi * dx
        L_xy = -U.dx(0) * phi.dx(1) * dx

        a_yy = Hyy * phi * dx
        L_yy = -U.dx(1) * phi.dx(1) * dx

        Hxx = Function(V)
        Hxy = Function(V)
        Hyy = Function(V)

        Mxx = Function(V)
        Mxy = Function(V)
        Myy = Function(V)

        solve(a_xx == L_xx, Hxx)
        solve(a_xy == L_xy, Hxy)
        solve(a_yy == L_yy, Hyy)
        e_list = []

        for v in vertices(mesh):
            idx = v.index()
            pt = v.point()
            x = pt.x()
            y = pt.y()

            a = Hxx(x, y)
            b = Hxy(x, y)
            d = Hyy(x, y)

            H_local = ([[a, b], [b, d]])

            l, ve = pl.eig(H_local)
            M = pl.dot(pl.dot(ve, abs(pl.diag(l))), ve.T)

            Mxx.vector()[idx] = M[0, 0]
            Mxy.vector()[idx] = M[1, 0]
            Myy.vector()[idx] = M[1, 1]

        e_list = []
        for e in edges(mesh):
            I, J = e.entities(0)
            x_I = coord[I, :]
            x_J = coord[J, :]
            M_I = pl.array([[Mxx.vector()[I], Mxy.vector()[I]],
                            [Mxy.vector()[I], Myy.vector()[I]]])
            M_J = pl.array([[Mxx.vector()[J], Mxy.vector()[J]],
                            [Mxy.vector()[J], Myy.vector()[J]]])
            M = (M_I + M_J) / 2.
            dX = x_I - x_J
            error = pl.dot(pl.dot(dX, M), dX.T)

            e_list.append(error)
            edge_errors[e] = error

        return edge_errors
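# Standalone sketch of the per-vertex step above: an indefinite Hessian
# estimate is turned into a positive (semi-)definite metric by taking the
# absolute value of its eigenvalues, M = V |L| V^T. The example matrix is
# arbitrary.
import pylab as pl

H = pl.array([[2.0, -3.0],
              [-3.0, 1.0]])                    # example indefinite Hessian
l, ve = pl.eig(H)
M = pl.dot(pl.dot(ve, abs(pl.diag(l))), ve.T)  # same recipe as in the vertex loop
print(pl.eig(M)[0])                            # both eigenvalues are now >= 0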
Example #38
0
# script to generate data files for the least squares assignment
from pylab import linspace, meshgrid, logspace, dot, plot, xlabel, ylabel, ones, randn, diag, title, grid, savetxt, show, c_
import scipy.special as sp
N = 101  # no of data points
k = 9
# no of sets of data with varying noise
# generate the data points and add noise
t = linspace(0, 10, N)
# t vector
y = 1.05 * sp.jv(2, t) - 0.105 * t  # f(t) vector
Y = meshgrid(y, ones(k), indexing="ij")[0]  # make k copies
scl = logspace(-1, -3, k)  # noise stdev
n = dot(randn(N, k), diag(scl))  # generate k vectors
yy = Y + n
# add noise to signal
# shadow plot
plot(t, yy)
xlabel("t", size=20)
ylabel("f(t)+n", size=20)
title("Plot of the data to be fitted")
grid(True)
savetxt("fitting.dat", c_[t, yy])  # write out matrix to file
show()
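# A companion sketch (an assumed follow-up to the assignment, not part of the
# original script): read fitting.dat back and recover the two coefficients of
# A*J2(t) + B*t from the noisiest column by ordinary least squares.
import numpy as np
import scipy.special as sp

data = np.loadtxt("fitting.dat")
t, yy = data[:, 0], data[:, 1:]
M = np.c_[sp.jv(2, t), t]                         # design matrix [J2(t), t]
coeffs, *_ = np.linalg.lstsq(M, yy[:, 0], rcond=None)
print(coeffs)                                     # expected to be close to (1.05, -0.105)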
Example #39
0
        if coef[0] < 0.0 and t_rxn_var == 0:
            t_rxn_var = i
    if t_rxn_corr and t_rxn_var:  # if all relaxation times have been assigned, end simulation
        t = t[:i]
        TLIM = t[-1]
        x = x[:, :i]
        prom = prom[:i]
        var = var[:i]
        corr = corr[:i]
        break

expected = (1, .5, 1.4, -1, .5, 0.9)  # starting coefficients to fit the bimodal function
p_eq, edges = np.histogram(x[:, -1], density=True, bins=20)
params, cov = curve_fit(bimodal, edges[:-1], p_eq, expected)
sigma = sqrt(diag(cov))
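# bimodal() is defined elsewhere in this script. A plausible sketch, assuming
# the parameter order implied by `expected` (mu1, sigma1, A1, mu2, sigma2, A2):
# a sum of two Gaussians centred near x = +1 and x = -1, matching a double-well
# equilibrium density.
import numpy as np

def bimodal_sketch(x, mu1, sigma1, A1, mu2, sigma2, A2):
    g = lambda x, mu, s, A: A * np.exp(-(x - mu)**2 / (2.0 * s**2))
    return g(x, mu1, sigma1, A1) + g(x, mu2, sigma2, A2)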

print("Markov process:")
print("t_rxn_corr: {} \t t_rxn_var: {}".format(t[t_rxn_corr - 1],
                                               t[t_rxn_var - 1]))
print("Mean: {0:1.4f} \t Var: {1:1.4f} \t Corr: {2:1.4f}".format(
    prom[-1], var[-1], corr[-1]))

# BOKEH Visualization ##################################################################################################
# Create sources
tray_source = ColumnDataSource(data=dict(t=t))

for tray_n in range(N):
    tray_source.data[str(tray_n)] = x[tray_n]
analysis_source = ColumnDataSource(
    data=dict(t=t, prom=prom, var=var, corr=corr))
Example #40
0
    theta = r[0]
    cov_x = r[1]
    return theta, cov_x * (noise**2)
    # print cov_x


def plott():
    y = f(x, theta_0) + numpy.random.normal(scale=noise)
    chi2 = lambda theta: sum((f(x, theta) - y)**2) / noise**2
    p.figure()
    tval = p.arange(25, 35, 1)
    p.plot(tval, [chi2(t) for t in tval], '-')


print("original parameters:         ", theta_0)
print("mean fit values:             ",
      p.mean([estimate()[0] for _ in range(rep)], axis=0))
print()
print("mean fit parameter deviation:",
      p.std([estimate()[0] for _ in range(rep)], axis=0))
print()
print("mean deviation estimate:     ",
      p.mean([p.sqrt(p.diag(estimate()[1])) for _ in range(rep)], axis=0))
print()
print("fit parameter covariances:")
print(p.cov([estimate()[0] for _ in range(rep)], rowvar=0))
print()
print("mean covariance matrix:      ")
print(p.mean([estimate()[1] for _ in range(rep)], axis=0))
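# estimate() is only partially visible above (its tail returns theta and
# cov_x * noise**2). A plausible reconstruction, assuming f, x, theta_0 and
# noise are defined near the top of the script and that scipy.optimize.leastsq
# is called with full_output=1 so the covariance factor cov_x is returned:
import numpy
from scipy.optimize import leastsq

def estimate_sketch():
    y = f(x, theta_0) + numpy.random.normal(scale=noise, size=len(x))
    r = leastsq(lambda theta: f(x, theta) - y, theta_0, full_output=1)
    theta = r[0]
    cov_x = r[1]
    return theta, cov_x * (noise**2)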
Example #41
0
 ##find I_th with a fit
 m = []
 dm = []
 q = []
 dq = []
 out = fit_curve(retta,
                 I[0:j - 1],
                 P[0:j - 1],
                 dy=dP[0:j - 1],
                 p0=[1, 1],
                 absolute_sigma=True)
 par = out.par
 cov = out.cov
 m_fit = par[0]
 q_fit = par[1]
 dm_fit, dq_fit = py.sqrt(py.diag(cov))
 I_th = -q_fit / m_fit
 dI = I_th * (dm_fit / m_fit + dq_fit / q_fit)
 print('m = %s  q = %s' % (xe(m_fit, dm_fit), xe(q_fit, dq_fit)))
 print('I_th = %s' % (xe(I_th, dI)))
 m.append(m_fit)
 q.append(q_fit)
 dm.append(dm_fit)
 dq.append(dq_fit)
 x = py.linspace(I[j] - 2, I[0] + 1, 100)
 py.plot(x,
         retta(x, *par),
         linewidth=1,
         color=c_fit[i],
         label='{} = {} mA, T = {} °C'.format('$I_{th}$', xe(I_th, dI),
                                              T[i]))
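# The uncertainty on I_th above sums the relative errors of m and q; a sketch
# of full first-order (Gaussian) error propagation for I_th = -q/m, including
# the m-q covariance returned by the fit, shown as an alternative and not as
# the original analysis:
def ith_uncertainty(m_fit, q_fit, cov):
    J = py.array([q_fit / m_fit**2, -1.0 / m_fit])   # gradient of -q/m w.r.t. (m, q)
    return py.sqrt(J @ cov @ J)

# e.g. dI_full = ith_uncertainty(m_fit, q_fit, cov)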
Example #42
0
  def get_edge_errors(self):
    """
    Calculates the error estimates of the expression projected into the mesh.
    
    :rtype: Dolfin edge function containing the edge errors of the mesh
    """
    mesh  = self.mesh
    coord = mesh.coordinates()
    
    V     = FunctionSpace(mesh, "CG", 1)
    Hxx   = TrialFunction(V)
    Hxy   = TrialFunction(V)
    Hyy   = TrialFunction(V)
    phi   = TestFunction(V)
    
    edge_errors = EdgeFunction('double', mesh)

    U    = project(self.U_ex, V)
    a_xx = Hxx * phi * dx
    L_xx = - U.dx(0) * phi.dx(0) * dx

    a_xy = Hxy * phi * dx
    L_xy = - U.dx(0) * phi.dx(1) * dx

    a_yy = Hyy * phi * dx
    L_yy = - U.dx(1) * phi.dx(1) * dx       

    Hxx  = Function(V)
    Hxy  = Function(V)
    Hyy  = Function(V)
         
    Mxx  = Function(V)
    Mxy  = Function(V)
    Myy  = Function(V)
  
    solve(a_xx == L_xx, Hxx)
    solve(a_xy == L_xy, Hxy)
    solve(a_yy == L_yy, Hyy)
    e_list = []

    for v in vertices(mesh):
      idx = v.index()
      pt  = v.point()
      x   = pt.x()
      y   = pt.y()
          
      a   = Hxx(x, y)
      b   = Hxy(x, y)
      d   = Hyy(x, y)

      H_local = ([[a,b], [b,d]])

      l, ve   = p.eig(H_local)
      M       = p.dot(p.dot(ve, abs(p.diag(l))), ve.T)       

      Mxx.vector()[idx] = M[0,0]
      Mxy.vector()[idx] = M[1,0]
      Myy.vector()[idx] = M[1,1]

    e_list = []
    for e in edges(mesh):
      I, J  = e.entities(0)
      x_I   = coord[I,:]
      x_J   = coord[J,:]
      M_I   = p.array([[Mxx.vector()[I], Mxy.vector()[I]],
                       [Mxy.vector()[I], Myy.vector()[I]]]) 
      M_J   = p.array([[Mxx.vector()[J], Mxy.vector()[J]],
                       [Mxy.vector()[J], Myy.vector()[J]]])
      M     = (M_I + M_J)/2.
      dX    = x_I - x_J
      error = p.dot(p.dot(dX, M), dX.T)
      
      e_list.append(error)
      edge_errors[e] = error
    
    return edge_errors
Example #43
0
def calcdp(wavinput, thetainput):
    rep_no = 50
    wav = wavinput
    lam = 400
    thetain = thetainput / 180 * pl.pi
    nAir = 1.0
    nBac = 1.38
    nMed = 1.34
    #nMaterial2 = nIn  #When nMaterial2 == nIn, the method works
    #nOut = nMaterial2
    pol = 'Ey'
    nelx = 40
    z_uc = 2.0 * math.sqrt(3)

    ##Scattering matrix model air-material boundary
    Mres = model(lx=2 * lam, lz=(1 + rep_no) * (z_uc) * lam, nelx=nelx)
    Pres = physics(Mres, wav, nAir, nBac, thetaIn=thetain, pol=pol)
    if 1:  #Set to 0 to skip re-simulation and reuse the last saved results
        materialResAir = Pres.newMaterial(nAir)
        materialResBac = Pres.newMaterial(nBac)
        materialResMed = Pres.newMaterial(nMed)

        xres = Mres.generateEmptyDesign()
        makeSlab(xres, Mres, (rep_no) * (z_uc) * lam,
                 (1 + rep_no) * (z_uc) * lam, materialResAir)
        makeSlab(xres, Mres, 0 * (z_uc) * lam, (rep_no) * (z_uc) * lam,
                 materialResMed)

        for x in range(0, rep_no):
            makeCircle(xres, Mres, lam, x * (z_uc) * lam, lam, materialResBac)
            makeCircle(xres, Mres, 0, (x + 1 / 2) * (z_uc) * lam, lam,
                       materialResBac)
            makeCircle(xres, Mres, 2 * lam, (x + 1 / 2) * (z_uc) * lam, lam,
                       materialResBac)
        makeCircle(xres, Mres, lam, (rep_no) * (z_uc) * lam, lam,
                   materialResBac)

        if 0:  #Show design
            pl.imshow(xres.T, interpolation='none', vmin=0, vmax=4)
            pl.colorbar()
            pl.savefig("full_structure.png", bbox_inches='tight')
            pl.show()
            exit()

    ##Scattering matrix model air-material boundary
    M = model(lx=2.0 * lam, lz=1 * (z_uc) * lam, nelx=nelx)
    #Note: the output medium nOut here is nBac
    P = physics(M, wav, nAir, nBac, thetaIn=thetain, pol=pol)
    materialAir = P.newMaterial(nAir)
    materialBac = P.newMaterial(nBac)
    materialMed = P.newMaterial(nMed)
    x = M.generateEmptyDesign()
    makeSlab(x, M, 0, 1.0 * (z_uc) * lam, materialAir)
    makeCircle(x, M, lam, 0, lam, materialBac)
    if 0:  #Show design
        pl.imshow(x.T, interpolation='none', vmin=0, vmax=4)
        pl.colorbar()
        pl.savefig("upper_half.png", bbox_inches='tight')
        pl.show()
        exit()
    res = calcScatteringMatrix(M, P, x)

    S1 = RTtoS(res["RIn"], res["TIn"], res["TOut"], res["ROut"])
    matL = pl.bmat([[pl.diag(pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(pl.sqrt(P.chiOut))]])
    matR = pl.bmat([[pl.diag(1 / pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(1 / pl.sqrt(P.chiOut))]])
    S1real = matL * S1 * matR

    Stot = S1
    Stotreal = S1real
    thetainNew = thetain

    ##Scattering matrix model unit cell
    M = model(lx=2.0 * lam, lz=(z_uc) * lam, nelx=nelx)
    thetainNew = pl.arcsin(P.nIn / P.nOut * pl.sin(thetainNew))
    P = physics(M, wav, nBac, nBac, thetaIn=thetainNew, pol=pol)  #
    materialAir = P.newMaterial(nAir)
    materialBac = P.newMaterial(nBac)
    materialMed = P.newMaterial(nMed)
    x = M.generateEmptyDesign()
    makeSlab(x, M, 0, 1.0 * (z_uc) * lam, materialMed)
    makeCircle(x, M, lam, 0, lam, materialBac)
    makeCircle(x, M, lam, 1.0 * (z_uc) * lam, lam, materialBac)
    makeCircle(x, M, 0, (1 / 2) * (z_uc) * lam, lam, materialBac)
    makeCircle(x, M, 2 * lam, (1 / 2) * (z_uc) * lam, lam, materialBac)
    if 0:
        pl.imshow(x.T, interpolation='none', vmin=0, vmax=4)
        pl.colorbar()
        pl.savefig("lower_half.png", bbox_inches='tight')
        pl.show()
        exit()
    res = calcScatteringMatrix(M, P, x)
    S2 = RTtoS(res["RIn"], res["TIn"], res["TOut"], res["ROut"])
    matL = pl.bmat([[pl.diag(pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(pl.sqrt(P.chiOut))]])
    matR = pl.bmat([[pl.diag(1 / pl.sqrt(P.chiIn)),
                     pl.zeros((M.NM, M.NM))],
                    [pl.zeros((M.NM, M.NM)),
                     pl.diag(1 / pl.sqrt(P.chiOut))]])
    S2real = matL * S2 * matR
    for x in range(1, rep_no + 1):
        Stot = stackElements(Stot, S2)
        Stotreal = stackElements(Stotreal, S2real)

    RIn, TIn, TOut, ROut = StoRT(Stot)
    results = {}
    results["RIn"] = RIn
    results["TIn"] = TIn
    results["ROut"] = ROut
    results["TOut"] = TOut
    results.update(M.getParameters())
    results.update(P.getParameters())
    #saveDicts("SmatrixRICalculations.h5",results,'a')

    #Define incident wave as a plane wave
    einc = pl.zeros((2 * M.NM, 1), dtype='complex').view(pl.matrix)
    einc[M.NM // 2, 0] = 1.
    tmp = Stot * einc
    rnew = tmp[:M.NM].view(pl.ndarray).flatten()
    tnew = tmp[M.NM:].view(pl.ndarray).flatten()
    mR, thetaR, Rnew, mT, thetaT, Tnew = calcRT(Mres, Pres, rnew, tnew)
    for i in range(len(mR)):
        title = "reflect_mode_n_equals_{:d}_50.csv".format(mR[i])
        txtdata = open(title, "a")
        txtdata.write("{:d}, {:f}, {:.8f}\n".format(wavinput, thetainput,
                                                    Rnew[i]))
        #print("m={:d}, theta={:f}, mode={:d}, R={:.8f}".format(wavinput,thetainput,mR[i],Rnew[i]))
        txtdata.close()
    for i in range(len(mT)):
        title = "trans_mode_n_equals_{:d}_50.csv".format(mT[i])
        txtdata = open(title, "a")
        txtdata.write("{:d}, {:f}, {:.8f}\n".format(wavinput, thetainput,
                                                    Tnew[i]))
        #print("m={:d}, theta={:f}, mode={:d}, T={:.8f}".format(wavinput,thetainput,mT[i],Tnew[i]))
        txtdata.close()

    #Define incident wave as a plane wave
    einc = pl.zeros((2 * M.NM, 1), dtype='complex').view(pl.matrix)
    einc[M.NM // 2, 0] = 1.
    #Stotreal = S1real
    if 0:
        pl.imshow(abs(S1real), interpolation='none', vmin=0, vmax=1)
        print(abs(S1real).max())
        pl.colorbar()
        pl.show()
        pl.imshow(abs(S2real), interpolation='none', vmin=0, vmax=1)
        print(abs(S2real).max())
        pl.colorbar()
        pl.show()

    tmp = Stotreal * einc
    r = tmp[:M.NM].view(pl.ndarray).flatten()
    t = tmp[M.NM:].view(pl.ndarray).flatten()

    idx = Pres.propModesIn
    R = abs(r[idx])**2

    idx = Pres.propModesOut
    T = abs(t[idx])**2
    return