Example #1
    def initialize(self):
        S = sum([N.dot(unit.X.T, unit.X) for unit in self.units])
        Y = sum([N.dot(unit.X.T, unit.Y) for unit in self.units])
        self.a = L.lstsq(S, Y)[0]

        D = 0
        t = 0
        sigmasq = 0
        for unit in self.units:
            unit.r = unit.Y - N.dot(unit.X, self.a)
            if self.q > 1:
                unit.b = L.lstsq(unit.Z, unit.r)[0]
            else:
                Z = unit.Z.reshape((unit.Z.shape[0], 1))
                unit.b = L.lstsq(Z, unit.r)[0]

            sigmasq += (N.power(unit.Y, 2).sum() -
                        (self.a * N.dot(unit.X.T, unit.Y)).sum() -
                        (unit.b * N.dot(unit.Z.T, unit.r)).sum())
            D += N.multiply.outer(unit.b, unit.b)
            t += L.pinv(N.dot(unit.Z.T, unit.Z))

        sigmasq /= (self.N - (self.m - 1) * self.q - self.p)
        self.sigma = N.sqrt(sigmasq)
        self.D = (D - sigmasq * t) / self.m
def con2vert(A, b):
    """
    Convert sets of constraints to a list of vertices (of the feasible region).
    If the shape is open, con2vert returns False for the closed property.
    """
    # Python implementation of con2vert.m by Michael Kleder (July 2005),
    #  available: http://www.mathworks.com/matlabcentral/fileexchange/7894
    #  -con2vert-constraints-to-vertices
    # Author: Michael Kleder (Original)
    #         Andre Campher (Python implementation)
    c = linalg.lstsq(mat(A), mat(b))[0]
    btmp = mat(b)-mat(A)*c
    D = mat(A)/matlib.repmat(btmp, 1, A.shape[1])

    fmatv = qhull(D, "Ft") #vertices on facets

    G  = zeros((fmatv.shape[0], D.shape[1]))
    for ix in range(0, fmatv.shape[0]):
        F = D[fmatv[ix, :], :].squeeze()
        G[ix, :] = linalg.lstsq(F, ones((F.shape[0], 1)))[0].transpose()

    V = G + matlib.repmat(c.transpose(), G.shape[0], 1)
    ux = uniqm(V)

    eps = 1e-13
    Av = dot(A, ux.T)
    bv = tile(b, (1, ux.shape[0]))
    closed = sciall(Av - bv <= eps)

    return ux, closed
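
# Usage sketch (not from the source): the unit box -1 <= x, y <= 1 written as
# A*x <= b. This assumes the module's own qhull wrapper and uniqm helper are
# importable alongside con2vert.
from numpy import array, ones

A = array([[ 1.,  0.],
           [-1.,  0.],
           [ 0.,  1.],
           [ 0., -1.]])
b = ones((4, 1))

vertices, closed = con2vert(A, b)
print(vertices)  # the four corners (+-1, +-1), up to ordering
print(closed)    # True: the feasible region is bounded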
 def _calculate_log_likelihood(self):
     #if self.m == None:
     #    Give error message
     R = zeros((self.n, self.n))
     X,Y = array(self.X), array(self.Y)
     thetas = 10.**self.thetas
     for i in range(self.n):
         for j in arange(i+1,self.n):
             R[i,j] = (1-self.nugget)*e**(-sum(thetas*(X[i]-X[j])**2.)) #weighted distance formula
     R = R + R.T + eye(self.n)
     self.R = R
     one = ones(self.n)
     try:
         self.R_fact = cho_factor(R)
         rhs = vstack([Y, one]).T
         R_fact = (self.R_fact[0].T,not self.R_fact[1])
         cho = cho_solve(R_fact, rhs).T
         
         self.mu = dot(one,cho[0])/dot(one,cho[1])
         self.sig2 = dot(Y-dot(one,self.mu),cho_solve(self.R_fact,(Y-dot(one,self.mu))))/self.n
         #self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))-sum(thetas)
         self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))
     except (linalg.LinAlgError,ValueError):
         #------LSTSQ---------
         self.R_fact = None #reset this to none, so we know not to use cholesky
         #self.R = self.R+diag([10e-6]*self.n) #improve conditioning[Booker et al., 1999]
         rhs = vstack([Y, one]).T
         lsq = lstsq(self.R.T,rhs)[0].T
         self.mu = dot(one,lsq[0])/dot(one,lsq[1])
         self.sig2 = dot(Y-dot(one,self.mu),lstsq(self.R,Y-dot(one,self.mu))[0])/self.n
         self.log_likelihood = -self.n/2.*log(self.sig2)-1./2.*log(abs(det(self.R)+1.e-16))
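
# Standalone sketch (my own toy data) of the concentrated likelihood the
# Cholesky branch above computes: mu = 1'R^-1 Y / 1'R^-1 1, then sig2 and
# the log-likelihood with the same determinant guard.
import numpy as np
from scipy.linalg import cho_factor, cho_solve

X = np.linspace(0., 1., 8)
Y = np.sin(2. * np.pi * X)
theta = 5.0
R = np.exp(-theta * (X[:, None] - X[None, :]) ** 2) + 1e-10 * np.eye(8)
one = np.ones(8)

R_fact = cho_factor(R)
mu = one.dot(cho_solve(R_fact, Y)) / one.dot(cho_solve(R_fact, one))
resid = Y - mu
sig2 = resid.dot(cho_solve(R_fact, resid)) / len(X)
log_likelihood = (-len(X) / 2. * np.log(sig2)
                  - 1. / 2. * np.log(abs(np.linalg.det(R)) + 1.e-16))
print(log_likelihood)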
Example #4
def dfa(x, ave=None, l=None):
    x = np.array(x)

    if ave is None:
        ave = np.mean(x)

    y = np.cumsum(x)
    y -= ave

    if l is None:
        l = np.floor(len(x) * 1 / (2 ** np.array(range(4, int(np.log2(len(x))) - 4))))

    f = np.zeros(len(l))  # f(n) of different given box length n

    for i in range(0, len(l)):
        n = int(l[i])  # for each box length L[i]
        if n == 0:
            print("time series is too short while the box length is too big")
            print("abort")
            exit()
        for j in range(0, len(x), n):  # for each box
            if j + n < len(x):
                c = range(j, j + n)
                c = np.vstack([c, np.ones(n)]).T  # coordinates of time in the box
                y_box = y[j:j + n]  # the values of the profile in this box
                f[i] += lstsq(c, y_box)[1][0]  # add the squared fit residual in this box
        f[i] /= ((len(x) / n) * n)
    f = np.sqrt(f)

    alpha = lstsq(np.vstack([np.log(l), np.ones(len(l))]).T, np.log(f))[0][0]

    return alpha
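
# Sanity check (mine, not from the source; assumes numpy as np and
# numpy.linalg's lstsq are imported as the function expects): for
# uncorrelated white noise the DFA scaling exponent should be near 0.5.
import numpy as np
from numpy.linalg import lstsq

np.random.seed(0)
alpha = dfa(np.random.randn(4096))
print(alpha)  # expect roughly 0.5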
Example #5
def cca(x_tn,y_tm, reg=0.00000001):
    x_tn = x_tn-x_tn.mean(axis=0)
    y_tm = y_tm-y_tm.mean(axis=0)
    N = x_tn.shape[1]
    M = y_tm.shape[1]
    xy_tq = c_[x_tn,y_tm]
    cqq = cov(xy_tq,rowvar=0)
    cxx = cqq[:N,:N]+reg*np.eye(N)+0.000000001*np.ones((N,N))
    cxy = cqq[:N,N:(N+M)]+0.000000001*np.ones((N,M))
    cyx = cqq[N:(N+M),:N]+0.000000001*np.ones((M,N))
    cyy = cqq[N:(N+M),N:(N+M)]+reg*np.eye(M)+0.000000001*np.ones((M,M))
    
    K = min(N,M)
    
    xldivy = lstsq(cxx,cxy)[0]
    yldivx = lstsq(cyy,cyx)[0]
    #print xldivy
    #print dot(np.linalg.inv(cxx),cxy)
    _,vecs = eig(dot(xldivy,yldivx))
    a_nk = vecs[:,:K]
    #print normr(vecs.T)
    b_mk = dot(yldivx,a_nk)
    
    u_tk = dot(x_tn,a_nk)
    v_tk = dot(y_tm,b_mk)
    
    return a_nk,b_mk,u_tk,v_tk
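
# Usage sketch (toy data of my own; assumes the module-level imports cca
# relies on: np, c_, cov, dot, eig, lstsq). x and y share one latent signal,
# so one canonical pair should be strongly correlated.
import numpy as np

np.random.seed(1)
t = np.random.randn(500, 1)  # shared latent signal
x_tn = np.c_[t + 0.1 * np.random.randn(500, 1), np.random.randn(500, 2)]
y_tm = np.c_[t + 0.1 * np.random.randn(500, 1), np.random.randn(500, 2)]

a_nk, b_mk, u_tk, v_tk = cca(x_tn, y_tm)
u, v = np.real(u_tk), np.real(v_tk)
corrs = [abs(np.corrcoef(u[:, k], v[:, k])[0, 1]) for k in range(u.shape[1])]
print(max(corrs))  # close to 1 (eig does not sort, so take the max over pairs)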
Example #6
    def initialize(self):
        S = sum([np.dot(unit.X.T, unit.X) for unit in self.units])
        Y = sum([np.dot(unit.X.T, unit.Y) for unit in self.units])
        self.a = L.lstsq(S, Y)[0]

        D = 0
        t = 0
        sigmasq = 0
        for unit in self.units:
            unit.r = unit.Y - np.dot(unit.X, self.a)
            if self.q > 1:
                unit.b = L.lstsq(unit.Z, unit.r)[0]
            else:
                Z = unit.Z.reshape((unit.Z.shape[0], 1))
                unit.b = L.lstsq(Z, unit.r)[0]

            sigmasq += (np.power(unit.Y, 2).sum() -
                        (self.a * np.dot(unit.X.T, unit.Y)).sum() -
                        (unit.b * np.dot(unit.Z.T, unit.r)).sum())
            D += np.multiply.outer(unit.b, unit.b)
            t += L.pinv(np.dot(unit.Z.T, unit.Z))

        #TODO: JP added df_resid check
        self.df_resid = (self.N - (self.m - 1) * self.q - self.p)
        sigmasq /= (self.N - (self.m - 1) * self.q - self.p)
        self.sigma = np.sqrt(sigmasq)
        self.D = (D - sigmasq * t) / self.m
def multiLinearExampleWithLeastSQR():
  from numpy.linalg import lstsq
  X = [[1,6,2],[1,8,1],[1,10,0],[1,14,2],[1,18,0]]
  y = [[7],[9],[13],[17.5],[18]]
  print("\n Least sqr:")
  print(lstsq(X,y)[0])
Example #8
def simplex(A,b,c,basis):
    B=A[:,basis]
    xx,resid,rank,s = lin.lstsq(B,b)
    x = np.zeros(c.shape[0])    
    v = np.zeros(c.shape[0])
    x[basis[0]] = xx[0,0]
    cost = c[basis].T*x[basis]     # cost at starting corner
    for iteration in np.arange(100):
        y = lin.lstsq(B.T,c[basis])           # this y may not be feasible
        y = np.array([y[0]]).T
        idx = (c - np.dot(A.T,y).T).argmin()
        rmin = (c - np.dot(A.T,y).T).min()
        if rmin >= -0.00000001:      # optimality is reached, r>=0
            break                 # current x and y are optimal
         
        print(B)
        print(A)
        print(A[:,idx])
        print(idx)
        v[basis] = lin.lstsq(B,A[:,idx])[0]
        v[basis] = np.maximum(v[basis], .000001)  # clamp to avoid division by zero
        tmp = x[basis] / v[basis]
        out = tmp.argmin()
        minratio = tmp.min()
        if v[out] == .000001:  # out = index of first x to reach 0
            break      # break when that edge is extremely short
            
        cost = cost + minratio*rmin  # lower cost at end of step
        x[basis] = x[basis] - minratio*v[basis]   # update old x
        x[idx] = minratio      # find new positive component of x
        basis[out] = idx      # replace old index by new in basis
        print(basis)
    
    return x,y,cost
Example #9
def calibrate_peak_ensemble(models_forecasts, measurements, forecast_len = 48, peak_level = 80):
    T_predictors = []
    T_target = []
    H_predictors = []
    H_target = []

    rng = min(len(model) for model in models_forecasts)
    for tm in range(rng):
        msm = measurements[tm * 6: tm * 6 + forecast_len]
        measured_peaks = detect_peaks(msm, peak_level)
        if not measured_peaks:
            continue

        forecasts = [prd[tm] for prd in models_forecasts]
        forecasts_peaks = [detect_peaks(fcst, peak_level) for fcst in forecasts]
        
        forecasts_peaks_cor = [list(map(lambda x: find_corresponding_peak(x, forecast_peaks),
                                      measured_peaks)) for forecast_peaks in forecasts_peaks]
        for measured, *corresponding in zip(measured_peaks, *forecasts_peaks_cor):
            if all(corresponding):
                H_predictors.append([peak[3] for peak in corresponding] + [1])
                T_predictors.append([peak[2] for peak in corresponding] + [1])
                H_target.append(measured[3])
                T_target.append(measured[2])

    print(H_predictors, H_target)

    H_coefs = lstsq(H_predictors, H_target)[0]
    T_coefs = lstsq(T_predictors, T_target)[0]
    return list(T_coefs), list(H_coefs)
Example #10
def solve_and_check_impl(A,Ah,B,Bh):
    Am=numpy.matrix(A)
    Amh=numpy.matrix(Ah)
    Bm=numpy.matrix(B)
    Bmh=numpy.matrix(Bh)
    Ar,Aresidue=linalg.lstsq(Am,Amh)[0:2]
    Br,Bresidue=linalg.lstsq(Bm,Bmh)[0:2]
    A11,A12,Tx=Ar.transpose()[0].tolist()[0]
    A21,A22,Ty=Br.transpose()[0].tolist()[0]
    mA=numpy.matrix([[A11,A12],[A21,A22]])
    T=numpy.matrix([[Tx],[Ty]])
    print "Determinant:",linalg.det(mA)
    error=0
    for m,mh in zip(A,Ah):
        #print "m:",m,"mh:",mh        
        x,y,one=m
        deg=mh[0]
        X=numpy.matrix([[x],[y]])        
        Y=mA*X+T
        lat,lon=Y[0,0],Y[1,0]
        #print "Mapped lat",lat,"correct",deg
        error=max(error,(deg-lat))
    for m,mh in zip(B,Bh):
        #print "m:",m,"mh:",mh        
        x,y,one=m
        deg=mh[0]
        X=numpy.matrix([[x],[y]])        
        Y=mA*X+T
        lat,lon=Y[0,0],Y[1,0]
        #print "Mapped lon",lon,"correct",deg
        error=max(error,(deg-lon))
    Ai=linalg.inv(mA)
        
    return error,mA,T
Example #11
def getOrthColumns(m):
    '''
    Constructs the orthogonally complementing columns of the input.

    Input of the form pxr is assumed to have r<=p,
    and have either full column rank r or rank 0 (scalar or matrix)
    Output is of the form px(p-r), except:
    a) if M square and full rank p, returns a px0 empty array
    b) if rank(M)=0 (zero matrix), returns I_p
    (Note you cannot pass scalar zero, because dimension info would be
    missing.)
    Return type is as input type.
    '''
    if type(m) == type(asarray(m)):
        m = mat(m)
        output = 'array'
    else: output = 'matrix'
    p, r = m.shape
    # first catch the stupid input case
    if p < r: raise ValueError('need at least as many rows as columns')
    # we use lstsq(M, ones) just to exploit its rank-finding algorithm,
    rk = lstsq(m, ones(p).T)[2]
    # first the square and full rank case:
    if rk == p: result = zeros((p,0))   # note the shape! hopefully octave-like
    # then the zero-matrix case (within machine precision):
    elif rk == 0: result = eye(p)
    # now the rank-deficient case:
    elif rk < r:
        raise ValueError('sorry, matrix does not have full column rank')
    # (what's left should be ok)
    else:
        # we have to watch out for zero rows in M,
        # if they are in the first p-r positions!
        # so the (probably inefficient) algorithm:
            # 1. check the rank of each row
            # 2. if zero, then also put a zero row in c
            # 3. if not, put the next unit vector in c-row
        idr = eye(r)
        idpr = eye(p-r)
        c = empty([0,r])    # starting point  
        co = empty([0, p-r]) # will hold orth-compl.
        idrcount = 0
        for row in range(p):
            # (must be ones() instead of 1 because of the 2d-requirement)
            if lstsq( m[row,:], ones(1) )[2] == 0 or idrcount >= r:
                c = r_[ c, zeros((1, r)) ]
                co = r_[ co, idpr[row-idrcount, :].reshape(1, -1) ]
            else:     # row is non-zero, and we haven't used all unit vecs
                c = r_[ c, idr[idrcount, :].reshape(1, -1) ]
                co = r_[ co, zeros((1, p-r)) ]
                idrcount += 1
        # earlier non-general (=bug) line: c = mat(r_[eye(r), zeros((p-r, r))])
        # and:  co = mat( r_[zeros((r, p-r)), eye(p-r)] )
        # old:
        # result = ( eye(p) - c * (M.T * c).I * M.T ) * co
        result = co - dot(c, solve(m.T * c, m.T * co))
    if output == 'array': return asarray(result)
    else: return mat(result)
def my_nmf(document_term_mat, n_components=15, n_iterations=50, eps=1e-6):
    n_rows, n_cols = document_term_mat.shape
    W = rand(n_rows*n_components).reshape([n_rows, n_components])
    H = rand(n_components*n_cols).reshape([n_components, n_cols])
    # linalg.lstsq doesn't work on sparse mats
    dense_document_term_mat = document_term_mat.todense()
    for i in range(n_iterations):
        H = linalg.lstsq(W, dense_document_term_mat)[0].clip(eps)
        W = linalg.lstsq(H.T, dense_document_term_mat.T)[0].clip(eps).T
    return array(W), array(H)
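
# Usage sketch (toy matrix of my own; assumes the module imports my_nmf uses:
# numpy's rand/array and a linalg module providing lstsq).
import numpy as np
import scipy.sparse as sp

np.random.seed(0)
dtm = sp.csr_matrix(np.random.rand(30, 40))   # toy document-term matrix
W, H = my_nmf(dtm, n_components=5, n_iterations=20)
print(W.shape, H.shape)                       # (30, 5) (5, 40)
print(np.abs(np.asarray(dtm.todense()) - W.dot(H)).mean())  # reconstruction error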
Example #13
def glrd_diverse(V, G, F, r, err_V, err_F):
    # diversity threshold is 0.5
    for k in range(r):
        G_copy = np.copy(G)  # create local copies for excluding the k^th col and row of G and F resp.
        F_copy = np.copy(F)
        G_copy[:, k] = 0.0
        F_copy[k, :] = 0.0

        R = V - np.dot(G_copy, F_copy)  # compute residual

        # Solve for optimal G(.)(k) with diversity constraints
        F_k = F[k, :]
        x_star_G = linalg.lstsq(R.T, F_k.T)[0].T
        x_G = cvx.Variable(x_star_G.shape[0])

        objective_G = cvx.Minimize(cvx.norm2(x_star_G - x_G))

        constraints_G = [x_G >= 0]
        for j in range(r):
            if j != k:
                constraints_G += [x_G.T * G[:, j] <= err_V]

        prob_G = cvx.Problem(objective_G, constraints_G)
        result = prob_G.solve(solver='SCS')
        if not np.isinf(result):
            G_k_min = np.asarray(x_G.value)
            G[:, k] = G_k_min[:, 0]
        else:
            print(result)

        # Solve for optimal F(k)(.) with diversity constraints
        G_k = G[:, k]
        x_star_F = linalg.lstsq(R, G_k)[0]
        x_F = cvx.Variable(x_star_F.shape[0])
        objective_F = cvx.Minimize(cvx.norm2(x_star_F - x_F))

        constraints_F = [x_F >= 0]
        for j in range(r):
            if j != k:
                constraints_F += [x_F.T * F[j, :] <= err_F]

        prob_F = cvx.Problem(objective_F, constraints_F)
        result = prob_F.solve(solver='SCS')
        if not np.isinf(result):
            F_k_min = np.asarray(x_F.value)
            F[k, :] = F_k_min[0, :]
        else:
            print(result)

    return G, F
Example #14
def kal0(x,sv=None,Kdisp=1.0,Nsamp=1000,L=5,Norder=3,pg=1.0,vg=1.0,
           sigma0=1000,N0=200,Prange=8):
  x = x.T
  # Time scale
  if sv is None:
    mux = x-mean(x,0)
    phi = unwrap(angle(mux[:,0]+1j*mux[:,1]))
    sv= 2*pi*x.shape[0]/abs(phi[-1]-phi[0])
  # System matrix
  A =  Kdisp*eye(2*x.shape[1])
  A[:x.shape[1],x.shape[1]:2*x.shape[1]] = eye(x.shape[1])/sv
  
  # Observation matrix
  C = zeros((x.shape[1],2*x.shape[1]))
  C[:x.shape[1],:x.shape[1]] = eye(x.shape[1])
  
  # Observation covariance
  R = cov((x[:-1]-x[1:]).T)/sqrt(2.0)
  
  # System covariance
  idx = random.randint(x.shape[0]-5,size=(Nsamp))
  idx = vstack([idx+i for i in range(L)])
  tx = x[idx].reshape(idx.shape[0],-1)
  P = array([[(i-(L-1)//2)**j for i in range(L)] for j in range(Norder)])
  K = lstsq(P.T,tx)[0]
  s = (cov((tx-dot(P[:-1].T,K[:-1]))[1])-cov((tx-dot(P.T,K))[1]))/cov((tx-dot(P[:-1].T,K[:-1]))[1])
  D = zeros_like(A)
  D[:x.shape[1],:x.shape[1]] = R*pg
  D[x.shape[1]:,x.shape[1]:] = R*vg
  Q = D*s
  return(Kalman(A,C,Q,R))
Example #15
    def add_data(self, v):
        mask_c = np.isnan(v)
        mask = 1 - mask_c
        U = self.U

        n = self.n
        d = self.d

        Ov = v[mask==1]
        OU = U[mask==1,:]

        w, _, __, ___ = la.lstsq(OU, Ov)
        p = U.dot(w)
        r = np.zeros((n,))
        r[mask==1] = Ov - p[mask==1]

        sigma = la.norm(r) * la.norm(p)
        eta = self.eta0 / self.it

        pw = la.norm(p) * la.norm(w)
        rw = la.norm(r) * la.norm(w)

        if pw == 0 or rw == 0: return

        U = U + (np.cos(sigma * eta) - 1.0) * np.outer(p, w) / pw \
              + np.sin(sigma * eta) * np.outer(r, w) / rw

        self.U = U
        self.it += 1.0
Example #16
File: HW4.py Project: mfintz/CV
def GalculateLinesFromSegments():
    global segments, lines
    for i in range(0, 3):
        sectionSegments = segments[i]
        for segment in sectionSegments:
            x1 = segment[0][0]
            y1 = segment[0][1]
            x2 = segment[1][0]
            y2 = segment[1][1]
            points = [(x1,y1),(x2,y2)]
            x_coords, y_coords = zip(*points)
            A = vstack([x_coords,ones(len(x_coords))]).T

            #
            # We use the LSQ approach to solve the equations
            #
            m, c = lstsq(A, y_coords)[0] # y = mx + c
            newLine = [m, -1, - c]
            lines[i].append(newLine)

            if DEBUG_OUTPUT:
                print("Added line: ")
                print(newLine)
                print("\n")
    return
Example #17
def calibrate_ensemble(models_forecasts, measurements, forecast_len = 48):
    """Calculates coefficient for models in ensembles usulg OLS.
       Returns coefficients for all possible ensembles obtained by models combinations.
    """
    models_count = len(models_forecasts)

    predictors = [list() for mdl in range(models_count)]
    target = list()

    rng = min(len(model) for model in models_forecasts)
    for tm in range(rng):
        msm = measurements[tm * 6: tm * 6 + forecast_len]
        msm_len = len(msm)
        for current_prediction, predictor \
                in zip([prd[tm] for prd in models_forecasts], predictors):
            predictor.extend(current_prediction[:msm_len])
        target.extend(msm)

    ensembles = list()
    for ens_map in reversed(list(product([1,0], repeat = models_count))):
        ensemble_predictors = \
                    [[a*b for a,b in zip(point, ens_map)] for point in zip(*predictors)]

        ensemble_predictors = [pred + [1] for pred in ensemble_predictors]
        coefs = list(lstsq(ensemble_predictors, target)[0])
        ensembles.append(coefs)
    return ensembles
Example #18
def calibrate(x, y, z, sensor_type):
  H = numpy.array([x, y, z, -y**2, -z**2, numpy.ones(len(x))])
  H = numpy.transpose(H)
  w = x**2

  (X, residues, rank, shape) = linalg.lstsq(H, w)

  OSx = X[0] / 2
  OSy = X[1] / (2 * X[3])
  OSz = X[2] / (2 * X[4])

  A = X[5] + OSx**2 + X[3] * OSy**2 + X[4] * OSz**2
  B = A / X[3]
  C = A / X[4]

  SCx = numpy.sqrt(A)
  SCy = numpy.sqrt(B)
  SCz = numpy.sqrt(C)

  # type conversion from numpy.float64 to standard python floats
  offsets = [OSx, OSy, OSz]
  scale = [SCx, SCy, SCz]

  offsets = [float(v) for v in offsets]  # numpy.asscalar was removed from numpy
  scale = [float(v) for v in scale]

  #misalignment matrix
  if(sensor_type == "mag"):
    mis_matrix = calibrate_misalignment(x_file, y_file, z_file)
    return (offsets, scale, mis_matrix)
  else:
    return (offsets, scale)
Example #19
  def run(self, genes):
    """ Computes cross validations for given set of clusters. """
    from numpy import dot, array
    from numpy.linalg import lstsq

    # creates boolean array from genes.
    genes = array([i in genes for i in range(self.pis.shape[1])])
    # selects pis for given set of clusters.
    pis = self.pis[:,genes]

    # loops over cross-validation sets.
    scores, fits = [], []
    for croset, fitset in zip(self.crosets, self.fitsets): 
      # pis, energies in cross-validation set.
      cropis = pis[croset, :]
      croene = self.energies[croset]
      # pis, energies in fitting set.
      fitpis = pis[fitset,:]
      fitene = self.energies[fitset]
      # performs fitting.
      try: interactions = lstsq(fitpis, fitene)
      except Exception:
        print("Encountered error in least-square fit.")
        print(fitpis.shape, fitene.shape, self.pis.shape, genes)
        raise
      else: interactions = interactions[0]
      scores.append(dot(cropis, interactions) - croene)
      fits.append(dot(fitpis, interactions) - fitene)
      
    return array(scores), array(fits)
Example #20
def calibrate(x, y, z):
  H = numpy.array([x, y, z, -y**2, -z**2, numpy.ones(len(x))])
  H = numpy.transpose(H)
  w = x**2
  
  (X, residues, rank, shape) = linalg.lstsq(H, w)
  
  OSx = X[0] / 2
  OSy = X[1] / (2 * X[3])
  OSz = X[2] / (2 * X[4])
  
  A = X[5] + OSx**2 + X[3] * OSy**2 + X[4] * OSz**2
  B = A / X[3]
  C = A / X[4]
  
  SCx = 1.0 / numpy.sqrt(A)
  SCy = 1.0 / numpy.sqrt(B)
  SCz = 1.0 / numpy.sqrt(C)
  
  # type conversion from numpy.float64 to standard python floats
  offsets = [OSx, OSy, OSz]
  scale = [SCx, SCy, SCz]
  
  offsets = [float(v) for v in offsets]  # numpy.asscalar was removed from numpy
  scale = [float(v) for v in scale]
  
  return (offsets, scale)
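
# Round-trip check (synthetic data of my own) of the ellipsoid model both
# calibrate() variants fit: ((x-OSx)/a)^2 + ((y-OSy)/b)^2 + ((z-OSz)/c)^2 = 1.
# This variant returns scale = (1/a, 1/b, 1/c), i.e. the inverse semi-axes.
import numpy
from numpy import linalg

numpy.random.seed(0)
u = numpy.random.randn(2000, 3)
u /= linalg.norm(u, axis=1, keepdims=True)   # points on the unit sphere
true_offsets = numpy.array([0.1, -0.2, 0.3])
true_axes = numpy.array([1.0, 1.1, 0.9])     # semi-axes a, b, c
x, y, z = (true_offsets + true_axes * u).T   # points on the ellipsoid

offsets, scale = calibrate(x, y, z)
print(offsets)  # ~ [0.1, -0.2, 0.3]
print(scale)    # ~ [1.0, 0.909, 1.111] = 1/true_axes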
Example #21
 def train(self):
     #self._split_data()
     #print self.train_matrix.shape,self.train_target.shape
     self._random_split_data()
     #print self.train_matrix.shape,self.train_target.shape
     self.weight=linalg.lstsq(self.train_matrix.T,self.train_target)[0]
     return self.weight
Example #22
def compute(x, y, z):
  H = numpy.array([x, y, z, -y**2, -z**2, numpy.ones(len(x))])
  H = numpy.transpose(H)
  w = x**2
  
  # Least square solution to the fitting
  (X, residues, rank, shape) = linalg.lstsq(H, w)
  
  # Offset values (center)
  OSx = X[0] / 2
  OSy = X[1] / (2 * X[3])
  OSz = X[2] / (2 * X[4])
  
  A = X[5] + OSx**2 + X[3] * OSy**2 + X[4] * OSz**2
  B = A / X[3]
  C = A / X[4]

  #print(A, B, C)

  # Scaling values
  SCx = numpy.sqrt(A)
  SCy = numpy.sqrt(B)
  SCz = numpy.sqrt(C)
  
  return ([OSx, OSy, OSz], [SCx, SCy, SCz])
Example #23
def f(x):
	A = np.zeros((points.shape))
	A[:, 0] = 1
	A[:, 1] = points[:, 0]
	b = points[:, 1]
	coeff = la.lstsq(A, b)[0]
	return coeff[0] + coeff[1] * x
Example #24
def _leastsqrs(xx, yy, siz, tmp_mask):
    """
    Following is faster than np.polyfit
    e.g., return np.polyfit(xx[tmp_mask], yy[tmp_mask], 1)
    """
    A = np.array([xx[tmp_mask], np.ones(siz)])
    return linalg.lstsq(A.T, yy[tmp_mask])[0]
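
# A quick equivalence check (mine; assumes _leastsqrs and its numpy/linalg
# imports are in scope). Both calls should return [slope, intercept].
import numpy as np

xx = np.linspace(0.0, 1.0, 50)
yy = 3.0 * xx + 2.0 + 0.01 * np.random.randn(50)
tmp_mask = np.ones(50, dtype=bool)

print(_leastsqrs(xx, yy, tmp_mask.sum(), tmp_mask))
print(np.polyfit(xx[tmp_mask], yy[tmp_mask], 1))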
Example #25
def evaluate():
	print("Collect data...")
	hashs = collect_data()

	print('Build matrix...')
	A, B, symrow = build_matrix(hashs)

	# Reduce matrix A
	print('Simplify matrix...')
	bases = []
	reduce_matrix(A, symrow, [bases])

	# Calculate value
	print('Figuring values...')
	R = nplag.lstsq(A, B)

	# Print result
	print('--------------------')
	utils.build_symbol_map()
	for i in range(0, len(R[0])):
		if symrow[i] == 0:
			print("Base", end=' ')
		else:
			if len(bases) > 0:
				if i in bases[0]:
					print("Base", end=' ')
			elif len(bases) > 1:
				for x in range(0, len(bases)):
					if i in bases[x]:
						print("Base" + x, end=' ')
			for s in symrow[i]:
				print(utils.smap[s], end=' ')
		print("=", end=' ')
		print(str(R[0][i]))
Example #26
 def trainClassifer(self,labels,vectors,ilog=None):
     '''
     Train the polynomial.  Do not call this function
     manually, instead call the train function on the super
     class.
     '''
     #build matrix
     matrix = []
     for each in vectors:
         if len(each) != 2:
             raise ValueError("ERROR: Vector length=%d.  Polynomial2D only predicts for vectors of length 2."%len(each))
         x,y = each
         matrix.append(self.buildRow(x,y))
     
     matrix = array(matrix)
     labels = array(labels)
     
     x,resids,rank,s = lstsq(matrix,labels)
     
     self.x = x
     self.resids = resids
     self.rank = rank
     self.s = s
     
     if rank != matrix.shape[1]:
         print "WARNING: Polynomial is not fully constrained."
Example #27
    def calculate(self):
        """Calculates the homography if there are 4+ point pairs"""
        n = len(self.display_points)

        if n < self.points:
            print "Need 4 points to calculate transform"
            return None

        # This calculation is from the paper, A Plane Measuring Device
        # by A. Criminisi, I. Reid, A. Zisserman.  For more details, see:
        # http://www.robots.ox.ac.uk/~vgg/presentations/bmvc97/criminispaper/
        A = numpy.zeros((n * 2, 8))
        B = numpy.zeros((n * 2, 1))
        for i in range(0, n):
            A[2 * i][0:2] = self.camera_points[i]
            A[2 * i][2] = 1
            A[2 * i][6] = -self.camera_points[i][0] * self.display_points[i][0]
            A[2 * i][7] = -self.camera_points[i][1] * self.display_points[i][0]
            A[2 * i + 1][3:5] = self.camera_points[i]
            A[2 * i + 1][5] = 1
            A[2 * i + 1][6] = -self.camera_points[i][0] * self.display_points[i][1]
            A[2 * i + 1][7] = -self.camera_points[i][1] * self.display_points[i][1]
            B[2 * i] = self.display_points[i][0]
            B[2 * i + 1] = self.display_points[i][1]

        X = linalg.lstsq(A, B)
        return numpy.reshape(numpy.vstack((X[0], [1])), (3, 3))
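
# Standalone sketch of the same 8-unknown DLT solve outside the class (the
# point pairs below are made up; with 4 exact pairs lstsq solves the 8x8
# system exactly).
import numpy
from numpy import linalg

camera_points = [(0., 0.), (1., 0.), (1., 1.), (0., 1.)]
display_points = [(10., 20.), (110., 25.), (105., 115.), (5., 110.)]

n = len(camera_points)
A = numpy.zeros((n * 2, 8))
B = numpy.zeros((n * 2, 1))
for i in range(n):
    x, y = camera_points[i]
    u, v = display_points[i]
    A[2 * i, 0:2] = (x, y)
    A[2 * i, 2] = 1
    A[2 * i, 6:8] = (-x * u, -y * u)
    A[2 * i + 1, 3:5] = (x, y)
    A[2 * i + 1, 5] = 1
    A[2 * i + 1, 6:8] = (-x * v, -y * v)
    B[2 * i], B[2 * i + 1] = u, v

H = numpy.reshape(numpy.vstack((linalg.lstsq(A, B, rcond=None)[0], [1])), (3, 3))
p = H.dot(numpy.array([0., 0., 1.]))
print(p[:2] / p[2])  # ~ [10. 20.], the first display point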
Example #28
def least_squares_fit(M, S):
    """
    Least squares fit of two datastreams to create calibration matrix C.
    Args:
        M: reference datastream, one row per sample.
        S: raw sensor datastream with 8 channels per sample (each channel is
           expanded to [s, s**2, s**3] before the fit).
    Returns:
        Calibration matrix C.
    """
    S_squared = S * S
    S_cubed = S * S * S
    S_24 = np.zeros((S.shape[0], 8, 3))

    for i in range(S.shape[0]):
        for j in range(S.shape[1]):
            thirds = np.zeros((3, ))
            thirds[0], thirds[1], thirds[2] = S[i][j], S_squared[i][
                j], S_cubed[i][j]
            S_24[i][j] = thirds

    S_24 = S_24.reshape((S_24.shape[0], 24))
    print "=== S_24 shape:{0} ===".format(S_24.shape)

    #C is the vector we are solving for in the S_24 * C = M equation.
    C = lstsq(S_24, M)[0]
    print "=== least squares fit DONE ==="
    print "=== C shape: {0} ===".format(C.shape)
    return C
Example #29
def __loglinregression(rs, zs):
    coef = linalg.lstsq(c_[log(rs), (1,)*len(rs)], log(zs))[0]
    a, b = coef
    #print('Regression: log(z) = %f*log(r) + %f' % (a, b))
    if a > -1.0:
        print('Warning: slope is > -1.0')
    return a, b
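
# Sanity check (toy data of mine) of the same log-log regression: an exact
# power law z = r**-1.5 should recover slope a = -1.5 and intercept b = 0.
from numpy import array, log, c_
from numpy import linalg

rs = array([1., 2., 4., 8., 16.])
zs = rs ** -1.5
a, b = linalg.lstsq(c_[log(rs), (1,) * len(rs)], log(zs), rcond=None)[0]
print(a, b)  # ~ -1.5 0.0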
Example #30
def mglm_Levenberg(y, design, dispersion=0, offset=0, coef_start=None,
        start_method='null'):
    """ Fit genewise negative binomial glms with log-link using Levenberg
    dampening for convergence.  

    Parameters
    ----------
    y : matrix 
    design : dataframe

    Adapted from Gordon Smyth's and Yunshun Chen's algorithm in R.
    """
    design = add_constant(design)
    if coef_start is None:
        start_method = [i for i in ['null', 'y'] if i == start_method][0]
        if start_method == 'null': N = exp(offset)
    else: 
        coef_start = asarray(coef_start)

    if coef_start is None:
        if start_method == 'y':
            delta = min(np.max(y), 1/6)
            y1 = np.maximum(y, delta)  # np.maximum plays the role of R's pmax
            fit = lstsq(design, np.log(y1 - offset))
            beta = fit[0]
            mu = np.exp(beta + offset)
        else:
            beta_mean = np.log(np.average(y,axis=1, weights=offset))
    else:
        beta = coef_start.T

    pass
    def __init__(self,
                 cluster_energies,
                 gamma,
                 site_species,
                 compositional_interactions=np.array([[0., 0.], [0., 0.]])):

        self.n_sites = len(cluster_energies.shape)
        self.species_per_site = np.array(cluster_energies.shape)
        self.site_start_indices = (np.cumsum(self.species_per_site) -
                                   self.species_per_site[0])
        self.site_index_tuples = np.array(
            [self.site_start_indices, self.species_per_site]).T
        self.n_site_species = np.sum(self.species_per_site)

        if not len(site_species) == len(self.species_per_site):
            raise Exception('site_species must be a list of lists, '
                            'each second level list containing the number '
                            'of species on each site.')

        if not all([
                len(s) == self.species_per_site[i]
                for i, s in enumerate(site_species)
        ]):
            raise Exception('site_species must have the correct '
                            'number of species on each site')
        self.site_species = site_species

        self.site_species_flat = [
            species for site in site_species for species in site
        ]
        self.n_clusters = len(self.site_species_flat)

        self.components = sorted(list(set(self.site_species_flat)))
        self.n_components = len(self.components)

        # Make correlation matrix between composition and site species
        self.site_species_compositions = np.zeros(
            (len(self.site_species_flat), len(self.components)), dtype='int')
        for i, ss in enumerate(self.site_species_flat):
            self.site_species_compositions[i, self.components.index(ss)] = 1

        clusters = itertools.product(*[
            np.identity(n_species, dtype='int')
            for n_species in cluster_energies.shape
        ])

        self.cluster_energies = cluster_energies
        self.cluster_energies_flat = cluster_energies.flatten()

        self.cluster_occupancies = np.array([np.hstack(cl) for cl in clusters])

        self.cluster_compositions = np.einsum('ij, jk->ik',
                                              self.cluster_occupancies,
                                              self.site_species_compositions)

        self.pivots = list(
            sorted(set([list(c).index(1)
                        for c in self.cluster_occupancies.T])))
        self.n_ind = len(self.pivots)

        ind_cl_occs = np.array(
            [self.cluster_occupancies[p] for p in self.pivots], dtype='int')
        ind_cl_comps = np.array(
            [self.cluster_compositions[p] for p in self.pivots], dtype='int')
        self.independent_cluster_occupancies = ind_cl_occs
        self.independent_cluster_compositions = ind_cl_comps

        self.independent_interactions = np.einsum('ij, lk, jk->il',
                                                  ind_cl_comps, ind_cl_comps,
                                                  compositional_interactions)

        null = Matrix(self.independent_cluster_compositions.T).nullspace()
        rxn_matrix = np.array([np.array(v).T[0] for v in null])
        self.isochemical_reactions = rxn_matrix
        self.n_reactions = len(rxn_matrix)

        self._ps_to_p_ind = pinv(self.independent_cluster_occupancies.T)

        self.A_ind = lstsq(self.independent_cluster_occupancies.T,
                           self.cluster_occupancies.T,
                           rcond=None)[0].round(decimals=10).T

        self._AA = np.einsum('ik, ij -> ijk', self.A_ind, self.A_ind)
        self._AAA = np.einsum('il, ik, ij -> ijkl', self.A_ind, self.A_ind,
                              self.A_ind)

        self.gamma = gamma

        np.random.seed(seed=19)
        std = np.std(self.cluster_energies_flat)
        delta = np.random.rand(len(self.cluster_energies_flat)) * std * 1.e-10
        self._delta_cluster_energies = delta
Example #32
def get_tvalue_with_alternative_library(tested_vars, target_vars, covars=None):
    """Utility function to compute tvalues with linalg or statsmodels

    Massively univariate linear model (= each target is considered
    independently).

    Parameters
    ----------
    tested_vars: array-like, shape=(n_samples, n_regressors)
      Tested variates, the associated coefficient of which are to be tested
      independently with a t-test, resulting in as many t-values.

    target_vars: array-like, shape=(n_samples, n_descriptors)
      Target variates, to be approximated with a linear combination of
      the tested variates and the confounding variates.

    covars: array-like, shape=(n_samples, n_confounds)
      Confounding variates, to be fitted but not to be tested

    Returns
    -------
    t-values: np.ndarray, shape=(n_regressors, n_descriptors)

    """
    ### set up design
    n_samples, n_regressors = tested_vars.shape
    n_descriptors = target_vars.shape[1]
    if covars is not None:
        n_covars = covars.shape[1]
        design_matrix = np.hstack((tested_vars, covars))
    else:
        n_covars = 0
        design_matrix = tested_vars
    mask_covars = np.ones(n_regressors + n_covars, dtype=bool)
    mask_covars[:n_regressors] = False
    test_matrix = np.array([[1.] + [0.] * n_covars])

    ### t-values computation
    try:  # try with statsmodels if available (more concise)
        from statsmodels.regression.linear_model import OLS
        t_values = np.empty((n_descriptors, n_regressors))
        for i in range(n_descriptors):
            current_target = target_vars[:, i].reshape((-1, 1))
            for j in range(n_regressors):
                current_tested_mask = mask_covars.copy()
                current_tested_mask[j] = True
                current_design_matrix = design_matrix[:, current_tested_mask]
                ols_fit = OLS(current_target, current_design_matrix).fit()
                t_values[i, j] = np.ravel(ols_fit.t_test(test_matrix).tvalue)
    except ImportError:  # use linalg if statsmodels is not available
        from numpy import linalg
        lost_dof = n_covars + 1  # fit all tested variates independently
        t_values = np.empty((n_descriptors, n_regressors))
        for i in range(n_regressors):
            current_tested_mask = mask_covars.copy()
            current_tested_mask[i] = True
            current_design_matrix = design_matrix[:, current_tested_mask]
            invcov = linalg.pinv(current_design_matrix)
            normalized_cov = np.dot(invcov, invcov.T)
            t_val_denom_aux = np.diag(
                np.dot(test_matrix, np.dot(normalized_cov, test_matrix.T)))
            t_val_denom_aux = t_val_denom_aux.reshape((-1, 1))
            for j in range(n_descriptors):
                current_target = target_vars[:, j].reshape((-1, 1))
                res_lstsq = linalg.lstsq(current_design_matrix, current_target)
                residuals = (current_target -
                             np.dot(current_design_matrix, res_lstsq[0]))
                t_val_num = np.dot(test_matrix, res_lstsq[0])
                t_val_denom = np.sqrt(
                    np.sum(residuals**2, 0) / float(n_samples - lost_dof) *
                    t_val_denom_aux)
                t_values[j, i] = np.ravel(t_val_num / t_val_denom)
    t_values = t_values.T
    assert t_values.shape == (n_regressors, n_descriptors)
    return t_values
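
# Smoke test (toy data of my own): two tested regressors, one confound; the
# first regressor truly drives the target, so its t-value should be large.
import numpy as np

rng = np.random.RandomState(0)
tested_vars = rng.randn(100, 2)
covars = rng.randn(100, 1)
target_vars = tested_vars.dot(np.array([[2.0], [0.0]])) + rng.randn(100, 1)

t_values = get_tvalue_with_alternative_library(tested_vars, target_vars, covars)
print(t_values.shape)  # (2, 1): (n_regressors, n_descriptors)
print(t_values)        # row 0 large, row 1 near 0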
Example #33
O = np.array(([0, 0]))
#Generating the Standard parabola

#Eigenvalues and eigenvectors
D_vec, P = LA.eig(V)

D = np.diag(D_vec)
p = P[:, 1]
eta = (u @ p)
foc = np.abs(2 * u @ p) / D_vec[1]

x = parab_gen(y, foc)
cA = np.vstack((u + eta * p, V))
cb = np.vstack((-f, (eta * p - u).reshape(-1, 1)))
c = LA.lstsq(cA, cb, rcond=None)[0]
c = c.flatten()

c1 = np.array(([
    (u @ V @ u - 2 * D_vec[0] * u @ u + D_vec[0]**2 * f) / (eta * D_vec[1]**2),
    0
]))
xStandardparab = np.vstack((x, y))

xActualparab = P @ xStandardparab + c[:, np.newaxis]

parab_coords = np.vstack((O, c)).T
plt.scatter(parab_coords[0, :], parab_coords[1, :])
vert_labels = ['$O$', '$c$']
for i, txt in enumerate(vert_labels):
    plt.annotate(
Example #34
def cal_linear_alg_lstsq(
    df_landcover,
    df_lst,
    df_emiswb,
    df_emis_repre,
    row,
    col,
    kernel_list,
    _type,
    radiance=False,
    bounds=None,
    moving_pixel=4,
):
    '''
    This function:
        - converts to a fraction map for each land cover class
          (there will be 8 matrices corresponding to the 8 land cover classes)
        - calculates using linear algebra
        
    Input:
        df_landcover: pandas Dataframe holds fraction values of each land cover class
        df_lst: pandas Dataframe holds ECOSTRESS LST values per each pixel
        df_emiswb: pandas Dataframe holds ECOSTRESS EmisWB values per each pixel
        df_emis_repre: the representative Emis values for each land cover class
        row: number of row of the whole image
        col: number of col of the whole image
        kernel_list: list of kernel window sizes (e.g. [10,20,30,40])
        bounds: upper and lower bound values for constrained optimization, optional, default is None
            which means no bounds
        _type: measured using "radiance" or "emissivity" functions
        radiance: define whether calculating using "radiance" or not, by default is False which means
            calculating using "emissivity" function
        
    Output:
        out_value: pandas Dataframe contains the output of Linear Algebra for each pixel
            columns=['value','nrow','ncol','indep_value','out_value']:
                'value': list of fraction of land cover properties, extracting from coeff_matrix_df
                'nrow','ncol': indexes of row and col
                'indep_value': ECOSTRESS LST and Emissivity values, extracting from indep_matrix_df
                'out_value': list of temperatures of land cover properties, as results of Linear Algebra
        
        
    Example:
        # Kernel Test:
        row = 47
        col = 54
        
        # Staten Island
        row = 303
        col = 243
        
        # Staten Island (490 m resolution)
        row = 43
        col = 34
        
        
        # Testing with different kernel sizes
        # Radiance function
        out_value_list = cal_linear_alg_lstsq(df_landcover_concat, df_lst, df_emiswb, df_emis_repre,  
            row, col, kernel_list=[10], _type="radiance", 
            bounds=(290**4,310**4), moving_pixel = 4,  radiance=True)
        
        
        # Emissivity function
        out_value_list = cal_linear_alg_lstsq(df_landcover_concat, df_lst, df_emiswb, df_emis_repre,
            row, col, kernel_list=[25], _type="Emis", 
            bounds=(290**4,310**4), moving_pixel = 5,  radiance=False)
        
        #
        import time
        start_time = time.time() 
        out_value_list = cal_linear_alg_lstsq(df_landcover_concat, df_lst, df_emiswb, df_emis_repre,
    row, col, kernel_list=[3], _type="Emis", 
    bounds=(290**4,310**4), moving_pixel = 2,  radiance=False)
        print(time.time() - start_time)
        
        49.32850956916809

    
    '''
    import numpy as np
    import matplotlib.pyplot as plt
    import numpy.linalg as la
    import pandas as pd
    from scipy import linalg
    from scipy.optimize import lsq_linear
    import time

    # Starting time
    start_time = time.time()

    # Create an empty 3D array-like e.g. numpy array or list which holds
    # 8 fraction map
    fraction_map = np.empty((8, row, col), dtype=np.float64)
    indices_map = np.empty((8, row, col), dtype=np.float64)

    # Grouping the dataframe by Class, which results in 8 classes
    df_grp = df_landcover.groupby('Class')

    # Looping over 8 land cover class
    for i in range(9):
        # Pass the i=0 as the class code starts from 1
        if i == 0:
            pass
        else:
            fraction_map[i - 1] = df_grp.get_group(i).fraction.values.reshape(
                row, col)
            indices_map[i - 1] = df_grp.get_group(i).Value.values.reshape(
                row, col)

    # Reading df_lst contains LST value for each pixel and assign as independent value
    indepent_matrix = df_lst.MEAN.values.reshape(row, col)
    emis_matrix = df_emiswb.MEAN.values.reshape(row, col)

    # Old version:
    # Create an empty pandas Dataframe with columns = 'value','nrow','ncol'
    # with the purpose of indexing each pixel with old row and column indexes
    coeff_matrix_df = pd.DataFrame(data=[],
                                   columns=[
                                       'index', 'value_fraction', 'nrow',
                                       'ncol', 'class', 'indep_value',
                                       'value_emis', 'value_emis_sum',
                                       'out_value'
                                   ])
    indep_matrix_df = pd.DataFrame(
        data=[],
        columns=['index', 'value_lst', 'value_emis_sum', 'nrow', 'ncol'])

    # Looping over the whole domain to assign new coeff_matrix and independt value dataframes
    for nrow in range(row):
        for ncol in range(col):
            # Ignoring NoData values
            if fraction_map[:, nrow, ncol].mean() == -9999:
                pass
            else:
                for j in range(8):

                    coeff_matrix_df = coeff_matrix_df.append(
                        {
                            'index': indices_map[:, nrow, ncol][0],
                            'value_fraction':
                            fraction_map[:, nrow,
                                         ncol][j],  # value of fraction fj
                            'nrow': nrow,
                            'ncol': ncol,
                            'class': j,
                            'indep_value': indepent_matrix[nrow, ncol],
                            'value_emis': list(
                                df_emis_repre["Emis"].values)[j],
                            'value_emis_sum': emis_matrix[nrow, ncol]
                        },
                        ignore_index=True)  # value of emiss ei

                    indep_matrix_df = indep_matrix_df.append(
                        {
                            'index': indices_map[:, nrow, ncol][0],
                            'value_lst': indepent_matrix[nrow, ncol],
                            'value_emis_sum': emis_matrix[nrow, ncol],
                            'nrow': nrow,
                            'ncol': ncol
                        },
                        ignore_index=True)
            print(nrow, ncol)

    coeff_df = []
    indep_df = []
    # out_value = []
    out_value = pd.DataFrame(data=[],
                             columns=[
                                 'index', 'class', 'value', 'nrow', 'ncol',
                                 'indep_value', 'out_value', "type"
                             ])
    out_value_list = []

    # Testing with jumping kernel windows, e.g. it is not necessary to move by 1 pixel every step; instead move by
    # 4 pixels. Doing so saves a lot of calculation time, especially when we consider the whole NYC domain.

    coeff_df_matrix_list = []
    indep_df_matrix_list = []

    for kernel in kernel_list:  # Looping over the kernel list for testing on different kernel size
        nrow = -moving_pixel  # Starting from nrow index -moving_pixel, so the first increment lands on index 0
        count = 0

        while nrow < row:
            nrow = nrow + moving_pixel

            ncol = -moving_pixel  # Starting from ncol index -moving_pixel
            while ncol < col:
                ncol = ncol + moving_pixel

                # Applying linear algebra function for each kernel window:
                # Can consider parallel from this step for each kernel

                # Extracting coeff_matrix values for each kernel window and assign it to a new dataframe
                coeff_df = coeff_matrix_df.loc[
                    (coeff_matrix_df['nrow'] >= nrow)
                    & (coeff_matrix_df['nrow'] < nrow + kernel) &
                    (coeff_matrix_df['ncol'] >= ncol) &
                    (coeff_matrix_df['ncol'] < ncol + kernel)]

                # Extracting independent values for each kernel window and assign it to a new dataframe
                indep_df = indep_matrix_df.loc[
                    (indep_matrix_df['nrow'] >= nrow)
                    & (indep_matrix_df['nrow'] < nrow + kernel) &
                    (indep_matrix_df['ncol'] >= ncol) &
                    (indep_matrix_df['ncol'] < ncol + kernel)]

                # Ignoring kernel windows that do not have the same size as kernel*kernel
                # It can happen when the moving window is close to the edge
                if len(
                        coeff_df
                ) < 9 * 8:  # As we consider 8 elements of land cover class
                    pass
                else:
                    coeff_df_matrix_list.append(coeff_df)
                    indep_df_matrix_list.append(indep_df)

                    # Older version:
                    ### Processing each kernel window
                    ### Consider parallel computation? As each kernel is independent

                    index_list = list(coeff_df.groupby("index").groups.keys())

                    indep_df_list = []
                    coeff_df_list = []
                    for item in index_list:

                        indep_df_loc = indep_df.loc[indep_df["index"] == item]
                        coeff_df_loc = coeff_df.loc[coeff_df["index"] == item]

                        # Applying with radiance formula instead of direct LST function:
                        # LST^4 = sum(fraction*Temp^4) + residuals
                        if radiance is True:
                            # Independent values
                            indep_df_element = list(
                                map(lambda x: pow(x, 4),
                                    np.array(indep_df_loc["value_lst"])))

                            # Coefficient matrix values
                            coeff_df_element = np.array(
                                coeff_df_loc["value_fraction"])

                        # Applying with Emis formula instead of direct LST function:emis_sum * LST^4 = sum(e*fraction*Temp^4) + residuals
                        else:

                            # LST^4:
                            lst4 = list(
                                map(lambda x: pow(x, 4),
                                    np.array(indep_df_loc["value_lst"])))
                            # emissivity:
                            emis_sum = indep_df_loc["value_emis_sum"].tolist()

                            # Element-wise multiplication
                            indep_df_element = [
                                a * b for a, b in zip(lst4, emis_sum)
                            ][0]

                            # fraction i * emis i
                            coeff_df_element = list(
                                coeff_df_loc["value_fraction"] *
                                coeff_df_loc["value_emis"])

                        indep_df_list.append(indep_df_element)
                        coeff_df_list.append(coeff_df_element)

                    # Applying function:
                    x, sum_res, rank, s = la.lstsq(coeff_df_list,
                                                   indep_df_list)

                    # Applying optimze function: Testing with Scipy package
                    if bounds is not None:
                        res = lsq_linear(coeff_df_list,
                                         np.array(indep_df_list).reshape(
                                             len(indep_df_list), ),
                                         bounds=bounds)
                    else:
                        res = lsq_linear(
                            coeff_df_list,
                            np.array(indep_df_list).reshape(
                                len(indep_df_list), ))

                    ### End processing each kernel. At this step, we should be able to extract
                    # End member temperature for each kernel
                    # Time: 0.0239 seconds for 72 rows (3x3 kernel)

                    # Old version:
                    for j in range(8):
                        if radiance is True:
                            # Solution: x = 4sqrt(x)
                            coeff_df.loc[coeff_df["class"] == j,
                                         "out_value"] = res.x[j]**(1 / 4)

                        else:

                            coeff_df.loc[coeff_df["class"] == j,
                                         "out_value"] = res.x[j]**(1 / 4)

                    # Old version

                    # Adding optimality value
                    coeff_df["optimality"] = [
                        res.optimality for i in range(len(coeff_df))
                    ]

                    # Adding nit value
                    # Number of iterations. Zero if the unconstrained solution is optimal.
                    coeff_df["nit"] = [res.nit for i in range(len(coeff_df))]

                    # Adding count column contains the order of kernel window
                    coeff_df["count"] = count + 1
                    # print(coeff_df)

                    # Adding type colum such as radiance or temperature
                    coeff_df["type"] = _type

                    # Append new dataframe to the existing dataframe
                    out_value = out_value.append(coeff_df)

                count = count + 1
                #print(count)
            print(nrow, ncol, kernel)
        out_value_list.append(out_value)

    end_time = time.time() - start_time
    print('Processing time' + str(end_time))

    # Save data to pickle
    #    path_output = '\\uahdata\rhome\py_code\aes509\data\staten_island_70m'
    #    out_value_list[0].to_pickle(path_output+'out_value')

    # Test new version: 03.28.2020 (Staten Island 490 m )
    # Time: 8.951 seconds
    # Test old version:
    # Time: 31.64 seconds

    # Test new version: 03.28.2020 (Staten Island 70 m )
    # Time: 395.76682 seconds
    # Test old version:
    # Time:  seconds
    return out_value_list, end_time
Example #35
def draw_lines(img, lines, color=[0, 255, 255], thickness=3):
    try:
        # finds the maximum y value for a lane marker
        # (since we cannot assume the horizon will always be at the same point)
        ys = []
        for i in lines:
            for ii in i:
                ys += [ii[1], ii[3]]

        # store the smallest y value among the lines (top of the screen)
        min_y = min(ys)
        # the frame is 800x600, so the largest possible y value is 600
        max_y = 600

        new_lines = []
        line_dict = {}

        for idx, i in enumerate(lines):
            for xyxy in i:
                # These four lines:
                # modified from http://stackoverflow.com/questions/21565994/method-to-return-the-equation-of-a-straight-line-given-two-points
                # Used to calculate the definition of a line, given two sets of coords.
                x_coords = (xyxy[0], xyxy[2])
                y_coords = (xyxy[1], xyxy[3])

                A = vstack([x_coords, ones(len(x_coords))]).T
                m, b = lstsq(A, y_coords)[0]

                # calculating our new, and improved, xs
                x1 = (min_y - b) / m
                x2 = (max_y - b) / m

                # build line_dict holding the slope, intercept, and actual coordinates of each line
                line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]

                new_lines.append([int(x1), min_y, int(x2), max_y])

        final_lanes = {}

        for idx in line_dict:
            final_lanes_copy = final_lanes.copy()
            m = line_dict[idx][0]
            b = line_dict[idx][1]
            line = line_dict[idx][2]

            if len(final_lanes) == 0:
                final_lanes[m] = [[m, b, line]]

            else:
                found_copy = False

                for other_ms in final_lanes_copy:
                    if not found_copy:
                        # treat another line as the same lane if its slope is within +-20%;
                        # if it differs by more than +-20%, treat it as a new line
                        if abs(other_ms * 1.2) > abs(m) > abs(other_ms * 0.8):
                            if abs(final_lanes_copy[other_ms][0][1] *
                                   1.2) > abs(b) > abs(
                                       final_lanes_copy[other_ms][0][1] * 0.8):
                                final_lanes[other_ms].append([m, b, line])
                                found_copy = True
                                break
                        else:
                            final_lanes[m] = [[m, b, line]]

        line_counter = {}

        for lanes in final_lanes:
            line_counter[lanes] = len(final_lanes[lanes])

        # among the lane candidates, pick the two slopes that were detected
        # most often with similar values and treat those lines as the lanes
        top_lanes = sorted(line_counter.items(),
                           key=lambda item: item[1])[::-1][:2]

        lane1_id = top_lanes[0][0]
        lane2_id = top_lanes[1][0]

        # averages the x,y values of lines with similar slopes and returns them
        def average_lane(lane_data):
            x1s = []
            y1s = []
            x2s = []
            y2s = []

            for data in lane_data:
                x1s.append(data[2][0])
                y1s.append(data[2][1])
                x2s.append(data[2][2])
                y2s.append(data[2][3])

            return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(
                mean(y2s))

        # finally we get the start and end points of the two lines (two (x,y) coords each, eight values in total)
        l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
        l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])

        return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2, l2_y2]

    except Exception as e:
        print('1 : ' + str(e))
        pass
Example #36
yp = yp[['SNGL','XBH','HR','BB','K','BIPOUT']]
yp = yp.replace(1,0.999)
yp = yp.replace(0,0.0002)
yr = yp/(1-yp)
ylogr = np.log(yr)
y = np.subtract(ylogr,logrbar)


#%%
# Try a universal tau value
tau = 1

xt = x * tau

# Solve the system
bhat = pd.DataFrame(la.lstsq(np.matmul(xt.transpose().to_numpy(),xt.to_numpy()),np.matmul(xt.transpose().to_numpy(),y.to_numpy()))[0])
bhat.index = x.columns
bhat.columns = y.columns

# Take the bhat estimate and put it back in probability space
rhat = np.exp(np.add(bhat,logrbar))

rhat.groupby('split').mean()

# Okay, now get the original probabilities
phat = rhat/(1+rhat)
phat.groupby('split').mean()
phat.loc['batter'].hist()

phat['SUM'] = np.sum(phat,axis=1)
phat.groupby('split').mean()
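
# Note on the bhat solve above (check data is my own): lstsq on the normal
# equations X'X b = X'y returns the same coefficients as lstsq on X directly,
# but squares the condition number, so the direct form is usually preferred.
import numpy as np
import numpy.linalg as la

rng = np.random.RandomState(0)
X = rng.randn(200, 5)
beta = rng.randn(5)
y = X.dot(beta) + 0.01 * rng.randn(200)

b_normal = la.lstsq(X.T.dot(X), X.T.dot(y), rcond=None)[0]
b_direct = la.lstsq(X, y, rcond=None)[0]
print(np.allclose(b_normal, b_direct))  # True for this well-conditioned X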
Example #37
import numpy as np
import random
import math
#dataset creation
[a1, b1] = [np.random.uniform(-1, 1), np.random.uniform(-1, 1)]
[a2, b2] = [np.random.uniform(-1, 1), np.random.uniform(-1, 1)]
from numpy import ones, vstack
from numpy.linalg import lstsq
points = [(a1, b1), (a2, b2)]
x_coords, y_coords = zip(*points)
A = vstack([x_coords, ones(len(x_coords))]).T
m, c = lstsq(A, y_coords)[0]
print("Line Solution is y = {m}x + {c}".format(m=m, c=c))
[x, y] = [np.random.uniform(-1, 1, 100), np.random.uniform(-1, 1, 100)]
Output = []
for [a, b] in zip(x, y):
    if (m * a - b < 0):
        Output.append(-1)
    else:
        Output.append(1)
Input = zip(np.ones(100), x, y)


def cross_entropy(feature, res, weights):
    return math.log(1 + math.exp(-res * np.dot(feature, weights)))


weights = [0, 0, 0]


def gradient(coordinate, res, weights):
Example #38
import matplotlib.pyplot as plt
import numpy as np
import numpy.random
from numpy import linalg as LA

# Generate data
X = np.random.rand(20, 10)
b = np.random.rand(10)
z = np.random.randn(20)
y = np.dot(X, b) - z

b_ = LA.lstsq(X, y, rcond=None)[0]

# Generate the plot
index = list(range(10))
plt.scatter(index, b, label='True coefficients', color='r', marker='x')
plt.scatter(index,
            b_,
            label='Estimated coefficients',
            color='blue',
            marker='o')

plt.xlabel('index')
plt.ylabel('value')
plt.title('Parameter plot')
plt.legend()
plt.show()
Example #39
def old_epd_magseries(times, mags, errs,
                      fsv, fdv, fkv, xcc, ycc, bgv, bge,
                      epdsmooth_windowsize=21,
                      epdsmooth_sigclip=3.0,
                      epdsmooth_func=smooth_magseries_medfilt,
                      epdsmooth_extraparams=None):
    '''
    Detrends a magnitude series given in mag using accompanying values of S in
    fsv, D in fdv, K in fkv, x coords in xcc, y coords in ycc, background in
    bgv, and background error in bge. smooth is used to set a smoothing
    parameter for the fit function. Does EPD voodoo.

    '''

    # find all the finite values of the magnitudes
    finiteind = np.isfinite(mags)

    # calculate median and stdev
    mags_median = np.median(mags[finiteind])
    mags_stdev = np.nanstd(mags)

    # if we're supposed to sigma clip, do so
    if epdsmooth_sigclip:
        excludeind = abs(mags - mags_median) < epdsmooth_sigclip*mags_stdev
        finalind = finiteind & excludeind
    else:
        finalind = finiteind

    final_mags = mags[finalind]
    final_len = len(final_mags)

    # smooth the signal
    if isinstance(epdsmooth_extraparams, dict):
        smoothedmags = epdsmooth_func(final_mags,
                                      epdsmooth_windowsize,
                                      **epdsmooth_extraparams)
    else:
        smoothedmags = epdsmooth_func(final_mags, epdsmooth_windowsize)

    # make the linear equation matrix
    epdmatrix = np.c_[fsv[finalind]**2.0,
                      fsv[finalind],
                      fdv[finalind]**2.0,
                      fdv[finalind],
                      fkv[finalind]**2.0,
                      fkv[finalind],
                      np.ones(final_len),
                      fsv[finalind]*fdv[finalind],
                      fsv[finalind]*fkv[finalind],
                      fdv[finalind]*fkv[finalind],
                      np.sin(2*np.pi*xcc[finalind]),
                      np.cos(2*np.pi*xcc[finalind]),
                      np.sin(2*np.pi*ycc[finalind]),
                      np.cos(2*np.pi*ycc[finalind]),
                      np.sin(4*np.pi*xcc[finalind]),
                      np.cos(4*np.pi*xcc[finalind]),
                      np.sin(4*np.pi*ycc[finalind]),
                      np.cos(4*np.pi*ycc[finalind]),
                      bgv[finalind],
                      bge[finalind]]

    # solve the matrix equation [epdmatrix] . [x] = [smoothedmags]
    # return the EPD differential mags if the solution succeeds
    try:

        coeffs, residuals, rank, singulars = lstsq(epdmatrix, smoothedmags,
                                                   rcond=None)

        if DEBUG:
            print('coeffs = %s, residuals = %s' % (coeffs, residuals))


        retdict = {'times':times,
                   'mags':(mags_median +
                           old_epd_diffmags(coeffs, fsv, fdv,
                                            fkv, xcc, ycc, bgv, bge, mags)),
                   'errs':errs,
                   'fitcoeffs':coeffs,
                   'residuals':residuals}

        return retdict

    # if the solution fails, return nothing
    except Exception as e:

        LOGEXCEPTION('EPD solution did not converge')

        # note: coeffs/residuals may be unbound here if lstsq itself raised,
        # so return None for both instead
        retdict = {'times':times,
                   'mags':np.full_like(mags, np.nan),
                   'errs':errs,
                   'fitcoeffs':None,
                   'residuals':None}

        return retdict
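For reference, old_epd_diffmags() is not shown in this example; conceptually, the EPD correction subtracts the systematics model predicted by the fitted coefficients, roughly like this sketch (not the library function):

epd_model = np.dot(epdmatrix, coeffs)    # systematics predicted from S, D, K, x, y, bg
epd_diffmags = smoothedmags - epd_model  # detrended (differential) magnitudes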
Пример #40
0
def polyfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least-squares fit of a polynomial to data.

    Return the coefficients of a polynomial of degree `deg` that is the
    least squares fit to the data values `y` given at points `x`. If `y` is
    1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
    fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math::  p(x) = c_0 + c_1 * x + ... + c_n * x^n,

    where `n` is `deg`.

    Since numpy version 1.7.0, polyfit also supports NA. If any of the
    elements of `x`, `y`, or `w` are NA, then the corresponding rows of the
    linear least squares problem (see Notes) are set to 0. If `y` is 2-D,
    then an NA in any row of `y` invalidates that whole row.

    Parameters
    ----------
    x : array_like, shape (`M`,)
        x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.
    y : array_like, shape (`M`,) or (`M`, `K`)
        y-coordinates of the sample points.  Several sets of sample points
        sharing the same x-coordinates can be (independently) fit with one
        call to `polyfit` by passing in for `y` a 2-D array that contains
        one data set per column.
    deg : int
        Degree of the polynomial(s) to be fit.
    rcond : float, optional
        Relative condition number of the fit.  Singular values smaller
        than `rcond`, relative to the largest singular value, will be
        ignored.  The default value is ``len(x)*eps``, where `eps` is the
        relative precision of the platform's float type, about 2e-16 in
        most cases.
    full : bool, optional
        Switch determining the nature of the return value.  When ``False``
        (the default) just the coefficients are returned; when ``True``,
        diagnostic information from the singular value decomposition (used
        to solve the fit's matrix equation) is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance.  The default value is None.

        .. versionadded:: 1.5.0

    Returns
    -------
    coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)
        Polynomial coefficients ordered from low to high.  If `y` was 2-D,
        the coefficients in column `k` of `coef` represent the polynomial
        fit to the data in `y`'s `k`-th column.

    [residuals, rank, singular_values, rcond] : present when `full` == True
        Sum of the squared residuals (SSR) of the least-squares fit; the
        effective rank of the scaled Vandermonde matrix; its singular
        values; and the specified value of `rcond`.  For more information,
        see `linalg.lstsq`.

    Raises
    ------
    RankWarning
        Raised if the matrix in the least-squares fit is rank deficient.
        The warning is only raised if `full` == False.  The warnings can
        be turned off by:

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)

    See Also
    --------
    chebfit, legfit, lagfit, hermfit, hermefit
    polyval : Evaluates a polynomial.
    polyvander : Vandermonde matrix for powers.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients of the polynomial `p` that minimizes
    the sum of the weighted squared errors

    .. math :: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where the :math:`w_j` are the weights. This problem is solved by
    setting up the (typically) over-determined matrix equation:

    .. math :: V(x) * c = w * y,

    where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
    coefficients to be solved for, `w` are the weights, and `y` are the
    observed values.  This equation is then solved using the singular value
    decomposition of `V`.

    If some of the singular values of `V` are so small that they are
    neglected (and `full` == ``False``), a `RankWarning` will be raised.
    This means that the coefficient values may be poorly determined.
    Fitting to a lower order polynomial will usually get rid of the warning
    (but may not be what you want, of course; if you have independent
    reason(s) for choosing the degree which isn't working, you may have to:
    a) reconsider those reasons, and/or b) reconsider the quality of your
    data).  The `rcond` parameter can also be set to a value smaller than
    its default, but the resulting fit may be spurious and have large
    contributions from roundoff error.

    Polynomial fits using double precision tend to "fail" at about
    (polynomial) degree 20. Fits using Chebyshev or Legendre series are
    generally better conditioned, but much can still depend on the
    distribution of the sample points and the smoothness of the data.  If
    the quality of the fit is inadequate, splines may be a good
    alternative.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
    >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise"
    >>> c, stats = P.polyfit(x,y,3,full=True)
    >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
    array([ 0.01909725, -1.30598256, -0.00577963,  1.02644286])
    >>> stats # note the large SSR, explaining the rather poor results
    [array([ 38.06116253]), 4, array([ 1.38446749,  1.32119158,  0.50443316,
    0.28853036]), 1.1324274851176597e-014]

    Same thing without the added noise

    >>> y = x**3 - x
    >>> c, stats = P.polyfit(x,y,3,full=True)
    >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
    array([ -1.73362882e-17,  -1.00000000e+00,  -2.67471909e-16,
             1.00000000e+00])
    >>> stats # note the minuscule SSR
    [array([  7.46346754e-31]), 4, array([ 1.38446749,  1.32119158,
    0.50443316,  0.28853036]), 1.1324274851176597e-014]

    """
    order = int(deg) + 1
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    # set up the least squares matrices in transposed form
    lhs = polyvander(x, deg).T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        rcond = len(x) * np.finfo(x.dtype).eps

    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    scl[scl == 0] = 1

    # Solve the least squares problem.
    c, resids, rank, s = la.lstsq(lhs.T / scl, rhs.T, rcond)
    c = (c.T / scl).T

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
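The scl step above normalizes each design-matrix column to unit Euclidean norm before calling lstsq, then un-scales the coefficients. A quick standalone illustration of the effect (np.vander is used here just for the demo; its column ordering differs from polyvander):

import numpy as np

x_demo = np.linspace(0, 10, 50)
V = np.vander(x_demo, 6)                      # badly scaled columns
scl_demo = np.sqrt(np.square(V).sum(axis=0))  # per-column norms, as in the code above
print(np.linalg.cond(V))                      # large
print(np.linalg.cond(V / scl_demo))           # noticeably smaller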
Пример #41
0
def localizeExtremumViaQuadraticFit(i,
                                    j,
                                    image_index,
                                    octave_index,
                                    num_intervals,
                                    dog_images_in_octave,
                                    sigma,
                                    contrast_threshold,
                                    image_border_width,
                                    eigenvalue_ratio=10,
                                    num_attempts_until_convergence=5):
    """Iteratively refine pixel positions of scale-space extrema via quadratic fit around each extremum's neighbors
    """
    logger.debug('Localizing scale-space extrema...')
    extremum_is_outside_image = False
    image_shape = dog_images_in_octave[0].shape
    for attempt_index in range(num_attempts_until_convergence):
        # need to convert from uint8 to float32 to compute derivatives and need to rescale pixel values to [0, 1] to apply Lowe's thresholds
        first_image, second_image, third_image = dog_images_in_octave[
            image_index - 1:image_index + 2]
        pixel_cube = stack([
            first_image[i - 1:i + 2, j - 1:j + 2],
            second_image[i - 1:i + 2, j - 1:j + 2], third_image[i - 1:i + 2,
                                                                j - 1:j + 2]
        ]).astype('float32') / 255.
        gradient = computeGradientAtCenterPixel(pixel_cube)
        hessian = computeHessianAtCenterPixel(pixel_cube)
        extremum_update = -lstsq(hessian, gradient, rcond=None)[0]
        if abs(extremum_update[0]) < 0.5 and abs(
                extremum_update[1]) < 0.5 and abs(extremum_update[2]) < 0.5:
            break
        j += int(round(extremum_update[0]))
        i += int(round(extremum_update[1]))
        image_index += int(round(extremum_update[2]))
        # make sure the new pixel_cube will lie entirely within the image
        if i < image_border_width or i >= image_shape[
                0] - image_border_width or j < image_border_width or j >= image_shape[
                    1] - image_border_width or image_index < 1 or image_index > num_intervals:
            extremum_is_outside_image = True
            break
    if extremum_is_outside_image:
        logger.debug(
            'Updated extremum moved outside of image before reaching convergence. Skipping...'
        )
        return None
    if attempt_index >= num_attempts_until_convergence - 1:
        logger.debug(
            'Exceeded maximum number of attempts without reaching convergence for this extremum. Skipping...'
        )
        return None
    functionValueAtUpdatedExtremum = pixel_cube[1, 1, 1] + 0.5 * dot(
        gradient, extremum_update)
    if abs(functionValueAtUpdatedExtremum
           ) * num_intervals >= contrast_threshold:
        xy_hessian = hessian[:2, :2]
        xy_hessian_trace = trace(xy_hessian)
        xy_hessian_det = det(xy_hessian)
        if xy_hessian_det > 0 and eigenvalue_ratio * (xy_hessian_trace**2) < (
            (eigenvalue_ratio + 1)**2) * xy_hessian_det:
            # Contrast check passed -- construct and return OpenCV KeyPoint object
            keypoint = KeyPoint()
            keypoint.pt = ((j + extremum_update[0]) * (2**octave_index),
                           (i + extremum_update[1]) * (2**octave_index))
            keypoint.octave = octave_index + image_index * (2**8) + int(
                round((extremum_update[2] + 0.5) * 255)) * (2**16)
            keypoint.size = sigma * (2**(
                (image_index + extremum_update[2]) /
                float32(num_intervals))) * (2**(octave_index + 1))
            keypoint.response = abs(functionValueAtUpdatedExtremum)
            return keypoint, image_index
    return None
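computeGradientAtCenterPixel() and computeHessianAtCenterPixel() are not shown in this example. A standard central-difference version over the 3x3x3 pixel_cube (an assumption matching common SIFT implementations, with unit pixel spacing) would look like:

import numpy as np

def central_gradient(cube):
    # first-order central differences at the cube center: df/dx ~ (f(x+1) - f(x-1)) / 2
    dx = 0.5 * (cube[1, 1, 2] - cube[1, 1, 0])
    dy = 0.5 * (cube[1, 2, 1] - cube[1, 0, 1])
    ds = 0.5 * (cube[2, 1, 1] - cube[0, 1, 1])
    return np.array([dx, dy, ds])

def central_hessian(cube):
    # second-order central differences; mixed terms use the four diagonal neighbours
    c = cube[1, 1, 1]
    dxx = cube[1, 1, 2] - 2 * c + cube[1, 1, 0]
    dyy = cube[1, 2, 1] - 2 * c + cube[1, 0, 1]
    dss = cube[2, 1, 1] - 2 * c + cube[0, 1, 1]
    dxy = 0.25 * (cube[1, 2, 2] - cube[1, 2, 0] - cube[1, 0, 2] + cube[1, 0, 0])
    dxs = 0.25 * (cube[2, 1, 2] - cube[2, 1, 0] - cube[0, 1, 2] + cube[0, 1, 0])
    dys = 0.25 * (cube[2, 2, 1] - cube[2, 0, 1] - cube[0, 2, 1] + cube[0, 0, 1])
    return np.array([[dxx, dxy, dxs],
                     [dxy, dyy, dys],
                     [dxs, dys, dss]])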
Пример #42
0
        # upper = np.array([40, 50, 255], dtype="uint8")

        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, lower, upper)
        diff = cv2.absdiff(img, background)
        mask2 = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
        # import ipdb; ipdb.set_trace()
        mask = cv2.bitwise_and(mask, mask2[:, :, 0])
        cv2.erode(mask, None, iterations=8)
        cv2.dilate(mask, None, iterations=10)

        x, y = np.nonzero(mask)

        if len(x) > 0:
            A = vstack([x, ones(len(x))]).T
            m, c = lstsq(A, y, rcond=None)[0]

            # print(m)

            w = img.shape[0]
            h = img.shape[1]

            try:
                intersection_t = (0, int(-c / m))
                intersection_b = (int(h), int((h - c) / m))
                intersection_r = (int(c), 0)
                intersection_l = (int(w * m + c), int(w))
            except (ZeroDivisionError, OverflowError):  # m may be zero or near-zero
                continue

            points = []
Пример #43
0
                     [1, 16, 2, 18], [1, 12, 0, 11]],
                    columns=columns)

# find the value of beta which minimizes the cost function

X = train[train.columns[:-1]].to_numpy()  # .as_matrix() was removed in pandas 1.0
y = train[train.columns[-1]].to_numpy()

print(y)
print(X)

print(dot(inv(dot(transpose(X), X)), dot(transpose(X), y)))

# using NumPy's least squares to solve for beta

print(lstsq(X, y, rcond=None)[0])

# train model

X_train = train[train.columns[1:3]].to_numpy()
y_train = train[train.columns[-1]].to_numpy()

model = LinearRegression()
model.fit(X_train, y_train)

X_test = test[test.columns[1:3]].to_numpy()
y_test = test[test.columns[-1]].to_numpy()

# predict

predictions = model.predict(X_test)
Пример #44
0
def draw_lanes(img, lines, color=[0, 255, 255], thickness=3):
    # if an error occurs, fall back to the default lines
    try:
        # as the car moves, the horizon is not always at the same point on
        # screen, so find the maximum y value among the lane markers

        ys = []
        for i in lines:
            for ii in i:
                ys += [ii[1], ii[3]]  # y components of each line's start and end points

        min_y = min(ys)
        max_y = 600

        new_lines = []
        line_dict = {}

        for idx, i in enumerate(lines):
            for xyxy in i:
                # http://stackoverflow.com/questions/21565994/method-to-return-the-equation-of-a-straight-line-given-two-points
                # build the line equation from the two points
                x_coords = (xyxy[0], xyxy[2])
                y_coords = (xyxy[1], xyxy[3])

                # [ [xyxy[0], xyxy[2]],
                #   [      1,       1] ].T =
                # [ [xyxy[0], 1],
                #   [xyxy[2], 1] ]
                A = vstack([x_coords, ones(len(x_coords))]).T  # transposed

                m, b = lstsq(A, y_coords, rcond=None)[0]  # least-squares solution of A @ [m, b] = y_coords

                # since y = mx + b, x = (y - b) / m
                x1 = (min_y - b) / m
                x2 = (max_y - b) / m

                # add the lane
                line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]
                new_lines.append([int(x1), min_y, int(x2), max_y])

        # group lines with similar slope and intercept into candidates for the same lane
        final_lanes = {}

        for idx in line_dict:
            final_lanes_copy = final_lanes.copy()

            m = line_dict[idx][0]
            b = line_dict[idx][1]
            line = line_dict[idx][2]

            if len(final_lanes) == 0:
                final_lanes[m] = [[m, b, line]]

            else:
                found_copy = False

                for other_ms in final_lanes_copy:

                    if not found_copy:
                        if abs(other_ms * 1.2) > abs(m) > abs(other_ms * 0.8):
                            if abs(final_lanes_copy[other_ms][0][1] * 1.2) > abs(b) > abs(
                                    final_lanes_copy[other_ms][0][1] * 0.8):
                                final_lanes[other_ms].append([m, b, line])
                                found_copy = True
                                break
                        else:
                            final_lanes[m] = [[m, b, line]]

        line_counter = {}

        for lanes in final_lanes:
            line_counter[lanes] = len(final_lanes[lanes])

        top_lanes = sorted(line_counter.items(), key=lambda item: item[1])[::-1][:2]

        lane1_id = top_lanes[0][0]
        lane2_id = top_lanes[1][0]

        # average the lane candidates and return the final lanes
        def average_lane(lane_data):
            x1s = []
            y1s = []
            x2s = []
            y2s = []
            for data in lane_data:
                x1s.append(data[2][0])
                y1s.append(data[2][1])
                x2s.append(data[2][2])
                y2s.append(data[2][3])

            return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(mean(y2s))

        l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
        l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])

        return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2, l2_y2]

    # exception handling
    except Exception as e:
        print(str(e))
Пример #45
0
def dfa(X, Ave = None, L = None):
	"""Compute Detrended Fluctuation Analysis from a time series X and length of
	boxes L.
	
	The first step to compute DFA is to integrate the signal. Let the original
	series be X = [x(1), x(2), ..., x(N)].

	The integrated signal Y = [y(1), y(2), ..., y(N)] is obtained as follows:
	y(k) = \sum_{i=1}^{k}{(x(i)-Ave)} where Ave is the mean of X.

	The second step is to partition/slice/segment the integrated sequence Y into
	boxes. At least two boxes are needed for computing DFA. Box sizes are
	specified by the L argument of this function. By default, it is from 1/5 of
	signal length to one (x-5)-th of the signal length, where x is the nearest 
	power of 2 from the length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...

	In each box, a linear least square fitting is employed on data in the box. 
	Denote the series on fitted line as Yn. Its k-th elements, yn(k), 
	corresponds to y(k).
	
	For fitting in each box, there is a residue, the sum of squares of all 
	offsets, difference between actual points and points on fitted line. 

	F(n) denotes the square root of average total residue in all boxes when box
	length is n, thus
	Total_Residue = \sum_{k=1}^{N}{(y(k)-yn(k))^2}
	F(n) = \sqrt(Total_Residue/N)

	The computation of F(n) is carried out for every box length n. Therefore, a 
	relationship between n and F(n) can be obtained. In general, F(n) increases
	when n increases.

	Finally, the relationship between F(n) and n is analyzed. A least square 
	fitting is performed between log(F(n)) and log(n). The slope of the fitting 
	line is the DFA value, denoted as Alpha. For white noise, Alpha should be
	0.5. A higher level of signal complexity is related to a higher Alpha.
	
	Parameters
	----------

	X:
		1-D Python list or numpy array
		a time series

	Ave:
		float, optional
		The average value of the time series

	L:
		1-D Python list of integers
		A list of box size, integers in ascending order

	Returns
	-------
	
	Alpha:
		float
		the result of DFA analysis, i.e. the slope of the fitting line of
		log(F(n)) vs. log(n), where n is the box size

	Examples
	--------
	>>> import pyeeg
	>>> from numpy.random import randn
	>>> print(pyeeg.dfa(randn(4096)))
	0.490035110345

	Reference
	---------
	Peng C-K, Havlin S, Stanley HE, Goldberger AL. Quantification of scaling 
	exponents and 	crossover phenomena in nonstationary heartbeat time series. 
	_Chaos_ 1995;5:82-87

	Notes
	-----

	This value depends on the box sizes very much. When the input is a white
	noise, this value should be 0.5. But, some choices on box sizes can lead to
	the value lower or higher than 0.5, e.g. 0.38 or 0.58. 

	Based on many tests, I set the box sizes from 1/5 of the signal length to one 
	(x-5)-th of the signal length, where x is the nearest power of 2 from the 
	length of the signal, i.e., 1/16, 1/32, 1/64, 1/128, ...

	You may generate a list of box sizes and pass in such a list as a parameter.

	"""

	X = array(X)

	if Ave is None:
		Ave = mean(X)

	Y = cumsum(X - Ave)  # integrate the mean-removed signal, per the docstring

	if L is None:
		L = floor(len(X)*1/(2**array(range(4,int(log2(len(X)))-4))))

	F = zeros(len(L)) # F(n) of different given box length n

	for i in range(0,len(L)):
		n = int(L[i])						# for each box length L[i]
		if n==0:
			print ("time series is too short while the box length is too big")
			print ("abort")
			exit()
		for j in range(0,len(X),n): # for each box
			if j+n < len(X):
				c = range(j,j+n)
				c = vstack([c, ones(n)]).T # coordinates of time in the box
				y = Y[j:j+n]				# the value of data in the box
				F[i] += lstsq(c,y,rcond = None)[1]	# add residue in this box
		F[i] /= ((len(X)/n)*n)
	F = sqrt(F)
	
	Alpha = lstsq(vstack([log(L), ones(len(L))]).T,log(F),rcond = None)[0][0]
	
	return Alpha
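Usage sketch: as the docstring notes, a custom list of box sizes can be passed in; they should be integers in ascending order (the values below are arbitrary):

from numpy.random import randn
print(dfa(randn(4096), L=[8, 16, 32, 64, 128, 256]))  # near 0.5 for white noise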
Пример #46
0
# <demo> --- stop ---
# Exercise 8
print("inv(y):")
print(inv(y))
(D, V) = eig(y)
D = 1 / D
print("dot(dot(V,diag(D)),V.T):")
print(dot(dot(V, diag(D)), V.T))
# <demo> --- stop ---
# Exercise 9
x = randn(100, 2)
e = randn(100, 1)
B = array([[1], [0.5]])
y = dot(x, B) + e

out = lstsq(x, y, rcond=None)
estimate = out[0]
# <demo> --- stop ---
# Exercise 10
y = array([[5, -1.5, -3.5], [-1.5, 2, -0.5], [-3.5, -0.5, 4]])
D = eigvals(y)
print("matrix_rank(y):")
print(matrix_rank(y))
print("det(y):")
print(det(y))
# <demo> --- stop ---
# Exercise 11
x = randn(100, 2)
SigmaX = dot(x.T, x) / 100
print("kron(eye(2),SigmaX):")
print(kron(eye(2), SigmaX))
Пример #47
0
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points `(x, y)`. Returns a vector of coefficients `p` that minimises
    the squared error in the order `deg`, `deg-1`, ... `0`.

    The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
    method is recommended for new code as it is more stable numerically. See
    the documentation of the method for more information.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (M,), optional
        Weights to apply to the y-coordinates of the sample points. For
        gaussian uncertainties, use 1/sigma (not 1/sigma**2).
    cov : bool or str, optional
        If given and not `False`, return not just the estimate but also its
        covariance matrix. By default, the covariance is scaled by
        chi2/sqrt(N-dof), i.e., the weights are presumed to be unreliable
        except in a relative sense and everything is scaled such that the
        reduced chi2 is unity. This scaling is omitted if ``cov='unscaled'``,
        as is relevant for the case that the weights are 1/sigma**2, with
        sigma known to be a reliable estimate of the uncertainty.

    Returns
    -------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first.  If `y` was 2-D, the
        coefficients for `k`-th data set are in ``p[:,k]``.

    residuals, rank, singular_values, rcond
        Present only if `full` = True.  Residuals is sum of squared residuals
        of the least-squares fit, the effective rank of the scaled Vandermonde
        coefficient matrix, its singular values, and the specified value of
        `rcond`. For more details, see `linalg.lstsq`.

    V : ndarray, shape (M,M) or (M,M,K)
        Present only if `full` = False and `cov`=True.  The covariance
        matrix of the polynomial coefficient estimates.  The diagonal of
        this matrix gives the variance estimates for each coefficient.  If y
        is a 2-D array, then the covariance matrix for the `k`-th data set
        are in ``V[:,:,k]``


    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.

        The warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)

    See Also
    --------
    polyval : Compute polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution minimizes the squared error

    .. math ::
        E = \\sum_{j=0}^k |p(x_j) - y_j|^2

    in the equations::

        x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
        x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
        ...
        x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]

    The coefficient matrix of the coefficients `p` is a Vandermonde matrix.

    `polyfit` issues a `RankWarning` when the least-squares fit is badly
    conditioned. This implies that the best fit is not well-defined due
    to numerical error. The results may be improved by lowering the polynomial
    degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
    can also be set to a value smaller than its default, but the resulting
    fit may be spurious: including contributions from the small singular
    values can add numerical noise to the result.

    Note that fitting polynomial coefficients is inherently badly conditioned
    when the degree of the polynomial is large or the interval of sample points
    is badly centered. The quality of the fit should always be checked in these
    cases. When polynomial fits are not satisfactory, splines may be a good
    alternative.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           https://en.wikipedia.org/wiki/Curve_fitting
    .. [2] Wikipedia, "Polynomial interpolation",
           https://en.wikipedia.org/wiki/Polynomial_interpolation

    Examples
    --------
    >>> import warnings
    >>> x = np.array([0.0, 1.0, 2.0, 3.0,  4.0,  5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254]) # may vary

    It is convenient to use `poly1d` objects for dealing with polynomials:

    >>> p = np.poly1d(z)
    >>> p(0.5)
    0.6143849206349179 # may vary
    >>> p(3.5)
    -0.34732142857143039 # may vary
    >>> p(10)
    22.579365079365115 # may vary

    High-order polynomials may oscillate wildly:

    >>> with warnings.catch_warnings():
    ...     warnings.simplefilter('ignore', np.RankWarning)
    ...     p30 = np.poly1d(np.polyfit(x, y, 30))
    ...
    >>> p30(4)
    -0.80000000000000204 # may vary
    >>> p30(5)
    -0.99999999999999445 # may vary
    >>> p30(4.5)
    -0.10547061179440398 # may vary

    Illustration:

    >>> import matplotlib.pyplot as plt
    >>> xp = np.linspace(-2, 6, 100)
    >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
    >>> plt.ylim(-2,2)
    (-2, 2)
    >>> plt.show()

    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x) * finfo(x.dtype).eps

    # set up least squares equation for powers of x
    lhs = vander(x, order)
    rhs = y

    # apply weighting
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w

    # scale lhs to improve condition number and solve
    scale = NX.sqrt((lhs * lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T / scale).T  # broadcast scale coefficients

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning, stacklevel=4)

    if full:
        return c, resids, rank, s, rcond
    elif cov:
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        if cov == "unscaled":
            fac = 1
        else:
            if len(x) <= order:
                raise ValueError("the number of data points must exceed order "
                                 "to scale the covariance matrix")
            # note, this used to be: fac = resids / (len(x) - order - 2.0)
            # it was decided that the "- 2" (originally justified by "Bayesian
            # uncertainty analysis") is not what the user expects
            # (see gh-11196 and gh-11197)
            fac = resids / (len(x) - order)
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:, :, NX.newaxis] * fac
    else:
        return c
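Usage sketch for the cov branch: with cov=True the covariance matrix comes back alongside the coefficients, and the square root of its diagonal gives 1-sigma coefficient uncertainties:

import numpy as np

x_fit = np.linspace(0, 1, 30)
y_fit = 2.0 * x_fit + 1.0 + 0.1 * np.random.randn(30)
c_fit, V_fit = np.polyfit(x_fit, y_fit, 1, cov=True)
perr = np.sqrt(np.diag(V_fit))   # uncertainties of slope and intercept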
Пример #48
0
def dogbox(fun, jac, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, scaling,
           loss_function, tr_solver, tr_options, verbose):
    f = f0
    f_true = f.copy()
    nfev = 1

    J = J0
    njev = 1

    if loss_function is not None:
        rho = loss_function(f)
        cost = 0.5 * np.sum(rho[0])
        J, f = scale_for_robust_loss_function(J, f, rho)
    else:
        cost = 0.5 * np.dot(f, f)

    g = compute_grad(J, f)

    jac_scaling = isinstance(scaling, string_types) and scaling == 'jac'
    if jac_scaling:
        scale, scale_inv = compute_jac_scaling(J)
    else:
        scale, scale_inv = scaling, 1 / scaling

    Delta = norm(x0 * scale, ord=np.inf)
    if Delta == 0:
        Delta = 1.0

    on_bound = np.zeros_like(x0, dtype=int)
    on_bound[np.equal(x0, lb)] = -1
    on_bound[np.equal(x0, ub)] = 1

    x = x0
    step = np.empty_like(x0)

    if max_nfev is None:
        max_nfev = x0.size * 100

    termination_status = None
    iteration = 0
    step_norm = None
    actual_reduction = None

    if verbose == 2:
        print_header_nonlinear()

    while True:
        active_set = on_bound * g < 0
        free_set = ~active_set

        g_free = g[free_set]
        g_full = g.copy()
        g[active_set] = 0

        g_norm = norm(g, ord=np.inf)
        if g_norm < gtol:
            termination_status = 1

        if verbose == 2:
            print_iteration_nonlinear(iteration, nfev, cost, actual_reduction,
                                      step_norm, g_norm)

        if termination_status is not None or nfev == max_nfev:
            break

        x_free = x[free_set]
        lb_free = lb[free_set]
        ub_free = ub[free_set]
        scale_inv_free = scale_inv[free_set]

        # Compute (Gauss-)Newton and build quadratic model for Cauchy step.
        if tr_solver == 'exact':
            J_free = J[:, free_set]
            newton_step = lstsq(J_free, -f)[0]

            # Coefficients for the quadratic model along the anti-gradient.
            a, b = build_quadratic_1d(J_free, g_free, -g_free)
        elif tr_solver == 'lsmr':
            Jop = aslinearoperator(J)

            # We compute the lsmr step in scaled variables and then transform
            # back to normal variables. If lsmr gave the exact lsq solution,
            # this would be equivalent to not doing any transformation, but in
            # practice it works better this way.

            # We pass active_set to make computations as if we selected
            # the free subset of J columns, but without actually doing any
            # slicing, which is expensive for sparse matrices and impossible
            # for LinearOperator.

            lsmr_op = lsmr_operator(Jop, scale_inv, active_set)
            newton_step = -lsmr(lsmr_op, f, **tr_options)[0][free_set]
            newton_step *= scale_inv_free

            # Components of g for active variables were zeroed, so this call
            # is correct and equivalent to using J_free and g_free.
            a, b = build_quadratic_1d(Jop, g, -g)

        actual_reduction = -1.0
        while actual_reduction <= 0 and nfev < max_nfev:
            tr_bounds = Delta * scale_inv_free

            step_free, on_bound_free, tr_hit = dogleg_step(
                x_free, newton_step, g_free, a, b, tr_bounds, lb_free, ub_free)

            step.fill(0.0)
            step[free_set] = step_free

            if tr_solver == 'exact':
                predicted_reduction = -evaluate_quadratic(
                    J_free, g_free, step_free)
            elif tr_solver == 'lsmr':
                predicted_reduction = -evaluate_quadratic(Jop, g, step)

            x_new = x + step
            f_new = fun(x_new)
            nfev += 1

            step_h_norm = norm(step * scale, ord=np.inf)

            if not np.all(np.isfinite(f_new)):
                Delta = 0.25 * step_h_norm
                continue

            # Usual trust-region step quality estimation.
            if loss_function is not None:
                cost_new = loss_function(f_new, cost_only=True)
            else:
                cost_new = 0.5 * np.dot(f_new, f_new)
            actual_reduction = cost - cost_new

            Delta, ratio = update_tr_radius(Delta, actual_reduction,
                                            predicted_reduction, step_h_norm,
                                            tr_hit)

            step_norm = norm(step)
            termination_status = check_termination(actual_reduction, cost,
                                                   step_norm, norm(x), ratio,
                                                   ftol, xtol)

            if termination_status is not None:
                break

        if actual_reduction > 0:
            on_bound[free_set] = on_bound_free

            x = x_new
            # Set variables exactly at the boundary.
            mask = on_bound == -1
            x[mask] = lb[mask]
            mask = on_bound == 1
            x[mask] = ub[mask]

            f = f_new
            f_true = f.copy()

            cost = cost_new

            J = jac(x, f)
            njev += 1

            if loss_function is not None:
                rho = loss_function(f)
                J, f = scale_for_robust_loss_function(J, f, rho)

            g = compute_grad(J, f)

            if jac_scaling:
                scale, scale_inv = compute_jac_scaling(J, scale)
        else:
            step_norm = 0
            actual_reduction = 0

        iteration += 1

    if termination_status is None:
        termination_status = 0

    return OptimizeResult(x=x,
                          cost=cost,
                          fun=f_true,
                          jac=J,
                          grad=g_full,
                          optimality=g_norm,
                          active_mask=on_bound,
                          nfev=nfev,
                          njev=njev,
                          status=termination_status)
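dogbox() is an internal driver; user code normally reaches it through scipy.optimize.least_squares with method='dogbox'. A small usage sketch with a toy exponential-decay model:

import numpy as np
from scipy.optimize import least_squares

def residuals(p, t, yobs):
    return p[0] * np.exp(-p[1] * t) - yobs

t = np.linspace(0, 5, 40)
yobs = 2.0 * np.exp(-1.3 * t) + 0.01 * np.random.randn(40)
res = least_squares(residuals, x0=[1.0, 1.0], args=(t, yobs),
                    bounds=([0.0, 0.0], [10.0, 10.0]), method='dogbox')
print(res.x, res.status)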
Пример #49
0
from numpy import *
import matplotlib.pyplot as plt
from numpy.linalg import inv
from numpy.linalg import lstsq
x_i = linspace(0,10,150)
u_i = x_i**2
matrice = array([array([1,i]) for i in x_i])
coeff = lstsq(matrice, u_i, rcond=None)
inter = lambda x: coeff[0][0]+ coeff[0][1]*x
x = linspace(0,10,150)
plt.plot(x, x**2, 'r')
plt.plot(x, inter(x), 'b')
print(abs((1/3)**2 - coeff[0][0] - coeff[0][1]*(1/3)))
plt.show()
Пример #50
0
    A = np.vstack((xi, ones(lAll)))

    #Y vector where training points for the isolated author are set to -1
    y = list(ones(lAll))
    y[indexes[author1]:indexes[author1 + 1]] = [
        -1 for i in range(indexes[author1 + 1] - indexes[author1])
    ]
    y = array(y)

    #Derive Eout for our model
    ierror = []
    for bucket in range(len(shuffled)):
        restindex = [i for i in range(lAll) if i not in shuffled[bucket]]
        Atest = A.T[restindex]
        ytest = y[restindex]
        wtest = linalg.lstsq(Atest, ytest)[0]

        Aval = A.T[shuffled[bucket]]
        yval = y[shuffled[bucket]]
        ystar = np.dot(Aval, wtest.T)
        ydiff = [elem / math.fabs(elem) for elem in ystar] + yval
        ierror.append(
            len([1 for i in ydiff if i == 0]) / len(shuffled[bucket]))
    Eout.append(np.mean(ierror))

    #Get linear regression coefficients
    w = linalg.lstsq(A.T, y)[0]  # obtaining the parameters
    W.append(w)
    #Derive Ein for the author
    ystar = np.dot(A.T, w.T)
    error = [elem / math.fabs(elem) for elem in ystar] + y
Пример #51
0
 def test_poly2D_fitting(self):
     fitter = LinearLSQFitter()
     v = self.model.fit_deriv(x=self.x, y=self.y)
     p = linalg.lstsq(v, self.z.flatten(), rcond=-1)[0]
     new_model = fitter(self.model, self.x, self.y, self.z)
     assert_allclose(new_model.parameters, p)
Пример #52
0
def lsd(velos, V, S, rvs, masks, Lambda=0.):
    """
    Compute LSD profiles and cross correlation functions.
    
    Possibility to include Tikhonov regularization to clean up the profiles,
    when setting C{Lambda>0}.
    
    Possibility to include multiprofile LSD. Parameter C{masks} should be a list
    of C{(centers,weights)} (if you give only one mask, give
    C{masks=[(centers,weights)]}).
    
    See Donati, 1997 for the original paper and Kochukhov, 2010 for extensions.
    
    @parameter velos: velocity vector of observations
    @type velos: array of length N_spec
    @parameter V: observation array
    @type V: N_obs x N_spec array
    @parameter S: weights of individual pixels
    @type S: array of length N_spec
    @parameter rvs: radial velocity vector to compute the profile on
    @type rvs: array of length N_rv
    @parameter Lambda: Tikhonov regularization parameter
    @parameter masks: list of tuples (center velocities, weights)
    @type masks: list (length N_mask) of tuples of 1D arrays
    @type Lambda: float
    @return: LSD profile, CCF of shape (N_obs x (N_rv.N_mask))
    @rtype: 2D array, 2D array
    """
    #-- some global parameters
    m, n = len(rvs), len(velos)
    Nspec = V.shape[1]
    Nmask = len(masks)
    V = np.matrix(V) - 1

    #-- weights of the individual pixels
    S = np.matrix(np.diag(S))
    #-- line masks (yes, this can be vectorized but I'm too lazy for the moment)
    M = np.matrix(np.zeros((n, m * len(masks))))
    for N, (line_centers, weights) in enumerate(masks):
        for l, lc in enumerate(line_centers):
            for i in range(n):
                for j in range(m - 1):
                    vi = velos[i] - lc
                    if not (rvs[j] < vi < rvs[j + 1]): continue
                    M[i, j + N * m] = weights[l] * (rvs[j + 1] -
                                                    vi) / (rvs[j + 1] - rvs[j])
                    M[i, j + 1 + N *
                      m] = weights[l] * (vi - rvs[j]) / (rvs[j + 1] - rvs[j])
    M = np.matrix(M)
    #-- regularization parameter
    if Lambda:
        R = np.matrix(np.zeros((m * Nmask, m * Nmask)))
        for i in range(1, m - 1):
            R[i, i] = 2
            R[i - 1, i] = -1
            R[i + 1, i] = -1
        R[0, 0] = 1
        R[1, 0] = -1
        R[-1, -1] = 1
        R[-2, -1] = -1
    #-- compute the LSD
    X = M.T * (S**2)
    XM = X * M
    if Lambda:
        XM = XM + Lambda * R
    cc = X * V  # this is in fact the cross correlation profile
    #-- XM is of shape (mxm), cc is of shape (mxNspec)
    #-- we can solve this system quickly ourselves or call numpy. I find the
    #   latter more elegant, but it might be slower.
    #Z = la.inv(XM)*cc
    Z, res, rank, s = la.lstsq(XM, cc)
    #-- retrieve LSD profile and cross-correlation function
    Z = np.array(Z.T)
    cc = np.array(cc.T)
    #-- split up the profiles
    Z_ = []
    C_ = []
    for i in range(len(Z)):
        Z_.append([])
        C_.append([])
        for N in range(Nmask):
            Z_[-1].append(Z[i][N * m:(N + 1) * m])
            C_[-1].append(cc[i][N * m:(N + 1) * m])
    #-- that's it!
    return Z_, C_
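An aside on the Lambda*R term: it is Tikhonov regularization, min ||M Z - V||^2 + Lambda ||L Z||^2 written in normal-equation form. The same idea on a generic dense problem can be handed to lstsq directly by stacking sqrt(Lambda)*L under the design matrix (toy A and b, identity smoother assumed):

import numpy as np

A = np.random.rand(30, 10)
b = np.random.rand(30)
lam = 0.1
L_reg = np.eye(10)                                # identity smoother for the sketch
A_aug = np.vstack([A, np.sqrt(lam) * L_reg])
b_aug = np.concatenate([b, np.zeros(10)])
z = np.linalg.lstsq(A_aug, b_aug, rcond=None)[0]  # minimizes ||Az-b||^2 + lam*||z||^2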
Пример #53
0
def polyfit(x, y, deg, rcond=None, full=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points `(x, y)`. Returns a vector of coefficients `p` that minimises
    the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than this
        relative to the largest singular value will be ignored. The default
        value is len(x)*eps, where eps is the relative precision of the float
        type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is
        False (the default) just the coefficients are returned, when True
        diagnostic information from the singular value decomposition is also
        returned.

    Returns
    -------
    p : ndarray, shape (M,) or (M, K)
        Polynomial coefficients, highest power first.
        If `y` was 2-D, the coefficients for `k`-th data set are in ``p[:,k]``.

    residuals, rank, singular_values, rcond : present only if `full` = True
        Residuals of the least-squares fit, the effective rank of the scaled
        Vandermonde coefficient matrix, its singular values, and the specified
        value of `rcond`. For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.

        The warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)

    See Also
    --------
    polyval : Computes polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution minimizes the squared error

    .. math ::
        E = \\sum_{j=0}^k |p(x_j) - y_j|^2

    in the equations::

        x[0]**n * p[n] + ... + x[0] * p[1] + p[0] = y[0]
        x[1]**n * p[n] + ... + x[1] * p[1] + p[0] = y[1]
        ...
        x[k]**n * p[n] + ... + x[k] * p[1] + p[0] = y[k]

    The coefficient matrix of the coefficients `p` is a Vandermonde matrix.

    `polyfit` issues a `RankWarning` when the least-squares fit is badly
    conditioned. This implies that the best fit is not well-defined due
    to numerical error. The results may be improved by lowering the polynomial
    degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
    can also be set to a value smaller than its default, but the resulting
    fit may be spurious: including contributions from the small singular
    values can add numerical noise to the result.

    Note that fitting polynomial coefficients is inherently badly conditioned
    when the degree of the polynomial is large or the interval of sample points
    is badly centered. The quality of the fit should always be checked in these
    cases. When polynomial fits are not satisfactory, splines may be a good
    alternative.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting
    .. [2] Wikipedia, "Polynomial interpolation",
           http://en.wikipedia.org/wiki/Polynomial_interpolation

    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0,  4.0,  5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])

    It is convenient to use `poly1d` objects for dealing with polynomials:

    >>> p = np.poly1d(z)
    >>> p(0.5)
    0.6143849206349179
    >>> p(3.5)
    -0.34732142857143039
    >>> p(10)
    22.579365079365115

    High-order polynomials may oscillate wildly:

    >>> p30 = np.poly1d(np.polyfit(x, y, 30))
    /... RankWarning: Polyfit may be poorly conditioned...
    >>> p30(4)
    -0.80000000000000204
    >>> p30(5)
    -0.99999999999999445
    >>> p30(4.5)
    -0.10547061179440398

    Illustration:

    >>> import matplotlib.pyplot as plt
    >>> xp = np.linspace(-2, 6, 100)
    >>> plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
    [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
    >>> plt.ylim(-2,2)
    (-2, 2)
    >>> plt.show()

    """
    order = int(deg) + 1
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x) * finfo(x.dtype).eps

    # scale x to improve condition number
    scale = abs(x).max()
    if scale != 0:
        x /= scale

    # solve least squares equation for powers of x
    v = vander(x, order)
    c, resids, rank, s = lstsq(v, y, rcond)

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)

    # scale returned coefficients
    if scale != 0:
        if c.ndim == 1:
            c /= vander([scale], order)[0]
        else:
            c /= vander([scale], order).T

    if full:
        return c, resids, rank, s, rcond
    else:
        return c
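A quick demonstration of why the scale = abs(x).max() step helps: mapping x into [-1, 1] before building the Vandermonde matrix can lower its condition number by orders of magnitude (toy numbers):

import numpy as np

x_demo = np.linspace(0, 100, 50)
print(np.linalg.cond(np.vander(x_demo, 6)))                      # enormous
print(np.linalg.cond(np.vander(x_demo / abs(x_demo).max(), 6)))  # far smaller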
Пример #54
0
def initialGuess(vel, data , errors = None, alpha = None, plot = False, mode ='c',
                 verbose = False, SNR_thresh = 5.0, BLFrac = 0.1, SNR2_thresh = 5.0,
                 deblend = True):
    """  Find initial parameter guesses (AGD algorithm)

    data,             Input data
    dv,             x-spacing absolute units
    alpha = No Default,     regularization parameter
    plot = False,     Show diagnostic plots?
    verbose = True    Diagnostic messages
    SNR_thresh = 5.0  Initial Spectrum S/N threshold
    BLFrac =          Edge fraction of data used for S/N threshold computation
    SNR2_thresh =   S/N threshold for Second derivative
    mode = Method for taking derivatives
    """

    errors = None # Until error

    say('\n\n  --> initialGuess() \n',verbose)
    say('Algorithm parameters: ', verbose)
    say('alpha = {0}'.format(alpha), verbose)
    say('SNR_thresh = {0}'.format(SNR_thresh), verbose)
    say('SNR2_thresh = {0}'.format(SNR2_thresh), verbose)
    say('BLFrac = {0}'.format(BLFrac), verbose)

    if not alpha:
        print('Must choose value for alpha, no default.')
        return

    if np.any(np.isnan(data)):
        print('NaN-values in data, cannot continue.')
        return

    # Data inspection
    vel = np.array(vel)
    data = np.array(data)
    dv = np.abs(vel[1]-vel[0])
    fvel = interp1d(np.arange(len(vel)),vel) # Converts from index -> x domain
    data_size = len(data)


    # Take regularized derivatives
    t0 = time.time()
    if mode == 'python':
        say('Taking python derivatives...', verbose)
        u =  tvdiff.TVdiff(data, dx = dv, alph = alpha)
        u2 = tvdiff.TVdiff(u,    dx = dv, alph = alpha)
        u3 = tvdiff.TVdiff(u2,   dx = dv, alph = alpha)
        u4 = tvdiff.TVdiff(u3,   dx = dv, alph = alpha)
    elif mode == 'c':
        say('Taking c derivatives...', verbose)
        # allocate separate output buffers (chained assignment would alias one array)
        u = np.zeros(len(data))
        u2 = np.zeros(len(data))
        u3 = np.zeros(len(data))
        u4 = np.zeros(len(data))
        tv.tv(data = data, deriv = u, alpha = alpha, dx = 1)
        u = u / dv
        tv.tv(data = u, deriv = u2, alpha = alpha, dx = 1)
        u2 = u2 / dv
        tv.tv(data = u2, deriv = u3, alpha = alpha, dx = 1)
        u3 = u3 / dv
        tv.tv (data = u3, deriv = u4, alpha = alpha, dx = 1)
        u4 = u4 / dv
    elif mode == 'conv':
        say('Convolution sigma [pixels]: {0}'.format(alpha), verbose)
        gauss_sigma = alpha
        gauss_sigma_int = np.max([np.fix(gauss_sigma), 5])
        gauss_dn = gauss_sigma_int * 6

        xx = np.arange(2*gauss_dn+2)-(gauss_dn) - 0.5
        gauss = np.exp(-xx**2/2./gauss_sigma**2)
        gauss = gauss / np.sum(gauss)
        gauss1 = np.diff(gauss) / dv
        gauss3 = np.diff(np.diff(gauss1)) / dv**2

        xx2 = np.arange(2*gauss_dn+1)-(gauss_dn)
        gauss2 = np.exp(-xx2**2/2./gauss_sigma**2)
        gauss2 = gauss2 / np.sum(gauss2)
        gauss2 = np.diff(gauss2) / dv
        gauss2 = np.diff(gauss2) / dv
        gauss4 = np.diff(np.diff(gauss2)) / dv**2

        u  = convolve(data, gauss1, mode='wrap')
        u2  = convolve(data, gauss2, mode='wrap')
        u3  = convolve(data, gauss3, mode='wrap')
        u4  = convolve(data, gauss4, mode='wrap')
    elif mode == 'savgol':

        u = savgol_filter(data, alpha, 4, mode='constant', deriv=1)
        u2 = savgol_filter(data, alpha, 4, mode='constant', deriv=2)
        u3 = savgol_filter(data, alpha, 4, mode='constant', deriv=3)
        u4 = savgol_filter(data, alpha, 4, mode='constant', deriv=4)

        u /= dv
        u2 /= dv
        u3 /= dv
        u4 /= dv

    say('...took {0:4.2f} seconds per derivative.'.format((time.time()-t0)/4.), verbose)


    # Decide on signal threshold
    if not errors:
        errors = np.std(data[0:int(BLFrac * data_size)])


    thresh = SNR_thresh * errors
    mask1 = np.array(data > thresh, dtype = 'int')[1:] # Raw Data S/N
    mask3 = np.array(u4.copy()[1:] > 0., dtype = 'int')     # Positive 4th derivative

    if SNR2_thresh > 0.:
        wsort = np.argsort(np.abs(u2))
        RMSD2 = np.std(u2[wsort[0:int(0.5*len(u2))]]) / 0.377 # RMS based on +-1 sigma fluctuations
        say('Second derivative noise: {0}'.format(RMSD2), verbose)
        thresh2 = -RMSD2 * SNR2_thresh
        say('Second derivative threshold: {0}'.format(thresh2), verbose)
    else:
        thresh2 = 0.
    mask4 = np.array(u2.copy()[1:] < thresh2, dtype='int') # Negative second derivative



    # Find optima of second derivative
    # --------------------------------
    zeros = np.abs(np.diff(np.sign(u3)))
    zeros = zeros * mask1 *  mask3 * mask4
    offsets_data_i = np.array(np.where(zeros)).ravel() # Index offsets
    offsets = fvel(offsets_data_i + 0.5) # Velocity offsets (Added 0.5 July 23)
    N_components = len(offsets)
    say('Components found for alpha={1}: {0}'.format(N_components,alpha), verbose=verbose)


    # Check if nothing was found, if so, return null
    # ----------------------------------------------
    if N_components == 0:
        odict = {'means':[], 'FWHMs': [], 'amps':[],
             'u2':u2, 'errors':errors, 'thresh2':thresh2,
             'thresh':thresh, 'N_components':N_components}

        return odict


#        say('AGD2.initialGuess: No components found for alpha={0}! Returning ([] [] [] [] [])'.format(alpha))
#        return [], [], [], u2



    # Find points of inflection
    inflection = np.abs(np.diff(np.sign(u2)))


    # Find Relative widths, then measure
    # peak-to-inflection distance for sharpest peak
    widths = np.sqrt(np.abs(data/u2)[offsets_data_i])
    FWHMs = widths * 2.355


    # Attempt deblending.
    # If Deblending results in all non-negative answers, keep.
    amps = np.array(data[offsets_data_i])
    if deblend:
        FF_matrix = np.zeros([len(amps),len(amps)])
        for i in range(FF_matrix.shape[0]):
            for j in range(FF_matrix.shape[1]):
                FF_matrix[i,j] = np.exp(-(offsets[i]-offsets[j])**2/2./(FWHMs[j] / 2.355)**2)
        amps_new = lstsq(FF_matrix, amps, rcond=None)[0]
        if np.all(amps_new > 0):
            amps = amps_new


    odict = {'means':offsets, 'FWHMs': FWHMs, 'amps':amps,
             'u2':u2, 'errors':errors, 'thresh2':thresh2,
             'thresh':thresh, 'N_components':N_components}

    return odict
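An aside on the deblend step above: the amplitude read off the data at each peak position includes leakage from neighbouring components, and solving FF_matrix * amps_new = amps undoes that mixing. Tiny sketch with two overlapping Gaussians (toy numbers):

import numpy as np

offs = np.array([0.0, 1.0])
fwhms = np.array([2.0, 2.0])
F = np.exp(-(offs[:, None] - offs[None, :])**2 / 2. / (fwhms[None, :] / 2.355)**2)
amps_obs = np.array([1.2, 1.1])                  # peak heights with overlap mixed in
amps_clean = np.linalg.lstsq(F, amps_obs, rcond=None)[0]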
Пример #55
0
 def lsq(self):
     A = array([self.x, ones(self.x.size)])
     W = linalg.lstsq(A.T, self.y)[0]
     return (self.x[0], self.x[0] * W[0] + W[1], self.x[-1],
             self.x[-1] * W[0] + W[1])
Пример #56
0
def chebfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Chebyshev series to data.

    Fit a Chebyshev series ``p(x) = p[0] * T_{0}(x) + ... + p[deg] *
    T_{deg}(x)`` of degree `deg` to points `(x, y)`. Returns a vector of
    coefficients `p` that minimises the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products ``w[i]*y[i]``
        all have the same variance.  The default value is None.

        .. versionadded:: 1.5.0

    Returns
    -------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
        Chebyshev coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column k  of `y` are in column
        `k`.

    [residuals, rank, singular_values, rcond] : present when `full` = True
        Residuals of the least-squares fit, the effective rank of the
        scaled Vandermonde matrix and its singular values, and the
        specified value of `rcond`. For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.  The
        warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', RankWarning)

    See Also
    --------
    chebval : Evaluates a Chebyshev series.
    chebvander : Vandermonde matrix of Chebyshev series.
    polyfit : least squares fit using polynomials.
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution is the coefficients ``c[i]`` of the Chebyshev series
    ``T(x)`` that minimize the squared error

    ``E = \\sum_j |y_j - T(x_j)|^2``.

    This problem is solved by setting it up as the overdetermined matrix
    equation

    ``V(x)*c = y``,

    where ``V`` is the Vandermonde matrix of `x`, the elements of ``c`` are
    the coefficients to be solved for, and the elements of `y` are the
    observed values.  This equation is then solved using the singular value
    decomposition of ``V``.

    If some of the singular values of ``V`` are so small that they are
    neglected, then a `RankWarning` will be issued. This means that the
    coefficient values may be poorly determined. Using a lower order fit
    will usually get rid of the warning.  The `rcond` parameter can also be
    set to a value smaller than its default, but the resulting fit may be
    spurious and have large contributions from roundoff error.

    Fits using Chebyshev series are usually better conditioned than fits
    using power series, but much can depend on the distribution of the
    sample points and the smoothness of the data. If the quality of the fit
    is inadequate splines may be a good alternative.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting

    """
    order = int(deg) + 1
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    # set up the least squares matrices
    lhs = chebvander(x, deg)
    rhs = y
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights
        if rhs.ndim == 2:
            lhs *= w[:, np.newaxis]
            rhs *= w[:, np.newaxis]
        else:
            lhs *= w[:, np.newaxis]
            rhs *= w

    # set rcond
    if rcond is None:
        rcond = len(x) * np.finfo(x.dtype).eps

    # scale the design matrix and solve the least squares equation
    scl = np.sqrt((lhs * lhs).sum(0))
    c, resids, rank, s = la.lstsq(lhs / scl, rhs, rcond)
    c = (c.T / scl).T

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, pu.RankWarning)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
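# A quick usage sketch of this routine; the same function is available as
# numpy.polynomial.chebyshev.chebfit, and the sample data are illustrative.
import numpy as np
from numpy.polynomial.chebyshev import chebfit, chebval

x = np.linspace(-1, 1, 51)
y = np.sin(np.pi * x) + 0.01 * np.random.randn(x.size)

coef = chebfit(x, y, 5)
resid = y - chebval(x, coef)
print(coef.shape)           # (6,): deg + 1 coefficients
print(np.abs(resid).max())  # maximum fit residual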
Example #57
def DARTS(Z, **kwargs):
    """Compute the advection field from a sequence of input images by using the
    DARTS method.

    Parameters
    ----------
    Z : array-like
      Array of shape (T,m,n) containing a sequence of T two-dimensional input
      images of shape (m,n).

    Other Parameters
    ----------------
    N_x : int
        Number of DFT coefficients to use for the input images, x-axis (default=50).
    N_y : int
        Number of DFT coefficients to use for the input images, y-axis (default=50).
    N_t : int
        Number of DFT coefficients to use for the input images, time axis (default=4).
        N_t must be strictly smaller than T.
    M_x : int
        Number of DFT coefficients to compute for the output advection field,
        x-axis  (default=2).
    M_y : int
        Number of DFT coefficients to compute for the output advection field,
        y-axis (default=2).
    fft_method : str
        A string defining the FFT method to use, see utils.fft.get_method.
        Defaults to 'numpy'.
    n_threads : int
        Number of threads to use for the FFT computation. Applicable if
        fft_method is 'pyfftw'.
    print_info : bool
        If True, print information messages.
    lsq_method : {1, 2}
        The method to use for solving the linear equations in the least squares
        sense: 1=numpy.linalg.lstsq, 2=explicit computation of the Moore-Penrose
        pseudoinverse and SVD.
    verbose : bool
        If set to True, print information about the program execution.

    Returns
    -------
    out : ndarray
        Three-dimensional array (2,H,W) containing the dense x- and y-components
        of the motion field.

    References
    ----------
    :cite:`RCW2011`

    """
    N_x = kwargs.get("N_x", 50)
    N_y = kwargs.get("N_y", 50)
    N_t = kwargs.get("N_t", 4)
    M_x = kwargs.get("M_x", 2)
    M_y = kwargs.get("M_y", 2)
    fft_method = kwargs.get("fft_method", "numpy")
    print_info = kwargs.get("print_info", False)
    lsq_method = kwargs.get("lsq_method", 2)
    verbose = kwargs.get("verbose", True)

    if N_t >= Z.shape[0]:
        raise ValueError("N_t = %d >= %d = T, but N_t < T required" %
                         (N_t, Z.shape[0]))

    if verbose:
        print("Computing the motion field with the DARTS method.")
        t0 = time.time()

    Z = np.moveaxis(Z, (0, 1, 2), (2, 0, 1))

    fft = utils.get_method(fft_method,
                           shape=Z.shape[:2],
                           fftn_shape=Z.shape,
                           **kwargs)

    T_x = Z.shape[1]
    T_y = Z.shape[0]
    T_t = Z.shape[2]

    if print_info:
        print("-----")
        print("DARTS")
        print("-----")

        print("  Computing the FFT of the reflectivity fields..."),
        sys.stdout.flush()
        starttime = time.time()

    Z = fft.fftn(Z)

    if print_info:
        print("Done in %.2f seconds." % (time.time() - starttime))

        print("  Constructing the y-vector..."),
        sys.stdout.flush()
        starttime = time.time()

    m = (2 * N_x + 1) * (2 * N_y + 1) * (2 * N_t + 1)
    n = (2 * M_x + 1) * (2 * M_y + 1)

    y = np.zeros(m, dtype=complex)

    k_t, k_y, k_x = np.unravel_index(np.arange(m),
                                     (2 * N_t + 1, 2 * N_y + 1, 2 * N_x + 1))

    for i in range(m):
        k_x_ = k_x[i] - N_x
        k_y_ = k_y[i] - N_y
        k_t_ = k_t[i] - N_t

        Z_ = Z[k_y_, k_x_, k_t_]

        y[i] = k_t_ * Z_

    if print_info:
        print("Done in %.2f seconds." % (time.time() - starttime))

    A = np.zeros((m, n), dtype=complex)
    B = np.zeros((m, n), dtype=complex)

    if print_info:
        print("  Constructing the H-matrix..."),
        sys.stdout.flush()
        starttime = time.time()

    c1 = -1.0 * T_t / (T_x * T_y)

    kp_y, kp_x = np.unravel_index(np.arange(n), (2 * M_y + 1, 2 * M_x + 1))

    for i in range(m):
        k_x_ = k_x[i] - N_x
        k_y_ = k_y[i] - N_y
        k_t_ = k_t[i] - N_t

        kp_x_ = kp_x[:] - M_x
        kp_y_ = kp_y[:] - M_y

        i_ = k_y_ - kp_y_
        j_ = k_x_ - kp_x_

        Z_ = Z[i_, j_, k_t_]

        c2 = c1 / T_y * i_
        A[i, :] = c2 * Z_

        c2 = c1 / T_x * j_
        B[i, :] = c2 * Z_

    if print_info:
        print("Done in %.2f seconds." % (time.time() - starttime))

        print("  Solving the linear systems..."),
        sys.stdout.flush()
        starttime = time.time()

    if lsq_method == 1:
        x = lstsq(np.hstack([A, B]), y, rcond=0.01)[0]
    else:
        x = _leastsq(A, B, y)

    if print_info:
        print("Done in %.2f seconds." % (time.time() - starttime))

    h, w = 2 * M_y + 1, 2 * M_x + 1

    U = np.zeros((h, w), dtype=complex)
    V = np.zeros((h, w), dtype=complex)

    i, j = np.unravel_index(np.arange(h * w), (h, w))

    V[i, j] = x[0:h * w]
    U[i, j] = x[h * w:2 * h * w]

    k_x, k_y = np.meshgrid(np.arange(-M_x, M_x + 1), np.arange(-M_y, M_y + 1))

    U = np.real(fft.ifft2(_fill(U, Z.shape[0], Z.shape[1], k_x, k_y)))
    V = np.real(fft.ifft2(_fill(V, Z.shape[0], Z.shape[1], k_x, k_y)))

    if verbose:
        print("--- %s seconds ---" % (time.time() - t0))

    # TODO: Sometimes the sign of the advection field is wrong. This appears to
    # depend on N_t...
    return np.stack([U, V])
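# A usage sketch for DARTS, assuming the helper modules it references
# (utils, _leastsq, _fill) are importable; in pysteps this function lives
# in pysteps.motion.darts. The synthetic sequence is a Gaussian blob
# translating one pixel per frame (illustrative values only).
import numpy as np

T, m, n = 8, 64, 64
yy, xx = np.mgrid[0:m, 0:n]
Z = np.stack([np.exp(-((xx - 20.0 - t)**2 + (yy - 32.0)**2) / 50.0)
              for t in range(T)])

# N_t must be strictly smaller than T; lsq_method=1 exercises the
# numpy.linalg.lstsq branch shown above.
UV = DARTS(Z, N_x=20, N_y=20, N_t=3, lsq_method=1, verbose=False)
print(UV.shape)  # (2, 64, 64): x- and y-components of the motion field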
Example #58
def draw_lanes(img, lines, color=[0, 255, 255], thickness=3):

    # if this fails, go with some default line
    try:

        # finds the maximum y value for a lane marker
        # (since we cannot assume the horizon will always be at the same point.)

        ys = []
        for i in lines:
            for ii in i:
                ys += [ii[1], ii[3]]  # collect both y coordinates of each segment
        min_y = min(ys)  # smallest y coordinate among all segments
        max_y = 600
        new_lines = []
        line_dict = {}

        for idx, i in enumerate(lines):
            for xyxy in i:
                # These four lines:
                # modified from http://stackoverflow.com/questions/21565994/method-to-return-the-equation-of-a-straight-line-given-two-points
                # Used to calculate the definition of a line, given two sets of coords.
                x_coords = (xyxy[0], xyxy[2])
                y_coords = (xyxy[1], xyxy[3])
                A = vstack([x_coords, ones(len(x_coords))]).T  # stack into a design matrix
                m, b = lstsq(A, y_coords)[0]  # least-squares solution of the overdetermined system

                # Calculating our new, and improved, xs
                x1 = (min_y - b) / m
                x2 = (max_y - b) / m

                line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]
                new_lines.append([int(x1), min_y, int(x2), max_y])

        final_lanes = {}

        for idx in line_dict:
            final_lanes_copy = final_lanes.copy()
            m = line_dict[idx][0]
            b = line_dict[idx][1]
            line = line_dict[idx][2]

            if len(final_lanes) == 0:
                final_lanes[m] = [[m, b, line]]

            else:
                found_copy = False

                for other_ms in final_lanes_copy:

                    if not found_copy:
                        if abs(other_ms * 1.2) > abs(m) > abs(other_ms * 0.8):
                            if abs(final_lanes_copy[other_ms][0][1] *
                                   1.2) > abs(b) > abs(
                                       final_lanes_copy[other_ms][0][1] * 0.8):
                                final_lanes[other_ms].append([m, b, line])
                                found_copy = True
                                break
                        else:
                            final_lanes[m] = [[m, b, line]]

        line_counter = {}

        for lanes in final_lanes:
            line_counter[lanes] = len(final_lanes[lanes])

        top_lanes = sorted(line_counter.items(),
                           key=lambda item: item[1])[::-1][:2]

        lane1_id = top_lanes[0][0]
        lane2_id = top_lanes[1][0]

        def average_lane(lane_data):
            x1s = []
            y1s = []
            x2s = []
            y2s = []
            for data in lane_data:
                x1s.append(data[2][0])
                y1s.append(data[2][1])
                x2s.append(data[2][2])
                y2s.append(data[2][3])
            return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(
                mean(y2s))

        l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
        l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])

        return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2, l2_y2]
    except Exception as e:
        # Note: despite the comment at the top, no default line is returned here.
        print(str(e))
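# The per-segment fit above boils down to recovering slope and intercept
# from two endpoints; a standalone sketch with made-up coordinates:
import numpy as np

x_coords = (120, 480)   # x1, x2 of one detected segment
y_coords = (400, 250)   # y1, y2

A = np.vstack([x_coords, np.ones(len(x_coords))]).T
m, b = np.linalg.lstsq(A, y_coords, rcond=None)[0]

# Extrapolate the segment to fixed top/bottom image rows, as draw_lanes does.
min_y, max_y = 200, 600
print(m, b, int((min_y - b) / m), int((max_y - b) / m))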
Example #59
# Imports added so the snippet runs standalone (an assumption: the
# original script presumably imported these at the top).
import math
import numpy as np
from numpy.linalg import solve, qr, lstsq
from scipy.linalg import solve_triangular

print("\nzad3\n")

n = 20
t = np.transpose(np.linspace(0, 1, n))
b = [math.cos(4 * t[i]) for i in range(t.size)]
b = np.transpose(b)

print(t)
print(b)

vand = np.vander(t, increasing=True)
#print("\nVandermonde matrix:\n{0}".format(vand))
print(np.round(vand, 3))
#print("\n",np.fliplr(np.round(vand,1)))

x1 = solve(vand, b)
print("\n", np.round(x1, 1))

# A = QR with Q orthogonal and R upper triangular, so Ax = b reduces to
# the triangular system Rx = Q^T b.
q, r = qr(vand)
x2 = solve_triangular(r, q.T @ b)
print("\n", np.round(x2, 1))

x3 = lstsq(vand, b, rcond=None)[0]
print("\n", x3)
# If the matrix is square and of full rank, lstsq returns (up to round-off
# error) the exact solution, so x3 should match x1 and x2.

# Residual check (note: matrix product, not elementwise '*'):
# norm1 = np.linalg.norm(b - vand @ x1)
# print(norm1)
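# The Vandermonde matrix is notoriously ill-conditioned, which explains any
# disagreement between the three solutions in the trailing digits; a quick
# check that can be appended to the script above:
print("cond(vand) = {:.2e}".format(np.linalg.cond(vand)))
print("max|x1 - x3| = {:.2e}".format(np.max(np.abs(x1 - x3))))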
Example #60
# Imports added so the snippet runs standalone; `sparse`, `fb_lasso` and
# `dpi` are assumed to be provided by the accompanying helper modules.
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt

folder = "scripts/../output/L1_reg/"

# We start by defining the characteristics of the problem
data_size = 100
data_number = round(data_size / 2)
sparsity_level = 10
noise_level = 1e-2 * 0  # the factor 0 makes this a noiseless experiment

# We define the main components of our problem
Phi = np.random.randn(data_number, data_size)
x0 = np.sign(sparse.randn(data_size, 1, sparsity_level))
noise = noise_level * np.random.randn(data_number, 1)
y = Phi@x0 + noise

# Let's compare the ground truth with the pseudo inverse solution
x_pinv = la.lstsq(Phi, y, rcond=None)[0]
_ = plt.figure(dpi=dpi)
sparse.stem(x0, "C0", "ground truth")
sparse.stem(x_pinv, "C1", "pinv solution")
plt.show()

# Let's compare the ground truth with the solution of the LASSO
# (computed with the Forward-Backward algorithm)
reg_param = 0.01
iter_nb = 40000

x_reg = fb_lasso(Phi, y, reg_param, iter_nb)
_ = plt.figure(dpi=dpi)
sparse.stem(x0, "C0", "ground truth")
sparse.stem(x_reg, "C1", "reg solution")
plt.show()
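# fb_lasso is not defined in this snippet; a minimal forward-backward
# (ISTA) sketch of such a solver, assuming it minimizes
# 0.5*||Phi x - y||^2 + reg_param*||x||_1:
import numpy as np

def fb_lasso(Phi, y, reg_param, iter_nb):
    # Step size from the Lipschitz constant of the smooth part's gradient.
    tau = 1.0 / np.linalg.norm(Phi, 2) ** 2
    x = np.zeros((Phi.shape[1], 1))
    for _ in range(iter_nb):
        # Forward (gradient) step on the data-fidelity term...
        z = x - tau * Phi.T @ (Phi @ x - y)
        # ...then backward (proximal) step: soft-thresholding for the l1 term.
        x = np.sign(z) * np.maximum(np.abs(z) - tau * reg_param, 0.0)
    return x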