Example #1
def fit_to_model(imchunk, model, mode='pinv', fit_pix_mask=None, baseline=None):
    import numpy as np
    #im_array = (imchunk-baseline)#/baseline
    if baseline is not None:
        im_array = imchunk - baseline  # /baseline
    else:
        im_array = imchunk
    imshape = np.shape(im_array[0])
    im_array = im_array.reshape((-1,imshape[0]*imshape[1]))
    if mode == 'nnls':
        from scipy.optimize import nnls  # hoisted out of the per-frame loop
        fits = np.empty((np.shape(model)[0], np.shape(im_array)[0]))
        for i, im2 in enumerate(im_array):
            im = im2.copy()
            im[~np.isfinite(im)] = 0  # zero out NaNs/infs before fitting
            if fit_pix_mask is not None:
                fits[:, i] = nnls(model[:, fit_pix_mask].T, im[fit_pix_mask])[0]
            else:
                fits[:, i] = nnls(model.T, im)[0]
    else:
        im = im_array
        print(np.shape(im_array))
        from numpy.linalg import pinv
        if fit_pix_mask is not None:
            fits = np.dot(pinv(model[:, fit_pix_mask]).T, im[:, fit_pix_mask].T)
        else:
            # transpose im to (pixels, frames) to match the masked branch above
            fits = np.dot(pinv(model).T, im.T)
    return fits
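
A minimal usage sketch for the function above, on synthetic data (all names and shapes below are illustrative, not from the original repository):

import numpy as np

frames = np.random.rand(5, 4, 4)     # 5 frames of 4x4 pixels
model = np.random.rand(2, 16)        # 2 components, each a flattened 4x4 image

coeffs_pinv = fit_to_model(frames, model)               # shape (2, 5)
coeffs_nnls = fit_to_model(frames, model, mode='nnls')  # shape (2, 5), non-negative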
Example #2
    def _update_scipy_nnls(self, W, H):
        """
        Run one update step on the regularized cost function, using the
        non-negative least squares solver provided by SciPy (active-set variant)

        :param W: the left factorizing matrix
        :param H: the right factorizing matrix
        :type W: numpy.ndarray
        :type H: numpy.ndarray
        :returns: two-tuple (W,H) with new matrices
        """

        # 'augmented' data matrix with vector of zeros
        Xaug = np.r_[self.X, np.zeros((1,H.shape[1]))]

        # 'augmented' left factorizing matrix with vector of ones
        Waug = np.r_[W, np.sqrt(self.beta)*np.ones((1,H.shape[0]))]

        Htaug = np.r_[H.T, np.sqrt(self.eta) * np.eye(H.shape[0])]
        Xaugkm = np.r_[self.X.T, np.zeros(W.T.shape)]

        for i in range(W.shape[0]):
            W[i, :] = nnls(Htaug, Xaugkm[:, i])[0]

        for j in range(H.shape[1]):
            H[:, j] = nnls(Waug, Xaug[:, j])[0]

        return (W,H)
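
The "augmentation" above is the standard trick for folding a quadratic penalty into a least-squares solver: min ||Ax - b||^2 + eta*||x||^2 over x >= 0 is the same problem as nnls([A; sqrt(eta)*I], [b; 0]). A small self-contained check of that identity (illustrative values):

import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(0)
A = rng.random((20, 4))
b = rng.random(20)
eta = 0.5

# Stack sqrt(eta)*I under A and zeros under b, then solve plain NNLS
A_aug = np.vstack([A, np.sqrt(eta) * np.eye(4)])
b_aug = np.concatenate([b, np.zeros(4)])
x, rnorm = nnls(A_aug, b_aug)

# The squared residual of the augmented system is the penalized objective
obj = np.linalg.norm(A @ x - b)**2 + eta * np.linalg.norm(x)**2
assert np.isclose(rnorm**2, obj)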
Example #3
 def test_maxiter(self):
     # test that maxiter argument does stop iterations
     # NB: did not manage to find a test case where the default value
     # of maxiter is not sufficient, so use a too-small value
     rndm = np.random.RandomState(1234)
     a = rndm.uniform(size=(100, 100))
     b = rndm.uniform(size=100)
     with assert_raises(RuntimeError):
         nnls(a, b, maxiter=1)
Example #4
 def _autoEncode(self, queryVector, H, nnlsRegress=False):
     if nnlsRegress:
         # a very clean topic assignment, most topics are zero.
         latentVector, _ = nnls(H.T, queryVector)
         recs, _ = nnls(H, latentVector)
     else:
         # slightly more noisy, which might be good
         latentVector = np.dot(H, queryVector)
         recs = np.dot(latentVector, H)
     return recs, latentVector
Example #5
    def transform(self, X):
        """Transform the data X according to the fitted NMF model

        Parameters
        ----------

        X: {array-like, sparse matrix}, shape = [n_samples, n_features]
            Data matrix to be transformed by the model

        Returns
        -------
        data: array, [n_samples, n_components]
            Transformed data
        """
        X, = check_arrays(X, sparse_format='csc')
        Wt = np.zeros((self.n_components_, X.shape[0]))
        check_non_negative(X, "ProjectedGradientNMF.transform")

        if sp.issparse(X):
            Wt, _, _ = _nls_subproblem(X.T, self.components_.T, Wt,
                                       tol=self.tol,
                                       max_iter=self.nls_max_iter)
        else:
            for j in range(0, X.shape[0]):
                Wt[:, j], _ = nnls(self.components_.T, X[j, :])
        return Wt.T
Example #6
def tikhonov(A, b, alpha, allowNegative=True):
    """
    Applies Tikhonov regularization to the matrix A and the right-hand side b.

    @type  A: matrix
    @param A: The matrix to be regularized.
    @type  b: vector
    @param b: The right-hand side of the matrix equation.
    @type  alpha: number
    @param alpha: The regularization parameter.
    @type  allowNegative: boolean
    @param allowNegative: If false, only positive x values are allowed in the approximation; otherwise negative values are allowed as well.

    @rtype: vector, number, number
    @return: solution, residual, norm of the solution
    """
    
    n = A.shape[0]
    m = A.shape[1]
    A1 = np.concatenate((A, alpha * matlib.identity(m)))
    # pad b with m zeros so b1 matches A1's n+m rows (the original padded with n)
    b1 = np.concatenate((b, np.zeros(shape=(m, 1))))

    print(A, A.shape)
    print(A1, A1.shape)

    if (allowNegative):
        x, res, rank, s = lstsq(A1, np.squeeze(b1))
        return x, res, norm(x)
    else:
        x, res = spOpt.nnls(A1, np.squeeze(b1))
        return x, res, norm(x)
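
A quick usage sketch, assuming module-level imports matching the names used in the source (np, numpy.matlib as matlib, lstsq, norm, scipy.optimize as spOpt):

import numpy as np
import numpy.matlib as matlib
from numpy.linalg import norm
from scipy.linalg import lstsq
import scipy.optimize as spOpt

A = np.random.rand(6, 3)
b = np.random.rand(6, 1)

# Unconstrained Tikhonov solution vs. non-negative solution
x_free, res_free, n_free = tikhonov(A, b, alpha=0.1)
x_pos, res_pos, n_pos = tikhonov(A, b, alpha=0.1, allowNegative=False)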
Example #7
 def store_models(self, specs, ivar):
     self.models = n.zeros( (specs.shape) )
     for i in range(self.models.shape[0]):
         minloc = n.unravel_index( self.zchi2arr[i].argmin(),
                                  self.zchi2arr[i].shape )
         pmat = n.zeros( (specs.shape[-1],self.npoly+1) )
         this_temp = self.templates[minloc[:-1]]
         pmat[:,0] = this_temp[(minloc[-1]*self.npixstep) +
                               self.pixoffset:(minloc[-1]*self.npixstep) +
                               self.pixoffset+specs.shape[-1]]
         polyarr = poly_array(self.npoly, specs.shape[-1])
         pmat[:,1:] = n.transpose(polyarr)
         ninv = n.diag(ivar[i])
     try: # Some eBOSS spectra have ivar[i] = 0 for all i
         '''
         f = n.linalg.solve( n.dot(n.dot(n.transpose(pmat),ninv),pmat),
                         n.dot( n.dot(n.transpose(pmat),ninv),specs[i]) )
         '''
         f = nnls( n.dot(n.dot(n.transpose(pmat),ninv),pmat),
                  n.dot( n.dot(n.transpose(pmat),ninv),specs[i]) )[0]
         self.models[i] = n.dot(pmat, f)
     except Exception as e:
         self.models[i] = n.zeros(specs.shape[-1])
         print("Exception: %r" % e)
Example #8
    def change_component_set(self, new_component_list):
        """
        Change the set of basis components without 
        changing the bulk composition. 

        Will raise an exception if the new component set is 
        invalid for the given composition.

        Parameters
        ----------
        new_component_list : list of strings
            New set of basis components.
        """
        composition = np.array([self.atomic_composition[element]
                                for element in self.element_list])
        component_matrix = np.zeros((len(new_component_list), len(self.element_list)))
        
        for i, component in enumerate(new_component_list):
            formula = dictionarize_formula(component)
            for element, n_atoms in formula.items():
                component_matrix[i][self.element_list.index(element)] = n_atoms

        sol = nnls(component_matrix.T, composition)
        if sol[1] < 1.e-12:
            component_amounts = sol[0]
        else:
            raise Exception('Failed to change component set. '
                            'Could not find a non-negative least squares solution. '
                            'Can the bulk composition be described with this set of components?')

        composition = OrderedCounter(dict(zip(new_component_list, component_amounts)))
        self.__init__(composition, 'molar')
Example #9
def _optimise_onedgaussian_amp_nnls(pars, x=None, data=None) :
    """ Returns the best set of amplitude for a given set of sigma
    for a set of N 1D Gaussian functions

    The function returns the result of the NNLS solving (scipy)

    :param pars: input parameters including sigma
    :param x: radii
    :param data: data to fit
        data and r should have the same size
    """

    ngauss = len(pars) 

    ## First get the normalised values from the gaussians
    ## We normalised this also to 1/data to have a sum = 1
    nGnorm = _n_centred_onedgaussian_Datanorm(pars=pars)(x,data)

    ## This is the vector we wish to get as close as possible
    ## The equation being : Sum_n In * (G1D_n) = 1.0
    ##                   or       I  x    G    = d
    d = np.ones(np.size(x), dtype=np.float64)

    ## Use NNLS to solve the linear bounded (0) equations
    try:
        sol_nnls, norm_nnls = nnls(nGnorm, d)
    except RuntimeError:
        print("Warning: Too many iterations in NNLS")
        return np.zeros(ngauss, dtype=np.float64)
    return sol_nnls
Example #10
def NNLS(M, U):
    """
    NNLS performs non-negative constrained least squares of each pixel
    in M using the endmember signatures of U.  Non-negative constrained least
    squares with the abundance nonnegative constraint (ANC).
    Utilizes the method of Bro.

    Parameters:
        M: `numpy array`
            2D data matrix (N x p).

        U: `numpy array`
            2D matrix of endmembers (q x p).

    Returns: `numpy array`
        An abundance map (N x q).

    References:
        Bro R., de Jong S., Journal of Chemometrics, 1997, 11, 393-401.
    """
    import scipy.optimize as opt

    N, p1 = M.shape
    q, p2 = U.shape

    X = np.zeros((N, q), dtype=np.float32)
    MtM = np.dot(U, U.T)
    for n1 in range(N):
        # opt.nnls() return a tuple, the first element is the result
        X[n1] = opt.nnls(MtM, np.dot(U, M[n1]))[0]
    return X
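
A usage sketch on synthetic mixtures (illustrative data). Note that the snippet solves the cross-product system U Uᵀ x ≈ U m under the non-negativity constraint, reusing the normal-equation products in the spirit of Bro & de Jong, rather than fitting Uᵀ x ≈ m directly:

import numpy as np

N, p, q = 100, 6, 3
U = np.random.rand(q, p)        # endmember signatures (q x p)
abund = np.random.rand(N, q)    # ground-truth abundances
M = abund @ U                   # synthetic mixed pixels (N x p)

X = NNLS(M, U)                  # estimated abundances, shape (N, q)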
Example #11
def logP(value=0.,p=pars):
    lp = 0.
    models = []
    for i in range(len(imgs)):
        if i == 0:
            dx,dy = 0,0
        else:
            dx = pars[0].value 
            dy = pars[1].value 
        xp,yp = xc+dx,yc+dy
        image = imgs[i]
        sigma = sigs[i]
        psf = PSFs[i]
        imin,sigin,xin,yin = image[mask], sigma[mask],xp[mask2],yp[mask2]
        n = 0
        model = np.empty(((len(srcs)),imin.size))
        for lens in lenses:
            lens.setPars()
        x0,y0 = pylens.lens_images(lenses,srcs,[xin,yin],1./OVRS,getPix=True)
        for src in srcs:
            src.setPars()
            tmp = xc*0.
            tmp[mask2] = src.pixeval(x0,y0,1./OVRS,csub=1)
            tmp = iT.resamp(tmp,OVRS,True)
            tmp = convolve.convolve(tmp,psf,False)[0]
            model[n] = tmp[mask].ravel()
            n +=1
        rhs = (imin/sigin) # data
        op = (model/sigin).T # model matrix
        fit, chi = optimize.nnls(op,rhs)
        model = (model.T*fit).sum(1)
        resid = (model-imin)/sigin
        lp += -0.5*(resid**2.).sum()
        models.append(model)
    return lp #,models
Example #12
 def create_model(self, fname, npoly, npixstep, minvector, zfindobj,
                  flux, ivar):
     """Return the best fit model for a given template at a
         given redshift.
     """
     try:
         pixoffset = zfindobj.pixoffset
         temps = read_ndArch( join( environ['REDMONSTER_TEMPLATES_DIR'],
                                   fname ) )[0]
         pmat = n.zeros( (self.npixflux, npoly+1) )
         this_temp = temps[minvector[:-1]]
         pmat[:,0] = this_temp[(minvector[-1]*npixstep)+pixoffset:\
                               (minvector[-1]*npixstep)+pixoffset + \
                               self.npixflux]
         polyarr = poly_array(npoly, self.npixflux)
         pmat[:,1:] = n.transpose(polyarr)
         ninv = n.diag(ivar)
         f = linalg.solve( n.dot(n.dot(n.transpose(pmat),ninv),pmat),
                            n.dot( n.dot(n.transpose(pmat),ninv),flux) )
         f = n.array(f)
         if f[0] < 0:
             try:
                 f = nnls( n.dot(n.dot(n.transpose(pmat),ninv),pmat),
                          n.dot( n.dot(n.transpose(pmat),ninv),flux) )[0]
                 f = n.array(f)
                 return n.dot(pmat,f), tuple(f)
             except Exception as e:
                 print("Exception: %r" % e)
                 return n.zeros(self.npixflux), (0,)
         else:
             return n.dot(pmat,f), tuple(f)
     except Exception as e:
         print("Exception: %r" % e)
         return n.zeros(self.npixflux), (0,)
Example #13
 def test_nnls(self):
     a = arange(25.0).reshape(-1,5)
     x = arange(5.0)
     y = dot(a,x)
     x, res = nnls(a,y)
     assert_(res < 1e-7)
     assert_(norm(dot(a,x)-y) < 1e-7)
Example #14
def calculate(target, ig_min, ig_max, ingredients):
    from scipy import optimize
    Gmatrix = np.array([i.compounds for i in ingredients])
    # Non-negative least squares solution
    Igrams = optimize.nnls(Gmatrix.T, target)[0]
    # Weed out gram amounts that are not within the specified range. Also
    # remove the rows corresponding to that ingredient from the matrix and the
    # ingredient.
    zipped = zip(Igrams, Gmatrix, ingredients)
    tuples = [(g, r, i) for g, r, i in zipped if within(ig_min, ig_max, g)]
    Igrams = [ g for g, r, i in tuples ]
    if not Igrams:
        return None
    Gmatrix = [ r for g, r, i in tuples ]
    Ingredients = [ i for g, r, i in tuples ]

    Cgrams    = (np.matrix(Gmatrix).T * np.array([Igrams]).T).T.getA1()
    Icalories = [ i.calories*x for i, x in zip(Ingredients, Igrams) ]
    Ccalories = np.multiply(CGs.values(), Cgrams)
    Cnames    = CGs.keys()
    Inames    = [ i.name for i in Ingredients ]

    return {
        'error': np.linalg.norm(np.array(Cgrams) - target)/100,
        'total': (np.sum(Igrams), np.sum(Icalories)),
        'ingredients': sorted(zip(Inames, Igrams, Icalories), key=lambda x: x[1], reverse=True),
        'compounds': zip(Cnames, Cgrams, Ccalories),
    }
Example #15
 def complete_with(self, foods):
     # This function does these things
     # Adjust rdi based on already added foods
     # Build food_mat from foods
     # Normalizes and weights using half of upper-lower
     # Linear regress
     # Remove zero foods from result
     n, m = len(self.target_di), len(foods)
     food_mat = np.zeros([n, m])
     di_amounts = np.zeros([n])
     di_so_far = self.get_di()
     norm_terms = np.ones([n])
     missing = dict()
     for i in range(n):
         di = self.target_di[i]
         norm_terms[i] = (di.upper - di.lower) / 2.0 
         di_amounts[i] = (di.lower + norm_terms[i] - di_so_far[i].amount) / norm_terms[i]
             
         for j in range(m):
             food = foods[j]
             try:
                 food_mat[i][j] = food.nut_amounts[di.nut] / norm_terms[i]
             except KeyError:
                 pass
                 # print "Warning: No nutrient data for " + str(nut) + " in " + str(food)
                 # missing[nut] = (missing.get(nut) or 0) + 1
     amounts, error = op.nnls(food_mat, di_amounts)
     for j in range(m):
         if amounts[j] > 0:
             self.add_food(foods[j], amounts[j] * 100.0)
     return error
Example #16
def run_regression(result, dates):
    mid = []
    future = []
    sample_freq = 120
    future_horizon = 120 * 0.5
    signals = defaultdict(list)
    for adate in dates:
        data = result[adate]
        signals_tmp = get_signals(data)
        mid_tmp = midpoint(data)
        base_ind, offset_inds = generate_offset_index(len(data), [future_horizon],
                                                      base = subsample_nsample(data, sample_freq),
                                                      burn_in = sample_freq)
        for key in signals_tmp.keys():
            signals[key] += list(signals_tmp[key][base_ind])
        mid +=(list(mid_tmp[base_ind]))
        future +=(list(mid_tmp[offset_inds[0]]))
    names = signals.keys()
    X = np.c_[[np.array(signals[nm]) for nm in names]].T
    Y = np.array(future) - np.array(mid)
    #beta, res, rank, s = np.linalg.lstsq(X, Y)
    beta, rrs = nnls(X, Y)
    valuation = np.dot(X, beta)
    print('Regression Dates  : %s - %s' % (min(dates), max(dates)))
    print('Regression Results: %s' % (str(dict(zip(names, beta)))))
    return names, dict(zip(names, list(beta))), np.abs(valuation).mean()
Example #17
 def residuum(arr):
     cali.sV0 = float(arr)
     sigma = cali.responses
     sigma[0] = 1e6*np.ones_like(cali.tau_arr)
     data[0] = 1e6
     residual = nnls(sigma, data)[1]
     return residual
Example #18
def do_nnls(A,b):
    n = b.shape[1]
    out = np.zeros((A.shape[1], n))
    for i in range(n):
        #mls.bounded_lsq(A.T, b[:,i], np.zeros((A.shape[1],1)), np.ones((A.shape[1],1))).shape
        out[:,i] =  nnls(A, b[:,i])[0]
    return out
Example #19
def nnls_fit(spectrum, expected_matrix):
    """
    Non-negative least squares fitting.

    Parameters
    ----------
    spectrum : array
        spectrum of experiment data
    expected_matrix : array
        2D matrix of activated element spectrum

    Returns
    -------
    results : array
        weights of the different elements
    residue : float
        residual (2-norm of the fit error)

    Note
    ----
    This is merely a domain-specific wrapper of
    scipy.optimize.nnls. Note that the order of arguments
    is swapped relative to scipy. Confusing for scipy users,
    perhaps, but more natural for domain-specific users.
    """
    experiments = spectrum
    standard = expected_matrix

    [results, residue] = nnls(standard, experiments)
    return results, residue
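
An illustrative call, to make the swapped argument order concrete (synthetic arrays; real reference spectra would come from the surrounding package):

import numpy as np

spectrum = np.random.rand(50)   # measured spectrum, length m
ref = np.random.rand(50, 4)     # reference spectra as columns, shape (m, 4)

amplitudes, err = nnls_fit(spectrum, ref)
# equivalent to: scipy.optimize.nnls(ref, spectrum)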
Example #20
def get_coeffs(file_list, target_path, is_mono):
    """Calculate weighted mixing coefficients.

    Parameters
    ----------
    file_list : list
        List of files to calculate coefficients of.
    target_path: str
        Path to file that the list will be tested against.
    is_mono: bool
        True if input file is mono. Default=False.

    Returns
    -------
    mixing_coeffs : dict
        Dictionary of each file and its associated mixing coefficient
        relative to target path.
    """

    target_audio = loadmono(target_path)

    full_audio = np.vstack(
        [loadmono(f, is_mono=is_mono) for f in file_list]
    )

    coeffs, _ = nnls(full_audio.T, target_audio.T)

    base_keys = [os.path.basename(s) for s in file_list]

    mixing_coeffs = { 
        i : float(c) for i, c in zip(base_keys, coeffs)
    }

    return mixing_coeffs
Example #21
 def estimate_expression(feat_class, pieces, ids):
     #--- Build the exons-transcripts structure matrix:
     # Lines are exons, columns are transcripts,
     # so that A[i,j]!=0 means "transcript Tj contains exon Ei".
     if feat_class == Gene:
         is_in = lambda x,g: g in x.gene_id.split('|')
     elif feat_class == Transcript:
         is_in = lambda x,t: t in x.transcripts
     n = len(pieces)
     m = len(ids)
     A = zeros((n,m))
     for i,p in enumerate(pieces):
         for j,f in enumerate(ids):
             A[i,j] = 1. if is_in(p,f) else 0.
     #--- Build the exons scores vector
     E = asarray([p.rpk for p in pieces])
     #--- Solve for RPK
     T,rnorm = nnls(A,E)
     #--- Store result in *feat_class* objects
     feats = []
     for i,f in enumerate(ids):
         exs = sorted([e for e in exons if is_in(e,f)], key=lambda x:(x.start,x.end))
         flen = sum(p.length for p in pieces if is_in(p,f))
         feats.append(feat_class(name=f, start=exs[0].start, end=exs[-1].end,
                 length=flen, rpk=T[i], count=fromRPK(T[i],flen,options['normalize']),
                 chrom=exs[0].chrom, gene_id=exs[0].gene_id, gene_name=exs[0].gene_name))
     return feats
Example #22
def weighted_nnls_fit(spectrum, expected_matrix, constant_weight=10):
    """
    Non-negative least squares fitting with weight.

    Parameters
    ----------
    spectrum : array
        spectrum of experiment data
    expected_matrix : array
        2D matrix of activated element spectrum
    constant_weight : float
        value used to calculate weight like so:
        weights = constant_weight / (constant_weight + spectrum)

    Returns
    -------
    results : array
        weights of the different elements
    residue : float
        residual (2-norm of the fit error)
    """
    experiments = spectrum
    standard = expected_matrix

    weights = constant_weight / (constant_weight + experiments)
    weights = np.abs(weights)
    weights = weights/np.max(weights)

    a = np.transpose(np.multiply(np.transpose(standard), np.sqrt(weights)))
    b = np.multiply(experiments, np.sqrt(weights))

    [results, residue] = nnls(a, b)

    return results, residue
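
The weighting above multiplies each row of the system by sqrt(w_i), i.e. it solves the weighted problem min ||diag(sqrt(w)) (A x - b)|| with x >= 0. A self-contained check of that equivalence (illustrative data):

import numpy as np
from scipy.optimize import nnls

spectrum = np.random.rand(50)
ref = np.random.rand(50, 4)

res_w, err_w = weighted_nnls_fit(spectrum, ref, constant_weight=10)

# The same fit done by hand: scale each row by sqrt(w_i)
w = 10 / (10 + spectrum)
w = w / w.max()
res_manual, _ = nnls(ref * np.sqrt(w)[:, None], spectrum * np.sqrt(w))
assert np.allclose(res_w, res_manual)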
Example #23
def solution_bounds(endmember_occupancies):
    """
    Parameters
    ----------
    endmember_occupancies : 2d array of floats
        A 1D array for each endmember in the solid solution,
        containing the number of atoms of each element on each site.

    Returns
    -------
    solution_bounds : 2d array of floats
        An abbreviated version of endmember_occupancies, 
        where the columns represent the independent compositional 
        bounds on the solution
    """
    # Find bounds for the solution
    i_sorted = list(zip(*sorted([(i,
                                  sum([1 for val in endmember_occupancies.T[i]
                                       if val > 1.e-10]))
                                 for i in range(len(endmember_occupancies.T))
                                 if np.any(endmember_occupancies.T[i] > 1.e-10)],
                                key=lambda x: x[1])))[0]

    solution_bounds = endmember_occupancies[:,i_sorted[0],np.newaxis]
    for i in i_sorted[1:]:
        if np.abs(nnls(solution_bounds, endmember_occupancies.T[i])[1]) > 1.e-10:
            solution_bounds = np.concatenate((solution_bounds,
                                              endmember_occupancies[:,i,np.newaxis]),
                                             axis=1)
    return solution_bounds
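
The nnls call above is used as a cone-membership test: the returned residual is (numerically) zero exactly when the candidate column is a non-negative combination of the columns already collected. The same idea in isolation (illustrative vectors):

import numpy as np
from scipy.optimize import nnls

basis = np.array([[1.0, 0.0],
                  [0.0, 1.0],
                  [0.0, 0.0]])
inside = np.array([2.0, 3.0, 0.0])   # = 2*b0 + 3*b1
outside = np.array([0.0, 0.0, 1.0])  # outside the non-negative span

print(nnls(basis, inside)[1])   # ~0.0  -> redundant column
print(nnls(basis, outside)[1])  # 1.0   -> genuinely new direction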
Example #24
def nnls_fit(spectrum, expected_matrix, weights=None):
    """
    Non-negative least squares fitting.

    Parameters
    ----------
    spectrum : array
        spectrum of experiment data
    expected_matrix : array
        2D matrix of activated element spectrum
    weights : array, optional
        for weighted nnls fitting. Setting weights as None means fitting
        without weights.

    Returns
    -------
    results : array
        weights of different element
    residue : float
        error

    Note
    ----
    nnls is chosen as amplitude of each element should not be negative.
    """
    if weights is not None:
        expected_matrix = np.transpose(np.multiply(np.transpose(expected_matrix), np.sqrt(weights)))
        spectrum = np.multiply(spectrum, np.sqrt(weights))
    return nnls(expected_matrix, spectrum)
Example #25
def nmfTransform(R, nmfResult, flatX):
    """ Replace the existing numpy implementation to work on sparse tensor """
    W = np.zeros((flatX.shape[0], R))
    coef = nmfResult.coef().todense().transpose()
    for j in range(0, flatX.shape[0]):
        W[j, :], _ = nnls(coef, np.ravel(flatX.getrow(j).todense()))
    return W
Example #26
        def main():
            if parameters["-k"] not in ["7","8"]:
                print(usage)
                print("Problem(s):")
                print("-"*50)
                print("-k parameter has to be 7, or 8")
                print("-"*50)
                
            else:
                data=kmer()

                db,organisms=loadDB()

                #find the best set of organisms that reconstruct the user metagenome using NNLS
                weights=normalise(nnls(db,data)[0])

                c=6
                for i in [0]:
                    labels,fracs,level=GetResults(i,organisms,weights)

                    c+=1

                #Writes tabular output!
                o=open(parameters["-q"]+"__output.txt","w+")
                o.write("Query: "+parameters["-q"]+"\n")
                o.write("K-mer size: "+parameters["-k"]+"\n\n")
                for result in tabular:
                    o.write(result)
                o.close()
Example #27
def ridge_regression(X_y, alpha=0.15):
    r"""Fits an L2-penalized linear regression to the data.

    The ridge coefficients are constrained to be non-negative and minimize

    .. math::

       \min\limits_{w \ge 0} \|X w - y\|_2^2 + \alpha^2 \|w\|_2^2

    Parameters
    ----------
    X_y : (N, M + 1) array_like
        Observation matrix. The first M columns are observations. The
        last column corresponds to the target values.
    alpha : float
        Penalization strength. Larger values make the solution more robust
        to collinearity.

    Returns
    -------
    w : (M, ) ndarray
        Non-negative ridge coefficients.
    """
    X_y = np.atleast_2d(X_y)
    X, y = X_y[:, :-1], X_y[:, -1]

    M = X.shape[1]
    X_new = np.append(X, alpha * np.eye(M), axis=0)
    y_new = np.append(y, np.zeros(M))
    w, _residuals = nnls(X_new, y_new)
    return w
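
A usage sketch for the non-negative ridge above (synthetic data, illustrative names):

import numpy as np

rng = np.random.default_rng(42)
X = rng.random((30, 3))
y = X @ np.array([1.0, 0.5, 2.0]) + 0.01 * rng.standard_normal(30)

X_y = np.column_stack([X, y])           # observations plus target in the last column
w = ridge_regression(X_y, alpha=0.15)   # non-negative, shrunk towards zero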
Example #28
def solve_nnls(x, y, kernel=None, params=None, design=None):
    """
    Solve the mixture problem using NNLS

    Parameters
    ----------
    x : ndarray
    y : ndarray

    kernel : callable
    params : list

    """
    if design is None and (kernel is None or params is None):
        e_s = "Need to provide either design matrix, or kernel and list of"
        e_s += "params for generating the design matrix"
        raise ValueError(e_s)

    if design is None:
        A = parameters_to_regressors(x, kernel, params)
    else:
        A = design
    y = y.ravel()
    beta_hat, rnorm = opt.nnls(A, y)
    return beta_hat, rnorm
Example #29
def optimise_twodgaussian_amp_nnls(pars, parPSF=_default_parPSF, r=None, theta=None, data=None) :
    """
    Returns the best set of amplitude for a given set of q,sigma,pa
    for a set of N 2D Gaussian functions
    The function returns the result of the NNLS solving (scipy)
    pars  : input parameters including q, sigma, pa (in degrees)
    r     : radii
    theta : angle for each point in radians
    data  : data to fit
       data, theta and r should have the same size
    """

    pars = pars.ravel()

    ## First get the normalised values from the gaussians
    ## We normalised this also to 1/data to have a sum = 1
    nGnorm = _n_centred_twodgaussian_Datanorm(pars=pars, parPSF=parPSF)(r,theta,data)

    ## This is the vector we wish to get as close as possible
    ## The equation being : Sum_n In * (G2D_n) = 1.0
    ##                   or       I  x    G    = d
    d = np.ones(np.size(r), dtype=floatFit)

    ## Use NNLS to solve the linear bounded (0) equations
    sol_nnls, norm_nnls = nnls(nGnorm, d)
    return nGnorm, sol_nnls
Example #30
def nn_ls_fit(data, spect, max_bins=16, min_norm=10**-4):
    #not used?
    #uses non-negative least squares to fit data
    #spect is library array
    #match wavelength of spectra to data; change into appropriate format
    model = {}
    for i in range(spect[0, :].shape[0]):
        if i == 0:
            model['wave'] = nu.copy(spect[:, i])
        else:
            model[str(i-1)] = nu.copy(spect[:, i])

    model = data_match_new(data, model, spect[0, :].shape[0] - 1)
    index = nu.int64(list(model.keys()))
    
    #nnls fit handles uncertainty now
    if data.shape[1] == 2:
        N, chi = nnls(nu.array(list(model.values())).T[:,
                nu.argsort(nu.int64(nu.array(list(model.keys()))))],
                     data[:, 1])
    elif data.shape[1] == 3:
        N, chi = nnls(nu.array(list(model.values())).T[:,
                nu.argsort(nu.int64(nu.array(list(model.keys()))))] / nu.tile(data[:, 2], (bins, 1)).T, data[:, 1] / data[:, 2])
    N = N[index.argsort()]
    
    #check if above max number of bins
    if len(N[N > min_norm]) > max_bins:
        #remove the lowest normalization
        N_max_arg = nu.nonzero(N > min_norm)[0]
        N_max_arg = N_max_arg[N[N_max_arg].argsort()]
        #sort by norm value
        current = []
        for i in range(N_max_arg.shape[0] - 1, -1, -1):
            current.append(info[N_max_arg[i]])
            if len(current) == max_bins:
                break
        current = nu.array(current)
    else:
        current = info[N > min_norm]
    metal, age=[], []
    for i in current:
        metal.append(float(i[4: 10]))
        age.append(float(i[11: -5]))
    metal, age=nu.array(metal), nu.array(age)
    #check if any left
    return (metal[nu.argsort(age)], age[nu.argsort(age)], 
            N[N > min_norm][nu.argsort(age)])
Example #31
        _ff = ff[[n], :]
        t, _ffsw = sliding_window(_ff,
                                  fs=fs,
                                  window_size=window_size,
                                  step_size=step_size)
        _ffpower = np.sum(_ffsw**2, axis=-1) / _ffsw.shape[-1]
        power.append(_ffpower)
    power = np.stack(power)
    t, _p = sliding_window(pupil, fs=fs, window_size=4, step_size=2)
    pds = np.mean(_p, axis=-1)[np.newaxis, :]

    power = scale(power, with_mean=True, with_std=True, axis=-1)
    pds = scale(pds, with_mean=True, with_std=True, axis=-1)

    # do nnls regression to avoid the sign ambiguity due to power conversion
    x, r = nnls(power.T, -pds.squeeze())
    second_order_weights = x

    if np.linalg.norm(x) == 0:
        sow_norm = x
    else:
        sow_norm = x / np.linalg.norm(x)

    # project weights back into neuron space (then they can be compared with PC weights too)
    fow_nspace = pca.components_.T.dot(fow_norm)
    sow_nspace = pca.components_.T.dot(sow_norm)

    # compute cosine similarities
    cos_fow_sow = fow_nspace.dot(sow_nspace)
    cos_fow_PC1 = fow_nspace.dot(pca.components_[0])
    cos_sow_PC1 = sow_nspace.dot(pca.components_[0])
Example #32
def minfn(data, model, theTemp, doHyst):
    """
    Using an assumed value for gamma (already stored in the model), find optimum
    values for remaining cell parameters, and compute the RMS error between true
    and predicted cell voltage
    """

    alltemps = [d.temp for d in data]
    ind, = np.where(np.array(alltemps) == theTemp)[0]

    G = abs(model.GParam[ind])

    Q = abs(model.QParam[ind])
    eta = abs(model.etaParam[ind])
    RC = abs(model.RCParam[ind])
    numpoles = len(RC)

    ik = data[ind].s1.current.copy()
    vk = data[ind].s1.voltage.copy()
    tk = np.arange(len(vk))
    etaik = ik.copy()
    etaik[ik < 0] = etaik[ik < 0] * eta

    hh = 0*ik
    sik = 0*ik
    fac = np.exp(-abs(G * etaik/(3600*Q)))

    for k in range(1, len(ik)):
        hh[k] = (fac[k-1]*hh[k-1]) - ((1-fac[k-1])*np.sign(ik[k-1]))
        sik[k] = np.sign(ik[k])
        if abs(ik[k]) < Q/100:
            sik[k] = sik[k-1]

    # First modeling step: Compute error with model = OCV only
    vest1 = data[ind].OCV
    verr = vk - vest1

    # Second modeling step: Compute time constants in "A" matrix
    y = -np.diff(verr)
    u = np.diff(etaik)
    A = SISOsubid(y, u, numpoles)

    # Modify results to ensure real, preferably distinct, between 0 and 1

    eigA = np.linalg.eigvals(A)
    eigAr = eigA + 0.001 * np.random.normal(loc=0.0, scale=1.0, size=eigA.shape)
    eigA[eigA != np.conj(eigA)] = abs(eigAr[eigA != np.conj(eigA)]) # Make sure real
    eigA = np.real(eigA)                                            # Make sure real
    eigA[eigA<0] = abs(eigA[eigA<0])    # Make sure in range 
    eigA[eigA>1] = 1 / eigA[eigA>1]
    RCfact = np.sort(eigA)
    RCfact = RCfact[-numpoles:]
    RC = -1 / np.log(RCfact)

    # Compute RC time constants as in Plett's Matlab ESCtoolbox
    # nup = numpoles
    # while 1:
    #     A = SISOsubid(y, u, nup)

    #     # Modify results to ensure real, preferably distinct, between 0 and 1
    #     eigA = np.linalg.eigvals(A)
    #     eigA = np.real(eigA[eigA == np.conj(eigA)])   # Make sure real
    #     eigA = eigA[(eigA>0) & (eigA<1)]    # Make sure in range 
    #     okpoles = len(eigA)
    #     nup = nup + 1
    #     if okpoles >= numpoles:
    #         break
    #     # print(nup)

    # RCfact = np.sort(eigA)
    # RCfact = RCfact[-numpoles:]
    # RC = -1 / np.log(RCfact)

    # Simulate the R-C filters to find R-C currents
    stsp = dlti(np.diag(RCfact), np.vstack(1-RCfact), np.eye(numpoles), np.zeros((numpoles, 1))) 
    [tout, vrcRaw, xout] = dlsim(stsp, etaik)

    # Third modeling step: Hysteresis parameters
    if doHyst:
        H = np.column_stack((hh, sik, -etaik, -vrcRaw))
        W = nnls(H, verr)
        M = W[0][0]
        M0 = W[0][1]
        R0 = W[0][2]
        Rfact = W[0][3:].T
    else:
        H = np.column_stack((-etaik, -vrcRaw))
        W = np.linalg.lstsq(H,verr, rcond=None)[0]
        M = 0
        M0 = 0
        R0 = W[0]
        Rfact = W[1:].T

    idx, = np.where(np.array(model.temps) == data[ind].temp)[0]
    model.R0Param[idx] = R0
    model.M0Param[idx] = M0
    model.MParam[idx] = M
    model.RCParam[idx] = RC.T
    model.RParam[idx] = Rfact.T

    vest2 = vest1 + M*hh + M0*sik - R0*etaik - vrcRaw @ Rfact.T
    verr = vk - vest2

    # plot voltages
    plt.figure(1)
    plt.plot(tk[::10]/60, vk[::10], label='voltage')
    plt.plot(tk[::10]/60, vest1[::10], label='vest1 (OCV)')
    plt.plot(tk[::10]/60, vest2[::10], label='vest2 (DYN)')
    plt.xlabel('Time (min)')
    plt.ylabel('Voltage (V)')
    plt.title(f'Voltage and estimates at T = {data[ind].temp} C')
    plt.legend(loc='best', numpoints=1)
    #plt.show()

    # plot modeling errors
    plt.figure(2)
    plt.plot(tk[::10]/60, verr[::10], label='verr')
    plt.xlabel('Time (min)')
    plt.ylabel('Error (V)')
    plt.title(f'Modeling error at T = {data[ind].temp} C')
    #plt.show()

    # Compute RMS error only on data roughly in 5% to 95% SOC
    v1 = OCVfromSOCtemp(0.95, data[ind].temp, model)[0]
    v2 = OCVfromSOCtemp(0.05, data[ind].temp, model)[0]
    N1 = np.where(vk < v1)[0][0]
    N2 = np.where(vk < v2)[0][0]

    rmserr = np.sqrt(np.mean(verr[N1:N2]**2))
    cost = np.sum(rmserr)
    print(f'RMS error = {cost*1000:.2f} mV')

    return cost, model
Example #33
    def solve_SS_network(self, T, P):
        """
        calculates the steady state concentrations if all A => B + C
        reactions are irreversible and the flux from/to the source
        configuration is 1.0
        """
        A = np.zeros((len(self.isomers),len(self.isomers)))
        b = np.zeros(len(self.isomers))
        bimolecular = len(self.source) > 1
        
        isomerSpcs = [iso.species[0] for iso in self.isomers]
        

        for rxn in self.netReactions:
            if rxn.reactants[0] in isomerSpcs:
                ind = isomerSpcs.index(rxn.reactants[0])
                kf = rxn.getRateCoefficient(T,P)
                A[ind,ind] -= kf
            else:
                ind = None
            if rxn.products[0] in isomerSpcs:
                ind2 = isomerSpcs.index(rxn.products[0])
                kr = rxn.getRateCoefficient(T,P)/rxn.getEquilibriumConstant(T)
                A[ind2,ind2] -= kr
            else:
                ind2 = None

            if ind is not None and ind2 is not None:
                A[ind,ind2] += kr
                A[ind2,ind] += kf

            if bimolecular:
                if rxn.reactants[0] == self.source:
                    kf = rxn.getRateCoefficient(T,P)
                    b[ind2] += kf
                elif rxn.products[0] == self.source:
                    kr = rxn.getRateCoefficient(T,P)/rxn.getEquilibriumConstant(T)
                    b[ind] += kr
        
        
        if not bimolecular:
            ind = isomerSpcs.index(self.source[0])
            b[ind] = -1.0 #flux at source
        else:
            b = -b/b.sum() #1.0 flux from source
        
        if len(b) == 1:
            return np.array([b[0]/A[0,0]])
        
        con = np.linalg.cond(A)
        
        if np.log10(con) < 15:
            c = np.linalg.solve(A,b)
        else:
            logging.warning("Matrix Ill-conditioned, attempting to use Arbitrary Precision Arithmetic")
            mp.dps = 30+int(np.log10(con))
            Amp = mp.matrix(A.tolist())
            bmp = mp.matrix(b.tolist())
            
            try:
                c = mp.qr_solve(Amp,bmp)

                c = np.array(list(c[0]))

                if any(c<=0.0):
                    c, rnorm = opt.nnls(A,b)

                c = c.astype(np.float64)
            except: #fall back to raw flux analysis rather than solve steady state problem
                return None
        return c
Example #34
                index_p += 1
                index_B += 1
        index_a += 1
        index_Theta += 1
    index_Phi += 1

LamInv = np.linalg.pinv(LambdaM)
Experiment = IntensityDC2.flat
pVec1 = np.dot(LamInv, Experiment)

# read weights from a file
pMatrix = np.reshape(pVec1, (len(Phi), len(Theta)))
TheoryVec = np.dot(LambdaM, pVec1)
TheoryMatr = np.reshape(TheoryVec, (765, 29))

pVec2, rnorm1 = nnls(LambdaM, Experiment)
pMatrix2 = np.reshape(pVec2, (len(Phi), len(Theta)))
TheoryVec2 = np.dot(LambdaM, pVec2)
TheoryMatr2 = np.reshape(TheoryVec2, (765, 29))

pVec3, rnorm2 = nnls(LambdaMepr, Experiment)
pMatrix3 = np.reshape(pVec3, (len(Phi), len(Theta)))
TheoryVec3 = np.dot(LambdaMepr, pVec3)
TheoryMatr3 = np.reshape(TheoryVec3, (765, 29))

gnufile = open('TwoTrpTheor5nnlsEPR.dat', 'w+')
for i in range(765):
    for j in range(29):
        gnufile.write(
            str(freqDC2[i]) + '  ' + str(fieldDC2[j]) + '  ' +
            str(TheoryMatr[i][j]) + '  ' + str(TheoryMatr2[i][j]) + '  ' +
Example #35
def mixed_netNMF(data,
                 KNN_glap,
                 k=3,
                 l=200,
                 maxiter=250,
                 eps=1e-15,
                 err_tol=1e-4,
                 err_delta_tol=1e-8,
                 verbose=False):
    # Initialize H and W Matrices from data array if not given
    r, c = data.shape[0], data.shape[1]
    # Initialize H
    H_init = np.random.rand(k, c)
    H = np.maximum(H_init, eps)
    # Initialize W
    W_init = np.linalg.lstsq(H.T, data.T)[0].T
    W_init = np.dot(W_init, np.diag(1 / sum(W_init)))
    W = np.maximum(W_init, eps)

    if verbose:
        print('W and H matrices initialized')

    # Get graph matrices from laplacian array
    D = np.diag(np.diag(KNN_glap)).astype(float)
    A = (D - KNN_glap).astype(float)
    if verbose:
        print('D and A matrices calculated')
    # Set mixed netNMF reconstruction error convergence factor
    XfitPrevious = np.inf

    # Updating W and H
    for i in range(maxiter):
        XfitThis = np.dot(W, H)
        WHres = np.linalg.norm(data - XfitThis)  # Reconstruction error

        # Change in reconstruction error
        if i == 0:
            fitRes = np.linalg.norm(XfitPrevious)
        else:
            fitRes = np.linalg.norm(XfitPrevious - XfitThis)
        XfitPrevious = XfitThis

        # Reporting netNMF update status
        if (verbose) & (i % 10 == 0):
            print('Iteration >>', i, 'Mat-res:', WHres, 'Lambda:', l,
                  'Wfrob:', np.linalg.norm(W))
        if (err_delta_tol > fitRes) | (err_tol > WHres) | (i + 1 == maxiter):
            if verbose:
                print('NMF completed!')
                print('Total iterations:', i + 1)
                print('Final Reconstruction Error:', WHres)
                print('Final Reconstruction Error Delta:', fitRes)
            numIter = i + 1
            finalResidual = WHres
            break

        # Note about this part of the netNMF function:
        # There used to be a small block of code that would dynamically change l
        # to improve the convergence of the algorithm. We did not see any mathematical
        # or statistical support to have this block of code here. It seemed to just
        # add confusion in the final form of the algorithm. Therefore it has been removed.
        # The default l parameter is fine here, but the regularization constant can
        # be changed by the user if so desired.

        # Terms to be scaled by regularization constant: l
        KWmat_D = np.dot(D, W)
        KWmat_W = np.dot(A, W)

        # Update W with network constraint
        W = W * ((np.dot(data, H.T) + l * KWmat_W + eps) /
                 (np.dot(W, np.dot(H, H.T)) + l * KWmat_D + eps))
        W = np.maximum(W, eps)
        # Normalize W across each gene (row-wise)
        W = W / matlib.repmat(np.maximum(sum(W), eps), len(W), 1)

        # Update H
        H = np.array([nnls(W, data[:, j])[0] for j in range(c)]).T
        # ^ Hofree uses a custom fast non-negative least squares solver here, we will use scipy's implementation here
        H = np.maximum(H, eps)

    return W, H, numIter, finalResidual
Example #36
 def NNLS_recon(self, ps, PhiRecon):
     chis = -np.log(2 * (ps - .5))
     return op.nnls(PhiRecon, np.squeeze(chis))[0]
Example #37
def mixed_netNMF_debug(data,
                       KNN_glap,
                       W_init=None,
                       H_init=None,
                       k=3,
                       l=200,
                       maxiter=250,
                       eps=1e-15,
                       err_tol=1e-4,
                       err_delta_tol=1e-8,
                       verbose=False):
    # Initialize H and W Matrices from data array if not given
    r, c = data.shape[0], data.shape[1]
    # Initialize H
    if H_init is None:
        H_init = np.random.rand(k, c)
        H = np.maximum(H_init, eps)
    else:
        # Check H_init dimensions
        if H_init.shape == (k, c):
            H = np.copy(H_init)
        else:
            raise ValueError('H_init dimensions must be ' + repr(k) + ' x ' +
                             repr(c))
    # Initialize W
    if W_init is None:
        W_init = np.linalg.lstsq(H.T, data.T)[0].T
        W_init = np.dot(W_init, np.diag(1 / sum(W_init)))
        W = np.maximum(W_init, eps)
    else:
        # Check W_init dimensions
        if W_init.shape == (r, k):
            W = np.copy(W_init)
        else:
            raise ValueError('W_init dimensions must be ' + repr(r) + ' x ' +
                             repr(k))
    if verbose:
        print('W and H matrices initialized')

    # Get graph matrices from laplacian array
    D = np.diag(np.diag(KNN_glap)).astype(float)
    A = (D - KNN_glap).astype(float)
    if verbose:
        print('D and A matrices calculated')
    # Set mixed netNMF reporting variables
    resVal, fitResVect, timestep, Wlist, Hlist = [], [], [], [], []
    XfitPrevious = np.inf

    # Updating W and H
    for i in range(maxiter):
        iter_time = time.time()
        XfitThis = np.dot(W, H)
        WHres = np.linalg.norm(data - XfitThis)  # Reconstruction error

        # Change in reconstruction error
        if i == 0:
            fitRes = np.linalg.norm(XfitPrevious)
        else:
            fitRes = np.linalg.norm(XfitPrevious - XfitThis)
        XfitPrevious = XfitThis
        # Tracking reconstruction errors and residuals
        resVal.append(WHres)
        fitResVect.append(fitRes)
        Wlist.append(W)
        Hlist.append(H)
        if (verbose) & (i % 10 == 0):
            print('Iteration >>', i, 'Mat-res:', WHres, 'Gamma:', l,
                  'Wfrob:', np.linalg.norm(W))
        if (err_delta_tol > fitRes) | (err_tol > WHres) | (i + 1 == maxiter):
            if verbose:
                print('NMF completed!')
                print('Total iterations:', i + 1)
                print('Final Reconstruction Error:', WHres)
                print('Final Reconstruction Error Delta:', fitRes)
            numIter = i + 1
            finalResidual = WHres
            break

        # Note about this part of the netNMF function:
        # There used to be a small block of code that would dynamically change l
        # to improve the convergence of the algorithm. We did not see any mathematical
        # or statistical support to have this block of code here. It seemed to just
        # add confusion in the final form of the algorithm. Therefore it has been removed.
        # The default l parameter is fine here, but the regularization constant can
        # be changed by the user if so desired.

        # Terms to be scaled by regularization constant: l
        KWmat_D = np.dot(D, W)
        KWmat_W = np.dot(A, W)

        # Update W with network constraint
        W = W * ((np.dot(data, H.T) + l * KWmat_W + eps) /
                 (np.dot(W, np.dot(H, H.T)) + l * KWmat_D + eps))
        W = np.maximum(W, eps)
        W = W / matlib.repmat(np.maximum(sum(W), eps), len(W), 1)

        # Update H
        H = np.array([nnls(W, data[:, j])[0] for j in range(c)]).T
        # ^ Hofree uses a custom fast non-negative least squares solver here, we will use scipy's implementation here
        H = np.maximum(H, eps)

        # Track each iterations' time step
        timestep.append(time.time() - iter_time)

    return W, H, numIter, finalResidual, resVal, fitResVect, Wlist, Hlist, timestep
Example #38
 def _reweight(self, f):
     self.w[f] = 1.
     nz_idcs = self.w > 0
     res = nnls(self.A[:, nz_idcs], self.b, maxiter=100 * self.A.shape[1])
     self.w[nz_idcs] = res[0]
     return
Example #39
if __name__ == '__main__':

    # Generate random problem instance
    n = 1000000
    m = 3
    showMatrices = False

    A = np.random.rand(n, m)
    if showMatrices:
        print("A:")
        print(A)

    y = np.dot(A, np.random.randn(m))
    if showMatrices:
        print("y:")
        print(y)

    start = time.time()
    x_NNLS = nnls(A, y)[0]
    end = time.time()
    print("NNLS: " + str(end - start) + " Seconds")
    if showMatrices:
        print(x_NNLS)

    start = time.time()
    x_subspaceNNLS = subspaceNNLS(A, y)
    end = time.time()
    print("Subspace NNLS:" + str(end - start) + " Seconds")
    if showMatrices:
        print(x_subspaceNNLS)
Example #40
        print(i, loss.item())

        optimizerADAM.zero_grad()
        loss.backward()
        optimizerADAM.step()

        deep_nmf.apply(
            constraints)  # keep weights positive after gradient descent
        h_out = torch.transpose(out.data, 0, 1)
        h_out_t = out.data

        # NNLS
        # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.nnls.html

        w_arrays = [
            nnls(out.data.numpy(), V[f, mask])[0] for f in range(features)
        ]
        nnls_w = np.stack(w_arrays, axis=-1)
        dnmf_w = torch.from_numpy(nnls_w).float()

        # dnmf_w = dnmf_w * (h_out.mm(v_train)).div(h_out.mm(h_out_t).mm(dnmf_w))
        loss_values.append(loss.item())

    # test_inputs = (h_0_test, v_test)
    # start_iter = time.time()
    # netwrok_prediction = deep_nmf(*test_inputs)
    # dnmf_elapsed = round(time.time() - start_iter, 5)
    # dnmf_err = round(
    #     frobinuis_reconstruct_error(V[:, ~mask], dnmf_w.data.numpy().T, netwrok_prediction.data.numpy().T), 2)
    # mu_error = round(frobinuis_reconstruct_error(V[:, ~mask], W, fit.coef()), 2)
Example #41
 def nnls(A, b):
     # call SciPy's solver explicitly; returning nnls(A, b) here would
     # recurse into this wrapper forever
     from scipy.optimize import nnls as scipy_nnls
     return scipy_nnls(A, b)
Example #42
def nnls_reg(K, b, val):
    b_prime = r_[b, zeros(K.shape[1])]
    x, _ = nnls(A_prime(K, val), b_prime)
    return x
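
`A_prime` is defined elsewhere in the source; given that `b` is padded with K.shape[1] zeros, it presumably stacks a K.shape[1]-sized regularizer block under K. A self-contained equivalent under that assumption (the helper below is hypothetical, not the original):

import numpy as np
from scipy.optimize import nnls

def A_prime_assumed(K, val):
    # assumed behaviour: Tikhonov stack [K; sqrt(val) * I]
    return np.vstack([K, np.sqrt(val) * np.eye(K.shape[1])])

def nnls_reg_standalone(K, b, val):
    b_prime = np.concatenate([b, np.zeros(K.shape[1])])
    x, _ = nnls(A_prime_assumed(K, val), b_prime)
    return x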
Example #43
    def integrate(self, t_final, atol=1e-18, rtol=1e-10):
        """
        Adaptively integrate the system of equations assuming self.t and self.y set the initial value.
        :param t_final: (scalar) the final time to be reached.
        :param atol: the absolute tolerance parameter
        :param rtol: the relative tolerance parameter
        :return: current value of y
        """
        if not np.isscalar(t_final):
            raise ValueError(
                "t_final must be a scalar. If t_final is iterable consider using "
                "the list comprehension [integrate(t) for t in times].")

        sign_dt = np.sign(t_final - self.t)

        # Loop until the final time moment is reached
        while sign_dt * (t_final - self.t) > 0:
            #######################################################################################
            #
            #           Description of numerical methods
            #
            #   A formal solution of the system of linear ode y'(t) = M(t) y(t) reads as
            #
            #       y(t) = T exp[ \int_{t_init}^{t_fin} M(\tau) d\tau ] y(t_init)
            #
            #   where T exp is a Dyson time-ordered exponent. Hence,
            #
            #       y(t + dt) = T exp[ \int_{t}^{t+dt} M(\tau) d\tau ] y(t).
            #
            #   Dropping the time ordering operation leads to the cubic error
            #
            #       y(t + dt) = exp[ \int_{t}^{t+dt} M(\tau) d\tau ] y(t) + O( dt^3 ).
            #
            #   Employing the mid-point rule for the integration also leads to the cubic error
            #
            #       y(t + dt) = exp[  M(t + dt / 2) dt ] y(t) + O( dt^3 ).
            #
            #   Therefore, we finally get the linear equation w.r.t. unknown y(t + dt) [note y(t) is known]
            #
            #       exp[  -M(t + dt / 2) dt ] y(t + dt) = y(t) + O( dt^3 ),
            #
            #   which can be solved by scipy.optimize.nnls ensuring the non-negativity constrain for y(t + dt).
            #
            #######################################################################################

            # Initial guess for the time-step
            dt = 0.25 / norm(self.M(self.t, *self.M_args))

            # the time step must not take us above t_final
            dt = sign_dt * min(dt, abs(t_final - self.t))

            # Loop until an acceptable value of dt is found (adaptive step size integrator)
            while True:
                M = self.M(self.t + 0.5 * dt, *self.M_args)
                M = np.array(M, copy=False)
                M *= -dt

                new_y, residual = nnls(expm(M), self.y)

                # Adaptive step termination criterion
                if np.allclose(residual, 0., rtol, atol):
                    # the residual is small, so it seems we have the solution

                    # Additional check: If M is a transition rate matrix,
                    # then the sum of y must be preserved
                    if np.allclose(M.sum(axis=0), 0., rtol, atol):

                        # exit only if sum( y(t+dt) ) = sum( y(t) )
                        if np.allclose(sum(self.y), sum(new_y), rtol, atol):
                            break
                    else:
                        # M is not a transition rate matrix, thus exit
                        break

                if np.allclose(dt, 0., rtol, atol):
                    # print a warning if dt has become very small
                    print(
                        "Warning in nnl_ode: adaptive time-step became very small. "
                        "The numerical result may not be trustworthy."
                    )
                    break
                else:
                    # halve the time-step
                    dt *= 0.5

            # the dt propagation is successfully completed
            self.t += dt
            self.y = new_y

        return self.y
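
The propagation step in isolation: for y' = M(t) y, one mid-point step solves exp(-M dt) y(t+dt) = y(t) with NNLS, which keeps y(t+dt) non-negative. A minimal standalone sketch with a fixed 2-state transition-rate matrix (illustrative):

import numpy as np
from scipy.linalg import expm
from scipy.optimize import nnls

M = np.array([[-1.0,  0.5],
              [ 1.0, -0.5]])   # columns sum to zero (rate matrix)
y = np.array([1.0, 0.0])
dt = 0.01

y_next, residual = nnls(expm(-M * dt), y)
# for a rate matrix, sum(y_next) ~ sum(y) and all entries stay >= 0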
Example #44
 def fit(self, X, y):
     self.coef_, self.residuals_ = nnls(X, y)
     return self
Example #45
def fit_HH21inv(
	he,
	nu_max = 10,
	nu_min = -50,
	nnu = 300,
	non_neg = True,
	omega = 'auto',
	**kwargs
	):
	'''
	Fits D evolution data using the distributed activation energy model of
	Hemingway and Henkes (2021). This function solves for rho_nu, the
	regularized distribution of rates in lnk space. See HH21 Eq. X for
	notation and details. This function can estimate best-fit omega using
	Tikhonov regularization.
	
	Parameters
	----------

	he : isotopylog.HeatingExperiment
		`ipl.HeatingExperiment` instance containing the D data to be modeled.

	nu_max : float
		The maximum lnk value to consider. Defaults to `10`.

	nu_min : float
		The minimum lnk value to consider. Defaults to `-50`.

	nnu : int
		The number of nu values in the array such that
		dnu = (nu_max - nu_min)/nnu. Defaults to `300`.

	non_neg : boolean
		Tells the function whether or not to constrain the solution to be
		non-negative. Defaults to ``True``.

	omega : str or float
		The "smoothing parameter" to use. This can be a number or `auto`; if 
		'auto', the function uses Tikhonov regularization to calculate the
		optimal omega value. Defaults to `auto`.
	
	Returns
	-------

	rho_nu_inv : array-like
		Resulting regularized rho distribution, of length `n_nu`.

	omega : float
		If the inputted `omega = 'auto'`, then this is the best-fit omega value.
		If the inputted omega was a number, this is simply the same as the
		inputted value.

	res_inv : float
		Root mean square error of the inverse model fit, in D47 units.

	rgh_inv : float
		Roughness norm of the inverse model fit.

	Raises
	------

	TypeError
		If `omega` is not 'Auto' or float or int type.

	TypeError
		If unexpected keyword arguments are passed to `calc_L_curve`.

	See Also
	--------

	isotopylog.fit_HH21
		Method for fitting heating experiment data using the lognormal model
		of Hemingway and Henkes (2021).

	kDistribution.invert_experiment
		Method for generating a `kDistribution` instance from experimental
		data.

	Examples
	--------

	Basic implementation, assuming a `ipl.HeatingExperiment` instance `he`
	exists::
		
		#import modules
		import isotopylog as ipl

		#assume he is a HeatingExperiment instance
		results = ipl.fit_HH21inv(he, omega = 'auto')

	Same implementation, but if best-fit `omega` is known a priori::

		#import modules
		import isotopylog as ipl

		#assume best-fit omega is 3
		omega = 3

		#assume he is a HeatingExperiment instance
		results = ipl.fit_HH21inv(he, omega = 3)

	References
	----------

	[1] Forney and Rothman (2012) *J. Royal Soc. Inter.*, **9**, 2255--2267.\n
	[2] Hemingway and Henkes (2021) *Earth Planet. Sci. Lett.*, **566**, 116962.
	'''

	#extract variables
	tex = he.tex
	Gex = he.Gex
	nu = np.linspace(nu_min, nu_max, nnu)
	nt = len(tex)

	#calculate A matrix
	A = _calc_A(tex, nu)
	
	#calculate regularization matrix, R
	R = _calc_R(nnu)
	
	#calculate omega using L curve if necessary:
	if omega in ['auto', 'Auto']:

		#run L curve function to calculate best-fit omega
		omega = calc_L_curve(
			he,
			nu_max = nu_max,
			nu_min = nu_min,
			nnu = nnu,
			plot = False,
			**kwargs
			)

	#make sure omega is a scalar
	elif not isinstance(omega, float) and not isinstance(omega, int):

		omt = type(omega).__name__

		raise TypeError(
			'Attempting to input `omega` of type %s. Must be `int`, `float`'
			' or "auto".' % omt)

	#ensure it's float
	else:
		omega = float(omega)

	#concatenate A+R and Gex+zeros
	A_reg = np.concatenate(
		(A, R*omega))

	Gex_reg = np.concatenate(
		(Gex, np.zeros(nnu + 1)))

	#concatenate sum to unity constraint
	dnu = nu[1] - nu[0]
	nuvec = dnu*np.ones([1,nnu])

	A_reg_unity = np.concatenate((A_reg, nuvec))
	Gex_reg_unity = np.concatenate((Gex_reg, np.ones(1)))

	#calculate inverse results and estimated G
	if non_neg is True:
		# rho_nu_inv, _ = nnls(A_reg, Gex_reg)
		rho_nu_inv, _ = nnls(A_reg_unity, Gex_reg_unity)

	else:
		res = lsq_linear(A_reg, Gex_reg)
		rho_nu_inv = res.x

	Ghat = np.inner(A, rho_nu_inv)
	rgh = np.inner(R, rho_nu_inv)

	#convert to D47
	D47hat, _ = _calc_D_from_G(
		he.dex[0,0],
		Ghat,
		he.T,
		he.caleq,
		clumps = he.clumps,
		G_std = None,
		ref_frame = he.ref_frame
		)

	#calculate errors
	# res_inv = norm(Gex - Ghat)/nt**0.5
	res_inv = _calc_rmse(he.dex[:,0], D47hat)
	rgh_inv = norm(rgh)/nnu**0.5

	return rho_nu_inv, omega, res_inv, rgh_inv
Example #46
def frac_coverage_classify(dataset_in, clean_mask=None, no_data=-9999):
    """
    Description:
      Performs fractional coverage algorithm on given dataset. If no clean mask is given, the 'cf_mask'
      variable must be included in the input dataset, as it will be used to create a
      clean mask
    Assumption:
      - The implemented algorithm is defined for Landsat 5/Landsat 7; in order for it to
        be used for Landsat 8, the bands will need to be adjusted
    References:
      - Guerschman, Juan P., et al. "Assessing the effects of site heterogeneity and soil
        properties when unmixing photosynthetic vegetation, non-photosynthetic vegetation
        and bare soil fractions from Landsat and MODIS data." Remote Sensing of Environment
        161 (2015): 12-26.
    -----
    Inputs:
      dataset_in (xarray.Dataset) - dataset retrieved from the Data Cube (can be a derived
        product, such as a cloudfree mosaic; should contain
          coordinates: latitude, longitude
          variables: blue, green, red, nir, swir1, swir2
        If user does not provide a clean_mask, dataset_in must also include the cf_mask
        variable
    Optional Inputs:
      clean_mask (nd numpy array with dtype boolean) - true for values user considers clean;
        If none is provided, one will be created which considers all values to be clean.
    Output:
      dataset_out (xarray.Dataset) - fractional coverage results with no data = -9999; containing
          coordinates: latitude, longitude
          variables: bs, pv, npv
        where bs -> bare soil, pv -> photosynthetic vegetation, npv -> non-photosynthetic vegetation
    """
    # Default to masking nothing.
    if clean_mask is None:
        clean_mask = create_default_clean_mask(dataset_in)

    band_stack = []

    mosaic_clean_mask = clean_mask.flatten()
    #     mosaic_clean_mask = clean_mask

    for band in [
            dataset_in.blue.values, dataset_in.green.values,
            dataset_in.red.values, dataset_in.nir.values,
            dataset_in.swir1.values, dataset_in.swir2.values
    ]:
        band = band.astype(np.float32)
        band = band * 0.0001
        band = band.flatten()
        band_clean = np.full(band.shape, np.nan)
        band_clean[mosaic_clean_mask] = band[mosaic_clean_mask]
        band_stack.append(band_clean)

    band_stack = np.array(band_stack).transpose()

    for b in range(6):
        band_stack = np.hstack(
            (band_stack, np.expand_dims(np.log(band_stack[:, b]), axis=1)))
    for b in range(6):
        band_stack = np.hstack(
            (band_stack,
             np.expand_dims(np.multiply(band_stack[:, b], band_stack[:,
                                                                     b + 6]),
                            axis=1)))
    for b in range(6):
        for b2 in range(b + 1, 6):
            band_stack = np.hstack(
                (band_stack,
                 np.expand_dims(np.multiply(band_stack[:, b], band_stack[:,
                                                                         b2]),
                                axis=1)))
    for b in range(6):
        for b2 in range(b + 1, 6):
            band_stack = np.hstack(
                (band_stack,
                 np.expand_dims(np.multiply(band_stack[:, b + 6],
                                            band_stack[:, b2 + 6]),
                                axis=1)))
    for b in range(6):
        for b2 in range(b + 1, 6):
            band_stack = np.hstack((band_stack,
                                    np.expand_dims(np.divide(
                                        band_stack[:, b2] - band_stack[:, b],
                                        band_stack[:, b2] + band_stack[:, b]),
                                                   axis=1)))

    band_stack = np.nan_to_num(
        band_stack)  # Now a n x 63 matrix (assuming one acquisition)

    ones = np.ones(band_stack.shape[0])
    ones = ones.reshape(ones.shape[0], 1)
    band_stack = np.concatenate(
        (band_stack, ones),
        axis=1)  # Now a n x 64 matrix (assuming one acquisition)

    end_members = np.loadtxt(csv_file_path,
                             delimiter=',')  # Creates a 64 x 3 matrix

    SumToOneWeight = 0.02
    ones = np.ones(end_members.shape[1]) * SumToOneWeight
    ones = ones.reshape(1, end_members.shape[1])
    end_members = np.concatenate((end_members, ones),
                                 axis=0).astype(np.float32)

    result = np.zeros((band_stack.shape[0], end_members.shape[1]),
                      dtype=np.float32)  # Creates an n x 3 matrix

    for i in range(band_stack.shape[0]):
        if mosaic_clean_mask[i]:
            result[i, :] = (
                opt.nnls(end_members, band_stack[i, :])[0].clip(0, 2.54) *
                100).astype(np.int16)
        else:
            result[i, :] = np.ones((end_members.shape[1]), dtype=np.int16) * (
                -9999)  # Set as no data

    latitude = dataset_in.latitude
    longitude = dataset_in.longitude

    result = result.reshape(latitude.size, longitude.size, 3)

    pv_band = result[:, :, 0]
    npv_band = result[:, :, 1]
    bs_band = result[:, :, 2]

    pv_clean = np.full(pv_band.shape, -9999)
    npv_clean = np.full(npv_band.shape, -9999)
    bs_clean = np.full(bs_band.shape, -9999)
    pv_clean[clean_mask] = pv_band[clean_mask]
    npv_clean[clean_mask] = npv_band[clean_mask]
    bs_clean[clean_mask] = bs_band[clean_mask]

    rapp_bands = collections.OrderedDict([('bs', (['latitude',
                                                   'longitude'], bs_clean)),
                                          ('pv', (['latitude',
                                                   'longitude'], pv_clean)),
                                          ('npv', (['latitude',
                                                    'longitude'], npv_clean))])

    rapp_dataset = xr.Dataset(rapp_bands,
                              coords={
                                  'latitude': latitude,
                                  'longitude': longitude
                              })

    return rapp_dataset
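
A condensed sketch of the per-pixel unmixing step above: each pixel's feature vector is decomposed onto the endmember spectra with nnls and the fractions are scaled to percentages. The endmember matrix here is random stand-in data, not the CSV loaded at csv_file_path:

import numpy as np
from scipy import optimize as opt

rng = np.random.default_rng(1)
n_pixels, n_features, n_members = 5, 64, 3
end_members = rng.uniform(size=(n_features, n_members))   # stand-in spectra
pixels = rng.uniform(size=(n_pixels, n_features))         # stand-in feature rows

fractions = np.zeros((n_pixels, n_members), dtype=np.int16)
for i in range(n_pixels):
    f = opt.nnls(end_members, pixels[i, :])[0]
    fractions[i, :] = (f.clip(0, 2.54) * 100).astype(np.int16)
print(fractions)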
Example No. 47
0
    def ssp_fit(self,
                input_z,
                input_sigma,
                input_Av,
                fit_data,
                fit_scheme='nnls'):

        #Quick naming
        obs_wave = fit_data['obs_wave_resam']
        obs_flux_masked = fit_data['obs_flux_norm_masked']
        rest_wave = fit_data['basesWave_resam']
        bases_flux = fit_data['bases_flux_norm']
        int_mask = fit_data['int_mask']
        obsFlux_mean = fit_data['normFlux_obs']

        #Apply physical data to stellar grid
        ssp_grid = self.physical_SED_model(rest_wave, obs_wave, bases_flux,
                                           input_Av, input_z, input_sigma, 3.1)
        ssp_grid_masked = (int_mask * ssp_grid.T).T

        #---Leasts square fit
        if fit_scheme == 'lsq':
            start = timer()
            optimize_result = lsq_linear(ssp_grid_masked,
                                         obs_flux_masked,
                                         bounds=(0, inf))
            end = timer()
            print('lsq time', (end - start))

            coeffs_bases = optimize_result.x * obsFlux_mean

        elif fit_scheme == 'nnls':
            start = timer()
            optimize_result = nnls(ssp_grid_masked, obs_flux_masked)
            end = timer()
            print('nnls time', (end - start), '\n')

            coeffs_bases = optimize_result[0] * obsFlux_mean

        #---Linear fitting without restrictions
        else:

            start = timer()

            #First guess
            coeffs_bases = self.linfit1d(obs_flux_masked, obsFlux_mean,
                                         ssp_grid_masked, inv_pdl_error_i)

            #Count positive and negative coefficients
            idx_plus_0 = coeffs_bases[:] > 0
            plus_coeff = idx_plus_0.sum()
            neg_coeff = (~idx_plus_0).sum()

            #Start loops
            counter = 0
            if plus_coeff > 0:

                while neg_coeff > 0:
                    counter += 1

                    bases_model_n = zeros([nObsPix, plus_coeff])

                    idx_plus_0 = (coeffs_bases[:] > 0)
                    bases_model_n[:, 0:idx_plus_0.sum(
                    )] = bases_grid_model_masked[:,
                                                 idx_plus_0]  #These are replace in order
                    coeffs_bases[~idx_plus_0] = 0

                    #Repeat fit
                    coeffs_n = self.linfit1d(obsFlux_normMasked, obsFlux_mean,
                                             bases_model_n, inv_pdl_error_i)

                    idx_plus_n = coeffs_n[:] > 0
                    idx_min_n = ~idx_plus_n
                    plus_coeff = idx_plus_n.sum()
                    neg_coeff = (idx_min_n).sum()

                    #Replacing negative values by zero
                    coeffs_n[idx_min_n] = 0
                    coeffs_bases[idx_plus_0] = coeffs_n

                    if plus_coeff == 0:
                        neg_coeff = 0
            else:
                plus_coeff = nBases

            end = timer()
            print('FIT3D time', (end - start))

        #Save data to export
        fit_products = {}
        flux_sspFit = np_sum(coeffs_bases.T * ssp_grid_masked, axis=1)
        fluxMasked_sspFit = flux_sspFit * int_mask
        fit_products['flux_components'] = coeffs_bases.T * ssp_grid_masked
        fit_products['weight_coeffs'] = coeffs_bases
        fit_products['flux_sspFit'] = flux_sspFit
        fit_products['fluxMasked_sspFit'] = fluxMasked_sspFit

        return fit_products
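
The 'lsq' and 'nnls' branches above solve the same problem, since lsq_linear with bounds=(0, inf) is a bounded-variable formulation of non-negative least squares; a quick check on random stand-in data:

import numpy as np
from scipy.optimize import nnls, lsq_linear

rng = np.random.default_rng(2)
grid = rng.uniform(size=(200, 10))      # stand-in for ssp_grid_masked
flux = grid @ rng.uniform(size=10)      # stand-in for obs_flux_masked

coeffs_nnls = nnls(grid, flux)[0]
coeffs_lsq = lsq_linear(grid, flux, bounds=(0, np.inf)).x
print(np.abs(coeffs_nnls - coeffs_lsq).max())   # agreement to numerical precision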
Example No. 48
0
    def NMF(self, rank, nmf_tol, print_results=0):
        """
        Run NMF on the TFIDF representation of documents to obtain a low-dimensional representation (dim=rank),
        then apply SVM to classify the data.

        Args:
            rank (int): input rank for NMF
            nmf_tol (float): tolerance for terminating the NMF model
            print_results (boolean): 1: print classification report, heatmaps, and keywords, 0: otherwise
        Returns:
             nmf_svm_acc (float): classification accuracy on test set
             W (ndarray): learnt word dictionary matrix, shape (words, topics)
             nn_svm (ndarray): learnt (non-negative) coefficient matrix for SVM classification, shape (classes, topics)
             nmf_svm_predicted (ndarray): predicted labels for data points in test set
             nmf_iter (int): actual number of iterations of NMF
             H (ndarray): document representation matrix for train set, shape (topics, train documents)
             H_test (ndarray): document representation matrix for test set, shape (topics, test documents)
        """
        self.nmf_tol = nmf_tol

        print("\nRunning NMF + SVM")
        nmf = NMF(n_components=rank,
                  init='random',
                  tol=self.nmf_tol,
                  solver='mu',
                  max_iter=400)

        # TRAINING STEP
        # Dictionary matrix, shape (vocabulary, topics)
        W = nmf.fit_transform(self.X_train_full)
        # Representation matrix, shape (topics, documents)
        H = nmf.components_
        # Actual number of iterations
        nmf_iter = nmf.n_iter_
        # Train SVM classifier on train data
        text_clf = Pipeline([('scl',StandardScaler()), \
                                ('clf', SGDClassifier(tol=1e-5))])
        text_clf.fit(H.T, self.train_labels_full)

        # TESTING STEP
        # Compute the representation of test data
        H_test = np.zeros([rank, np.shape(self.X_test)[1]])
        for i in range(np.shape(self.X_test)[1]):
            H_test[:, i] = nnls(W, self.X_test[:, i])[0]
        # Classify test data using trained SVM classifier
        nmf_svm_predicted = text_clf.predict(H_test.T)
        # Report classification accuracy on test data
        nmf_svm_acc = np.mean(nmf_svm_predicted == self.test_labels)
        print(
            "The classification accuracy on the test data is {:.4f}%\n".format(
                nmf_svm_acc * 100))

        # SVM non-negative coefficient matrix
        nn_svm = text_clf['clf'].coef_.copy()
        nn_svm[nn_svm < 0] = 0

        if print_results == 1:
            # Extract top keyword representations of topics
            print_keywords(W.T, features=self.feature_names_train, top_num=10)
            print(
                metrics.classification_report(self.test_labels,
                                              nmf_svm_predicted,
                                              target_names=self.cls_names))
            factors_heatmaps(nn_svm, cls_names=self.cls_names)

        return nmf_svm_acc, W, nn_svm, nmf_svm_predicted, nmf_iter, H, H_test
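
A minimal sketch of the testing step above: with the dictionary W held fixed, each unseen column x is encoded by solving min ||W h - x|| subject to h >= 0, i.e. one nnls call per test document. All shapes here are made up:

import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(3)
words, topics, docs = 100, 5, 8
W = rng.uniform(size=(words, topics))     # learnt dictionary (stand-in)
X_test = rng.uniform(size=(words, docs))  # unseen TFIDF columns (stand-in)

H_test = np.zeros((topics, docs))
for i in range(docs):
    H_test[:, i] = nnls(W, X_test[:, i])[0]
print(H_test.shape)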
Example No. 49
0
def quadratic_program(H, f, A, b, C=None, d=None, tol=1.e-7):
    """
    Solves the strictly convex (H > 0) quadratic program min .5 x' H x + f' x s.t. A x <= b, C x = d using non-negative least squares.
    (See "Bemporad - A Quadratic Programming Algorithm Based on Nonnegative Least Squares With Applications to Embedded Model Predictive Control", Theorem 1.)

    Arguments
    ----------
    H : numpy.ndarray
        Positive definite Hessian of the cost function.
    f : numpy.ndarray
        Gradient of the cost function.
    A : numpy.ndarray
        Left-hand side of the inequality constraints.
    b : numpy.ndarray
        Right-hand side of the inequality constraints.
    C : numpy.ndarray
        Left-hand side of the equality constraints.
    d : numpy.ndarray
        Right-hand side of the equality constraints.
    tol : float
        Threshold used twice: on the residual of the pnnls to declare the problem infeasible, and on the residual of an inequality to consider the constraint active.

    Returns
    ----------
    sol : dict
        Dictionary with the solution of the QP.

        Fields
        ----------
        min : float
            Minimum of the QP (None if the problem is infeasible).
        argmin : numpy.ndarray
            Argument that minimizes the QP (None if the problem is infeasible).
        active_set : list of int
            Indices of the active inequalities {i | A_i argmin = b_i} (None if the problem is infeasible).
        multiplier_inequality : numpy.ndarray
            Lagrange multipliers for the inequality constraints (None if the problem is infeasible).
        multiplier_equality : numpy.ndarray
            Lagrange multipliers for the equality constraints (None if the problem is infeasible or has no equality constraints).
    """

    # check equalities
    if (C is None) != (d is None):
        raise ValueError('missing C or d.')

    # problem size
    n_ineq, n_x = A.shape
    if C is not None:
        n_eq = C.shape[0]
    else:
        n_eq = 0

    # reshape inputs
    if len(f.shape) == 1:
        f = np.reshape(f, (f.shape[0], 1))
    if len(b.shape) == 1:
        b = np.reshape(b, (b.shape[0], 1))
    if n_eq > 0 and len(d.shape) == 1:
        d = np.reshape(d, (d.shape[0], 1))

    # state equalities as inequalities
    if n_eq > 0:
        AC = np.vstack((A, C, -C))
        bd = np.vstack((b, d, -d))
    else:
        AC = A
        bd = b

    # build and solve pnnls problem
    L = np.linalg.cholesky(H)
    L_inv = np.linalg.inv(L)
    H_inv = L_inv.T.dot(L_inv)
    M = AC.dot(L_inv.T)
    m = bd + AC.dot(H_inv).dot(f)
    gamma = 1
    A_nnls = np.vstack((-M.T, -m.T))
    b_nnls = np.vstack((np.zeros((n_x, 1)), gamma))
    y, r = nnls(A_nnls, b_nnls.flatten())
    y = np.reshape(y, (y.shape[0], 1))

    # initialize output
    sol = {
        'min': None,
        'argmin': None,
        'active_set': None,
        'multiplier_inequality': None,
        'multiplier_equality': None
    }

    # if feasible
    if r > tol:
        lam = y / (gamma + m.T.dot(y))
        sol['multiplier_inequality'] = lam[:n_ineq, :]
        sol['argmin'] = -H_inv.dot(f + AC.T.dot(lam))
        sol['min'] = (.5 * sol['argmin'].T.dot(H).dot(sol['argmin']) +
                      f.T.dot(sol['argmin']))[0, 0]
        sol['active_set'] = sorted(
            list(np.where(sol['multiplier_inequality'] > tol)[0]))
        if n_eq > 0:
            mul_eq_pos = lam[n_ineq:n_ineq + n_eq, :]
            mul_eq_neg = -lam[n_ineq + n_eq:n_ineq + 2 * n_eq, :]
            sol['multiplier_equality'] = mul_eq_pos + mul_eq_neg

    return sol
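
A quick usage sketch for quadratic_program as defined above, on a tiny strictly convex QP whose unconstrained minimum (1, 0) already satisfies the single inequality, so the active set should come back empty:

import numpy as np

H = np.eye(2)
f = np.array([-1., 0.])
A = np.array([[1., 1.]])
b = np.array([2.])
sol = quadratic_program(H, f, A, b)
print(sol['argmin'].flatten(), sol['active_set'])   # [1. 0.] []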
Example No. 50
0
def run_inversion(home,
                  project_name,
                  run_name,
                  fault_name,
                  model_name,
                  GF_list,
                  G_from_file,
                  G_name,
                  epicenter,
                  rupture_speed,
                  num_windows,
                  reg_spatial,
                  reg_temporal,
                  nfaults,
                  beta,
                  decimate,
                  bandpass,
                  solver,
                  bounds,
                  weight=False,
                  Ltype=2):
    '''
    Assemble G and d, determine smoothing and run the inversion
    '''
    from mudpy import inverse as inv
    from numpy import zeros, dot, array, squeeze, expand_dims, empty, tile, eye
    from numpy.linalg import lstsq
    from scipy.sparse import csr_matrix as sparse
    from scipy.optimize import nnls
    from datetime import datetime
    import gc

    t1 = datetime.now()
    #Get data vector
    d = inv.getdata(home, project_name, GF_list, decimate, bandpass=None)
    #Get GFs
    G = inv.getG(home, project_name, fault_name, model_name, GF_list,
                 G_from_file, G_name, epicenter, rupture_speed, num_windows,
                 decimate, bandpass)
    gc.collect()
    #Get data weights
    if weight:
        print('Applying data weights')
        w = inv.get_data_weights(home, project_name, GF_list, d, decimate)
        W = empty(G.shape)
        W = tile(w, (G.shape[1], 1)).T
        WG = empty(G.shape)
        WG = W * G
        wd = w * d.squeeze()
        wd = expand_dims(wd, axis=1)
        #Clear up extraneous variables
        W = None
        w = None
        #Define inversion quantities
        x = WG.transpose().dot(wd)
        print("Computing G'G")
        K = (WG.T).dot(WG)
    else:
        #Define inversion quantities if no weights
        x = G.transpose().dot(d)
        print("Computing G'G")
        K = (G.T).dot(G)
    #Get regularization matrices (set to 0 matrix if not needed)
    static = False  #Is it just a static inversion?
    if reg_spatial is not None:
        if Ltype == 2:  #Laplacian smoothing
            Ls = inv.getLs(home, project_name, fault_name, nfaults,
                           num_windows, bounds)
        else:  #Tikhonov smoothing
            N = nfaults[0] * nfaults[
                1] * num_windows * 2  #Get total no. of model parameters
            Ls = eye(N)
        Ninversion = len(reg_spatial)
    else:
        Ls = zeros(K.shape)
        reg_spatial = array([0.])
        Ninversion = 1
    if reg_temporal is not None:
        Lt = inv.getLt(home, project_name, fault_name, num_windows)
        Ninversion = len(reg_temporal) * Ninversion
    else:
        Lt = zeros(K.shape)
        reg_temporal = array([0.])
        static = True
    #Make L's sparse
    Ls = sparse(Ls)
    Lt = sparse(Lt)
    #Get regularization transposes for ABIC
    LsLs = Ls.transpose().dot(Ls)
    LtLt = Lt.transpose().dot(Lt)
    #inflate
    Ls = Ls.todense()
    Lt = Lt.todense()
    LsLs = LsLs.todense()
    LtLt = LtLt.todense()
    #off we go
    dt = datetime.now() - t1
    print('Preprocessing wall time was ' + str(dt))
    print('\n--- RUNNING INVERSIONS ---\n')
    ttotal = datetime.now()
    kout = 0
    for kt in range(len(reg_temporal)):
        for ks in range(len(reg_spatial)):
            t1 = datetime.now()
            lambda_spatial = reg_spatial[ks]
            lambda_temporal = reg_temporal[kt]
            print('Running inversion ' + str(kout + 1) + ' of ' +
                  str(Ninversion) + ' at regularization levels: ls = ' +
                  repr(lambda_spatial) + ' , lt = ' + repr(lambda_temporal))
            if static:  #Statics-only inversion, no Lt matrix
                Kinv = K + (lambda_spatial**2) * LsLs
                Lt = eye(len(K))
                LtLt = Lt.T.dot(Lt)
            else:  #Mixed inversion
                Kinv = K + (lambda_spatial**2) * LsLs + (lambda_temporal**
                                                         2) * LtLt
            if solver.lower() == 'lstsq':
                sol, res, rank, s = lstsq(Kinv, x)
            elif solver.lower() == 'nnls':
                x = squeeze(x.T)
                try:
                    sol, res = nnls(Kinv, x)
                except Exception:
                    print('+++ WARNING: No solution found, writing zeros.')
                    sol = zeros(G.shape[1])
                x = expand_dims(x, axis=1)
                sol = expand_dims(sol, axis=1)
            else:
                print("ERROR: Unrecognized solver '" + solver + "'")
            #Compute synthetics
            ds = dot(G, sol)
            #Get stats
            L2, Lmodel = inv.get_stats(Kinv, sol, x)
            VR = inv.get_VR(home, project_name, GF_list, sol, d, ds, decimate)
            #VR=inv.get_VR(WG,sol,wd)
            #ABIC=inv.get_ABIC(WG,K,sol,wd,lambda_spatial,lambda_temporal,Ls,LsLs,Lt,LtLt)
            ABIC = inv.get_ABIC(G, K, sol, d, lambda_spatial, lambda_temporal,
                                Ls, LsLs, Lt, LtLt)
            #Get moment
            Mo, Mw = inv.get_moment(home, project_name, fault_name, model_name,
                                    sol)
            #If a rotational offset was applied then reverse it for output to file
            if beta != 0:
                sol = inv.rot2ds(sol, beta)
            #Write log
            inv.write_log(home, project_name, run_name, kout, rupture_speed,
                          num_windows, lambda_spatial, lambda_temporal, beta,
                          L2, Lmodel, VR, ABIC, Mo, Mw, model_name, fault_name,
                          G_name, GF_list, solver)
            #Write output to file
            inv.write_synthetics(home, project_name, run_name, GF_list, G, sol,
                                 ds, kout, decimate)
            inv.write_model(home, project_name, run_name, fault_name,
                            model_name, rupture_speed, num_windows, epicenter,
                            sol, kout)
            kout += 1
            dt1 = datetime.now() - t1
            dt2 = datetime.now() - ttotal
            print('... inversion wall time was ' + str(dt1) +
                  ', total wall time elapsed is ' + str(dt2))
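
The nnls branch above operates on the regularized normal equations K + lambda^2 L'L rather than on G itself; a condensed sketch of that pattern, with random stand-ins for G, d and the smoothing operator:

import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(4)
G = rng.uniform(size=(60, 20))          # stand-in Green's function matrix
d = G @ rng.uniform(size=20)            # stand-in data vector
L = np.eye(20)                          # Tikhonov smoothing operator
lambda_spatial = 0.1

K = G.T @ G + (lambda_spatial ** 2) * (L.T @ L)
x = G.T @ d
sol, res = nnls(K, x)
print(sol.shape, res)

Example No. 51
0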
def optimMuscleParams(osimModel_ref_filepath, osimModel_targ_filepath, N_eval,
                      log_folder):

    # results file identifier
    res_file_id_exp = '_N' + str(N_eval)

    # import models
    osimModel_ref = opensim.Model(osimModel_ref_filepath)
    osimModel_targ = opensim.Model(osimModel_targ_filepath)

    # models details
    name = Path(osimModel_targ_filepath).stem
    ext = Path(osimModel_targ_filepath).suffix

    # assigning new name to the model
    osimModel_opt_name = name + '_opt' + res_file_id_exp + ext
    osimModel_targ.setName(osimModel_opt_name)

    # initializing log file
    log_folder = Path(log_folder)
    log_folder.mkdir(parents=True, exist_ok=True)
    logging.basicConfig(filename=str(log_folder) + '/' + name + '_opt' +
                        res_file_id_exp + '.log',
                        filemode='w',
                        format='%(levelname)s:%(message)s',
                        level=logging.INFO)

    # get muscles
    muscles = osimModel_ref.getMuscles()
    muscles_scaled = osimModel_targ.getMuscles()

    # initialize with recognizable values
    LmOptLts_opt = -1000 * np.ones((muscles.getSize(), 2))
    SimInfo = {}

    for n_mus in range(0, muscles.getSize()):

        tic = time()

        # current muscle name (here so that it is possible to choose a single muscle when developing).
        curr_mus_name = muscles.get(n_mus).getName()
        print('processing mus ' + str(n_mus + 1) + ': ' + curr_mus_name)

        # import muscles
        curr_mus = muscles.get(curr_mus_name)
        curr_mus_scaled = muscles_scaled.get(curr_mus_name)

        # extracting the muscle parameters from reference model
        LmOptLts = [
            curr_mus.getOptimalFiberLength(),
            curr_mus.getTendonSlackLength()
        ]
        PenAngleOpt = curr_mus.getPennationAngleAtOptimalFiberLength()
        Mus_ref = sampleMuscleQuantities(osimModel_ref, curr_mus, 'all',
                                         N_eval)

        # calculating minimum fiber length before having pennation 90 deg
        # arccos(0.1) = 1.47 rad = 84 degrees, chosen as in OpenSim
        limitPenAngle = np.arccos(0.1)
        # this is the minimum length the fiber can be for geometrical reasons.
        LfibNorm_min = np.sin(PenAngleOpt) / np.sin(limitPenAngle)
        # LfibNorm as calculated above can be shorter than the minimum length
        # at which the fiber can generate force (taken to be 0.5 Zajac 1989)
        if LfibNorm_min < 0.5:
            LfibNorm_min = 0.5

        # muscle-tendon parameter values
        MTL_ref = [musc_param_iter[0] for musc_param_iter in Mus_ref]
        LfibNorm_ref = [musc_param_iter[1] for musc_param_iter in Mus_ref]
        LtenNorm_ref = [
            musc_param_iter[2] / LmOptLts[1] for musc_param_iter in Mus_ref
        ]
        penAngle_ref = [musc_param_iter[4] for musc_param_iter in Mus_ref]
        # LfibNomrOnTen_ref = LfibNorm_ref.*cos(penAngle_ref)
        LfibNomrOnTen_ref = [(musc_param_iter[1] * np.cos(musc_param_iter[4]))
                             for musc_param_iter in Mus_ref]

        # finding the muscle configurations that respect the minimum-length condition.
        okList = [
            pos for pos, value in enumerate(LfibNorm_ref)
            if value > LfibNorm_min
        ]
        # keeping only acceptable values
        MTL_ref = np.array([MTL_ref[index] for index in okList])
        LfibNorm_ref = np.array([LfibNorm_ref[index] for index in okList])
        LtenNorm_ref = np.array([LtenNorm_ref[index] for index in okList])
        penAngle_ref = np.array([penAngle_ref[index] for index in okList])
        LfibNomrOnTen_ref = np.array(
            [LfibNomrOnTen_ref[index] for index in okList])

        # in the target only MTL is needed for all muscles
        MTL_targ = sampleMuscleQuantities(osimModel_targ, curr_mus_scaled,
                                          'MTL', N_eval)
        evalTotPoints = len(MTL_targ)
        MTL_targ = np.array([MTL_targ[index] for index in okList])
        evalOkPoints = len(MTL_targ)

        # The problem to be solved is:
        # [LmNorm*cos(penAngle) LtNorm]*[Lmopt Lts]' = MTL;
        # written as Ax = b or their equivalent (A^T A) x = (A^T b)
        A = np.array([LfibNomrOnTen_ref, LtenNorm_ref]).T
        b = MTL_targ

        # ===== LINSOL =======
        # solving the problem to calculate the muscle param
        x = linalg.solve(np.dot(A.T, A), np.dot(A.T, b))
        LmOptLts_opt[n_mus] = x

        # checking the results
        if np.min(x) <= 0:
            # informing the user
            line0 = ' '
            line1 = 'Negative value estimated for muscle parameter of muscle ' + curr_mus_name + '\n'
            line2 = '                         Lm Opt        Lts' + '\n'
            line3 = 'Template model       : ' + str(LmOptLts) + '\n'
            line4 = 'Optimized param      : ' + str(LmOptLts_opt[n_mus]) + '\n'

            # ===== IMPLEMENTING CORRECTIONS IF ESTIMATION IS NOT CORRECT =======
            x = optimize.nnls(np.dot(A.T, A), np.dot(A.T, b))
            x = x[0]
            LmOptLts_opt[n_mus] = x
            line5 = 'Opt params (optimize.nnls): ' + str(LmOptLts_opt[n_mus])

            logging.info(line0 + line1 + line2 + line3 + line4 + line5 + '\n')
            # In our tests, if something goes wrong it is generally the tendon
            # slack length becoming negative or zero, because the tendon length
            # doesn't change throughout the range of motion, lowering the rank of A.
            if np.min(x) <= 0:
                # analyzes of Lten behaviour
                Lten_ref = [musc_param_iter[2] for musc_param_iter in Mus_ref]
                Lten_ref = np.array([Lten_ref[index] for index in okList])
                if (np.max(Lten_ref) - np.min(Lten_ref)) < 0.0001:
                    logging.warning(
                        ' Tendon length not changing throughout range of motion'
                    )

                # calculating proportion of tendon and fiber
                Lten_fraction = Lten_ref / MTL_ref
                Lten_targ = Lten_fraction * MTL_targ

                # first round: optimizing Lopt maintaining the proportion of
                # tendon as in the reference model
                A1 = np.array([LfibNomrOnTen_ref, LtenNorm_ref * 0]).T
                b1 = MTL_targ - Lten_targ
                x1 = optimize.nnls(np.dot(A1.T, A1), np.dot(A1.T, b1))
                x[0] = x1[0][0]

                # second round: using the optimized Lopt to recalculate Lts
                A2 = np.array([LfibNomrOnTen_ref * 0, LtenNorm_ref]).T
                b2 = MTL_targ - np.dot(A1, x1[0])
                x2 = optimize.nnls(np.dot(A2.T, A2), np.dot(A2.T, b2))
                x[1] = x2[0][1]

                LmOptLts_opt[n_mus] = x

        # Here tests about/against optimizers were implemented

        # calculating the error (root mean squared error)
        fval = mean_squared_error(b, np.dot(A, x), squared=False)

        # update muscles from scaled model
        curr_mus_scaled.setOptimalFiberLength(LmOptLts_opt[n_mus][0])
        curr_mus_scaled.setTendonSlackLength(LmOptLts_opt[n_mus][1])

        # PRINT LOGS
        toc = time() - tic
        line0 = ' '
        line1 = 'Calculated optimized muscle parameters for ' + curr_mus.getName(
        ) + ' in ' + str(toc) + ' seconds.' + '\n'
        line2 = '                         Lm Opt        Lts' + '\n'
        line3 = 'Template model       : ' + str(LmOptLts) + '\n'
        line4 = 'Optimized param      : ' + str(LmOptLts_opt[n_mus]) + '\n'
        line5 = 'Nr of eval points    : ' + str(evalOkPoints) + '/' + str(
            evalTotPoints) + ' used' + '\n'
        line6 = 'fval                 : ' + str(fval) + '\n'
        line7 = 'var from template [%]: ' + str(
            100 *
            (np.abs(LmOptLts - LmOptLts_opt[n_mus])) / LmOptLts) + '%' + '\n'

        logging.info(line0 + line1 + line2 + line3 + line4 + line5 + line6 +
                     line7 + '\n')

        # SIMULATION INFO AND RESULTS

        SimInfo[n_mus] = {}
        SimInfo[n_mus]['colheader'] = curr_mus.getName()
        SimInfo[n_mus]['LmOptLts_ref'] = LmOptLts
        SimInfo[n_mus]['LmOptLts_opt'] = LmOptLts_opt[n_mus]
        SimInfo[n_mus]['varPercLmOptLts'] = 100 * (
            np.abs(LmOptLts - LmOptLts_opt[n_mus])) / LmOptLts
        SimInfo[n_mus]['sampledEvalPoints'] = evalOkPoints
        SimInfo[n_mus]['totalEvalPoints'] = evalTotPoints
        SimInfo[n_mus]['fval'] = fval

    # assigning optimized model as output
    osimModel_opt = osimModel_targ

    return osimModel_opt, SimInfo
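
When the plain normal-equations solve yields a negative parameter, the routine above re-solves the same system with optimize.nnls; a condensed illustration of that fallback, using random stand-in data that forces a negative unconstrained estimate:

import numpy as np
from scipy import linalg, optimize

rng = np.random.default_rng(5)
A = rng.uniform(size=(40, 2))
b = A @ np.array([0.05, -0.01])         # true parameters include a negative value

x = linalg.solve(np.dot(A.T, A), np.dot(A.T, b))   # unconstrained solve
if np.min(x) <= 0:
    x = optimize.nnls(np.dot(A.T, A), np.dot(A.T, b))[0]  # non-negative fallback
print(x)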
Example No. 52
0
                                              start_pt)
                    # Else zero.
                    s_idx += 1

                # Augment by one column for normalization
                pt_mat = np.c_[predicted_points.T,
                               np.ones((len(support_regions), 1))]
                local_A = np.dot(local_feat_mat, pt_mat)

                # Divide by last column for common normalization.
                if n_rad > 0:
                    local_A = local_A / np.tile(local_A[:, 2], (3, 1)).T

                A = np.r_[A, local_A[:, :-1].T]
                rhs = np.r_[rhs, matched_pt]

    # System is built, solve now.
# A = np.r_[A, 1e5*np.ones((1, feature_dims))]
# rhs = np.r_[rhs, 1e5]

    weights = la.lstsq(A, rhs)[0]
    res = np.dot(A, weights) - rhs
    rms = np.sqrt(res.dot(res) / len(res))

    weights_pos = opt.nnls(A, rhs)[0]
    res_pos = np.dot(A, weights_pos) - rhs
    rms_pos = np.sqrt(res_pos.dot(res_pos) / len(res))

    plt.bar(range(0, len(weights)), abs(weights))
    pdb.set_trace()
Example No. 53
0
    def fit_nnls(self):
        from scipy.optimize import nnls
        return nnls(self.tree_graph, self.HI_dist)[0]
Example No. 54
0
    def brute_force_closest_vertex_to_joints(self):
        scenes = self.get_valid_scenes()

        # calculate 50 nearest vertex ids for all meshes and joints
        closest_k = 50
        candidates = {}
        keep_searching = True
        while keep_searching:
            for key, scene in scenes.items():
                joint = np.squeeze(scene['joint'].vertices)
                distances, vertex_ids = scene['mesh'].kdtree.query(joint, closest_k)
                candidates[key] = {vertex_id: dist for vertex_id, dist in zip(vertex_ids, distances)}

            # only keep common ids
            from functools import reduce
            common_ids = reduce(np.intersect1d, [list(c.keys()) for c in candidates.values()])

            if len(common_ids) <= self.settings_loader.min_vertices:
                closest_k += 10
                continue
            else:
                keep_searching = False

            # calculate average distance per mesh/joint for valid ids
            mean_dist = [np.mean([c[common_id] for c in candidates.values()]) for common_id in common_ids]
            mean_dist = {common_id: dist for common_id, dist in zip(common_ids, mean_dist)}
            mean_dist = {k: v for k, v in sorted(mean_dist.items(), key=lambda item: item[1])}

        # pick closest vertex with min average distance to all joints per mesh
        closest_id = list(mean_dist)[0]
        final_vertices = [closest_id]
        mean_dist.pop(closest_id)

        while len(final_vertices) < self.settings_loader.min_vertices:
            # calculate all distance combinations between valid vertices
            vertex_ids = list(mean_dist)
            id_dist = [sp.distance.cdist(s['mesh'].vertices[final_vertices], s['mesh'].vertices[vertex_ids]) for s in
                       scenes.values()]
            id_dist = np.mean(id_dist, axis=0)

            # min the ratio between distances to joint and distance to all other vertices
            best_dist = list(mean_dist.values()) / id_dist
            best_id = np.argmin(best_dist)

            # max the difference between distance to all other vertices and distances to joint
            # best_dist = id_dist - list(mean_dist.values())
            # best_id = np.argmax(best_dist)

            n, m = np.unravel_index(best_id, best_dist.shape)
            best_id = vertex_ids[m]

            final_vertices.append(best_id)
            mean_dist.pop(best_id)

        vertices, joints = [], []
        for scene in scenes.values():
            verts = np.asarray(scene['mesh'].vertices).reshape([-1, 3])
            verts = verts[final_vertices]
            vertices.append(verts)
            joint = np.asarray(scene['joint'].vertices).reshape([-1, 3])
            joints.append(joint)

        vertex_weight = np.zeros([6890, ])

        vertices = np.stack(vertices).transpose([0, 2, 1]).reshape([-1, len(final_vertices)])
        joints = np.stack(joints).transpose([0, 2, 1]).reshape([-1])
        # constraint weights to sum up to 1
        vertices = np.concatenate([vertices, np.ones([1, vertices.shape[1]])], 0)
        joints = np.concatenate([joints, np.ones(1)])
        # solve with non negative least squares
        weights = so.nnls(vertices, joints)[0]
        vertex_weight[final_vertices] = weights

        file = join('regressors', 'regressor_{}.npy'.format(self.regressor_name.text()))
        with open(file, 'wb') as f:
            vertex_weight = vertex_weight.astype(np.float32)
            vertex_weight = np.expand_dims(vertex_weight, -1)
            np.save(f, vertex_weight)

            widget = QMessageBox(
                icon=QMessageBox.Information,
                text='Regressor file successfully saved to: {}\n\nClick Reset to start again'.format(file),
                buttons=[QMessageBox.Ok]
            )
            widget.exec_()

        vertex_weight = np.squeeze(vertex_weight)
        self.convert_button.setEnabled(False)
        self.regressor_name.setEnabled(False)

        for scene in self.scene_chache.values():
            mesh = scene.geometry['geometry_0']
            mesh.visual.vertex_colors = [200, 200, 200, 255]
            mesh.visual.vertex_colors[final_vertices] = [0, 255, 0, 255]

            x = np.matmul(vertex_weight, mesh.vertices[:, 0])
            y = np.matmul(vertex_weight, mesh.vertices[:, 1])
            z = np.matmul(vertex_weight, mesh.vertices[:, 2])
            joints = np.vstack((x, y, z)).T
            joints = trimesh.PointCloud(joints, colors=[0, 255, 0, 255])
            scene.add_geometry(joints, geom_name='new_joints')
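
The sum-to-one constraint above is imposed softly by appending a row of ones to the system and a 1 to the target before the nnls call; in miniature, with made-up shapes:

import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(9)
V = rng.uniform(size=(9, 4))              # stacked vertex coordinates (stand-in)
j = V @ np.array([0.5, 0.2, 0.2, 0.1])    # target joint coordinates

V_aug = np.concatenate([V, np.ones((1, V.shape[1]))], 0)
j_aug = np.concatenate([j, np.ones(1)])
w = nnls(V_aug, j_aug)[0]
print(w, w.sum())                         # weights sum to ~1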
Example No. 55
0
    def lsq_solution(self, point, plot=False):
        """
        Returns non-negative least-squares solution for given input point.

        Parameters
        ----------
        point : dict
            in solution space

        Returns
        -------
        point with least-squares solution
        """
        from scipy.optimize import nnls

        if self.config.problem_config.mode_config.regularization != \
                'laplacian':
            raise ValueError(
                'Least-squares solution for distributed slip is only '
                'available with laplacian regularization!')

        lc = self.composites['laplacian']
        slip_varnames_candidates = ['uparr', 'utens']

        slip_varnames = []
        for var in slip_varnames_candidates:
            if var in self.varnames:
                slip_varnames.append(var)

        if len(slip_varnames) == 0:
            raise ValueError(
                'LSQ distributed slip solution is only available for %s,'
                ' which were fixed in the setup!' %
                list2string(slip_varnames_candidates))

        Gs = []
        ds = []
        for datatype, composite in self.composites.items():
            if datatype == 'geodetic':
                crust_ind = composite.config.gf_config.reference_model_idx
                keys = [
                    composite.get_gflibrary_key(crust_ind=crust_ind,
                                                wavename='static',
                                                component=var)
                    for var in slip_varnames
                ]
                Gs.extend([composite.gfs[key]._gfmatrix for key in keys])

                # removing hierarchicals from data
                displacements = []
                for dataset in composite.datasets:
                    displacements.append(copy.deepcopy(dataset.displacement))

                displacements = composite.apply_corrections(displacements,
                                                            point=point,
                                                            operation='-')
                ds.extend(displacements)

            elif datatype == 'seismic':
                if False:
                    for wmap in composite.wavemaps:
                        keys = [
                            composite.get_gflibrary_key(crust_ind=crust_ind,
                                                        wavename=wmap.name,
                                                        component=var)
                            for var in slip_varnames
                        ]
                        Gs.extend(
                            [composite.gfs[key]._gfmatrix for key in keys])
                        ds.append(wmap._prepared_data)

        if len(Gs) == 0:
            raise ValueError('No Greens Function matrix available!'
                             ' (needs geodetic datatype!)')

        G = num.vstack(Gs)
        D = num.vstack([lc.smoothing_op for sv in slip_varnames]) * \
            point[bconfig.hyper_name_laplacian] ** 2.

        dzero = num.zeros(D.shape[1], dtype=tconfig.floatX)
        A = num.hstack([G, D])
        d = num.hstack(ds + [dzero])

        # m, rmse, rankA, singularsA =  num.linalg.lstsq(A.T, d, rcond=None)
        m, res = nnls(A.T, d)
        npatches = self.config.problem_config.mode_config.npatches
        for i, var in enumerate(slip_varnames):
            point[var] = m[i * npatches:(i + 1) * npatches]

        if plot:
            from beat.plotting import source_geometry
            gc = self.composites['geodetic']
            fault = gc.load_fault_geometry()
            source_geometry(fault,
                            list(fault.iter_subfaults()),
                            event=gc.event,
                            values=point[slip_varnames[0]],
                            title='slip [m]',
                            datasets=gc.datasets)

        point['uperp'] = dzero
        return point
Example No. 56
0
Hph1 = create_hph(angle1=0, angle2=180, npx=200, radius=radius)
Hph2 = create_hph(angle1=90, angle2=270, npx=200, radius=radius)

I1 = np.abs(ifft2(S1 * image_fft))
I2 = np.abs(ifft2(S2 * image_fft))
I3 = np.abs(ifft2(S3 * image_fft))
I4 = np.abs(ifft2(S4 * image_fft))

Idpc1 = simple_dpc(I1, I2)
Idpc2 = simple_dpc(I3, I4)

# phi = tik_deconvolution([Hph1], [Idpc1], alpha=0.1)

print(Idpc1.shape, np.zeros(shape=(npx, 1)).shape)
A1 = np.concatenate((Hph1, 1 * np.matlib.identity(npx)))
b1 = np.concatenate((Idpc1, np.zeros(shape=(1, npx))))
phi = optimize.nnls(A1, b1)[0]

fig, (axes) = plt.subplots(3, 2, figsize=(25, 15))
fig.show()

Hph = 1j * (signal.correlate2d(P, S1 * P) - signal.correlate2d(P, S2 * P))
axes[0][0].imshow(np.imag(1 / (Hph1 + .1)), cmap=plt.get_cmap('hot'))
axes[0][1].imshow(image_phase, cmap=plt.get_cmap('hot'))
axes[1][0].imshow(Idpc1)
axes[1][1].imshow(Idpc2)
axes[2][0].imshow(np.abs(phi))
axes[2][1].imshow(np.imag(phi))
plt.show()
Example No. 57
0
    def fit(self, X, y):
        self.coef_, self.residual = nnls(X, y)
        return self
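
A usage sketch for this one-line fit, wrapping nnls in a scikit-learn-style estimator; the class name and the predict method are hypothetical additions, not part of the original snippet:

import numpy as np
from scipy.optimize import nnls

class NNLSRegressor:
    def fit(self, X, y):
        self.coef_, self.residual = nnls(X, y)
        return self

    def predict(self, X):
        return X @ self.coef_

rng = np.random.default_rng(6)
X = np.abs(rng.normal(size=(30, 4)))
y = X @ np.array([1.0, 0.0, 2.0, 0.5])
model = NNLSRegressor().fit(X, y)
print(model.coef_, model.predict(X[:2]))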
Example No. 58
0
print("Computing G'G")
K=(WG.T).dot(WG)


##Inversion quantities if single data set
#K=(G.T).dot(G)
#x=G.transpose().dot(d)

#Get smoothing matrix
L=eye(len(m)) 
LL=L.transpose().dot(L)
for ks in range(len(reg_spatial)):
    print(ks)
    lambda_spatial = reg_spatial[ks]
    Kinv = K + (lambda_spatial**2)*LL
    x = squeeze(x.T)
    try:
        sol, res = nnls(Kinv, x)
    except Exception:
        print('+++ WARNING: No solution found, writing zeros.')
        sol = zeros(G.shape[1])
        x = expand_dims(x, axis=1)
        sol = expand_dims(sol, axis=1)
    #Save
    number = str(ks).rjust(4, '0')
    fout='/Users/dmelgar/Slip_inv/Coquimbo_4s/output/forward_models/'+run+'.'+number+'.rupt'
    f[:,8]=sol[iss]
    f[:,9]=sol[ids]
    savetxt(fout,f,fmt='%6i\t%.4f\t%.4f\t%8.4f\t%.2f\t%.2f\t%.2f\t%.2f\t%12.4e\t%12.4e%10.1f\t%10.1f\t%8.4f\t%.4e')
    #gmttools.make_total_model(fout,0)
Example No. 59
0
    def _get_coefs(self, y, y_pred_cv):
        """
        Find coefficients that minimize the estimated risk.

        Parameters
        ----------
        y: numpy.array of observed outcomes

        y_pred_cv: numpy.array of shape [len(y), len(self.library)] of cross-validated
                   predictions

        Returns
        -------
        coef: numpy.array of normalized non-negative coefficients to combine
              candidate estimators
        """
        if self.coef_method == 'L_BFGS_B':
            if self.loss == 'nloglik':
                raise SLError("coef_method 'L_BFGS_B' is only for 'L2' loss")

            def ff(x):
                return self._get_risk(y, self._get_combination(y_pred_cv, x))

            x0 = np.array([1. / self.n_estimators] * self.n_estimators)
            bds = [(0, 1)] * self.n_estimators
            coef_init, b, c = fmin_l_bfgs_b(ff,
                                            x0,
                                            bounds=bds,
                                            approx_grad=True)
            if c['warnflag'] != 0:
                raise SLError(
                    "fmin_l_bfgs_b failed when trying to calculate coefficients"
                )

        elif self.coef_method == 'NNLS':
            if self.loss == 'nloglik':
                raise SLError("coef_method 'NNLS' is only for 'L2' loss")
            coef_init, b = nnls(y_pred_cv, y)

        elif self.coef_method == 'SLSQP':

            def ff(x):
                return self._get_risk(y, self._get_combination(y_pred_cv, x))

            def constr(x):
                return np.array([np.sum(x) - 1])

            x0 = np.array([1. / self.n_estimators] * self.n_estimators)
            bds = [(0, 1)] * self.n_estimators
            coef_init, b, c, d, e = fmin_slsqp(ff,
                                               x0,
                                               f_eqcons=constr,
                                               bounds=bds,
                                               disp=0,
                                               full_output=1)
            if d != 0:
                raise SLError(
                    "fmin_slsqp failed when trying to calculate coefficients")

        else:
            raise ValueError("method not recognized")
        coef_init = np.array(coef_init)
        #All coefficients should be non-negative or possibly a very small negative number,
        #But setting small values to zero makes them nicer to look at and doesn't really change anything
        coef_init[coef_init < np.sqrt(np.finfo(np.double).eps)] = 0
        #Coefficients should already sum to (almost) one if method is 'SLSQP', and should be really close
        #for the other methods if loss is 'L2' anyway.
        coef = coef_init / np.sum(coef_init)
        return coef
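
A condensed sketch of the 'NNLS' branch above: regress the observed outcomes on the cross-validated predictions of the candidate estimators, zero out tiny coefficients, and normalize, as the tail of the method does. The prediction matrix is random stand-in data:

import numpy as np
from scipy.optimize import nnls

rng = np.random.default_rng(7)
y = rng.normal(size=100)
# columns: cross-validated predictions of three candidate estimators
y_pred_cv = np.column_stack([y + rng.normal(scale=s, size=100)
                             for s in (0.1, 0.5, 1.0)])

coef, _ = nnls(y_pred_cv, y)
coef[coef < np.sqrt(np.finfo(np.double).eps)] = 0
coef = coef / np.sum(coef)
print(coef)                             # the low-noise column dominates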
Example No. 60
0
    def predict(self, X, n_neighbor=None, radius=NP.inf, use_nugget=False):
        '''
        [DESCRIPTION]
           Calculate and predict the value of interest by looping over all data; the method takes a long time but saves memory.
           Set up the linear algebra terms
            | V_ij  1 || w_i |   | V_k |
            |  1    0 ||  u  | = |  1  |
                 a    *    w   =    b
              w = a^-1 * b
              y = w_i * Y
            V_ij : semi-variance matrix within n neighbors
            V_k  : semi-variance vector between the point of interest and its n neighbors
            w_i  : weights for the linear combination
            u    : Lagrange multiplier
            Y    : true values of the neighbors
            y    : predicted value at the point of interest
        [INPUT]
            X          : array-like, input data with the same number of features as the training data
            n_neighbor : int,        number of neighbors w.r.t. input data, while distance < searching radius (None: use all training points)
            radius     : float,      searching radius w.r.t. input data (inf)
            use_nugget : bool,       if True, use the nugget as the diagonal of the kriging matrix for the prediction calculation (False)
        [OUTPUT]
            1D/2D array(float)
            prediction : float, predicted value via the kriging system y
            error      : float, error of the predicted value
            lambda     : float, Lagrange multiplier u
        '''
        ## Make input to numpy.array
        X = NP.atleast_1d(X)
        if self.ndim == 1:
            if X.ndim < 2: X = X[:, NP.newaxis]
        else:
            X = NP.atleast_2d(X)

        ## Find the neighbors with K-tree object
        if n_neighbor is None:
            n_neighbor = self.shape[0]
        neighbor_dst, neighbor_idx = self.X.query(X, k=n_neighbor, p=2)

        ## Calculate prediction
        idxes = range(X.shape[0])
        out_lambda = NP.zeros(len(X))
        out_predict = NP.zeros(len(X))
        out_error = NP.zeros(len(X))
        for nd, ni, i in zip(neighbor_dst, neighbor_idx, idxes):
            ## select in searching radius
            ni = ni[nd < radius] # neighbors' index, while the distance < search radius
            nd = nd[nd < radius] # neighbors' distance, while the distance < search radius

            if len(ni) == 0:
                continue
            else:
                n = len(ni)

            ## Initialization
            a = NP.zeros((n+1,n+1))
            b = NP.ones((n+1,))

            ## Fill matrix a
            a[:n, :n] = self.variogram.predict(cdist(self.X.data[ni], self.X.data[ni], metric='euclidean'))
            a[:n, n] = 1
            a[n, :n] = 1

            ## Fill vector b
            b[:n] = self.variogram.predict(nd)

            ## set the self-variance to zero if not using the nugget
            if not use_nugget:
                ## modify a
                NP.fill_diagonal(a, 0.)
                ## modify b
                zero_index = NP.where(NP.absolute(nd) == 0)
                if len(zero_index[0]) > 0:
                    b[zero_index[0]] = 0.

            ## Get weights
            #w = scipy.linalg.solve(a, b) # no constraint solution
            w = OPT.nnls(a, b)[0] # non-negative solution

            ## Fill results and prediction
            out_lambda[i] = w[n]
            out_predict[i] = w[:n].dot(self.y[ni])
            out_error[i] = NP.sqrt(w[:n].dot(b[:n]))

        return out_predict, out_error, out_lambda
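
A compact sketch of the per-point system described in the docstring: fill the augmented semivariance matrix with the Lagrange-multiplier row and column, then take the non-negative solution, mirroring the OPT.nnls call above. The linear variogram and the coordinates are assumptions:

import numpy as np
from scipy.optimize import nnls
from scipy.spatial.distance import cdist

def variogram(h):                         # linear variogram (an assumption)
    return 0.9 * h

rng = np.random.default_rng(8)
pts = rng.uniform(size=(6, 2))            # neighbor coordinates (stand-in)
vals = rng.uniform(size=6)                # neighbor values
target = np.array([[0.5, 0.5]])

n = len(pts)
a = np.zeros((n + 1, n + 1))
a[:n, :n] = variogram(cdist(pts, pts))    # semi-variances among neighbors
a[:n, n] = 1                              # Lagrange-multiplier column
a[n, :n] = 1                              # unbiasedness row
b = np.ones(n + 1)
b[:n] = variogram(cdist(target, pts).ravel())

w = nnls(a, b)[0]
print(w[:n] @ vals, w[n])                 # prediction and Lagrange multiplier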