def test_lasso():
    np.random.seed(0)
    print("test lasso")
    ##############################################
    # Decomposition of a large number of signals
    ##############################################
    # data generation
    X = np.asfortranarray(np.random.normal(size=(100,100000)))
    #* X=X./repmat(sqrt(sum(X.^2)),[size(X,1) 1]);
    X = np.asfortranarray(X / np.tile(np.sqrt((X*X).sum(axis=0)),(X.shape[0],1)),dtype=myfloat)
    D = np.asfortranarray(np.random.normal(size=(100,200)))
    D = np.asfortranarray(D / np.tile(np.sqrt((D*D).sum(axis=0)),(D.shape[0],1)),dtype=myfloat)
    # parameters of the optimization procedure are chosen
    #param['L'] = 20  # not more than 20 non-zero coefficients (default: min(size(D,1),size(D,2)))
    param = {
        'lambda1' : 0.15, # regularization parameter of the penalized formulation
        'numThreads' : -1, # number of processors/cores to use; the default choice is -1
        # and uses all the cores of the machine
        'mode' : spams.PENALTY} # penalized formulation

    tic = time.time()
    alpha = spams.lasso(X,D = D,return_reg_path = False,**param)
    tac = time.time()
    t = tac - tic
    print("%f signals processed per second\n" %(float(X.shape[1]) / t))
    ########################################
    # Regularization path of a single signal
    ########################################
    X = np.asfortranarray(np.random.normal(size=(64,1)),dtype=myfloat)
    D = np.asfortranarray(np.random.normal(size=(64,10)))
    D = np.asfortranarray(D / np.tile(np.sqrt((D*D).sum(axis=0)),(D.shape[0],1)),dtype=myfloat)
    (alpha,path) = spams.lasso(X,D = D,return_reg_path = True,**param)
    return None
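# Hedged follow-up sketch (not part of the original test): one way to evaluate
# the lasso objective on the sparse codes that spams.lasso returns. Since alpha
# is a scipy sparse matrix, D * alpha is the matrix product; the same pattern
# is relied on by the dictionary-learning examples below.
import numpy as np

def eval_lasso_objective(X, D, alpha, lambda1):
    # mean over signals of 0.5*||x - D*a||^2 + lambda1*||a||_1
    xd = X - D * alpha
    return np.mean(0.5 * (xd * xd).sum(axis=0) + lambda1 * np.abs(alpha).sum(axis=0))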
def test_trainDL_Memory():
    img_file = 'lena.png'
    try:
        img = Image.open(img_file)
    except Exception:
        print("Cannot load image %s : skipping test" %img_file)
        return None
    I = np.array(img) / 255.
    if I.ndim == 3:
        A = np.asfortranarray(I.reshape((I.shape[0],I.shape[1] * I.shape[2])))
        rgb = True
    else:
        A = np.asfortranarray(I)
        rgb = False

    m = 8
    n = 8
    X = spams.im2col_sliding(A,m,n,rgb)

    X = X - np.tile(np.mean(X,0),(X.shape[0],1))
    X = np.asfortranarray(X / np.tile(np.sqrt((X * X).sum(axis=0)),(X.shape[0],1)))
    X = np.asfortranarray(X[:,np.arange(0,X.shape[1],10)],dtype = myfloat)

    param = { 'K' : 200, # learns a dictionary with 200 elements
          'lambda1' : 0.15, 'numThreads' : 4,
          'iter' : 100}

    ############# FIRST EXPERIMENT  ##################
    tic = time.time()
    D = spams.trainDL_Memory(X,**param)
    tac = time.time()
    t = tac - tic
    print('time of computation for Dictionary Learning: %f' %t)

    print('Evaluating cost function...')
    lparam = _extract_lasso_param(param)
    alpha = spams.lasso(X,D = D,**lparam)
    xd = X - D * alpha
    R = np.mean(0.5 * (xd * xd).sum(axis=0) + param['lambda1'] * np.abs(alpha).sum(axis=0))
    print("objective function: %f" %R)
    #* ? DISPLAY

    ############# SECOND EXPERIMENT  ##################
    tic = time.time()
    D = spams.trainDL(X,**param)
    tac = time.time()
    t = tac - tic
    print('time of computation for Dictionary Learning: %f' %t)
    print('Evaluating cost function...')
    alpha = spams.lasso(X,D = D,**lparam)
    xd = X - D * alpha
    R = np.mean(0.5 * (xd * xd).sum(axis=0) + param['lambda1'] * np.abs(alpha).sum(axis=0))
    print("objective function: %f" %R)

    #* ? DISPLAY

    return None
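# `_extract_lasso_param` used above comes from the SPAMS test utilities and is
# not shown in this collection. A plausible minimal stand-in (an assumption,
# not the library's exact code) keeps only the keys spams.lasso understands:
def _extract_lasso_param_sketch(f_param):
    keep = ('L', 'lambda1', 'pos', 'mode', 'numThreads')  # assumed key set
    l_param = {'return_reg_path': False}
    l_param.update({k: f_param[k] for k in keep if k in f_param})
    return l_param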
Example #4
def smaf(X, d, lda1, lda2, maxItr=10, UW=None, posW=False, posU=True, use_chol=False, module_lower=500,
         activity_lower=5, donorm=False, mode=1, mink=5, U0=[], U0_delta=0.1, doprint=False):
    # use Cholesky when we expect a very sparse result
    # this tends to happen more on the full vs subsampled matrices
    if UW is None:
        U, W = spams.nmf(np.asfortranarray(X), return_lasso=True, K=d, numThreads=THREADS)
        W = np.asarray(W.todense())
    else:
        U, W = UW
    Xhat = U.dot(W)
    Xnorm = np.linalg.norm(X) ** 2 / X.shape[1]
    for itr in range(maxItr):
        if mode == 1:
            # In this mode the ldas correspond to an approximate desired fit
            # Higher lda will be a worse fit, but will result in a sparser sol'n
            U = spams.lasso(np.asfortranarray(X.T), D=np.asfortranarray(W.T),
                            lambda1=lda2 * Xnorm, mode=1, numThreads=THREADS, cholesky=use_chol, pos=posU)
            U = np.asarray(U.todense()).T
        elif mode == 2:
            if len(U0) > 0:
                U = projected_grad_desc(W.T, X.T, U.T, U0.T, lda2, U0_delta, maxItr=400)
                U = U.T
            else:
                U = spams.lasso(np.asfortranarray(X.T), D=np.asfortranarray(W.T),
                                lambda1=lda2, lambda2=0.0, mode=2, numThreads=THREADS, cholesky=use_chol, pos=posU)
                U = np.asarray(U.todense()).T
        if donorm:
            U = U / np.linalg.norm(U, axis=0)
            U[np.isnan(U)] = 0
        if mode == 1:
            wf = (1 - lda2)
            W = sparse_decode(X, U, lda1, worstFit=wf, mink=mink)
        elif mode == 2:
            if len(U0) > 0:
                W = projected_grad_desc(U, X, W, [], lda1, 0., nonneg=posW, maxItr=400)
            else:
                W = spams.lasso(np.asfortranarray(X), D=np.asfortranarray(U),
                                lambda1=lda1, lambda2=1.0, mode=2, numThreads=THREADS, cholesky=use_chol, pos=posW)
                W = np.asarray(W.todense())
        Xhat = U.dot(W)
        module_size = np.average([np.exp(entropy(u)) for u in U.T if u.sum() > 0])
        activity_size = np.average([np.exp(entropy(abs(w))) for w in W.T])
        if doprint:
            print(distance.correlation(X.flatten(), Xhat.flatten()), module_size, activity_size, lda1, lda2)
        if module_size < module_lower:
            lda2 /= 2.
        if activity_size < activity_lower:
            lda2 /= 2.
    return U, W
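# Hedged usage sketch for smaf (an illustration, not from the source project).
# mode=2 with the default U0 exercises only the spams.lasso branches; it still
# needs the module-level THREADS constant and the entropy import (scipy.stats)
# that smaf assumes.
import numpy as np

THREADS = 4                                    # assumed module-level constant
X_demo = np.random.rand(100, 500)              # hypothetical nonnegative data
U_demo, W_demo = smaf(X_demo, d=20, lda1=0.2, lda2=0.2, maxItr=3, mode=2)
print(U_demo.shape, W_demo.shape)              # (100, 20), (20, 500)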
def get_concentration(
    Im,
    stain_matrix,
    lamda=0.01,
):
    return sp.lasso(Im.T, D=stain_matrix.T, mode=2, lambda1=lamda,
                    pos=True).toarray().T
Example #6
def fit_strf_lasso(input, output, lags, lambda1=1.0, lambda2=1.0, num_threads=10):

    #convert the input into a toeplitz-like matrix
    stime = time.time()
    A = make_toeplitz(input, lags, include_bias=True, fortran_style=True)
    etime = time.time() - stime
    print('[fit_strf_lasso] Time to make Toeplitz matrix: %d seconds' % etime)

    fy = np.asfortranarray(output.reshape(len(output), 1))
    #print 'fy.shape=',fy.shape
    #print 'fA.shape=',fA.shape

    #fit the STRF
    stime = time.time()
    fit_params = spams.lasso(fy, A, mode=2, lambda1=lambda1, lambda2=lambda2, numThreads=num_threads)
    etime = time.time() - stime
    print('[fit_strf_lasso] Time to fit STRF: %d seconds' % etime)

    #reshape the STRF so that it makes sense
    nt = input.shape[0]
    nf = input.shape[1]
    d = len(lags)
    strf = np.array(fit_params[:-1].todense()).reshape([nf, d])
    bias = fit_params[-1].todense()[0, 0]

    return strf,bias
def dictEval( X, D, param, lam=None, dsfactor=None, patchSize=None, patchFnGrp=None, kind='avg'):
    if dsfactor is not None:
        X_useme,dsz  = downsamplePatchList( X, patchSize, dsfactor, kind=kind )
        D_useme,Ddsz = downsamplePatchList( D, patchSize, dsfactor, kind=kind )

        if patchFnGrp:
            patchFnGrp.create_dataset('patchesDown', data=X_useme)
    else:
        X_useme = X
        D_useme = D

    if lam is None:
        lam = param['lambda1']

    alpha = spams.lasso( np.asfortranarray(X_useme), D = np.asfortranarray(D_useme), **param )
    Xre = ( D * alpha )

    if patchFnGrp:
        patchFnGrp.create_dataset('patchesRecon', data=Xre)

    xd = X - Xre 

    R = np.mean( (xd * xd).sum(axis=0))

    if lam > 0:
        print "   dictEval - lambda: ", lam
        R = R + lam * np.mean( np.abs(alpha).sum(axis=0))

    return R
Example #8
    def transform(self, X):
        '''
        Compute the loadings of X using the learned components

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
            data matrix

        Returns
        -------
        coefs : array, shape (n_samples, n_components)
            transformed data
        '''
        coefs = spams.lasso(
            # data
            X=np.asfortranarray(X.T),
            # dict
            D=self.components_.T,
            # pos
            pos=True,
            lambda1=0,
            lambda2=0,
        )
        return coefs.toarray().T
Example #9
    def project_data(self, X, agreg=1):
        """
        Projects data on the model dictionary

        Parameters
        ----------
        X : array, shape (n_samples, n_features)
            Matrix to project onto the dictionary

        agreg : int
            Specifies size of the groups to average after projection

        Returns
        -------
        projections: array, shape(n_samples/agreg, n_components)
            Projection matrix
        """
        alpha_mat = spams.lasso(np.asfortranarray(np.transpose(X)),
                                D=np.asfortranarray(self.D),
                                lambda1=self.lambda1,
                                lambda2=self.lambda2,
                                mode=2,
                                pos=self.pos)
        alpha_mat = alpha_mat.toarray()
        if agreg > 1:
            return np.mean(np.reshape(alpha_mat,
                                      (alpha_mat.shape[0] // agreg, agreg, -1)),
                           axis=1)
        else:
            return alpha_mat
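# Hedged standalone sketch (not from the source class) of the `agreg` branch in
# project_data above: average every `agreg` consecutive rows via reshape-and-mean.
import numpy as np

alpha_mat = np.arange(12.0).reshape(6, 2)     # hypothetical: 6 samples, 2 components
agreg = 3
grouped = np.mean(alpha_mat.reshape(alpha_mat.shape[0] // agreg, agreg, -1), axis=1)
print(grouped.shape)                           # (2, 2): rows averaged in groups of 3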
Example #10
    def fit(self, y, dirs, KERNELS, params):
        nD = dirs.shape[0]
        if nD > 1:  # model works only with one direction
            raise RuntimeError('"%s" model requires exactly 1 orientation' %
                               self.name)

        n1 = len(self.d_perps)
        n2 = len(self.d_isos)
        nATOMS = n1 + n2
        if nATOMS == 0:  # empty dictionary
            return [0, 0], None, None, None

        # prepare DICTIONARY from dir and lookup tables
        i1, i2 = amico.lut.dir_TO_lut_idx(dirs[0])
        A = np.zeros((len(y), nATOMS), dtype=np.float64, order='F')
        A[:, :(nD * n1)] = KERNELS['D'][:, i1, i2, :].T
        A[:, (nD * n1):] = KERNELS['CSF'].T

        # fit
        x = spams.lasso(np.asfortranarray(y.reshape(-1, 1)), D=A,
                        **params).todense().A1

        # return estimates
        v = x[:n1].sum() / (x.sum() + 1e-16)

        # checking that there is more than 1 isotropic compartment
        if self.type == 'Mouse':
            v_blood = x[n1] / (x.sum() + 1e-16)
            v_csf = x[n1 + 1] / (x.sum() + 1e-16)

            return [v, 1 - v, v_blood, v_csf], dirs, x, A

        else:
            return [v, 1 - v], dirs, x, A
Example #11
    def learn_sparse_coeffs(self, matrix, D, params, weighting=None):
        if weighting is not None:
            alphas = spams.lassoWeighted(matrix, D=D, W=weighting, **params)
        else:
            alphas = spams.lasso(matrix, D=D, **params)
        #logging.info('Alphas shape and sparsity:\t{}\t{:.4f}'.format(alphas.shape, 100*alphas.nnz/np.prod(alphas.shape)))
        return alphas
def cod2sparseLASSO(dictionary, feat, lambdaLasso=0.35, numThreads=-1):

    init_time = time.time()

    X_ = np.asfortranarray(feat)
    D_ = np.asfortranarray(dictionary)

    param = {
        'lambda1': lambdaLasso,  # regularization parameter of the penalized formulation
        'numThreads':
        numThreads,  # number of processors/cores to use; the default choice is -1
        # and uses all the cores of the machine
        'pos': False,
        'mode': spams.PENALTY
    }  # penalized formulation

    print "Using LASSO for sparse codification. Please wait..."

    alpha = spams.lasso(X_, D_, return_reg_path=False, verbose=True, **param)

    end_time = time.time()

    t = end_time - init_time

    print "%f signals processed per second\n" % (float(alpha.shape[1]) / t)

    print "Total time: ", t, "seconds"

    return alpha.todense()
Example #13
    def get_concentrations(self, image, stain_matrix):
        """Get concentration matrix.

        Parameters
        ----------
        image: array_like
               rgb

        stain_matrix: array_like
                      2 x 3 stain matrix

        Returns
        -------
        concentration: array_like
                       Npix x 2 matrix for an N x M image (Npix = N * M).
        """
        OD = RGB2OD(image).reshape((-1, 3))
        if self.maskout_white:
            nonwhite_mask = get_nonwhite_mask(image,
                                              self.nonwhite_threshold).reshape(
                                                  (-1, ))
            OD = OD[nonwhite_mask]
        coefs = (spams.lasso(OD.T,
                             D=stain_matrix.T,
                             mode=2,
                             lambda1=self.lambda1,
                             pos=True).toarray().T)
        return coefs
def get_concentrations(I, stain_matrix, **kwargs):
    """
    Estimate concentration matrix given an image, stain matrix and relevant method parameters.
    """
    OD = convert_RGB_to_OD(I).reshape((-1, 3))
    lasso_regularizer = kwargs.get('lasso_regularizer', 0.01)
    return spams.lasso(X=OD.T, D=stain_matrix.T, mode=2, lambda1=lasso_regularizer, pos=True).toarray().T
Example #16
    def get_concentrations(I, stain_matrix, lamda=0.01):
        """
        Get the concentration matrix. Suppose the input image is H x W x 3 (uint8). Define Npix = H * W.
        Then the concentration matrix is Npix x 2 (or we could reshape to H x W x 2).
        The first element of each row is the Hematoxylin concentration.
        The second element of each row is the Eosin concentration.

        We do this by 'solving' OD = C*S (Matrix product) where OD is optical density (Npix x 3),\
        C is concentration (Npix x 2) and S is stain matrix (2 x 3).
        See docs for spams.lasso.

        We restrict the concentrations to be positive and penalise very large concentration values,\
        so that background pixels (which can not easily be expressed in the Hematoxylin-Eosin basis) have \
        low concentration and thus appear white.

        :param I: Image. A np array HxWx3 of type uint8.
        :param stain_matrix: a 2x3 stain matrix. First row is Hematoxylin stain vector, second row is Eosin stain vector.
        :return:
        """
        # param = {
        #     'lambda1': 0.15,  # not more than 20 non-zeros coefficients
        #     'numThreads': -1,  # number of processors/cores to use, the default choice is -1
        #     # and uses all the cores of the machine
        #     'mode': spams.PENALTY}  # penalized formulation
        # alpha = spams.lasso(X,D = D,return_reg_path = False,**param)

        OD = mu.RGB_to_OD(I).reshape((-1, 3))  # convert to optical density and flatten to (H*W)x3.
        return spams.lasso(OD.T, D=stain_matrix.T, mode=2, numThreads=6, lambda1=lamda, pos=True).toarray().T
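# Hedged end-to-end sketch of the concentration step (an illustration, not the
# project's code): the RGB-to-OD conversion is written out inline and the stain
# matrix holds assumed, standard-looking H&E vectors.
import numpy as np
import spams

I_demo = np.random.randint(1, 255, size=(32, 32, 3)).astype(np.uint8)  # hypothetical tile
stain_matrix_demo = np.array([[0.65, 0.70, 0.29],   # assumed Hematoxylin vector
                              [0.07, 0.99, 0.11]])  # assumed Eosin vector
OD_demo = -np.log(I_demo.reshape((-1, 3)) / 255.0)  # optical density, (H*W) x 3
C_demo = spams.lasso(np.asfortranarray(OD_demo.T),
                     D=np.asfortranarray(stain_matrix_demo.T),
                     mode=2, lambda1=0.01, pos=True).toarray().T
print(C_demo.shape)                                  # (1024, 2): H and E per pixel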
Example #18
    def coding_series(segment_list, D, lambda1):
        """Represent given series with learned dictionary
        Args:
            segment_list(list): each item contains m subsequences which is
                sliced from original time series instance
            D(numpy 2d-array): learning dictionary
            lambda1(float): lambda coefficient in sparse coding,
                |X-D*a|^2 + lambda*|a|^1
                For more information, see spams packages:
                http://spams-devel.gforge.inria.fr/doc-python/html/doc_spams005.html#sec15
        Returns:
            bow_data(numpy array): transformed data
        """
        k = D.shape[1]
        bow_data = numpy.zeros([len(segment_list), k])

        # BoW for data
        for index, item in enumerate(segment_list):
            # Log lasso information
            log_msg = "Solve lasso problem for %d series" % (index)
            logger.info(log_msg)
            code = spams.lasso(numpy.asfortranarray(item),
                               D,
                               lambda1=lambda1,
                               pos=True)
            code = numpy.sum(code.todense(), axis=1)
            bow_data[index:index + 1, :] += code.reshape([1, k])
            div = numpy.linalg.norm(bow_data[index, :])
            if div > 0:
                bow_data[index, :] = bow_data[index, :] / div
            # Log bow result for debug
            log_msg = "%d series: " % (index)
            logger.debug(log_msg + str(bow_data[index, :]))
        return bow_data
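# Hedged standalone sketch of the bag-of-words step above (illustrative; D_demo
# is a random stand-in for a learned dictionary):
import numpy
import spams

D_demo = numpy.asfortranarray(numpy.random.rand(16, 8))      # 8 atoms of length 16
segment = numpy.asfortranarray(numpy.random.rand(16, 10))    # 10 subsequences
code = spams.lasso(segment, D_demo, lambda1=0.1, pos=True)
hist = numpy.asarray(numpy.sum(code.todense(), axis=1)).ravel()  # pooled histogram
div = numpy.linalg.norm(hist)
if div > 0:
    hist = hist / div                                        # L2-normalize, as above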
def test_cd():
    np.random.seed(0)
    X = np.asfortranarray(np.random.normal(size = (64,100)))
    X = np.asfortranarray(X / np.tile(np.sqrt((X*X).sum(axis=0)),(X.shape[0],1)),dtype=myfloat)
    D = np.asfortranarray(np.random.normal(size = (64,100)))
    D = np.asfortranarray(D / np.tile(np.sqrt((D*D).sum(axis=0)),(D.shape[0],1)),dtype=myfloat)
    # parameter of the optimization procedure are chosen
    lambda1 = 0.015
    mode = spams.PENALTY
    tic = time.time()
    alpha = spams.lasso(X,D,lambda1 = lambda1,mode = mode,numThreads = 4)
    tac = time.time()
    t = tac - tic
    xd = X - D * alpha
    E = np.mean(0.5 * (xd * xd).sum(axis=0) + lambda1 * np.abs(alpha).sum(axis=0))
    print("%f signals processed per second for LARS" %(X.shape[1] / t))
    print('Objective function for LARS: %g' %E)
    tol = 0.001
    itermax = 1000
    tic = time.time()
#    A0 = ssp.csc_matrix(np.empty((alpha.shape[0],alpha.shape[1])))
    A0 = ssp.csc_matrix((alpha.shape[0],alpha.shape[1]),dtype=myfloat)
    alpha2 = spams.cd(X,D,A0,lambda1 = lambda1,mode = mode,tol = tol, itermax = itermax,numThreads = 4)
    tac = time.time()
    t = tac - tic
    print("%f signals processed per second for CD" %(X.shape[1] / t))
    xd = X - D * alpha2
    E = np.mean(0.5 * (xd * xd).sum(axis=0) + lambda1 * np.abs(alpha2).sum(axis=0))
    print('Objective function for CD: %g' %E)
    print('With Random Design, CD can be much faster than LARS')

    return None
Example #20
def _compute_codes(X, D, sc_mode, W, lambda1, lambda2, 
                         n_jobs, pos_coef, **args):
    """ Deprecated for very-large datasets! Use sparse_decode instead.
    """                               
    X = np.asfortranarray(X)
    D = np.asfortranarray(D)
     
    gram = None
    cov = None
    if W is None:
        gram = np.dot(D.T, D) 
        gram = np.asfortranarray(gram)
        cov  = np.dot(D.T, X)  
        cov  = np.asfortranarray(cov)
    
    
    if sc_mode in [0, 1, 2]:
        if W is None:
            A = spams.lasso(X , D, gram, cov, lambda1=lambda1, lambda2=lambda2,
                            numThreads=n_jobs, mode=sc_mode, pos=pos_coef)
        else:
            A = spams.lassoWeighted(X , D, W, lambda1=lambda1,mode=sc_mode, 
                                    pos=pos_coef, numThreads=n_jobs)
            
    else:
        L        = lambda1 if sc_mode == 3 else None
        eps      = lambda1 if sc_mode == 4 else None
        lambda_1 = lambda1 if sc_mode == 5 else None
        A = spams.omp(X, D, L, eps, lambda_1, numThreads=n_jobs)
        
    return A.toarray()
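# Hedged usage sketch for _compute_codes (illustrative shapes; assumes the
# module's numpy/spams imports):
import numpy as np

X_demo = np.random.randn(20, 30)    # hypothetical signals, one per column
D_demo = np.random.randn(20, 10)    # hypothetical dictionary, one atom per column
A_demo = _compute_codes(X_demo, D_demo, sc_mode=2, W=None,
                        lambda1=0.1, lambda2=0.0, n_jobs=1, pos_coef=False)
print(A_demo.shape)                 # (10, 30): one sparse code per signal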
Example #21
    def fit( self, y, dirs, KERNELS, params ) :
        nD = dirs.shape[0]
        n1 = len(self.Rs)
        n2 = len(self.ICVFs)
        n3 = len(self.d_ISOs)

        # prepare DICTIONARY from dirs and lookup tables
        A = np.zeros( (len(y), nD*(n1+n2)+n3 ), dtype=np.float64, order='F' )
        o = 0
        for i in range(nD) :
            i1, i2 = amico.lut.dir_TO_lut_idx( dirs[i] )
            A[:,o:(o+n1)] = KERNELS['wmr'][:,i1,i2,:].T
            o += n1
        for i in range(nD) :
            i1, i2 = amico.lut.dir_TO_lut_idx( dirs[i] )
            A[:,o:(o+n2)] = KERNELS['wmh'][:,i1,i2,:].T
            o += n2
        A[:,o:] = KERNELS['iso'].T

        # empty dictionary
        if A.shape[1] == 0 :
            return [0, 0, 0], None, None, None

        # fit
        x = spams.lasso( np.asfortranarray( y.reshape(-1,1) ), D=A, **params ).todense().A1

        # return estimates
        f1 = x[ :(nD*n1) ].sum()
        f2 = x[ (nD*n1):(nD*(n1+n2)) ].sum()
        v = f1 / ( f1 + f2 + 1e-16 )
        xIC = x[:nD*n1].reshape(-1,n1).sum(axis=0)
        a = 1E6 * 2.0 * np.dot(self.Rs,xIC) / ( f1 + 1e-16 )
        d = (4.0*v) / ( np.pi*a**2 + 1e-16 )
        return [v, a, d], dirs, x, A
Example #22
def get_concentrations(I, stain_matrix, regularizer=0.01):
    OD = convert_RGB_to_OD(I).reshape((-1, 3))
    # sparse optimization
    return spams.lasso(X=OD.T,
                       D=stain_matrix.T,
                       mode=2,
                       lambda1=regularizer,
                       pos=True).toarray().T
Example #23
    def fit(self, y, dirs, KERNELS, params):
        singleb0 = len(y) == (1 + self.scheme.dwi_count)
        nD = dirs.shape[0]
        if nD != 1:
            raise RuntimeError('"%s" model requires exactly 1 orientation' %
                               self.name)

        # prepare DICTIONARY from dir and lookup tables
        nWM = len(self.IC_ODs) * len(self.IC_VFs)
        nATOMS = nWM + 1
        if self.isExvivo:
            nATOMS += 1
        i1, i2 = amico.lut.dir_TO_lut_idx(dirs[0])
        A = np.ones((len(y), nATOMS), dtype=np.float64, order='F')
        A[:, :nWM] = KERNELS['wm'][:, i1, i2, :].T
        A[:, -1] = KERNELS['iso']

        # estimate CSF partial volume (and isotropic restriction, if exvivo) and remove from signal
        x, _ = scipy.optimize.nnls(A, y)
        yy = y - x[-1] * A[:, -1]
        if self.isExvivo:
            yy = yy - x[-2] * A[:, -2]
        yy[yy < 0] = 0

        # estimate IC and EC compartments and promote sparsity
        if singleb0:
            An = A[1:, :nWM] * KERNELS['norms']
            yy = yy[1:].reshape(-1, 1)
        else:
            An = A[self.scheme.dwi_idx, :nWM] * KERNELS['norms']
            yy = yy[self.scheme.dwi_idx].reshape(-1, 1)
        x = spams.lasso(np.asfortranarray(yy),
                        D=np.asfortranarray(An),
                        **params).todense().A1

        # debias coefficients
        x = np.append(x, 1)
        if self.isExvivo:
            x = np.append(x, 1)
        idx = x > 0
        x[idx], _ = scipy.optimize.nnls(A[:, idx], y)

        # return estimates
        xx = x / (x.sum() + 1e-16)
        xWM = xx[:nWM]
        fISO = xx[-1]
        xWM = xWM / (xWM.sum() + 1e-16)
        f1 = np.dot(KERNELS['icvf'], xWM)
        f2 = np.dot((1.0 - KERNELS['icvf']), xWM)
        v = f1 / (f1 + f2 + 1e-16)
        k = np.dot(KERNELS['kappa'], xWM)
        od = 2.0 / np.pi * np.arctan2(1.0, k)

        if self.isExvivo:
            return [v, od, fISO, xx[-2]], dirs, x, A
        else:
            return [v, od, fISO], dirs, x, A
Example #24
def get_concentrations(I, stain_matrix, lamda=0.01):
    """
    Get concentrations, an Npix x 2 matrix
    :param I:
    :param stain_matrix: a 2x3 stain matrix
    :return:
    """
    OD = RGB_to_OD(I).reshape((-1, 3))
    return spams.lasso(OD.T, D=stain_matrix.T, mode=2, lambda1=lamda, pos=True).toarray().T
Example #25
def sparse_decode(Y, D, lda):
    Ynorm = np.linalg.norm(Y)**2 / Y.shape[1]
    W = spams.lasso(np.asfortranarray(Y),
                    np.asfortranarray(D),
                    lambda1=lda * Ynorm,
                    mode=1,
                    numThreads=THREADS,
                    pos=False)
    W = np.asarray(W.todense())
    return W
    def sparse_coding_lasso(self, i, vectors):
        A = np.zeros((self.K, self.K))
        B = np.zeros((self.n, self.K))
        inds = np.arange(i, i + self.chunk_size)
        v = vectors[:, inds]
        a = spams.lasso(v, D=self.D, **self.lasso_params).toarray()
        for k in np.arange(len(inds)):
            A += np.outer(a[:, k], a[:, k])
            B += np.outer(v[:, k], a[:, k])
        return A, B
Example #27
    def run(self, n_iter):
        y = np.expand_dims(np.asfortranarray(self.y), axis=1)
        if scipy.sparse.issparse(self.X):
            W0 = np.zeros((self.X.shape[1], 1), dtype=y.dtype, order="F")
            self.w = fistaFlat(y, self.X, W0, **self.solver_parameter,
                               regul='l1', it0=10000, loss='square', tol=1e-12,
                               max_it=n_iter).flatten()
        else:
            self.w = lasso(y, D=self.X, L=n_iter,
                           **self.solver_parameter).toarray().flatten()
Example #28
    def __call__(self, X, D):
        import spams
        lasso_params = {
            'lambda1': self._lambda,
            'lambda2': 0,
            'numThreads': self.n_jobs,
            'mode': 2}

        return np.array(spams.lasso(np.asfortranarray(X, np.float64), D=np.asfortranarray(D, np.float64),
                                    return_reg_path=False, **lasso_params).todense())
    def predict(self, imgs, neuron_idx=None, penalty_lambda=None, algorithm=None):
        """ get neuron response to images

        Parameters
        ----------
        imgs

        Returns
        -------

        """
        imgs_array = make_2d_input_matrix(imgs)
        if neuron_idx is None:
            dict_to_use = self.w
        else:
            dict_to_use = self.w[neuron_idx:(neuron_idx + 1), :]

        if penalty_lambda is None:
            _lambda = self._lambda
        else:
            _lambda = penalty_lambda
        assert np.isscalar(_lambda)

        if algorithm is None:
            _algorithm = self.algorithm
        else:
            _algorithm = algorithm


        # let's call sparse encoder to do it!
        # no scaling at all!
        # having /nsample in objective function is exactly the same as solving each problem separately.
        # the underlying function called is elastic net, and that function fits each column of y separately.
        # each column of y is each stimulus. This is because when passing imgs_array and dict_to_use to Elastic Net,
        # they are transposed. That is, y = imgs_array.T
        #
        # in the code there's also a subtle detail, where alpha is divided by number of pixels in each stimulus.
        # I haven't figured that out, but seems that's simply a detail for using ElasticNet to do this.
        if _algorithm in ['lasso_lars', 'lasso_cd']:
            response = sparse_encode(imgs_array, dict_to_use, alpha=_lambda, algorithm=_algorithm, max_iter=10000)
        else:
            assert _algorithm == 'spams'
            #print(imgs_array.dtype, dict_to_use.dtype, _lambda.shape)
            response = lasso(np.asfortranarray(imgs_array.T), D=np.asfortranarray(dict_to_use.T), lambda1=_lambda,
                             mode=2)
            response = response.T.toarray()  # because lasso returns sparse matrix...
        # this can be used for debugging, for comparison with SPAMS.
        # notice here I give per sample cost.
        self.last_cost_recon = 0.5 * np.sum((imgs_array - np.dot(response, dict_to_use)) ** 2, axis=1)
        self.last_cost_sparsity = _lambda * np.abs(response).sum(axis=1)
        assert self.last_cost_sparsity.shape == (imgs_array.shape[0], )
        assert self.last_cost_recon.shape == (imgs_array.shape[0],)
        self.last_cost = np.mean(self.last_cost_recon + self.last_cost_sparsity)

        return response
Example #30
    def forward(self, x: Tensor):
        # x is B x input_size matrix to reconstruct
        # init_guess is B x output_size matrix code guess
        if self.solver_type == 'spams':
            raise RuntimeError('spams deprecated')
            from spams import lasso  # initially I put this line later; and it failed.
            x_cpu = x.data.cpu().numpy()
            assert x_cpu.ndim == 2 and x_cpu.shape[1] == self.linear_module.out_features

            # init_guess_shape = (x.shape[0], self.linear_module.in_features)
            # if init_guess is not None:
            #     init_guess = init_guess.data.cpu().numpy()
            # else:
            #     init_guess = np.zeros(init_guess_shape)
            # assert init_guess.shape == init_guess_shape

            # spams does not need any init guess.
            response = lasso(np.asfortranarray(x_cpu.T),
                             D=np.asfortranarray(self.linear_module.weight.data.cpu().numpy()),
                             lambda1=self.lam / 2,  # so to remove 1/2 factor for reconstruction loss.
                             mode=2)
            response = response.T.toarray()  # because lasso returns sparse matrix...
            response_cost = abs(response) * self.lam
            if self.size_average_l1:
                response_cost = float(response_cost.mean())
            else:
                response_cost = float(response_cost.sum())
            # then pass response into self.linear_module
            response = tensor(response, dtype=torch.float32)
            if x.is_cuda:
                response = response.cuda()

            recon = self.linear_module(response)

            # return reconstruction loss, so that we can do gradient descent on weight later.
            return self.cost(recon, x) + response_cost
        elif self.solver_type == 'fista_custom':
            # using my hand written fista code, which should reproduce the lua code exactly (up to machine precision).
            xinit = torch.zeros(x.size()[0], self.linear_module.in_features)
            if x.is_cuda:
                xinit = xinit.cuda()
            with torch.no_grad():
                response, L_new = fista_ls(partial(self.f_fista, x.data), self.g_fista, self.pl, xinit,
                                           verbose=False, L=self.L)
            # i think this is kind of speed up trick.
            # TODO: in FistaL1.lua (not LinearFistaL1.lua),
            # there's another cap on L.
            if L_new == self.L:
                self.L = L_new / 2
            else:
                self.L = L_new
            recon = self.linear_module(response)
            return self.cost(recon, x) + self.g_fista(response), response
        else:
            raise NotImplementedError
Example #31
    def fit( self, y, dirs, KERNELS, params ) :
        singleb0 = len(y) == (1+self.scheme.dwi_count)
        nD = dirs.shape[0]
        if nD != 1 :
            raise RuntimeError( '"%s" model requires exactly 1 orientation' % self.name )

        # prepare DICTIONARY from dir and lookup tables
        nWM = len(self.IC_ODs)*len(self.IC_VFs)
        nATOMS = nWM + 1
        if self.isExvivo :
            nATOMS += 1
        i1, i2 = amico.lut.dir_TO_lut_idx( dirs[0] )
        A = np.ones( (len(y), nATOMS), dtype=np.float64, order='F' )
        A[:,:nWM] = KERNELS['wm'][:,i1,i2,:].T
        A[:,-1]  = KERNELS['iso']
        

        # estimate CSF partial volume (and isotropic restriction, if exvivo) and remove from signal
        x, _ = scipy.optimize.nnls( A, y )
        yy = y - x[-1]*A[:,-1]
        if self.isExvivo :
            yy = yy - x[-2]*A[:,-2]
        yy[ yy<0 ] = 0

        # estimate IC and EC compartments and promote sparsity
        if singleb0:
            An = A[1:, :nWM] * KERNELS['norms']
            yy = yy[1:].reshape(-1,1)
        else:
            An = A[ self.scheme.dwi_idx, :nWM ] * KERNELS['norms']
            yy = yy[ self.scheme.dwi_idx ].reshape(-1,1)
        x = spams.lasso( np.asfortranarray(yy), D=np.asfortranarray(An), **params ).todense().A1

        # debias coefficients
        x = np.append( x, 1 )
        if self.isExvivo :
            x = np.append( x, 1 )
        idx = x>0
        x[idx], _ = scipy.optimize.nnls( A[:,idx], y )

        # return estimates
        xx = x / ( x.sum() + 1e-16 )
        xWM  = xx[:nWM]
        if self.isExvivo :
            fISO = xx[-2]
        else :
            fISO = xx[-1]
        xWM = xWM / ( xWM.sum() + 1e-16 )
        f1 = np.dot( KERNELS['icvf'], xWM )
        f2 = np.dot( (1.0-KERNELS['icvf']), xWM )
        v = f1 / ( f1 + f2 + 1e-16 )
        k = np.dot( KERNELS['kappa'], xWM )
        od = 2.0/np.pi * np.arctan2(1.0,k)

        return [v, od, fISO], dirs, x, A
Example #33
def get_concentrations(I, stain_matrix, regularizer=0.01):
    """
    Estimate concentration matrix given an image and stain matrix.

    :param I:
    :param stain_matrix:
    :param regularizer:
    :return:
    """
    OD = convert_RGB_to_OD(I).reshape((-1, 3))
    return spams.lasso(X=OD.T, D=stain_matrix.T, mode=2, lambda1=regularizer, pos=True).toarray().T
Example #35
def write_part(k):
    global nz_code
    print("start " + str(k))
    sup = min(nz - 1, chunk_size * (k + 1))

    print("compute matrix")

    inds = global_inds[chunk_size * k:sup]
    size_of_matrix = sup - chunk_size * k
    print(size_of_matrix)

    abundance_matrix = np.zeros((n, size_of_matrix), dtype=np.float32)

    for i in range(n0, n + n0):
        chunk = np.zeros(size_of_matrix)
        inds_of_values = (nzis[i] >= global_inds[chunk_size * k]) * (
            nzis[i] < global_inds[sup])
        inds_absolute = nzis[i][inds_of_values]
        chunk[inverted_index[inds_absolute] -
              inverted_index[global_inds[chunk_size *
                                         k]]] = values[i][inds_of_values]
        abundance_matrix[i - n0, :] = chunk / norm[inds]

    print("matrix computed")
    print(abundance_matrix)

    x = np.asfortranarray(abundance_matrix)

    a = spams.lasso(x, D=D, **lparam)

    nz_chunk = np.shape(a.data)[0]
    indices[nz_code:(nz_chunk + nz_code)] = a.indices[:]
    indptr[chunk_size * k:(sup + 1)] = a.indptr[:] + indptr[chunk_size * k]
    data[nz_code:(nz_chunk + nz_code)] = a.data[:]

    print(nz_code, nz_chunk)
    nz_code = nz_code + nz_chunk

    a = a.toarray()

    print(np.sum(a > 0, 0))

    clusters = np.argsort(a, axis=0)[-clusters_nb:][::-1]

    mask = a[clusters, np.arange(size_of_matrix)]
    mask = mask > thres

    clusters[~mask] = -1
    alpha[:clusters_nb, inds] = clusters + 1
    alpha.flush()

    print(str(i) + " ok !")
    return 0
def active_support_elastic_net(X, y, alpha, tau=1.0, algorithm='spams', support_init='knn', 
                               support_size=100, maxiter=40):
    n_samples = X.shape[0]

    if n_samples <= support_size:  # skip active support search for small scale data
        supp = np.arange(n_samples, dtype=int)  # this results in the following iteration to converge in 1 iteration
    else:    
        if support_init == 'L2':
            L2sol = np.linalg.solve(np.identity(y.shape[1]) * alpha + np.dot(X.T, X), y.T)
            c0 = np.dot(X, L2sol)[:, 0]
            supp = np.argpartition(-np.abs(c0), support_size)[0:support_size]
        elif support_init == 'knn':
            supp = np.argpartition(-np.abs(np.dot(y, X.T)[0]), support_size)[0:support_size]

    curr_obj = float("inf")
    for _ in range(maxiter):
        Xs = X[supp, :]
        if algorithm == 'spams':
            cs = spams.lasso(np.asfortranarray(y.T), D=np.asfortranarray(Xs.T), 
                             lambda1=tau*alpha, lambda2=(1.0-tau)*alpha)
            cs = np.asarray(cs.todense()).T
        else:
            cs = sparse_encode(y, Xs, algorithm=algorithm, alpha=alpha)
      
        delta = (y - np.dot(cs, Xs)) / alpha
		
        obj = tau * np.sum(np.abs(cs[0])) + (1.0 - tau)/2.0 * np.sum(np.power(cs[0], 2.0)) + alpha/2.0 * np.sum(np.power(delta, 2.0))
        if curr_obj - obj < 1.0e-10 * curr_obj:
            break
        curr_obj = obj
			
        coherence = np.abs(np.dot(delta, X.T))[0]
        coherence[supp] = 0
        addedsupp = np.nonzero(coherence > tau + 1.0e-10)[0]
        
        if addedsupp.size == 0:  # converged
            break

        # Find the set of nonzero entries of cs.
        activesupp = supp[np.abs(cs[0]) > 1.0e-10]  
        
        if activesupp.size > 0.8 * support_size:  # this suggests that support_size is too small and needs to be increased
            support_size = min([round(max([activesupp.size, support_size]) * 1.1), n_samples])
        
        if addedsupp.size + activesupp.size > support_size:
            ord = np.argpartition(-coherence[addedsupp], support_size - activesupp.size)[0:support_size - activesupp.size]
            addedsupp = addedsupp[ord]
        
        supp = np.concatenate([activesupp, addedsupp])
    
    c = np.zeros(n_samples)
    c[supp] = cs
    return c
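# Hedged usage sketch for active_support_elastic_net (illustrative; assumes the
# module's numpy/spams imports; rows are samples, as in the function):
import numpy as np

np.random.seed(0)
X_demo = np.random.randn(200, 50)           # hypothetical data matrix
y_demo = X_demo[0:1, :].copy()
X_demo[0, :] = 0                            # exclude the query from its own support
c_demo = active_support_elastic_net(X_demo, y_demo, alpha=0.1, tau=1.0,
                                    algorithm='spams', support_size=50)
print(np.count_nonzero(c_demo))             # sparse self-expressive coefficients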
Example #37
def lasso_nn(Y, D):
    # lambda1 controls sparsity, numThreads=-1 if you want to use all cores
    params = {
        'lambda1': 0.007,
        'numThreads': -1,
        'mode': spams.PENALTY,
        'pos': True
    }
    alpha = spams.lasso(np.asfortranarray(Y, dtype=np.float32),
                        D=np.asfortranarray(D, dtype=np.float32),
                        **params)
    return alpha.todense()
def elastic_net_subspace_clustering(X, gamma=50.0, gamma_nz=True, tau=1.0, algorithm='lasso_lars', 
                                    active_support=True, active_support_params=None, n_nonzero=50):
    if algorithm in ('lasso_lars', 'lasso_cd') and tau < 1.0 - 1.0e-10:  
        warnings.warn('algorithm {} cannot handle tau smaller than 1. Using tau = 1'.format(algorithm))
        tau = 1.0
		
    if active_support and active_support_params is None:
        active_support_params = {}

    n_samples = X.shape[0]
    rows = np.zeros(n_samples * n_nonzero)
    cols = np.zeros(n_samples * n_nonzero)
    vals = np.zeros(n_samples * n_nonzero)
    curr_pos = 0
 
    for i in progressbar.progressbar(range(n_samples)):
        y = X[i, :].copy().reshape(1, -1)
        X[i, :] = 0
        
        if algorithm in ('lasso_lars', 'lasso_cd', 'spams'):
            if gamma_nz:
                coh = np.delete(np.absolute(np.dot(X, y.T)), i)
                alpha0 = np.amax(coh) / tau  # value for which the solution is zero
                alpha = alpha0 / gamma
            else:
                alpha = 1.0 / gamma

            if active_support:
                c = active_support_elastic_net(X, y, alpha, tau, algorithm, **active_support_params)
            else:
                if algorithm == 'spams':
                    c = spams.lasso(np.asfortranarray(y.T), D=np.asfortranarray(X.T), 
                                    lambda1=tau * alpha, lambda2=(1.0-tau) * alpha)
                    c = np.asarray(c.todense()).T[0]
                else:
                    c = sparse_encode(y, X, algorithm=algorithm, alpha=alpha)[0]
        else:
            warnings.warn("algorithm {} not found".format(algorithm))

        index = np.flatnonzero(c)
        if index.size > n_nonzero:
            # warnings.warn("The number of nonzero entries in sparse subspace clustering exceeds n_nonzero")
            index = index[np.argsort(-np.absolute(c[index]))[0:n_nonzero]]
        rows[curr_pos:curr_pos + len(index)] = i
        cols[curr_pos:curr_pos + len(index)] = index
        vals[curr_pos:curr_pos + len(index)] = c[index]
        curr_pos += len(index)
        
        X[i, :] = y

#   affinity = sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples)) + sparse.csr_matrix((vals, (cols, rows)), shape=(n_samples, n_samples))
    return sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples))
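# Hedged post-processing sketch (standard SSC practice, not part of the function
# above; assumes the module's numpy/spams/progressbar imports): symmetrize the
# self-expressive coefficients and feed them to spectral clustering.
import numpy as np
from sklearn.cluster import SpectralClustering

X_demo = np.random.randn(100, 30)                 # hypothetical samples x features
C_demo = elastic_net_subspace_clustering(X_demo, gamma=50.0, algorithm='spams',
                                         active_support=False)
affinity = np.abs(C_demo.toarray())
affinity = 0.5 * (affinity + affinity.T)          # symmetric affinity matrix
labels = SpectralClustering(n_clusters=2,
                            affinity='precomputed').fit_predict(affinity)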
    def coding_series(self, segment_list, D, a=None, batch=False, iter=-5):
        k = D.shape[1]
        bow_data = numpy.zeros([len(segment_list), k])

        # BoW for data
        for index, item in enumerate(segment_list):
            code = spams.lasso(numpy.asfortranarray(item), D, lambda1=a, pos=True)
            code = numpy.sum(code.todense(), axis=1)
            bow_data[index : index + 1, :] += code.reshape([1, k])
            div = numpy.linalg.norm(bow_data[index, :])
            if div > 0:
                bow_data[index, :] = bow_data[index, :] / div
        return bow_data
    def fit(self, x, y):
        self.coef_ = (
            spams.lasso(
                np.asfortranarray(y, dtype=np.float64),
                D=np.asfortranarray(x, dtype=np.float64),
                pos=self.positive,
                lambda1=self.lambda_,
                **self.params
            )
            .toarray()
            .flatten()
        )
        return self
Example #41
def sparseCoding(X):
    X = np.asfortranarray(X)
    param = { 'K' : NCLUSTER,	# size of the dictionary 
          'lambda1' : 0.15, 
          #'posD' : True,	# dictionary positive constrain
          #'modeD' : 1,	# L1 regularization on D
          'iter' : ITER} # runtime limit 15mins
    
    D = spams.trainDL_Memory(X,**param)
    lparam = _extract_lasso_param(param)
    print('generating codes...')
    alpha = spams.lasso(X,D = D,**lparam)
    return D, alpha
def findSparseRep(A, y, epsilon):
    # A: data in columns
    # y: image to match
    # epsilon: allowed error
    yCol = np.asfortranarray(np.reshape(y, (len(y), 1)), dtype=float)
    A = np.asfortranarray(np.array(A), dtype=float)
    param = {'lambda1' : epsilon,
            'numThreads' : 4,
            'pos' : True,
            'mode' : 1}
    x = sp.lasso(yCol, D=A, **param).toarray()
    residuals = [np.linalg.norm(A[:,i]*x[i] - y) for i in range(0,np.shape(A)[1])]
    return (x, residuals)
Example #43
def sparse_decode(analysis_coefficients,
                  analysis_operator,
                  lasso_scaling_factor=0.1,
                  ridge_sparsity=None,
                  minimum_loss=1.,
                  minimum_ridge_sparsity=0,
                  method='omp'):
    """
    """
    print(analysis_coefficients.shape)
    print(analysis_operator.shape)
    analysis_coefficients_fortran = np.asfortranarray(analysis_coefficients)
    analysis_operator_fortran = np.asfortranarray(analysis_operator)
    coefficient_norm = np.linalg.norm(analysis_coefficients)

    num_bases, dimensionality = analysis_coefficients.shape
    _, latent_dimensionality = analysis_operator.shape
    if method == 'omp':
        if not ridge_sparsity:
            ridge_sparsity = min(num_bases, latent_dimensionality)
        while ridge_sparsity > minimum_ridge_sparsity:
            sparse_synthesis_coefficients = spams.omp(
                analysis_coefficients_fortran,
                analysis_operator_fortran,
                L=ridge_sparsity,
                numThreads=4)
            synthesis_coefficients = np.asarray(
                sparse_synthesis_coefficients.todense())
            loss = 1 - np.linalg.norm(
                analysis_coefficients
                - analysis_operator.dot(synthesis_coefficients))**2 / coefficient_norm**2
            if loss < minimum_loss:
                break
            ridge_sparsity -= 1
    elif method == 'lasso':
        print("Using lasso")
        lasso_penalty = lasso_scaling_factor * coefficient_norm**2 / dimensionality
        print("Lasso penalty is ", lasso_penalty)
        # TODO: set numThreads to some reasonable amount based on mp.cpu_count() and parallelization level
        sparse_synthesis_coefficients = spams.lasso(
            analysis_coefficients_fortran,
            analysis_operator_fortran,
            lambda1=lasso_penalty,
            mode=1,
            numThreads=4,
            pos=False)
        synthesis_coefficients = np.asarray(
            sparse_synthesis_coefficients.todense())

    print(synthesis_coefficients.sum())

    return synthesis_coefficients
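# Hedged usage sketch for sparse_decode (illustrative shapes; assumes the
# module's numpy/spams imports):
import numpy as np

Y_demo = np.random.randn(50, 200)    # hypothetical analysis coefficients
D_demo = np.random.randn(50, 30)     # hypothetical analysis operator
W_demo = sparse_decode(Y_demo, D_demo, lasso_scaling_factor=0.1, method='lasso')
print(W_demo.shape)                   # (30, 200)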
Example #44
    def solve(self, A, y, x0, as_signs):

        #print 'dense_solver: y.shape=',y.shape
        #print 'dense_solver: A.shape=',A.shape
        fy = np.asfortranarray(y.reshape(len(y), 1))
        fA = np.asfortranarray(A)
        #print 'fy.shape=',fy.shape
        #print 'fA.shape=',fA.shape
        xnew = lasso(fy, fA, mode=2, lambda1=self.lambda1, lambda2=self.lambda2)
        xnew = np.array(xnew.todense()).reshape(x0.shape)

        #print 'dense_solver: xnew.shape=',xnew.shape

        return xnew
Example #45
def generateCode(X,D):
    X = np.asfortranarray(X)
    print(X.shape)
    D = np.asfortranarray(D)
    print(D.shape)
    param = { 'K' : NCLUSTER,	# size of the dictionary 
          'lambda1' : 0.05, 
          #'posD' : True,	# dictionary positive constrain
          #'modeD' : 1,	# L1 regularization on D
          'iter' : ITER} # runtime limit 15mins

    lparam = _extract_lasso_param(param)
    print('generating codes...')
    alpha = spams.lasso(X,D = D,**lparam)
    return alpha
def _objective(X,D,param,imgname = None):
    print('Evaluating cost function...')
    lparam = _extract_lasso_param(param)
    alpha = spams.lasso(X,D = D,**lparam)
    # NB : as alpha is sparse, D*alpha is the dot product
    xd = X - D * alpha
    R = np.mean(0.5 * (xd * xd).sum(axis=0) + param['lambda1'] * np.abs(alpha).sum(axis=0))
    print("objective function: %f" %R)
    #* display ?
    if imgname is not None:
        img = spams.displayPatches(D)
        print("IMG %s" %str(img.shape))
        x = np.uint8(img[:,:,0] * 255.)
        image = Image.fromarray(x,mode = 'L')
        image.save("%s.png" %imgname)
def bow(trajectory, D, a, w_len, interval=1):
    k = D.shape[1]
    histogram = numpy.zeros([len(trajectory), k])
    for index, item in enumerate(trajectory):
        temp = list()
        stamp_index = range(0, item.shape[0]-w_len+1, interval)
        for i in stamp_index:
            temp.append(item[i:i+w_len, :1])
        temp = numpy.hstack(temp)
        code = spams.lasso(numpy.asfortranarray(temp), D, lambda1=a, pos=True)
        code = numpy.sum(code.todense(), axis=1)
        histogram[index:index+1, :] += code.reshape([1, k])
        div = numpy.linalg.norm(histogram[index, :])
        if div > 0:
            histogram[index, :] = histogram[index, :] / div
    return histogram
Example #48
    def doFeatureEncoding(self, features):
        """ do feature encoding to original features"""
        encodedFeatures = None
        whitenedFeatures = whiten(features)

        if self._featureEncodingMethod == 'vector-quantization':
            # Vector quantization
            # each row is a feature vector
            index, _ = vq(whitenedFeatures, self._codebook)
            row, _ = features.shape
            col = config.codebookSize
            encodedFeatures = np.zeros((row, col))

            for i in range(len(index)):
                encodedFeatures[i, index[i]] = 1

        elif self._featureEncodingMethod == 'sparse-coding':
            # Sparse coding
            # each column is a feature vector
            X = np.asfortranarray(whitenedFeatures.transpose())
            X = np.asfortranarray(X / np.tile(np.sqrt((X*X).sum(axis=0)),
                                              (X.shape[0],1)),
                                  dtype=X.dtype)
            D = np.asfortranarray(self._codebook.transpose())
            D = np.asfortranarray(D / np.tile(np.sqrt((D*D).sum(axis=0)),
                                              (D.shape[0],1)),
                                  dtype=D.dtype)

            # Parameters of the optimization are chosen
            param = {
                'lambda1': 0.15,
                'numThreads': -1,
                'mode': 0
                }

            alpha = spams.lasso(X, D, **param)   # alpha is a sparse matrix

            alphaShape = (D.shape[1], X.shape[1])
            denseMatrix = csc_matrix(alpha, shape=alphaShape).todense()
            encodedFeatures = np.asarray(denseMatrix).transpose()

        return encodedFeatures
Example #49
    def solve(self, A, y, x0, as_signs):

        #print 'dense_solver: y.shape=',y.shape
        #print 'dense_solver: A.shape=',A.shape
        fy = np.asfortranarray(y.reshape(len(y), 1))
        fA = np.asfortranarray(A)
        #print 'fy.shape=',fy.shape
        #print 'fA.shape=',fA.shape
        if not self.positive:
            xnew = spams.lasso(fy, fA, mode=2, lambda1=self.lambda1, lambda2=self.lambda2)
        else:
            W = np.ones(len(x0))
            params = {'lambda1':self.lambda1, 'pos':True}
            xnew = spams.lassoWeighted(fy, fA, W, **params)

        xnew = np.array(xnew.todense()).reshape(x0.shape)

        #print 'dense_solver: xnew.shape=',xnew.shape

        return xnew
outputPredictOtherAlgo = open(sys.argv[len(sys.argv)-2],'w')
#Compare Output
outputCompare = open(sys.argv[len(sys.argv)-1],'w')



#Calculate
#args for Lasso
alpha1Lambda = 1
compareLambda = 0
alpha_lasso_m1_Ds = []

#a = spams.lasso(X,Ds[i],return_reg_path = False,lambda1 = alpha1Lambda,pos=True,mode=0)
for i in range(numOfDicts):
    tic = time.time()
    alpha_lasso_m1_Ds.append(spams.lasso(X,Ds[i],return_reg_path = False,lambda1 = alpha1Lambda,pos=True,mode=0))
#    alpha_lasso_D = spams.lasso(X,Ds[i],return_reg_path = False,lambda1 = alpha1Lambda,pos=True,mode=0)
#    alpha_lasso_D = spams.lasso(X,Ds[i],return_reg_path = False,lambda1 = compareLambda,pos=True,mode=1)
    
#    alpha_lasso_m1_Ds.append(spams.lasso(X,Ds[i],return_reg_path = False,lambda1 = compareLambda,pos=True,mode=1))
    tac = time.time()
    t = tac - tic
#    print 'alpha_lasoo_m1_D'+str(i)+':'
#    print 'time:'+str(t)

#print alpha_lasso_m1_D1.getcol(0)
#print alpha_lasso_m1_Ds[0].getcol(0)

#save each Dict's Info
allInfos = []
#weight2Page in Dict
def reChooseDict(instNo,currDict,currMean,weightWindowDsCurDict,currStdCompare,currMeanCompare,dicts,testDWindow,numAttrs,numInsts,outputCompare,Lambda):
    outputCompare.write('Re-Choose Dictionary!\n')
    outputPredictSparse.write('Re-Choose Dictionary!\n')
    
    weightWindowDsRe = []
#    tmpsRe = []
    tmpsRe = 0
#    splitByEnterDsRe = []
    splitByEnterDsRe = 0
    numDicts = len(dicts)
    for i in range(numDicts):
        weightWindowDsRe.append(deque())
    dictWeightMeanRe = {}
    testWindow = aL.arffLoader()
    testD = testWindow.fortranArrayPara(testDWindow,numAttrs,numInsts)
    testD = np.asfortranarray(testD / np.tile(np.sqrt((testD*testD).sum(axis=0)),(testD.shape[0],1)))
    for dictNo in range(numDicts):
#        tmpsRe[:] = []
#        splitByEnterDsRe[:] = []
        if(dictNo==currDict):
            weightWindowDsRe[dictNo] = weightWindowDsCurDict
            continue
        alpha_lasso_m1_Ds_batch = spams.lasso(testD,dicts[dictNo],return_reg_path = False,lambda1 = 1,pos=True,mode=0)
        
        for j in range(alpha_lasso_m1_Ds_batch.shape[1]):
#            tmpsRe.append(str(alpha_lasso_m1_Ds_batch.getcol(j)))
            tmpsRe = str(alpha_lasso_m1_Ds_batch.getcol(j))
            #instNo + j is the correct instance number
            outputCompare.write(str(instNo-len(testDWindow)+j)+'-D'+str(dictNo)+':'+tmpsRe+'\n\n')
            #split
            #print tmpsRe[i].split('\n')
#            splitByEnterDsRe.append(tmpsRe[j].split('\n'))
            splitByEnterDsRe = tmpsRe.split('\n')
            #        splitByEnterD1 = tmp1.split('\n')
            weightTmp = []
#            for line in splitByEnterDsRe[j]:
            for line in splitByEnterDsRe:
                line = line.strip()
                #mapping page
                pageNo = int(line.split(',')[0].split('(')[1].strip())
                #weight of mapping page
                weight = float(line.split(',')[1].split(')')[1].strip())
                if weight < Lambda + 0.02:
                    weightTmp.append(weight)
#                if weight >= 1:
#                    print 'InstNo:'+str(instNo+j)+', DictNo:'+str(dictNo)+', page:'+str(pageNo)+', weight:'+str(weight)
            maxWeight = max(weightTmp)
            weightTmp[:] = []
            #one-page edge case: if maxWeight == 0.0, treat it as 1
            if maxWeight==0:
                maxWeight = 1
            if maxWeight > 1:
                maxWeight = 1
            weightWindowDsRe[dictNo].append(maxWeight)
            
    #choose dict
    for dictNo in range(numDicts):
        #if this is the current dict, use its mean directly
        if(dictNo==currDict):
            dictWeightMeanRe[dictNo] = currMean
        #otherwise recompute the mean
        else:
            dictWeightMeanRe[dictNo] = np.mean(weightWindowDsRe[dictNo])
    #find the dict with the largest mean weight, and that mean
    maxWeightDict,meanCompareRe = max(dictWeightMeanRe.items(), key=lambda x:x[1])
    if(maxWeightDict==currDict):
        #update currMean
        meanCompareRe = currMean
        #(alternative: keep the old currMeanCompare)
#        meanCompareRe = currMeanCompare
        #(alternative: keep the old stdCompare)
#        stdCompareRe = currStdCompare
        #update stdCompare
        stdCompareRe = np.std(weightWindowDsRe[maxWeightDict])
#        weightWindowDsRe[maxWeightDict] = weightWindowDsCurDict
        outputCompare.write('Keep same model '+str(maxWeightDict)+'!\n')
        outputPredictSparse.write('Keep same model '+str(maxWeightDict)+'!\n')
    else:
#        changeModelRe = 1
        stdCompareRe = np.std(weightWindowDsRe[maxWeightDict])
        outputCompare.write('Change model to model-' + str(maxWeightDict) +',meanCompare:'+str(meanCompareRe)+',stdCompare:'+str(stdCompareRe)+'!\n')
        outputPredictSparse.write('Change model to model-' + str(maxWeightDict) +',meanCompare:'+str(meanCompareRe)+',stdCompare:'+str(stdCompareRe)+'!\n')
    
    return (maxWeightDict,meanCompareRe,stdCompareRe,weightWindowDsRe[maxWeightDict])
Example #52
    try:
        os.listdir(path)
    except Exception:
        os.mkdir(path)

    test_cases = 5
    DwR = []

    for i in range(test_cases):
        ratings = sc.parallelize(XOrged)
        model = pmr.ALS.train(ratings, K, nonnegative=True)
        D = np.array(model.userFeatures().sortByKey().map(lambda wf: wf[1]).collect())
        lparam = {'lambda1': Lambda,
                    'pos': True,
                    'mode': 2,
                    'numThreads': -1}
        alpha = spams.lasso(X, np.asfortranarray(D), **lparam)
        xd = X - D @ alpha.toarray()
        R = np.mean(0.5 * (xd * xd).sum(axis=0) + Lambda * np.abs(alpha.toarray()).sum(axis=0))
        DwR += [(D, R)]

    dwr = sc.parallelize(DwR)

    def findBest(E1, E2):
        if E1[1] > E2[1]:
            return E2
        return E1

    (D, R) = dwr.reduce(findBest)
    sio.savemat(path + "bestMat", {'Dbest': D, "R": R})
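########################################################
# A compact, hedged sketch of the objective evaluated inside the loop above,
# R = mean_i( 0.5*||x_i - D a_i||_2^2 + lambda*||a_i||_1 ), written so the
# sparse alpha returned by spams.lasso is densified explicitly. The shapes
# and Lambda below are illustrative toy values.
########################################################
import numpy as np
import spams

rng = np.random.RandomState(0)
X = np.asfortranarray(rng.rand(20, 50))      # nonnegative toy data
D = np.asfortranarray(rng.rand(20, 10))
D /= np.sqrt((D * D).sum(axis=0))            # unit-norm atoms
Lambda = 0.15

alpha = spams.lasso(X, np.asfortranarray(D), lambda1=Lambda,
                    pos=True, mode=2, numThreads=-1)
xd = X - D @ alpha.toarray()                 # dense residual, shape (20, 50)
R = np.mean(0.5 * (xd * xd).sum(axis=0)
            + Lambda * np.abs(alpha.toarray()).sum(axis=0))
print("objective: %f" % R)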
toc = time.time()
print("time passed " + str(toc - tic) + " s.")
    lparam = dict( tmp )
    toc = time.time()
    t = toc - tic
    print('time to load dictionary: %f' % t)

    print("D shape", D.shape)
    print("X shape", X.shape)

    #if args.objective_function:
    #if True:
    if False:
        print "computing objective function"
        
        tic = time.time()

        alpha = spams.lasso( X, D = D, **lparam )
        xd = X - D * alpha 
        R = np.mean(0.5 * (xd * xd).sum(axis=0) + lparam['lambda1'] * np.abs(alpha).sum(axis=0))
        toc = time.time()

        print "  objective function value: %f" % R
        t = toc - tic
        print 'time of computation for objective function: %f' % t
 
    if doUpsamp:
        print "doing upsampling evaluation"
        tic = time.time()
        # dsz - size of downsampled image patch
        X_ds,dsz = evaluation.downsamplePatchList( X, patchSize, dsfactor )
        X_ds = np.asfortranarray( X_ds )
#    print X
    if(len(testDataWindow)==numAlgoWindow):
        testDataWindow.popleft()
    testDataWindow.append(currentTestData)
        
    instanceNo = instNo
    
    tmps[:] = []
    splitByEnterDs[:] = []
    #During the initial sliding window, decide the initial dictionary
    #Calculate
    if(instNo < numAlgoWindow):
        for dictNo in range(numOfDicts):
    #        alpha_lasso_m1_Ds = spams.lasso(X,Ds[dictNo],return_reg_path = False,lambda1 = alpha1Lambda,pos=True,mode=0)
    #        alpha_lasso_m1_Ds = spams.lasso(X,Ds[dictNo],return_reg_path = False,lambda1 = compareLambda,pos=True,mode=1)
            alpha_lasso_m1_Ds = spams.lasso(X,Ds[dictNo],return_reg_path = False,lambda1 = 1,pos=True,mode=0)
    #        alpha_lasso_m2_Ds = spams.lasso(X,Ds[dictNo],return_reg_path = False,lambda1 = 0,pos=True,mode=1)
    #        alpha_lasso_m3_Ds = spams.lasso(X,Ds[dictNo],return_reg_path = False,lambda1 = 0.5,lambda2 = 1,pos=True,mode=2)
            
            #spams.omp
    #        alpha_omp_m1_Ds = spams.omp(X,Ds[dictNo],L=2,return_reg_path = False,numThreads = -1)
    #        alpha_omp_m2_Ds = spams.omp(X,Ds[dictNo],eps= 0.9,return_reg_path = False,numThreads = -1)
    #        alpha_omp_m3_Ds = spams.omp(X,Ds[dictNo],lambda1=0.4,return_reg_path = False,numThreads = -1)
    #        alpha_lasso_m1_Ds.max()
        #    alpha_lasso_m1_Ds.append(spams.lasso(X,Ds[dictNo],return_reg_path = False,lambda1 = compareLambda,pos=True,mode=1))
            
            tmps.append(str(alpha_lasso_m1_Ds.getcol(0)))
    #        tmps.append(str(alpha_omp_m1_Ds.getcol(0)))
            
            outputCompare.write(str(instNo)+'-D'+str(dictNo)+':'+tmps[dictNo]+'\n\n')
            
Example #55
    def _encode(self, X, A):
        S = lasso(X, A, return_reg_path=False, **self.spams_param).todense()
        #print("LASSO nnz: %d" % count_nonzero(S))
        return S
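########################################################
# Hedged usage sketch for an encoder like _encode above; spams_param here is
# a stand-in for whatever self.spams_param holds in the original class.
########################################################
import numpy as np
import spams

spams_param = {'lambda1': 0.15, 'mode': 2, 'numThreads': -1}
A = np.asfortranarray(np.random.randn(16, 32))   # dictionary
A /= np.sqrt((A * A).sum(axis=0))                # unit-norm atoms assumed
X = np.asfortranarray(np.random.randn(16, 8))    # 8 signals to encode

S = spams.lasso(X, A, return_reg_path=False, **spams_param)
codes = S.toarray()        # ndarray; .todense() (as above) yields np.matrix
print(codes.shape)         # (32, 8): one sparse code per column of X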
X = np.asfortranarray(X / np.tile(np.sqrt((X * X).sum(axis=0)),(X.shape[0],1)),dtype = myfloat)
param = { 'K' : 100, # learns a dictionary with 100 elements
          'lambda1' : 0.15, 'numThreads' : 4, 'batchsize' : 400,
          'iter' : 10}
paramL = {'lambda1' : 0.15, 'numThreads' : 4}

########## FIRST EXPERIMENT ###########
tic = time.time()
D = spams.trainDL(X,**param)
tac = time.time()
t = tac - tic
print('time of computation for Dictionary Learning: %f' % t)
print("DTYPE %s" % str(D.dtype))
#param['approx'] = 0
print('Evaluating cost function...')
alpha = spams.lasso(X,D,**paramL)
print("XX X %s, D %s, alpha %s" % (str(X.shape), str(D.shape), str(alpha.shape)))
y = X
if(alpha.shape[1] > 1000):
    alpha = alpha[:,0:1000]
    y = X[:,0:1000]
#Da = spams.calcXAt(D,ssp.csc_matrix(alpha.T))
a = alpha.todense()
print "XXa %s" %str(a.shape)
Da = np.dot(D,a)
#Da = D * alpha
xd = y - Da
print "YY D %s Da %s y %s %s alpah %s xd %s %s" %(str(D.shape),str(Da.shape),str(y.shape),type(y),str(alpha.shape),str(xd.shape),type(xd))
#R = np.mean(0.5 * (xd * xd).sum(axis=0) + param['lambda1'] * np.abs(alpha).sum(axis=0))
#R = 0.5 * (xd * xd).sum(axis=0)
R = np.multiply(xd,xd)
    # for each fixed dictionary size K, we repeat dictionary
    # learning test_cases times, each with a different initial value
    test_cases = 10
    R = np.zeros((test_cases, ))
    for i in range(0, test_cases):
        if randomStart == 1:
            D0 = util.dictLearnInit(X, K, 'random', 0)
        param['D'] = np.asfortranarray(D0)
        lparam = {'lambda1': Lambda,
                  'pos': True,
                  'mode': 2,
                  'numThreads': -1
        }
        D = spams.trainDL(X, **param)
        alpha = spams.lasso(X, D, **lparam)
        xd = X - D @ alpha.toarray()
        R[i] = np.mean(0.5 * (xd * xd).sum(axis=0)
                       + param['lambda1'] * np.abs(alpha.toarray()).sum(axis=0))
        print(R[i])

        #(permInd, cost) = util.dissimilarityDict(Dtemplate, D, 'euclidean')
        #D = D[:, permInd] # permute the columns of D to match the template Dtemplate

        if i >= 1:
            if R[i] < Rbest:
                Dbest = D
                Rbest = R[i]
        else:
            Dbest = D
            Rbest = R[0]

    # print path
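########################################################
# The restart-and-keep-best pattern above, distilled into a hedged,
# self-contained sketch that tracks the running best from the start instead
# of special-casing i == 0. Sizes and parameters are toy values.
########################################################
import numpy as np
import spams

rng = np.random.RandomState(0)
X = np.asfortranarray(rng.rand(20, 200))
param = {'K': 10, 'lambda1': 0.15, 'iter': 50, 'numThreads': -1}
lparam = {'lambda1': 0.15, 'pos': True, 'mode': 2, 'numThreads': -1}

Rbest, Dbest = np.inf, None
for _ in range(3):                               # a few random restarts
    D = spams.trainDL(X, **param)                # fresh initialization each pass
    alpha = spams.lasso(X, np.asfortranarray(D), **lparam)
    xd = X - D @ alpha.toarray()
    R = np.mean(0.5 * (xd * xd).sum(axis=0)
                + lparam['lambda1'] * np.abs(alpha.toarray()).sum(axis=0))
    if R < Rbest:
        Dbest, Rbest = D, R
print("best objective: %f" % Rbest)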
Example #58
def _processer(data, mask, variance, block_size, overlap, param_alpha, param_D, dtype=np.float64, n_iter=10, gamma=3., tau=1.):
    # data, mask, variance, block_size, overlap, param_alpha, param_D, dtype, n_iter = arglist
    # gamma = 3.
    # tau = 1.

    orig_shape = data.shape
    mask_array = im2col_nd(mask, block_size[:3], overlap[:3])
    train_idx = np.sum(mask_array, axis=0) > mask_array.shape[0]/2

    # If mask is empty, return a bunch of zeros as blocks
    if not np.any(train_idx):
        return np.zeros_like(data)

    X = im2col_nd(data, block_size, overlap)
    var_mat = np.median(im2col_nd(variance[..., 0:orig_shape[-1]], block_size, overlap)[:, train_idx], axis=0).astype(dtype)
    X_full_shape = X.shape
    X = X[:, train_idx]

    param_alpha['L'] = int(0.5 * X.shape[0])

    D = param_alpha['D']

    alpha = lil_matrix((D.shape[1], X.shape[1]))
    W = np.ones(alpha.shape, dtype=dtype, order='F')

    DtD = np.dot(D.T, D)
    DtX = np.dot(D.T, X)
    DtXW = np.empty_like(DtX, order='F')

    alpha_old = np.ones(alpha.shape, dtype=dtype)
    has_converged = np.zeros(alpha.shape[1], dtype=bool)

    xi = np.random.randn(X.shape[0], X.shape[1]) * var_mat
    eps = np.max(np.abs(np.dot(D.T, xi)), axis=0)
    param_alpha['mode'] = 1
    param_alpha['pos'] = True

    for _ in range(n_iter):
        not_converged = np.equal(has_converged, False)
        DtXW[:, not_converged] = DtX[:, not_converged] / W[:, not_converged]

        for i in range(alpha.shape[1]):
            if not has_converged[i]:

                param_alpha['lambda1'] = var_mat[i] * (X.shape[0] + gamma * np.sqrt(2 * X.shape[0]))
                DtDW = (1. / W[..., None, i]) * DtD * (1. / W[:, i])
                alpha[:, i:i+1] = spams.lasso(X[:, i:i+1], Q=np.asfortranarray(DtDW), q=DtXW[:, i:i+1], **param_alpha)

        arr = alpha.toarray()
        nonzero_ind = arr != 0
        arr[nonzero_ind] /= W[nonzero_ind]
        has_converged = np.max(np.abs(alpha_old - arr), axis=0) < 1e-5

        if np.all(has_converged):
            break

        alpha_old = arr
        W[:] = 1. / (np.abs(alpha_old**tau) + eps)

        # compute_weights(alpha_old, alpha, W, tau, eps)

    # alpha = arr
    # X = D.dot(alpha)
    # X = sparse_dot(D,alpha)
    X = np.dot(D, arr)
    weights = np.ones(X_full_shape[1], dtype=dtype, order='F')
    weights[train_idx] = 1. / (alpha.getnnz(axis=0) + 1.)

    X2 = np.zeros(X_full_shape, dtype=dtype, order='F')
    X2[:, train_idx] = X

    return col2im_nd(X2, block_size, orig_shape, overlap, weights)
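########################################################
# Distilled, hedged sketch of the reweighted-l1 idea behind _processer's
# inner loop: solve a weighted lasso by folding the weights into the
# dictionary columns, then update the weights from the new coefficients.
# This column-rescaling trick (not the SPAMS Q/q Gram-matrix path used
# above) relies on D*(beta/w) = (D/w)*beta; lam, tau, eps and the iteration
# count are illustrative guesses.
########################################################
import numpy as np
import spams

rng = np.random.RandomState(0)
D = np.asfortranarray(rng.rand(30, 60))
D /= np.sqrt((D * D).sum(axis=0))
x = np.asfortranarray(rng.rand(30, 1))
lam, tau, eps = 0.05, 1., 1e-3

W = np.ones(D.shape[1])
for _ in range(5):
    Dw = np.asfortranarray(D / W[None, :])       # scale column j by 1/w_j
    beta = spams.lasso(x, Dw, lambda1=lam, mode=2, pos=True)
    a = beta.toarray().ravel() / W               # back to the original variables
    W = 1. / (np.abs(a) ** tau + eps)            # heavier penalty on small coefs
print("nonzeros after reweighting:", np.count_nonzero(a))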
Example #59
print(np.tile(np.sqrt((D1*D1).sum(axis=0)),(D1.shape[0],1)))
D1 = np.asfortranarray(D1 / np.tile(np.sqrt((D1*D1).sum(axis=0)),(D1.shape[0],1)))
print('newD:')
print(D1)

X=np.array([1.0,4,7])#,1,2,3])#,2.0,5.0,8.0])
X = X.reshape(3,1, order='F')
#X = X.reshape(3,2)
print('X:')
print(X)
#X = np.asfortranarray(X / np.tile(np.sqrt((X*X).sum(axis=0)),(X.shape[0],1)))
print('newX:')
print(X)
#for i in range(3):
#    X[i][0]=i*3+1
#for i in range(3):
#    X[i][1]=i*3+2

tic = time.time()
#alpha = spams.lasso(X,D1,return_reg_path = False,lambda1 = 2,pos=True,mode=0)
alpha = spams.lasso(X,D1,return_reg_path = False,lambda1 = 0.1,pos=True,mode=1,verbose=True)
#alpha = spams.omp(X,D1,L=2,lambda1 = None,return_reg_path = False,numThreads = -1)
#alpha = spams.omp(X,D1,L=None,eps= 0,lambda1 = None,return_reg_path = False,numThreads = -1)
tac = time.time()
t = tac - tic
print('time:' + str(t))


print('alpha:')
print(alpha)
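########################################################
# A quick hedged check in the same spirit as the snippet above, written to
# be fully self-contained: with D1 = I the lasso code should roughly
# reconstruct X up to the mode=1 residual tolerance.
########################################################
import numpy as np
import spams

D1 = np.asfortranarray(np.eye(3))                # trivially unit-norm columns
X = np.asfortranarray(np.array([[1.0], [4.0], [7.0]]))
alpha = spams.lasso(X, D1, return_reg_path=False,
                    lambda1=0.1, pos=True, mode=1, verbose=False)
print('reconstruction error:', float(np.linalg.norm(X - D1 @ alpha.toarray())))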