Example no. 1
import numpy as np
import scipy.sparse

def get_laplacian(A, normalization_mode=None):
    """
    Compute the (optionally normalized) Laplacian of a graph from its
    adjacency matrix.
    Code inspired by the networkx python library.
    """

    A = scipy.sparse.csr_matrix(A)
    diags = np.asarray(A.sum(axis=1)).flatten()  # node degrees
    n, m = A.shape
    D = scipy.sparse.spdiags(diags, [0], m, n, format='csr')
    L = D - A

    if normalization_mode not in ['sym', 'rw', None]:
        raise ValueError('Normalisation mode {} unknown'.format(normalization_mode))

    elif normalization_mode is None:
        return L

    elif normalization_mode == 'sym':
        # symmetric normalization: D^{-1/2} (D - A) D^{-1/2}
        with np.errstate(divide='ignore'):
            diags_sqrt = 1.0/np.sqrt(diags)
        diags_sqrt[np.isinf(diags_sqrt)] = 0  # isolated nodes have degree 0
        DH = scipy.sparse.spdiags(diags_sqrt, [0], m, n, format='csr')
        return DH.dot(L.dot(DH))

    elif normalization_mode == 'rw':
        # random-walk normalization: D^{-1} (D - A)
        with np.errstate(divide='ignore'):
            diags_inverse = 1.0/diags
        diags_inverse[np.isinf(diags_inverse)] = 0
        DH = scipy.sparse.spdiags(diags_inverse, [0], m, n, format='csr')
        return DH.dot(L)
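
A minimal usage sketch on a toy 3-node path graph (illustrative only, assuming the imports above):

import numpy as np

A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])

L = get_laplacian(A)              # combinatorial Laplacian D - A
L_sym = get_laplacian(A, 'sym')   # D^{-1/2} (D - A) D^{-1/2}
L_rw = get_laplacian(A, 'rw')     # D^{-1} (D - A)
print(L.toarray())                # [[ 1 -1  0], [-1  2 -1], [ 0 -1  1]]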
Example no. 2
def binarize_feature_uniform_p(name, value, min_val, max_val, num_units):
    """
    Now reimplemented in c, so don't use this.  Keeping around in case
    of problems.  But I tested that the c and python versions were identical
    for many values.   -- stefie10
    """
    if (isnan(value) or isnan(min_val) or isnan(max_val)):
        return name + "_nan"

    if (isinf(min_val)):
        # clamp an infinite lower bound to -100 before bucketing
        discrete = linspace(-100 - 0.1, max_val + 0.1, num_units)
        #print discrete
    elif (isinf(max_val)):
        # clamp an infinite upper bound to +100
        discrete = linspace(min_val - 0.1, 100 + 0.1, num_units)
        #print discrete
    else:
        discrete = linspace(min_val - 0.1, max_val + 0.1, num_units)
    #print "discrete = ",
    #print repr(discrete)
    #print len(discrete)
    i = bisect(discrete, value)
    #print "i", i
    if (i <= len(discrete) - 1 and i > 0):
        ret_name = name + "_{0:0.2f}".format(
            discrete[i - 1]) + "_{0:0.2f}".format(discrete[i])
    elif (i == 0):
        ret_name = name + "_lt_{0:0.2f}".format(discrete[0])
    elif (i == len(discrete)):
        ret_name = name + "_gt_{0:0.2f}".format(discrete[-1])

    return ret_name
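
A worked call, assuming isnan/isinf, linspace and bisect are imported as in the original module:

# 5 bucket edges between 0 and 10, padded by 0.1:
# linspace(-0.1, 10.1, 5) -> [-0.1, 2.45, 5.0, 7.55, 10.1]
# bisect(..., 3.7) -> 2, so 3.7 falls in the (2.45, 5.0] bucket.
name = binarize_feature_uniform_p("width", 3.7, 0.0, 10.0, 5)
# name == "width_2.45_5.00"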
Example no. 3
def lapnormadj(A):

    """
    Perform Laplacian normalisation of an n x m adjacency matrix:
    la[i, j] = A[i, j] / sqrt(d1[i] * d2[j]), with zero-degree rows and
    columns mapped to zero.
    :param A: Adjacency matrix
    :return: Laplacian-normalised matrix (sparse COO)
    """

    import scipy
    import numpy as np
    n,m = A.shape
    d1 = A.sum(axis=1).flatten()  # row degrees
    d2 = A.sum(axis=0).flatten()  # column degrees
    d1_sqrt = 1.0/scipy.sqrt(d1)
    d2_sqrt = 1.0/scipy.sqrt(d2)
    d1_sqrt[scipy.isinf(d1_sqrt)] = 0  # zero-degree rows contribute nothing
    d2_sqrt[scipy.isinf(d2_sqrt)] = 0
    la = np.zeros(shape=(n,m))

    for i in range(0,n):
        for j in range(0,m):
          la[i,j] = A[i,j]*d1_sqrt[i]*d2_sqrt[j]

    #D1 = scipy.sparse.spdiags(d1_sqrt, [0], n,m, format='coo')
    #D2 = scipy.sparse.spdiags(d2_sqrt, [0], n,m, format='coo')

    la[la < 1e-5] = 0  # drop numerically negligible entries

    return  scipy.sparse.coo_matrix(la)
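
The double loop can be replaced by the diagonal scaling hinted at in the commented-out D1/D2 lines; a vectorized sketch for dense numpy input (hypothetical helper, same math):

import numpy as np

def lapnormadj_fast(A):
    A = np.asarray(A, dtype=float)
    with np.errstate(divide='ignore'):
        d1_sqrt = 1.0 / np.sqrt(A.sum(axis=1))  # row degrees
        d2_sqrt = 1.0 / np.sqrt(A.sum(axis=0))  # column degrees
    d1_sqrt[np.isinf(d1_sqrt)] = 0
    d2_sqrt[np.isinf(d2_sqrt)] = 0
    return d1_sqrt[:, None] * A * d2_sqrt[None, :]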
Example no. 4
def LDA_batch_normalization(dataset, sample_table, batch_col, output_folder, ncomps): # this is actually the batch normalization method
   
    tmp_output_folder = os.path.join(output_folder, 'tmp')

    if not os.path.isdir(tmp_output_folder):
        os.makedirs(tmp_output_folder)
    
    barcodes, filtered_conditions, filtered_matrix, conditions, matrix = dataset
    
    # Remove any remaining NaNs and Infs from the filtered matrix - they would screw
    # up the LDA. 
    filtered_matrix[scipy.isnan(filtered_matrix)] = 0
    filtered_matrix[scipy.isinf(filtered_matrix)] = 0

    # For full matrix, also eliminate NaNs and Infs, BUT preserve the indices and values
    # so they can be added back into the matrix later (not implemented yet, and may never
    # be - there should no longer be NaNs and Infs in the dataset)
    # The NaNs and Infs will mess up the final step of the MATLAB LDA script, which uses
    # matrix multiplication to remove the specified number of components!
    matrix_nan_inds = scipy.isnan(matrix)
    matrix_nan_vals = matrix[matrix_nan_inds]
    matrix_inf_inds = scipy.isinf(matrix)
    matrix_inf_vals = matrix[matrix_inf_inds]

    matrix[matrix_nan_inds] = 0
    matrix[matrix_inf_inds] = 0

    # Save both the small matrix (for determining the components to remove) and the 
    # full matrix for the matlab script
    filtered_matrix_tmp_filename = os.path.join(tmp_output_folder, 'nonreplicating_matrix.txt')
    full_matrix_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix.txt')
    
    np.savetxt(filtered_matrix_tmp_filename, filtered_matrix)
    np.savetxt(full_matrix_tmp_filename, matrix)

    # Map the batch to integers for matlab, and write out to a file so matlab can read
    # Note that yes, the batch_classes should match up with the filtered matrix, not
    # the full matrix
    batch_classes = get_batch_classes(dataset = [barcodes, filtered_conditions, filtered_matrix], sample_table = sample_table, batch_col = batch_col)
    class_tmp_filename = os.path.join(tmp_output_folder, 'classes.txt')
    writeList(batch_classes, class_tmp_filename)
   
    output_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix_lda_normalized.txt')
    runLDAMatlabFunc(filtered_matrix_filename = filtered_matrix_tmp_filename, \
            matrix_filename = full_matrix_tmp_filename, \
            class_filename = class_tmp_filename, \
            ncomps = ncomps, \
            output_filename = output_tmp_filename)
    # The X norm that is returned is the full matrix. In the future, we could add in
    # returning the components to remove so they can be visualized or applied to other
    # one-off datasets
    Xnorm =  scipy.genfromtxt(output_tmp_filename)

    ## Dump the dataset out!
    #output_filename = os.path.join(mtag_effect_folder, 'scaleddeviation_full_mtag_lda_{}.dump.gz'.format(ncomps))
    #of = gzip.open(output_filename, 'wb')
    #cPickle.dump([barcodes, conditions, Xnorm], of)
    #of.close()

    return [barcodes, conditions, Xnorm]
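
The MATLAB script mentioned above removes the chosen number of components by matrix multiplication; a rough numpy sketch of that kind of projection, where W is a hypothetical (features x components) matrix of discriminant directions, not something this pipeline actually returns:

import numpy as np

def remove_components(X, W, ncomps):
    """Project the first ncomps columns of W out of the rows of X."""
    Q, _ = np.linalg.qr(W[:, :ncomps])  # orthonormal basis of the directions
    return X - X @ Q @ Q.T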
Example no. 5
def LDA_batch_normalization(dataset, sample_table, batch_col, output_folder, n_comps): # this is actually the batch normalization method
   
    tmp_output_folder = os.path.join(output_folder, 'tmp')

    if not os.path.isdir(tmp_output_folder):
        os.makedirs(tmp_output_folder)
    
    barcodes, filtered_conditions, filtered_matrix, conditions, matrix = dataset
    
    # Remove any remaining NaNs and Infs from the filtered matrix - they would screw
    # up the LDA. 
    filtered_matrix[scipy.isnan(filtered_matrix)] = 0
    filtered_matrix[scipy.isinf(filtered_matrix)] = 0

    # For full matrix, also eliminate NaNs and Infs, BUT preserve the indices and values
    # so they can be added back into the matrix later (not implemented yet, and may never
    # be - there should no longer be NaNs and Infs in the dataset)
    # The NaNs and Infs will mess up the final step of the MATLAB LDA script, which uses
    # matrix multiplication to remove the specified number of components!
    matrix_nan_inds = scipy.isnan(matrix)
    matrix_nan_vals = matrix[matrix_nan_inds]
    matrix_inf_inds = scipy.isinf(matrix)
    matrix_inf_vals = matrix[matrix_inf_inds]

    matrix[matrix_nan_inds] = 0
    matrix[matrix_inf_inds] = 0

    # Save both the small matrix (for determining the components to remove) and the 
    # full matrix for the matlab script
    filtered_matrix_tmp_filename = os.path.join(tmp_output_folder, 'nonreplicating_matrix.txt')
    full_matrix_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix.txt')
    
    np.savetxt(filtered_matrix_tmp_filename, filtered_matrix)
    np.savetxt(full_matrix_tmp_filename, matrix)

    # Map batch classes to integers
    batch_classes = get_batch_classes(dataset = [barcodes, filtered_conditions, filtered_matrix], sample_table = sample_table, batch_col = batch_col)

    # Check the number of classes and cap n_comps accordingly
    a = [x > 0 for x in np.sum(np.absolute(filtered_matrix), axis=0)]
    classes = np.asarray([batch_classes[i] for i in range(len(batch_classes)) if a[i]])
    n_samples = filtered_matrix.shape[0]
    n_classes = len(np.unique(classes))
    if n_samples == n_classes:
        print "ERROR: The number of samples is equal to the number of classes. Exiting"
        return None
    if n_classes <= n_comps:
        print "Fewer classes, " + str(n_classes) + ", than components. Setting components to " + str(n_classes-1)
        n_comps = n_classes-1

    # Runs LDA
    #Xnorm = scikit_lda(filtered_matrix, matrix, batch_classes, n_comps)
    Xnorm = outer_python_lda(filtered_matrix, matrix, batch_classes, n_comps)

    return [barcodes, conditions, Xnorm, n_comps]
Example no. 6
def _normalize(spectra):
    stars = spectra.index
    wavelengths = spectra.flux.columns.values.copy()
    flux = spectra.flux.values.copy()
    error = spectra.error.reindex(columns=wavelengths).values.copy()

    #TODO: Should negative fluxes be zero'd too?
    bad_flux = sp.isnan(flux) | sp.isinf(flux)
    bad_error = sp.isnan(error) | sp.isinf(error) | (error < 0)
    bad = bad_flux | bad_error

    flux[bad] = 1
    error[bad] = ERROR_LIM

    #TODO: Where does pixlist come from?
    pixlist = sp.loadtxt('pixlist.txt', dtype=int)
    var = sp.full_like(error, ERROR_LIM**2)
    var[:, pixlist] = 0
    inv_var = 1 / (var**2 + error**2)

    norm_flux = sp.full_like(flux, 1)
    norm_error = sp.full_like(error, ERROR_LIM)
    for star in range(len(stars)):
        for _, (left, right) in CHIPS.items():
            mask = (left < wavelengths) & (wavelengths < right)
            #TODO: Why are we using Chebyshev polynomials rather than smoothing splines?
            #TODO: Why are we using three polynomials rather than one? Are spectra discontinuous between chips?
            #TODO: Is the denominator being zero/negative ever an issue?
            fit = Chebyshev.fit(x=wavelengths[mask],
                                y=flux[star][mask],
                                w=inv_var[star][mask],
                                deg=2)

            norm_flux[star][mask] = flux[star][mask] / fit(wavelengths[mask])
            norm_error[star][mask] = error[star][mask] / fit(wavelengths[mask])

    #TODO: Why is the unreliability threshold different from the limit value?
    unreliable = (norm_error > .3)
    norm_flux[unreliable] = 1
    norm_error[unreliable] = ERROR_LIM

    # In the original, the masking is done in the parallax fitting code.
    # Gonna do it earlier here to save a bit of memory.
    mask = sp.any(
        sp.vstack([(l < wavelengths) & (wavelengths < u)
                   for l, u in CHIPS.values()]), 0)

    norm_flux = pd.DataFrame(norm_flux[:, mask], stars, wavelengths[mask])
    norm_error = pd.DataFrame(norm_error[:, mask], stars, wavelengths[mask])

    return pd.concat({'flux': norm_flux, 'error': norm_error}, 1)
Example no. 7
    def setRunParams(self, ic=[], params=[], t0=[], tend=[], gt0=[], refine=0,
                     specTimes=[], bounds=[]):
        if not self.initBasic:
            raise InitError, 'You must initialize the integrator before setting params. (initBasic)'

        #if self.initParams == True:
        #    raise InitError, 'You must clear params before setting them. Use clearParams()'

        if self.checkRunParams(ic, params, t0, tend, gt0, refine, specTimes,
                               bounds):
            self.ic = ic
            self.params = params
            self.t0 = float(t0)
            self.tend = float(tend)
            self.gt0 = float(gt0)
            self.refine = int(refine)
            self.specTimes = specTimes

            if self.t0 < self.tend:
                self.direction = 1
            else:
                self.direction = -1

            # Set bounds
            if bounds != []:
                self.upperBounds = bounds[1]
                self.lowerBounds = bounds[0]
                for i in range(self.phaseDim + self.paramDim):
                    if isinf(self.upperBounds[i]) and self.upperBounds[i] > 0:
                        self.upperBounds[i] = abs(float(self.defaultBound))
                    elif isinf(self.upperBounds[i]) and self.upperBounds[i] < 0:
                        self.upperBounds[i] = -abs(float(self.defaultBound))

                    if isinf(self.lowerBounds[i]) and self.lowerBounds[i] > 0:
                        self.lowerBounds[i] = abs(float(self.defaultBound))
                    elif isinf(self.lowerBounds[i]) and self.lowerBounds[i] < 0:
                        self.lowerBounds[i] = -abs(float(self.defaultBound))
            else:
                self.upperBounds = [abs(float(self.defaultBound)) for x in range(self.phaseDim + self.paramDim)]
                self.lowerBounds = [-abs(float(self.defaultBound)) for x in range(self.phaseDim + self.paramDim)]

            retval = self._integMod.SetRunParameters(self.ic, self.params,
                                 self.gt0, self.t0, self.tend, self.refine,
                                 len(self.specTimes), self.specTimes,
                                 self.upperBounds, self.lowerBounds)

            if retval[0] != 1:
                raise InitError, 'SetRunParameters call failed!'

            self.canContinue = False
            self.setParams = True
Example no. 8
    def run(self, phase=None, throats=None):
        logger.warning('This algorithm can take some time...')
        conduit_lengths = sp.sum(misc.conduit_lengths(network=self._net,
                                 mode='centroid'), axis=1)
        graph = self._net.create_adjacency_matrix(data=conduit_lengths,
                                                  sprsfmt='csr')

        if phase is not None:
            self._phase = phase
            if 'throat.occupancy' in self._phase.props():
                temp = conduit_lengths*(self._phase['throat.occupancy'] == 1)
                graph = self._net.create_adjacency_matrix(data=temp,
                                                          sprsfmt='csr',
                                                          prop='temp')
        path = spgr.shortest_path(csgraph=graph, method='D', directed=False)

        Px = sp.array(self._net['pore.coords'][:, 0], ndmin=2)
        Py = sp.array(self._net['pore.coords'][:, 1], ndmin=2)
        Pz = sp.array(self._net['pore.coords'][:, 2], ndmin=2)

        Cx = sp.square(Px.T - Px)
        Cy = sp.square(Py.T - Py)
        Cz = sp.square(Pz.T - Pz)
        Ds = sp.sqrt(Cx + Cy + Cz)

        temp = path / Ds

        temp[sp.isnan(temp)] = 0
        temp[sp.isinf(temp)] = 0

        return temp
Example no. 9
    def run(self,phase=None):
        r'''
        '''
        logger.warning('This algorithm can take some time...')
        graph = self._net.create_adjacency_matrix(data=self._net['throat.length'],sprsfmt='csr')

        if phase is not None:
            self._phase = phase
            if 'throat.occupancy' in self._phase.props():
                temp = self._net['throat.length']*(self._phase['throat.occupancy']==1)
                graph = self._net.create_adjacency_matrix(data=temp,sprsfmt='csr',prop='temp')

        #self._net.tic()
        path = spgr.shortest_path(csgraph = graph, method='D', directed = False)
        #self._net.toc()

        Px = sp.array(self._net['pore.coords'][:,0],ndmin=2)
        Py = sp.array(self._net['pore.coords'][:,1],ndmin=2)
        Pz = sp.array(self._net['pore.coords'][:,2],ndmin=2)

        Cx = sp.square(Px.T - Px)
        Cy = sp.square(Py.T - Py)
        Cz = sp.square(Pz.T - Pz)
        Ds = sp.sqrt(Cx + Cy + Cz)

        temp = path/Ds
        #temp = path

        temp[sp.isnan(temp)] = 0
        temp[sp.isinf(temp)] = 0

        return temp
Example no. 10
    def evaluer(self):
        """ Return a numeric value for the expression
        """

        # Build a dictionary of variables: {'name': value}
        #    (needed by "eval")

        vars_dict = {}
        for n, v in self.vari.items():
            print " ", n, v
            vars_dict[n] = v.v[0]

        global safe_dict
        vars_dict.update(safe_dict)

        # Evaluate the expression
        try:
            v = eval(self.py_expr, {"__builtins__": None}, vars_dict)
        except:
            return False
#        print type (v)
        # Inspect the result
        if not type(v) == float and not type(v) == scipy.float64 and not type(v) == int:
            return False
        elif scipy.isinf(v) or scipy.isnan(v):
            return None
        else:
            return v
Example no. 11
    def sample(self, model, evidence):
        z = evidence['z']
        T = evidence['T']
        g = evidence['g']
        h = evidence['h']
        transition_var_g = evidence['transition_var_g']
        shot_id = evidence['shot_id']

        observation_var_g = model.known_params['observation_var_g']
        observation_var_h = model.known_params['observation_var_h']
        prior_mu_g = model.hyper_params['g']['mu']
        prior_cov_g = model.hyper_params['g']['cov']
        N = len(z)
        n = len(g)

        # Make g, h, and z vector valued to avoid ambiguity
        g = g.copy().reshape((n, 1))
        h = h.copy().reshape((n, 1))

        z_g = ma.asarray(nan + zeros((n, 1)))
        obs_cov = ma.asarray(inf + zeros((n, 1, 1)))
        for i in xrange(n):
            z_i = z[shot_id == i]
            T_i = T[shot_id == i]
            if 1 in T_i and 2 in T_i:
                # Sample mean and variance for multiple observations
                n_obs_g, n_obs_h = sum(T_i == 1), sum(T_i == 2)
                obs_cov_g, obs_cov_h = observation_var_g / n_obs_g, observation_var_h / n_obs_h
                z_g[i] = (mean(z_i[T_i == 1]) / obs_cov_g +
                          mean(z_i[T_i == 2] - h[i]) / obs_cov_h) / (
                              1 / obs_cov_g + 1 / obs_cov_h)
                obs_cov[i] = 1 / (1 / obs_cov_g + 1 / obs_cov_h)
            elif 1 in T_i:
                n_obs_g = sum(T_i == 1)
                z_g[i] = mean(z_i[T_i == 1])
                obs_cov[i] = observation_var_g / n_obs_g
            elif 2 in T_i:
                n_obs_h = sum(T_i == 2)
                z_g[i] = mean(z_i[T_i == 2] - h[i])
                obs_cov[i] = observation_var_h / n_obs_h

        z_g[isnan(z_g)] = ma.masked
        obs_cov[isinf(obs_cov)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([
            prior_mu_g[0],
        ])
        kalman.initial_state_covariance = array([
            prior_cov_g[0],
        ])
        kalman.transition_matrices = eye(1)
        kalman.transition_covariance = array([
            transition_var_g,
        ])
        kalman.observation_matrices = eye(1)
        kalman.observation_covariance = obs_cov
        sampled_g = forward_filter_backward_sample(kalman, z_g, prior_mu_g,
                                                   prior_cov_g)
        return sampled_g.reshape((n, ))
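
The isnan/isinf masking near the end is what marks shots with no usable observation as missing for the Kalman smoother; a minimal standalone sketch of the same masked-array idiom (toy data, not this model's):

import numpy as np
import numpy.ma as ma

z = ma.asarray(np.nan + np.zeros(4))   # 4 shots, all missing initially
z[0], z[2] = 1.3, 0.7                  # shots 0 and 2 observed
z[np.isnan(z)] = ma.masked             # masked entries are skipped downstream
print(z)                               # [1.3 -- 0.7 --]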
Example no. 12
def mmse_stsa(infile, outfile, noise_sum):
    signal, params = read_signal(infile, WINSIZE)
    nf = len(signal)/(WINSIZE/2) - 1
    sig_out=sp.zeros(len(signal),sp.float32)

    G = sp.ones(WINSIZE)
    prevGamma = G
    alpha = 0.98
    window = sp.hanning(WINSIZE)
    gamma15=spc.gamma(1.5)
    lambdaD = noise_sum / 5.0
    percentage = 0
    for no in xrange(nf):
        p = int(math.floor(1. * no / nf * 100))
        if (p > percentage):
            percentage = p
            print "{}%".format(p),

        y = get_frame(signal, WINSIZE, no)
        Y = sp.fft(y*window)
        Yr = sp.absolute(Y)
        Yp = sp.angle(Y)
        gamma = Yr**2/lambdaD
        xi = alpha * G**2 * prevGamma + (1-alpha)*sp.maximum(gamma-1, 0)
        prevGamma = gamma
        nu = gamma * xi / (1+xi)
        G = (gamma15 * sp.sqrt(nu) / gamma ) * sp.exp(-nu/2) * ((1+nu)*spc.i0(nu/2)+nu*spc.i1(nu/2))
        idx = sp.isnan(G) + sp.isinf(G)
        G[idx] = xi[idx] / (xi[idx] + 1)
        Yr = G * Yr
        Y = Yr * sp.exp(Yp*1j)
        y_o = sp.real(sp.ifft(Y))
        add_signal(sig_out, y_o, WINSIZE, no)
    
    write_signal(outfile, params, sig_out)
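
The gain applied in the loop above is the Ephraim-Malah MMSE-STSA estimator; written out, with a-posteriori SNR \(\gamma\), decision-directed a-priori SNR \(\xi\), and modified Bessel functions \(I_0, I_1\) (spc.i0, spc.i1):

\[
G = \frac{\Gamma(1.5)\sqrt{\nu}}{\gamma}\exp\!\left(-\frac{\nu}{2}\right)\left[(1+\nu)\,I_0\!\left(\frac{\nu}{2}\right)+\nu\,I_1\!\left(\frac{\nu}{2}\right)\right],
\qquad \nu=\frac{\gamma\,\xi}{1+\xi}.
\]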
Example no. 13
def makehist(testpath, npulses):
    """
        This functions are will create histogram from data made in the testpath.
        Inputs
            testpath - The path that the data is located.
            npulses - The number of pulses in the sim.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    params = ['Ne', 'Te', 'Ti', 'Vi']
    pvals = [1e11, 2.1e3, 1.1e3, 0.]
    histlims = [[4e10, 2e11], [1200., 3000.], [300., 1900.], [-250., 250.]]
    erlims = [[-2e11, 2e11], [-1000., 1000.], [-800., 800.], [-250., 250.]]
    erperlims = [[-100., 100.]] * 4
    lims_list = [histlims, erlims, erperlims]
    errdict = makehistdata(params, testpath)[:4]
    ernames = ['Data', 'Error', 'Error Percent']
    sig1 = sp.sqrt(1. / npulses)

    # Two dimensional histograms
    pcombos = [i for i in itertools.combinations(params, 2)]
    c_rows = int(math.ceil(float(len(pcombos)) / 2.))
    (figmplf, axmat) = plt.subplots(c_rows,
                                    2,
                                    figsize=(12, c_rows * 6),
                                    facecolor='w')
    axvec = axmat.flatten()
    for icomn, icom in enumerate(pcombos):
        curax = axvec[icomn]
        str1, str2 = icom
        _, _, _ = make2dhist(testpath, PARAMDICT[str1], PARAMDICT[str2],
                             figmplf, curax)
    filetemplate = str(Path(testpath).joinpath('AnalysisPlots', 'TwoDDist'))
    plt.tight_layout()
    plt.subplots_adjust(top=0.95)
    figmplf.suptitle('Pulses: {0}'.format(npulses), fontsize=20)
    fname = filetemplate + '_{0:0>5}Pulses.png'.format(npulses)
    plt.savefig(fname)
    plt.close(figmplf)
    # One dimensional histograms
    for ierr, iername in enumerate(ernames):
        filetemplate = str(Path(testpath).joinpath('AnalysisPlots', iername))
        (figmplf, axmat) = plt.subplots(2, 2, figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        for ipn, iparam in enumerate(params):
            plt.sca(axvec[ipn])
            if sp.any(sp.isinf(errdict[ierr][iparam])):
                continue
            binlims = lims_list[ierr][ipn]
            bins = sp.linspace(binlims[0], binlims[1], 100)
            histhand = sns.distplot(errdict[ierr][iparam],
                                    bins=bins,
                                    kde=True,
                                    rug=False)

            axvec[ipn].set_title(iparam)
        figmplf.suptitle(iername + ' Pulses: {0}'.format(npulses), fontsize=20)
        fname = filetemplate + '_{0:0>5}Pulses.png'.format(npulses)
        plt.savefig(fname)
        plt.close(figmplf)
Example no. 14
File: tmp.py Project: jaberg/sclas
def matfile_featfunc(fname,
                     suffix,
                     kernel_type = DEFAULT_KERNEL_TYPE,
                     variable_name = DEFAULT_VARIABLE_NAME):
    
    fname += suffix
    
    error = False
    try:
        if kernel_type == "exp_mu_da":
            # hack for GB with 204 dims
            fdata = io.loadmat(fname)[variable_name].reshape(-1, 204)
        else:
            fdata = io.loadmat(fname)[variable_name].ravel()

    except TypeError:
        fname_error = fname+'.error'
        print "[ERROR] couldn't open", fname, "moving it to", fname_error
        shutil.move(fname, fname_error)
        error = True

    except:
        print "[ERROR] (unknown) with", fname
        raise

    if error:
        raise RuntimeError("An error occured while loading '%s'"
                           % fname)

    assert(not sp.isnan(fdata).any())
    assert(not sp.isinf(fdata).any())

    return fdata
Example no. 15
    def run(self, phase=None, throats=None):
        logger.warning('This algorithm can take some time...')
        conduit_lengths = sp.sum(misc.conduit_lengths(network=self._net,
                                 mode='centroid'), axis=1)
        graph = self._net.create_adjacency_matrix(data=conduit_lengths,
                                                  sprsfmt='csr')

        if phase is not None:
            self._phase = phase
            if 'throat.occupancy' in self._phase.props():
                temp = conduit_lengths*(self._phase['throat.occupancy'] == 1)
                graph = self._net.create_adjacency_matrix(data=temp,
                                                          sprsfmt='csr',
                                                          prop='temp')
        path = spgr.shortest_path(csgraph=graph, method='D', directed=False)

        Px = sp.array(self._net['pore.coords'][:, 0], ndmin=2)
        Py = sp.array(self._net['pore.coords'][:, 1], ndmin=2)
        Pz = sp.array(self._net['pore.coords'][:, 2], ndmin=2)

        Cx = sp.square(Px.T - Px)
        Cy = sp.square(Py.T - Py)
        Cz = sp.square(Pz.T - Pz)
        Ds = sp.sqrt(Cx + Cy + Cz)

        temp = path / Ds

        temp[sp.isnan(temp)] = 0
        temp[sp.isinf(temp)] = 0

        return temp
Example no. 16
    def _process_image(self, fname):

        kernel_type = self.kernel_type
        variable_name = self.variable_name

        fname += self.input_suffix

        error = False
        try:
            if kernel_type == "exp_mu_da":
                # hack for GB with 204 dims
                # fdata = io.loadmat(fname)[variable_name].reshape(-1, 204)
                fdata = self._load_image(fname).reshape(-1, 204)
            else:
                fdata = self._load_image(fname).ravel()
                # fdata = io.loadmat(fname)[variable_name].ravel()

        except TypeError:
            fname_error = fname + ".error"
            print "[ERROR] couldn't open", fname, "moving it to", fname_error
            # os.unlink(fname)
            shutil.move(fname, fname_error)
            error = True

        except:
            print "[ERROR] (unknown) with", fname
            raise

        if error:
            raise RuntimeError("An error occurred while loading '%s'" % fname)

        assert not sp.isnan(fdata).any()
        assert not sp.isinf(fdata).any()

        return fdata
Example no. 17
def __regularized_laplacian_matrix(adj_matrix, tau):
    """
    Using ARPACK solver, compute the first K eigen vector.
    The laplacian is computed using the regularised formula from [2]
    [2]Kamalika Chaudhuri, Fan Chung, and Alexander Tsiatas 2018.
        Spectral clustering of graphs with general degrees in the extended planted partition model.
    L = I - D^-1/2 * A * D ^-1/2
    :param adj_matrix: adjacency matrix representation of graph where [m][n] >0 if there is edge and [m][n] = weight
    :param tau: the regularisation constant
    :return: the first K eigenvector
    """
    import scipy.sparse

    # Code inspired from nx.normalized_laplacian_matrix, with changes to allow regularisation
    n, m = adj_matrix.shape
    I = np.eye(n, m)
    diags = adj_matrix.sum(axis=1).flatten()
    # add tau to the diags to produce a regularised diags
    if tau != 0:
        diags = np.add(diags, tau)

    # diags is zero for isolated nodes; ignore the divide-by-zero here
    # and zero those entries out afterwards
    with scipy.errstate(divide="ignore"):
        diags_sqrt = 1.0 / scipy.sqrt(diags)
    diags_sqrt[scipy.isinf(diags_sqrt)] = 0
    D = scipy.sparse.spdiags(diags_sqrt, [0], m, n, format="csr")

    L = I - (D.dot(adj_matrix.dot(D)))
    return L
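
A sketch of the eigenvector step the docstring refers to, assuming L is the matrix returned above and K is a hypothetical number of clusters:

import numpy as np
from scipy.sparse.linalg import eigsh

K = 4  # hypothetical number of eigenvectors/clusters
vals, vecs = eigsh(np.asarray(L), k=K, which='SM')  # K smallest eigenpairs via ARPACK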
Example no. 18
File: ch2.py Project: baxton/KNN
def sigmoid(X):
    # 1 / (1 + exp(-X)); sp.exp(-X) can overflow to inf for very negative X,
    # in which case v underflows cleanly to 0.
    v = 1. / (1. + sp.exp(-X))
    if sp.isnan(v).sum() or sp.isinf(v).sum():
        i = 0  # debugger hook left for catching numerical issues
    return v
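
For reference, a numerically stable variant that avoids the overflow noted above, as a sketch in plain numpy (not part of the original project):

import numpy as np

def sigmoid_stable(X):
    X = np.asarray(X, dtype=float)
    out = np.empty_like(X)
    pos = X >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-X[pos]))
    ex = np.exp(X[~pos])          # safe: X < 0 here, so no overflow
    out[~pos] = ex / (1.0 + ex)
    return out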
Example no. 19
    def check_ExpCM_attributes(self):
        """Make sure has the expected attribute values."""
        self.assertEqual(self.nsites, self.expcm_divpressure.nsites)

        # make sure Prxy has rows summing to zero
        self.assertFalse(scipy.isnan(self.expcm_divpressure.Prxy).any())
        self.assertFalse(scipy.isinf(self.expcm_divpressure.Prxy).any())
        diag = scipy.eye(N_CODON, dtype='bool')
        for r in range(self.nsites):
            self.assertTrue(scipy.allclose(0, scipy.sum(self.expcm_divpressure.Prxy[r], 
                    axis=1)))
            self.assertTrue(scipy.allclose(0, self.expcm_divpressure.Prxy[r].sum()))
            self.assertTrue((self.expcm_divpressure.Prxy[r][diag] <= 0).all())
            self.assertTrue((self.expcm_divpressure.Prxy[r][~diag] >= 0).all())

        # make sure prx sums to 1 for each r
        self.assertTrue((self.expcm_divpressure.prx >= 0).all())
        for r in range(self.nsites):
            self.assertTrue(scipy.allclose(1, self.expcm_divpressure.prx[r].sum()))

        # prx is an eigenvector of Prxy for the same r, but not different r
        for r in range(self.nsites):
            self.assertTrue(scipy.allclose(0, scipy.dot(self.expcm_divpressure.prx[r],
                    self.expcm_divpressure.Prxy[r])))
            if r > 0:
                self.assertFalse(scipy.allclose(0, scipy.dot(self.expcm_divpressure.prx[r],
                        self.expcm_divpressure.Prxy[r - 1])))

        # phi sums to one
        self.assertTrue(scipy.allclose(1, self.expcm_divpressure.phi.sum()))
Example no. 20
    def evaluer(self):
        """ Return a numeric value for the expression
        """

        # Build a dictionary of variables: {'name': value}
        #    (needed by "eval")

        vars_dict = {}
        for n, v in self.vari.items():
            print " ", n, v
            vars_dict[n] = v.v[0]

        global safe_dict
        vars_dict.update(safe_dict)

        # Evaluate the expression
        try:
            v = eval(self.py_expr, {"__builtins__": None}, vars_dict)
        except:
            return False
#        print type (v)
        # Inspect the result
        if not type(v) == float and not type(v) == scipy.float64 and not type(
                v) == int:
            return False
        elif scipy.isinf(v) or scipy.isnan(v):
            return None
        else:
            return v
Example no. 21
def laplacian_layout(G, norm=False, dim=2, bad=False):
    A = nx.to_scipy_sparse_matrix(G, format='csr')
    A = np.array(A.todense())
    n, m = A.shape

    diags = A.sum(axis=1).flatten()
    D = np.diag(diags)
    L = D - A
    B = np.eye(n)
    if not norm:
        layout = eig_layout(G, L, B)
    else:
        if bad:
            with scipy.errstate(divide='ignore'):
                #diags_sqrt = 1.0 / scipy.power(diags, 1)
                diags_sqrt = 1.0 / scipy.sqrt(diags)
            diags_sqrt[scipy.isinf(diags_sqrt)] = 0
            DH = np.diag(diags_sqrt)
            L = np.dot(DH, np.dot(L, DH))
            eigenvalues, eigenvectors = scipy.linalg.eigh(L)
            index = np.argsort(eigenvalues)[1:dim + 1]
            pos = np.real(eigenvectors[:, index])

            pos = np.dot(DH, pos)
            pos = dict(zip(G, pos))
            layout = pos
        else:
            B = D
            layout = eig_layout(G, L, B)

    #print(L)
    #print(B)

    return layout
Example no. 22
    def _oneEvaluation(self, evaluable):
        """ This method should be called by all optimizers for producing an evaluation. """
        if self._wasUnwrapped:
            self.wrappingEvaluable._setParameters(evaluable)
            res = self.__evaluator(self.wrappingEvaluable)
        elif self._wasWrapped:
            res = self.__evaluator(evaluable.params)
        else:
            res = self.__evaluator(evaluable)
            ''' added by JPQ '''
            if self.constrained:
                self.feasible = self.__evaluator.outfeasible
                self.violation = self.__evaluator.outviolation
            # ---
        if isscalar(res):
            # detect numerical instability
            if isnan(res) or isinf(res):
                raise DivergenceError
            # always keep track of the best
            if (self.numEvaluations == 0 or self.bestEvaluation is None
                    or (self.minimize and res <= self.bestEvaluation)
                    or (not self.minimize and res >= self.bestEvaluation)):
                self.bestEvaluation = res
                self.bestEvaluable = evaluable.copy()

        self.numEvaluations += 1

        # if desired, also keep track of all evaluables and/or their fitness.
        if self.storeAllEvaluated:
            if self._wasUnwrapped:
                self._allEvaluated.append(self.wrappingEvaluable.copy())
            elif self._wasWrapped:
                self._allEvaluated.append(evaluable.params.copy())
            else:
                self._allEvaluated.append(evaluable.copy())
        if self.storeAllEvaluations:
            if self._wasOpposed and isscalar(res):
                ''' added by JPQ '''
                if self.constrained:
                    self._allEvaluations.append(
                        [-res, self.feasible, self.violation])
                # ---
                else:
                    self._allEvaluations.append(-res)
            else:
                ''' added by JPQ '''
                if self.constrained:
                    self._allEvaluations.append(
                        [res, self.feasible, self.violation])
                # ---
                else:
                    self._allEvaluations.append(res)
        ''' added by JPQ '''
        if self.constrained:
            return [res, self.feasible, self.violation]
        else:
            # ---
            return res
Example no. 23
    def sample(self, model, evidence):
        z = evidence['z']
        T = evidence['T']
        g = evidence['g']
        h = evidence['h']
        transition_var_g = evidence['transition_var_g']
        shot_id = evidence['shot_id']

        observation_var_g = model.known_params['observation_var_g']
        observation_var_h = model.known_params['observation_var_h']
        prior_mu_g = model.hyper_params['g']['mu'] 
        prior_cov_g = model.hyper_params['g']['cov'] 
        N = len(z)
        n = len(g)

        ## Make g, h, and z vector valued to avoid ambiguity
        #g = g.copy().reshape((n, 1))
        #h = h.copy().reshape((n, 1))
        #
        pdb.set_trace()  # leftover debugging breakpoint; remove for production runs
        z_g = ma.asarray(nan + zeros(n))
        obs_cov = ma.asarray(inf + zeros(n))
        if 1 in T:
            z_g[T==1] = z[T==1]
            obs_cov[T==1] = observation_var_g
        if 2 in T:
            z_g[T==2] = z[T==2] - h[T==2]
            obs_cov[T==2] = observation_var_h
        #for i in xrange(n):
        #    z_i = z[shot_id == i]
        #    T_i = T[shot_id == i]
        #    if 1 in T_i and 2 in T_i:
        #        # Sample mean and variance for multiple observations
        #        n_obs_g, n_obs_h = sum(T_i == 1), sum(T_i == 2)
        #        obs_cov_g, obs_cov_h = observation_var_g/n_obs_g, observation_var_h/n_obs_h
        #        z_g[i] = (mean(z_i[T_i == 1])/obs_cov_g + mean(z_i[T_i == 2] - h[i])/obs_cov_h)/(1/obs_cov_g + 1/obs_cov_h)
        #        obs_cov[i] = 1/(1/obs_cov_g + 1/obs_cov_h)
        #    elif 1 in T_i:
        #        n_obs_g = sum(T_i == 1) 
        #        z_g[i] = mean(z_i[T_i == 1])
        #        obs_cov[i] = observation_var_g/n_obs_g
        #    elif 2 in T_i:
        #        n_obs_h = sum(T_i == 2) 
        #        z_g[i] = mean(z_i[T_i == 2] - h[i])
        #        obs_cov[i] = observation_var_h/n_obs_h

        z_g[isnan(z_g)] = ma.masked
        obs_cov[isinf(obs_cov)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([prior_mu_g[0],])
        kalman.initial_state_covariance = array([prior_cov_g[0],])
        kalman.transition_matrices = eye(1)
        kalman.transition_covariance = array([transition_var_g,])
        kalman.observation_matrices = eye(1)
        kalman.observation_covariance = obs_cov
        sampled_g = forward_filter_backward_sample(kalman, z_g, prior_mu_g, prior_cov_g)
        return sampled_g.reshape((n,))
Example no. 24
    def updateExpectations(self):
        a, b = self.params['a'], self.params['b']
        E = s.divide(a, a + b)
        lnE = special.digamma(a) - special.digamma(a + b)
        lnEInv = special.digamma(b) - special.digamma(a + b)  # expectation of ln(1-X)
        lnEInv[s.isinf(lnEInv)] = -s.inf  # there is a numerical error in lnEInv if E=1
        self.expectations = {'E': E, 'lnE': lnE, 'lnEInv': lnEInv}
Example no. 25
def get_DH(A):
    # D^{-1/2}
    diags = A.sum(axis=1).flatten()
    C, R = A.shape
    with scipy.errstate(divide='ignore'):
        diag_s = 1.0 / scipy.sqrt(diags)
    diag_s[scipy.isinf(diag_s)] = 0
    DH = scipy.sparse.spdiags(diag_s, [0], C, R, format='csr')
    return DH
Example no. 26
    def _oneEvaluation(self, evaluable):
        """ This method should be called by all optimizers for producing an evaluation. """
        if self._wasUnwrapped:
            self.wrappingEvaluable._setParameters(evaluable)
            res = self.__evaluator(self.wrappingEvaluable)
        elif self._wasWrapped:
            res = self.__evaluator(evaluable.params)
        else:
            res = self.__evaluator(evaluable)
            ''' added by JPQ '''
            if self.constrained:
                self.feasible = self.__evaluator.outfeasible
                self.violation = self.__evaluator.outviolation
            # ---
        if isscalar(res):
            # detect numerical instability
            if isnan(res) or isinf(res):
                raise DivergenceError
            # always keep track of the best
            if (self.numEvaluations == 0
                    or self.bestEvaluation is None
                    or (self.minimize and res <= self.bestEvaluation)
                    or (not self.minimize and res >= self.bestEvaluation)):
                self.bestEvaluation = res
                self.bestEvaluable = evaluable.copy()

        self.numEvaluations += 1

        # if desired, also keep track of all evaluables and/or their fitness.
        if self.storeAllEvaluated:
            if self._wasUnwrapped:
                self._allEvaluated.append(self.wrappingEvaluable.copy())
            elif self._wasWrapped:
                self._allEvaluated.append(evaluable.params.copy())
            else:
                self._allEvaluated.append(evaluable.copy())
        if self.storeAllEvaluations:
            if self._wasOpposed and isscalar(res):
                ''' added by JPQ '''
                if self.constrained:
                    self._allEvaluations.append([-res, self.feasible, self.violation])
                # ---
                else:
                    self._allEvaluations.append(-res)
            else:
                ''' added by JPQ '''
                if self.constrained:
                    self._allEvaluations.append([res, self.feasible, self.violation])
                # ---
                else:
                    self._allEvaluations.append(res)
        ''' added by JPQ '''
        if self.constrained:
            return [res, self.feasible, self.violation]
        else:
            # ---
            return res
Example no. 27
def plot_update(syst,
                const,
                U_r=None,
                U_erf=None,
                G_r=None,
                c_r=None,
                g_r=None,
                c_f=None):
    # update the plotted data
    # check for the minimum/maximum and update axes?

    # U_r can contain Inf and NaN, which pylab cannot plot,
    # so replace them with sentinel values before plotting
    if U_r is not None:
        U_plot = copy(U_r)
        U_plot[isinf(U_plot)] = const.inf_value
        U_plot[isnan(U_plot)] = const.nan_value

    # it's necessary to create deep copies of objects that are subject to change
    # i.e., G_r, c_r, c_f - they are all multiplied/divided by some factors. this can
    # lead to problems with display, since pylab.draw() is redrawing all objects
    # if
    #     set_ydata(c_r_ij)
    #     draw()
    # is used, then pylab obtains reference to c_r_ij. when in the program c_r_ij is multiplied by r
    # then a later invocation of
    #     set_ydata(G_r_ij)
    #     draw()
    # will display updated G_r, but also updated c_r - and we don't want that!
    subplot_number = 0
    for i in range(syst['ncomponents']):
        for j in range(i, syst['ncomponents']):
            if U_r is not None:
                p_U_ij[subplot_number].set_ydata(U_plot[i, j])
            if G_r is not None:
                p_G_ij[subplot_number].set_ydata(copy(G_r[i, j]))
            if c_r is not None:
                p_c_ij[subplot_number].set_ydata(copy(c_r[i, j]))
            if g_r is not None:
                p_g_ij[subplot_number].set_ydata(g_r[i, j])
            subplot_number += 1
        # end for j in range(ncomponents)
    # end for i in range(ncomponents)

    for i in range(syst['ncomponents']):
        for j in range(i, syst['ncomponents']):
            if c_f is not None:
                p_c_ij[subplot_number].set_ydata(copy(c_f[i, j]))
            if U_erf is not None:
                p_U_ij[subplot_number].set_ydata(U_erf[i, j])
            subplot_number += 1
        # end for j in range(ncomponents)
    # end for i in range(ncomponents)

    pylab.draw()
Example no. 28
    def sample(self, model, evidence):
        z = evidence['z']
        g = evidence['g']
        h = evidence['h']
        T = evidence['T']
        phi = evidence['phi']
        transition_var_h = evidence['transition_var_h']
        shot_id = evidence['shot_id']

        observation_var_h = model.known_params['observation_var_h']
        mu_h = model.known_params['mu_h']
        prior_mu_h = model.hyper_params['h']['mu']
        prior_cov_h = model.hyper_params['h']['cov']
        n = len(h)
        N = len(z)

        # Making g, h, and z vector valued to avoid ambiguity
        g = g.copy().reshape((n, 1))
        h = h.copy().reshape((n, 1))

        z_h = ma.asarray(nan + zeros((n, 1)))
        obs_cov = ma.asarray(inf + zeros((n, 1, 1)))
        for i in xrange(n):
            z_i = z[shot_id == i]
            T_i = T[shot_id == i]
            if 2 in T_i:
                # Sample mean and variance for multiple observations
                n_obs = sum(T_i == 2)
                z_h[i] = mean(z_i[T_i == 2])
                obs_cov[i] = observation_var_h / n_obs

        z_h[isnan(z_h)] = ma.masked
        obs_cov[isinf(obs_cov)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([
            prior_mu_h[0],
        ])
        kalman.initial_state_covariance = array([
            prior_cov_h[0],
        ])
        kalman.transition_matrices = array([
            phi,
        ])
        kalman.transition_covariance = array([
            transition_var_h,
        ])
        kalman.transition_offsets = mu_h * (1 - phi) * ones((n, 1))
        kalman.observation_matrices = eye(1)
        kalman.observation_offsets = g
        kalman.observation_covariance = obs_cov
        sampled_h = forward_filter_backward_sample(kalman, z_h, prior_mu_h,
                                                   prior_cov_h)

        return sampled_h.reshape((n, ))
Example no. 29
def makehist(testpath,npulses):
    """
        This functions are will create histogram from data made in the testpath.
        Inputs
            testpath - The path that the data is located.
            npulses - The number of pulses in the sim.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    params = ['Ne', 'Te', 'Ti', 'Vi']
    histlims = [[1e10, 3e11], [1000., 3000.], [100., 2500.], [-400., 400.]]
    erlims = [[-2e11, 2e11], [-1000., 1000.], [-800., 800], [-400., 400.]]
    erperlims = [[-100., 100.]]*4
    lims_list = [histlims, erlims, erperlims]
    errdict = makehistdata(params, testpath)[:4]
    ernames = ['Data', 'Error', 'Error Percent']


    # Two dimensional histograms
    pcombos = [i for i in itertools.combinations(params, 2)]
    c_rows = int(math.ceil(float(len(pcombos))/2.))
    (figmplf, axmat) = plt.subplots(c_rows, 2, figsize=(12, c_rows*6), facecolor='w')
    axvec = axmat.flatten()
    for icomn, icom in enumerate(pcombos):
        curax = axvec[icomn]
        str1, str2 = icom
        _, _, _ = make2dhist(testpath, PARAMDICT[str1], PARAMDICT[str2], figmplf, curax)
    filetemplate = str(Path(testpath).joinpath('AnalysisPlots', 'TwoDDist'))
    plt.tight_layout()
    plt.subplots_adjust(top=0.95)
    figmplf.suptitle('Pulses: {0}'.format(npulses), fontsize=20)
    fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
    plt.savefig(fname)
    plt.close(figmplf)
    # One dimensional histograms
    for ierr, iername in enumerate(ernames):
        filetemplate = str(Path(testpath).joinpath('AnalysisPlots', iername))
        (figmplf, axmat) = plt.subplots(2, 2, figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        for ipn, iparam in enumerate(params):
            plt.sca(axvec[ipn])
            if sp.any(sp.isinf(errdict[ierr][iparam])):
                continue
            binlims = lims_list[ierr][ipn]
            bins = sp.linspace(binlims[0], binlims[1], 100)
            xdata = errdict[ierr][iparam]
            xlog = sp.logical_and(xdata >= binlims[0], xdata < binlims[1])

            histhand = sns.distplot(xdata[xlog], bins=bins, kde=True, rug=False)

            axvec[ipn].set_title(iparam)
        figmplf.suptitle(iername +' Pulses: {0}'.format(npulses), fontsize=20)
        fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
        plt.savefig(fname)
        plt.close(figmplf)
Example no. 30
    def score_image(self, image):
        """
        This finds whether the image is cloudy or not.

        :param image:
        :return:
        """

        pickle_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cache/ck_cloud.p')

        # Load the cloud thresholds
        [cloudy_model, partly_cloudy_model, clear_model] = pickle.load(open(pickle_file, "rb"))

        mean, std, window_size = self.process_image(image)
        p = self.fit_model(window_size, std)
        #p = self.fit_model(window_size, mean)

        # Rebuild the fitted functions over the window range, form the
        # residual vectors, and take the Euclidean norm of each;
        # the model with the smallest norm is the best match.

        fitfunc = lambda p, x: p[0] * x ** p[1]

        clear_residual = scipy.absolute(fitfunc(p, window_size) - fitfunc(clear_model, window_size))
        pc_residual = scipy.absolute(fitfunc(p, window_size) - fitfunc(partly_cloudy_model, window_size))
        cloudy_residual = scipy.absolute(fitfunc(p, window_size) - fitfunc(cloudy_model, window_size))

        clear_residual[scipy.isinf(clear_residual)] = 0.0
        clear_residual[scipy.isnan(clear_residual)] = 0.0
        pc_residual[scipy.isinf(pc_residual)] = 0.0
        pc_residual[scipy.isnan(pc_residual)] = 0.0
        cloudy_residual[scipy.isinf(cloudy_residual)] = 0.0
        cloudy_residual[scipy.isnan(cloudy_residual)] = 0.0

        clear_norm = scipy.linalg.norm(clear_residual)
        pc_norm = scipy.linalg.norm(pc_residual)
        cloudy_norm = scipy.linalg.norm(cloudy_residual)

        smallest_val = [clear_norm, pc_norm, cloudy_norm].index(min([clear_norm, pc_norm, cloudy_norm]))
        lg.debug('score :: ' + str(smallest_val))

        return smallest_val
Example no. 31
def normalized_laplacian(W):
    n, m = W.shape
    diags = W.sum(axis=0)                    # degree vector
    D = np.diag(diags)
    L = D - W                                # unnormalized Laplacian
    with sc.errstate(divide='ignore'):
        diags_sqrt = 1.0 / sc.sqrt(diags)
    diags_sqrt[sc.isinf(diags_sqrt)] = 0     # isolated nodes
    DH = sp.spdiags(diags_sqrt, [0], m, n, format='csr')
    DH = DH.toarray()
    L = DH.dot(L.dot(DH))

    return L
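
A quick property check, as a sketch: for an undirected graph the symmetric normalized Laplacian has eigenvalues in [0, 2] (assumes the module's sc/sp aliases for scipy and scipy.sparse):

import numpy as np

W = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 0., 0.]])
evals = np.linalg.eigvalsh(normalized_laplacian(W))
assert evals.min() > -1e-10 and evals.max() < 2.0 + 1e-10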
Example no. 32
def _normalize_diffusion_matrix(A):
    n, m = A.shape
    A_with_selfloop = A
    diags = A_with_selfloop.sum(axis=1).flatten()

    with scipy.errstate(divide='ignore'):
        diags_sqrt = 1.0 / scipy.sqrt(diags)
    diags_sqrt[scipy.isinf(diags_sqrt)] = 0
    DH = sp.spdiags(diags_sqrt, [0], m, n, format='csc')
    d = DH.dot(A_with_selfloop.dot(DH))
    return d
Example no. 33
def update(i):
    global t, arrow_magn, shrink_factor, full_size
    for k in range(capture_interval):
        wind_field.update(dt)
        plume_model.update(dt)
        # raw_input()
        t += dt
        print(t)

    velocity_field = wind_field.velocity_field
    u, v = velocity_field[:, :, 0], velocity_field[:, :, 1]
    u,v = u[0:full_size-1:shrink_factor,0:full_size-1:shrink_factor],\
        v[0:full_size-1:shrink_factor,0:full_size-1:shrink_factor]
    vector_field.set_UVC(u, v)
    x_wind, y_wind = scipy.cos(constant_wind_angle), scipy.sin(
        constant_wind_angle)
    wind_arrow.set_positions(
        (xmin + (xmax - xmin) / 2, ymax - 0.2 * (ymax - ymin)),
        (xmin + (xmax - xmin) / 2 + arrow_magn * x_wind, ymax - 0.2 *
         (ymax - ymin) + arrow_magn * y_wind))
    text = '{0} min {1} sec'.format(int(scipy.floor(abs(t / 60.))),
                                    int(scipy.floor(abs(t) % 60.)))
    timer.set_text(text)

    conc_array = array_gen.generate_single_array(plume_model.puffs)

    log_im = scipy.log(conc_array.T[::-1])
    cutoff_l = scipy.percentile(log_im[~scipy.isinf(log_im)], 10)
    cutoff_u = scipy.percentile(log_im[~scipy.isinf(log_im)], 99)

    conc_im.set_data(log_im)
    n = matplotlib.colors.Normalize(vmin=cutoff_l, vmax=cutoff_u)
    conc_im.set_norm(n)

    concStorer.store(conc_array.T[::-1])
    last = time.time()

    windStorer.store(velocity_field)
    plumeStorer.store(plume_model.puffs)

    return [conc_im]
Example no. 34
def cocitation_modularity(partition, adjacency_matrix, resolution=1.0):
    """
    Compute the modularity of a node partition of a cocitation graph.
    Parameters
    ----------
    partition: dict
       The partition of the nodes.
       The keys of the dictionary correspond to the nodes and the values to the communities.
    adjacency_matrix: scipy.csr_matrix or np.ndarray
        The adjacency matrix of the graph (sparse or dense).
    resolution: double, optional
        The resolution parameter in the modularity function (default=1.).

    Returns
    -------
    modularity : float
       The modularity.
    """

    if type(adjacency_matrix) == sparse.csr_matrix:
        adj_matrix = adjacency_matrix
    elif type(adjacency_matrix) == np.ndarray:
        adj_matrix = sparse.csr_matrix(adjacency_matrix)
    else:
        raise TypeError(
            "The argument should be a NumPy array or a SciPy Compressed Sparse Row matrix."
        )

    n_nodes = adj_matrix.shape[0]
    out_degree = np.array(adj_matrix.sum(axis=1).flatten())
    in_degree = adj_matrix.sum(axis=0).flatten()
    total_weight = out_degree.sum()

    with errstate(divide='ignore'):
        in_degree_sqrt = 1.0 / sqrt(in_degree)
    in_degree_sqrt[isinf(in_degree_sqrt)] = 0
    in_degree_sqrt = sparse.spdiags(in_degree_sqrt, [0],
                                    adj_matrix.shape[1],
                                    adj_matrix.shape[1],
                                    format='csr')
    normalized_adjacency = (adj_matrix.dot(in_degree_sqrt)).T

    communities = lab2com(partition)
    mod = 0.

    for community in communities:
        indicator_vector = np.zeros(n_nodes)
        indicator_vector[list(community)] = 1
        mod += np.linalg.norm(normalized_adjacency.dot(indicator_vector))**2
        mod -= (resolution / total_weight) * (np.dot(out_degree,
                                                     indicator_vector))**2

    return float(mod / total_weight)
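
Read directly off the loop above, the returned value is (w: total weight, \(\gamma\): resolution, \(d_{\mathrm{out}}\): out-degree vector, \(\mathbf{1}_C\): indicator vector of community C, \(D_{\mathrm{in}}\): diagonal in-degree matrix):

\[
Q = \frac{1}{w}\sum_{C}\left[\left\lVert\left(A\,D_{\mathrm{in}}^{-1/2}\right)^{\top}\mathbf{1}_C\right\rVert_2^2-\frac{\gamma}{w}\left(d_{\mathrm{out}}\cdot\mathbf{1}_C\right)^2\right].
\]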
Example no. 35
def binning_distance(trains, tau, exponent=2):
    if sp.isinf(tau) or any(tau > st.t_stop - st.t_start for st in trains):
        num_spikes = sp.atleast_2d([st.size for st in trains])
        return sp.absolute(num_spikes.T - num_spikes) ** exponent
    sampling_rate = 1.0 / tau
    binned, dummy = stools.bin_spike_trains({0: trains}, sampling_rate)
    d = sp.empty((len(trains), len(trains)))
    for i in xrange(len(trains)):
        for j in xrange(i, len(trains)):
            d[i, j] = d[j, i] = sp.sum(
                sp.absolute(binned[0][i] - binned[0][j]) ** exponent)
    return d
Example no. 36
def sphere_features(features, sphere_vectors):
    
    features.shape = features.shape[0], -1

    fmean, fstd = sphere_vectors
    features -= fmean        
    assert((fstd!=0).all())
    features /= fstd

    assert(not sp.isnan(sp.ravel(features)).any())
    assert(not sp.isinf(sp.ravel(features)).any())
    
    return features
Example no. 37
    def compute_by_noise_pow(self, signal, n_pow):
        s_spec = sp.fft(signal * self._window)
        s_amp = sp.absolute(s_spec)
        s_phase = sp.angle(s_spec)
        gamma = self._calc_aposteriori_snr(s_amp, n_pow)
        xi = self._calc_apriori_snr(gamma)
        self._prevGamma = gamma
        nu = gamma * xi / (1.0 + xi)
        self._G = (self._gamma15 * sp.sqrt(nu) / gamma) * sp.exp(-nu / 2.0) * \
            ((1.0 + nu) * spc.i0(nu / 2.0) + nu * spc.i1(nu / 2.0))
        idx = sp.less(s_amp ** 2.0, n_pow)
        self._G[idx] = self._constant
        idx = sp.isnan(self._G) + sp.isinf(self._G)
        self._G[idx] = xi[idx] / (xi[idx] + 1.0)
        idx = sp.isnan(self._G) + sp.isinf(self._G)
        self._G[idx] = self._constant
        self._G = sp.maximum(self._G, 0.0)
        amp = self._G * s_amp
        amp = sp.maximum(amp, 0.0)
        amp2 = self._ratio * amp + (1.0 - self._ratio) * s_amp
        self._prevAmp = amp
        spec = amp2 * sp.exp(s_phase * 1j)
        return sp.real(sp.ifft(spec))
Example no. 38
    def plot(self, func, interp=True, plotter='imshow'):
        import matplotlib as mpl
        from matplotlib import pylab as pl
        if interp:
            lpi = self.interpolator(func)
            z = lpi[self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                    self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
        else:
            y, x = sp.mgrid[
                self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
            z = func(x, y)

        z = sp.where(sp.isinf(z), 0.0, z)

        extent = (self.xrange[0], self.xrange[1], self.yrange[0],
                  self.yrange[1])
        pl.ioff()
        pl.clf()
        pl.hot()  # Some like it hot
        if plotter == 'imshow':
            pl.imshow(sp.nan_to_num(z),
                      interpolation='nearest',
                      extent=extent,
                      origin='lower')
        elif plotter == 'contour':
            Y, X = sp.ogrid[
                self.yrange[0]:self.yrange[1]:complex(0, self.nrange),
                self.xrange[0]:self.xrange[1]:complex(0, self.nrange)]
            pl.contour(sp.ravel(X), sp.ravel(Y), z, 20)
        x = self.x
        y = self.y
        lc = mpl.collections.LineCollection(sp.array([
            ((x[i], y[i]), (x[j], y[j])) for i, j in self.tri.edge_db
        ]),
                                            colors=[(0, 0, 0, 0.2)])
        ax = pl.gca()
        ax.add_collection(lc)

        if interp:
            title = '%s Interpolant' % self.name
        else:
            title = 'Reference'
        if hasattr(func, 'title'):
            pl.title('%s: %s' % (func.title, title))
        else:
            pl.title(title)

        pl.show()
        pl.ion()
Example no. 39
def to_mallet_dataset(obs):
    """
    Convert to mallet in svmlight format, which also supports continuous features
    """
    mystr = ""
    for d in obs.observations:
        mystr += str(d.label) + " "
        for fname, fvalue in d.features_obs.iteritems():
            if isnan(fvalue) or isinf(fvalue):
                fvalue = 0.0
            mystr += str(fname) + ":%.10f " % fvalue
            mystr += "\n"
        mystr += "\n"
    return mystr
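
With the per-observation newline outside the inner loop, each observation becomes one svmlight-style line, e.g. (hypothetical feature names and values):

1 width:3.7000000000 height:0.0000000000
-1 width:1.2000000000 height:5.5000000000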
Example no. 40
    def learn_embeddings(self):
        n, m = self.adj_matrix.shape
        diags = self.adj_matrix.sum(axis=1).flatten()
        D = sparse.spdiags(diags, [0], m, n, format='csr')
        L = D - self.adj_matrix
        with scipy.errstate(divide='ignore'):
            diags_sqrt = 1.0 / scipy.sqrt(diags)
        diags_sqrt[scipy.isinf(diags_sqrt)] = 0
        DH = sparse.spdiags(diags_sqrt, [0], m, n, format='csr')
        laplacian = DH.dot(L.dot(DH))

        _, v = sparse.linalg.eigs(laplacian, k=self.dim + 1, which='SM')
        embeddings = v[:, 1:].real
        return embeddings
 def compute_by_noise_pow(self,signal,n_pow):
     s_spec = sp.fft(signal *self._window)
     s_amp = sp.absolute(s_spec)
     s_phase = sp.angle(s_spec)
     gamma = self._calc_aposteriori_snr(s_amp,n_pow)
     #xi = self._calc_apriori_snr2(gamma,n_pow)
     xi = self._calc_apriori_snr(gamma)
     self._prevGamma = gamma
     u = 0.5 - self._mu/(4.0*sp.sqrt(gamma*xi))
     self._G = u + sp.sqrt(u**2.0 + self._tau/(gamma*2.0))
     idx = sp.less(s_amp**2.0,n_pow)
     self._G[idx] = self._constant
     idx = sp.isnan(self._G) + sp.isinf(self._G)
     self._G[idx] = xi[idx] / ( xi[idx] + 1.0)
     idx = sp.isnan(self._G) + sp.isinf(self._G)
     self._G[idx] = self._constant
     self._G = sp.maximum(self._G,0.0)
     amp = self._G * s_amp
     amp = sp.maximum(amp,0.0)
     amp2 = self._ratio*amp + (1.0-self._ratio)*s_amp
     self._prevAmp = amp
     spec = amp2 * sp.exp(s_phase*1j)
     return sp.real(sp.ifft(spec))
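The resynthesis step at the end of this method (scale the magnitude, keep the noisy phase, inverse-FFT) is independent of how the gain G is estimated; a minimal sketch with a constant stand-in gain, assuming only numpy:

import numpy as np

x = np.random.randn(512)                  # one windowed analysis frame
spec = np.fft.fft(x)
amp, phase = np.abs(spec), np.angle(spec)
G = 0.5                                   # stand-in for the per-bin gain above
enhanced = np.real(np.fft.ifft(G * amp * np.exp(1j * phase)))
# A constant gain is plain attenuation; the method above instead derives G
# per bin from the a-priori/a-posteriori SNR estimates.
assert np.allclose(enhanced, 0.5 * x)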
def lapnormadj(A):

    import scipy
    import numpy as np
    n,m = A.shape
    d1 = A.sum(axis=1).flatten()
    d2 = A.sum(axis=0).flatten()
    d1_sqrt = 1.0/scipy.sqrt(d1)
    d2_sqrt = 1.0/scipy.sqrt(d2)
    d1_sqrt[scipy.isinf(d1_sqrt)] = 0
    d2_sqrt[scipy.isinf(d2_sqrt)] = 0
    la = np.zeros(shape=(n,m))

    for i in range(0,n):
        for j in range(0,m):
            # Divide A[i,j] by sqrt(d1[i]*d2[j]) using the precomputed inverse
            # square roots; rows and columns of zero degree stay zero.
            la[i,j] = A[i,j]*(d1_sqrt[i]*d2_sqrt[j])

    #D1 = scipy.sparse.spdiags(d1_sqrt, [0], n,m, format='coo')
    #D2 = scipy.sparse.spdiags(d2_sqrt, [0], n,m, format='coo')

    return scipy.sparse.coo_matrix(la)
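The double loop is equivalent to one vectorized expression; a sketch of the same normalization (divide each entry by sqrt(row degree * column degree)) on a small rectangular matrix, assuming only numpy/scipy:

import numpy as np
from scipy import sparse

A = np.array([[0., 2., 1.],
              [1., 0., 0.]])
d1 = A.sum(axis=1)                        # row degrees
d2 = A.sum(axis=0)                        # column degrees
with np.errstate(divide='ignore', invalid='ignore'):
    la = A / np.sqrt(np.outer(d1, d2))
la[~np.isfinite(la)] = 0.0                # zero-degree rows/columns map to 0
print(sparse.coo_matrix(la))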
Example no. 43
0
    def Update(self, s, y, s_dot_y=None):
        if len(self.s) == self.maxnumvecs:
            self.s.pop(0)
            self.y.pop(0)
            self.rho.pop(0)

        if s_dot_y is None: s_dot_y = dot(s, y)
        rho = 1.0 / s_dot_y
        if isinf(rho):
            print 'Warning, s . y = 0; ignoring pair'
        else:
            self.s.append(s)
            self.y.append(y)
            self.rho.append(rho)
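A toy run of this bounded-memory bookkeeping, with a minimal stand-in for the surrounding class (PairMemory is hypothetical; the skip rule fires when s . y = 0, which would make rho infinite):

import numpy as np

class PairMemory(object):
    def __init__(self, maxnumvecs=2):
        self.s, self.y, self.rho = [], [], []
        self.maxnumvecs = maxnumvecs

mem = PairMemory()
pairs = [(np.ones(3), np.ones(3)),
         (np.ones(3), np.zeros(3)),       # s . y == 0 -> pair is ignored
         (np.arange(3.), np.arange(3.))]
for s, y in pairs:
    s_dot_y = np.dot(s, y)
    if s_dot_y == 0:
        continue                          # same effect as the isinf(rho) branch
    if len(mem.s) == mem.maxnumvecs:
        mem.s.pop(0); mem.y.pop(0); mem.rho.pop(0)
    mem.s.append(s); mem.y.append(y); mem.rho.append(1.0 / s_dot_y)
print(len(mem.s))                         # 2: the zero-curvature pair was skipped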
def laplaceNorm(A):
    """

    :param mat: Adjacency matrix
    :return: laplacian normalized matrix
    """
    import scipy
    from scipy.sparse import csgraph
    n,m = A.shape
    diags = A.sum(axis=1).flatten()
    diags_sqrt = 1.0/scipy.sqrt(diags)
    diags_sqrt[scipy.isinf(diags_sqrt)] = 0
    #print diags_sqrt
    DH = scipy.sparse.spdiags(diags_sqrt, [0], m,n, format='coo')
    return  scipy.sparse.coo_matrix(DH * A * DH)
def laplaceNorm(A):
    """
    Function to perform laplcian normalization of mxm matrix
    :param mat: Adjacency matrix
    :return: laplacian normalized matrix
    """
    import scipy
    from scipy.sparse import csgraph
    n,m = A.shape
    diags = A.sum(axis=1).flatten()
    diags_sqrt = 1.0/scipy.sqrt(diags)
    diags_sqrt[scipy.isinf(diags_sqrt)] = 0
    #print diags_sqrt
    DH = scipy.sparse.spdiags(diags_sqrt, [0], m,n, format='coo')
    return  scipy.sparse.coo_matrix(DH * A * DH)
Example no. 46
0
    def sample(self, model, evidence):
        z = evidence['z']
        g = evidence['g']
        h = evidence['h']
        T = evidence['T']
        phi  = evidence['phi']
        transition_var_h = evidence['transition_var_h']
        shot_id = evidence['shot_id']

        observation_var_h = model.known_params['observation_var_h']
        mu_h = model.known_params['mu_h']
        prior_mu_h = model.hyper_params['h']['mu']
        prior_cov_h = model.hyper_params['h']['cov']
        n = len(h)
        N = len(z)

        # Making g, h, and z vector valued to avoid ambiguity
        z_h = ma.asarray(nan + zeros(n))
        obs_cov = ma.asarray(inf + zeros(n))
        if 2 in T:
            z_h[T==2] = z[T==2]
            obs_cov[T==2] = observation_var_h
        #pdb.set_trace()  # debugging breakpoint, left disabled
        #for i in xrange(n):
        #    z_i = z[shot_id == i]
        #    T_i = T[shot_id == i]
        #    if 2 in T_i:
        #        # Sample mean and variance for multiple observations
        #        n_obs = sum(T_i == 2)
        #        z_h[i] = mean(z_i[T_i == 2])
        #        obs_cov[i] = observation_var_h/n_obs

        z_h[isnan(z_h)] = ma.masked
        obs_cov[isinf(obs_cov)] = ma.masked

        kalman = self._kalman
        kalman.initial_state_mean = array([prior_mu_h[0],])
        kalman.initial_state_covariance = array([prior_cov_h[0],])
        kalman.transition_matrices = array([phi,])
        kalman.transition_covariance = array([transition_var_h,])
        kalman.transition_offsets = mu_h*(1-phi)*ones((n, 1))
        kalman.observation_matrices = eye(1)
        kalman.observation_offsets = g
        kalman.observation_covariance = obs_cov
        sampled_h = forward_filter_backward_sample(kalman, z_h, prior_mu_h, prior_cov_h)

        return sampled_h.reshape((n,))
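The masked-array pattern used above (NaN observations masked, infinite observation variances masked) is how missing time steps are hidden from the Kalman filter; a minimal sketch, assuming only numpy:

import numpy as np
import numpy.ma as ma

z_h = ma.asarray([0.3, np.nan, 1.2])      # middle step has no observation
obs_cov = ma.asarray([0.1, np.inf, 0.1])
z_h[np.isnan(z_h)] = ma.masked
obs_cov[np.isinf(obs_cov)] = ma.masked
print(z_h)                                # [0.3 -- 1.2]
print(z_h.mask)                           # [False  True False]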
Example no. 47
0
	def lnposteriorargs(self,args,**fixed):
		kwargs = {}
		kwargs.update({key:val for key,val in zip(self.fitted,args)})
		kwargs.update(fixed)
		kwargs.update({par:scipy.nan for par in self.solved()})
		lnpriors = self.lnpriors(**kwargs)
		if scipy.isinf(sum(lnpriors)): return -scipy.inf,{}
		solved = self.solve_update_analytic(**kwargs)
		kwargs.update(solved)
		solved.update(fixed)
		lnpriors = self.lnpriors(**kwargs)
		lnlkls = self.lnlkls(**kwargs)
		lnposteriors = [lnlkl + lnprior for lnlkl,lnprior in zip(lnlkls,lnpriors)]
		for id,lnposterior in zip(self.ids,lnposteriors):
			solved['lnposterior_{}'.format(id)] = lnposterior
			#print id,solved['lnposterior_{}'.format(id)]
		return sum(lnposteriors),solved
Example no. 48
0
    def fit(self, adjacency_matrix):
        """Fits the model from data in adjacency_matrix

        Parameters
        ----------
        adjacency_matrix : Scipy csr matrix or numpy ndarray
              Adjacency matrix of the graph
        """

        if type(adjacency_matrix) == sparse.csr_matrix:
            adj_matrix = adjacency_matrix
        elif sparse.isspmatrix(adjacency_matrix) or type(adjacency_matrix) == np.ndarray:
            adj_matrix = sparse.csr_matrix(adjacency_matrix)
        else:
            raise TypeError(
                "The argument must be a NumPy array or a SciPy Sparse matrix.")
        n_nodes, m_nodes = adj_matrix.shape
        if n_nodes != m_nodes:
            raise ValueError("The adjacency matrix must be a square matrix.")
        #if csgraph.connected_components(adj_matrix, directed=False)[0] > 1:
            #raise ValueError("The graph must be connected.")
        if (adj_matrix != adj_matrix.maximum(adj_matrix.T)).nnz != 0:
            raise ValueError("The adjacency matrix is not symmetric.")

        # builds standard laplacian
        degrees = adj_matrix.dot(np.ones(n_nodes))
        degree_matrix = sparse.diags(degrees, format='csr')
        laplacian = degree_matrix - adj_matrix

        # applies normalization by node weights 
        with errstate(divide='ignore'):
            degrees_inv_sqrt = 1.0 / sqrt(degrees)
        degrees_inv_sqrt[isinf(degrees_inv_sqrt)] = 0
        weight_matrix = sparse.diags(degrees_inv_sqrt, format='csr')
            
        laplacian = weight_matrix.dot(laplacian.dot(weight_matrix))

        # spectral decomposition
        eigenvalues, eigenvectors = eigsh(laplacian, min(self.embedding_dimension + 1, n_nodes - 1), which='SM')
        self.eigenvalues_ = eigenvalues[1:]
        self.embedding_ = np.array(weight_matrix.dot(eigenvectors[:, 1:]))
        
        return self
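The symmetry test in fit is a useful sparse idiom in its own right: A equals its elementwise maximum with A.T only when A is symmetric, and comparing two sparse matrices returns a sparse boolean matrix whose nnz counts the mismatches. A small check, assuming only scipy:

import numpy as np
from scipy import sparse

A = sparse.csr_matrix(np.array([[0., 1.], [1., 0.]]))   # symmetric
B = sparse.csr_matrix(np.array([[0., 1.], [0., 0.]]))   # not symmetric
print((A != A.maximum(A.T)).nnz)   # 0
print((B != B.maximum(B.T)).nnz)   # 1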
Example no. 49
0
    def plot(self, func, interp=True, plotter='imshow'):
        import matplotlib as mpl
        from matplotlib import pylab as pl
        if interp:
            lpi = self.interpolator(func)
            z = lpi[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                    self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
        else:
            y, x = sp.mgrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                            self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            z = func(x, y)

        z = sp.where(sp.isinf(z), 0.0, z)

        extent = (self.xrange[0], self.xrange[1],
            self.yrange[0], self.yrange[1])
        pl.ioff()
        pl.clf()
        pl.hot() # Some like it hot
        if plotter == 'imshow':
            pl.imshow(sp.nan_to_num(z), interpolation='nearest', extent=extent, origin='lower')
        elif plotter == 'contour':
            Y, X = sp.ogrid[self.yrange[0]:self.yrange[1]:complex(0,self.nrange),
                self.xrange[0]:self.xrange[1]:complex(0,self.nrange)]
            pl.contour(sp.ravel(X), sp.ravel(Y), z, 20)
        x = self.x
        y = self.y
        lc = mpl.collections.LineCollection(sp.array([((x[i], y[i]), (x[j], y[j]))
            for i, j in self.tri.edge_db]), colors=[(0,0,0,0.2)])
        ax = pl.gca()
        ax.add_collection(lc)

        if interp:
            title = '%s Interpolant' % self.name
        else:
            title = 'Reference'
        if hasattr(func, 'title'):
            pl.title('%s: %s' % (func.title, title))
        else:
            pl.title(title)

        pl.show()
        pl.ion()
Example no. 50
0
def makehist(testpath,npulses):
    """ This functions are will create histogram from data made in the testpath.
        Inputs
            testpath - The path that the data is located.
            npulses - The number of pulses in the sim.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    params = ['Ne','Te','Ti','Vi'] 
    pvals = [1e11,1e11,2.1e3,1.1e3,0.]
    errdict = makehistdata(params,testpath)[:4]
    ernames = ['Data','Error','Error Percent']
    sig1 = sp.sqrt(1./npulses)
    
    
    for ierr, iername in enumerate(ernames):
        filetemplate= str(Path(testpath).join('AnalysisPlots',iername))
        (figmplf, axmat) = plt.subplots(2, 2,figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        for ipn, iparam in enumerate(params):
            plt.sca(axvec[ipn])
            if sp.any(sp.isinf(errdict[ierr][iparam])):
                continue
            histhand = sns.distplot(errdict[ierr][iparam], bins=100, kde=True, rug=False)
            xlim = histhand.get_xlim()
            if ierr==0:
                x0=pvals[ipn]
            else:
                x0=0
            if ierr==2:
                sig=sig1*100.
            else:
                sig=sig1*pvals[ipn]
            x = sp.linspace(xlim[0],xlim[1],100)
            den1 = sp.stats.norm(x0,sig).pdf(x)
            #plt.plot(x,den1,'g--')
            
            axvec[ipn].set_title(iparam)
        figmplf.suptitle(iername +' Pulses: {0}'.format(npulses), fontsize=20)
        fname= filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
        plt.savefig(fname)
        plt.close(figmplf)
Example no. 51
0
 def _unbounded_set_value(self, uvalue):
     """Set a varying param's value via a transformed parameter that has
     its range mapped to (-inf,inf)."""
     if self.status != varying:
         raise ParamError, 'Unbounded access valid only for varying parameter!'
     # *** Is this test redundant?
     if not self.bounded:
         raise ParamError, 'No bounds defined for parameter!'
     else:
         expv = exp(uvalue)
         if isinf(expv) or isnan(expv):
             if uvalue > 0:
                 self._set_value(self.hi)
             else:
                 self._set_value(self.lo)
         else:
             # Use _set_value, since this should be in range.  In fact,
             # I've found just-barely out-of-range results (presumably due
             # to roundoff at limits) that trigger exceptions using set_value.
             self._set_value( (self.lo + self.hi*expv)/(1. + expv) )
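The transform being inverted here is a logistic map from the unbounded value u to the interval (lo, hi): value = (lo + hi*e^u) / (1 + e^u), with the overflow branch pinning the result to an endpoint. A quick numeric check, assuming only the standard library:

import math

lo, hi = 2.0, 5.0

def bounded(u):
    expv = math.exp(u)
    return (lo + hi * expv) / (1.0 + expv)

print(bounded(0.0))     # 3.5, the midpoint of (lo, hi)
print(bounded(20.0))    # ~5.0: approaches hi as u -> +inf
print(bounded(-20.0))   # ~2.0: approaches lo as u -> -inf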
Example no. 52
0
def with_walking(time_arr, mins_per_square=1.3, transfer_constant=5):
    arr = time_arr.copy()
    cross_footprint = sp.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]).astype(bool)
    diag_footprint = sp.array([[1, 0, 1],[0, 1, 0], [1, 0, 1]]).astype(bool)
    arr[sp.isnan(arr)] = sp.inf
    for i in range(60):
        cross_arr = sp.ndimage.minimum_filter(arr, footprint=cross_footprint)
        cross_arr[sp.isnan(cross_arr)] = sp.inf
        cross_changes = (cross_arr != arr)
        cross_arr[cross_changes] += 1*mins_per_square
    
        diag_arr = sp.ndimage.minimum_filter(arr, footprint=diag_footprint)
        diag_arr[sp.isnan(diag_arr)] = sp.inf
        diag_changes = (diag_arr != arr)
        diag_arr[diag_changes] += 1.4*mins_per_square
    
        arr = sp.minimum(cross_arr, diag_arr)
    
    arr[sp.isinf(arr)] = sp.nan
    
    return arr + transfer_constant
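A toy call, assuming the function above is in scope and that scipy still re-exports the numpy namespace as sp.* (which the snippet relies on); NaN cells are treated as unreached until a neighbour's time propagates in:

import numpy as np
import scipy.ndimage   # the snippet accesses it as sp.ndimage

times = np.full((5, 5), np.nan)
times[2, 2] = 0.0                  # one transit stop, zero minutes away
walk = with_walking(times, mins_per_square=1.3, transfer_constant=5)
print(walk[2, 2])                  # 5.0: transfer constant only
print(walk[2, 4])                  # 7.6: 5 + two squares' walk at 1.3 min each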
Example no. 53
0
def gmres(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None, M=None, callback=None, residuals=None):
    '''
    Generalized Minimum Residual Method (GMRES)
        GMRES iteratively refines the initial solution guess to the system Ax = b
    For robustness, Householder reflections are used to orthonormalize the Krylov Space
    Givens Rotations are used to provide the residual norm each iteration

    Parameters
    ----------
    A : array, matrix or sparse matrix
        n x n, linear system to solve
    b : array
        n x 1, right hand side
    x0 : array
        n x 1, initial guess
        default is a vector of zeros
    tol : float
        convergence tolerance
    restrt : int
        number of restarts
        total iterations = restrt*maxiter
    maxiter : int
        maximum number of allowed inner iterations
    xtype : type
        dtype for the solution
    M : matrix-like
        n x n, inverted preconditioner, i.e. solve M A x = b.
        For preconditioning with a mat-vec routine, set
        A.psolve = func, where func := M y
    callback : function
        callback( ||resid||_2 ) is called each iteration,
    residuals : {None, empty-list}
        If empty-list, residuals holds the residual norm history,
        including the initial residual, upon completion

    Returns
    -------
    (xNew, info)
    xNew -- an updated guess to the solution of Ax = b
    info -- halting status of gmres
            0  : successful exit
            >0 : convergence to tolerance not achieved,
                 return iteration count instead.  This value
                 is precisely the order of the Krylov space.
            <0 : numerical breakdown, or illegal input


    Examples
    --------
    >>> from pyamg.krylov import *
    >>> from scipy import rand
    >>> import pyamg
    >>> A = pyamg.poisson((50,50))
    >>> b = rand(A.shape[0],)
    >>> (x,flag) = gmres(A,b)
    >>> print pyamg.util.linalg.norm(b - A*x)

    References
    ----------
    Yousef Saad, "Iterative Methods for Sparse Linear Systems,
    Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003

    '''

    # Convert inputs to linear system, with error checking
    A,M,x,b,postprocess = make_system(A,M,x0,b,xtype)
    dimen = A.shape[0]

    # Choose type
    xtype = upcast(A.dtype, x.dtype, b.dtype, M.dtype)

    # We assume henceforth that shape=(n,) for all arrays
    b = ravel(array(b,xtype))
    x = ravel(array(x,xtype))

    # Should norm(r) be kept
    if residuals == []:
        keep_r = True
    else:
        keep_r = False

    # check number of iterations
    if restrt is None:
        restrt = 1
    elif restrt < 1:
        raise ValueError('Number of restarts must be positive')

    if maxiter is None:
        maxiter = int(max(ceil(dimen/restrt)))
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')
    elif maxiter > dimen:
        warn('maximum allowed inner iterations (maxiter) is the number of degrees of freedom')
        maxiter = dimen

    # Scale tol by normb
    normb = linalg.norm(b)
    if normb == 0:
        pass
    #    if callback != None:
    #        callback(0.0)
    #
    #    return (postprocess(zeros((dimen,)), dtype=xtype),0)
    else:
        tol = tol*normb

    # Is this a one dimensional matrix?
    if dimen == 1:
        entry = ravel(A*array([1.0], dtype=xtype))
        return (postprocess(b/entry), 0)

    # Prep for method
    r = b - ravel(A*x)
    normr = linalg.norm(r)
    if keep_r:
        residuals.append(normr)

    # Is initial guess sufficient?
    if normr <= tol:
        if callback != None:
            callback(norm(r))

        return (postprocess(x), 0)

    #Apply preconditioner
    r = ravel(M*r)
    normr = linalg.norm(r)
    # Check for nan, inf
    if any(isnan(r)) or any(isinf(r)):
        warn('inf or nan after application of preconditioner')
        return(postprocess(x), -1)

    # Use separate variable to track iterations.  If convergence fails, we cannot
    # simply report niter = (outer-1)*maxiter + inner.  Numerical error could cause
    # the inner loop to halt before reaching maxiter while the actual ||r|| > tol.
    niter = 0

    # Begin GMRES
    for outer in range(restrt):

        # Calculate vector w, which defines the Householder reflector
        #    Take shortcut in calculating,
        #    w = r + sign(r[1])*||r||_2*e_1
        w = r
        beta = mysign(w[0])*normr
        w[0] += beta
        w = w / linalg.norm(w)

        # Preallocate for Krylov vectors, Householder reflectors and Hessenberg matrix
        # Space required is O(dimen*maxiter)
        H = zeros( (maxiter, maxiter), dtype=xtype)         # upper Hessenberg matrix (actually made upper tri with Given's Rotations)
        W = zeros( (dimen, maxiter), dtype=xtype)           # Householder reflectors
        W[:,0] = w

        # Multiply r with (I - 2*w*w.T), i.e. apply the Householder reflector
        # This is the RHS vector for the problem in the Krylov Space
        g = zeros((dimen,), dtype=xtype)
        g[0] = -beta

        for inner in range(maxiter):
            # Calculate Krylov vector in two steps
            # (1) Calculate v = P_j = (I - 2*w*w.T)v, where k = inner
            v = -2.0*conjugate(w[inner])*w
            v[inner] += 1.0
            # (2) Calculate the rest, v = P_1*P_2*P_3...P_{j-1}*ej.
            for j in range(inner-1,-1,-1):
                v = v - 2.0*dot(conjugate(W[:,j]), v)*W[:,j]

            # Calculate new search direction
            v = ravel(A*v)

            #Apply preconditioner
            v = ravel(M*v)
            # Check for nan, inf
            if any(isnan(v)) or any(isinf(v)):
                warn('inf or nan after application of preconditioner')
                return(postprocess(x), -1)

            # Factor in all Householder orthogonal reflections on new search direction
            for j in range(inner+1):
                v = v - 2.0*dot(conjugate(W[:,j]), v)*W[:,j]

            # Calculate next Householder reflector, w
            #  w = v[inner+1:] + sign(v[inner+1])*||v[inner+1:]||_2*e_{inner+1)
            #  Note that if maxiter = dimen, then this is unnecessary for the last inner
            #     iteration, when inner = dimen-1.  Here we do not need to calculate a
            #     Householder reflector or Given's rotation because nnz(v) is already the
            #     desired length, i.e. we do not need to zero anything out.
            if inner != dimen-1:
                w = zeros((dimen,), dtype=xtype)
                vslice = v[inner+1:]
                alpha = linalg.norm(vslice)
                if alpha != 0:
                    alpha = mysign(vslice[0])*alpha
                    # We do not need the final reflector for future calculations
                    if inner < (maxiter-1):
                        w[inner+1:] = vslice
                        w[inner+1] += alpha
                        w = w / linalg.norm(w)
                        W[:,inner+1] = w

                    # Apply new reflector to v
                    #  v = v - 2.0*w*(w.T*v)
                    v[inner+1] = -alpha
                    v[inner+2:] = 0.0

            # Apply all previous Given's Rotations to v
            if inner == 0:
                # Q will store the cumulative effect of all Given's Rotations
                Q = scipy.sparse.eye(dimen, dimen, format='csr', dtype=xtype)

                # Declare initial Qj, which will be the current Given's Rotation
                rowptr  = hstack( (array([0, 2, 4],int), arange(5,dimen+3,dtype=int)) )
                colindices = hstack( (array([0, 1, 0, 1],int), arange(2, dimen,dtype=int)) )
                data = ones((dimen+2,), dtype=xtype)
                Qj = csr_matrix( (data, colindices, rowptr), shape=(dimen,dimen), dtype=xtype)
            else:
                # Could avoid building a global Given's Rotation, by storing
                # and applying each 2x2 matrix individually.
                # But that would require looping, the bane of wonderful Python
                Q = Qj*Q
                v = Q*v

            # Calculate Qj, the next Given's rotation, where j = inner
            #  Note that if maxiter = dimen, then this is unnecessary for the last inner
            #     iteration, when inner = dimen-1.  Here we do not need to calculate a
            #     Householder reflector or Given's rotation because nnz(v) is already the
            #     desired length, i.e. we do not need to zero anything out.
            if inner != dimen-1:
                if v[inner+1] != 0:
                    # Calculate terms for complex 2x2 Given's Rotation
                    # Note that abs(x) takes the complex modulus
                    h1 = v[inner]; h2 = v[inner+1];
                    h1_mag = abs(h1); h2_mag = abs(h2);
                    if h1_mag < h2_mag:
                        mu = h1/h2
                        tau = conjugate(mu)/abs(mu)
                    else:
                        mu = h2/h1
                        tau = mu/abs(mu)

                    denom = sqrt( h1_mag**2 + h2_mag**2 )
                    c = h1_mag/denom; s = h2_mag*tau/denom;
                    Qblock = array([[c, conjugate(s)], [-s, c]], dtype=xtype)

                    # Modify Qj in csr-format so that it represents the current
                    #   global Given's Rotation equivalent to Qblock
                    if inner != 0:
                        Qj.data[inner-1] = 1.0
                        Qj.indices[inner-1] = inner-1
                        Qj.indptr[inner-1] = inner-1

                    Qj.data[inner:inner+4] = ravel(Qblock)
                    Qj.indices[inner:inner+4] = [inner, inner+1, inner, inner+1]
                    Qj.indptr[inner:inner+3] = [inner, inner+2, inner+4]

                    # Apply Given's Rotation to g,
                    #   the RHS for the linear system in the Krylov Subspace.
                    #   Note that this dot does a matrix multiply, not an actual
                    #   dot product where a conjugate transpose is taken
                    g[inner:inner+2] = dot(Qblock, g[inner:inner+2])

                    # Apply effect of Given's Rotation to v
                    v[inner] = dot(Qblock[0,:], v[inner:inner+2])
                    v[inner+1] = 0.0

            # Write to upper Hessenberg Matrix,
            #   the LHS for the linear system in the Krylov Subspace
            H[:,inner] = v[0:maxiter]

            # Don't update normr if last inner iteration, because
            # normr is calculated directly after this loop ends.
            if inner < maxiter-1:
                normr = abs(g[inner+1])
                if normr < tol:
                    break

                # Allow user access to residual
                if callback != None:
                    callback( normr )
                if keep_r:
                    residuals.append(normr)

            niter += 1

        # end inner loop, back to outer loop

        # Find best update to x in Krylov Space, V.  Solve inner+1 x inner+1 system.
        #   Apparently this is the best way to solve a triangular
        #   system in the magical world of scipy
        piv = arange(inner+1)
        y = lu_solve((H[0:(inner+1),0:(inner+1)], piv), g[0:(inner+1)], trans=0)

        # Use Horner like Scheme to map solution, y, back to original space.
        # Note that we do not use the last reflector.
        update = zeros(x.shape, dtype=xtype)
        for j in range(inner,-1,-1):
            update[j] += y[j]
            # Apply j-th reflector, (I - 2.0*w_j*w_j.T)*update
            update = update - 2.0*dot(conjugate(W[:,j]), update)*W[:,j]

        x = x + update
        r = b - ravel(A*x)

        #Apply preconditioner
        r = ravel(M*r)
        normr = linalg.norm(r)
        # Check for nan, inf
        if any(isnan(r)) or any(isinf(r)):
            warn('inf or nan after application of preconditioner')
            return(postprocess(x), -1)

        # Allow user access to residual
        if callback != None:
            callback( normr )
        if keep_r:
            residuals.append(normr)

        # Has GMRES stagnated?
        indices = (x != 0)
        if indices.any():
            change = max(abs( update[indices] / x[indices] ))
            if change < 1e-12:
                # No change, halt
                return (postprocess(x), -1)

        # test for convergence
        if normr < tol:
            return (postprocess(x),0)

    # end outer loop

    return (postprocess(x), niter)
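The complex 2x2 Givens block built inside the inner loop (the mu/tau/c/s computation) is constructed precisely so that applying it to the pair (h1, h2) zeroes the second component while preserving the norm; a standalone numeric check of that recipe, assuming only numpy:

import numpy as np

h1, h2 = 3.0 + 4.0j, 1.0 - 2.0j
h1_mag, h2_mag = abs(h1), abs(h2)
if h1_mag < h2_mag:
    mu = h1 / h2
    tau = np.conjugate(mu) / abs(mu)
else:
    mu = h2 / h1
    tau = mu / abs(mu)
denom = np.sqrt(h1_mag**2 + h2_mag**2)
c = h1_mag / denom
s = h2_mag * tau / denom
Qblock = np.array([[c, np.conjugate(s)], [-s, c]])
rotated = Qblock.dot(np.array([h1, h2]))
print(abs(rotated[0]), abs(rotated[1]))   # (norm of (h1, h2), ~0.0)
assert abs(rotated[1]) < 1e-12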
Example no. 54
0
 def _oneEvaluation(self, evaluable):
     """ This method should be called by all tabu optimizers for producing an evaluation. 
     
     This is nearly identical to BlackBoxOptimizer's _oneEvaluation except that it subtracts
     the tabuPenalty from the evaluations of tabuEvaluables"""
     
     if not self._setUp:
         return BlackBoxOptimizer._oneEvaluation(self,evaluable)
     if self._wasUnwrapped:
         self.wrappingEvaluable._setParameters(evaluable)
         res = self.evaluator(self.wrappingEvaluable)
     elif self._wasWrapped:            
         res = self.evaluator(evaluable.params)
     else:            
         res = self.evaluator(evaluable)
         ''' added by JPQ '''
         if self.constrained :
             self.feasible = self.evaluator.outfeasible
             self.violation = self.evaluator.outviolation
         # ---
     if isscalar(res):
         # detect numerical instability
         if isnan(res) or isinf(res):
             raise DivergenceError
          # apply penalty if tabu
         for t in self.tabuList:
             if t(evaluable):
                 res-=self.tabuPenalty
                 break
         # always keep track of the best
         if (self.numEvaluations == 0
             or self.bestEvaluation is None
             or (self.minimize and res <= self.bestEvaluation)
             or (not self.minimize and res >= self.bestEvaluation)):
             self.bestEvaluation = res
             #update tabuList
             self.tabuList.append(self.tabuGenerator(self.bestEvaluable,evaluable))
             l=len(self.tabuList)
             if l > self.maxTabuList:
                 self.tabuList=self.tabuList[(l-self.maxTabuList):l]
             self.bestEvaluable = evaluable.copy()
     
     self.numEvaluations += 1
     
     # if desired, also keep track of all evaluables and/or their fitness.                        
     if self.storeAllEvaluated:
         if self._wasUnwrapped:            
             self._allEvaluated.append(self.wrappingEvaluable.copy())
         elif self._wasWrapped:            
             self._allEvaluated.append(evaluable.params.copy())
         else:            
             self._allEvaluated.append(evaluable.copy())        
     if self.storeAllEvaluations:
         if self._wasOpposed and isscalar(res):
             ''' added by JPQ '''
             if self.constrained :
                 self._allEvaluations.append([-res,self.feasible,self.violation])
             # ---
             else:
                 self._allEvaluations.append(-res)
         else:
             ''' added by JPQ '''
             if self.constrained :
                 self._allEvaluations.append([res,self.feasible,self.violation])
             # ---
             else:
                 self._allEvaluations.append(res)
     ''' added by JPQ '''
     if self.constrained :
         return [res,self.feasible,self.violation]
     else:
     # ---
         return res
Example no. 55
0
def yIsPoor(y):
    """Returns True if y is not usable, i.e. contains an inf or NaN"""
    return bool(scipy.any(scipy.isinf(y)) or scipy.any(scipy.isnan(y)))
Example no. 56
0
    def _pathwiseLearn(self, ss, varnames, bases, X_orig, X_orig_regress, y_orig,
                       max_num_bases, target_nmse, verbose=False, **fit_params):
        """Adapted from enet_path() in sklearn.linear_model.
        http://scikit-learn.sourceforge.net/modules/linear_model.html
        Compute Elastic-Net path with coordinate descent.  
        Returns list of model (or None if failure)."""
        if verbose:
            print '    Pathwise learn: begin. max_num_bases=%d' % max_num_bases
        max_iter = 5000 #default 5000. magic number.
        
        #Condition X and y: 
        # -"unbias" = rescale so that (mean=0, stddev=1) -- subtract each row's mean, then divide by stddev
        # -X transposed
        # -X as fortran array
        (X_unbiased, y_unbiased, X_avgs, X_stds, y_avg, y_std) = self._unbiasedXy(X_orig_regress, y_orig)
        X_unbiased = numpy.asfortranarray(X_unbiased) # make data contiguous in memory

        n_samples = X_unbiased.shape[0]
        vals = numpy.dot(X_unbiased.T, y_unbiased)
        vals = [val for val in vals if not scipy.isnan(val)]
        if vals: alpha_max = numpy.abs(max(vals) / (n_samples * ss.l1_ratio()))
        else:    alpha_max = 1.0 #backup: pick a value from the air

        #alphas = lotsa alphas at beginning, and usual rate for rest
        st, fin = numpy.log10(alpha_max*ss.eps()), numpy.log10(alpha_max)
        alphas1 = numpy.logspace(st, fin, num=ss.numAlphas()*10)[::-1][:ss.numAlphas()/4]
        alphas2 = numpy.logspace(st, fin, num=ss.numAlphas())
        alphas = sorted(set(alphas1).union(alphas2), reverse=True)

        if 'precompute' not in fit_params or fit_params['precompute'] is True:
            fit_params['precompute'] = numpy.dot(X_unbiased.T, X_unbiased)
            #if not 'Xy' in fit_params or fit_params['Xy'] is None:
            #    fit_params['Xy'] = numpy.dot(X_unbiased.T, y_unbiased)

        models = [] #build this up
        nmses = [] #for detecting stagnation
        cur_unbiased_coefs = None # init values for coefs
        start_time = time.time()
        for (alpha_i, alpha) in enumerate(alphas):
            #compute (unbiased) coefficients. Recall that mean=0 so no intercept needed
            clf = ElasticNetWithTimeout(alpha=alpha, l1_ratio=ss.l1_ratio(), fit_intercept=False,
                                        max_iter=max_iter, **fit_params)
            try:
                clf.fit(X_unbiased, y_unbiased)
            except TimeoutError:
                print '    Regularized update failed. Returning None'
                return None #failure
            except ValueError:
                print '    Regularized update failed with ValueError.'
                print '    X_unbiased:'
                print X_unbiased
                print '    y_unbiased:'
                print y_unbiased
                sys.exit(1)

            cur_unbiased_coefs = clf.coef_.copy()
            if cur_unbiased_coefs.shape == tuple():
                # This happens when we have only one variable because
                # ElasticNet calls numpy.squeeze(), which reduces a
                # single element array to a 0-d array. That would
                # crash us below in list(cur_unbiased_coefs). We just
                # undo the squeeze.
                cur_unbiased_coefs = cur_unbiased_coefs.reshape((1,))
                
                  
            #compute model; update models
            #  -"rebias" means convert from (mean=0, stddev=1) to original (mean, stddev)
            coefs = self._rebiasCoefs([0.0] + list(cur_unbiased_coefs), X_stds, X_avgs, y_std, y_avg)
            (coefs_n, bases_n, coefs_d, bases_d) = self._allocateToNumerDenom(ss, bases, coefs)
            model = FFXModel(coefs_n, bases_n, coefs_d, bases_d, varnames)
            models.append(model)

            #update nmses
            nmse_unbiased = nmse(numpy.dot(cur_unbiased_coefs, X_unbiased.T), y_unbiased,
                                 min(y_unbiased), max(y_unbiased))
            nmses.append(nmse_unbiased)

            #log
            num_bases = len(numpy.nonzero(cur_unbiased_coefs)[0])
            if verbose and ((alpha_i==0) or (alpha_i+1) % 50 == 0):
                print '      alpha %d/%d (%3e): num_bases=%d, nmse=%.6f, time %.2f s' % \
                    (alpha_i+1, len(alphas), alpha, num_bases, nmse_unbiased, time.time() - start_time)

            #maybe stop
            if scipy.isinf(nmses[-1]):
                if verbose:
                    print '    Pathwise learn: Early stop because nmse is inf'
                return None
            if nmse_unbiased < target_nmse:
                if verbose:
                    print '    Pathwise learn: Early stop because nmse < target'
                return models
            if num_bases > max_num_bases:
                if verbose:
                    print '    Pathwise learn: Early stop because num bases > %d' % max_num_bases
                return models
            if len(nmses) > 15 and round(nmses[-1], 4) == round(nmses[-15], 4):
                if verbose:
                    print '    Pathwise learn: Early stop because nmses stagnated'
                return models
        if verbose:
            print '    Pathwise learn: done'
        return models
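The "unbias"/"rebias" bracket around each fit (standardize X and y, fit with no intercept, then map coefficients back) rests on the usual identities w = w_std * y_std / X_std and intercept = y_avg - w . X_avg; a small sketch verifying them, with ordinary least squares standing in for the elastic net and assuming only numpy:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(200, 2) * [3.0, 0.5] + [10.0, -2.0]
y = 1.5 * X[:, 0] - 4.0 * X[:, 1] + 7.0

X_avgs, X_stds = X.mean(axis=0), X.std(axis=0)
y_avg, y_std = y.mean(), y.std()
X_unbiased = (X - X_avgs) / X_stds
y_unbiased = (y - y_avg) / y_std

w_std = np.linalg.lstsq(X_unbiased, y_unbiased, rcond=None)[0]  # mean 0: no intercept
w = w_std * y_std / X_stds              # "rebias" the slopes
b = y_avg - np.dot(w, X_avgs)           # recover the intercept
print(np.round(w, 6), round(b, 6))      # [ 1.5 -4. ] 7.0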