Example #1
File: fitter.py Project: palm86/pod
def test():
    import datasource
    import model
    import dataset

    f = Fitter()

    # define source of "experimental data"
    function = lambda s, i: (10.0*s/1.0)/(1.0 + s/1.0 + i/5.0)
    data_source = datasource.Generated_ScanDataSource(
        function,
        ['s', 'i'],
        'v',
        [scipy.logspace(-2, 2, 50), scipy.logspace(-2, 2, 10)],
        noise=0.3
    )

    # define model
    model = model.Equation_Model("Vmax*s/Ks/(1 + s/Ks + i/Ki)", ['s', 'i'])

    dataset.ScanDataSet('name', f, data_source, model)

    # specify the optimization algorithm, defaults to scipy_leastsq
    #~ alg = algorithm.scipy_leastsq()
    alg = algorithm.robust_biweight()
    f.setAlgorithm(alg)

    # specify the parameters to be fitted
    f.addParameter('Vmax', init=1.0, min=0, max=100)
    f.addParameter('Ks', init=1.0, min=0, max=10)
    f.addParameter('Ki', init=1.0, min=0, max=10)

    r = f.solve()
    r.writeOutput()
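Note: the Generated_ScanDataSource above evaluates the rate function over a 2-D logarithmic grid and adds noise. A minimal numpy sketch of that grid evaluation, assuming a multiplicative Gaussian noise model (the actual noise model of Generated_ScanDataSource is not shown here):

import numpy as np

f = lambda s, i: (10.0 * s / 1.0) / (1.0 + s / 1.0 + i / 5.0)
s = np.logspace(-2, 2, 50)              # substrate scan
i = np.logspace(-2, 2, 10)              # inhibitor scan
S, I = np.meshgrid(s, i)
v = f(S, I) * (1 + 0.3 * np.random.randn(*S.shape))  # noisy "measurements" (assumed noise model)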
Example #2
def generate_responses(isos,
                       flux,
                       struct='wims69',
                       lower=1e-5,
                       upper=2e7,
                       name='response.p',
                       overwrite=False):
    """This generates response functions for given isotopes,
     a given spectrum, and a desired group structure."""
    if os.path.isfile(name) and not overwrite:
        return pickle.load(open(directory + '/code/' + name, 'rb'))
    responses = {}
    eb = energy_groups(struct, lower, upper)
    responses['eb'] = eb
    responses['phi'] = sp.zeros(len(eb) - 1)
    for i in range(len(eb) - 1):
        E = sp.logspace(sp.log10(eb[i + 1]), sp.log10(eb[i]), 10000)  # num must be an int
        x = trapz(flux(E), E)
        print("->", i, eb[i + 1], eb[i], x)
        responses['phi'][i] = trapz(flux(E), E)

    interps = get_cross_section_interps(isos)
    responses['response'] = {}
    for iso in isos:
        print("...response for iso ", iso)
        responses['response'][iso] = sp.zeros(len(eb) - 1)
        fun = lambda x: flux(x) * interps[iso](x)
        for i in range(len(eb) - 1):
            E = sp.logspace(sp.log10(eb[i + 1]), sp.log10(eb[i]), 10000)  # num must be an int
            top = trapz(fun(E), E)
            responses['response'][iso][i] = top / responses['phi'][i]
    pickle.dump(responses, open(directory + '/code/' + name, 'wb'))
    return responses
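The group-wise flux integration above can be checked in isolation. A minimal sketch, assuming only numpy and scipy (scipy.integrate.trapezoid stands in for the trapz helper the original calls), integrating a toy 1/E spectrum over one log-spaced energy group:

import numpy as np
from scipy.integrate import trapezoid

flux = lambda E: 1.0 / E                      # toy spectrum standing in for the real flux
e_lo, e_hi = 1e-5, 2e7                        # group boundaries (eV)
E = np.logspace(np.log10(e_lo), np.log10(e_hi), 10000)
phi = trapezoid(flux(E), E)                   # group-integrated flux
print(phi)                                    # equals log(e_hi / e_lo) for a 1/E spectrum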
Example #3
def plot(nmin=-2, nmax=1, tmin=0, tmax=2, num=301, B=5.4, npar=1.8):
    Taxis = scipy.logspace(tmin, tmax, num) * 1e-3
    naxis = scipy.logspace(nmin, nmax, num) * 1e20
    n, T = scipy.meshgrid(naxis, Taxis)
    om = scipy.pi * 2 * 4.6e9
    #wpe = n*pow(ec,2)/(e0*mes)
    print(nu(n, T))
    Ains = As(n, T, B, om, npar)
    Bins = Bs(n, T, B, om, npar)
    Cins = Cs(n, T, B, om, npar)

    Q = (-Bins + scipy.sqrt(pow(Bins, 2) - 4 * Ains * Cins)) / 2 / Ains
    output = scipy.sqrt(
        scipy.sqrt(pow(scipy.real(Q), 2) + pow(scipy.imag(Q), 2)) -
        scipy.real(Q)) / pow(2, .5)
    CS = plt.contour(naxis,
                     Taxis * 1e3,
                     om * output / 299792458.,
                     locator=ticker.LogLocator(),
                     linewidths=2.,
                     colors=colors['b'])  #om*output/299792458.
    plt.clabel(CS, inline=1, fontsize=16, fmt=ticker.LogFormatterMathtext())  #

    #CS2 = plt.contour(naxis,Taxis*1e3,pow(2,-1)*abs(scipy.imag(Q)/abs(scipy.sqrt(scipy.real(Q))))*om/299792458.,locator=ticker.LogLocator(),linewidths=2.,colors=colors['g'])#om*output/299792458.
    #plt.clabel(CS2, inline=1, fontsize=16,fmt=ticker.LogFormatterMathtext())#

    wpe = n * pow(ec, 2) / (e0 * mes)
    wce = ec * B / mes
    wci = ec * B / mi
    output2 = nu(n, T) * pow(wpe, .5) / (2 * pow(om, 2)) * scipy.sqrt(
        abs((npar**2 - 1) / (1 - wpe / (pow(wce, 2) * (npar**2 - 1)))))
    #CS3 = plt.contour(naxis,Taxis*1e3,output2*om/299792458.,locator=ticker.LogLocator(),linewidths=2.,colors=colors['r'])#om*output/299792458.

    output3 = nu(n, T) * pow(wpe, .5) / (2 * pow(om, 2)) * npar
    # CS2 = plt.contour(naxis,Taxis*1e3,output3*om/299792458.,locator=ticker.LogLocator(),linewidths=2.,colors=colors['g'])#om*output/299792458.
    #plt.clabel(CS2, inline=1, fontsize=16,fmt=ticker.LogFormatterMathtext())#

    #plt.clabel(CS3, inline=1, fontsize=16,fmt=ticker.LogFormatterMathtext())#

    A2 = pow(ec, 2) / (e0 * mi) / pow(om, 2)
    B2 = -2 * pow(e0 * mes, -.5) * ec / wce * npar
    C2 = npar**2 - 1

    temp = pow((-B2 - pow(B2**2 - 4 * A2 * C2, .5)) / (2 * A2), 2)
    plt.fill_between(naxis,
                     Taxis[0] * 1e3,
                     Taxis[-1] * 1e3,
                     naxis > temp,
                     color='k',
                     alpha=.3,
                     linewidth=2.)

    plt.gca().set_yscale("log")

    plt.gca().set_xscale("log")

    plt.ylabel(r'Electron Temperature [eV]')
    plt.xlabel(r'$n_e$ [m$^{-3}$]')

    plt.show()
Example #4
def gridSearch(maxT, sumM, name,loc='svmcrossvalid.p',reduction=50,k=2.2e35,gamma=[-3,3], C=[0,6], nozero=False, conc='c_w_l', lims=[2.2e3,9e3]):

    val, c_w_l, idx = conditionVal(name=name, nozero=nozero, conc=conc)
    idx1 = sample(val, len(val) // reduction)
    idx2 = sample(val, len(val) // reduction)  # very small subsamples, used to test the algorithms
    index0 = scipy.logspace(gamma[0],gamma[1],int(abs(gamma[0]-gamma[1])+1))
    index1 = scipy.logspace(C[0],C[1],int(abs(C[0]-C[1])+1))

    data = scipy.io.readsav('W_Abundances_grid_puestu_adpak_fitscaling_74_0.00000_5.00000_1000_idlsave')
    te = data['en']
    idx2 = scipy.logical_and(te > lims[0], te < lims[1])


    temp = val[idx2]/c_w_l[idx2]/sumM[idx2]*k


    output = scipy.zeros((len(index0),len(index1)))
    output2 = scipy.zeros((len(index0),len(index1),len(te[idx2])))



    for i in range(len(index0)):
        for j in range(len(index1)):
            print(i,j)
            pipe = SVMtest(maxT, sumM, val, c_w_l, reduced=idx1, gamma=index0[i], C=index1[j],k=k)
            output[i,j] = pipe.score(scipy.log(scipy.atleast_2d(maxT[idx2]).T),scipy.log(temp))
            output2[i,j] = scipy.exp(pipe.predict(scipy.log(scipy.atleast_2d(te[idx2]).T)))

    pickle.dump([output,output2],open(loc,'wb'))
    return output,output2
Example #5
def test():
    import datasource
    import model
    import dataset

    f = Fitter()

    # define source of "experimental data"
    function = lambda s, i: (10.0 * s / 1.0) / (1.0 + s / 1.0 + i / 5.0)
    data_source = datasource.Generated_ScanDataSource(
        function, ['s', 'i'],
        'v', [scipy.logspace(-2, 2, 50),
              scipy.logspace(-2, 2, 10)],
        noise=0.3)

    # define model
    model = model.Equation_Model("Vmax*s/Ks/(1 + s/Ks + i/Ki)", ['s', 'i'])

    dataset.ScanDataSet('name', f, data_source, model)

    # specify the optimization algorithm, defaults to scipy_leastsq
    #~ alg = algorithm.scipy_leastsq()
    alg = algorithm.robust_biweight()
    f.setAlgorithm(alg)

    # specify the parameters to be fitted
    f.addParameter('Vmax', init=1.0, min=0, max=100)
    f.addParameter('Ks', init=1.0, min=0, max=10)
    f.addParameter('Ki', init=1.0, min=0, max=10)

    r = f.solve()
    r.writeOutput()
Example #6
    def _getParamGridFixedEffectModel(self, G0, G1, link):
        if link == 'linear':
            param_grid = dict(alpha=0.5*sp.logspace(-5, 5, 20))
        elif link == 'logistic':
            param_grid = dict(C=sp.logspace(-5, 5, 20))
        else:
            assert False

        return param_grid
Example #7
 def D2_setup_scan(self,min1,max1,step1,min2,max2,step2):
     self.P1min = min1
     self.P1max = max1
     self.P1steps = step1
     self.P2min = min2
     self.P2max = max2
     self.P2steps = step2
     self.P1range = scipy.logspace(scipy.log10(min1),scipy.log10(max1),step1)
     self.P2range = scipy.logspace(scipy.log10(min2),scipy.log10(max2),step2)
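For reference, the log10/logspace pairing used in these scan setters simply yields a logarithmically spaced range between the raw bounds; a standalone check, with numpy in place of the scipy aliases:

import numpy as np

P1range = np.logspace(np.log10(1e-3), np.log10(1e3), 7)
print(P1range)   # [1.e-03 1.e-02 1.e-01 1.e+00 1.e+01 1.e+02 1.e+03]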
Example #8
    def _getParamGridFixedEffectModel(self, G0, G1, link):
        if link == 'linear':
            param_grid = dict(alpha=0.5 * sp.logspace(-5, 5, 20))
        elif link == 'logistic':
            param_grid = dict(C=sp.logspace(-5, 5, 20))
        else:
            assert False

        return param_grid
Example #9
 def D2_setup_scan(self, min1, max1, step1, min2, max2, step2):
     self.P1min = min1
     self.P1max = max1
     self.P1steps = step1
     self.P2min = min2
     self.P2max = max2
     self.P2steps = step2
     self.P1range = scipy.logspace(scipy.log10(min1), scipy.log10(max1),
                                   step1)
     self.P2range = scipy.logspace(scipy.log10(min2), scipy.log10(max2),
                                   step2)
Example #10
def betweenness_distrib(graph,
                        use_weights=True,
                        nodes=None,
                        num_nbins=None,
                        num_ebins=None,
                        log=False):
    '''
    Betweenness distribution of a graph.

    Parameters
    ----------
    graph : :class:`~nngt.Graph` or subclass
        the graph to analyze.
    use_weights : bool, optional (default: True)
        use weighted degrees (do not take the sign into account : all weights
        are positive).
    nodes : list or numpy.array of ints, optional (default: all nodes)
        Restrict the distribution to a set of nodes (only impacts the node
        attribute).
    log : bool, optional (default: False)
        use log-spaced bins.

    Returns
    -------
    ncounts : :class:`numpy.array`
        number of nodes in each bin
    nbetw : :class:`numpy.array`
        bins for node betweenness
    ecounts : :class:`numpy.array`
        number of edges in each bin
    ebetw : :class:`numpy.array`
        bins for edge betweenness
    '''
    ia_nbetw, ia_ebetw = graph.get_betweenness(use_weights)
    if nodes is not None:
        ia_nbetw = ia_nbetw[nodes]
    if num_nbins is None:
        num_nbins = max(10, int(len(ia_nbetw) / 50))
    if num_ebins is None:
        num_ebins = max(10, int(len(ia_ebetw) / 50))
    ra_nbins = sp.linspace(ia_nbetw.min(), ia_nbetw.max(), num_nbins)
    ra_ebins = sp.linspace(ia_ebetw.min(), ia_ebetw.max(), num_ebins)
    if log:
        ra_nbins = sp.logspace(sp.log10(sp.maximum(ia_nbetw.min(), 10**-8)),
                               sp.log10(ia_nbetw.max()), num_nbins)
        ra_ebins = sp.logspace(sp.log10(sp.maximum(ia_ebetw.min(), 10**-8)),
                               sp.log10(ia_ebetw.max()), num_ebins)
    ncounts, nbetw = sp.histogram(ia_nbetw, ra_nbins)
    ecounts, ebetw = sp.histogram(ia_ebetw, ra_ebins)
    nbetw = nbetw[:-1] + 0.5 * sp.diff(nbetw)
    ebetw = ebetw[:-1] + 0.5 * sp.diff(ebetw)
    return ncounts, nbetw, ecounts, ebetw
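The log-binning pattern in betweenness_distrib generalizes to any positive, heavy-tailed sample. A minimal numpy sketch (the Pareto draw is a made-up stand-in for betweenness values):

import numpy as np

data = np.random.pareto(2.0, 1000) + 1e-8         # heavy-tailed toy data
bins = np.logspace(np.log10(max(data.min(), 1e-8)),
                   np.log10(data.max()), 30)
counts, edges = np.histogram(data, bins)
centers = edges[:-1] + 0.5 * np.diff(edges)       # bin centers, as in the function above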
Example #11
 def D3_setup_scan(self,min1,max1,step1,min2,max2,step2,min3,max3,step3):
     self.P1min = min1
     self.P1max = max1
     self.P1steps = step1
     self.P2min = min2
     self.P2max = max2
     self.P2steps = step2
     self.P3min = min3
     self.P3max = max3
     self.P3steps = step3
     self.P1range = scipy.logspace(scipy.log10(min1),scipy.log10(max1),step1)
     self.P2range = scipy.logspace(scipy.log10(min2),scipy.log10(max2),step2)
     self.P3range = scipy.logspace(scipy.log10(min3),scipy.log10(max3),step3)
Example #12
def plotquarts(a, data1, data2, col, line, lab, log=False):
    n = len(data1)
    mx = -sp.Inf
    mn = sp.Inf
    for i in range(n):
        mn = min(mn, min(data1[i]))
        mx = max(mx, max(data1[i]))
    if not log:
        xaxis = sp.linspace(mn, mx, 200)
    else:
        xaxis = sp.logspace(sp.log10(mn), sp.log10(mx), 200) + 1e-9


##    print( data1)
#    print( data2)
#    print( xaxis)
    low0, med0, upp0 = gpbo.core.ESutils.quartsirregular(data1, data2, xaxis)

    #        a.fill_between(xaxis, low0, upp0, facecolor='lightblue', edgecolor='lightblue', alpha=0.5)
    a.plot(xaxis, med0, color=col, linestyle=line, label=lab)
    a.fill_between(xaxis,
                   upp0,
                   low0,
                   edgecolor=col,
                   linestyle=line,
                   facecolor=col,
                   lw=0.0,
                   alpha=0.1)
    return
Example #13
def default_frequency_range(syslist):
    """Compute a reasonable default frequency range for frequency
    domain plots.

    Finds a reasonable default frequency range by examining the features
    (poles and zeros) of the systems in syslist.

    Parameters
    ----------
    syslist : list of Lti
        List of linear input/output systems (single system is OK)

    Returns
    -------
    omega : array
        Range of frequencies in rad/sec

    Examples
    --------
    >>> from matlab import ss
    >>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
    >>> omega = default_frequency_range(sys)
    """
    # This code looks at the poles and zeros of all of the systems that
    # we are plotting and sets the frequency range to be one decade above
    # and below the min and max feature frequencies, rounded to the nearest
    # integer.  It excludes poles and zeros at the origin.  If no features
    # are found, it returns logspace(-1, 1)

    # Find the list of all poles and zeros in the systems
    features = np.array(())

    # detect if single sys passed by checking if it is sequence-like
    if (not getattr(syslist, '__iter__', False)):
        syslist = (syslist,)

    for sys in syslist:
        try:
            # Add new features to the list
            features = np.concatenate((features, np.abs(sys.pole())))
            features = np.concatenate((features, np.abs(sys.zero())))
        except:
            pass

    # Get rid of poles and zeros at the origin
    features = features[features != 0]

    # Make sure there is at least one point in the range
    if features.shape[0] == 0:
        features = [1]

    # Take the log of the features
    features = np.log10(features)

    #! TODO: Add a check in discrete case to make sure we don't get aliasing

    # Set the range to be an order of magnitude beyond any features
    omega = sp.logspace(np.floor(np.min(features))-1,
                        np.ceil(np.max(features))+1)

    return omega
Example #14
    def run(self, npts=25, inv_points=None, access_limited=True, **kwargs):
        r"""
        Parameters
        ----------
        npts : int (default = 25)
            The number of pressure points to apply.  The list of pressures
            is logarithmically spaced between the lowest and highest throat
            entry pressures in the network.

        inv_points : array_like, optional
            A list of specific pressure point(s) to apply.

        """
        if 'inlets' in kwargs.keys():
            logger.info('Inlets received, passing to set_inlets')
            self.set_inlets(pores=kwargs['inlets'])
        if 'outlets' in kwargs.keys():
            logger.info('Outlets received, passing to set_outlets')
            self.set_outlets(pores=kwargs['outlets'])
        self._AL = access_limited
        if inv_points is None:
            logger.info('Generating list of invasion pressures')
            min_p = sp.amin(self['throat.entry_pressure']) * 0.98  # nudge down
            max_p = sp.amax(self['throat.entry_pressure']) * 1.02  # bump up
            inv_points = sp.logspace(sp.log10(min_p), sp.log10(max_p), npts)

        self._npts = sp.size(inv_points)
        # Execute calculation
        self._do_outer_iteration_stage(inv_points)
Example #15
	def add_axis(self, param, start, stop, steps, logspace=False):
		"""Add a parameter discretization axis to the the grid

		Arguments
		---------
		param : string
			The name of the model parameter.
		start : float
			The starting value of the model parameter.
		stop : float
			The ending value of the model parameter.
		steps : integer
			The number of steps to insert between the start and stop.
		logspace : boolean
			Space the steps logarithmically?

		Returns
		-------
		None
		"""

		assert param in self.sim.get_model_params().keys()
		
		if logspace:
			self._grid_pts.append( scipy.logspace(start,stop,steps) )
		else:
			self._grid_pts.append( scipy.linspace(start,stop,steps) )
		self._grid_size *= len(self._grid_pts[-1])
		self._grid_order.append(param)
Example #16
def create_grid(r_in, r_out, nshell, space = 'powerlaw1', end = True):
    # function to create grid
    if space == 'log10':
        from scipy import log10, logspace
        # get the exponent of the start- and
        # stop-radius in input units
        start = [log10(r_in), 0][r_in == 0]
        stop = log10(r_out)
        radii = logspace(start, stop, num=nshell, endpoint=end)
    elif space == "powerlaw1":
        from scipy import arange
        radii = r_in * (r_out/r_in)**(arange(nshell)/(nshell - 1.0))
    elif space == 'linear':
        from scipy import linspace
        # linearly spaced grid
        radii = linspace(r_in, r_out, num=nshell, endpoint=end)
    elif space == 'powerlaw2':
        from scipy import linspace
        # first check if coefficients to the power-law was given
        #~ if 'exp' in kwargs:
            #~ p_exp = kwargs['exp']
        #~ else: # if not, set it to 2, i.e. r^2
            #~ p_exp = 2
        radii = r_in + (r_out - r_in)*(linspace(r_in, r_out, num=nshell, endpoint=end)/(r_out))**2
        #pr_int('Not implemented yet.')
        #raise ParError(spaced)
    else:
        raise Exception(space)
    return radii
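Note that for r_in > 0 the 'log10' and 'powerlaw1' branches produce the same shells: logspace(log10(r_in), log10(r_out), n) equals r_in*(r_out/r_in)**(k/(n-1)) for k = 0..n-1. A quick numpy check:

import numpy as np

r_in, r_out, nshell = 1.0, 100.0, 5
log_grid = np.logspace(np.log10(r_in), np.log10(r_out), num=nshell)
pow_grid = r_in * (r_out / r_in) ** (np.arange(nshell) / (nshell - 1.0))
print(np.allclose(log_grid, pow_grid))   # True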
Example #17
def compute_rls(data_file, output_filename, rls_type=DEFAULT_RLS, save_out=DEFAULT_SAVE):
    """
    data file contains samples and labels saved as a mat file
    with respective keywords. Make your own data wrapper if needed
    """

    if path.splitext(output_filename)[-1] != ".mat":
        output_filename += ".mat"

    if path.splitext(data_file)[-1] != ".mat":
        raise ValueError, "mat file needed"

    data = loadmat(data_file)
    X = data["samples"]
    Y = data["labels"]
    lambdas = sp.logspace(-6, 6, 30)

    if rls_type.lower() == "linear":
        w, loos = lrlsloo(X, Y, lambdas)
    elif rls_type.lower() == "nonlinear":
        w, loos = rlsloo(X, Y, lambdas)
    else:
        print "ERROR: specify linear or nonlinear"

    if save_out:
        out_data = {"weights": w, "loos": loos}
        savemat(output_filename, out_data)
Example #18
    def run(self, npts=25, inv_points=None, access_limited=True, **kwargs):
        r"""
        Parameters
        ----------
        npts : int (default = 25)
            The number of pressure points to apply.  The list of pressures
            is logarithmically spaced between the lowest and highest throat
            entry pressures in the network.

        inv_points : array_like, optional
            A list of specific pressure point(s) to apply.

        """
        if 'inlets' in kwargs.keys():
            logger.info('Inlets received, passing to set_inlets')
            self.set_inlets(pores=kwargs['inlets'])
        if 'outlets' in kwargs.keys():
            logger.info('Outlets received, passing to set_outlets')
            self.set_outlets(pores=kwargs['outlets'])
        self._AL = access_limited
        if inv_points is None:
            logger.info('Generating list of invasion pressures')
            min_p = sp.amin(self['throat.entry_pressure']) * 0.98  # nudge down
            max_p = sp.amax(self['throat.entry_pressure']) * 1.02  # bump up
            inv_points = sp.logspace(sp.log10(min_p),
                                     sp.log10(max_p),
                                     npts)

        self._npts = sp.size(inv_points)
        # Execute calculation
        self._do_outer_iteration_stage(inv_points)
Example #19
def plotprofile(confs,nreps,path,tol=0.9,target=1e-6):
    f=[]
    a=[]
    pmax=1
    for i in range(pmax):
        f_,a_ = plt.subplots(1)
        for item in ([a_.title, a_.xaxis.label, a_.yaxis.label] + a_.get_xticklabels() + a_.get_yticklabels()):
            item.set_fontsize(10)
        f.append(f_)
        a.append(a_)
    colorlist = ['b','r','g','purple','k','grey','orange','c','lightgreen','lightblue','pink','b','r','g','purple','k','grey','orange','c','lightgreen','lightblue','pink']
    lslist = ['solid' , 'dashed', 'dashdot', 'dotted','solid' , 'dashed', 'dashdot', 'dotted','solid' , 'dashed', 'dashdot', 'dotted','solid' , 'dashed', 'dashdot', 'dotted','solid' , 'dashed', 'dashdot', 'dotted']
    ci=-1

    for C in confs:
        print('plotting {}...'.format(C[0]))
        ci+=1
        col = colorlist[ci]
        line = lslist[ci]
        #collect the data
        data=[]
        xoverheads = sp.empty(nreps)
        xntotarget = sp.zeros(nreps)
        overheads = sp.empty(nreps)
        noverheads = [sp.empty(nreps) for i in range(len(C[1]['N']))]
        ninrun = sp.zeros(nreps)
        support = sp.logspace(-2,5,200)
        success = sp.zeros(nreps)
        for ii in range(nreps):
            D = gpbo.optimize.readoptdata(os.path.join(path,'{}_{}.csv'.format(C[0],ii)))
            A = sp.array(D['trueyatxrecc'].values)
            if A.min()>=target:
                xoverheads[ii]=sum(D['taq'])
                overheads[ii]=sum(D['taq'])
                for k,n in enumerate(C[1]['N']):
                    noverheads[k][ii] = sum(D['taq'][:n])
            else:
                success[ii]=1
                i = sp.argmax(A <= target)  # first index where the target is reached
                xoverheads[ii] = sum(D['taq'][:i+1])
                overheads[ii] = sum(D['taq'])
                xntotarget[ii] = i
                ninrun[ii]=len(D['taq'])
                for k,n in enumerate(C[1]['N']):
                    noverheads[k][ii] = sum(D['taq'].values[:n])
        if sp.mean(success)>=tol:
            if C[1]['oracle']:
                a[0].plot(support,sp.mean(xoverheads)+sp.mean(xntotarget)*support,col,label=C[0]+'oracle',linestyle='dashdot')
            if C[1]['full']:
                a[0].plot(support,sp.mean(overheads)+sp.mean(ninrun)*support,col,label=C[0]+'_all',linestyle='dashed')
            for k,n in enumerate(C[1]['N']):
                if sp.percentile(xntotarget,int(tol*100))<n:
                    a[0].plot(support,sp.mean(noverheads[k])+n*support,col,label=C[0]+str(n),linestyle='solid')
        else:
            print('{} only achieved the target on {}'.format(C[0], sp.mean(success)))
        a[0].set_xscale('log')
        a[0].set_yscale('log')
        a[0].legend()

    f[0].savefig(os.path.join(path,'profile_{}.png'.format(sp.log10(target))),bbox_inches='tight', pad_inches=0.1)
Example #20
 def fit(self, kk=None):
     """
      Fit Fourier spectrum with the function set at class instantiation
     ==> NB: fitting is done in logarithmic coordinates
     and fills plotting arrays with data
     --------
     Options:
     --------
     kk
        (k1,k2) <None> spectral interval for function fitting
        by default interval [ kk[1], kk[imax__kk] ] will be fitted
        ==> i.e. k=0 is excluded
     """
     # fitting interval
     if kk:
         ik_min=(self.fft_data.kk[1:self.fft_data.imax__kk]<=kk[0]).nonzero()[0][-1]
         ik_max=(self.fft_data.kk[1:self.fft_data.imax__kk]<=kk[1]).nonzero()[0][-1]
     else:
          ik_min = 1
         ik_max=self.fft_data.imax__kk
     # do fitting
     self.__popt,self.__pcov = scipy.optimize.curve_fit(self.__func_fit,
                                                        scipy.log(self.fft_data.kk[ik_min:ik_max]),
                                                        scipy.log(self.fft_data.Ik[ik_min:ik_max]) )
     # boundaries of fitted interval
     self.kmin = self.fft_data.kk[ik_min]
     self.kmax = self.fft_data.kk[ik_max]
     # fill plot arrays <===============
     self.kk_plot=scipy.logspace( scipy.log10(self.kmin),
                                  scipy.log10(self.kmax),
                                  self.nk_plot )
     self.Ik_plot=self.fitting_function(self.kk_plot)
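The fit-in-log-coordinates idea above (a straight line in log-log space for a power-law spectrum) can be sketched standalone; the spectrum amplitude and exponent below are made up for illustration:

import numpy as np
from scipy.optimize import curve_fit

k = np.logspace(-1, 2, 50)
Ik = 3.0 * k ** -1.7 * np.exp(np.random.normal(0, 0.05, k.size))  # noisy power law

line = lambda logk, logA, p: logA + p * logk       # linear model in log-log space
(logA, p), _ = curve_fit(line, np.log(k), np.log(Ik))
print(np.exp(logA), p)                             # recovers ~3.0 and ~-1.7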
Example #21
    def _neutdata(self, samplename, ic50_in_name,
            nfitpoints, cextend):
        """Gets data for plotting neutralization curve.

        Returns a `pandas.DataFrame` appropriate for passing
        to :func:`dms_tools2.plot.plotFacetedNeutCurves`. The
        arguments have the meanings explained in :meth:`plot`.
        """
        concentrations = scipy.concatenate(
                [self.cs,
                 scipy.logspace(math.log10(self.cs.min() / cextend),
                                math.log10(self.cs.max() * cextend),
                                num=nfitpoints)
                ])
        n = len(concentrations)

        points = scipy.concatenate(
                [self.fs,
                 scipy.full(n - len(self.fs), scipy.nan)])

        fit = scipy.array([self.fracsurvive(c) for c in concentrations])

        if ic50_in_name:
            samplename += ' (IC50 = {0})'.format(self.ic50_str())

        return pandas.DataFrame.from_dict(collections.OrderedDict([
                ('concentration', concentrations),
                ('sample', [samplename] * n),
                ('points', points),
                ('fit', fit),
                ]))
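The concentration grid in _neutdata pads the measured range by a factor of cextend on both sides before appending the log-spaced fit points. A minimal sketch with made-up concentrations:

import math
import numpy as np

cs = np.array([0.1, 0.4, 1.6, 6.4])       # hypothetical measured concentrations
cextend, nfitpoints = 2.0, 100
fit_cs = np.logspace(math.log10(cs.min() / cextend),
                     math.log10(cs.max() * cextend), num=nfitpoints)
concentrations = np.concatenate([cs, fit_cs])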
Example #22
    def add_axis(self, param, start, stop, steps, logspace=False):
        """Add a parameter discretization axis to the the grid

		Arguments
		---------
		param : string
			The name of the model parameter.
		start : float
			The starting value of the model parameter.
		stop : float
			The ending value of the model parameter.
		steps : integer
			The number of steps to insert between the start and stop.
		logspace : boolean
			Space the steps logarithmically?

		Returns
		-------
		None
		"""

        assert param in self.sim.get_model_params().keys()

        if logspace:
            self._grid_pts.append(scipy.logspace(start, stop, steps))
        else:
            self._grid_pts.append(scipy.linspace(start, stop, steps))
        self._grid_size *= len(self._grid_pts[-1])
        self._grid_order.append(param)
Example #23
def degree_distrib(net, deg_type="total", node_list=None, use_weights=True,
                   log=False, num_bins=30):
    '''
    Computing the degree distribution of a network.
    
    Parameters
    ----------
    net : :class:`~nngt.Graph` or subclass
        the network to analyze.
    deg_type : string, optional (default: "total")
        type of degree to consider ("in", "out", or "total").
    node_list : list or numpy.array of ints, optional (default: None)
        Restrict the distribution to a set of nodes (default: all nodes).
    use_weights : bool, optional (default: True)
        use weighted degrees (do not take the sign into account: all weights
        are positive).
    log : bool, optional (default: False)
        use log-spaced bins.
    
    Returns
    -------
    counts : :class:`numpy.array`
        number of nodes in each bin
    deg : :class:`numpy.array`
        bins
    '''
    ia_node_deg = net.get_degrees(node_list, deg_type, use_weights)
    ra_bins = sp.linspace(ia_node_deg.min(), ia_node_deg.max(), num_bins)
    if log:
        ra_bins = sp.logspace(sp.log10(sp.maximum(ia_node_deg.min(),1)),
                               sp.log10(ia_node_deg.max()), num_bins)
    counts,deg = sp.histogram(ia_node_deg, ra_bins)
    ia_indices = sp.argwhere(counts)
    return counts[ia_indices], deg[ia_indices]
Example #24
def default_frequency_range(syslist):
    """Compute a reasonable default frequency range for frequency
    domain plots.

    Finds a reasonable default frequency range by examining the features
    (poles and zeros) of the systems in syslist.

    Parameters
    ----------
    syslist : list of Lti
        List of linear input/output systems (single system is OK)

    Returns
    -------
    omega : array
        Range of frequencies in rad/sec

    Examples
    --------
    >>> from matlab import ss
    >>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
    >>> omega = default_frequency_range(sys)
    """
    # This code looks at the poles and zeros of all of the systems that
    # we are plotting and sets the frequency range to be one decade above
    # and below the min and max feature frequencies, rounded to the nearest
    # integer.  It excludes poles and zeros at the origin.  If no features
    # are found, it returns logspace(-1, 1)
    
    # Find the list of all poles and zeros in the systems
    features = np.array(())
    
    # detect if single sys passed by checking if it is sequence-like
    if (not getattr(syslist, '__iter__', False)):
        syslist = (syslist,)

    for sys in syslist:
        try:
            # Add new features to the list
            features = np.concatenate((features, np.abs(sys.pole())))
            features = np.concatenate((features, np.abs(sys.zero())))
        except:
            pass

    # Get rid of poles and zeros at the origin
    features = features[features != 0]

    # Make sure there is at least one point in the range
    if features.shape[0] == 0:
        features = [1]

    # Take the log of the features
    features = np.log10(features)

    #! TODO: Add a check in discrete case to make sure we don't get aliasing
                        
    # Set the range to be an order of magnitude beyond any features
    omega = sp.logspace(np.floor(np.min(features))-1, 
                        np.ceil(np.max(features))+1)   

    return omega
Example #25
 def D3_setup_scan(self, min1, max1, step1, min2, max2, step2, min3, max3,
                   step3):
     self.P1min = min1
     self.P1max = max1
     self.P1steps = step1
     self.P2min = min2
     self.P2max = max2
     self.P2steps = step2
     self.P3min = min3
     self.P3max = max3
     self.P3steps = step3
     self.P1range = scipy.logspace(scipy.log10(min1), scipy.log10(max1),
                                   step1)
     self.P2range = scipy.logspace(scipy.log10(min2), scipy.log10(max2),
                                   step2)
     self.P3range = scipy.logspace(scipy.log10(min3), scipy.log10(max3),
                                   step3)
Example #26
def visualize2SVM(maxT, sumM, name, nozero=False, k=2.2e35, useall=False, lims=[2.2e3,9e3], minval=[2,5], conc='c_w_l',loc='svmcrossvalid.p',idx2=None):
    """ log plot of the data with only PEC data"""
    val, c_w_l, idx = conditionVal(name=name, nozero=nozero, conc=conc)
              
    data = scipy.io.readsav('W_Abundances_grid_puestu_adpak_fitscaling_74_0.00000_5.00000_1000_idlsave')
    y = time.time()

    xax = scipy.logspace(2,5,201)
    yax = scipy.logspace(-5,1,101)
    xtemp = xax[1:]/2.+xax[:-1]/2.
    ytemp = yax[1:]/2.+yax[:-1]/2.

    Y,X = scipy.meshgrid(xtemp, ytemp)
    if idx2 is None:
        histdata, xed, yed = scipy.histogram2d(maxT,val/c_w_l/sumM*k,bins=[xax,yax])
    else:
        histdata, xed, yed = scipy.histogram2d(maxT[idx2],(val/c_w_l/sumM*k)[idx2],bins=[xax,yax])
        
    extent = [xed[0], xed[-1], yed[0], yed[-1]]
    plt.pcolormesh(xax,yax,histdata.T, cmap='viridis', rasterized=True,vmin=1.,norm=LogNorm())
    plt.gca().set_yscale('log')
    plt.gca().set_xscale('log')
    plt.gca().set_ylim(1e-4,1e1)
    plt.gca().set_xlim(1e3,2e4)
   
    cmap = plt.cm.get_cmap('viridis')
    cmap.set_under('white')
 
    idx3 = scipy.logical_and(data['en'] > lims[0], data['en'] < lims[1])
    data2 = pickle.load(open(loc,'rb')) 
    fz = data2[1][minval[0]][minval[1]]


    print(len(fz))
    print(data['en'][idx3].shape)

    plt.loglog(data['en'][idx3],fz/fz.max(),lw=3.5,color='darkorange',linestyle='-',label='SVM fit')

    plt.xlabel(r'$T_e$ [eV]')
    plt.ylabel(r'const$\cdot I_{CSXR} / c_W \sum M$')

    colorbar = plt.colorbar()
    colorbar.ax.set_ylabel('datapoint density (a.u.)')
    leg = plt.legend(loc=4,fontsize=20,title=r'\underline{\hspace{1em} C=10$^4$, $\gamma=.1$ \hspace{1em}}')
    plt.setp(leg.get_title(),fontsize=14)
    plt.subplots_adjust(bottom=.12,right=1.)
Example #27
def plotFigureSingVals(FigFolder, s, plotStylei, subfigIndx):
    font = {'family': 'sans-serif', 'weight': 'normal', 'size': 14}

    matplotlib.rc('font', **font)

    # set tick width
    matplotlib.rcParams['xtick.major.size'] = 7
    matplotlib.rcParams['xtick.major.width'] = 1.0
    matplotlib.rcParams['xtick.minor.size'] = 3
    matplotlib.rcParams['xtick.minor.width'] = 1.0
    matplotlib.rcParams['ytick.major.size'] = 7
    matplotlib.rcParams['ytick.major.width'] = 1.0
    matplotlib.rcParams['ytick.minor.size'] = 3
    matplotlib.rcParams['ytick.minor.width'] = 1.0
    matplotlib.rcParams['axes.linewidth'] = 1.0

    # Make LATEX font the same as text font:
    matplotlib.rcParams['mathtext.fontset'] = 'custom'
    matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
    matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
    matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'

    fig = plt.figure(1, figsize=(10, 4))
    ax = plt.subplot(subfigIndx)
    plt.plot(range(1, len(s) + 1), s, plotStylei, linewidth=2.0)
    plt.yscale('log')
    plt.xscale('log')
    ax.set_yticks(sp.logspace(-8, 0, num=5, base=10))
    plt.xlabel('Singular Value Index, $i$', labelpad=10)
    plt.ylabel(r'Scaled Singular Value, $\lambda_i/\lambda_1$', labelpad=10)
    # Set axis limits:
    plt.ylim(1.e-8, 3.)
    plt.xlim(0, 1000)
    ax.minorticks_off()
    ax.tick_params(axis='both',
                   bottom=True,
                   top=True,
                   left=True,
                   right=True,
                   direction='in')
    if subfigIndx == 121:
        ax.legend(['LowRankMedNoise', 'LowRankHiNoise', r'$S_\mathrm{D}$'],
                  numpoints=1,
                  loc="lower left",
                  prop={'size': 12})
    if subfigIndx == 122:
        ax.legend(['PolySlow', 'PolyFast', 'ExpSlow', 'ExpFast'],
                  numpoints=1,
                  loc="lower left",
                  prop={'size': 12})
    fig.subplots_adjust(left=0.01,
                        bottom=0.01,
                        right=0.99,
                        top=0.99,
                        wspace=0.4,
                        hspace=0.4)
    fig.savefig(FigFolder + '/Figure1.pdf', format='pdf', bbox_inches='tight')
Example #28
def run():

    # parameter dictionary
    p = {}
    p['number_batches'] = 3
    p['leakage_penalty'] = 0.04
    p['assembly_width'] = 21.5036
    p['assembly_power'] = 3.4 / 193
    p['active_height'] = 366.0
    p['fuel_radius'] = 0.4096
    p['cladding_inner_radius'] = 0.4180
    p['cladding_outer_radius'] = 0.4750
    p['number_pins'] = 264
    p['power_share'] = 'reactivity'

    T_F = 900 * sp.ones(p['number_batches'])  # batch fuel temperatures (K)
    T_C = 580 * sp.ones(
        p['number_batches'])  # batch moderator temperatures (K)

    num_thick = 20
    #thick = sp.linspace(0.0, 500, num_thick)
    thick = sp.logspace(-1, sp.log10(5 * 10**2), num_thick)
    T_F_FeCrAl = sp.zeros((3, num_thick))
    T_C_FeCrAl = sp.zeros((3, num_thick))
    T_F_SiC = sp.zeros((3, num_thick))
    T_C_SiC = sp.zeros((3, num_thick))
    PPF_FeCrAl = sp.zeros((3, num_thick))
    PPF_SiC = sp.zeros((3, num_thick))

    solver = NRM(p, rho=rho, m2=m2, k_cladding=k_cladding)

    for i in range(num_thick):
        p['t_fecral'] = thick[i]
        p['t_sic'] = 0.0
        B, ppf, T_F, T_C = solver.solve(T_F, T_C)
        T_F_FeCrAl[:, i] = T_F[:]
        T_C_FeCrAl[:, i] = T_C[:]
        PPF_FeCrAl[:, i] = ppf[:]

        p['t_fecral'] = 0.0
        p['t_sic'] = thick[i]
        B, ppf, T_F, T_C = solver.solve(T_F, T_C)
        T_F_SiC[:, i] = T_F[:]
        T_C_SiC[:, i] = T_C[:]
        PPF_SiC[:, i] = ppf[:]

    pickle.dump(
        {
            'thick': thick,
            'T_F_FeCrAl': T_F_FeCrAl,
            'T_C_FeCrAl': T_C_FeCrAl,
            'PPF_FeCrAl': PPF_FeCrAl,
            'T_F_SiC': T_F_SiC,
            'T_C_SiC': T_C_SiC,
            'PPF_SiC': PPF_SiC
        }, open('example_3.p', 'wb'))
Example #29
    def plot(self, indice):
        self.diagrama.ax.clear()
        self.diagrama.ax.set_xlim(0.1, 10)
        self.diagrama.ax.set_ylim(0, 1)
        self.diagrama.ax.set_xscale("log")

        self.diagrama.ax.set_title(QtWidgets.QApplication.translate(
            "pychemqt", "Heat Transfer Temperature Effectiveness"), size='12')
        self.diagrama.ax.set_xlabel("NTU", size='12')
        self.diagrama.ax.set_ylabel("P", size='14')
        self.diagrama.ax.set_xticklabels(["0.1", "1.0", "10"])
        xticklabels = ["0.2", "0.3", "", "0.5", "", "0.7", "", "", "2.0",
                       "3.0", "", "5.0", "", "7.0", "", ""]
        self.diagrama.ax.set_xticklabels(xticklabels, minor=True)

        flujo = self.flujo[indice][1]
        self.mixed.setVisible(flujo == "CrFSMix")
        kwargs = {}
        if flujo == "CrFSMix":
            kwargs["mixed"] = str(self.mixed.currentText())

        R = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1., 1.2, 1.4, 1.6, 1.8,
             2., 2.5, 3., 4., 6., 8., 10., 15.]

        NTU = logspace(-1.5, 1, 100)
        for ri in R:
            e = [0]+[TemperatureEffectiveness(N, ri, flujo, **kwargs) for N in NTU[1:]]
            self.diagrama.plot(NTU, e, "k")
            self.diagrama.ax.annotate(" R=%0.1f" % ri, (NTU[-1], e[-1]),
                                      size="medium", ha="left", va="center")

#        F=[0.3]
#        for f in F:
#            p=[]
#            NTU=[]
#            for r in R:
#                func=lambda P: CorrectionFactor(P, r, flujo)-f
#                print func(0.)
#                pi=fsolve(func, 0.2)
#                p.append(pi)
#                NTU.append(NTU_fPR(p, r, flujo))

#            p=[fsolve(lambda P: CorrectionFactor(P, r, flujo)-f, 0.5)[0] for r in R]
#            NTU=[NTU_fPR(pi, ri) for pi, ni in zip(p, R)]
#            self.diagrama.plot(NTU, p, "--")

        self.diagrama.draw()

        if flujo == "CrFSMix" and self.mixed.currentIndex():
            img = image.imread('images/equation/%s2.png' % flujo)
        else:
            img = image.imread('images/equation/%s.png' % flujo)
        self.image.set_data(img)
        self.refixImage()
Example #30
 def _make_forces(self, extreme_forces, num):
     a = extreme_forces[0]
     b = extreme_forces[1]
     c = extreme_forces[2]
     #print a, b, c, num
     forces = numpy.append(
         b - (scipy.logspace(scipy.log10(a), scipy.log10(b),
                             num=num // 2 + 1) - a),
         scipy.logspace(scipy.log10(b), scipy.log10(c),
                        num=num - num // 2)[1:]
         )
     forces = numpy.array( sorted(list(set(forces))) )
     if len(forces) != num:
         raise Exception("problem with forces: length {}".format(len(forces)))
     #print forces
     #for i in range(len(forces)):
     #    f = forces[i]
     #    rand = random.uniform( 0.99*f, 1.01*f)
     #    forces[i] = rand
     return forces
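_make_forces builds a force ladder that is dense near the middle value b: the left half is a logspace from a to b reflected about b, the right half an ordinary logspace from b to c. A numpy sketch of the same construction (integer division, since logspace's num must be an int):

import numpy as np

a, b, c, num = 1.0, 10.0, 100.0, 10
left = b - (np.logspace(np.log10(a), np.log10(b), num=num // 2 + 1) - a)
right = np.logspace(np.log10(b), np.log10(c), num=num - num // 2)[1:]
forces = np.array(sorted(set(np.append(left, right))))
print(len(forces) == num, forces.min(), forces.max())   # True 1.0 100.0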
Example #31
def run() :

    # parameter dictionary
    p = {}                 
    p['number_batches'] = 3       
    p['leakage_penalty'] = 0.04
    p['assembly_width'] = 21.5036
    p['assembly_power'] = 3.4/193
    p['active_height'] = 366.0
    p['fuel_radius'] = 0.4096
    p['cladding_inner_radius'] = 0.4180
    p['cladding_outer_radius'] = 0.4750
    p['number_pins'] = 264
    p['power_share'] = 'reactivity'
        
    T_F = 900*sp.ones(p['number_batches'])    # batch fuel temperatures (K)
    T_C = 580*sp.ones(p['number_batches'])    # batch moderator temperatures (K)
    
    num_thick = 20
    #thick = sp.linspace(0.0, 500, num_thick)
    thick = sp.logspace(-1, sp.log10(5*10**2), num_thick)
    T_F_FeCrAl = sp.zeros((3, num_thick))
    T_C_FeCrAl = sp.zeros((3, num_thick))
    T_F_SiC = sp.zeros((3, num_thick))
    T_C_SiC = sp.zeros((3, num_thick))
    PPF_FeCrAl = sp.zeros((3, num_thick))
    PPF_SiC = sp.zeros((3, num_thick))
    
    solver = NRM(p, rho=rho, m2=m2, k_cladding=k_cladding)

    
    for i in range(num_thick) :    
        p['t_fecral'] = thick[i]
        p['t_sic'] = 0.0
        B, ppf, T_F, T_C = solver.solve(T_F, T_C)
        T_F_FeCrAl[:, i] = T_F[:]
        T_C_FeCrAl[:, i] = T_C[:]
        PPF_FeCrAl[:, i] = ppf[:]
    
        p['t_fecral'] = 0.0
        p['t_sic'] = thick[i]
        B, ppf, T_F, T_C = solver.solve(T_F, T_C)
        T_F_SiC[:, i] = T_F[:]
        T_C_SiC[:, i] = T_C[:]
        PPF_SiC[:, i] = ppf[:]
    
    pickle.dump({'thick': thick,
                 'T_F_FeCrAl': T_F_FeCrAl,
                 'T_C_FeCrAl': T_C_FeCrAl,
                 'PPF_FeCrAl': PPF_FeCrAl,
                 'T_F_SiC': T_F_SiC,
                 'T_C_SiC': T_C_SiC,
                 'PPF_SiC': PPF_SiC}, open('example_3.p', 'wb'))
Example #32
    def run(self, npts=25, inv_pressures=None):
        r"""
        Run the algorithm for specified number of points or at given capillary
        pressures.

        Parameters
        ----------
        npts : scalar
            The number of points to obtain on the curve.  The points are
            automatically selected to span the range of capillary pressures
            using a logarithmic spacing (more points are lower capillary
            pressure values).

        inv_pressures : array_like
            A list of capillary pressures to apply. List should contain
            increasing and unique values.
        """
        # If no invasion points are given then generate some
        if inv_pressures is None:
            logger.info('Generating list of invasion pressures')
            min_p = sp.amin(self['throat.entry_pressure']) * 0.98  # nudge down
            max_p = sp.amax(self['throat.entry_pressure']) * 1.02  # bump up
            inv_points = sp.logspace(sp.log10(min_p),
                                     sp.log10(max_p),
                                     npts)
        else:
            # Make sure the given invasion points are sensible
            inv_points = sp.unique(inv_pressures)
        self._inv_points = inv_points

        # Ensure inlets are set
        if sp.sum(self['pore.inlets']) == 0:
            raise Exception('Inlet pores have not been specified')

        # Ensure outlet pores are set if trapping is enabled
        if self._trapping:
            if sp.sum(self['pore.outlets']) == 0:
                raise Exception('Outlet pores have not been specified')

        # Generate curve from points
        for inv_val in self._inv_points:
            # Apply one applied pressure and determine invaded pores
            logger.info('Applying capillary pressure: ' + str(inv_val))
            self._apply_percolation(inv_val)
            if self._trapping:
                logger.info('Checking for trapping')
                self._check_trapping(inv_val)

        # Find invasion sequence values (to correspond with IP algorithm)
        Pinv = self['pore.inv_Pc']
        self['pore.inv_seq'] = sp.searchsorted(sp.unique(Pinv), Pinv)
        Tinv = self['throat.inv_Pc']
        self['throat.inv_seq'] = sp.searchsorted(sp.unique(Tinv), Tinv)
Example #33
def bin_flux(flux, struct):
    lower = 1e-5
    upper = 2e7
    eb = energy_groups(struct, lower, upper)
    phi = sp.zeros(len(eb) - 1)
    for i in range(len(eb) - 1):
        E = sp.logspace(sp.log10(eb[i + 1]), sp.log10(eb[i]), 10000)  # num must be an int
        x = trapz(flux(E), E)
        print("->", i, eb[i + 1], eb[i], x)
        phi[i] = trapz(flux(E), E)
    phi = phi[::-1] / np.sum(phi)
    return phi
Example #34
    def runContinuation(self,parameter,low,high,density,par3d=None,logrange=True,runQuiet=True):
        """
        Run the continuation using the following parameters:

        Args:

        - parameter = str(the parameter to be scanned)
        - low = float(lower bound)
        - high = float(upper bound)
        - density = int(the number of initial points)
        - par3d = float(extra 3d parameter to insert into the output array); this parameter is not set, ONLY used in output
        - logrange = boolean [default = True], if True generate the result using logspace(log10(low), log10(high), density) otherwise use a linear range
        - runQuiet = boolean [default = True], if True do not display intermediate results to screen, disable for debugging

        After running the continuation the results are stored in numpy arrays

        - mod.res_idx  = scan parameter values (and optionally par3d)
        - mod.res_metab = steady-state species concentrations
        - mod.res_flux = steady-state flux values

        """

        self.pitcon_scan_density = density
        self.pitcon_scan_parameter = parameter
        self.pitcon_scan_parameter_3d = par3d
        if logrange:
            self.pitcon_range_low = scipy.log10(low)
            self.pitcon_range_high = scipy.log10(high)
            self.model.pitcon_par_space = scipy.logspace(self.pitcon_range_low,self.pitcon_range_high,self.pitcon_scan_density)
        else:
            self.pitcon_range_low = low
            self.pitcon_range_high = high
            self.model.pitcon_par_space = scipy.linspace(self.pitcon_range_low,self.pitcon_range_high,self.pitcon_scan_density)

        self.model.pitcon_flux_gen = 1
        if runQuiet:
            self.model.SetQuiet()
        else:
            self.model.SetLoud()

        if self.pitcon_scan_parameter_3d is not None:
            self.pitcon_res = self.model.PITCON(self.pitcon_scan_parameter, self.pitcon_scan_parameter_3d)
            self.res_idx = self.pitcon_res[:,:2]
            self.res_metab = self.pitcon_res[:,2:len(self.model.species)+2:]
            self.res_flux = self.pitcon_res[:,len(self.model.species)+2:]
        else:
            self.pitcon_res = self.model.PITCON(self.pitcon_scan_parameter)
            self.res_idx = self.pitcon_res[:,0]
            self.res_idx = self.res_idx.reshape(self.res_idx.shape[0],1)
            self.res_metab = self.pitcon_res[:,1:len(self.model.species)+1]
            self.res_flux = self.pitcon_res[:,len(self.model.species)+1:]

        print('\n\tContinuation complete\n')
Example #35
def plot_pk_lin():
	from matplotlib import pyplot
	k,pklin = load_pklin()
	pyregpt = PyRegPT()
	pyregpt.set_pk_lin(k,pklin)
	kout = scipy.logspace(scipy.log10(k[0])-2,scipy.log10(k[-1])+1,1000,base=10)
	for interpol in ['lin','poly']:
		pyplot.loglog(kout,pyregpt.find_pk_lin(kout,interpol=interpol),label=interpol)
	pyplot.axvline(x=k[0],ymin=0.,ymax=1.)
	pyplot.axvline(x=k[-1],ymin=0.,ymax=1.)
	pyplot.legend()
	pyplot.show()
Example #36
 def define_bins(self, **kwargs):
     r"""
     This defines the bins for a logscaled histogram
     """
     self.data_vector.sort()
     sf = self.args['scale_fact']
     num_bins = int(sp.logn(sf, self.data_vector[-1]) + 1)
     #
     # generating initial bins from 1 - sf**num_bins
     low = list(sp.logspace(0, num_bins, num_bins + 1, base=sf))[:-1]
     high = list(sp.logspace(0, num_bins, num_bins + 1, base=sf))[1:]
     #
     # Adding "catch all" bins for anything between 0 - 1 and less than 0
     if self.data_vector[0] < 1.0:
         low.insert(0, 0.0)
         high.insert(0, 1.0)
     if self.data_vector[0] < 0.0:
         low.insert(0, self.data_vector[0])
         high.insert(0, 0.0)
     #
     self.bins = [bin_ for bin_ in zip(low, high)]
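define_bins builds histogram edges as successive powers of the scale factor; logspace with base=sf does the heavy lifting. A standalone sketch (an np.log ratio replaces the sp.logn base-n logarithm used above):

import numpy as np

sf, vmax = 2.0, 500.0
num_bins = int(np.log(vmax) / np.log(sf) + 1)            # log base sf of the max value
edges = np.logspace(0, num_bins, num_bins + 1, base=sf)  # 1, sf, sf**2, ...
bins = list(zip(edges[:-1], edges[1:]))
print(bins[:3])   # [(1.0, 2.0), (2.0, 4.0), (4.0, 8.0)]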
Example #37
 def setScanJobs(self, start, end, intervals, job, log=False):
     """Splits a range into a number of jobs with intervals"""
     assert intervals >= 1, '\n* Minimum of 1 interval'
     if log:
         kpoints = scipy.logspace(scipy.log10(start), scipy.log10(end), intervals+1)
     else:
         kpoints = scipy.linspace(start, end, intervals+1)
     self.job_list = []
     for p in range(len(kpoints)-1):
         job2 = job % (kpoints[p], kpoints[p+1])
         self.job_list.append(job2)
         print(job2)
Example #38
    def runContinuation(self,parameter,low,high,density,par3d=None,logrange=True,runQuiet=True):
        """
        Run the continuation using the following parameters:

        Args:

        - parameter = str(the parameter to be scanned)
        - low = float(lower bound)
        - high = float(upper bound)
        - density = int(the number of initial points)
        - par3d = float(extra 3d parameter to insert into the output array); this parameter is not set, ONLY used in output
        - logrange = boolean [default = True], if True generate the result using logspace(log10(low), log10(high), density) otherwise use a linear range
        - runQuiet = boolean [default = True], if True do not display intermediate results to screen, disable for debugging

        After running the continuation the results are stored in numpy arrays

        - mod.res_idx  = scan parameter values (and optionally par3d)
        - mod.res_metab = steady-state species concentrations
        - mod.res_flux = steady-state flux values

        """

        self.pitcon_scan_density = density
        self.pitcon_scan_parameter = parameter
        self.pitcon_scan_parameter_3d = par3d
        if logrange:
            self.pitcon_range_low = scipy.log10(low)
            self.pitcon_range_high = scipy.log10(high)
            self.model.pitcon_par_space = scipy.logspace(self.pitcon_range_low,self.pitcon_range_high,self.pitcon_scan_density)
        else:
            self.pitcon_range_low = low
            self.pitcon_range_high = high
            self.model.pitcon_par_space = scipy.linspace(self.pitcon_range_low,self.pitcon_range_high,self.pitcon_scan_density)

        self.model.pitcon_flux_gen = 1
        if runQuiet:
            self.model.SetQuiet()
        else:
            self.model.SetLoud()

        if self.pitcon_scan_parameter_3d is not None:
            self.pitcon_res = self.model.PITCON(self.pitcon_scan_parameter, self.pitcon_scan_parameter_3d)
            self.res_idx = self.pitcon_res[:,:2]
            self.res_metab = self.pitcon_res[:,2:len(self.model.species)+2:]
            self.res_flux = self.pitcon_res[:,len(self.model.species)+2:]
        else:
            self.pitcon_res = self.model.PITCON(self.pitcon_scan_parameter)
            self.res_idx = self.pitcon_res[:,0]
            self.res_idx = self.res_idx.reshape(self.res_idx.shape[0],1)
            self.res_metab = self.pitcon_res[:,1:len(self.model.species)+1]
            self.res_flux = self.pitcon_res[:,len(self.model.species)+1:]

        print('\n\tContinuation complete\n')
Example #39
 def define_bins(self, **kwargs):
     r"""
     This defines the bins for a logscaled histogram
     """
     self.data_vector.sort()
     sf = self.args['scale_fact']
     num_bins = int(sp.logn(sf, self.data_vector[-1]) + 1)
     #
     # generating initial bins from 1 - sf**num_bins
     low = list(sp.logspace(0, num_bins, num_bins + 1, base=sf))[:-1]
     high = list(sp.logspace(0, num_bins, num_bins + 1, base=sf))[1:]
     #
     # Adding "catch all" bins for anything between 0 - 1 and less than 0
     if self.data_vector[0] < 1.0:
         low.insert(0, 0.0)
         high.insert(0, 1.0)
     if self.data_vector[0] < 0.0:
         low.insert(0, self.data_vector[0])
         high.insert(0, 0.0)
     #
     self.bins = [bin_ for bin_ in zip(low, high)]
Example #40
    def run(self, npts=25, inv_pressures=None):
        r"""
        Run the algorithm for specified number of points or at given capillary
        pressures.

        Parameters
        ----------
        npts : scalar
            The number of points to obtain on the curve.  The points are
            automatically selected to span the range of capillary pressures
            using a logarithmic spacing (more points are lower capillary
            pressure values).

        inv_pressures : array_like
            A list of capillary pressures to apply. List should contain
            increasing and unique values.
        """
        # If no invasion points are given then generate some
        if inv_pressures is None:
            logger.info('Generating list of invasion pressures')
            min_p = sp.amin(self['throat.entry_pressure']) * 0.98  # nudge down
            max_p = sp.amax(self['throat.entry_pressure']) * 1.02  # bump up
            inv_points = sp.logspace(sp.log10(min_p), sp.log10(max_p), npts)
        else:
            # Make sure the given invasion points are sensible
            inv_points = sp.unique(inv_pressures)
        self._inv_points = inv_points

        # Ensure inlets are set
        if sp.sum(self['pore.inlets']) == 0:
            raise Exception('Inlet pores have not been specified')

        # Ensure outlet pores are set if trapping is enabled
        if self._trapping:
            if sp.sum(self['pore.outlets']) == 0:
                raise Exception('Outlet pores have not been specified')

        # Generate curve from points
        for inv_val in self._inv_points:
            # Apply one applied pressure and determine invaded pores
            logger.info('Applying capillary pressure: ' + str(inv_val))
            self._apply_percolation(inv_val)
            if self._trapping:
                logger.info('Checking for trapping')
                self._check_trapping(inv_val)

        # Find invasion sequence values (to correspond with IP algorithm)
        Pinv = self['pore.inv_Pc']
        self['pore.inv_seq'] = sp.searchsorted(sp.unique(Pinv), Pinv)
        Tinv = self['throat.inv_Pc']
        self['throat.inv_seq'] = sp.searchsorted(sp.unique(Tinv), Tinv)
Example #41
    def makeRange(self,start,end,points,log):
        """
        Should be pretty self-evident: it defines a range:

        - float(start)
        - float(end)
        - int(points)
        - bool(log)
        """
        if log:
            rng = scipy.logspace(scipy.log10(start),scipy.log10(end),points)
        else:
            rng = scipy.linspace(start,end,points)
        return rng
Example #42
def test_gaussian_multiple_populations_crossval_kde(db_path, sampler):
    sigma_x = 1
    sigma_y = .5
    y_observed = 2

    def model(args):
        return {"y": st.norm(args['x'], sigma_y).rvs()}

    models = [model]
    models = list(map(SimpleModel, models))
    nr_populations = 4
    population_size = ConstantPopulationSize(600)
    parameter_given_model_prior_distribution = [
        Distribution(x=st.norm(0, sigma_x))
    ]
    parameter_perturbation_kernels = [
        GridSearchCV(MultivariateNormalTransition(),
                     {"scaling": sp.logspace(-1, 1.5, 5)})
    ]
    abc = ABCSMC(models,
                 parameter_given_model_prior_distribution,
                 MinMaxDistanceFunction(measures_to_use=["y"]),
                 population_size,
                 transitions=parameter_perturbation_kernels,
                 eps=MedianEpsilon(.2),
                 sampler=sampler)
    abc.new(db_path, {"y": y_observed})

    minimum_epsilon = -1

    abc.do_not_stop_when_only_single_model_alive()
    history = abc.run(minimum_epsilon, max_nr_populations=nr_populations)
    posterior_x, posterior_weight = history.get_distribution(0, None)
    posterior_x = posterior_x["x"].as_matrix()
    sort_indices = sp.argsort(posterior_x)
    f_empirical = sp.interpolate.interp1d(
        sp.hstack((-200, posterior_x[sort_indices], 200)),
        sp.hstack((0, sp.cumsum(posterior_weight[sort_indices]), 1)))

    sigma_x_given_y = 1 / sp.sqrt(1 / sigma_x**2 + 1 / sigma_y**2)
    mu_x_given_y = sigma_x_given_y**2 * y_observed / sigma_y**2
    expected_posterior_x = st.norm(mu_x_given_y, sigma_x_given_y)
    x = sp.linspace(-8, 8)
    max_distribution_difference = sp.absolute(
        f_empirical(x) - expected_posterior_x.cdf(x)).max()
    assert max_distribution_difference < 0.052
    assert history.max_t == nr_populations - 1
    mean_emp, std_emp = mean_and_std(posterior_x, posterior_weight)
    assert abs(mean_emp - mu_x_given_y) < .07
    assert abs(std_emp - sigma_x_given_y) < .12
Example #43
    def makeRange(self,start,end,points,log):
        """
        Should be pretty self-evident: it defines a range:

        - float(start)
        - float(end)
        - int(points)
        - bool(log)
        """
        if log:
            rng = scipy.logspace(scipy.log10(start),scipy.log10(end),points)
        else:
            rng = scipy.linspace(start,end,points)
        return rng
Example #44
def getquarts(data1, data2, log=False):
    n = len(data1)
    mx = -sp.Inf
    mn = sp.Inf
    for i in range(n):
        mn = min(mn, min(data1[i]))
        mx = max(mx, max(data1[i]))
    if not log:
        xaxis = sp.linspace(mn, mx, 200)
    else:
        xaxis = sp.logspace(sp.log10(mn), sp.log10(mx), 200)

    low0, med0, upp0 = gpbo.core.ESutils.quartsirregular(data1, data2, xaxis)
    return xaxis, low0, med0, upp0
Example #45
def betweenness_distrib(net, use_weights=True, log=False):
    '''
    Compute the betweenness distribution of a network.

    Parameters
    ----------
    net : :class:`~nngt.Graph` or subclass
        the network to analyze.
    use_weights : bool, optional (default: True)
        use edge weights (the sign is ignored: all weights are taken as
        positive).
    log : bool, optional (default: False)
        use log-spaced bins.
    
    Returns
    -------
    ncounts : :class:`numpy.array`
        number of nodes in each bin
    nbetw : :class:`numpy.array`
        bins for node betweenness
    ecounts : :class:`numpy.array`
        number of edges in each bin
    ebetw : :class:`numpy.array`
        bins for edge betweenness
    '''
    ia_nbetw, ia_ebetw = net.get_betweenness(use_weights)
    num_nbins, num_ebins = int(len(ia_nbetw) / 50), int(len(ia_ebetw) / 50)
    ra_nbins = sp.linspace(ia_nbetw.min(), ia_nbetw.max(), num_nbins)
    ra_ebins = sp.linspace(ia_ebetw.min(), ia_ebetw.max(), num_ebins)
    if log:
        ra_nbins = sp.logspace(sp.log10(sp.maximum(ia_nbetw.min(),10**-8)),
                               sp.log10(ia_nbetw.max()), num_nbins)
        ra_ebins = sp.logspace(sp.log10(sp.maximum(ia_ebetw.min(),10**-8)),
                               sp.log10(ia_ebetw.max()), num_ebins)
    ncounts,nbetw = sp.histogram(ia_nbetw, ra_nbins)
    ecounts,ebetw = sp.histogram(ia_ebetw, ra_ebins)
    return ncounts, nbetw[:-1], ecounts, ebetw[:-1]
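A minimal sketch of the binning choice above, on hypothetical heavy-tailed
data: log-spaced bins resolve the tail that linear bins would lump together,
with the same 1e-8 floor used by the function to guard against log10(0):

import numpy as np

values = np.random.pareto(2.0, 10000) + 1e-3          # strictly positive
lo = max(values.min(), 1e-8)                          # floor against log10(0)
bins = np.logspace(np.log10(lo), np.log10(values.max()), 50)
counts, edges = np.histogram(values, bins)            # counts per log bin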
Example #46
0
    def plot(self, indice):
        self.diagrama.axes2D.clear()
        self.diagrama.axes2D.set_xlim(0.1, 10)
        self.diagrama.axes2D.set_ylim(0, 1)
        self.diagrama.axes2D.set_xscale("log")

        self.diagrama.axes2D.set_title(QtGui.QApplication.translate("pychemqt", "Heat Transfer Temperature Effectiveness"), size='12')
        self.diagrama.axes2D.set_xlabel("NTU", size='12')
        self.diagrama.axes2D.set_ylabel("P", size='14')
        self.diagrama.axes2D.set_xticklabels(["0.1", "1.0", "10"])
        self.diagrama.axes2D.set_xticklabels(["0.2", "0.3", "", "0.5", "", "0.7", "", "", "2.0", "3.0", "", "5.0", "", "7.0", "", ""], minor=True)

        flujo=self.flujo[indice][1]
        self.mixed.setVisible(flujo=="CrFSMix")
        kwargs={}
        if flujo=="CrFSMix":
            kwargs["mixed"]=str(self.mixed.currentText())

        R=[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1., 1.2, 1.4, 1.6, 1.8, 2., 2.5, 3., 4., 6., 8., 10., 15.]

        NTU=logspace(-1.5, 1, 100)
        for  ri in R:
            e=[0]+[Heat_ExchangerDesign.TemperatureEffectiveness(N, ri, flujo, **kwargs) for N in NTU[1:]]
            self.diagrama.plot(NTU, e, "k")
            self.diagrama.axes2D.annotate(" R=%0.1f" %ri, (NTU[-1], e[-1]), size="medium", horizontalalignment="left", verticalalignment="center")

#        F=[0.3]
#        for f in F:
#            p=[]
#            NTU=[]
#            for r in R:
#                func=lambda P: Heat_ExchangerDesign.CorrectionFactor(P, r, flujo)-f
#                print func(0.)
#                pi=fsolve(func, 0.2)
#                p.append(pi)
#                NTU.append(Heat_ExchangerDesign.NTU_fPR(p, r, flujo))

#            p=[fsolve(lambda P: Heat_ExchangerDesign.CorrectionFactor(P, r, flujo)-f, 0.5)[0] for r in R]
#            NTU=[Heat_ExchangerDesign.NTU_fPR(pi, ri) for pi, ni in zip(p, R)]
#            self.diagrama.plot(NTU, p, "--")

        self.diagrama.draw()

        if flujo=="CrFSMix" and self.mixed.currentIndex():
            img=image.imread('images/equation/%s2.png' %flujo)
        else:
            img=image.imread('images/equation/%s.png' %flujo)
        self.image.set_data(img)
        self.refixImage()
Example #47
0
    def P_CONTINUATION(self, *args):
        """
        [str(p), float(min), float(max), int(points), *float(y-value)]
        """
        self.setStatus('CONTINUING')
        args = args[0]
        print('Args', args)

        self.model.pitcon_par_space = scipy.logspace(
            scipy.log10(float(args[1].strip())),
            scipy.log10(float(args[2].strip())),
            int(args[3].strip()))

        if len(args) == 4:
            self.RESULT = self.model.PITCON(args[0].strip())
        elif len(args) == 5:
            self.RESULT = self.model.PITCON(args[0].strip(), float(args[4].strip()))
        return True
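A hedged usage sketch of the argument protocol documented above (the values
are made up): four string arguments select the parameter and a log-spaced
grid, and the continuation then runs over that grid.

import numpy as np

args = ('Vmax', '1e-2', '1e2', '50')   # [p, min, max, points]
par_space = np.logspace(np.log10(float(args[1])),
                        np.log10(float(args[2])),
                        int(args[3]))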
Example #48
0
 def run(self, npts=20, sizes=None):
     self._make_dt()
     if npts is not None:
         sizes = sp.logspace(sp.log10(sp.amax(self._imdt)), 0.1, npts)
     self._make_seeds(sizes=sizes)
     imresults = sp.zeros(sp.shape(self.image))
     print('Dilating seeds')
     print('0%|'+'-'*len(sizes)+'|100%')
     print('  |', end='')
     for r in sizes:
         print('.', end='')
         sys.stdout.flush()
         im = self._dilate_seeds(im_seeds=self._imseeds, radius=r)
         imresults[(imresults == 0) * im] = r
     print('')
     self._iminv = imresults
Example #49
0
 def run(self):
     # See if setup has been run
     try:
         capillary_pressure = self._p_cap
     except AttributeError:
         raise Exception('setup has not been run, cannot proceed!')
     #Create a pore and throat conditions list to store inv_val at which each is invaded
     self._p_inv = sp.zeros((self._net.num_pores(),))
     self._p_seq = sp.zeros_like(self._p_inv)
     self._t_inv = sp.zeros((self._net.num_throats(),))
     self._t_seq = sp.zeros_like(self._t_inv)
     #Determine the invasion pressures to apply
     self._t_cap = self._net.get_throat_data(phase=self._fluid_inv,prop=capillary_pressure)
     min_p = sp.amin(self._t_cap)*0.98  # nudge min_p down slightly
     max_p = sp.amax(self._t_cap)*1.02  # bump max_p up slightly
     self._inv_points = sp.logspace(sp.log10(min_p),sp.log10(max_p),self._npts)
     self._do_outer_iteration_stage()
Example #52
0
def calc_cstr_locus_fast(Cf, rate_fn, t_end, num_pts):
    '''
    Quick (potentially inexact) CSTR solver using a standard non-linear solver
    (Newton). The initial guess is based on the previous solution.
    Note: this method will not find multiple solutions and may behave poorly
    with systems with multiple solutions. Use only if you know that the system
    is 'simple' (no multiple solutions) and you need a quick answer

    Parameters:
        Cf          (d x 1) numpy array. Feed concentration to the CSTR.

        rate_fn     Python function. Rate function in (C, t) format that
                    returns an array equal to the length of Cf.

        t_end       Float. Largest CSTR residence time in the locus.

        num_pts     Int. Number of residence time points.

    Returns:
        cstr_cs     (num_pts x d) numpy array of CSTR effluent concentrations
                    corresponding to cstr_ts.

        cstr_ts     (num_pts x 1) numpy array of CSTR residence times.
    '''

    cstr_ts = sp.hstack([0., sp.logspace(-3, sp.log10(t_end), num_pts - 1)])
    cstr_cs = []

    # loop through each cstr residence time and solve for the corresponding
    # cstr effluent concentration
    C_guess = Cf
    for ti in cstr_ts:

        # define CSTR function
        def cstr_fn(C):
            return Cf + ti * rate_fn(C, 1) - C

        # solve
        ci = scipy.optimize.newton_krylov(cstr_fn, C_guess)

        cstr_cs.append(ci)

        # update guess
        C_guess = ci

    # convert to numpy array
    cstr_cs = sp.array(cstr_cs)

    return cstr_cs, cstr_ts
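A usage sketch under stated assumptions (calc_cstr_locus_fast and its imports
in scope; the rate constant and feed are made up): a single first-order
reaction A -> B, whose CSTR balance Cf + tau*r(C) - C = 0 has the closed form
C_A = Cf_A / (1 + k*tau), so the solver output can be checked directly.

import numpy as np

k = 2.0                          # assumed rate constant
Cf = np.array([1.0, 0.0])        # feed: [C_A, C_B]

def rate_fn(C, t):
    rA = -k * C[0]               # A consumed, B produced at the same rate
    return np.array([rA, -rA])

cstr_cs, cstr_ts = calc_cstr_locus_fast(Cf, rate_fn, t_end=10.0, num_pts=20)
assert abs(cstr_cs[-1, 0] - Cf[0] / (1 + k * cstr_ts[-1])) < 1e-4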
Example #53
0
    def __plot(self, metodo=0, eD=[]):
        """Plot the Moody chart using the indicate method
        método de cálculo:
            0   -   Colebrook
            1   -   Chen (1979)
            2   -   Romeo (2002)
            3   -   Goudar-Sonnad
            4   -   Manadilli (1997)
            5   -   Serghides
            6   -   Churchill (1977)
            7   -   Zigrang-Sylvester (1982)
            8   -   Swamee-Jain (1976)")      
            
        eD: lista con las líneas de rugosidades relativas a dibujar
        Prmin: escala del eje x, minimo valor de Pr a representar
        Prmax: escala del eje y, maximo valor de Pr a representar
        """
        if not eD:
            eD=[0, 1e-6, 5e-6, 1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 4e-4, 6e-4, 8e-4, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.006, 0.008, 0.01, 0.0125, 0.015, 0.0175, 0.02, 0.025, 0.03, 0.035, 0.04, 0.045, 0.05, 0.06, 0.07]
        F=f_list[metodo]
        
        # laminar
        Re=[600, 2400]
        f=[64./R for R in Re]
        self.diagrama.axes2D.plot(Re, f, "k")
        # turbulent
        Re=logspace(log10(2400), 8, 50)
        for e in eD:
            self.diagrama.axes2D.plot(Re, [F(Rei, e) for Rei in Re], "k")
            self.diagrama.axes2D.annotate(representacion(e, tol=4.5), (Re[45], F(Re[45], e)), size="small", horizontalalignment="center", verticalalignment="bottom", rotation=arctan((log10(F(Re[47], e))-log10(F(Re[35], e)))/(log10(Re[47])-log10(Re[35])))*360/2/pi)

        # transition
        f=[(1/(1.14-2*log10(3500/R)))**2 for R in Re]
        self.diagrama.axes2D.plot(Re, f, "k", lw=0.5, linestyle=":")
        
        self.diagrama.axes2D.add_artist(ConnectionPatch((600, 0.009), (2400, 0.009), "data", "data", arrowstyle="<|-|>", mutation_scale=20, fc="w"))
        self.diagrama.axes2D.add_artist(ConnectionPatch((2400, 0.009), (6000, 0.009), "data", "data", arrowstyle="<|-|>", mutation_scale=20, fc="w"))
        self.diagrama.axes2D.add_artist(ConnectionPatch((6000, 0.095), (40000, 0.095), "data", "data", arrowstyle="<|-|>", mutation_scale=20, fc="w"))
        self.diagrama.axes2D.add_artist(ConnectionPatch((40000, 0.095), (9.9e7, 0.095), "data", "data", arrowstyle="<|-|>", mutation_scale=20, fc="w"))
        self.diagrama.axes2D.text(15000, 0.094, QtGui.QApplication.translate("pychemqt", "Transition Zone"), size="small", verticalalignment="top", horizontalalignment="center")
        self.diagrama.axes2D.text(2e6, 0.094, QtGui.QApplication.translate("pychemqt", "Fully developed turbulent flow"), size="small", verticalalignment="top", horizontalalignment="center")
        self.diagrama.axes2D.text(4000, 0.0091, QtGui.QApplication.translate("pychemqt", "Critical\nzone"), size="small", verticalalignment="bottom", horizontalalignment="center")
        self.diagrama.axes2D.text(1200, 0.0091, QtGui.QApplication.translate("pychemqt", "Laminar flow"), size="small", verticalalignment="bottom", horizontalalignment="center")
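For reference, method 0 above (Colebrook) is an implicit equation in the
friction factor. A hedged standalone sketch (not the pychemqt implementation)
that solves it with a scalar root finder:

import numpy as np
from scipy.optimize import brentq

def colebrook(Re, eD):
    # Darcy friction factor from 1/sqrt(f) = -2*log10(eD/3.7 + 2.51/(Re*sqrt(f)))
    def residual(f):
        return 1.0 / np.sqrt(f) + 2.0 * np.log10(eD / 3.7 + 2.51 / (Re * np.sqrt(f)))
    return brentq(residual, 1e-4, 1.0)   # bracket spans the Moody-chart range

print(colebrook(1e5, 1e-4))   # roughly 0.0185 in the turbulent region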
Example #54
0
def calc_pfr_trajectory(Cf, rate_fn, t_end, NUM_PTS=250, linspace_ts=False):
    '''
    Convenience function that integrates the PFR trajectory from the specified
    feed point Cf using scipy.integrate.odeint().
    Residence times are log-spaced by default (see linspace_ts).

    Parameters:
        Cf          (d x 1) numpy array. Feed concentration to the PFR.

        rate_fn     Python function. Rate function in (C,t) format that returns
                    an array equal to the length of Cf.

        t_end       Float indicating the residence time of the PFR.

        NUM_PTS     Optional. Number of PFR points.
                    Default value is 250 points.

        linspace_ts Optional. If True, use linearly spaced residence times
                    instead of log-spaced ones.

    Returns:
        pfr_cs      (NUM_PTS x d) numpy array representing the PFR trajectory
                    points.

        pfr_ts      (NUM_PTS x 1) numpy array of PFR residence times
                    corresponding to pfr_cs.
    '''

    # TODO: optional accuracy for integration

    # since logspace can't give log10(0), append 0.0 to the beginning of pfr_ts
    # and decrease NUM_PTS by 1
    if linspace_ts:
        pfr_ts = sp.linspace(0, t_end, NUM_PTS)
    else:
        pfr_ts = sp.append(0.0, sp.logspace(-3, sp.log10(t_end), NUM_PTS - 1))

    pfr_cs = scipy.integrate.odeint(rate_fn, Cf, pfr_ts)

    return pfr_cs, pfr_ts
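A usage sketch under the same assumptions as the CSTR example (function and
imports in scope; the rate constant is made up): first-order decay has the
closed form C_A(t) = C_A(0)*exp(-k*t), which the integrated trajectory should
reproduce.

import numpy as np

k = 0.5

def rate_fn(C, t):
    return np.array([-k * C[0], k * C[0]])

Cf = np.array([1.0, 0.0])
pfr_cs, pfr_ts = calc_pfr_trajectory(Cf, rate_fn, t_end=10.0)
assert np.allclose(pfr_cs[:, 0], Cf[0] * np.exp(-k * pfr_ts), atol=1e-6)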
Example #55
0
	def __init__(self):
		self._tempangles = numpy.array([1,0.6,0.4,0.3,0.2,0.1,0.05,0.0])
		self._angles = numpy.arccos(self._tempangles)
		self._datafile = list(csv.reader(open("muonflux2.csv", "r"), delimiter='\t'))	# reads each row from the csv into a list, stores each list as an element of a list
		self._fluxtable = [item for sublist in self._datafile for item in sublist]	# flatten the list
		self._concatflux = []
		for i in range(len(self._fluxtable) // 2):
			x = float(self._fluxtable.pop(0))
			y = float(self._fluxtable.pop(0))
			self._concatflux.append(x*10**y)
		
		self._fluxdata = numpy.array(self._concatflux).reshape(-1,9)	# takes flattened array and reshapes to a 9 column matrix with arbitrary row size
		self._cumsums = self._fluxdata.copy()	# copy so the cumulative sums do not overwrite the raw flux table
		# this loop iterates over sliced rows to replace them with cumulative sums of the slice. The slice is the whole row minus the first element
		for i in range(len(self._cumsums)):
			self._cumsums[i,1:] = numpy.cumsum(self._cumsums[i,1:])
		
		
		self.interpgrid=scipy.interpolate.RectBivariateSpline(self._fluxdata[:,0],self._angles,self._fluxdata[:,1:],kx=1,ky=1)
		self.precision = 30
		self.newx = scipy.logspace(math.log10(self._fluxdata[0,0]),math.log10(self._fluxdata[-1,0]),self.precision)
		#print self.newx # the interpolated energies
		self.interpedgrid = self.interpgrid(self.newx,self._angles)
		
		self.interpcumsumgrid = scipy.interpolate.RectBivariateSpline(self._fluxdata[:,0],self._angles,self._cumsums[:,1:],kx=1,ky=1)
		self.interpedcumsums = self.interpcumsumgrid(self.newx,self._angles)
		
		
		self._ncs = numpy.cumsum(self.interpedcumsums[:,-1]) # takes the cumulative sum of the last column of cumsums[]
		
		self._ncs = self._ncs/self._ncs[-1] # normalizes to the last element which is the largest
		
		
		# this loop does the same thing, but for the rows
		for i in range(len(self.interpedcumsums)):
			self.interpedcumsums[i,:] /= self.interpedcumsums[i,-1]
Example #56
0
def get_error(num_points, num_iter=100):
    """
    Compute mean and standard deviation from the mean. Standard deviation from
    the mean is error.
    """

    num_points = sp.floor(num_points)

    pi_values = []
    for i in range(num_iter):
        pi_values.append(find_pi_approx(num_points)[1])

    pi_values = sp.array(pi_values)
    return num_points, sp.std(pi_values)


if __name__ == '__main__':
    points = sp.logspace(4, 6, 100)
    x = []
    y = []
    for i in points:
        num_points, err = get_error(i)
        print(num_points, err)
        x.append(num_points)
        y.append(err)

    plt.figure(0)
    plt.plot(x, y)
    plt.show()
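find_pi_approx is not shown in this example. A hypothetical hit-or-miss
reconstruction consistent with how it is called above (it must return the
point count and the pi estimate), for which the error curve plotted here
should fall off like 1/sqrt(N):

import numpy as np

def find_pi_approx(num_points):
    # fraction of uniform points inside the unit quarter-circle, times 4
    num_points = int(num_points)
    xy = np.random.rand(num_points, 2)
    inside = np.count_nonzero((xy ** 2).sum(axis=1) <= 1.0)
    return num_points, 4.0 * inside / num_points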
Example #57
0
	def trainModel(self, do_pca=False, out_dir='./cache', rftop=40, class_labels=SP.array(['G1','S','G2M']), cv=10, npc=3, is_SVM=1, is_RFE=0, scale=False):
		if not os.path.exists(out_dir):
			os.makedirs(out_dir)	
		CFG = {}
		CFG['is_RFE'] = is_RFE # use recursive feature selection (can be slow for large datasets)
		CFG['is_SVM'] = is_SVM # use SVM with univariate feature selection (faster than RFE)
		CFG['CV_inner'] = cv #inner CV for RFE_CV: either an int or 'LOOCV'
		CFG['out_dir'] = out_dir
		CFG['do_pca'] = do_pca
		CFG['lassotop'] = 20
		self.cv = cv
		Y = self.Y
		labels = self.labels
		var_names = self.geneNames
		numClasses = self.numClasses
		predRF = SP.zeros((len(labels),numClasses))
		predSVM = SP.zeros((len(labels),numClasses)) 
		predSVMrbf = SP.zeros((len(labels),numClasses))
		predGNB = SP.zeros((len(labels),numClasses))
		predLR = SP.zeros((len(labels),numClasses))
		predLRall = SP.zeros((len(labels),numClasses))
		names_dict={}
		if self.cv == 'LOOCV':
			loo = LeaveOneOut(len(labels))
			CV_list = (list(iter(loo)))
			CV_list.append((SP.array(range(Y.shape[0])), SP.array(range(Y.shape[0]))))#all data...
		else:
			skf = StratifiedKFold(labels, n_folds=self.cv)
			CV_list = (list(iter(skf)))
			CV_list.append((SP.array(range(Y.shape[0])), SP.array(range(Y.shape[0]))))#all data...
		lambda_best = SP.zeros((1,len(CV_list))).ravel()	
		print("Performing cross validation ...")
		for i in range(len(CV_list)):
			if i<len(CV_list)-1:
				print("Fold " + str(i+1) + " of " + str(len(CV_list)-1))
			else:
				print("Final model")
			# string label for this fold
			#get data of a CV run
			cv_tr = CV_list[i][0]
			cv_tst = CV_list[i][1]
			lab_tr = labels[cv_tr]
			Ytr = Y[cv_tr,:]
			Ytst = Y[cv_tst,:]
			lab_tst = labels[cv_tst]
			if (i==len(CV_list)-1):
				foldlabel = 'full'
				if self.Y_tst is None:
					Ytst = Y[cv_tst,:]
					lab_tst = labels[cv_tst]
				else:
					foldlabel = 'Test'
					Ytst = self.Y_tst
					lab_tst = self.labels_tst
			else:
				foldlabel = str(i)	
			if do_pca>=1:
				#do PCA to get features
				pcaCC = PCA(n_components=npc, whiten=False)
				pcaCC.fit(Ytr)
				pcaTst=pcaCC.transform(Ytst)
				pcaTr=pcaCC.transform(Ytr)
				#selection = SelectKBest(k=1)
				#combined_features = FeatureUnion([("pca", pcaCC), ("univ_select", selection)])
				combined_features = FeatureUnion([("pca", pcaCC)])
				gnb = GaussianNB()
				y_pred = gnb.fit(pcaTr, lab_tr).predict_proba(pcaTst)
				if i<len(CV_list)-1:
					predGNB[cv_tst,:] =y_pred#[:,1]
				else:
					predGNB_ts = y_pred#[:,1]
			if do_pca==2:
				Ytr = SP.concatenate((Ytr, pcaTr),1)
				Ytst = SP.concatenate((Ytst, pcaTst),1)
				pcnames = []
				for pci in range(npc):
					pcnames.append('PC'+str(pci+1))
				var_names = SP.concatenate((var_names, SP.array(pcnames)),1)				
			print("  Computing random forest ...")
			
			if CFG['is_RFE']==1:#Recursive feature selection with SVM
				print("  Computing RFE with SVM ...")
				svc = SVC(kernel="linear", probability=False, class_weight='auto')#use linear SVM for selection
				rfecv = RFECV(estimator=svc, step=1,scoring='f1')
				param_grid = dict(estimator__C=[0.1, 1, 10, 100, 1000])
				clf_rfe = GridSearchCV(rfecv, param_grid=param_grid, cv=3, scoring='f1')#GridSearch to find optimal parameters
				clf_rfe.fit(Ytr, lab_tr)
				svc = SVC(kernel="linear", probability=False,C=clf_rfe.best_estimator_.estimator.C, class_weight='auto')#use linear SVM for selection
				if CFG['CV_inner']=='':
					rfecv = RFECV(estimator=svc, step=1,scoring='f1')
				elif CFG['CV_inner']=='LOOCV':
					rfecv = RFECV(estimator=svc, step=1,scoring='f1', cv=LeaveOneOut(len(lab_tr)))
				else:
					rfecv = RFECV(estimator=svc, step=1,scoring='f1', cv=StratifiedKFold(lab_tr, n_folds=CFG['CV_inner']))
				clf_rfe.best_estimator_.fit(Ytr, lab_tr)
				predicted = clf_rfe.best_estimator_.predict(Ytst)
				if i<len(CV_list)-1:
					predSVM[cv_tst,:] = predicted
				else:
					predSVM_ts = predicted
				classifier = svm.SVC(kernel='rbf', gamma=0.05, class_weight='auto', probability=True)#rbf kernel for prediction
				param_grid = dict(C=[0.1, 1], gamma=[1e-1,1e-2,1e-3])
				clf_rbf = GridSearchCV(classifier, param_grid=param_grid, cv=3, scoring='f1')
				clf_rbf.fit(Ytr[:,clf_rfe.best_estimator_.ranking_==1], lab_tr)
				clf_rbf.best_estimator_.fit(Ytr[:,clf_rfe.best_estimator_.ranking_==1], lab_tr)
				predicted = clf_rbf.best_estimator_.predict_proba(Ytst[:,clf_rfe.best_estimator_.ranking_==1])
				if i<len(CV_list)-1:
					predSVMrbf[cv_tst,:] = predicted
				fpr, tpr, thresholds = metrics.roc_curve(lab_tst, predicted[:,1])
				if (i==len(CV_list)-1) or CFG.get("CV_plots", 0) > 0:
					PL.figure()
					PL.plot(fpr, tpr)
					PL.savefig(CFG['out_dir']+'/RF_SVM_'+foldlabel+'.pdf')
					names_dict[foldlabel+'_SVM']=self.geneNames[clf_rfe.best_estimator_.ranking_==1]			
			elif CFG['is_SVM']==1:#univariate FS with rbf SVM; choose this if you hava a large data set (many features, eg RNAseq)
				print("  SVM feature selection ...")
				classifier = svm.SVC(kernel='rbf', gamma=0.05, class_weight='auto', probability=True)
				selection = SelectKBest(k=1)
				combined_features = FeatureUnion([("univ_select", selection)])				
				
				X_features = combined_features.fit(Ytr, lab_tr).transform(Ytr)
				scaler = preprocessing.StandardScaler().fit(Ytr)
				YtrS = scaler.transform(Ytr)
				YtstS = scaler.transform(Ytst)
				
				classifier.fit(X_features, lab_tr)
				pipeline = Pipeline([("features", combined_features), ("svm", classifier)])
				if CFG['do_pca']==3:
					param_grid = dict(features__pca__n_components=SP.unique(SP.round_(SP.logspace(1.0,max(SP.log2(Ytr.shape[1]), SP.log2(10)),num=min(5,Ytr.shape[1]),base=2.0))),
									  features__univ_select__k=SP.unique(SP.round_(SP.logspace(3.0,SP.log2(Ytr.shape[1]),num=min(10,Ytr.shape[1]),base=2.0))),
									  svm__C=[0.1, 1, 10], svm__gamma=[1e-1,1e-2,1e-3])
				else:
					C_range = 10. ** SP.arange(0, 2)
					gamma_range = 10. ** SP.arange(-5, 1)
					param_grid = dict(features__univ_select__k=SP.unique(SP.round_(SP.logspace(3.0,SP.log2(Ytr.shape[1]),num=min(10,Ytr.shape[1]),base=2.0))),
									  svm__C=C_range, svm__gamma=gamma_range)
				clf = GridSearchCV(pipeline, param_grid=param_grid, cv=5, scoring='f1')
				clf.fit(YtrS, lab_tr)
				print("The best classifier is: ", clf.best_estimator_)
				select_best=clf.best_estimator_.get_params()['features__univ_select']
				#names_dict[foldlabel+'_SVM']=self.geneNames[SP.argsort(-1.0*select_best.scores_)[0:(select_best.k-1)]]
				expected = lab_tst
				predicted = clf.best_estimator_.predict_proba(YtstS)
				if i<len(CV_list)-1:
					predSVM[cv_tst,:] = predicted
				else:
					predSVM_ts = predicted
				#print(clf.best_estimator_)

				classifier = svm.SVC(kernel='rbf', gamma=0.05, class_weight='auto', probability=True)#rbf kernel for prediction
				param_grid = dict(C=[1,10], gamma=[ 1e-1,1e-2,1e-3])
				clf_rbf = GridSearchCV(classifier, param_grid=param_grid, cv=5, scoring='f1')
				clf_rbf.fit(Ytr, lab_tr)
				clf_rbf.best_estimator_.fit(Ytr, lab_tr)
				predicted = clf_rbf.best_estimator_.predict_proba(Ytst)
				if i<len(CV_list)-1:
					predSVMrbf[cv_tst,:] = predicted
				else:
					predSVMrbf_ts = predicted
		
			#do lasso with regularisation path
			cs = l1_min_c(Ytr, lab_tr, loss='log') * SP.logspace(0, 3)
			print("  Computing regularization path ...")
		
			lasso = linear_model.LogisticRegression(C=cs[0]*10.0, penalty='l1', tol=1e-6)
			param_grid = dict(C=cs)
			clf_lr = GridSearchCV(lasso, param_grid=param_grid, cv=5, scoring='f1')
			clf_lr.fit(Ytr, lab_tr)
			clf_lr.best_estimator_.fit(Ytr, lab_tr)
			lambda_best[i] = clf_lr.best_params_.get('C')
			predicted = clf_lr.best_estimator_.predict_proba(Ytst)

			clf = linear_model.LogisticRegression(C=cs[0]*10.0, penalty='l1', tol=1e-6)
			coefs_ = []
			for c in cs:
				clf.set_params(C=c)
				clf.fit(Ytr, lab_tr)
				coefs_.append(clf.coef_.ravel().copy())
		
		
			if i<len(CV_list)-1:
				predLR[cv_tst,:] = predicted
			else:
				predLR_ts = predicted
			coefs_ = SP.array(coefs_)
			# get ordering by importance (how many times they appear)
			order=(coefs_!=0).sum(axis=0).argsort()
			order=order[::-1] # descending
			# store this order
			featrank_lasso = order
			showtop= min(Ytr.shape[1], CFG['lassotop'])

			clfAll = linear_model.LogisticRegression(C=1e5, penalty='l2', tol=1e-6)
			clfAll.fit(Ytr, lab_tr)
			predicted = clfAll.predict_proba(Ytst)
			if i<len(CV_list)-1:
				predLRall[cv_tst,:] = predicted
			else:
				predLRall_ts = predicted
			forest = ExtraTreesClassifier(n_estimators=500,
										  random_state=0, criterion="entropy", bootstrap=False)
			#forest = RandomForestClassifier(n_estimators=500,
			#							 random_state=0, criterion="entropy")
			forest.fit(Ytr, lab_tr)
			pred = forest.predict_proba(Ytst)
			#pdb.set_trace()
			if i<len(CV_list)-1:
				predRF[cv_tst,:] = pred#[:,1]
			else:
				predRF_ts = pred#[:,1]
			importances = forest.feature_importances_
			std = SP.std([tree.feature_importances_ for tree in forest.estimators_],
						 axis=0)
		
			topfeat=min(Ytr.shape[1], rftop)
			indices = SP.argsort(importances)[::-1][0:topfeat]
			# store full feature ranking
			featrank_rf = SP.argsort(importances)[::-1]
			# Plot the feature importances of the forest
			if (i==len(CV_list)-1):
				PL.figure()
				#PL.title("Feature importances, Fold "+foldddPPlabel+', AUC='+str(SP.round_(metrics.auc(fpr, tpr),3)))
				PL.title("Feature importances")
				#PL.bar(range(topfeat), importances[indices],color="r", yerr=std[indices], align="center")
				PL.bar(range(topfeat), importances[indices],color="r", align="center")
				PL.xticks(range(topfeat), indices, rotation=70)
				PL.gca().set_xticklabels(var_names[indices])
				PL.setp(PL.gca().get_xticklabels(), fontsize=8)
				PL.xlim([-1, topfeat])
				PL.savefig(out_dir+'/RF_featureimportance_'+foldlabel+'.pdf')
	
	
		f2 = open(os.path.join(out_dir,'classification_reportCV.txt')  ,'w')
		
		predRFv = SP.argmax(predRF_ts,axis=1)+1
		predRF_trv = SP.argmax(predRF,axis=1)+1
		self.scores = predRF
		self.scores_tst = predRF_ts
		self.ranking = var_names[indices]

		predLRv = SP.argmax(predLR_ts,axis=1)+1
		predLR_trv = SP.argmax(predLR,axis=1)+1
		self.scoresLR = predLR
		self.scoresLR_tst = predLR_ts
		
		predLRallv = SP.argmax(predLRall_ts,axis=1)+1
		predLRall_trv = SP.argmax(predLRall,axis=1)+1
		self.scoresLRall = predLRall
		self.scoresLRall_tst = predLRall_ts
		if CFG['is_SVM']==1:
			predSVMv = SP.argmax(predSVM_ts,axis=1)+1
			predSVM_trv = SP.argmax(predSVM,axis=1)+1
			self.scoresSVM = predSVM
			self.scoresSVM_tst = predSVM_ts
			
			predSVMrbfv = SP.argmax(predSVMrbf_ts,axis=1)+1
			predSVMrbf_trv = SP.argmax(predSVMrbf,axis=1)+1
			self.scoresSVMrbf = predSVMrbf
			self.scoresSVMrbf_tst = predSVMrbf_ts

		predGNBv = SP.argmax(predGNB_ts,axis=1)+1
		predGNB_trv = SP.argmax(predGNB,axis=1)+1
		self.scoresGNB = predGNB
		self.scoresGNB_tst = predGNB_ts

		print("Classification report for classifier %s:\n%s\n" % ('Gaussian Naive Bayes', metrics.classification_report(self.labels, predGNB_trv)))
		print("Classification report for classifier %s:\n%s\n" % ('Random Forest', metrics.classification_report(self.labels, predRF_trv)))
		print("Classification report for classifier %s:\n%s\n" % ('LR', metrics.classification_report(self.labels, predLR_trv)))
		print("Classification report for classifier %s:\n%s\n" % ('LRall', metrics.classification_report(self.labels, predLRall_trv)))
		if CFG['is_RFE']==1:
			print("Classification report for classifier %s:\n%s\n" % ('SVM ', metrics.classification_report(labels, predSVM>0.5)),file=f2)
		elif CFG['is_SVM']==1:
			print("Classification report for classifier %s:\n%s\n" % ('SVM', metrics.classification_report(self.labels, predSVM_trv)))
			print("Classification report for classifier %s:\n%s\n" % ('SVMrbf', metrics.classification_report(self.labels, predSVMrbf_trv)))
		f2.close()
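The regularization-path step above (l1_min_c times a logspace grid) is worth
isolating. A hedged sketch of that idea in current scikit-learn, on synthetic
data rather than the cell-cycle data used above:

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.svm import l1_min_c

X, y = make_classification(n_samples=200, n_features=20, random_state=0)
# smallest C with any nonzero coefficient, then three decades upward
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3, 16)

coefs = []
for c in cs:
    clf = LogisticRegression(C=c, penalty='l1', solver='liblinear', tol=1e-6)
    coefs.append(clf.fit(X, y).coef_.ravel().copy())
coefs = np.array(coefs)   # (len(cs), n_features); nonzeros grow with C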
Example #58
0
def roomcomp(impresp, filter, target, ntaps, mixed_phase, opformat, trim, nsthresh, noplot):

  print "Loading impulse response"
  
  # Read impulse response
  Fs, data = wavfile.read(impresp)
  data = norm(np.hstack(data))


  if trim:
    print "Removing leading silence"
    for spos,sval in enumerate(data):
        if abs(sval)>nsthresh:
            lzs=max(spos-1,0)
            ld =len(data)
            print 'Impulse starts at position ', spos, '/', len(data)
            print 'Trimming ', float(lzs)/float(Fs), ' seconds of silence'
            data=data[lzs:len(data)] #remove everything before sample at spos
            break
		  
  print "\nSample rate = ", Fs
  
  print "\nGenerating correction filter"

  ###
  ## Logarithmic pole positioning
  ###

  fplog = np.hstack((sp.logspace(sp.log10(20.), sp.log10(200.), 14),
                     sp.logspace(sp.log10(250.), sp.log10(20000.), 13)))
  plog = freqpoles(fplog, Fs)

  ###
  ## Preparing data
  ###

  # making the measured response minimum-phase
  cp, minresp = rceps(data)

  # Impulse response
  imp = np.zeros(len(data), dtype=np.float64)
  imp[0]=1.0

  # Target
  outf = []
  db = []

  if target == 'flat':
    
    # Make the target output a bandpass filter
    Bf, Af = sig.butter(4, 30/(Fs/2), 'high')
    outf = sig.lfilter(Bf, Af, imp) 
    
  else:
    
    # load target file
    t = np.loadtxt(target)
    frq = t[:,0]; pwr = t[:,1]
    
    # calculate the FIR filter via windowing method
    fir = sig.firwin2(501, frq, np.power(10, pwr/20.0), nyq = frq[-1])	
    # Minimum phase, zero padding	
    cp, outf = rceps(np.append(fir, np.zeros(len(minresp) - len(fir))))
      
  ###
  ## Filter design
  ###

  #Parallel filter design
  (Bm, Am, FIR) = parfiltid(minresp, outf, plog)

  # equalized loudspeaker response - filtering the 
  # measured transfer function by the parallel filter
  equalizedresp = parfilt(Bm, Am, FIR, data)

  # Equalizer impulse response - filtering a unit pulse
  equalizer = norm(parfilt(Bm, Am, FIR, imp))

  # Windowing with a half hanning window in time domain
  han = np.hanning(ntaps*2)[-ntaps:]
  equalizer = han * equalizer[:ntaps]

  ###
  ## Mixed-phase compensation
  ## Based on the paper "Mixed Time-Frequency approach for Multipoint
  ## Room Response Equalization," by A. Carini et al.
  ## To use this feature, your Room Impulse Response should have all
  ## the leading zeros removed.
  ###
  if mixed_phase is True:
    
    # prototype function
    hp = norm(np.real(equalizedresp))

    # time integration of the human ear is ~24ms
    # See "Measuring the mixing time in auditoria," by Defrance & Polack
    hop_size = 0.024
    samples = hop_size * Fs

    bins = np.int(np.ceil(len(hp) / samples))

    tmix = 0

    # Kurtosis method
    for b in range(bins):
      start = np.int(b * samples)
      end = np.int((b+1) * samples)
      k = kurtosis(hp[start:end])
      if k <= 0:
        tmix = b * hop_size
        break

    # truncate the prototype function
    taps = np.int(tmix*Fs)

    print "\nmixing time(secs) = ", tmix, "; taps = ", taps
    
    if taps > 0:
      # Time reverse the array
      h = hp[:taps][::-1]
      # create all pass filter
      phase = np.unwrap(np.angle(h))
      H = np.exp(1j*phase)
      # convert from db to linear
      mixed = np.power(10, np.real(H)/20.0)
      # create filter's impulse response
      mixed = np.real(ifft(mixed))
      
      # convolve and window to desired length
      equalizer = conv(equalizer, mixed)
      equalizer = han * equalizer[:ntaps]
      
      #data = han * data[:ntaps]
      #eqresp = np.real(conv(equalizer, data))
    else:
      print "zero taps; skipping mixed-phase computation"
  if opformat in ('wav', 'wav24'):      
  # Write data
    wavwrite_24(filter, Fs, norm(np.real(equalizer)))
    print '\nOutput format is wav24'
    print 'Output filter length =', len(equalizer), 'taps'
    print 'Output filter written to ' + filter
	
    print "\nUse sox to convert output .wav to raw 32 bit IEEE floating point if necessary,"
    print "or to merge left and right channels into a stereo .wav"
    print "\nExample: sox leq48.wav -t f32 leq48.bin"
    print "         sox -M le148.wav req48.wav output.wav\n"

  elif opformat == 'wav32':
    wavwrite_32(filter, Fs, norm(np.real(equalizer)))
    print '\nOutput format is wav32'
    print 'Output filter length =', len(equalizer), 'taps'
    print 'Output filter written to ' + filter
    print "\nUse sox to convert output .wav to raw 32 bit IEEE floating point if necessary,"
    print "or to merge left and right channels into a stereo .wav"
    print "\nExample: sox leq48.wav -t f32 leq48.bin"
    print "         sox -M le148.wav req48.wav output.wav\n"
  elif opformat == 'bin':
    # direct output to bin avoids float64->pcm16->float32 conversion by going direct 
    #float64->float32
    f = open(filter, 'wb')
    norm(np.real(equalizer)).astype('float32').tofile(f)
    f.close()
    print '\nOutput filter length =', len(equalizer), 'taps'
    print 'Output filter written to ' + filter
  else:
    print 'Output format not recognized, no file generated.'


  ###
  ## Plots
  ###
  if not noplot:
    data *= 500
    # original loudspeaker-room response
    tfplot(data, Fs, avg = 'abs')
    # 1/3 Octave smoothed
    tfplots(data, Fs, 'r')

    #tfplot(mixed, Fs, 'r')

    # equalizer transfer function
    tfplot(0.75*equalizer, Fs, 'g')
    # indicating pole frequencies
    plt.vlines(fplog, -2, 2, color='k', linestyles='solid')

    # equalized loudspeaker-room response
    tfplot(equalizedresp*0.01, Fs, avg = 'abs')
    # 1/3 Octave smoothed
    tfplots(equalizedresp*0.01, Fs, 'r')

    # Add labels
    # May need to reposition these based on input data
    plt.text(325,30,'Unequalized loudspeaker-room response')
    plt.text(100,-15,'Equalizer transfer function')
    plt.text(100,-21,'(Black lines: pole locations)')
    plt.text(130,-70,'Equalized loudspeaker-room response')

    a = plt.gca()
    a.set_xlim([20, 20000])
    a.set_ylim([-80, 80])
    plt.ylabel('Amplitude (dB)', color='b')
    plt.xlabel('Frequency (Hz)')
    plt.grid()
    plt.legend()
    plt.show()
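One closing note: rceps() used above comes from the project itself, not from
scipy. A hedged sketch of the standard homomorphic (real-cepstrum) route to a
minimum-phase version of an impulse response, which is the role rceps plays
here:

import numpy as np

def minimum_phase(x):
    # fold the real cepstrum onto positive quefrencies, then exponentiate
    spec = np.abs(np.fft.fft(x))
    cep = np.fft.ifft(np.log(np.maximum(spec, 1e-12))).real
    n = len(x)
    w = np.zeros(n)
    w[0] = 1.0
    w[1:(n + 1) // 2] = 2.0
    if n % 2 == 0:
        w[n // 2] = 1.0
    return np.fft.ifft(np.exp(np.fft.fft(w * cep))).real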