Example no. 1
def Trms2_vs_umag(uvs, bms, umag_px, uv_bm_area=2., umin=4., umax=200., logstep=.1):
    ubins = 10**n.arange(n.log10(umin), n.log10(umax), logstep)
    errs, Trms2, wgts = [], [], []
    ratio = n.abs(uvs)**2 / n.abs(bms)**2
    for u in ubins:
        r_px_inner = u * 10**(-logstep/2) / umag_px
        r_px_outer = u * 10**(logstep/2) / umag_px
        rng = ring(uvs.shape[0], r_px_inner, r_px_outer)
        uvs2_r = n.abs(uvs)**2 * rng
        bms2_r = n.abs(bms)**2 * rng
        wgts.append(bms2_r.sum())
        uvs2_r_avg = uvs2_r.sum() / wgts[-1]
        print('-'*20)
        print(u / umag_px, uvs2_r_avg, n.sum(ratio*rng) / n.sum(rng))
        Trms2.append(uvs2_r_avg)
        # Estimate average variance of samples around the ring
        sig2_r = n.abs(uvs2_r - uvs2_r_avg * bms2_r)**2
        sig = n.sqrt(sig2_r.sum() / bms2_r.sum())
        # Estimate number of independent samples around ring
        #nsamples = rng.sum() * umag_px**2 / uv_bm_area / 2
        #err = n.sqrt(var) / n.sqrt(nsamples)
        errs.append(sig)
        print(u, Trms2[-1], errs[-1])
    Trms2 = n.array(Trms2); errs = n.array(errs); wgts = n.array(wgts)
    #Cls = Cl_from_Trms(Trms, ells)
    #errs = Cl_from_Trms(errs, ells)
    return ubins, Trms2, errs, wgts
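The `ring` helper called above is not shown; a minimal sketch of one plausible implementation, assuming a square uv grid centered on the array center (an assumption, not the original helper):

import numpy as n

def ring(npix, r_px_inner, r_px_outer):
    # boolean annulus mask of shape (npix, npix), centered on the grid center
    y, x = n.indices((npix, npix))
    r = n.hypot(x - npix / 2., y - npix / 2.)
    return (r >= r_px_inner) & (r < r_px_outer)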
Example no. 2
def remlplen_ichige(fp, fs, dp, ds):
    """Determine the length of the low pass filter with passband frequency
    fp, stopband frequency fs, passband ripple dp, and stopband ripple ds.
    fp and fs must be normalized with respect to the sampling frequency.
    Note that the filter order is one less than the filter length.

    References
    ----------
    K. Ichige, M. Iwaki, and R. Ishii, Accurate Estimation of Minimum
    Filter Length for Optimum FIR Digital Filters, IEEE Transactions on
    Circuits and Systems, 47(10):1008-1017, October 2000.

    """
    
    from numpy import log10, arctan, ceil, pi

    dF = fs-fp
    v = lambda dF,dp: 2.325*((-log10(dp))**-0.445)*dF**(-1.39)
    g = lambda fp,dF,d: (2.0/pi)*arctan(v(dF,d)*(1.0/fp-1.0/(0.5-dF)))
    h = lambda fp,dF,c: (2.0/pi)*arctan((c/dF)*(1.0/fp-1.0/(0.5-dF)))
    Nc = ceil(1.0+(1.101/dF)*(-log10(2.0*dp))**1.1)
    Nm = (0.52/dF)*log10(dp/ds)*(-log10(dp))**0.17
    N3 = ceil(Nc*(g(fp,dF,dp)+g(0.5-dF-fp,dF,dp)+1.0)/3.0)
    DN = ceil(Nm*(h(fp,dF,1.1)-(h(0.5-dF-fp,dF,0.29)-1.0)/2.0))
    N4 = N3+DN

    return int(N4)
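A quick sanity check with invented specs (not from the original source): a lowpass with passband edge 0.2 and stopband edge 0.3 (normalized to the sampling frequency), 1% passband ripple and 0.1% stopband ripple.

N = remlplen_ichige(fp=0.2, fs=0.3, dp=0.01, ds=0.001)
print("estimated minimum filter length:", N)  # the filter order is N - 1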
Example no. 3
def cd_sphere_vector_bool(Re):
    from numpy import log10, array, polyval, logical_and, zeros_like
       
    condition1 = Re < 0
    condition2 = logical_and(0 < Re, Re <= 0.5)
    condition3 = logical_and(0.5 < Re, Re <= 100.0)
    condition4 = logical_and(100.0 < Re, Re <= 1.0e4)
    condition5 = logical_and(1.0e4 < Re, Re <= 3.35e5)
    condition6 = logical_and(3.35e5< Re, Re <= 5.0e5)
    condition7 = logical_and(5.0e5 < Re, Re <= 8.0e6)
    condition8 = Re > 8.0e6
    
    cd = zeros_like(Re)
    cd[condition1] = 0.0
    
    cd[condition2] = 24/Re[condition2]
    
    p = array([4.22,-14.05,34.87,0.658])
    cd[condition3] = polyval(p,1.0/Re[condition3]) 
    
    p = array([-30.41,43.72,-17.08,2.41])
    cd[condition4] = polyval(p,1.0/log10(Re[condition4]))
    
    p = array([-0.1584,2.031,-8.472,11.932])
    cd[condition5] = polyval(p,log10(Re[condition5]))
    
    cd[condition6] = 91.08*(log10(Re[condition6]/4.5e5))**4 + 0.0764
    
    p  = array([-0.06338,1.1905,-7.332,14.93])
    cd[condition7] = polyval(p,log10(Re[condition7]))
    
    cd[condition8] = 0.2
    
    return cd
Example no. 4
    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """

        # Distance term
        R = np.sqrt(dists.rjb ** 2 + 11.29 ** 2)

        # Magnitude term
        M = rup.mag - 6

        # Site term only distinguishes between lava and ash;
        # since ash sites have Vs30 in the range 60-200m/s,
        # we use this upper value as class separator
        S = np.zeros(R.shape)
        S[sites.vs30 <= 200] = 1

        # Mean ground motion (log10)
        mean = (0.518 + 0.387*M - np.log10(R) - 0.00256*R + 0.335*S)

        # Converting to natural log
        mean /= np.log10(np.e)

        # Check for standard deviation type
        assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
                   for stddev_type in stddev_types)

        # Constant (total) standard deviation
        stddevs = [0.237/np.log10(np.e) + np.zeros(R.shape)]

        return mean, stddevs
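The conversion step divides by log10(e) because ln x = log10 x / log10 e; a one-line numeric check:

import numpy as np
x = 3.7
print(np.log10(x) / np.log10(np.e), np.log(x))  # both print 1.3083...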
Example no. 5
def compute_angular_momentum_transfer(snapshot, Rmin, Rmax, NR = None, Nphi = None, alpha=0.1, h0=0.1):
    """
    Compute the mass accretion rate and the advective, viscous, and
    gravitational angular momentum transfer as functions of radius
    from a simulation snapshot, evaluated on an internally constructed
    log-polar grid.
    """
    

    if (NR is None):
        NR = 512
    if (Nphi is None):
        Nphi = int(2 * np.pi / (10**((np.log10(Rmax) - np.log10(Rmin))/NR) - 1))

    # Create a polar grid
    grid = grid_polar(NR = NR, Nphi = Nphi, Rmin= Rmin,Rmax = Rmax,scale='log')

    mdot = mass_advection(snapshot,grid)
    
    torque_adv = angular_momentum_advection(snapshot,grid)

    torque_visc = angular_momentum_viscosity(snapshot,grid, alpha = alpha, h0 = h0)

    torque_grav = angular_momentum_gravity(snapshot,grid)
    
    return grid.R.mean(axis=0),mdot, torque_adv,torque_visc,torque_grav
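The default `Nphi` is chosen so that the azimuthal cell width 2*pi/Nphi matches the fractional radial width of one cell of the log grid; a standalone check of that relation with illustrative values:

import numpy as np

Rmin, Rmax, NR = 0.5, 50.0, 512
growth = 10**((np.log10(Rmax) - np.log10(Rmin)) / NR)  # geometric step of the log grid
Nphi = int(2 * np.pi / (growth - 1))
print(growth - 1, 2 * np.pi / Nphi)  # the two cell aspect ratios agree closely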
Example no. 6
def cd_sphere(Re):
    "Computes the drag coefficient of a sphere as a function of the Reynolds number Re."
    # Curve fit after Fig. A-56 in Evett & Liu: "Fluid Mechanics & Hydraulics",
    # Schaum's Solved Problems, McGraw-Hill, 1989.
    
    from numpy import log10,array,polyval    
    
    if Re <= 0.0:
        CD = 0.0
    elif Re > 8.0e6:
        CD = 0.2
    elif Re > 0.0 and Re <= 0.5:
        CD = 24.0/Re
    elif Re > 0.5 and Re <= 100.0:
        p = array([4.22,-14.05,34.87,0.658])
        CD = polyval(p,1.0/Re) 
    elif Re > 100.0 and Re <= 1.0e4:
        p = array([-30.41,43.72,-17.08,2.41])
        CD = polyval(p,1.0/log10(Re))
    elif Re > 1.0e4 and Re <= 3.35e5:
        p = array([-0.1584,2.031,-8.472,11.932])
        CD = polyval(p,log10(Re))
    elif Re > 3.35e5 and Re <= 5.0e5:
        x1 = log10(Re/4.5e5)
        CD = 91.08*x1**4 + 0.0764
    else:
        p = array([-0.06338,1.1905,-7.332,14.93])
        CD = polyval(p,log10(Re))
    return CD
Example no. 7
def cd_sphere_vector(Re):
    "Computes the drag coefficient of a sphere as a function of the Reynolds number Re."
    # Curve fit after Fig. A-56 in Evett & Liu: "Fluid Mechanics & Hydraulics",
    # Schaum's Solved Problems, McGraw-Hill, 1989.

    from numpy import log10, array, polyval, where, zeros_like
    CD = zeros_like(Re)
   
    CD = where(Re<0,0.0,0.0)     # condition 1
    
    CD = where((Re > 0.0) & (Re <=0.5),24/Re,CD) # condition 2

    p = array([4.22,-14.05,34.87,0.658])
    CD = where((Re > 0.5) & (Re <=100.0),polyval(p,1.0/Re),CD) #condition 3

    p = array([-30.41,43.72,-17.08,2.41])
    CD = where((Re >100.0)  & (Re <=1.0e4) ,polyval(p,1.0/log10(Re)),CD) #condition 4

    p = array([-0.1584,2.031,-8.472,11.932])
    CD = where((Re > 1.0e4)  &  (Re <=3.35e5),polyval(p,log10(Re)),CD) #condition 5

    CD = where((Re > 3.35e5) & (Re <=5.0e5),91.08*(log10(Re/4.5e5))**4 + 0.0764,CD) #condition 6

    p  = array([-0.06338,1.1905,-7.332,14.93])
    CD = where((Re > 5.0e5)  &  (Re <=8.0e6),polyval(p,log10(Re)),CD) #condition 7
    
    CD = where(Re>8.0e6,0.2,CD)  # condition 8

    return CD
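A minimal cross-check of the scalar (Example no. 6) and vectorized (Example no. 7) implementations, assuming both are defined as above:

import numpy as np

Re = np.array([0.1, 10.0, 1.0e3, 1.0e5, 4.0e5, 1.0e6, 1.0e7])
cd_vec = cd_sphere_vector(Re)
cd_scalar = np.array([cd_sphere(r) for r in Re])
assert np.allclose(cd_vec, cd_scalar)  # the two curve fits agree branch by branch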
Example no. 8
def srcdiff_plot(env, model, **kwargs):
    obj_index       = kwargs.pop('obj_index', 0)
    src_index       = kwargs.pop('src_index', 0)
    with_colorbar   = kwargs.pop('with_colorbar', False)
    xlabel          = kwargs.pop('xlabel', r'arcsec')
    ylabel          = kwargs.pop('ylabel', r'arcsec')

    obj, data = model['obj,data'][obj_index]
    S = obj.basis.subdivision
    R = obj.basis.mapextent

    g = obj.basis.srcdiff_grid(data)[src_index]
    vmin = np.log10(np.amin(g[g>0]))
    g = g.copy() + 1e-10
    kw = default_kw(R, kwargs) #, vmin=vmin, vmax=vmin+2)

    #loglev = logspace(1, log(amax(g)-amin(g)), 20, base=math.e) + amin(g)
    pl.matshow(np.log10(g), **kw)
    matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
    if with_colorbar: pl.colorbar()
#   pl.over(contour, g, 50,  colors='w',               linewidths=1, 
#        extent=[-R,R,-R,R], origin='upper', extend='both')
    #pl.grid()

    pl.xlabel(xlabel)
    pl.ylabel(ylabel)
Example no. 9
def get_cs(e_0=100, z=74):
    """
    Returns a function representing the scaled bremsstrahlung cross_section.

    Args:
        e_0 (float): The electron kinetic energy, used to scale u=e_e/e_0.
        z (int): Atomic number of the material.

    Returns:
        A function representing cross_section(e_g,u) in mb/keV, with e_g in keV.

    """
    # NOTE: Data is given for E0>1keV. CS values below this level should be used with caution.
    # The default behaviour is to keep it constant
    with open(os.path.join(data_path, "cs/grid.csv"), 'r') as csvfile:
        r = csv.reader(csvfile, delimiter=' ', quotechar='|',
                       quoting=csv.QUOTE_MINIMAL)
        t = next(r)
        e_e = np.array([float(a) for a in t[0].split(",")])
        log_e_e = np.log10(e_e)
        t = next(r)
        k = np.array([float(a) for a in t[0].split(",")])
    t = []
    with open(os.path.join(data_path, "cs/%d.csv" % z), 'r') as csvfile:
        r = csv.reader(csvfile, delimiter=' ', quotechar='|',
                       quoting=csv.QUOTE_MINIMAL)
        for row in r:
            t.append([float(a) for a in row[0].split(",")])
    t = np.array(t)
    scaled = interpolate.RectBivariateSpline(log_e_e, k, t, kx=3, ky=1)
    m_electron = 511
    z2 = z * z
    return lambda e_g, u: (u * e_0 + m_electron) ** 2 * z2 / (u * e_0 * e_g * (u * e_0 + 2 * m_electron)) * (
        scaled(np.log10(u * e_0), e_g / (u * e_0)))
Example no. 10
def test_Moster13SmHm_behavior():
	"""
	"""
	default_model = Moster13SmHm()
	mstar1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	ratio1 = mstar1/3.4275e10
	np.testing.assert_array_almost_equal(ratio1, 1.0, decimal=3)

	default_model.param_dict['n10'] *= 1.1
	mstar2 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	assert mstar2 > mstar1

	default_model.param_dict['n11'] *= 1.1
	mstar3 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	assert mstar3 == mstar2

	mstar4_z1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12, redshift=1)
	default_model.param_dict['n11'] *= 1.1
	mstar5_z1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12, redshift=1)
	assert mstar5_z1 != mstar4_z1

	mstar_realization1 = default_model.mc_stellar_mass(prim_haloprop = np.ones(10000)*1e12, seed=43)
	mstar_realization2 = default_model.mc_stellar_mass(prim_haloprop = np.ones(10000)*1e12, seed=43)
	mstar_realization3 = default_model.mc_stellar_mass(prim_haloprop = np.ones(10000)*1e12, seed=44)
	assert np.array_equal(mstar_realization1, mstar_realization2)
	assert not np.array_equal(mstar_realization1, mstar_realization3)

	measured_scatter1 = np.std(np.log10(mstar_realization1))
	model_scatter = default_model.param_dict['scatter_model_param1']
	np.testing.assert_allclose(measured_scatter1, model_scatter, rtol=1e-3)

	default_model.param_dict['scatter_model_param1'] = 0.3
	mstar_realization4 = default_model.mc_stellar_mass(prim_haloprop = np.ones(10000)*1e12, seed=43)
	measured_scatter4 = np.std(np.log10(mstar_realization4))
	np.testing.assert_allclose(measured_scatter4, 0.3, rtol=1e-3)
Example no. 11
  def __init__(self,fname=None,mindx=None,mlist=[],logopt = 0, Lbox=2750., massfxnfname=None):
    """
    Read in a list of masses.  If logopt==1, then input masses are understood to be logarithmic.
    """
    self.Lbox = Lbox
    if mlist != []:
      if logopt == 0:
        self.m = np.array(mlist)
        self.lg10m = np.log10(self.m)
      else:
        self.lg10m = np.array(mlist)
        self.m = 10**(self.lg10m)
      self.lg10mcen, self.Nofm = None, None ## set these later with massfxn.
    elif massfxnfname is not None:
      self.lg10mcen, self.Nofm = np.loadtxt(massfxnfname,unpack=True,usecols=[0,1])


    else:
      try:
        if logopt == 0:
          self.m = np.loadtxt(fname,usecols=[mindx],unpack=True)
          self.lg10m = np.log10(self.m)
        else:
          self.lg10m = np.loadtxt(fname,usecols=[mindx],unpack=True)
          self.m = 10**(self.lg10m)
        self.lg10mcen, self.Nofm = None, None ## set these later with massfxn.
      except IOError:
        print('file read did not work.')
        self.m = None
        self.lg10m = None
Example no. 12
def display_fluxes(fluxes, id_convert=None, mnx_path=None, log=False, abstol=1e-6, **kwargs):

    if id_convert is not None:

        if mnx_path is None:
            print('Please specify local path of MetaNetX database.')
            return

        mnx = MetaNetX(mnx_path, version=3)

        # TODO: still need to find how to deal with ambiguous conversions

        fluxes = {
            kegg_id: abs(value)
            for r_id, value in fluxes.items()
            for kegg_id in mnx.translate_reaction_id(r_id[2:], id_convert, 'kegg')
        }
    else:
        fluxes = {key: abs(val) for key, val in fluxes.items()}

    if log:
        lb = np.log10(min([x for x in fluxes.values() if x > abstol]))
        fluxes = {key: np.log10(val) - lb for key, val in fluxes.items() if val > abstol}

    return iPATH_display(fluxes, **kwargs)
Example no. 13
	def plotPSD(self, chan, time_interval):
		Npackets = int(time_interval * self.accum_freq)
		plot_range = (Npackets // 2) + 1
		figure = plt.figure(num= None, figsize=(12,12), dpi=80, facecolor='w', edgecolor='w')
		# I 
		plt.suptitle('Channel ' + str(chan) + ' , Freq = ' + str((self.freqs[chan] + self.LO_freq)/1.0e6) + ' MHz') 
		plot1 = figure.add_subplot(311)
		plot1.set_xscale('log')
		plot1.set_autoscale_on(True)
		plt.ylim((-160,-80))
		plt.title('I')
		line1, = plot1.plot(np.linspace(0, self.accum_freq/2., (Npackets//2) + 1), np.zeros(plot_range), label = 'I', color = 'green', linewidth = 1)
		plt.grid()
		# Q
		plot2 = figure.add_subplot(312)
		plot2.set_xscale('log')
		plot2.set_autoscale_on(True)
		plt.ylim((-160,-80))
		plt.title('Q')
		line2, = plot2.plot(np.linspace(0, self.accum_freq/2., (Npackets//2) + 1), np.zeros(plot_range), label = 'Q', color = 'red', linewidth = 1)
		plt.grid()
		# Phase
		plot3 = figure.add_subplot(313)
		plot3.set_xscale('log')
		plot3.set_autoscale_on(True)
		plt.ylim((-120,-70))
		#plt.xlim((0.0001, self.accum_freq/2.))
		plt.title('Phase')
		plt.ylabel('dBc rad^2/Hz')
		plt.xlabel('log Hz')
		line3, = plot3.plot(np.linspace(0, self.accum_freq/2., (Npackets//2) + 1), np.zeros(plot_range), label = 'Phase', color = 'black', linewidth = 1)
		plt.grid()
		plt.show(block = False)
		count = 0
		stop = 1.0e10
		while count < stop:
			Is, Qs, phases = self.get_stream(chan, time_interval)
			I_mags = np.fft.rfft(Is, Npackets)
			Q_mags = np.fft.rfft(Qs, Npackets)
			phase_mags = np.fft.rfft(phases, Npackets)
			I_vals = (np.abs(I_mags)**2 * ((1./self.accum_freq)**2 / (1.0*time_interval)))
			Q_vals = (np.abs(Q_mags)**2 * ((1./self.accum_freq)**2 / (1.0*time_interval)))
			phase_vals = (np.abs(phase_mags)**2 * ((1./self.accum_freq)**2 / (1.0*time_interval)))
			phase_vals = 10*np.log10(phase_vals)
			phase_vals -= phase_vals[0]
			#line1.set_ydata(Is)
			#line2.set_ydata(Qs)
			#line3.set_ydata(phases)
			line1.set_ydata(10*np.log10(I_vals))
			line2.set_ydata(10*np.log10(Q_vals))
			line3.set_ydata(phase_vals)
			plot1.relim()
			plot1.autoscale_view(True,True,False)
			plot2.relim()
			plot2.autoscale_view(True,True,False)
			#plot3.relim()
			plot3.autoscale_view(True,True,False)
			plt.draw()
			count +=1
		return
Example no. 14
def test_behroozi10_smhm_z1():
    """ The arrays ``logmh_z01`` and ``logmratio_z01`` were provided by Peter Behroozi
    via private communication. These quantities are in the h=0.7 units adopted in
    Behroozi+10. This test function treats these arrays as truth,
    and enforces that the result computed by Halotools agrees with them.
    """
    model = Behroozi10SmHm()

    logmh_z1 = np.array(
        [11.368958, 11.493958, 11.618958, 11.743958,
        11.868958, 11.993958, 12.118958, 12.243958,
        12.368958, 12.493958, 12.618958, 12.743958,
        12.868958, 12.993958, 13.118958, 13.243958,
        13.368958, 13.493958, 13.618958, 13.743958,
        13.868958, 13.993958, 14.118958, 14.243958]
        )

    logmratio_z1 = np.array(
        [-2.145909, -2.020974, -1.924020, -1.852937,
        -1.804730, -1.776231, -1.764455, -1.766820,
        -1.781140, -1.805604, -1.838727, -1.879292,
        -1.926290, -1.978890, -2.036405, -2.098245,
        -2.163930, -2.233045, -2.305230, -2.380185,
        -2.457643, -2.537377, -2.619191, -2.702901]
        )

    halo_mass_z1 = (10.**logmh_z1)*model.littleh
    logmratio_z1 = np.log10((10.**logmratio_z1)*model.littleh)

    z1_sm = model.mean_stellar_mass(prim_haloprop=halo_mass_z1, redshift=1.0)
    z1_ratio = z1_sm / halo_mass_z1
    z1_result = np.log10(z1_ratio)
    assert np.allclose(z1_result, logmratio_z1, rtol=0.02)
Example no. 15
def plot_hist(axis, data_list, label_list, logx, logy, overlaid):
    """Plot histograms of the arrays in ``data_list`` on ``axis``,
    optionally with logarithmic axes, overlaid or side by side.
    """

    if logx:
        # Setup the logarithmic scale on the X axis
        data_array = np.array(data_list)
        vmin = np.log10(data_array.min())
        vmax = np.log10(data_array.max())
        bins = np.logspace(vmin, vmax, 50) # Make a range from 10**vmin to 10**vmax
    else:
        bins = 50

    if overlaid:
        for data_array, label in zip(data_list, label_list):
            res_tuple = axis.hist(data_array,
                                  bins=bins,
                                  log=logy,           # Set log scale on the Y axis
                                  histtype=HIST_TYPE,
                                  alpha=ALPHA,
                                  label=label)
    else:
        res_tuple = axis.hist(data_list,
                              bins=bins,
                              log=logy,               # Set log scale on the Y axis
                              histtype=HIST_TYPE,
                              alpha=ALPHA,
                              label=label_list)
Example no. 16
def plot_sphere_x( s, fname ):
  """ put plot of ionization fractions from sphere `s` into fname """

  plt.figure()
  s.Edges.units = 'kpc'
  s.r_c.units = 'kpc'
  xx = s.r_c
  L = s.Edges[-1]

  plt.plot( xx, np.log10( s.xHe1 ),
            color='green', ls='-', label = r'$x_{\rm HeI}$' )
  plt.plot( xx, np.log10( s.xHe2 ),
            color='green', ls='--', label = r'$x_{\rm HeII}$' )
  plt.plot( xx, np.log10( s.xHe3 ),
            color='green', ls=':', label = r'$x_{\rm HeIII}$' )

  plt.plot( xx, np.log10( s.xH1 ),
            color='red', ls='-', label = r'$x_{\rm HI}$' )
  plt.plot( xx, np.log10( s.xH2 ),
            color='red', ls='--', label = r'$x_{\rm HII}$' )

  plt.xlim( -L/20, L+L/20 )
  plt.xlabel( 'r_c [kpc]' )

  plt.ylim( -4.5, 0.2 )
  plt.ylabel( 'log 10 ( x )' )

  plt.grid()
  plt.legend(loc='best', ncol=2)
  plt.tight_layout()
  plt.savefig( 'doc/img/x_' + fname )
Example no. 17
def pltytfield():
    fig=plt.figure()
    ppy=yt.ProjectionPlot(ds, "x", "Bxy", weight_field="density") # project Bxy along x onto the yz plane
    By=ppy._frb["Bxy"]
    ax=fig.add_subplot(111)
    plt.xticks(tick_locs,tick_lbls)
    plt.yticks(tick_locs,tick_lbls)
    Bymag=ax.pcolormesh(np.log10(By))
    cbar_m=plt.colorbar(Bymag)
    cbar_m.set_label("Bxy")
    plt.title("Bxy in yz plane")

    fig=plt.figure()
    ppy=yt.ProjectionPlot(ds, "y", "Bxy", weight_field="density") # project Bxy along y onto the xz plane
    By=ppy._frb["Bxy"]
    ax=fig.add_subplot(111)
    plt.xticks(tick_locs,tick_lbls)
    plt.yticks(tick_locs,tick_lbls)
    Bymag=ax.pcolormesh(np.log10(By))
    cbar_m=plt.colorbar(Bymag)
    cbar_m.set_label("Bxy")
    plt.title("Bxy in xz plane")

    fig=plt.figure()
    ppy=yt.ProjectionPlot(ds, "z", "Bxy", weight_field="density") # project Bxy along z onto the xy plane
    By=ppy._frb["Bxy"]
    ax=fig.add_subplot(111)
    plt.xticks(tick_locs,tick_lbls)
    plt.yticks(tick_locs,tick_lbls)
    Bymag=ax.pcolormesh(np.log10(By))
    cbar_m=plt.colorbar(Bymag)
    cbar_m.set_label("Bxy")
    plt.title("Bxy in xy plane")
Example no. 18
def plot_kappa_vs_n(show=False):
    import biggles

    npts = 1000

    nmin = 0.25
    nmax = 8.0
    nvals = numpy.logspace(numpy.log10(nmin), numpy.log10(nmax), npts)
    kappas = numpy.zeros(npts)

    kfinder = KappaFinder()
    for i in range(npts):
        kappas[i] = kfinder(nvals[i])

    plt = biggles.FramedPlot()
    plt.xlabel = "sersic n"
    plt.ylabel = r"$\kappa$"
    plt.xlog = True
    plt.ylog = True

    pts = biggles.Points(nvals, kappas, type="filled circle", size=0.5, color="blue")
    plt.add(pts)

    if show:
        plt.show()

    f = os.path.expanduser("~/tmp/kappa-vs-n.eps")
    print(f)
    plt.write_eps(f)
Example no. 19
	def correlation_test(self, x, y, tol):
		# Find scaling / correlation between Menv and intensity
		# Return fit parameters and flag indicating which fit is best

		# Always first try linear fit with y = a + b*x:
		fit_m = ofit.lin_test(x, y)
		if (fit_m[3] < tol) & (fit_m[2] > tol):
			print("A proportionality I = %4.2f * Menv is acceptable at 1 sigma" % (fit_m[1]))
			print("Fit probabilities are %6.4f and %6.4f respectively" % (fit_m[2], fit_m[3]))
			return [0, fit_m[1]], 'lin'
		elif (fit_m[3] < tol) & (fit_m[2] < tol):
			print("A linear fit with I = %4.2f + %4.2f * Menv is acceptable at 1 sigma" % (fit_m[0], fit_m[1]))
			print("Fit probabilities are %6.4f and %6.4f respectively" % (fit_m[2], fit_m[3]))
			return [fit_m[0], fit_m[1]], 'lin'
		# If a linear fit is not good enough, try power-law:
		else:
			fit_m = ofit.lin_test(np.log10(x), np.log10(y))
			if (fit_m[3] < tol) & (fit_m[2] > tol):
				print("A power-law fit with log(I) = %4.2f * log(Menv) is acceptable at 1 sigma" % (fit_m[1]))
				print("Fit probabilities are %6.4f and %6.4f respectively" % (fit_m[2], fit_m[3]))
				return [0, fit_m[1]], 'pow'
			elif (fit_m[3] < tol) & (fit_m[2] < tol):
				print("A power-law fit with log(I) = %4.2f + %4.2f * log(Menv) is acceptable at 1 sigma" % (fit_m[0], fit_m[1]))
				print("Fit probabilities are %6.4f and %6.4f respectively" % (fit_m[2], fit_m[3]))
				return [fit_m[0], fit_m[1]], 'pow'
			else:
				print('I and Menv are not correlated, neither linearly nor by power-law')
				print('Model is terminated, please try again')
				sys.exit()
Example no. 20
    def sutherland_gaunt_factor(self, wavelength):
        r"""
        Calculates the free-free Gaunt factor using the results of [1]_.

        The Gaunt factors of [1]_ are read in using `ChiantiPy.tools.io.gffRead`
        as a function of :math:`u` and :math:`\gamma^2`. The data are interpolated
        to the appropriate wavelength and temperature values using
        `~scipy.ndimage.map_coordinates`.

        References
        ----------
        .. [1] Sutherland, R. S., 1998, MNRAS, `300, 321 <http://adsabs.harvard.edu/abs/1998MNRAS.300..321S>`_
        """
        # calculate scaled quantities
        lower_u = ch_const.planck*(1.e8*ch_const.light)/ch_const.boltzmann/np.outer(self.Temperature,wavelength)
        gamma_squared = (self.Z**2)*ch_const.ryd2erg/ch_const.boltzmann/self.Temperature[:,np.newaxis]*np.ones(lower_u.shape)
        # convert to index coordinates
        i_lower_u = (np.log10(lower_u) + 4.)*10.
        i_gamma_squared = (np.log10(gamma_squared) + 4.)*5.
        # read in sutherland data
        gf_sutherland_data = ch_io.gffRead()
        # interpolate data to scaled quantities
        gf_sutherland = map_coordinates(gf_sutherland_data['gff'],
                                        [i_gamma_squared.flatten(), i_lower_u.flatten()]).reshape(lower_u.shape)

        return np.where(gf_sutherland < 0., 0., gf_sutherland)
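The index-coordinate conversion above assumes the Gaunt-factor table is sampled every 0.1 dex in u starting at log10(u) = -4 and every 0.2 dex in gamma^2 starting at log10(gamma^2) = -4; a minimal standalone sketch of the same map_coordinates pattern on a toy table (not the CHIANTI data):

import numpy as np
from scipy.ndimage import map_coordinates

table = np.random.rand(41, 81)       # toy grid: 41 gamma^2 samples x 81 u samples
log_u, log_g2 = -1.3, 0.6            # physical (log10) query point
i_u = (log_u + 4.) * 10.             # 0.1 dex per index along u
i_g2 = (log_g2 + 4.) * 5.            # 0.2 dex per index along gamma^2
print(map_coordinates(table, [[i_g2], [i_u]]))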
Example no. 21
    def _calculate_histogram(self):
        """Recalculate the histogram, creating new patches"""
        self.clear()
        try:
            data = self.layer[self.att].ravel()
            if not np.isfinite(data).any():
                return False
        except IncompatibleAttribute as exc:
            self.disable_invalid_attributes(*exc.args)
            return False

        if data.size == 0:
            return False

        if self.lo > np.nanmax(data) or self.hi < np.nanmin(data):
            return
        if self.xlog:
            data = np.log10(data)
            rng = [np.log10(self.lo), np.log10(self.hi)]
        else:
            rng = self.lo, self.hi
        nbinpatch = self._axes.hist(data,
                                    bins=self.nbins,
                                    range=rng)
        self._y, self.x, self.artists = nbinpatch
        return True
Example no. 22
def update_all_baseline_plots(i, fig, crawler, lines, norm_cross=False, forward=True):

    if forward:
        try:
            crawler.forward()
        except EOFError as err:
            print(err)
            input("End of File. Press enter to quit.")
            sys.exit()

    burst = crawler

    for k in range(len(BASELINES)):
        if k < 4:
            #autos
            lines[k].set_data(FREQS, 10*np.log10(burst.autos[BASELINES[k]]))
            #overlays
            lines[-(k+1)].set_data(FREQS,10*np.log10(burst.autos[BASELINES[k]]))

        elif norm_cross:
            norm_val = np.array(burst.cross[BASELINES[k]])/np.sqrt(np.array(burst.autos[BASELINES[k][0]*2])*np.array(burst.autos[BASELINES[k][1]*2]))
            lines[k]['real'].set_data(FREQS, np.real(norm_val))
            lines[k]['imag'].set_data(FREQS, np.imag(norm_val))
        else:
            lines[k].set_data(FREQS, 10*np.log10(np.abs(np.real(burst.cross[BASELINES[k]]))))



    return lines
Example no. 23
File: core.py Project: adrn/gala
    def __repr__(self):
        pars = ""
        if not isinstance(self.parameters, OrderedDict):
            keys = sorted(self.parameters.keys()) # to ensure the order is always the same
        else:
            keys = self.parameters.keys()

        for k in keys:
            v = self.parameters[k].value
            par_fmt = "{}"
            post = ""

            if hasattr(v, 'unit'):
                post = " {}".format(v.unit)
                v = v.value

            if isinstance(v, float):
                if v == 0:
                    par_fmt = "{:.0f}"
                elif np.log10(v) < -2 or np.log10(v) > 5:
                    par_fmt = "{:.2e}"
                else:
                    par_fmt = "{:.2f}"

            elif isinstance(v, int) and np.log10(v) > 5:
                par_fmt = "{:.2e}"

            pars += ("{}=" + par_fmt + post).format(k, v) + ", "

        if isinstance(self.units, DimensionlessUnitSystem):
            return "<{}: {} (dimensionless)>".format(self.__class__.__name__, pars.rstrip(", "))
        else:
            return "<{}: {} ({})>".format(self.__class__.__name__, pars.rstrip(", "), ",".join(map(str, self.units._core_units)))
Example no. 24
File: ltisys.py Project: JT5D/scipy
def _default_response_frequencies(A, n):
    """Compute a reasonable set of frequency points for bode plot.

    This function is used by `bode` to compute the frequency points (in rad/s)
    when the `w` argument to the function is None.

    Parameters
    ----------
    A : ndarray
        The system matrix, which is square.
    n : int
        The number of time samples to generate.

    Returns
    -------
    w : ndarray
        The 1-D array of length `n` of frequency samples (in rad/s) at which
        the response is to be computed.
    """
    vals = linalg.eigvals(A)
    # Remove poles at 0 because they don't help us determine an interesting
    # frequency range. (And if we pass a 0 to log10() below we will crash.)
    poles = [pole for pole in vals if pole != 0]
    # If there are no non-zero poles, just hardcode something.
    if len(poles) == 0:
        minpole = 1
        maxpole = 1
    else:
        minpole = min(abs(real(poles)))
        maxpole = max(abs(real(poles)))
    # A reasonable frequency range is two orders of magnitude before the
    # minimum pole (slowest) and two orders of magnitude after the maximum pole
    # (fastest).
    w = numpy.logspace(numpy.log10(minpole) - 2, numpy.log10(maxpole) + 2, n)
    return w
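A quick illustration with an invented system (this assumes scipy's ltisys module-level imports, numpy/linalg/real, are in scope): a state matrix whose poles have real part -0.5 yields a grid spanning two decades on either side of 0.5 rad/s.

import numpy as np
A = np.array([[0.0, 1.0], [-100.0, -1.0]])  # poles at -0.5 +/- 9.99j
w = _default_response_frequencies(A, 100)
print(w.min(), w.max())                     # ~5e-3 and ~5e1 rad/s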
Example no. 25
 def error_vs_updates(self, output_dir=None):
     fig = plt.figure()
     ax = fig.add_subplot(111)
     ax.scatter(log10(centered(self.w_truth)) - log10(centered(self.medians)), self.updates, c=self.stds, cmap=plt.jet(), s=25, clip_on=False, lw=0.5)
     ax.set_xlabel('log10(w_truth)-log10(median w)')
     ax.set_ylabel('num updates')
     show(fig, output_dir, 'error_vs_updates.png')
Example no. 26
    def fluence_dist(self):
        """ Plots the fluence distribution and gives the mean and median fluence
        values of the sample """
        fluences = []
        for i in range(0,len(self.fluences),1):
            try:
                fluences.append(float(self.fluences[i]))

            except ValueError:
                continue

        fluences = np.array(fluences)
        mean_fluence = np.mean(fluences)
        median_fluence = np.median(fluences)
        print('Mean Fluence =',mean_fluence,'(15-150 keV) [10^-7 erg cm^-2]')
        print('Median Fluence =',median_fluence,'(15-150 keV) [10^-7 erg cm^-2]')

        plt.figure()
        plt.xlabel('Fluence (15-150 keV) [$10^{-7}$ erg cm$^{-2}$]')
        plt.ylabel('Number of GRBs')
        plt.xscale('log')
        minimum, maximum = min(fluences), max(fluences)
        plt.axvline(mean_fluence,color='red',linestyle='-')
        plt.axvline(median_fluence,color='blue',linestyle='-')
        plt.hist(fluences,bins= 10**np.linspace(np.log10(minimum),np.log10(maximum),20),color='grey',alpha=0.5)
        plt.show()
Example no. 27
def create_center_frequencies(stt=180, stp=7000, n_bands=32, kind='log'):
    '''
    Define center frequencies for spectrograms.

    Generally this is for auditory spectrogram extraction. Most auditory
    analysis uses 180 - 7000 Hz, so for now those
    are the defaults.

    Parameters
    ----------
    stt : float | int
        The starting frequency
    stp : float | int
        The end frequency
    n_bands : int
        The number of bands to calculate
    kind : 'log' | 'erb'
        Whether to use log or erb spacing

    Returns
    -------
    freqs : array, shape (n_frequencies,)
        An array of center frequencies.
    '''
    if kind == 'log':
        freqs = np.logspace(np.log10(stt), np.log10(stp), n_bands).astype(int)
    elif kind == 'erb':
        freqs = hears.erbspace(stt * Hz, stp * Hz, n_bands)
    else:
        raise ValueError("unknown spacing kind: %s" % kind)
    return freqs
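Usage is straightforward for the 'log' branch (assuming numpy is imported as np; the 'erb' branch additionally needs the external hears/Hz helpers):

freqs = create_center_frequencies(stt=180, stp=7000, n_bands=8, kind='log')
print(freqs)  # eight integer center frequencies, log-spaced from 180 to 7000 Hz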
Example no. 28
def compatibility(par_low, par_high):
    """Quantify spectral compatibility of power-law
    measurements in two energy bands.

    Reference: 2008ApJ...679.1299F Equation (2)

    Compute spectral compatibility parameters for the
    situation where two power laws were measured in a low
    and a high spectral energy band.
    par_low and par_high are the measured parameters,
    which must be lists in the following order:
    e, f, f_err, g, g_err
    where e is the pivot energy, f is the flux density
    and g the spectral index
    """
    # Unpack power-law parameters
    e_high, f_high, f_err_high, g_high, g_err_high = par_high
    e_low, f_low, f_err_low, g_low, g_err_low = par_low

    log_delta_e = np.log10(e_high) - np.log10(e_low)
    log_delta_f = np.log10(f_high) - np.log10(f_low)
    # g_match is the index obtained by connecting the two points
    # with a power law, i.e. a straight line in the log_e, log_f plot
    g_match = -log_delta_f / log_delta_e

    # sigma is the number of standard deviations by which the match index
    # differs from the measured index in one band.
    # (see Funk et al. (2008ApJ...679.1299F) eqn. 2)
    sigma_low = (g_match - g_low) / g_err_low
    sigma_high = (g_match - g_high) / g_err_high
    sigma_comb = np.sqrt(sigma_low ** 2 + sigma_high ** 2)

    return g_match, sigma_low, sigma_high, sigma_comb
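A worked toy example (numbers invented for illustration): connecting f = 1e-11 at e = 1 to f = 1e-13 at e = 100 drops two decades in flux over two decades in energy, so g_match = 1.0; measured indices of 1.1 +/- 0.1 and 0.9 +/- 0.1 then each deviate by one sigma, combining to sqrt(2).

par_low = [1.0, 1e-11, 1e-12, 1.1, 0.1]     # e, f, f_err, g, g_err
par_high = [100.0, 1e-13, 1e-14, 0.9, 0.1]
g_match, s_lo, s_hi, s_comb = compatibility(par_low, par_high)
print(g_match, s_lo, s_hi, s_comb)          # 1.0, -1.0, 1.0, ~1.414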
Example no. 29
    def t90_dist(self):
        """ Plots T90 distribution, gives the mean and median T90 values of the
        sample and calculates the number of short, long bursts in the sample """
        t90s = []
        for i in range(0,len(self.t90s),1):
            try:
                t90s.append(float(self.t90s[i]))

            except ValueError:
                continue

        t90s = np.array(t90s)
        mean_t90 = np.mean(t90s)
        median_t90 = np.median(t90s)
        print('Mean T90 time =',mean_t90,'s')
        print('Median T90 time=',median_t90,'s')
        short_t90s = t90s[t90s < 2]
        long_t90s = t90s[t90s >= 2]
        print('Number of Short/Long GRBs =',len(short_t90s),'/',len(long_t90s))

        plt.figure()
        plt.xlabel('T$_{90}$ (s)')
        plt.ylabel('Number of GRBs')
        plt.xscale('log')
        minimum, maximum = min(short_t90s), max(long_t90s)
        plt.axvline(mean_t90,color='red',linestyle='-')
        plt.axvline(median_t90,color='blue',linestyle='-')
        plt.hist(t90s,bins= 10**np.linspace(np.log10(minimum),np.log10(maximum),20),color='grey',alpha=0.5)
        plt.show()
Example no. 30
    def get_log_fluxes(self):

        # Initialize arrays
        log_flux = np.zeros(self.flux.shape, dtype=np.float64)
        log_error = np.zeros(self.error.shape, dtype=np.float64)
        weight = np.zeros(self.valid.shape, dtype=np.float64)

        # Fluxes
        r = self.valid == 1
        log_flux[r] = np.log10(self.flux[r]) - 0.5 * (self.error[r] / self.flux[r]) ** 2. / np.log(10.)
        log_error[r] = np.abs(self.error[r] / self.flux[r]) / np.log(10.)
        weight[r] = 1. / log_error[r] ** 2.

        # Lower and upper limits
        r = (self.valid == 2) | (self.valid == 3)
        log_flux[r] = np.log10(self.flux[r])
        log_error[r] = self.error[r]

        # Log10[Fluxes]
        r = self.valid == 4
        log_flux[r] = self.flux[r]
        log_error[r] = self.error[r]
        weight[r] = 1. / log_error[r] ** 2.

        # Ignored points
        r = self.valid == 9
        log_flux[r] = np.log10(self.flux[r]) - 0.5 * (self.error[r] / self.flux[r]) ** 2. / np.log(10.)
        log_error[r] = np.abs(self.error[r] / self.flux[r]) / np.log(10.)

        return weight, log_flux, log_error
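The valid == 1 branch is first-order error propagation into log10 space, log_error = (sigma_F/F)/ln(10), plus a second-order bias correction on the mean; a standalone numeric check with toy values:

import numpy as np

flux, err = 100.0, 10.0                      # a 10% flux uncertainty
log_error = np.abs(err / flux) / np.log(10.) # ~0.0434 dex
log_flux = np.log10(flux) - 0.5 * (err / flux)**2 / np.log(10.)
print(log_flux, log_error)                   # 1.9978..., 0.0434...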
Example no. 31
	h = 0.6777
	L_box = 1000.0 / h
	cosmo = cosmoMD
if env == "UNIT_fA1_DIR" or env == "UNIT_fA1i_DIR" or env == "UNIT_fA2_DIR":
	cosmoUNIT = FlatLambdaCDM(H0=67.74 * u.km / u.s / u.Mpc, Om0=0.308900)
	h = 0.6774
	L_box = 1000.0 / h
	cosmo = cosmoUNIT


area = 27143./2. # deg2
sel_area = (abs(data['g_lat'])>20) # &(data['dec']<0)

Mvir = data['HALO_Mvir']
M500c = data['HALO_M500c'].data
logM500c = n.log10(M500c )
log_vmax = n.log10(data['HALO_vmax'])
b_2_a = data['HALO_b_to_a_500c']

HALO_rs = data['HALO_rs'] 
HALO_rvir = data['HALO_rvir'] 
zzs = data['redshift_S']

N_clu = len(Mvir)


path_2_cbp = os.path.join(os.environ['GIT_CBP'])

Mpc=3.0856776e+24
msun=1.98892e33
nsim = 1000000
Example no. 32
    #Find minimum MSE and its index
    min_MSE = np.amin(Kfold_MSE_Lasso)
    min_index = np.where(Kfold_MSE_Lasso == np.amin(Kfold_MSE_Lasso))

    print("Minimum MSE value for Lasso", min_MSE)

    #Plot the MSEs and the corresponding polynomial degree and lambda value

    f, ax = plt.subplots(figsize=(9, 6))

    polymincrop = 5

    ax.add_patch(Rectangle((min_index[1][0], min_index[0][0]-polymincrop), 1, 1, fill=False, edgecolor='pink', lw=3))

    lambdas_labels = np.array_str(np.log10(lambdas))
    Kfold_MSE_Lasso_scaled = 100 * Kfold_MSE_Lasso
    Poly_labels = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13"]
    sns.set(font_scale=1.2)
    ax = sns.heatmap(Kfold_MSE_Lasso_scaled[polymincrop:, :10],
                     cbar=False,
                     annot=True,
                     square=True,
                     yticklabels=Poly_labels[polymincrop-1:],
                     fmt='.2f')
    plt.xlabel(r"$\lambda$ values enumerated low->high")
    plt.ylabel("Polynomial order")
    plt.title(r'MSE of Lasso, scaled $10^2$')
    plt.tight_layout()
    plt.show()
Example no. 33
from photutils import detect_sources

threshold = 2.5
npixels = 5

segm = detect_sources(sig, threshold, npixels)


# --- now measure the flux in every source

# fluxes = np.zeros(segm.nlabels)
# for i in range(segm.nlabels):
#     masked_sci = np.ma.masked_where(segm.data != i+1, sci)
#     flux = np.sum(masked_sci)
#     fluxes[i] = flux

fluxes = np.array([np.sum(sci[np.where(segm.data == i+1)]) for i in range(segm.nlabels)])
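The same per-segment sums can be had without the Python loop; an alternative (not from the original) using np.bincount, which sums the pixel weights for each label value:

fluxes = np.bincount(segm.data.ravel(), weights=sci.ravel(), minlength=segm.nlabels + 1)[1:]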


from photutils import deblend_sources

segm_deblend = deblend_sources(sig, segm, npixels=npixels, nlevels=32, contrast=0.001)

fluxes_deblended = np.array([np.sum(sci[np.where(segm_deblend.data == i+1)]) for i in range(segm_deblend.nlabels)])

plt.hist(np.log10(fluxes), bins=10, alpha = 0.5, label = 'simple')
plt.hist(np.log10(fluxes_deblended), bins=10, alpha = 0.5, label = 'deblended')
plt.legend()
plt.show()
Example no. 34
    def __init__(self):
        # communication parameters
        self.roach_ip = "192.168.1.13"

        # model parameters
        self.bandwidth = 140
        
        # calibration model parameters
        self.cal_acclen_reg = "cal_acc_len"
        self.cal_cntrst_reg = "cal_cnt_rst"
        self.cal_phase_regs = ['cal_phase_re', 'cal_phase_im']
        self.cal_addr_regs  = ['cal_phase_addr']
        self.cal_nbits      = 18
        self.cal_binpt      = 17
        self.cal_we_reg     = 'cal_phase_we'
        self.cal_awidth     = 8   # bits
        self.cal_dwidth     = 128 # bits
        self.cal_pow_dtype  = '>u8'
        self.cal_xab_dtype  = '>i8'
        self.cal_pow_brams  = ["cal_probe0_xpow_pow0", "cal_probe0_xpow_pow1",
                               "cal_probe1_xpow_pow0", "cal_probe1_xpow_pow1",
                               "cal_probe2_xpow_pow0", "cal_probe2_xpow_pow1",
                               "cal_probe3_xpow_pow0", "cal_probe3_xpow_pow1"]
        self.cal_xab_brams  = ["cal_probe0_xab_ab0", "cal_probe0_xab_ab1",
                               "cal_probe0_xab_ab2", "cal_probe0_xab_ab3",
                               "cal_probe1_xab_ab0", "cal_probe1_xab_ab1",
                               "cal_probe1_xab_ab2", "cal_probe1_xab_ab3",
                               "cal_probe2_xab_ab0", "cal_probe2_xab_ab1",
                               "cal_probe2_xab_ab2", "cal_probe2_xab_ab3",
                               "cal_probe3_xab_ab0", "cal_probe3_xab_ab1",
                               "cal_probe3_xab_ab2", "cal_probe3_xab_ab3"]

        # beamforming model parameters
        self.bf_acclen_reg = "bf_acc_len"
        self.bf_cntrst_reg = "bf_cnt_rst"
        self.bf_phase_regs = ['bf_phase_re', 'bf_phase_im']
        self.bf_addr_regs  = ['bf_addr_x', 'bf_addr_y', 
                              'bf_addr_t', 'bf_addr_sub']
        self.bf_we_reg     = 'bf_phase_we'
        self.bf_nbits      = 18
        self.bf_binpt      = 17
        self.bf_awidth     = 10 # bits
        self.bf_dwidth     = 64 # bits
        self.bf_dtype      = '>u8'
        self.bf_brams      = ["bf_probe0_xpow_s0",  "bf_probe0_xpow_s1", 
                              "bf_probe0_xpow_s2",  "bf_probe0_xpow_s3",
                              "bf_probe0_xpow_s4",  "bf_probe0_xpow_s5", 
                              "bf_probe0_xpow_s6",  "bf_probe0_xpow_s7",
                              "bf_probe0_xpow_s8",  "bf_probe0_xpow_s9",  
                              "bf_probe0_xpow_s10", "bf_probe0_xpow_s11",
                              "bf_probe0_xpow_s12", "bf_probe0_xpow_s13",
                              "bf_probe0_xpow_s14", "bf_probe0_xpow_s15"]

        # front-end parameters
        self.ninputs = 16
        # element positions: a 2D array with the XYZ position of each element 
        # in the array. The position is given in wavelength units. The 
        # coordinate system origin of the center of the array.
        self.elpos = [[(0.75, 0,  0.75), (0.25, 0,  0.75), (-0.25, 0,  0.75), (-0.75, 0,  0.75)],
                      [(0.75, 0,  0.25), (0.25, 0,  0.25), (-0.25, 0,  0.25), (-0.75, 0,  0.25)],
                      [(0.75, 0, -0.25), (0.25, 0, -0.25), (-0.25, 0, -0.25), (-0.75, 0, -0.25)],
                      [(0.75, 0, -0.75), (0.25, 0, -0.75), (-0.25, 0, -0.75), (-0.75, 0, -0.75)]]

        # derivative parameters
        self.nchannels = 2**self.cal_awidth
        self.freqs     = np.linspace(0, self.bandwidth, self.nchannels, endpoint=False)
        self.dBFS      = 6.02*8 + 1.76 + 10*np.log10(self.nchannels/2) # Hard-coded 8-bits ADC
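The `dBFS` constant combines the ideal N-bit ADC SNR, 6.02*N + 1.76 dB, with the FFT processing gain 10*log10(Nchannels/2); checking the arithmetic for the hard-coded 8-bit case:

import numpy as np
nchannels = 2**8
print(6.02*8 + 1.76 + 10*np.log10(nchannels/2))  # ~70.99 dB full scale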
Example no. 35
ppchd = pchd[pDchd >= .1]
ppspd = pspd[pDspd >= .1]
ppusd = pusd[pDusd >= .1]

ppDfrd = pDfrd[pDfrd >= .1]
ppDitd = pDitd[pDitd >= .1]
ppDbrd = pDbrd[pDbrd >= .1]
ppDchd = pDchd[pDchd >= .1]
ppDspd = pDspd[pDspd >= .1]
ppDusd = pDusd[pDusd >= .1]

N = 40
points = np.arange(40) / 40  * 3.8
t = 10 ** points
tt = t  / 10.
plt.plot(np.log10(t), np.log10(tt),':', color ='gray')

tt = t  / 100.
plt.plot(np.log10(t), np.log10(tt),':', color ='gray')

tt = t  / 100. * 6.3
plt.plot(np.log10(t), np.log10(tt),'--', color ='gray')
plt.text(1.2,.7, r'10%' , fontsize=18, color = 'gray')
plt.text(3.4,2, r'6.3%', fontsize=18, color = 'gray')
plt.text(3,.7, r'1%' , fontsize=18, color = 'gray')
plt.scatter(np.log10(ppfrd), np.log10(ppDfrd), label = 'Francia', color ='C9')
plt.scatter(np.log10(ppusd), np.log10(ppDusd), label = 'US', color ='green')
plt.scatter(np.log10(ppspd), np.log10(ppDspd), label = r'España', color ='orange')
plt.scatter(np.log10(ppbrd), np.log10(ppDbrd), label = 'Brasil', color ='blue')
plt.scatter(np.log10(ppchd), np.log10(ppDchd), label = 'Chile', color ='k')
Example no. 36
def IntegrateWell(CommunityInstance,
                  well_info,
                  T0=0,
                  T=1,
                  ns=2,
                  return_all=False,
                  log_time=False,
                  compress_resources=False,
                  compress_species=True):
    """
    Integrator for Propagate and TestWell methods of the Community class.
    """
    #MAKE LOGARITHMIC TIME AXIS FOR LONG SINGLE RUNS
    if log_time:
        t = 10**(np.linspace(np.log10(T0), np.log10(T0 + T), ns))
    else:
        t = np.linspace(T0, T0 + T, ns)

    #UNPACK INPUT
    y0 = well_info['y0']

    #COMPRESS STATE AND PARAMETERS TO GET RID OF EXTINCT SPECIES
    S = well_info['params']['S']
    M = len(y0) - S
    not_extinct = y0 > 0
    if not compress_species:
        not_extinct[:S] = True
    if not compress_resources:  #only compress resources if we're running non-renewable dynamics
        not_extinct[S:] = True
    S_comp = np.sum(
        not_extinct[:S])  #record the new point dividing species from resources
    not_extinct_idx = np.where(not_extinct)[0]
    y0_comp = y0[not_extinct]
    not_extinct_consumers = not_extinct[:S]
    not_extinct_resources = not_extinct[S:]

    #Compress parameters
    params_comp = CompressParams(not_extinct_consumers, not_extinct_resources,
                                 well_info['params'],
                                 CommunityInstance.dimensions, S, M)

    #INTEGRATE AND RESTORE STATE VECTOR TO ORIGINAL SIZE
    if return_all:
        out = integrate.odeint(CommunityInstance.dydt,
                               y0_comp,
                               t,
                               args=(params_comp, S_comp),
                               mxstep=10000,
                               atol=1e-4)
        traj = np.zeros((np.shape(out)[0], S + M))
        traj[:, not_extinct_idx] = out
        return t, traj
    else:
        out = integrate.odeint(CommunityInstance.dydt,
                               y0_comp,
                               t,
                               args=(params_comp, S_comp),
                               mxstep=10000,
                               atol=1e-4)[-1]
        yf = np.zeros(len(y0))
        yf[not_extinct_idx] = out
        return yf
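The compress/restore pattern used above can be exercised on its own; a minimal sketch with invented state sizes (not the Community class API):

import numpy as np

S, M = 4, 3                                  # species, resources
y0 = np.array([1.0, 0.0, 2.0, 0.0, 0.5, 0.0, 0.1])
not_extinct = y0 > 0
not_extinct_idx = np.where(not_extinct)[0]
y0_comp = y0[not_extinct]                    # integrate only the live variables
out = y0_comp * 2.0                          # stand-in for the ODE output
yf = np.zeros(len(y0))
yf[not_extinct_idx] = out                    # restore the original layout
print(yf)                                    # zeros remain where variables were extinct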
Example no. 37
def m500_to_qty(logM500, z, slope_efunc, slope_m500, normalization): return n.e**normalization * \
    (cosmoMD.efunc(z) / E035)**(slope_efunc) * (10**(logM500 - n.log10(6) - 14))**(slope_m500)


def logM500_to_logMgas(
Example no. 38
def Plot_Topology_Robustness(R_T, runs):

    data_points = np.zeros((9, len(COLOURS), runs), dtype=np.float64)
    N_runs = -1
    N_mut = -1
    Colours = []

    for run in range(runs):
        #lines=[line.rstrip('\n') for line in open('/rscratch/asl47/Topology_Robustness_{}_R{}.txt'.format(R_T,run+1))]
        #lines=[line.rstrip('\n') for line in open('/rscratch/asl47/Topology_Robustness_R{}_2.txt'.format(run+1))]
        lines = [
            line.rstrip('\n')
            for line in open('/rscratch/asl47/Topology_Robustness_{}_R{}.txt'.
                             format(R_T, run + 1))
        ]
        for line in lines:
            if 'runs' in line:
                N_runs = int(line.split()[-2])
            elif 'N_mutations:' in line:
                N_mut = int(line.split()[-1])
            else:
                for C, R in enumerate(line.split(' ')[3::4]):
                    data_points[N_mut][int(C)][run] = float(R) / N_runs

    pcs = [cm.inferno(x) for x in np.linspace(0, 0.9, len(COLOURS))]
    mean_data = np.mean(data_points, axis=2, dtype=np.float64)
    MuLs_local = np.logspace(-2, np.log10(8), 1000)
    binom_Set = binom(8, MuLs / 8.)
    binom_local = binom(8, MuLs_local / 8.)
    c_r = {}

    #for C,colour_data in enumerate(mean_data.T):
    #    plt.plot(range(9),colour_data,color=pcs[C],ls='-')
    #plt.plot(MuLs_local,np.sum(binom_local.pmf(T)*(1-mean_data[T][C]) for T in xrange(9)),color=pcs[C],ls=':')

    f, (ax, ax2) = plt.subplots(2, 1)
    for C, colour_data in enumerate(mean_data.T):
        ax.plot(list(range(9)), colour_data, color=pcs[C])
        ax2.plot(MuLs_local,
                 sum(binom_local.pmf(T) * (1 - mean_data[T][C])
                     for T in range(9)),
                 color=pcs[C])
        c_r[C] = sum(
            binom_Set.pmf(T) * (1 - mean_data[T][C]) for T in range(9))

    sm = plt.cm.ScalarMappable(cmap=cm.inferno,
                               norm=plt.Normalize(vmin=0, vmax=0.9))
    sm._A = []

    cax = f.add_axes([0.1, 0.625, 0.5, 0.05])

    cbar = f.colorbar(sm, cax=cax, orientation='horizontal')
    cbar.set_ticks(np.arange(0, 0.9, 1. / (1 + len(COLOURS))))
    cbar.set_ticklabels(COLOURS)
    cbar.ax.set_title('Colours')
    ax.set_yscale('log', nonposy='mask')

    ax.set_ylim((10**-8, 1))
    ax.set_xlabel(r'$N_{\textrm{mutations}}$', fontsize=18)
    ax.set_ylabel(r'$\chi_{\mathrm{robust}}$', fontsize=18)
    ax.set_title(r'\textbf{{{}}}'.format('Thick Cross' if R_T ==
                                         'TC' else 'Small Cross'),
                 fontsize=24)

    ax2.set_xlabel(r'$\langle \mu L\rangle$', fontsize=18)
    ax2.set_ylabel(r'$\chi_{\mathrm{del}}$', fontsize=18)
    ax2.set_xscale('log')
    ax2.set_yscale('log')

    plt.show(block=False)
    return c_r
Example no. 39
    def figure(
        self,
        title="Manhattan Plot",
        showgrid=True,
        xlabel=None,
        ylabel='-log10(p)',
        point_size=5,
        showlegend=True,
        col=None,
        suggestiveline_value=-np.log10(1e-8),
        suggestiveline_color='blue',
        suggestiveline_width=1,
        genomewideline_value=-np.log10(5e-8),
        genomewideline_color='red',
        genomewideline_width=1,
        highlight=True,
        highlight_color="red",
    ):
        """

    Keyword arguments:

    - title (string; optional) The title of the graph. (Default:
        "Manhattan Plot")
    - showgrid (bool; optional): Boolean indicating whether gridlines should be
        shown. (Default: True)
    - xlabel (string; optional): Label of the x axis. (Default: None)
    - ylabel: (string; optional): Label of the y axis. (Default:
        "-log10(p)")
    - point_size (number; optional): Size of the points of the Scatter
        plot. (Default: 5)
    - showlegend (bool; optional): Boolean indicating whether legends should be
        shown. (Default: True)
    - col (string; optional): A string representing the color of the
        points of the Scatter plot. Can be in any color format accepted by
        plotly_js graph_objs. (Default: None)
    - suggestiveline_value (bool/float; optional): A value which must
        be False to deactivate the option, or a numerical value
        corresponding to the p-value at which the line should be drawn.
        The line has no influence on the data points. (Default:
        -np.log10(1e-8))
    - suggestiveline_color (string; optional): Color of the suggestive
      line. (Default: "blue")
    - suggestiveline_width (number): Width of the suggestive
      line. (Default: 1)
    - genomewideline_value (bool/float; optional): A boolean which must be
        False to deactivate the option, or a numerical value corresponding
        to the p-value above which the data points are considered
        significant. (Default: -np.log10(5e-8))
    - genomewideline_color (string; optional): Color of the genome wide
        line. Can be in any color format accepted by plotly_js
        graph_objs. (Default: "red")
    - genomewideline_width (number; optional): Width of the genome wide
      line. (Default: 1)
    - highlight (bool; optional): turning on/off the highlighting of
        data points considered significant. (Default: True)
    - highlight_color (string; optional): Color of the data points
        highlighted because they are significant Can be in any color
        format accepted by plotly_js graph_objs. (Default: "red")

    Returns:
    - A figure formatted for plotly.graph_objs."""

        xmin = min(self.data[self.pos].values)
        xmax = max(self.data[self.pos].values)

        horizontallines = []

        if suggestiveline_value:
            suggestiveline = go.layout.Shape(name=SUGGESTIVE_LINE_LABEL,
                                             type="line",
                                             fillcolor=suggestiveline_color,
                                             line=dict(
                                                 color=suggestiveline_color,
                                                 width=suggestiveline_width),
                                             x0=xmin,
                                             x1=xmax,
                                             xref="x",
                                             y0=suggestiveline_value,
                                             y1=suggestiveline_value,
                                             yref="y")
            horizontallines.append(suggestiveline)

        if genomewideline_value:
            genomewideline = go.layout.Shape(name=GENOMEWIDE_LINE_LABEL,
                                             type="line",
                                             fillcolor=genomewideline_color,
                                             line=dict(
                                                 color=genomewideline_color,
                                                 width=genomewideline_width),
                                             x0=xmin,
                                             x1=xmax,
                                             xref="x",
                                             y0=genomewideline_value,
                                             y1=genomewideline_value,
                                             yref="y")
            horizontallines.append(genomewideline)

        data_to_plot = []  # To contain the data traces
        tmp = pd.DataFrame()  # Empty DataFrame to contain the highlighted data

        if highlight:
            if not isinstance(highlight, bool):
                if self.snpName not in self.data.columns.values:
                    raise KeyError(
                        "snp argument specified for highlight as %s but "
                        "column not found in the data.frame" % self.snpName)
            else:
                if not genomewideline_value:
                    raise Warning(
                        "The genomewideline_value you entered is not a "
                        "positive value, or False, you cannot set highlight "
                        "to True in that case.")
                tmp = self.data

                # Select the p-values (or -log10(p-values)) above the line
                if genomewideline_value:
                    if self.logp:
                        tmp = tmp.loc[
                            -np.log10(tmp[self.pName]) > genomewideline_value]
                    else:
                        tmp = tmp.loc[tmp[self.pName] > genomewideline_value]

                highlight_hover_text = _get_hover_text(
                    tmp,
                    snpname=self.snpName,
                    genename=self.geneName,
                    annotationname=self.annotationName)

                if not tmp.empty:
                    data_to_plot.append(
                        go.Scattergl(x=tmp[self.pos].values,
                                     y=-np.log10(tmp[self.pName].values)
                                     if self.logp else tmp[self.pName].values,
                                     mode="markers",
                                     text=highlight_hover_text,
                                     marker=dict(color=highlight_color,
                                                 size=point_size),
                                     name="Point(s) of interest"))

        # Remove the highlighted data from the DataFrame if not empty
        if tmp.empty:
            data = self.data
        else:
            data = self.data.drop(self.data.index[tmp.index])

        if self.nChr == 1:

            if col is None:
                col = ['black']

            # If single chromosome, ticks and labels automatic.
            layout = go.Layout(title=title,
                               xaxis={
                                   'title':
                                   self.xlabel if xlabel is None else xlabel,
                                   'showgrid': showgrid,
                                   'range': [xmin, xmax],
                               },
                               yaxis={'title': ylabel},
                               hovermode='closest')

            hover_text = _get_hover_text(data,
                                         snpname=self.snpName,
                                         genename=self.geneName,
                                         annotationname=self.annotationName)

            data_to_plot.append(
                go.Scattergl(x=data[self.pos].values,
                             y=-np.log10(data[self.pName].values)
                             if self.logp else data[self.pName].values,
                             mode="markers",
                             showlegend=showlegend,
                             marker={
                                 'color': col[0],
                                 'size': point_size,
                                 'name': "chr%i" % data[self.chrName].unique()
                             },
                             text=hover_text))
        else:
            # if multiple chrms, use the ticks and labels you created above.
            layout = go.Layout(title=title,
                               xaxis={
                                   'title':
                                   self.xlabel if xlabel is None else xlabel,
                                   'showgrid': showgrid,
                                   'range': [xmin, xmax],
                                   'tickmode': "array",
                                   'tickvals': self.ticks,
                                   'ticktext': self.ticksLabels,
                                   'ticks': "outside"
                               },
                               yaxis={'title': ylabel},
                               hovermode='closest')

            icol = 0
            if col is None:
                col = [
                    'black' if np.mod(i, 2) else 'grey'
                    for i in range(self.nChr)
                ]

            for i in data[self.index].unique():

                tmp = data[data[self.index] == i]

                chromo = tmp[self.chrName].unique()  # Get chromosome name

                hover_text = _get_hover_text(
                    tmp,
                    snpname=self.snpName,
                    genename=self.geneName,
                    annotationname=self.annotationName)

                data_to_plot.append(
                    go.Scattergl(x=tmp[self.pos].values,
                                 y=-np.log10(tmp[self.pName].values)
                                 if self.logp else tmp[self.pName].values,
                                 mode="markers",
                                 showlegend=showlegend,
                                 name="Chr%i" % chromo,
                                 marker={
                                     'color': col[icol],
                                     'size': point_size
                                 },
                                 text=hover_text))

                icol = icol + 1

        layout.shapes = horizontallines

        return go.Figure(data=data_to_plot, layout=layout)
Esempio n. 40
0
degreeCount = collections.Counter(degree_sequence)
deg, cnt = zip(*degreeCount.items())
plt.bar(deg, cnt, width=0.80, color="b")
plt.title("Degree Histogram")
plt.ylabel("Count")
plt.xlabel("Degree")

plt.show()
plt.clf()

print("The network is a power-law")
print("G is a directed graph with unweighted links")
print("The network is weakly connected")
print("The network is highly clustered")

bin_edges = np.logspace(np.log10(kmin), np.log10(kmax), num=20)
density, _ = np.histogram(degrees, bins=bin_edges, density=True)
fig = plt.figure(figsize=(6, 4))
log_be = np.log10(bin_edges)
x = 10**((log_be[1:] + log_be[:-1]) / 2)
plt.loglog(x, density, marker='o', linestyle='none')
plt.xlabel(r"degree $k$", fontsize=16)
plt.ylabel(r"$P(k)$", fontsize=16)
plt.show()
plt.clf()
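# A minimal added sketch (assuming the sampled bins all lie in the power-law
# regime): estimate the degree exponent gamma from the slope of the
# log-binned distribution computed above.
valid = density > 0
slope, intercept = np.polyfit(np.log10(x[valid]), np.log10(density[valid]), 1)
print("Estimated power-law exponent gamma: %.2f" % -slope)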

fig = plt.figure(figsize=(10, 10))
nx.draw_spring(G, node_size=20, node_color="purple")
plt.show()
plt.clf()
Esempio n. 41
0
def comm_profile(logdir, cfg, df_gpu):
    total_traffic = 0.0
    total_h2d_traffic = 0.0
    total_d2h_traffic = 0.0
    total_p2p_traffic = 0.0
    total_memcopy_time = 0.0

    # sofa_fieldnames = [
    #    'timestamp',
    #    "event",
    #    "duration",
    #    "deviceId",
    #    "copyKind",
    #    "payload",
    #    "bandwidth",
    #    "pkt_src",
    #    "pkt_dst",
    #    "pid",
    #    "tid",
    #    "name",
    #    "category"]
    n_gpus = 0
    for i in range(len(df_gpu)):
        if df_gpu.iat[i, 3] > n_gpus:
            n_gpus = int(df_gpu.iat[i, 3])

    if n_gpus == 0:
        print_warning("No GPU communication traces were collected.")
        return

    print_title("Data Traffic for each CopyKind (MB)")
    data_copyKind = grouped_df = df_gpu.groupby("copyKind")["payload"]
    for key, item in grouped_df:
        print((
            "[%s]: %lf" %
            (cktable[key], grouped_df.get_group(key).sum() / 1000000.0)))
        if int(key) == 1:
            total_h2d_traffic = grouped_df.get_group(key).sum() / 1000000.0
        if int(key) == 2:
            total_d2h_traffic = grouped_df.get_group(key).sum() / 1000000.0
        if int(key) == 10:
            total_p2p_traffic = grouped_df.get_group(key).sum() / 1000000.0
        if int(key) != 8:
            total_traffic = total_traffic + \
                grouped_df.get_group(key).sum() / 1000000.0
    print(("Total traffic: %.2lf" % total_traffic))

    print_title("Data Communication Time for each CopyKind (s)")
    durations_copyKind = grouped_df = df_gpu.groupby("copyKind")["duration"]
    for key, item in grouped_df:
        print(("[%s]: %lf" % (cktable[key], grouped_df.get_group(key).sum())))
        if key == 0:
            total_kernel_time = grouped_df.get_group(key).sum()
        else:
            total_memcopy_time = total_memcopy_time + \
                grouped_df.get_group(key).sum()

    bw = (data_copyKind.sum() / 1000000) / durations_copyKind.sum() / 1000
    bw_h2d = bw_d2h = bw_p2p = avg_bw = 1e-10

    for i in range(len(bw)):
        key = list(bw.keys())[i]
        if cktable[key] == 'H2D' or cktable[key] == 'D2H' or cktable[key] == 'D2D' or cktable[key] == 'P2P': 
            print(("Averaged Achieved %s Unidirectional Bandwidth: %.1f (GB/s)" % (cktable[key], bw.iloc[i])))
        else:
            continue

    print_title("Summary of Comm.")
    print(("MeasuredTotalTraffic : %lf (MB)" % total_traffic))
    print(("MeasuredTotalH2DTraffic : %lf (MB)" % total_h2d_traffic))
    print(("MeasuredTotalD2HTraffic : %lf (MB)" % total_d2h_traffic))
    print(("MeasuredTotalP2PTraffic : %lf (MB)" % total_p2p_traffic))

    accum = np.zeros((1 + n_gpus, 1 + n_gpus))
    accum_count = np.zeros((1 + n_gpus, 1 + n_gpus))

    # TODO: Parallelize payload accumulation
    #print("df length: %d" % len(df_gpu))
    #cpu_count = mp.cpu_count()
    #pool = mp.Pool(processes=cpu_count)
    #res_accum = pool.map( partial(payload_sum), df_gpu)

    for i in range(len(df_gpu)):
        if df_gpu.iat[i, 4] == 0 or df_gpu.iat[i, 4] == 8:
            continue
        src = df_gpu.iat[i, 7]
        dst = df_gpu.iat[i, 8]
        payload = df_gpu.iat[i, 5]
        accum[src][dst] = float(accum[src][dst] + payload)
        accum_count[src][dst] = int(accum_count[src][dst] + 1)
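    # A vectorized alternative to the row-by-row loop above (a sketch,
    # assuming df_gpu carries the named columns listed in sofa_fieldnames:
    # "copyKind", "pkt_src", "pkt_dst", "payload"):
    # xfers = df_gpu[~df_gpu["copyKind"].isin([0, 8])]
    # for (src, dst), g in xfers.groupby(["pkt_src", "pkt_dst"])["payload"]:
    #     accum[int(src)][int(dst)] = g.sum()
    #     accum_count[int(src)][int(dst)] = len(g)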

    print("Traffic Matrix (log10(B)):")
    row_str = "\tHOST\t"
    for i in range(1, accum.shape[1]):
        row_str = row_str + "GPU%d" % i + "\t"
    print(row_str)
    for i in range(accum.shape[0]):
        if i == 0:
            row_str = "HOST\t"
        else:
            row_str = "GPU%d\t" % i
        
        for j in range(accum.shape[1]):
            row_str = row_str + "%d" % (int(np.log10(1 + accum[i][j]))) + "\t"
        print(row_str)

    print("Traffic Matrix (MB):")
    row_str = "\tHOST\t"
    for i in range(1, accum.shape[1]):
        row_str = row_str + "GPU%d" % i + "\t"
    print(row_str)
    for i in range(accum.shape[0]):
        if i == 0:
            row_str = "HOST\t"
        else:
            row_str = "GPU%d\t" % i
        
        for j in range(accum.shape[1]):
            row_str = row_str + "%d" % (accum[i][j] / (1024 * 1024)) + "\t"
        print(row_str)


    df_gpu.to_csv(
        logdir + '/' + 'comm.csv',
        columns=[
            "timestamp",
            "pkt_src",
            "pkt_dst",
            "payload",
            "bandwidth"])
Esempio n. 42
0
    def temperature_guess(self,
                          T_in,
                          p_in,
                          T_out,
                          tube_diameters_in,
                          tube_diameters_out,
                          tube_conductivity,
                          emissions_guess,
                          coating_thickness,
                          coating_conductivity,
                          tube_roughness,
                          uconvloss,
                          passive=None,
                          tube_material=None):
        '''
		Makes a first guess of the temperature profiles, approximating the enthalpy gain of the water/steam mixture as equal to the flux input on the external walls of the tubes. The tube walls are coated with a selective coating. Default arguments are for Pyromark2500(R).

		Arguments:
		- T_in: Inlet temperature of the water in (K).
		- p_in: Inlet pressure of the water in (Pa).
		- T_out: Outlet temperature of the water in (K).
		- tube_diameters_in: inner diameter of the tubes in (m).
		- tube_diameters_out: outer diameter of the tubes in (m).
		- tube_conductivity: thermal conductivity of the tubes in (W/mK).
		- emissions_guess: emissive losses guess to account for thermal emissions in (W).
		- passive: array of the indices of the adiabatic surfaces in the cavity.
		- coating_thickness: thickness of the coating layer on the tubes in (m).
		- coating_conductivity: coating thermal conductivity in (W/mK).
		- tube_roughness: absolute roughness of the inner tube surface in (m).
		- uconvloss: external convective loss coefficient in (W/m^2K).

		Returns:
		- the string 'good_geom' or 'bad_geom', depending on whether the mass-flow guess needed to meet the input/output arguments is consistent with the power actually entering the receiver. This is a quick hack to prevent non-performing geometries from forcing the required input/output by lowering the mass flow too much (or making it negative), which would corrupt the enthalpy guess and derail the convergence process.
		'''
        # Get active surfaces net radiative power
        active = N.ones(len(self.areas), dtype=bool)
        active[0] = False  # aperture

        if passive is not None:
            active[passive] = False

        # Check the tube_diameters arrays or make one:
        tube_diameters_in = N.hstack([tube_diameters_in])
        tube_diameters_out = N.hstack([tube_diameters_out])
        if len(tube_diameters_in) != (N.sum(active) + 1):
            if len(tube_diameters_in) == 1:
                tube_diameters_in = N.ones(N.sum(active) +
                                           1) * tube_diameters_in[0]
            else:
                raise ValueError('tube_diameters_in has an invalid length')
        if len(tube_diameters_out) != (N.sum(active) + 1):
            if len(tube_diameters_out) == 1:
                tube_diameters_out = N.ones(N.sum(active) +
                                            1) * tube_diameters_out[0]
            else:
                raise ValueError('tube_diameters_out has an invalid length')

        # Equivalent tube sections radii:
        R_in = (tube_diameters_in[:-1] + tube_diameters_in[1:]) / 4.
        R_out = (tube_diameters_out[:-1] + tube_diameters_out[1:]) / 4.
        self.R_in = R_in
        self.R_out = R_out

        # Get tubes lengths and positions:
        tube_lengths = N.array(self.areas[active] / (2. * R_out))
        self.tube_lengths = tube_lengths + 2. * N.pi * (R_out - R_in)
        tube_positions = N.add.accumulate(N.hstack([0, self.tube_lengths]))

        # Initialise pressures and steam quality at each position along the flow path:
        self.p = N.ones(len(tube_positions)) * p_in
        self.qual = N.zeros(len(tube_positions) - 1)
        self.v = N.zeros(len(tube_positions))

        # Correlation single phase:
        def single_phase_u(Re, Pr, f_F, k, tube_D):
            if Re < 1e4:
                #if Re<1e5: # STG code error
                # Gnielinski:
                return ((Re - 1000.) * Pr * (f_F * k / (2. * tube_D))) / (
                    1. + 12.7 * (Pr**(2. / 3.) - 1.) * N.sqrt(f_F / 2.))
            else:
                # Petukhov
                return (Re * Pr * (f_F * k / (2. * tube_D)) /
                        (1.07 + 12.7 *
                         (Pr**(2. / 3.) - 1.) * N.sqrt(f_F / 2.)))
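        # Note (added): Gnielinski's correlation is commonly quoted for
        # 3e3 < Re < 5e6 and Petukhov's for 1e4 < Re < 5e6, which is
        # consistent with the switch at Re = 1e4 above.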

        # Evaluate convective losses and qnets:
        Qconvloss = uconvloss * self.areas[1:] * (self.T[1:] - self.T[0])
        qnets = self.bin_abs[active[1:]] - emissions_guess[
            active[1:]] - Qconvloss[active[1:]]

        # Get starting enthalpy via Freesteam and initialise enthalpy array:
        h_in = steam_pT(p_in, T_in).h
        h_out = steam_pT(self.p[-1], T_out).h
        hs_p = h_in + N.add.accumulate(
            N.hstack([0, qnets]) / N.sum(qnets)) * (h_out - h_in)
        self.h = N.ones(len(qnets) + 1) * h_in

        # Enthalpy convergence loop:
        conv_h = N.ones(len(hs_p)) * N.inf
        iterh = 0
        while (conv_h > 1e-9).any():
            # Evaluate the mass-flow:
            self.m = N.sum(qnets) / (h_out - h_in)

            # Evaluate convective losses and qnets:
            Qconvloss = uconvloss * self.areas[1:] * (self.T[1:] - self.T[0])
            qnets = self.bin_abs[active[1:]] - emissions_guess[
                active[1:]] - Qconvloss[active[1:]]

            # Initialise internal convective heat transfer coefficient:
            uconv = N.zeros(len(tube_diameters_in) - 1)

            # Go through the flow-path, actualise the pressures and evaluate the heat transfer coefficients.
            for i in range(len(tube_positions) - 1):

                # Evaluate the steam properties:
                steam_state = steam_ph(self.p[i], hs_p[i])
                rho = steam_state.rho
                Cp = steam_state.cp
                x = steam_state.x

                Tsat = Tsat_p(self.p[i])
                steam_L = steam_Tx(Tsat, 0.)
                steam_G = steam_Tx(Tsat, 1.)
                h_LG = steam_G.h - steam_L.h

                qual = (hs_p[i] - steam_L.h) / h_LG
                v = self.m / (rho * N.pi * (tube_diameters_in[i] / 2.)**2.)
                if qual <= 0.:
                    mu = steam_state.mu
                    k = steam_state.k
                    # Calculate Reynolds, Prandtl, Darcy and Fanning friction factors
                    Re = rho * v * tube_diameters_in[i] / mu
                    Pr = mu / (k / Cp)

                S = N.log(Re / (1.816 * N.log(1.1 * Re /
                                              (N.log(1. + 1.1 * Re)))))
                f_D = (
                    -2. *
                    N.log10(tube_roughness /
                            (3.71 * tube_diameters_in[i]) + 2.18 * S / Re)
                )**(
                    -2.
                )  # Brkic using Lambert W-function approximation to solve Colebrook's implicit equation.
                f_F = 0.25 * f_D

                # Calculate heat transfer coefficient:
                uconv[i] = single_phase_u(Re, Pr, f_F, k, tube_diameters_in[i])

                rho_L = steam_L.rho
                rho_G = steam_G.rho

                mult = 1.  # Carey Friction factor multiplier

                if ((qual > 0.) and (qual < 1.)):
                    mu_L = steam_L.mu
                    mu_G = steam_G.mu
                    #v_L = self.m/(rho_L*N.pi*(tube_diameters_in[i]/2.)**2.)
                    #Re_L = rho_L*v_L*tube_diameters_in[i]/mu_L
                    #mult = (1.+(mu_L/mu_G-1.)*qual)**(-0.25) # Carey Friction factor multiplier
                    #f_D = 4.*mult*(1.58*N.log(Re_L)-3.28)**(-2) # Correlation in Kandlikar (less precise presumably) which gives significantly lower heat transfer coefficients in the pre-dryout region

                    if (qual < 0.8):

                        k_L = steam_L.k
                        Cp_L = steam_L.cp
                        v_L = self.m / (rho_L * N.pi *
                                        (tube_diameters_in[i] / 2.)**2.)

                        h_LG = steam_G.h - steam_L.h

                        Re_L = rho_L * v_L * tube_diameters_in[i] / mu_L
                        #Re_L = rho_L*v_L*(1.-qual)*tube_diameters_in[i]/mu_L # from Notes from Jose. My interpretation is all liquid in the tube (saturated then).
                        Pr_L = mu_L / (k_L / Cp_L)
                        S_L = N.log(Re_L /
                                    (1.816 * N.log(1.1 * Re_L /
                                                   (N.log(1. + 1.1 * Re_L)))))
                        f_F_L = 0.25 * (
                            (-2. * N.log10(tube_roughness /
                                           (3.71 * tube_diameters_in[i]) +
                                           2.18 * S_L / Re_L))**(-2.))
                        #f_F_L = (1.58*N.log(Re_L)-3.28)**(-2) # Correlation in Kandlikar (less precise presumably) which gives significantly lower heat transfer coefficients in the pre-dryout region

                        # Kandlikar
                        Co = (rho_G / rho_L)**0.5 * ((1. - qual) / qual)**0.8

                        if i == 0:
                            Bo = 0.
                        else:
                            Bo = qnets[i] / (N.pi * R_in[i] * tube_lengths[i]
                                             ) / (rho * v * h_LG)
                            #Bo = qnets[i]/(N.pi*R_out[i]*tube_lengths[i])/(rho*v*h_LG) # Changed to the outer flux to compare with the STG version

                        uconv_L = single_phase_u(Re_L, Pr_L, f_F_L, k_L,
                                                 tube_diameters_in[i])

                        uconvNB = uconv_L * (0.6683 * Co**(-0.2) + 1058. *
                                             Bo**0.7) * (1. - qual)**0.8
                        uconvCB = uconv_L * (1.136 * Co**(-0.9) + 667.2 *
                                             Bo**0.7) * (1. - qual)**0.8

                        uconv[i] = N.amax([uconvNB, uconvCB])

                    elif qual < 0.9:  # Here the validity of the correlation is changed from the STG version.
                        # Groeneveld
                        a = 1.09e-3
                        b = 0.989
                        c = 1.41
                        d = -1.15

                        Y = 1. - 0.1 * ((rho_L / rho_G - 1.) *
                                        (1. - qual))**0.4

                        k_G = steam_G.k
                        Cp_G = steam_G.cp
                        v_G = self.m / (rho_G * N.pi *
                                        (tube_diameters_in[i] / 2.)**2.)

                        Re_G = rho_G * v_G * tube_diameters_in[i] / mu_G
                        Pr_G = mu_G / (k_G / Cp_G)

                        uconv[i] = a * (
                            Re_G * (qual + rho_G / rho_L * (1. - qual))
                        )**b * Pr_G**c * Y**d * k_G / tube_diameters_in[i]

                # Calculate pressure drop for the next element:
                #f_D = 4.*f_F # For using the friction factor formula advised by Kandlikar
                dp = f_D * self.tube_lengths[i] / (2. *
                                                   R_in[i]) * rho * v**2. / 2.
                steam_next = steam_ph(self.p[i + 1], self.h[i + 1])
                rho_next = steam_next.rho
                v_next = self.m / (rho_next * N.pi *
                                   (tube_diameters_in[i + 1] / 2.)**2.)
                self.p[i + 1] = self.p[
                    i] + rho * v**2. / 2. - rho_next * v_next**2. / 2. - dp
                self.v[i] = v

                # Store dryness fractions:
                self.qual[i] = qual

                # Re-evaluate enthalpies:
                hs_p[i + 1] = hs_p[i] + qnets[i] / self.m

            # Final velocity storage:
            self.v[i + 1] = v_next

            # Evaluate the enthalpy at the outlet:
            #h_out = steam_pT(self.p[-1], T_out).h
            h_out = hs_p[-1]

            # FIXME: need a more reliable convergence safeguard
            if self.m < 0.01:
                print('bad_geom')
                return 'bad_geom'

            # Evaluate convergence:
            conv_h = N.abs((self.h - hs_p) / self.h)
            self.h = (self.h + hs_p) / 2.

            iterh += 1
            if iterh > 100:
                print(conv_h)
                print(self.h)
                raise RuntimeError('enthalpy loop failed to converge '
                                   'after 100 iterations')

        # Get the tube elements properties:
        self.uconv = uconv

        # Get temperatures from enthalpies via Freesteam
        T_guess_fluid = N.zeros(len(self.h))
        T_guess_fluid[0] = T_in
        for i in range(1, len(self.h)):
            T_guess_fluid[i] = steam_ph(self.p[i], self.h[i]).T

        # Central differences scheme:
        self.T_guess_fluid = (T_guess_fluid[:-1] + T_guess_fluid[1:]) / 2.

        T_guess_wall = N.zeros(len(self.areas) - 1)

        Rconv = 1. / (N.pi * tube_lengths * R_in * self.uconv)
        Rcond = 1. / (N.pi * tube_lengths) * (
            N.log(R_out / R_in) / tube_conductivity + N.log(
                (R_out + coating_thickness) / R_out) / coating_conductivity)

        T_guess_wall[active[1:]] = self.T_guess_fluid + qnets * (Rconv + Rcond)

        self.T_wall_in = self.T_guess_fluid + qnets * (Rconv)

        self.T_guess = T_guess_wall
        self.tube_positions = tube_positions
        self.Q_conv_loss = Qconvloss

        assert not N.isinf(self.T_guess).any(), str(
            self.T_guess) + str(T_guess_wall) + str(emissions_guess) + str(
                self.m) + str([
                    self.apertureRadius, self.frustaRadii, self.frustaDepths,
                    self.coneDepth
                ])
        return 'good_geom'
Esempio n. 43
0
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np

hdu = fits.open('The Hipparcos Main Catalogue.fit')
hdu.info()
hdu[0].header

# Keep stars with parallax between 10 and 1000 mas (distances of ~1 pc to 100 pc)
Plx1 = (hdu[1].data['Plx']>10)
Plx2 = (hdu[1].data['Plx']<1000)
Plxs = Plx1*Plx2
dat = hdu[1].data[Plxs]

S = dat['Plx']/1000  # parallax in arcsec
Mv = 5+dat['Vmag']+5*np.log10(S)  # absolute magnitude: M = m + 5 + 5*log10(plx["])

n = plt.hist(Mv,14,range=(-2,12),ls='--',histtype='step')
plt.close()

plt.figure(figsize=(10,7))
V = 4.0/3.0*np.pi*(100**3-1**3)  # surveyed volume in pc^3 (shell from 1 pc to 100 pc)
x = n[1][0:len(n[1])-1]+0.5  # bin centres (1 mag wide bins)
y = np.log10(n[0][0:]/V)+10  # log of the luminosity function, offset by +10
plt.plot(x, y,'k')

plt.xlabel('Mv')
plt.ylabel(r'log $\phi$(M)+10')
plt.savefig('IMF.png')
plt.show()
Esempio n. 44
0
def ManhattanPlot(
    dataframe,
    chrm="CHR",
    bp="BP",
    p="P",
    snp="SNP",
    gene="GENE",
    annotation=None,
    logp=True,
    title="Manhattan Plot",
    showgrid=True,
    xlabel=None,
    ylabel='-log10(p)',
    point_size=5,
    showlegend=True,
    col=None,
    suggestiveline_value=-np.log10(1e-8),
    suggestiveline_color='#636efa',
    suggestiveline_width=1,
    genomewideline_value=-np.log10(5e-8),
    genomewideline_color='#EF553B',
    genomewideline_width=1,
    highlight=True,
    highlight_color="red",
):
    """Returns a figure for a manhattan plot.

Keyword arguments:
- dataframe (dataframe; required): A pandas dataframe which must contain at
    least the following  three columns:
            - the chromosome number
            - genomic base-pair position
            - a numeric quantity to plot such as a p-value or zscore
- chrm (string; optional): A string denoting the column name for the
    chromosome.  This column must be float or integer.  Minimum number
    of chromosomes required is 1. If you have X, Y, or MT chromosomes,
    be sure to renumber these 23, 24, 25, etc. (Default: "CHR")
- bp (string; optional): A string denoting the column name for the
    chromosomal position. (Default: "BP")
- p (string; optional): A string denoting the column name for the
    float quantity to be plotted on the y-axis. This column must be
    numeric. This does not have to be a p-value. It can be any
    numeric quantity such as peak heights, bayes factors, test
    statistics. If it is not a p-value, make sure to set logp = FALSE.
    (Default: "P")
- snp (string; optional): A string denoting the column name for the
    SNP names (e.g. rs number). More generally, this column could be
    anything that identifies each point being plotted. For example, in
    an epigenome-wide association study (EWAS) this could be the probe
    name or cg number. This column should be a character. This
    argument is optional, however it is necessary to specify if you
    want to highlight points on the plot using the highlight argument
    in the figure method. (Default: "SNP")
- gene (string; optional): A string denoting the column name for the
    GENE names. This column could be a string or a float. More
    generally this could be any annotation information that you want
    to include in the plot. (Default: "GENE")
- annotation (string; optional): A string denoting the column name for
    an annotation. This column could be a string or a float.  This
    could be any annotation information that you want to include in
    the plot (e.g. zscore, effect size, minor allele frequency).
    (Default: None)
- logp (bool; optional): If True, the -log10 of the p-value is
    plotted.  It isn't very useful to plot raw p-values; however,
    plotting the raw value could be useful for other genome-wide plots
    (e.g., peak heights, bayes factors, test statistics, other
    "scores", etc.) (Default: True)
- title (string; optional) The title of the graph. (Default: "Manhattan Plot")
- showgrid (bool; optional): Boolean indicating whether gridlines should be
    shown. (Default: True)
- xlabel (string; optional): Label of the x axis. (Default: None)
- ylabel: (string; optional): Label of the y axis. (Default:
    "-log10(p)")
- point_size (number; optional): Size of the points of the Scatter
    plot. (Default: 5)
- showlegend (bool; optional): Boolean indicating whether legends should be
    shown. (Default: True)
- col (string; optional): A string representing the color of the
    points of the Scatter plot. Can be in any color format accepted by
    plotly_js graph_objs. (Default: None)
- suggestiveline_value (bool/float; optional): A value which must
    be False to deactivate the option, or a numerical value
    corresponding to the p-value at which the line should be drawn.
    The line has no influence on the data points. (Default:
    -np.log10(1e-8))
- suggestiveline_color (string; optional): Color of the suggestive
    line. (Default: "#636efa")
- suggestiveline_width (number; optional): Width of the suggestive
    line. (Default: 1)
- genomewideline_value (bool/float; optional): A boolean which must be
    False to deactivate the option, or a numerical value corresponding
    to the p-value above which the data points are considered
    significant. (Default: -np.log10(5e-8))
- genomewideline_color (string; optional): Color of the genome-wide
    line. Can be in any color format accepted by plotly_js
    graph_objs. (Default: "#EF553B")
- genomewideline_width (number; optional): Width of the genome-wide
    line. (Default: 1)
- highlight (bool; optional): turning on/off the highlighting of data points
    considered significant. (Default: True)
- highlight_color (string; optional): Color of the data points
    highlighted because they are significant. Can be in any color
    format accepted by plotly_js graph_objs. (Default: "red")

    # ...
    Example 1: Random Manhattan Plot
    '''
    dataframe = pd.DataFrame(
        np.random.randint(0,100,size=(100, 3)),
        columns=['P', 'CHR', 'BP'])
    fig = ManhattanPlot(dataframe, title='XYZ Manhattan plot')

    plotly.offline.plot(fig, image='png')
    '''

    """

    mh = _ManhattanPlot(dataframe,
                        chrm=chrm,
                        bp=bp,
                        p=p,
                        snp=snp,
                        gene=gene,
                        annotation=annotation,
                        logp=logp)

    return mh.figure(title=title,
                     showgrid=showgrid,
                     xlabel=xlabel,
                     ylabel=ylabel,
                     point_size=point_size,
                     showlegend=showlegend,
                     col=col,
                     suggestiveline_value=suggestiveline_value,
                     suggestiveline_color=suggestiveline_color,
                     suggestiveline_width=suggestiveline_width,
                     genomewideline_value=genomewideline_value,
                     genomewideline_color=genomewideline_color,
                     genomewideline_width=genomewideline_width,
                     highlight=highlight,
                     highlight_color=highlight_color)
Esempio n. 45
0
    def on_batch_end(self, batch, logs=None):
        if self.current_epoch_ > 1:
            return

        if self.use_validation_set:
            X, Y = self.validation_data[0], self.validation_data[1]

            # use 5 random batches from test set for fast approximate of loss
            num_samples = self.batch_size * self.validation_sample_rate

            if num_samples > X.shape[0]:
                num_samples = X.shape[0]

            idx = np.random.choice(X.shape[0], num_samples, replace=False)
            x = X[idx]
            y = Y[idx]

            values = self.model.evaluate(x,
                                         y,
                                         batch_size=self.batch_size,
                                         verbose=False)
            loss = values[0]
        else:
            loss = logs['loss']

        # smooth the loss value with an exponential moving average and
        # bias-correct it (assuming self.running_loss_ is initialised to 0
        # before the first batch; as written, beta*loss + (1-beta)*loss
        # would reduce to loss and smooth nothing)
        self.running_loss_ = self.loss_smoothing_beta * self.running_loss_ + (
            1. - self.loss_smoothing_beta) * loss
        running_loss = self.running_loss_ / (
            1. - self.loss_smoothing_beta**self.current_batch_)

        # stop logging if loss is too large
        if self.current_batch_ > 1 and self.stopping_criterion_factor is not None and (
                running_loss >
                self.stopping_criterion_factor * self.best_loss_):

            if self.verbose:
                print(
                    " - LRFinder: Skipping iteration since loss is %d times as large as best loss (%0.4f)"
                    % (self.stopping_criterion_factor, self.best_loss_))
            return

        if running_loss < self.best_loss_ or self.current_batch_ == 1:
            self.best_loss_ = running_loss

        current_lr = K.get_value(self.model.optimizer.lr)

        self.history.setdefault('running_loss_', []).append(running_loss)
        if self.lr_scale == 'exp':
            self.history.setdefault('log_lrs', []).append(np.log10(current_lr))
        else:
            self.history.setdefault('log_lrs', []).append(current_lr)

        # compute the lr for the next batch and update the optimizer lr
        if self.lr_scale == 'exp':
            current_lr *= self.lr_multiplier_
        else:
            current_lr = self.lr_multiplier_[self.current_batch_ - 1]

        K.set_value(self.model.optimizer.lr, current_lr)

        # save the other metrics as well
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        if self.verbose:
            if self.use_validation_set:
                print(" - LRFinder: val_loss: %1.4f - lr = %1.8f " %
                      (values[0], current_lr))
            else:
                print(" - LRFinder: lr = %1.8f " % current_lr)
Esempio n. 46
0
def bitbertober(bitber_exp):
    """Combine per-bit BER exponents (log10 of each bit-error rate) into
    the exponent of the total bit-error rate."""
    ber = sum([10**i for i in bitber_exp])
    ber_exp = np.log10(ber)
    return ber_exp
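# Worked example (hypothetical values): bit exponents -3 and -4 combine to
# log10(1e-3 + 1e-4) ~ -2.96:
# bitbertober(np.array([-3.0, -4.0]))  # -> approximately -2.96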
Esempio n. 47
0
    def run(self, dataSlice, slicePoint=None):
        return 1.25 * np.log10(np.sum(10.**(.8*dataSlice[self.colname])))
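# The expression above is the usual coadded 5-sigma depth: N exposures of
# equal single-visit depth m5 deepen the limit by 1.25*log10(N), e.g. for
# two identical visits of m5 = 24.0:
# 1.25 * np.log10(2 * 10.**(.8 * 24.0))  # -> 24.376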
Esempio n. 48
0
print('velocity variance: %10.4f\n' % (np.sum(wvel*wvel)/totsize))


fig,theAx=plt.subplots(1,1,figsize=(8,8))
frequencies[0]=np.NaN
Power[0]=np.NaN
Power_half=Power[:halfpoint]
theAx.loglog(frequencies,Power_half)
theAx.set_title('raw wvel spectrum with $f^{-5/3}$')
theAx.set(xlabel='frequency (Hz)',ylabel='Power (m^2/s^2)')
#
# pick one point the line should pass through (by eye)
# note that y intercept will be at log10(freq)=0
# or freq=1 Hz
#
leftspec=np.log10(Power[1]*1.e-3)
logy=leftspec - 5./3.*np.log10(frequencies)
yvals=10.**logy
theAx.loglog(frequencies,yvals,'r-')
thePoint=theAx.plot(1.,Power[1]*1.e-3,'g+')
thePoint[0].set_markersize(15)
thePoint[0].set_marker('h')
thePoint[0].set_markerfacecolor('g')


# %% [markdown]
# ## power spectrum layout

# %% [markdown]
# Here is what the entire power spectrum looks like, showing positive and negative frequencies
Esempio n. 49
0
                                                               'N', 'OR', 'STAT', 'P'])

add2 = add2.set_index('Position')

to_condition['P value 1st'] = add1.P
to_condition['P value 2nd'] = add2.P

to_condition['Region'] = (range(1, 1 + len(to_condition)))
to_condition['Region'] = 'r' + to_condition.Region.astype(str) + ' lead'

to_condition

frames = [g, to_condition]
result = pd.concat(frames)

result = result.sort_values('P value').loc[~result.index.duplicated(
    keep='last')]

result.to_csv('table_for_paper.csv')

add1['-logP'] = -(np.log10(add1.P))
add2['-logP'] = -(np.log10(add2.P))

fig, ax = plt.subplots(3, 1, sharey=True, sharex=True)

ax[0].scatter(fisher['BP'], fisher['-logP'])
ax[1].scatter(add1['BP'], add1['-logP'])
ax[2].scatter(add2['BP'], add2['-logP'])
plt.savefig('regressions.tiff')
plt.show()
Esempio n. 50
0
                        errors_a,
                        markers[markerIndex],
                        label=legend,
                        markersize=3)
        pressurePlot.loglog(timeStepList,
                            errors_p,
                            markers[markerIndex],
                            label=legend,
                            markersize=3)

        markerIndex += 1

        try:
            print('\n' + legend)
            # Observed order of convergence between successive time steps:
            # p = (log e_{i+1} - log e_i) / (log dt_{i+1} - log dt_i)
            for i in np.arange(nbComputations - 1):
                print((np.log10(errors_u[i + 1]) - np.log10(errors_u[i])) / (
                    np.log10(timeStepList[i + 1]) - np.log10(timeStepList[i])))
        except Exception:
            pass

velocityPlot.set_xlabel('Time step [s]')
velocityPlot.set_ylabel('Error in velocity [-]')
velocityPlot.grid('on')
lgd = velocityPlot.legend(loc='upper center',
                          bbox_to_anchor=(0.5, 1.45),
                          ncol=3,
                          fancybox=True,
                          shadow=False)
velocityFig.savefig('tubeflow_velocity.pdf',
                    bbox_extra_artists=(lgd, ),
                    transparent=True,
Esempio n. 51
0
    ## dist_sat[i,j,k]:
        ## i --> the node i
        ## j --> 0 for x-position, 1 for y-position, 2 for z-position
        ## k --> the step time in sat pass
    
#### FOR COMPUTE DISTANCE MAGNITUDE (ABS) FROM END-DEVICE TO SAT PASSING BY ####
distance = np.zeros((sites_pos.shape[0],leo_pos.shape[0]))
distance[:,:] = (dist_sat[:,0,:]**2 + dist_sat[:,1,:]**2 + dist_sat[:,2,:]**2)**0.5
## WHERE:
    ## distance[i,j]:
        ## i --> the node i
        ## j --> the step time in sat pass

##MATRIX FOR LINK BUDGET Lpl ###
Lpl = 20*np.log10(distance*1000) + 20*np.log10(freq) - 147.55 #FREE-SPACE PATH LOSS; DISTANCE MUST BE IN METERS
## WHERE:
    ## Lpl[i,j]:
        ## i --> the node i
        ## j --> the step time in sat pass 

##MATRIX FOR LINK BUDGET, USING Prx ###
Prx = Ptx + G_sat + G_device - Lpl #RECEIVED POWER IN dBm
## WHERE:
    ## Prx[i,j]:
        ## i --> the node i
        ## j --> the step time in sat pass 
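## A follow-on sketch (hypothetical: BW in Hz and NF in dB are not defined
## in this excerpt): the receiver SNR per node and time step would follow
## from the thermal noise floor.
# noise_floor = -174 + 10*np.log10(BW) + NF   # dBm
# snr = Prx - noise_floor                     # dB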

Esempio n. 52
0
from scipy.optimize import curve_fit
from scipy.special import factorial  # scipy.misc.factorial was removed in modern SciPy
from scipy.stats import ks_2samp
from scipy import stats
import numpy as np
import sys
import pdb

filename = sys.argv[1]

datain = np.genfromtxt(filename, delimiter=",")
data = datain[:, 1] * 1e9
tmin = np.min(data)  # renamed so the builtins min/max are not shadowed
tmax = np.max(data)
bins = 10 * np.size(data)

time = np.logspace(np.log10(tmin), np.log10(tmax), num=bins)
mu = np.mean(data)

time_centers = np.r_[0.5 * (time[:-1] + time[1:])]


def analyticalCDF(times, tau):
    return 1 - np.exp(-times / tau)


#print(np.std(data))
#print(stats.sem(data))

hist, bins2 = np.histogram(data, bins=time, density=False)
cdf = np.cumsum(hist) * 1.0 / data.size
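# A plausible next step (a sketch): fit the empirical CDF with the
# analyticalCDF defined above, using the sample mean as the initial guess:
# popt, pcov = curve_fit(analyticalCDF, time_centers, cdf, p0=[mu])
# print("tau = %.3g +/- %.3g ns" % (popt[0], np.sqrt(pcov[0][0])))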
Esempio n. 53
0
#combine total subsector basis
basis = np.hstack((cube_basisL, basis))
basis = np.hstack((basis, cube_basisR))

# project Hamiltonian + dynamics
basis = np.unique(basis, axis=1)
basis, temp = np.linalg.qr(basis)
from Diagnostics import print_wf
for n in range(0, np.size(basis, axis=1)):
    print("\n")
    print_wf(basis[:, n], pxp, 1e-2)

H_rot = np.dot(np.conj(np.transpose(basis)), np.dot(H.sector.matrix(), basis))
e, u = np.linalg.eigh(H_rot)
overlap = np.log10(np.abs(u[1, :])**2)

H.sector.find_eig()
z = zm_state(2, 1, pxp)
eig_overlap(z, H).plot()
plt.scatter(e,
            overlap,
            marker="s",
            s=200,
            alpha=0.6,
            color="red",
            label="subcube")

#perm approx for comp, FSA
perm_basis = np.zeros(pxp.dim)
for n in range(0, len(sector_refs)):
        return helper_func(θ)

Esempio n. 54
0


#########################################
# PYCCL PARAMETERS
#########################################
nz = 1000 #redshift resolution
zmin = 0.
zmax = 2.
z = np.linspace(zmin,zmax,nz)
# number of tomographic bins
nbins = 1 
# number of cross/auto angular power spectra
ncombinations = int(nbins*(nbins+1)/2)
# 100 log equal spaced ell samples should be fine according to https://arxiv.org/pdf/0705.0163.pdf
ells = np.logspace(np.log10(100),np.log10(6000),100)
# multipole bin width (taken to be 1 here)
delta_l = 1
"""
Assume a redshift distribution given by
    z^alpha * exp(z/z0)^beta
    with alpha=1.3, beta = 1.5 and z0 = 0.65
"""
dNdz_true = ccl.dNdzSmail(alpha = 1.3, beta = 1.5, z0=0.65)
# Assumes photo-z error is Gaussian with a bias is 0.05(1+z)
pz = ccl.PhotoZGaussian(sigma_z0=0.05)

fsky = 15000/41252.96 # fraction of the sky observed by Euclid
# intrinsic ellipticity dispersion (shape noise)
sn = 0.26
# TODO: inspect this num_dens and nzs
Esempio n. 55
0
from sklearn.preprocessing import PolynomialFeatures  # polynomial features
from sklearn.model_selection import cross_val_score  # cross-validation
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score  # evaluation metrics
import pandas as pd
import matplotlib.pyplot as plt  # plotting library
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import random
import math


### todo: load data - start ###
data = np.loadtxt(r'D:\Course Plus\华中数模\第十二届华中地区数学建模大赛B题\回归数据\train_data.txt',
                      encoding='utf-8',skiprows=1,delimiter=',')  # read the data file

data = np.log10(data+1)

X = data[:, :-1]  # independent variables
Y = data[:, -1]  # dependent variable
### todo: load data - end ###

# model_br = BayesianRidge(n_iter=2, tol=0.01, param_1=0.1, param_2=0.00001, lambda_1=0.00001,
#                              lambda_2=50)  # Bayesian ridge regression model
# model_lr = LinearRegression(fit_intercept=True, normalize=True)  # ordinary linear regression model
#
# model_svr = SVR(kernel='rbf', C=3400, param=0.0016, tol=0.001,
#                 epsilon=0.14)
# model_svr = SVR(kernel='linear', C=10,  tol=0.000001,
#                 epsilon=0.000001)
param = [0.00001,0.0001,0.001,0.01,0.1,1,10,100]
# param = [1,10,100,1000]
Esempio n. 56
0
    def eval(self,nu=None,fill_SED=True,get_model=False,loglog=False,label=None,phys_output=False):
        """
        evaluates the SED for the current parameters and fills the :class:`.SED` member
        """
        
        
        if nu is None:
            #print ("--->", self.nu_min,self.nu_max,self.nu_size)
            
            x1=np.log10(self.nu_min)

            x2=np.log10(self.nu_max)
            
            lin_nu=np.logspace(x1,x2,self.nu_size)

            log_nu=np.log10(lin_nu)
            
        else:
            
            if np.shape(nu)==():
 
                nu=np.array([nu])
            
            if loglog==True:
                lin_nu=np.power(10.,nu)
                log_nu=nu
            else:
                log_nu=np.log10(nu)
                lin_nu=nu
        
            
            
        #print(lin_nu)
        model=np.zeros(lin_nu.size)
        
        
        
        for model_comp in self.components_list:
            
            #print "model",model_comp.name
            
            if loglog==False:
                model+= model_comp.eval(nu=lin_nu,fill_SED=fill_SED,get_model=True,loglog=loglog)
            else:
                model+= np.power(10.,model_comp.eval(nu=log_nu,fill_SED=fill_SED,get_model=True,loglog=loglog))
         
        
    
        if fill_SED==True:
 
            self.SED.fill(nu=lin_nu,nuFnu=model)
            #TODO ADD cosmo properly to all components
            #self.SED.fill_nuLnu(z=self.jet_obj.get_par_by_type('redshift').val, dl=self.jet_obj.get_DL_cm())
            
        if get_model==True:
            
            if loglog==True:
                model=np.log10(model)
                    
            
            return model        
        
        else:
            
            return None
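# A hypothetical usage sketch (assumes `sed_model` is an instance of the
# class this method belongs to, with its components already configured):
# nu_grid = np.logspace(10., 26., 200)
# nuFnu = sed_model.eval(nu=nu_grid, get_model=True)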
Esempio n. 57
0
def dist_mod(z, cosmo):
    """Distance modulus mu = 5*(log10(d_L/pc) - 1), with the luminosity
    distance converted from Mpc to pc."""
    ld = cosmology.funcs.luminosity_distance(z, cosmo=cosmo).value
    dist_mod = 5.0 * (np.log10(ld * 1000.0 * 1000.0) - 1.0)
    return dist_mod
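# Worked check (hypothetical input): a luminosity distance of 10 Mpc gives
# dist_mod = 5.0 * (np.log10(10.0 * 1e6) - 1.0) = 30.0 mag.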
Esempio n. 58
0
        0.15671181
    ]  #, 0.03627285 -0.05957562 -0.13237366 -0.17996786 -0.21153889 -0.23183063]
    nhi = [
        14.06075461, 13.56412484, 13.03211818, 12.44580853, 11.79029004,
        11.080321, 10.33237421, 9.55585859, 8.75205882, 7.95434865, 7.16719688,
        6.40544992, 5.67945472, 5.01192144, 4.39851996, 3.83386894, 3.33763753,
        2.89468833, 2.50068819
    ]  #,  2.15612715,  1.85457562,  1.59877366,  1.38206786,  1.18913889,  1.02633063]
    mstars = [
        1.00000000e+03, 1.46779927e+03, 2.15443469e+03, 3.16227766e+03,
        4.64158883e+03, 6.81292069e+03, 1.00000000e+04, 1.46779927e+04,
        2.15443469e+04, 3.16227766e+04, 4.64158883e+04, 6.81292069e+04,
        1.00000000e+05, 1.46779927e+05, 2.15443469e+05, 3.16227766e+05,
        4.64158883e+05, 6.81292069e+05, 1.00000000e+06
    ]  #,   1.46779927e+06, 2.15443469e+06,   3.16227766e+06,   4.64158883e+06,   6.81292069e+06, 1.00000000e+07]  #[1e3, 1e4, 1e5, 1e6]
    Mvs = [np.log10(m / 3.2e9) * -2.5 + -18.8 for m in mstars]
    print(len(mstars), len(Mvs), len(nlums))

    #shortened lists
    Mvs2 = [
        np.log10(1e3 / 3.2e9) * -2.5 + -18.8,
        np.log10(1e4 / 3.2e9) * -2.5 + -18.8,
        np.log10(1e5 / 3.2e9) * -2.5 + -18.8,
        np.log10(1e6 / 3.2e9) * -2.5 + -18.8
    ]
    nlums2 = np.array([nlums[0], nlums[6], nlums[12], nlums[18]])
    print(Mvs2, nlums2)
    nlos2 = np.array([nlo[0], nlo[6], nlo[12], nlo[18]])
    nhis2 = np.array([nhi[0], nhi[6], nhi[12], nhi[18]])
    print(nhis2 - nlums2)
    print(nlums2 - nlos2)
Esempio n. 59
0
def main():
    savepath = '/scratch/dac29/output/processed_data/berlind_groupcat/mock_runs/4th_run/custom_catalogues/'
    #############################################################################################
    catalogue = sys.argv[1]
    #open the mock group cat
    filepath = cu.get_output_path(
    ) + 'processed_data/berlind_groupcat/mock_runs/4th_run/'
    catalogue_1 = catalogue + '_radec_mock'
    f = h5py.File(filepath + catalogue_1 + '.hdf5', 'r')
    GC = f.get(catalogue_1)
    GC = np.array(GC)
    print('length:', len(GC))
    for name in GC.dtype.names:
        print('\t', name)

    #open the true groups
    filepath = cu.get_output_path(
    ) + 'processed_data/hearin_mocks/custom_catalogues/'
    catalogue_2 = catalogue
    f = h5py.File(filepath + catalogue_2 + '.hdf5', 'r')
    mock = f.get(catalogue_2)
    mock = np.array(mock)
    print('length:', len(mock))
    for name in mock.dtype.names:
        print('\t', name)

    #open the ra,dec mock fed into the group finder
    filepath = cu.get_output_path(
    ) + 'processed_data/hearin_mocks/custom_catalogues/'
    filename = catalogue + '_radec_mock.dat'
    mock_radec = ascii.read(filepath + filename,
                            delimiter='\s',
                            Reader=ascii.Basic,
                            data_start=1)
    print('length:', len(mock_radec))
    print(mock_radec)
    mock_radec = np.array(mock_radec)
    #############################################################################################

    # define constants, cosmology, and redshift limits:
    c = 299792.458  #km/s
    cosmo = cosmology.FlatLambdaCDM(H0=100, Om0=0.27)  #h=1
    Omega_m = 0.27
    z_upper_lim = 0.068
    z_lower_lim = 0.020

    #create a new catalogue to store results
    dtype=[('ID','>i8'),('k_1','>i8'),('k_2','>i8'),('RA','>f8'),('DEC','>f8'),('Z','>f8'),('red','>i8'),\
           ('M_u,0.1','>f8'),('M_g,0.1','>f8'),('M_r,0.1','>f8'),('M_i,0.1','>f8'),('M_z,0.1','>f8'),('MSTAR','>f8'),\
           ('GROUP_ID','>i8'),('MGROUP','>f8'),('ZGROUP','>f8'),('R200','>f8'),\
           ('CEN_IND','>i8'),('RANK','>i8'),('RPROJ','>f8'),('N_sat','>i8'),('N_sat_red','>i8'),('N_sat_blue','>i8'),\
           ('HALO_M','>f8'),('HALO_RANK','>i8')]
    dtype = np.dtype(dtype)
    data = np.recarray((len(GC), ), dtype=dtype)
    data.fill(-99.9)  #empty value indicator

    #catalogues matched by row index
    data['GROUP_ID'] = GC['IDgroup']

    #calculate index into ra-dec mock file
    index = np.argsort(mock_radec['ID'])
    sorted_x = mock_radec['ID'][index]
    ind = np.searchsorted(sorted_x, GC['IDgal'])
    ind = index[ind]

    #grab data from ra-dec mock
    data['k_1'] = ind
    data['ID'] = mock_radec['ID'][ind]
    data['RA'] = mock_radec['ra'][ind]
    data['DEC'] = mock_radec['dec'][ind]
    data['Z'] = mock_radec['z'][ind]

    #calculate index into xyz mock
    ind = mock_radec['k'][ind]
    #grab values from xyz mock
    data['k_2'] = ind
    data['M_g,0.1'] = mock['M_r,0.1'][ind] + mock['g-r'][ind]
    data['M_r,0.1'] = mock['M_r,0.1'][ind]
    data['HALO_M'] = mock['M200b_host'][ind]

    #determine cen/sat designation in xyz mock
    result = np.where(mock['ID_host'][ind] == -1)[0]
    data['HALO_RANK'][result] = 1  #central
    result = np.where(mock['ID_host'][ind] != -1)[0]
    data['HALO_RANK'][result] = 0  #satellite

    #calculate galaxy colors
    color = data['M_g,0.1'] - data['M_r,0.1']
    LHS = 0.7 - 0.032 * (data['M_r,0.1'] + 16.5)  #Weinmann 2006
    blue = np.where(color < LHS)[0]  #indices of blue galaxies
    red = np.where(color > LHS)[0]  #indices of red galaxies

    #record color designation
    data['red'][red] = 1
    data['red'][blue] = 0

    for i in range(0, len(data)):
        group_id = data['GROUP_ID'][i]
        members = np.where(data['GROUP_ID'] == group_id)[0]
        central = np.where(
            data['M_r,0.1'][members] == min(data['M_r,0.1'][members]))[0][0]
        central = members[central]
        satellites = np.where(members != central)[0]
        satellites = members[satellites]
        #record rank
        data['RANK'][central] = 1
        data['RANK'][satellites] = 0
        #record number of satellites in the group
        data['N_sat'][members] = len(satellites)
        sat_red = np.where(np.in1d(satellites, red))[0]
        data['N_sat_red'][members] = len(sat_red)
        sat_blue = np.where(np.in1d(satellites, blue))[0]
        data['N_sat_blue'][members] = len(sat_blue)
        #record other group information
        data['CEN_IND'][members] = central
        data['ZGROUP'][members] = data['Z'][central]
        #calculate projected distance from central
        da = cu.spheredist(data['RA'][central], data['DEC'][central],
                           data['RA'][members], data['DEC'][members])
        da = np.radians(da)  #convert to radians
        chi = cosmology.funcs.comoving_distance(
            data['ZGROUP'][central], cosmo=cosmo).value * 1000.0  #in kpc
        data['RPROJ'][members] = chi / (
            1.0 + data['ZGROUP'][members]) * da  #calculate physical separation
        data['RPROJ'][central] = 0.0  #==0 if it is the central

    for i in range(0, len(data)):
        group_id = data['GROUP_ID'][i]
        members = np.where(data['GROUP_ID'] == group_id)[0]
        central = np.where(
            data['M_r,0.1'][members] == min(data['M_r,0.1'][members]))[0][0]
        central = members[central]
        data['CEN_IND'][members] = central

    #read in mass halo function
    filepath = '/scratch/dac29/fortran_code/mass_functions/'
    filename = 'Bolshoi_Massfunc.dat'
    names = ['dM', 'dn', 'nsum']
    dndM = ascii.read(filepath + filename,
                      delimiter='\s',
                      names=names,
                      data_start=0)
    dndM = np.array(dndM)

    #identify centrals and satellites
    centrals = np.where(data['RPROJ'] == 0)[0]
    satellites = np.where(data['RPROJ'] > 0)[0]

    #calculate group total r-band luminosities
    S_r = 4.64
    group_L = np.zeros((len(data), ), dtype=np.float)

    for i in range(0, len(centrals)):
        gal = np.where(data['GROUP_ID'] == data['GROUP_ID'][centrals[i]])[0]
        group_L[gal] = np.log10(
            np.sum(10.0**(solar_lum(data['M_r,0.1'][gal], S_r))))
    tot_lum = group_L[centrals]

    #calculate abundance matched masses for groups
    geo_f = 1.0 / 8.0  #geometric factor: spherical octant
    r_max = cosmology.funcs.comoving_distance(z_upper_lim,
                                              cosmo=cosmo).value  #in Mpc
    r_min = cosmology.funcs.comoving_distance(z_lower_lim,
                                              cosmo=cosmo).value  #in Mpc
    mock_volume = (4.0 / 3.0) * math.pi * (r_max**3.0 - r_min**3) * geo_f

    #calculate the group luminosity function
    N_gal = np.cumsum(np.zeros(len(centrals)) +
                      1)  #cumulative number of groups
    n_gal = N_gal / mock_volume  #number density
    L_gal = np.sort(tot_lum)[::-1]  #group luminosity
    ind = np.argsort(tot_lum)[::-1]

    #integrate halo mass function
    n_halo = dndM['nsum'][::-1]  #cumulative number density
    M_halo = dndM['dM'][::-1]  #halo mass

    #interpolate the halo mass function
    x = np.log10(n_halo)
    y = M_halo
    f = interpolate.interp1d(x,
                             y,
                             kind='cubic',
                             bounds_error=False,
                             fill_value=0.0)

    data['MGROUP'][centrals[ind]] = f(np.log10(n_gal))

    for i in range(0, len(centrals)):
        gal = np.where(data['GROUP_ID'] == data['GROUP_ID'][centrals[i]])[0]
        data['MGROUP'][gal] = data['MGROUP'][centrals[i]]

    R_200 = 258.1 * (10.0**data['MGROUP'] / (10.0**12.0))**(1.0 / 3.0) * (
        Omega_m / 0.25)**(1.0 / 3.0) * (1.0 + data['ZGROUP'])**(-1.0)
    data['R200'] = R_200

    print('saving hdf5 version of the catalogue...')
    filename = catalogue + '_groups'
    f = h5py.File(savepath + filename + '.hdf5', 'w')
    dset = f.create_dataset(filename, data=data)
    f.close()

    print('saving ascii version of the catalogue...')
    data_table = table.table.Table(data=data)
    ascii.write(data_table, savepath + filename + '.dat')
    print(data_table)
Esempio n. 60
0
            cosmology.Dinterp[auxcut] / params.boxsize)

        # Check which replications are compressed by the lens
        replicationsinside = geometry[(geometry['nearestpoint'] < dlsup *
                                       (1 + params.beta_buffer))
                                      & (geometry['farthestpoint'] >= dlinf *
                                         (1 - params.beta_buffer))]

        if not rank:
            print(" Replications inside:")
        # Loop on the replications
        for ii, repi in enumerate(replicationsinside):

            if not rank:
                print(" * Replication [{}/{}] of snap [{}/{}] {} {} {} "\
                           .format( str(ii + 1).zfill(int(np.log10(replicationsinside.size) + 1)), replicationsinside.size,
                                    str(snapnum + 1).zfill(int(np.log10(params.numfiles) + 1)), params.numfiles,
                                    repi['x'], repi['y'], repi['z']))

            # Set the 1st guess for plc-crossing to a-collapse
            aplcslicei = np.copy(aplcslice)
            aplcslicei[Zaccslice != -1] = 1.0 / (Zaccslice[Zaccslice != -1] +
                                                 1)
            aplcslicei[Zaccslice == -1] = 1.0

            # Position shift of the replication
            shift = (np.array(repi[['x', 'y', 'z']].tolist()).dot(
                params.change_of_basis)).astype(np.float32)
            # Get the scale parameter of the moment that the particle crossed the PLC
            if not rank:
                t0 = time()