Code Example #1
def plotFitOfHighOrderData():  
    plt.title("Polynomial function regression")
    plt.grid()
    
    size = 5000
    testSize = 100
    order = 10
    covs = [0.03,3.41,-1.8,3,0.091,5,30.0,3.12,-0.02,1.2]
    # Generate input features from uniform distribution
    np.random.seed(20) # Set the seed so we can get reproducible results
    x_poly = np.array(sorted(uniform(-1, 1, testSize)))
    
    
    # Evaluate the real function for training example inputs
    y_poly = real_function(4, 6, 0, x_poly, covs)

    x_samples = np.array(sorted(uniform(-1, 1, size)))
    sigma = 20
    y_samples = real_function(4, 6, sigma, x_samples, covs)

    plt.plot(x_poly, y_poly, c='black', label='poly data')

    plt.scatter(x_samples, y_samples, s=1, c='green', label='sample')
    train(x_samples, y_samples, x_poly, y_poly, order-1, plotFlag=True)
    plt.legend()
    plt.show() 
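The sampling pattern used above (seeded generator, uniform draws, sorted inputs) can be reproduced on its own. A minimal sketch, assuming uniform is numpy.random.uniform:

import numpy as np
from numpy.random import uniform

np.random.seed(20)                          # fix the seed for reproducible draws
x = np.array(sorted(uniform(-1, 1, 100)))   # 100 sorted inputs drawn from [-1, 1)
print(x.min(), x.max())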
Code Example #2
 def testProperties01(self):
     """The magnitudue of the product is the product of the magnitudes"""
     q1 = Quaternion(tuple(RA.uniform(self.min,
                                      self.max, (1, 4)).tolist()[0]))
     q2 = Quaternion(tuple(RA.uniform(self.min,
                                      self.max, (1, 4)).tolist()[0]))
     self.assertEqual((q1*q2).magnitude(), q1.magnitude()*q2.magnitude())
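The quaternion inputs above are built from a single row of uniform draws. A small standalone sketch of that construction, assuming a numpy.random-style uniform behind the RA alias (RandomArray in the original):

import numpy.random as RA   # stands in for the RandomArray alias used above

components = tuple(RA.uniform(-10.0, 10.0, (1, 4)).tolist()[0])
print(len(components))      # 4 components for one quaternion

Note that assertEqual compares the two float magnitudes exactly; a tolerance-based check such as assertAlmostEqual is usually more robust for products of random floats.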
Code Example #3
def IMRPhenomC_param_generator(flow, tmplt_class, bank, **kwargs):
    """
    Generate random parameters for the IMRPhenomC waveform model.
    Specify the min and max mass of the bigger component, then the min
    and max mass of the total mass. This function includes
    restrictions on q and chi based on IMRPhenomC's range of
    believability, namely q <=20 and |chi| <= 0.9.

    @param flow: low frequency cutoff
    @param tmplt_class: Template generation class for this waveform
    @param bank: sbank bank object
    @param kwargs: constraints on waveform parameters. See urand_tau0tau3_generator for more usage help. If no spin limits are specified, the IMRPhenomC limits will be used.
    """

    # get spin limits. IMRPhenomC has special bounds on chi, so we
    # will silently truncate
    s1min, s1max = kwargs.pop('spin1', (-0.9, 0.9))
    s2min, s2max = kwargs.pop('spin2', (s1min, s1max))
    s1min, s1max = (max(-0.9, s1min), min(0.9, s1max))
    s2min, s2max = (max(-0.9, s2min), min(0.9, s2max))

    for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):

        q = max(mass1/mass2, mass2/mass1)
        if q <= 20:
            spin1 = uniform(s1min, s1max)
            spin2 = uniform(s2min, s2max)
        else:
            raise ValueError("mass ratio out of range")

        yield tmplt_class(mass1, mass2, spin1, spin2, bank=bank)
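The silent truncation of user-supplied spin bounds into the model's validity range is just a pair of max/min clamps. A tiny sketch with hypothetical requested bounds:

# hypothetical requested bounds, clamped into IMRPhenomC's |chi| <= 0.9 range
s1min, s1max = -1.2, 0.5
s1min, s1max = max(-0.9, s1min), min(0.9, s1max)
print(s1min, s1max)   # -0.9 0.5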
Code Example #4
def derive_qois(df_original):
    df = df_original.copy()
    ns = df.shape[0]

    rstar_d = normal(rstar.value, rstare.value, size=ns) * Rsun
    period = df.p.values if 'p' in df.columns else df.pr.values

    df['period'] = period
    df['k_true'] = sqrt(df.k2_true)
    df['k_app'] = sqrt(df.k2_app)
    df['cnt'] = 1. - df.k2_app / df.k2_true
    df['a_st'] = as_from_rhop(df.rho.values, period)
    df['a_au'] = df.a_st * rstar_d.to(AU)
    df['inc'] = degrees(i_from_ba(df.b.values, df.a_st.values))
    df['t14'] = d_from_pkaiews(period, df.k_true.values, df.a_st.values,
                               radians(df.inc.values), 0.0, 0.0, 1)
    df['t14_h'] = 24 * df.t14

    df['r_app'] = df.k_app.values * rstar_d.to(Rjup)
    df['r_true'] = df.k_true.values * rstar_d.to(Rjup)
    df['r_app_point'] = df.k_app.values * rstar.to(Rjup)
    df['r_true_point'] = df.k_true.values * rstar.to(Rjup)

    df['r_app_rsun'] = df.k_app.values * rstar_d.to(Rsun)
    df['r_true_rsun'] = df.k_true.values * rstar_d.to(Rsun)
    df['teff_p'] = Teq(normal(*star_teff, size=ns), df.a_st,
                       uniform(0.25, 0.50, ns), uniform(0, 0.4, ns))
    return df
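The rstar_d draw above propagates the stellar-radius uncertainty by sampling a normal distribution and attaching solar-radius units. A minimal sketch of that step, assuming astropy.units and numpy are available (the radius and error values are made up for illustration):

import numpy as np
from astropy import units as u

ns = 5
rstar_d = np.random.normal(1.0, 0.05, size=ns) * u.R_sun   # hypothetical radius and error
print(rstar_d.to(u.R_jup))                                  # same radii in Jupiter radii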
Code Example #5
def double_spin_precessing_param_generator(flow, tmplt_class, bank, **kwargs):
    """
    Currently a stub to test precessing template generation.
    """
    spin1_bounds = kwargs.pop('spin1', (0., 0.9))
    spin2_bounds = kwargs.pop('spin2', (0., 0.9))

    for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
        # Choose the rest from hardcoded limits
        spin1mag = uniform(*spin1_bounds)
        spin2mag = uniform(*spin2_bounds)
        spin1ang1 = uniform(0, numpy.pi)
        spin1ang2 = uniform(0, 2*numpy.pi)
        spin2ang1 = uniform(0, numpy.pi)
        spin2ang2 = uniform(0, 2*numpy.pi)
        spin1z = spin1mag * numpy.cos(spin1ang1)
        spin1x = spin1mag * numpy.sin(spin1ang1) * numpy.cos(spin1ang2)
        spin1y = spin1mag * numpy.sin(spin1ang1) * numpy.sin(spin1ang2)    
        spin2z = spin2mag * numpy.cos(spin2ang1)
        spin2x = spin2mag * numpy.sin(spin2ang1) * numpy.cos(spin2ang2)
        spin2y = spin2mag * numpy.sin(spin2ang1) * numpy.sin(spin2ang2)
        # Check orientation angles use correct limits
        theta = uniform(0, numpy.pi)
        phi = uniform(0, 2*numpy.pi)
        psi = uniform(0, 2*numpy.pi)
        iota = uniform(0, numpy.pi)
        yield tmplt_class(mass1, mass2, spin1x, spin1y, spin1z, spin2x, spin2y,
                          spin2z, theta, phi, iota, psi, bank=bank)
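The magnitude-plus-two-angles parameterisation above is a standard spherical-to-Cartesian conversion. A self-contained check that the resulting spin vector keeps the drawn magnitude:

import numpy
from numpy.random import uniform

spinmag = uniform(0, 0.9)
ang1 = uniform(0, numpy.pi)          # polar angle
ang2 = uniform(0, 2 * numpy.pi)      # azimuthal angle
sz = spinmag * numpy.cos(ang1)
sx = spinmag * numpy.sin(ang1) * numpy.cos(ang2)
sy = spinmag * numpy.sin(ang1) * numpy.sin(ang2)
assert numpy.isclose(numpy.sqrt(sx**2 + sy**2 + sz**2), spinmag)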
Code Example #6
def aligned_spin_param_generator(flow, **kwargs):
    """
    Specify the min and max mass of the bigger component, the min and
    max mass of the total mass and the min and max values for the
    z-axis spin angular momentum.
    """
    if 'ns_bh_boundary_mass' in kwargs and 'bh_spin' in kwargs \
            and 'ns_spin' in kwargs:
        # get args
        bh_spin_bounds = kwargs.pop('bh_spin')
        ns_spin_bounds = kwargs.pop('ns_spin')
        ns_bh_boundary = kwargs.pop('ns_bh_boundary_mass')
        # the rest will be checked in the call to urand_tau0tau3_generator

        for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
            spin1 = uniform(*(
                bh_spin_bounds if mass1 > ns_bh_boundary else ns_spin_bounds))
            spin2 = uniform(*(
                bh_spin_bounds if mass2 > ns_bh_boundary else ns_spin_bounds))
            yield mass1, mass2, spin1, spin2
    else:
        # get args
        spin1_bounds = kwargs.pop('spin1', (-1., 1.))
        spin2_bounds = kwargs.pop('spin2', (-1., 1.))
        # the rest will be checked in the call to urand_tau0tau3_generator

        for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
            spin1 = uniform(*spin1_bounds)
            spin2 = uniform(*spin2_bounds)
            yield mass1, mass2, spin1, spin2
Code Example #7
File: tau0tau3.py Project: spxiwh/lalsuite
def aligned_spin_param_generator(flow, **kwargs):
    """
    Specify the min and max mass of the bigger component, the min and
    max mass of the total mass and the min and max values for the
    z-axis spin angular momentum.
    """
    if 'ns_bh_boundary_mass' in kwargs and 'bh_spin' in kwargs \
            and 'ns_spin' in kwargs:
        # get args
        bh_spin_bounds = kwargs.pop('bh_spin')
        ns_spin_bounds = kwargs.pop('ns_spin')
        ns_bh_boundary = kwargs.pop('ns_bh_boundary_mass')
        # the rest will be checked in the call to urand_tau0tau3_generator

        for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
            spin1 = uniform(*(bh_spin_bounds if mass1 > ns_bh_boundary else ns_spin_bounds))
            spin2 = uniform(*(bh_spin_bounds if mass2 > ns_bh_boundary else ns_spin_bounds))
            yield mass1, mass2, spin1, spin2
    else:
        # get args
        spin1_bounds = kwargs.pop('spin1', (-1., 1.))
        spin2_bounds = kwargs.pop('spin2', (-1., 1.))
        # the rest will be checked in the call to urand_tau0tau3_generator

        for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
            spin1 = uniform(*spin1_bounds)
            spin2 = uniform(*spin2_bounds)
            yield mass1, mass2, spin1, spin2
Code Example #8
def IMRPhenomC_param_generator(flow, tmplt_class, bank, **kwargs):
    """
    Generate random parameters for the IMRPhenomC waveform model.
    Specify the min and max mass of the bigger component, then the min
    and max mass of the total mass. This function includes
    restrictions on q and chi based on IMRPhenomC's range of
    believability, namely q <=20 and |chi| <= 0.9.

    @param flow: low frequency cutoff
    @param tmplt_class: Template generation class for this waveform
    @param bank: sbank bank object
    @param kwargs: constraints on waveform parameters. See urand_tau0tau3_generator for more usage help. If no spin limits are specified, the IMRPhenomC limits will be used.
    """

    # get spin limits. IMRPhenomC has special bounds on chi, so we
    # will silently truncate
    s1min, s1max = kwargs.pop('spin1', (-0.9, 0.9))
    s2min, s2max = kwargs.pop('spin2', (s1min, s1max))
    s1min, s1max = (max(-0.9, s1min), min(0.9, s1max))
    s2min, s2max = (max(-0.9, s2min), min(0.9, s2max))

    for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):

        q = max(mass1 / mass2, mass2 / mass1)
        if q <= 20:
            spin1 = uniform(s1min, s1max)
            spin2 = uniform(s2min, s2max)
        else:
            raise ValueError("mass ratio out of range")

        yield tmplt_class(mass1, mass2, spin1, spin2, bank=bank)
Code Example #9
def testFitOfHighOrderData():  
    
    testSize = 5000
    order = 10
    covsSet = [[1.1, 0, 0, 0, 0, -1.89, 0, 0, 9.1, 0],
               [0.03, 3.41, -1.8, 3, 0.091, 5, 30.0, 3.12, -0.02, 1.2],
               [30, 341, -1132, 322, 91, 5231, 30765, 388, 1344, 87]]
    sizes = [10, 100, 1000]
    sigmas = [0.01, 10, 500]
    np.random.seed(20)  # Set the seed so we can get reproducible results
    x_poly = np.array(sorted(uniform(-1, 1, testSize)))
    modelDict = {0: 'LS', 1: 'RLS', 2: 'LASSO', 3: 'RR', 4: 'BR'}
    with open('./outcome.txt', 'w') as f:
        tempWrite = []
        for covs in covsSet:
            for size in sizes:
                for sigma in sigmas:
                    tempWrite.append('\r\n==============paras=============\r\n')
                    tempWrite.append(' covs:'+str(covs))
                    tempWrite.append(' size:'+str(size))
                    tempWrite.append(' sigma:'+str(sigma))
                    
                    y_poly = real_function(4, 6, 0, x_poly, covs)
                    x_samples = np.array(sorted(uniform(-1, 1, size)))
                    y_samples = real_function(4, 6, sigma, x_samples, covs)
                    out = train(x_samples, y_samples, x_poly, y_poly, order-1, plotFlag=False)
                    out = [elem for elem in out if elem is not None]
                    maxELe = np.max(out)
                    minELe = np.min(out)
                    tempWrite.append('\r\n WORST:' + str(modelDict.get(out.index(maxELe))) + ' ,MSE:' + str(maxELe))
                    tempWrite.append('\r\n BEST:' + str(modelDict.get(out.index(minELe))) + ' ,MSE:' + str(minELe))
        f.writelines(tempWrite)
    print('complete!')
Code Example #10
def create_uniform_particles(x_range, y_range, hdg_range, N):
    particles = np.empty((N, 3))
    particles[:, 0] = uniform(x_range[0], x_range[1], size=N)      # x position
    particles[:, 1] = uniform(y_range[0], y_range[1], size=N)      # y position
    particles[:, 2] = uniform(hdg_range[0], hdg_range[1], size=N)  # heading
    particles[:, 2] %= 2 * np.pi   # wrap the heading into [0, 2*pi)
    return particles
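A hedged usage sketch, assuming the function above is in scope together with numpy as np and numpy.random.uniform:

# create 1000 particles over a 20 m x 20 m area with random headings
particles = create_uniform_particles((0, 20), (0, 20), (0, 2 * np.pi), N=1000)
print(particles.shape)   # (1000, 3): one row per particle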
Code Example #11
 def testProperties01(self):
     """The magnitudue of the product is the product of the magnitudes"""
     q1 = Quaternion(
         tuple(RA.uniform(self.min, self.max, (1, 4)).tolist()[0]))
     q2 = Quaternion(
         tuple(RA.uniform(self.min, self.max, (1, 4)).tolist()[0]))
     self.assertEqual((q1 * q2).magnitude(),
                      q1.magnitude() * q2.magnitude())
Code Example #12
File: rmsdtest.py Project: peach-eater/pymol-scripts
 def test_computeRMSD_Random(self):
     """3. for two random sets of points, rmsd(x,y) == rmsd(y,x)"""
     min = -10000.
     max = 10000.
     num_points = 20
     dimension = 3
     point_list_1 = RandomArray.uniform(min, max, (num_points, dimension))
     point_list_2 = RandomArray.uniform(min, max, (num_points, dimension))
     self.assertEqual(
         rmsd.RMSDCalculator(point_list_1).computeRMSD(point_list_2),
         rmsd.RMSDCalculator(point_list_2).computeRMSD(point_list_1))
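The symmetry being tested holds for any plain coordinate RMSD without superposition. A numpy-only sketch of the same property:

import numpy as np

a = np.random.uniform(-10000.0, 10000.0, (20, 3))
b = np.random.uniform(-10000.0, 10000.0, (20, 3))
rmsd_ab = np.sqrt(((a - b) ** 2).sum(axis=1).mean())
rmsd_ba = np.sqrt(((b - a) ** 2).sum(axis=1).mean())
assert np.isclose(rmsd_ab, rmsd_ba)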
Code Example #13
 def test_computeRMSD_Random(self):
     """3. for two random sets of points, rmsd(x,y) == rmsd(y,x)"""
     min = -10000.
     max = 10000.
     num_points = 20
     dimension = 3
     point_list_1 = RandomArray.uniform(min, max, (num_points, dimension))
     point_list_2 = RandomArray.uniform(min, max, (num_points, dimension))
     self.assertEqual(
         rmsd.RMSDCalculator(point_list_1).computeRMSD(point_list_2),
         rmsd.RMSDCalculator(point_list_2).computeRMSD(point_list_1))
Code Example #14
    def testProperties00(self):
        """The product of the conjugate is the conjucate of the product"""
        q1 = Quaternion(tuple(RA.uniform(self.min,
                                         self.max, (1, 4)).tolist()[0]))
        q2 = Quaternion(tuple(RA.uniform(self.min,
                                         self.max, (1, 4)).tolist()[0]))
##          pc = q1.conjugate()*q2.conjugate()
##          qp = q1*q2
##          cp = qp.conjugate()
##          self.assertEqual( pc, cp)
        # the commented code fails with the same error as this line...
        self.assertEqual( q1.conjugate()*q2.conjugate(), (q2*q1).conjugate())
Code Example #15
 def testProperties00(self):
     """The product of the conjugate is the conjucate of the product"""
     q1 = Quaternion(
         tuple(RA.uniform(self.min, self.max, (1, 4)).tolist()[0]))
     q2 = Quaternion(
         tuple(RA.uniform(self.min, self.max, (1, 4)).tolist()[0]))
     ##          pc = q1.conjugate()*q2.conjugate()
     ##          qp = q1*q2
     ##          cp = qp.conjugate()
     ##          self.assertEqual( pc, cp)
     # the commented code fails with the same error as this line...
     self.assertEqual(q1.conjugate() * q2.conjugate(),
                      (q2 * q1).conjugate())
Code Example #16
def aligned_spin_param_generator(flow, tmplt_class, bank, **kwargs):
    """
    Specify the min and max mass of the bigger component, the min and
    max mass of the total mass and the min and max values for the
    z-axis spin angular momentum.
    """
    dur_min, dur_max = kwargs.pop('duration', (None, None))

    if 'ns_bh_boundary_mass' in kwargs and 'bh_spin' in kwargs \
            and 'ns_spin' in kwargs:
        # get args
        bh_spin_bounds = kwargs.pop('bh_spin')
        ns_spin_bounds = kwargs.pop('ns_spin')
        ns_bh_boundary = kwargs.pop('ns_bh_boundary_mass')
        # the rest will be checked in the call to urand_tau0tau3_generator

        for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
            spin1 = uniform(*(bh_spin_bounds if mass1 > ns_bh_boundary else ns_spin_bounds))
            spin2 = uniform(*(bh_spin_bounds if mass2 > ns_bh_boundary else ns_spin_bounds))

            t = tmplt_class(mass1, mass2, spin1, spin2, bank=bank)
            if (dur_min is not None and t._dur < dur_min) \
                    or (dur_max is not None and t._dur > dur_max):
                continue
            yield t
    else:
        # get args
        spin1_bounds = kwargs.pop('spin1', (-1., 1.))
        spin2_bounds = kwargs.pop('spin2', (-1., 1.))

        # the rest will be checked in the call to urand_tau0tau3_generator
        for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):

            mtot = mass1 + mass2
            chis_min = (mass1*spin1_bounds[0] + mass2*spin2_bounds[0])/mtot
            chis_max = (mass1*spin1_bounds[1] + mass2*spin2_bounds[1])/mtot
            chis = uniform(chis_min, chis_max)

            s2min = max(spin2_bounds[0], (mtot*chis - mass1*spin1_bounds[1])/mass2)
            s2max = min(spin2_bounds[1], (mtot*chis - mass1*spin1_bounds[0])/mass2)

            spin2 = uniform(s2min, s2max)
            spin1 = (chis*mtot - mass2*spin2)/mass1

            t = tmplt_class(mass1, mass2, spin1, spin2, bank=bank)
            if (dur_min is not None and t._dur < dur_min) \
                    or (dur_max is not None and t._dur > dur_max):
                continue
            yield t
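The else branch draws a uniform effective spin chi_s and then back-solves the component spins. A self-contained sketch checking that both spins land inside their bounds (a small tolerance absorbs floating-point error):

import numpy as np
from numpy.random import uniform

m1, m2 = 10.0, 1.4
b1 = b2 = (-1.0, 1.0)
mtot = m1 + m2
chis = uniform((m1 * b1[0] + m2 * b2[0]) / mtot, (m1 * b1[1] + m2 * b2[1]) / mtot)
s2 = uniform(max(b2[0], (mtot * chis - m1 * b1[1]) / m2),
             min(b2[1], (mtot * chis - m1 * b1[0]) / m2))
s1 = (chis * mtot - m2 * s2) / m1
eps = 1e-9
assert b1[0] - eps <= s1 <= b1[1] + eps and b2[0] - eps <= s2 <= b2[1] + eps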
Code Example #17
File: tau0tau3.py Project: smirshekari/lalsuite
def aligned_spin_param_generator(flow, **kwargs):
    """
    Specify the min and max mass of the bigger component, the min and
    max mass of the total mass and the min and max values for the
    z-axis spin angular momentum.
    """
    # get args
    spin1_bounds = kwargs.pop('spin1', (-1., 1.))
    spin2_bounds = kwargs.pop('spin2', (-1., 1.))
    # the rest will be checked in the call to urand_tau0tau3_generator

    for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
        spin1 = uniform(*spin1_bounds)
        spin2 = uniform(*spin2_bounds)
        yield mass1, mass2, spin1, spin2
Code Example #18
File: tau0tau3.py Project: spxiwh/sbank
def nonspin_hom_param_generator(flow, tmplt_class, bank, **constraints):
    """
    Wrapper for urand_tau0tau3_generator() to remove spin options
    for EOBNRv2 waveforms.
    """
    constraints.pop('spin1', None)
    constraints.pop('spin2', None)
    for mass1, mass2 in urand_tau0tau3_generator(flow, **constraints):
        theta = uniform(0, numpy.pi)
        phi = uniform(0, 2*numpy.pi)
        psi = uniform(0, 2*numpy.pi)
        iota = uniform(0, numpy.pi)
        orb_phase = uniform(0, 2*numpy.pi)
        yield tmplt_class(mass1, mass2, 0, 0, 0, 0, 0, 0,
                          theta, phi, iota, psi, orb_phase, bank)
Code Example #19
def aligned_spin_param_generator(flow, tmplt_class, bank, **kwargs):
    """
    Specify the min and max mass of the bigger component, the min and
    max mass of the total mass and the min and max values for the
    z-axis spin angular momentum.
    """
    dur_min, dur_max = kwargs.pop('duration', (None, None))

    # define a helper function to apply the appropriate spin bounds
    if 'ns_bh_boundary_mass' in kwargs and 'bh_spin' in kwargs \
            and 'ns_spin' in kwargs:
        bh_spin_bounds = kwargs.pop('bh_spin')
        ns_spin_bounds = kwargs.pop('ns_spin')
        ns_bh_boundary = kwargs.pop('ns_bh_boundary_mass')

        def spin_bounds(mass1, mass2):
            return (bh_spin_bounds if mass1 > ns_bh_boundary else ns_spin_bounds), \
                   (bh_spin_bounds if mass2 > ns_bh_boundary else ns_spin_bounds)
    else:
        spin1b = kwargs.pop('spin1', (-1., 1.))
        spin2b = kwargs.pop('spin2', (-1., 1.))

        def spin_bounds(mass1, mass2):
            return spin1b, spin2b

    # the rest will be checked in the call to urand_tau0tau3_generator
    for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):

        spin1_bounds, spin2_bounds = spin_bounds(mass1, mass2)

        mtot = mass1 + mass2
        chis_min = (mass1 * spin1_bounds[0] + mass2 * spin2_bounds[0]) / mtot
        chis_max = (mass1 * spin1_bounds[1] + mass2 * spin2_bounds[1]) / mtot
        chis = uniform(chis_min, chis_max)

        s2min = max(spin2_bounds[0],
                    (mtot * chis - mass1 * spin1_bounds[1]) / mass2)
        s2max = min(spin2_bounds[1],
                    (mtot * chis - mass1 * spin1_bounds[0]) / mass2)

        spin2 = uniform(s2min, s2max)
        spin1 = (chis * mtot - mass2 * spin2) / mass1

        t = tmplt_class(mass1, mass2, spin1, spin2, bank=bank)
        if (dur_min is not None and t.dur < dur_min) \
                or (dur_max is not None and t.dur > dur_max):
            continue
        yield t
Code Example #20
        return [pasta, nome, serie]
    
    def series_lineares_abruptas_revista(self, tamanho_conceitos, qtd_series, grafico):
        '''
        Method to create the linear series used in the ICTAI paper.
        :param tamanho_conceitos: number of observations in each concept
        :param qtd_series: number of series to be created
        :param grafico: boolean flag to plot the series after it is created
        '''

        observacoes_iniciais = array([uniform(0, 0.5) for i in range(4)])
        variancia = 0.02
        
        self.modelo_AR(parametros=[0.14876092573738822, 0.05087244788237593, 0.4330193805067835, 0.3667339588762431], variancia=variancia, tamanho_do_conceito=tamanho_conceitos, observacoes_iniciais=observacoes_iniciais)
        self.modelo_AR(parametros=[-0.318229212036593, 0.4133521130815502, 1.14841972367221, -0.24486472090297637], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.00301790735789223, -0.3277435418893056, 0.14635639512590287, 1.171740105825536], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.4429405258368569, 0.4466229373805038, 1.351792157828681, -0.3561327432116702], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.026851477518947557, 0.22016898814054223, -0.03814933593273199, 0.8447046999175475], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.4785914515620302, 0.8558602481837317, 0.024539136949191378, 0.5980008075169353], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.026851477518947557, 0.22016898814054223, -0.03814933593273199, 0.8447046999175475], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.4429405258368569, 0.4466229373805038, 1.351792157828681, -0.3561327432116702], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.00301790735789223, -0.3277435418893056, 0.14635639512590287, 1.171740105825536], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.318229212036593, 0.4133521130815502, 1.14841972367221, -0.24486472090297637], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        
        serie = self.Obter_Serie()
        nome = "lin-abt" 
        pasta = "lineares/abruptas/"
        
        if(grafico == True):
            self.Plotar()
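modelo_AR itself is project code, but the concept-drift construction above (one AR process per concept, each with its own coefficients) can be sketched with a hypothetical, simplified AR simulator:

import numpy as np
from numpy.random import normal, uniform

def simulate_ar(params, variance, n, init):
    """Hypothetical stand-in for modelo_AR: AR(p) with Gaussian noise."""
    x = list(init)
    p = len(params)
    for _ in range(n):
        x.append(float(np.dot(params, x[-p:])) + normal(0.0, np.sqrt(variance)))
    return np.array(x)

start = uniform(0, 0.5, 4)   # four initial observations, as in the method above
coeffs = [0.14876092573738822, 0.05087244788237593, 0.4330193805067835, 0.3667339588762431]
concept = simulate_ar(coeffs, 0.02, 200, start)
print(concept.shape)         # (204,): the initial points plus one concept's worth of data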
Code Example #21
     return [pasta, nome, serie]    
         
 def series_hibridas_ictai(self, tamanho_conceitos, qtd_series, grafico): 
     '''
      Method to create the hybrid series used in the ICTAI paper.
      :param tamanho_conceitos: number of observations in each concept
      :param qtd_series: number of series to be created
      :param grafico: boolean flag to plot the series after it is created
     '''
     
     variancia = 0.1
     
     observacoes_iniciais = array([uniform(-2, 2) for i in range(3)])             
     self.modelo_AR(parametros=[0.003, -0.005, 1.0], variancia=variancia, tamanho_do_conceito=tamanho_conceitos, observacoes_iniciais= observacoes_iniciais)
     #self.modelo_AR(parametros=[0.003, -0.005, 1.0], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_sazonal(tendencia=1, beta_vetor=self.observacoes, variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_nlinear2(parametros=[0.059, 0.086, 0.62, 0.21], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_AR(parametros=[0.018, 0.95, 0.032], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_sazonal(tendencia=2, beta_vetor=self.observacoes, variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_nlinear1(parametros=[0.55, 0.024, 0.41, 0.009], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_AR(parametros=[0.018, 0.95, 0.032], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_sazonal(tendencia=2, beta_vetor=self.observacoes, variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_nlinear2(parametros=[0.059, 0.086, 0.62, 0.21], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
     self.modelo_nlinear1(parametros=[0.55, 0.024, 0.41, 0.009], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
         
     serie = self.Obter_Serie()
     pasta = "hibridas/"
     nome = "hib-"
     
     if(grafico == True):
         self.Plotar()
Code Example #22
    def setUp(self):
        """Called for every test."""
        self.decimals = 4 # for Numeric.around; 7 is SciPy default.
        self.idmtx = Transformation().getMatrix(transpose=1)

        self.known_points = [ [1., 0., 2.],
                              [1., 0., 1.],
                              [1., 1., 1.],
                              [0., 0., 1.],
                              [0., 0., 0.],
                              [0., 1., 0.],
                              [0., 1., -1.],
                              [1., 1., -1.],
                              [1., 2., -1.],
                              [1., 1., -2.]]
        npts = len(self.known_points)
        dim = 3
        self.max = 9999999.
        self.min = -self.max
        self.random_points = RandomArray.uniform(self.min,
                                                 self.max, (npts,dim)).tolist()

        # create a simple torsion system for both point lists
        torTree = TestTorTree()
        torTree.append(TestTorsion(4, 3, [0,1,2]))
        torTree.append(TestTorsion(3, 1, [0,2]))
        torTree.append(TestTorsion(6, 7, [8,9]))
        self.torTree = torTree
Code Example #23
        del df

    def series_lineares_ictai(self, tamanho_conceitos, qtd_series, grafico):
        '''
        Method to create the linear series used in the ICTAI paper.
        :param tamanho_conceitos: number of observations in each concept
        :param qtd_series: number of series to be created
        :param grafico: boolean flag to plot the series after it is created
        '''

        observacoes_iniciais = array([uniform(-2, 2) for i in range(3)])
        variancia = 0.1
        
        self.modelo_AR(parametros=[0.42, 0.28, 0.005], variancia=variancia, tamanho_do_conceito=tamanho_conceitos, observacoes_iniciais=observacoes_iniciais)
        #self.modelo_AR(parametros=[0.42, 0.28, 0.005], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.003, -0.005, 1.0], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.018, 0.95, 0.032], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.11, 0.32, -0.028, 0.038, 0.48], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.032, 0.41, -0.24, -0.22, 1.0], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.14, -0.29, 0.0025, 1.0], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.032, 0.41, -0.24, -0.22, 1.0], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.018, 0.95, 0.032], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.003, -0.005, 1.0], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.42, 0.28, 0.005], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        
        serie = self.Obter_Serie()
        nome = "lin-" 
        pasta = "lineares/"
        
        if(grafico == True):
            self.Plotar()
Code Example #24
    def setUp(self):
        """Called for every test."""
        self.decimals = 2 # for rounding; 7 is SciPy default.

        self.known_points = [ [1., 0., 2.],
                              [1., 0., 1.],
                              [1., 1., 1.],
                              [0., 0., 1.],
                              [0., 0., 0.],
                              [0., 1., 0.],
                              [0., 1., -1.],
                              [1., 1., -1.],
                              [1., 2., -1.],
                              [1., 1., -2.]]
        # create a simple torsion system for known_points
        torTree = TestTorTree()
        torTree.append(TestTorsion(4, 3, [0,1,2]))
        torTree.append(TestTorsion(3, 1, [0,2]))
        torTree.append(TestTorsion(6, 7, [8,9]))
        self.torTree = torTree

        npts = 5
        dim = 3
        self.max = 9999.
        self.min = -self.max
        self.random_points = RandomArray.uniform(self.min,
                                                 self.max, (npts,dim)).tolist()
Code Example #25
 def test_applyTranslation03(self):
     """applyTranslation03 -- random pts x (random there and back)"""
     state = StateToCoords(self.random_points, tolist=1)
     trn = RandomArray.uniform(self.min, self.max, (3,))
     state.applyTranslation(trn)
     trn = -1*trn
     self.assertArrayEqual(self.random_points, state.applyTranslation(trn))
Code Example #26
 def test_applyQuaternion04(self):
     """applyQuaternion04  -- random pts 360 about random-axis"""
     state = StateToCoords(self.random_points, tolist=1)
     q = RandomArray.uniform(self.min, self.max, (4,))
     q[3] = 360.0
     result = state.applyQuaternion(q)
     self.assertArrayEqual(self.random_points, result)
Code Example #27
        return [pasta, nome, serie]
    
    def series_nlineares_graduais_revista(self, tamanho_conceitos, qtd_series, grafico):
        '''
        Method to create the nonlinear series used in the ICTAI paper.
        :param tamanho_conceitos: number of observations in each concept
        :param qtd_series: number of series to be created
        :param grafico: boolean flag to plot the series after it is created
        '''

        observacoes_iniciais = array([uniform(0, 0.5) for i in range(4)])
        variancia = 0.02
        
        self.modelo_nlinear1(parametros=[0.0203825939140348, 0.14856960377126693, 0.12154840218302701, 0.6913077037309644], variancia=variancia, tamanho_do_conceito=tamanho_conceitos, observacoes_iniciais=observacoes_iniciais)
        self.modelo_nlinear1(parametros=[0.21432208811179806, 0.1747177586312132, 0.25627781880181116, 0.34924372007037097], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.6747722679575656, 0.0400499490190765, 0.12859434021708172, 0.1411115580708043], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.2592127990366997, 0.18679044833178132, 0.2510160243812225, 0.29144511960870556], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.33266433992324546, -0.11265182345778371, 0.05425610414373307, 0.715124757201], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.1779748207049134, -0.09139762327444532, 0.3628849251594744, 0.5451838112044337], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.33266433992324546, -0.11265182345778371, 0.05425610414373307, 0.715124757201], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.2592127990366997, 0.18679044833178132, 0.2510160243812225, 0.29144511960870556], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.6747722679575656, 0.0400499490190765, 0.12859434021708172, 0.1411115580708043], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.21432208811179806, 0.1747177586312132, 0.25627781880181116, 0.34924372007037097], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        
        
        serie = self.Obter_Serie()
        nome = "nlin-grad" 
        pasta = "nlineares/graduais/"
        
        if(grafico == True):
            self.Plotar()
Code Example #28
        return [pasta, nome, serie]
    
    def series_nlineares_abruptas_revista(self, tamanho_conceitos, qtd_series, grafico):
        '''
        Method to create the nonlinear series used in the ICTAI paper.
        :param tamanho_conceitos: number of observations in each concept
        :param qtd_series: number of series to be created
        :param grafico: boolean flag to plot the series after it is created
        '''

        observacoes_iniciais = array([uniform(0, 0.5) for i in range(4)])
        variancia = 0.02
        
        self.modelo_nlinear1(parametros=[-0.06658679980732536, 0.23421353635081468, 0.15495114023325046, 0.6768101219541569], variancia=variancia, tamanho_do_conceito=tamanho_conceitos, observacoes_iniciais=observacoes_iniciais)
        self.modelo_nlinear1(parametros=[-0.506870130353138, 0.2589111765633722, 1.3970013340136547, -0.14964763809967313], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[-0.4387715888915295, 0.3747437070432394, 1.3330941335780706, -0.26908562619916504], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.06975366774909564, -0.05196107339800573, 0.6352865482608727, 0.3344985733604905], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[-0.4429405258368569, 0.4466229373805038, 1.351792157828681, -0.3561327432116702], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[-0.2763541765783329, 0.3343598857377247, 0.4102952504128611, 0.5315753100371876], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[-0.4429405258368569, 0.4466229373805038, 1.351792157828681, -0.3561327432116702], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.06975366774909564, -0.05196107339800573, 0.6352865482608727, 0.3344985733604905], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[-0.506870130353138, 0.2589111765633722, 1.3970013340136547, -0.14964763809967313], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[-0.06658679980732536, 0.23421353635081468, 0.15495114023325046, 0.6768101219541569], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        
        serie = self.Obter_Serie()
        nome = "nlin-abt" 
        pasta = "nlineares/abruptas/"
        
        if(grafico == True):
            self.Plotar()
Code Example #29
        return [pasta, nome, serie]
    
    def series_lineares_graduais_revista(self, tamanho_conceitos, qtd_series, grafico):
        '''
        Method to create the linear series used in the ICTAI paper.
        :param tamanho_conceitos: number of observations in each concept
        :param qtd_series: number of series to be created
        :param grafico: boolean flag to plot the series after it is created
        '''

        observacoes_iniciais = array([uniform(0, 0.5) for i in range(4)])
        variancia = 0.02
        
        self.modelo_AR(parametros=[0.006607488803146307, -0.2529881594354167, 0.8552562304577513, 0.3905674250981309], variancia=variancia, tamanho_do_conceito=tamanho_conceitos, observacoes_iniciais=observacoes_iniciais)
        self.modelo_AR(parametros=[-0.4429405258368569, 0.4466229373805038, 1.351792157828681, -0.3561327432116702], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.00301790735789223, -0.3277435418893056, 0.14635639512590287, 1.171740105825536], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.33266433992324546, -0.11265182345778371, 0.05425610414373307, 0.7151247572018152], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.6335599337412476, 0.334852076313965, 1.3595185287185048, -0.07363691675509275], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.44071503228306824, 0.07407529241129636, 1.2573688275751191, 0.1076310711909298], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.6335599337412476, 0.334852076313965, 1.3595185287185048, -0.07363691675509275], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.33266433992324546, -0.11265182345778371, 0.05425610414373307, 0.7151247572018152], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[0.00301790735789223, -0.3277435418893056, 0.14635639512590287, 1.171740105825536], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_AR(parametros=[-0.4429405258368569, 0.4466229373805038, 1.351792157828681, -0.3561327432116702], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        
        serie = self.Obter_Serie()
        nome = "lin-grad" 
        pasta = "lineares/graduais/"
        
        if(grafico == True):
            self.Plotar()
Code Example #30
        return [pasta, nome, serie]

    def series_nlineares_ictai(self, tamanho_conceitos, qtd_series, grafico):
        '''
        Method to create the nonlinear series used in the ICTAI paper.
        :param tamanho_conceitos: number of observations in each concept
        :param qtd_series: number of series to be created
        :param grafico: boolean flag to plot the series after it is created
        '''
        
        observacoes_iniciais = array([uniform(-2, 2) for i in range(4)])
        
        variancia = 0.1
        
        self.modelo_nlinear1(parametros=[0.55, 0.024, 0.41, 0.009], variancia=variancia, tamanho_do_conceito=tamanho_conceitos, observacoes_iniciais=observacoes_iniciais)
        #self.modelo_nlinear1(parametros=[0.55, 0.024, 0.41, 0.009], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear2(parametros=[0.059, 0.086, 0.62, 0.21], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.47, 0.57, 0.14, -0.19], variancia=0.3, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.55, 0.024, 0.41, 0.009], variancia=0.4, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear2(parametros=[0.059, 0.086, 0.62, 0.21], variancia=0.4, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear2(parametros=[0.55, 1.0, 0.0028, -0.58], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear2(parametros=[0.059, 0.086, 0.62, 0.21], variancia=0.4, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.55, 0.024, 0.41, 0.009], variancia=0.4, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear1(parametros=[0.47, 0.57, 0.14, -0.19], variancia=0.3, tamanho_do_conceito=tamanho_conceitos)
        self.modelo_nlinear2(parametros=[0.059, 0.086, 0.62, 0.21], variancia=variancia, tamanho_do_conceito=tamanho_conceitos)
        
        serie = self.Obter_Serie()
        pasta = "nlineares/"
        nome = "nlin-"
        
        if(grafico == True):
            self.Plotar()
Code Example #31
def test_large_collision_domain_network__smoke():
    """In this test we validate that all stations are really in a single
    domain and run the model for some time. We actually do not test any
    meaningful properties, except connections and that only server receives
    data.
    """
    num_stations = randint(5, 15)
    source_interval = Exponential(uniform(1.0, 10.0))
    payload_size = Exponential(randint(10, 100))

    sr = simulate(
        CollisionDomainNetwork,
        stime_limit=500,
        params=dict(
            num_stations=num_stations,
            payload_size=payload_size,
            source_interval=source_interval,
            mac_header_size=MAC_HEADER,
            phy_header_size=PHY_HEADER,
            ack_size=ACK_SIZE,
            preamble=PREAMBLE,
            bitrate=BITRATE,
            difs=DIFS,
            sifs=SIFS,
            slot=SLOT,
            cwmin=CWMIN,
            cwmax=CWMAX,
            connection_radius=CONNECTION_RADIUS,
            speed_of_light=SPEED_OF_LIGHT,
            queue_capacity=None,
        ),
        loglevel=Logger.Level.WARNING
    )

    access_point = sr.data.stations[0]
    clients = sr.data.stations[1:]
    conn_man = sr.data.connection_manager

    # Test that connections are established between all stations:
    for i in range(num_stations):
        radio = sr.data.get_iface(i).radio
        peers = set(conn_man.get_peers(radio))
        assert len(peers) == num_stations - 1 and radio not in peers

    # Test that the number of packets received by any client sink is 0:
    for client in clients:
        assert client.sink.num_packets_received == 0

    # Test that the number of packets generated by the sources - (queue sizes
    # + number of packets in transceivers) at the end of simulation is
    # almost equal to the number of received packets by the access point sink:
    num_packets_sent = [
        (cli.source.num_packets_sent -
         cli.interfaces[0].queue.size() -
         (1 if cli.interfaces[0].transmitter.state else 0))
        for cli in clients
    ]
    num_packets_received = access_point.sink.num_packets_received
    assert_allclose(sum(num_packets_sent), num_packets_received, rtol=0.05)
Code Example #32
File: tau0tau3.py Project: GeraintPratten/lalsuite
def nonspin_hom_param_generator(flow, tmplt_class, bank, **constraints):
    """
    Wrapper for urand_tau0tau3_generator() to remove spin options
    for EOBNRv2 waveforms.
    """
    constraints.pop('spin1', None)
    constraints.pop('spin2', None)
    for mass1, mass2 in urand_tau0tau3_generator(flow, **constraints):
        theta = uniform(0, numpy.pi)
        phi = uniform(0, 2*numpy.pi)
        psi = uniform(0, 2*numpy.pi)
        iota = uniform(0, numpy.pi)
        orb_phase = uniform(0, 2*numpy.pi)
        yield tmplt_class(mass1, mass2, 0, 0, 0, 0, 0, 0,
                          theta, phi, iota, psi, orb_phase, bank)
Code Example #33
 def test_applyQuaternion06(self):
     """applyQuaternion06  -- random pts 2*180 about random-axis"""
     state = StateToCoords(self.random_points, tolist=1)
     q = RandomArray.uniform(self.min, self.max, (4,))
     q[3] = 180.0
     for n in xrange(2):
         result = state.applyQuaternion(q)
     self.assertArrayEqual(self.random_points, result)
Code Example #34
def single_spin_precessing_param_generator(flow, tmplt_class, bank, **kwargs):
    """
    Currently a stub to test precessing template generation.
    """
    spin1_bounds = kwargs.pop('spin1', (0., 0.9))
    spin2_bounds = kwargs.pop('spin2', (0., 0.9))

    for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
        # Choose the rest from hardcoded limits
        spin1mag = uniform(*spin1_bounds)
        spin1ang1 = uniform(0, numpy.pi)
        spin1ang2 = uniform(0, 2 * numpy.pi)
        spin1z = spin1mag * numpy.cos(spin1ang1)
        spin1x = spin1mag * numpy.sin(spin1ang1) * numpy.cos(spin1ang2)
        spin1y = spin1mag * numpy.sin(spin1ang1) * numpy.sin(spin1ang2)

        # Check orientation angles use correct limits
        theta = uniform(0, numpy.pi)
        phi = uniform(0, 2 * numpy.pi)
        psi = uniform(0, 2 * numpy.pi)
        iota = uniform(0, numpy.pi)
        yield tmplt_class(mass1,
                          mass2,
                          spin1x,
                          spin1y,
                          spin1z,
                          theta,
                          phi,
                          iota,
                          psi,
                          bank=bank)
Code Example #35
    def setUp(self):
        """Called for every test."""

        npts = 500
        dim = 3
        self.max = 9999999.
        self.min = -self.max
        self.random_points = RandomArray.uniform(self.min, self.max,
                                                 (npts, dim)).tolist()
Code Example #36
def add_poisson_gaussian_noise(image,
                               alpha=5,
                               sigma=0.01,
                               sap=0.0,
                               quant_bits=8,
                               dtype=numpy.float32,
                               clip=True,
                               fix_seed=True):
    if fix_seed:
        numpy.random.seed(0)
    rnd = normal(size=image.shape)
    rnd_bool = uniform(size=image.shape) < sap

    noisy = image + numpy.sqrt(alpha * image + sigma**2) * rnd
    noisy = noisy * (1 - rnd_bool) + rnd_bool * uniform(size=image.shape)
    noisy = numpy.around((2**quant_bits) * noisy) / 2**quant_bits
    noisy = numpy.clip(noisy, 0, 1) if clip else noisy
    noisy = noisy.astype(dtype)
    return noisy
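A hedged usage sketch, assuming the function above is importable and that numpy plus numpy.random's normal and uniform are in scope in its module:

import numpy
from numpy.random import uniform

clean = uniform(size=(64, 64)).astype(numpy.float32)          # toy image in [0, 1)
noisy = add_poisson_gaussian_noise(clean, alpha=5, sigma=0.01, sap=0.01)
print(noisy.dtype, float(noisy.min()), float(noisy.max()))    # clipped to [0, 1]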
Code Example #37
def IMRPhenomB_param_generator(flow, tmplt_class, bank, **kwargs):
    """
    Specify the min and max mass of the bigger component, then
    the min and max mass of the total mass. This function includes
    restrictions on q and chi based on IMRPhenomB's range of believability.
    Ref: http://arxiv.org/pdf/0908.2090

    @param flow: Lower frequency at which to generate waveform
    @param tmplt_class: Template generation class for this waveform
    @param bank: sbank bank object
    @param kwargs: must specify a component_mass range; mtotal, q, chi, and tau0
    ranges are optional. If no chi is specified, the IMRPhenomB limits will be used.
    See urand_tau0tau3_generator for more usage help.
    """
    # get args

    # FIXME: PhenomB ignores spin2 and therefore requires symmetry in
    # the spins. In BBH use cases, this is OK, but for NSBH
    # applications this is undesired. The weird chi-q bounds make
    # implementing this tricky.
    import warnings
    smin, smax = kwargs.pop('spin1', (-1.0, 1.0))
    kwargs.pop('spin2', None)  # spin2 bounds are ignored; see the FIXME above
    warnings.warn(
        "PhenomB: spin2 limits not implemented. Using spin1 limits for both components."
    )
    # the rest will be checked in the call to urand_tau0tau3_generator

    # IMRPhenomB has special bounds on chi, so we will silently truncate
    chi_low_bounds = (max(-0.85, smin), min(0.85, smax))
    chi_high_bounds = (max(-0.5, smin), min(0.75, smax))
    for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
        q = max(mass1 / mass2, mass2 / mass1)
        if 4 < q <= 10:
            spin1 = uniform(
                *chi_high_bounds)  #guaranteed to give chi in correct range
            spin2 = uniform(*chi_high_bounds)
        elif 1 <= q <= 4:
            spin1 = uniform(
                *chi_low_bounds)  #guaranteed to give chi in correct range
            spin2 = uniform(*chi_low_bounds)
        else:
            raise ValueError("mass ratio out of range")
        yield tmplt_class(mass1, mass2, spin1, spin2, bank=bank)
Code Example #38
def urand_mtotal_generator(mtotal_min, mtotal_max):
    """
    This is a generator for random total mass values corresponding to a
    uniform distribution of mass pairs in (tau0, tau3) space.  See also
    urand_eta_generator(), and see LIGO-T1300127 for details.
    """
    alpha = mtotal_min*(1-(mtotal_min/mtotal_max)**(7./3.))**(-3./7.)
    beta = (mtotal_min/mtotal_max)**(7./3.)/(1-(mtotal_min/mtotal_max)**(7./3.))
    n = -3./7.
    while 1:   # NB: "while 1" is inexplicably much faster than "while True"
        yield alpha*(uniform(0, 1)+beta)**n
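Because the map is an inverse-CDF transform of uniform(0, 1), every yielded value stays inside (mtotal_min, mtotal_max]. A hedged check, assuming the generator above is in scope with numpy.random's uniform (the tiny tolerance absorbs floating-point rounding):

from itertools import islice

masses = list(islice(urand_mtotal_generator(2.0, 100.0), 1000))
assert all(2.0 < m <= 100.0 + 1e-9 for m in masses)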
Code Example #39
File: tau0tau3.py Project: spxiwh/sbank
def urand_mtotal_generator(mtotal_min, mtotal_max):
    """
    This is a generator for random total mass values corresponding to a
    uniform distribution of mass pairs in (tau0, tau3) space.  See also
    urand_eta_generator(), and see LIGO-T1300127 for details.
    """
    alpha = mtotal_min*(1-(mtotal_min/mtotal_max)**(7./3.))**(-3./7.)
    beta = (mtotal_min/mtotal_max)**(7./3.)/(1-(mtotal_min/mtotal_max)**(7./3.))
    n = -3./7.
    while 1:   # NB: "while 1" is inexplicably much faster than "while True"
        yield alpha*(uniform(0, 1)+beta)**n
Code Example #40
def urand_eta_generator(eta_min, eta_max):
    """
    This is a generator for random eta (symmetric mass ratio) values
    corresponding to a uniform distribution of mass pairs in (tau0, tau3)
    space.  See also urand_mtotal_generator(), and see LIGO-T1300127 for
    details.
    """
    alpha = eta_min / sqrt(1 - (eta_min / eta_max)**2)
    beta = (eta_min / eta_max)**2 / (1 - (eta_min / eta_max)**2)
    while 1:  # NB: "while 1" is inexplicably much faster than "while True"
        yield alpha / sqrt(uniform(0, 1) + beta)
Code Example #41
def test_infinite_queue_stores_many_enough_packets():
    n = 50
    packets = [
        NetworkPacket(data=AppData(0, uniform(0, 1000), 0, 0))
        for _ in range(n)
    ]
    times = list(cumsum(uniform(0, 20, n)))

    sim = Mock()
    sim.stime = 0

    queue = Queue(sim)

    for pkt, t in zip(packets, times):
        sim.stime = t
        queue.push(pkt)

    assert queue.size() == n
    assert len(queue.size_trace) == n + 1
    assert queue.num_dropped == 0
Code Example #42
def urand_eta_generator(eta_min, eta_max):
    """
    This is a generator for random eta (symmetric mass ratio) values
    corresponding to a uniform distribution of mass pairs in (tau0, tau3)
    space.  See also urand_mtotal_generator(), and see LIGO-T1300127 for
    details.
    """
    alpha = eta_min/sqrt(1-(eta_min/eta_max)**2)
    beta = (eta_min/eta_max)**2/(1-(eta_min/eta_max)**2)
    while 1:   # NB: "while 1" is inexplicably much faster than "while True"
        yield alpha/sqrt(uniform(0, 1)+beta)
Code Example #43
def derive_qois(data: DataFrame,
                rstar: tuple = None,
                teff: tuple = None,
                distance_unit: Unit = R_jup):
    df = data.copy()
    ns = df.shape[0]

    df['period'] = period = df.p.values if 'p' in df else df.pr.values

    if 'k2_true' in df:
        df['k_true'] = sqrt(df.k2_true)
    if 'k2_app' in df:
        df['k_app'] = sqrt(df.k2_app)

    if 'k2_true' in df and 'k2_app' in df:
        df['cnt'] = 1. - df.k2_app / df.k2_true

    if 'g' in df:
        if 'k' in df:
            df['b'] = df.g * (1 + df.k)
        elif 'k_true' in df:
            df['b'] = df.g * (1 + df.k_true)

    df['a'] = as_from_rhop(df.rho.values, period)
    df['inc'] = i_from_ba(df.b.values, df.a.values)
    df['t14'] = d_from_pkaiews(period, df.k_true.values, df.a.values,
                               df.inc.values, 0.0, 0.0, 1)
    df['t14_h'] = 24 * df.t14

    if rstar is not None:
        from astropy.units import R_sun
        rstar_d = (normal(*rstar, size=ns) * R_sun).to(distance_unit).value
        df['r_app'] = df.k_app.values * rstar_d
        df['r_true'] = df.k_true.values * rstar_d
        df['a_au'] = df.a * (rstar_d * distance_unit).to(AU)

    if teff is not None:
        df['teq_p'] = equilibrium_temperature(normal(*teff, size=ns), df.a,
                                              uniform(0.25, 0.50, ns),
                                              uniform(0, 0.4, ns))
    return df
Code Example #44
File: rmsdtest.py Project: peach-eater/pymol-scripts
 def test_computeRMSD_RandomOffset(self):
     """5. offset point by random value returns offset*sqrt(3)"""
     min = -10000.
     max = 10000.
     num_points = 20
     dimension = 3
     point_list_1 = RandomArray.uniform(min, max, (num_points, dimension))
     delta = point_list_1[0][0]
     point_list_2 = point_list_1 + delta
     answer = rmsd.RMSDCalculator(point_list_1).computeRMSD(point_list_2)
     self.assertEqual(round(answer, self.decimals),
                      round(abs(delta) * math.sqrt(3.0), self.decimals))
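The expected value in this test follows from shifting every coordinate by the same delta, so each point contributes 3*delta^2 to the mean square. A numpy-only check of that identity (plain RMSD, no superposition):

import numpy as np

pts = np.random.uniform(-10000.0, 10000.0, (20, 3))
delta = pts[0, 0]
shifted = pts + delta
rmsd_val = np.sqrt(((pts - shifted) ** 2).sum(axis=1).mean())
assert np.isclose(rmsd_val, abs(delta) * np.sqrt(3.0))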
Code Example #45
def IMRPhenomB_param_generator(flow, tmplt_class, bank, **kwargs):
    """
    Specify the min and max mass of the bigger component, then
    the min and max mass of the total mass. This function includes
    restrictions on q and chi based on IMRPhenomB's range of believability.
    Ref: http://arxiv.org/pdf/0908.2090

    @param flow: Lower frequency at which to generate waveform
    @param tmplt_class: Template generation class for this waveform
    @param bank: sbank bank object
    @param kwargs: must specify a component_mass range; mtotal, q, chi, and tau0
    ranges are optional. If no chi is specified, the IMRPhenomB limits will be used.
    See urand_tau0tau3_generator for more usage help.
    """
    # get args

    # FIXME: PhenomB ignores spin2 and therefore requires symmetry in
    # the spins. In BBH use cases, this is OK, but for NSBH
    # applications this is undesired. The weird chi-q bounds make
    # implementing this tricky.
    import warnings
    smin, smax = kwargs.pop('spin1', (-1.0, 1.0))
    kwargs.pop('spin2', None)  # spin2 bounds are ignored; see the FIXME above
    warnings.warn("PhenomB: spin2 limits not implemented. Using spin1 limits for both components.")
    # the rest will be checked in the call to urand_tau0tau3_generator

    # IMRPhenomB has special bounds on chi, so we will silently truncate
    chi_low_bounds = (max(-0.85, smin), min(0.85, smax))
    chi_high_bounds = (max(-0.5, smin), min(0.75, smax))
    for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
        q = max(mass1/mass2, mass2/mass1)
        if 4 < q <= 10:
            spin1 = uniform(*chi_high_bounds) #guaranteed to give chi in correct range
            spin2 = uniform(*chi_high_bounds)
        elif 1 <= q <= 4:
            spin1 = uniform(*chi_low_bounds) #guaranteed to give chi in correct range
            spin2 = uniform(*chi_low_bounds)
        else:
            raise ValueError("mass ratio out of range")
        yield tmplt_class(mass1, mass2, spin1, spin2, bank=bank)
Code Example #46
 def test_computeRMSD_RandomOffset(self):
     """5. offset point by random value returns offset*sqrt(3)"""
     min = -10000.
     max = 10000.
     num_points = 20
     dimension = 3
     point_list_1 = RandomArray.uniform(min, max, (num_points, dimension))
     delta = point_list_1[0][0]
     point_list_2 = point_list_1 + delta
     answer = rmsd.RMSDCalculator(point_list_1).computeRMSD(point_list_2)
     self.assertEqual(
         round(answer, self.decimals),
         round(abs(delta)*math.sqrt(3.0), self.decimals))
Code Example #47
File: tau0tau3.py Project: cpankow/lalsuite
def aligned_spin_param_generator(flow, **kwargs):
    """
    Specify the min and max mass of the bigger component, the min and
    max mass of the total mass and the min and max values for the
    z-axis spin angular momentum.
    """
    if 'ns_bh_boundary_mass' in kwargs and 'bh_spin' in kwargs \
            and 'ns_spin' in kwargs:
        # get args
        bh_spin_bounds = kwargs.pop('bh_spin')
        ns_spin_bounds = kwargs.pop('ns_spin')
        ns_bh_boundary = kwargs.pop('ns_bh_boundary_mass')
        # the rest will be checked in the call to urand_tau0tau3_generator

        for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):
            spin1 = uniform(*(bh_spin_bounds if mass1 > ns_bh_boundary else ns_spin_bounds))
            spin2 = uniform(*(bh_spin_bounds if mass2 > ns_bh_boundary else ns_spin_bounds))
            yield mass1, mass2, spin1, spin2
    else:
        # get args
        spin1_bounds = kwargs.pop('spin1', (-1., 1.))
        spin2_bounds = kwargs.pop('spin2', (-1., 1.))

        # the rest will be checked in the call to urand_tau0tau3_generator
        for mass1, mass2 in urand_tau0tau3_generator(flow, **kwargs):

            mtot = mass1 + mass2
            chis_min = (mass1*spin1_bounds[0] + mass2*spin2_bounds[0])/mtot
            chis_max = (mass1*spin1_bounds[1] + mass2*spin2_bounds[1])/mtot
            chis = uniform(chis_min, chis_max)

            s2min = max(spin2_bounds[0], (mtot*chis - mass1*spin1_bounds[1])/mass2)
            s2max = min(spin2_bounds[1], (mtot*chis - mass1*spin1_bounds[0])/mass2)

            spin2 = uniform(s2min, s2max)
            spin1 = (chis*mtot - mass2*spin2)/mass1

            yield mass1, mass2, spin1, spin2
Code Example #48
 def create_pv_population(self, npop=50):
     pvp = zeros((0, len(self.ps)))
     npv, i = 0, 0
     while npv < npop and i < 10:
         pvp_trial = self.ps.sample_from_prior(npop)
         pvp_trial[:, 5] = pvp_trial[:, 4]
         cref = uniform(0, 0.99, size=npop)
         pvp_trial[:, 4] = pvp_trial[:, 5] / (1. - cref)
         lnl = self.lnposterior(pvp_trial)
         ids = where(isfinite(lnl))
         pvp = concatenate([pvp, pvp_trial[ids]])
         npv = pvp.shape[0]
         i += 1
     pvp = pvp[:npop]
     return pvp
Code Example #49
    def Criar_Particula(self):
        for i in range(self.numero_particulas):
            p = Particulas()
            p.dimensao = array([
                uniform(posMin, posMax) for i in range(self.numero_dimensoes)
            ])
            p.fitness = self.Funcao(p.dimensao)
            p.velocidade = array([0.0 for i in range(self.numero_dimensoes)])
            p.best = p.dimensao
            p.fit_best = p.fitness
            p.c1 = self.c1_fixo
            p.c2 = self.c2_fixo
            p.inercia = self.inercia_inicial
            p.phi = 0
            self.particulas.append(p)

        self.gbest = self.particulas[0]
コード例 #50
0
 def generate_trajectory(
     scenario: Scenario,
     planning_problem: PlanningProblem,
     time_steps: int,
     max_tries: int = 1000
 ) -> Tuple[TrajectoryPrediction, List[VehicleInfo]]:
     """
     Generates a linear trajectory that is as straight as possible.
     :param scenario: The scenario the car is driving in.
     :param planning_problem: The planning problem to solve.
     :param time_steps: The number of time steps to simulate.
     :param max_tries: The maximum number of tries to find a valid next state. If this number is exceeded at some
     point during generation, the trajectory may have fewer time steps than specified.
     :return: A tuple containing the generated prediction for a trajectory and a list containing drawable
     representations of all the states of the trajectory.
     """
     shape: Shape = Rectangle(DrawConfig.car_length, DrawConfig.car_width,
                              planning_problem.initial_state.position,
                              planning_problem.initial_state.orientation)
     states: List[MyState] = [MyState(planning_problem.initial_state)]
     vehicles: List[VehicleInfo] = [
         VehicleInfo(
             MyState(planning_problem.initial_state), None,
             DrawHelp.convert_to_drawable(planning_problem.initial_state))
     ]
     for i in range(1, time_steps):
         last_state_copy: MyState = deepcopy(states[i - 1])
         found_valid_next: bool = False
         tries: int = 0
         while not found_valid_next and tries < max_tries:
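             # predict a candidate next state; if it is invalid, randomly
             # perturb the yaw of the previous state and try again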
             next_state: MyState = GenerationHelp.predict_next_state(
                 scenario, last_state_copy)
             next_vehicle: VehicleInfo = VehicleInfo(next_state, None)
             if is_valid(next_vehicle, scenario):
                 states.append(next_state)
                 vehicles.append(next_vehicle)
                 found_valid_next = True
             else:
                 tries += 1
                 last_state_copy.orientation \
                     = states[i - 1].state.orientation + uniform(-GenerationConfig.max_yaw, GenerationConfig.max_yaw)
         if not found_valid_next:
             break
     return TrajectoryPrediction(
         Trajectory(0, list(map(lambda s: s.state, states))),
         shape), vehicles
コード例 #51
0
    def Fine_Tuning(self, execucao):
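        # perturb every coordinate of the current global best by the same random
        # step r_cloud * rj; the candidate replaces gbest only if it passes the
        # tree-based pre-check and does not worsen the fitness (assuming a
        # minimization problem), then r_cloud shrinks by a random factor in [w_min, w_max]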
        x_new = []
        p_x_new = 0
        global r_cloud
        rj = uniform(-1, 1)
        w_max = 0.9
        w_min = 0.6

        for k in range(len(self.gbest.dimensao)):
            x_new.append(self.gbest.dimensao[k] + (r_cloud * rj))

        p_x_new = self.old_tree.get_node(x_new, self.old_tree.maturidade)
        p_g_best = self.old_tree.get_node(self.gbest.dimensao,
                                          self.old_tree.maturidade)

        if (p_x_new <= p_g_best):
            p_x_new = self.Funcao(x_new, execucao)
            if (p_x_new <= self.gbest.fitness):
                self.gbest.dimensao = copy.deepcopy(x_new)
                self.gbest.fitness = copy.deepcopy(p_x_new)

        r_cloud = r_cloud * (w_min + (random.random() * (w_max - w_min)))
コード例 #52
0
    def setUp(self):
        """Called for every test."""
        self.decimals = 4  # for Numeric.around; 7 is SciPy default.
        self.idmtx = Transformation().getMatrix(transpose=1)

        self.known_points = [[1., 0., 2.], [1., 0., 1.], [1., 1., 1.],
                             [0., 0., 1.], [0., 0., 0.], [0., 1., 0.],
                             [0., 1., -1.], [1., 1., -1.], [1., 2., -1.],
                             [1., 1., -2.]]
        npts = len(self.known_points)
        dim = 3
        self.max = 9999999.
        self.min = -self.max
        self.random_points = RandomArray.uniform(self.min, self.max,
                                                 (npts, dim)).tolist()

        # create a simple torsion system for both point lists
        torTree = TestTorTree()
        torTree.append(TestTorsion(4, 3, [0, 1, 2]))
        torTree.append(TestTorsion(3, 1, [0, 2]))
        torTree.append(TestTorsion(6, 7, [8, 9]))
        self.torTree = torTree
コード例 #53
0
def uniform(minimum, maximum, shape=[]):
    """uniform(minimum, maximum, shape=[]) returns array of given shape of random reals
    in given range"""
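    # an empty shape means a single scalar draw (size=None for mtrand.uniform)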
    if shape == []:
        shape = None
    return mt.uniform(minimum, maximum, shape)
コード例 #54
0
ファイル: tutorial.py プロジェクト: hhiester/convert2vtk
import numpy
# no unlimited dimension, just assign to slice.
lats =  numpy.arange(-90,91,2.5)
lons =  numpy.arange(-180,180,2.5)
latitudes[:] = lats
longitudes[:] = lons
print('latitudes =\n', latitudes[:])
print('longitudes =\n', longitudes[:])

# append along two unlimited dimensions by assigning to slice.
nlats = len(rootgrp.dimensions['lat'])
nlons = len(rootgrp.dimensions['lon'])
print('temp shape before adding data = ', temp.shape)
from numpy.random.mtrand import uniform # random number generator.
temp[0:5,0:10,:,:] = uniform(size=(5,10,nlats,nlons))
print('temp shape after adding data = ', temp.shape)
# levels have grown, but no values yet assigned.
print('levels shape after adding pressure data = ', levels.shape)

# assign values to levels dimension variable.
levels[:] =  [1000.,850.,700.,500.,300.,250.,200.,150.,100.,50.]
# fancy slicing
tempdat = temp[::2, [1,3,6], lats>0, lons>0]
print('shape of fancy temp slice = ', tempdat.shape)
print(temp[0, 0, [0,1,2,3], [0,1,2,3]].shape)

# fill in times.
from datetime import datetime, timedelta
from netCDF4 import num2date, date2num, date2index
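# build 12-hourly timestamps starting 2001-03-01, one per record along the time axis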
dates = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(temp.shape[0])]
コード例 #55
0
ファイル: tst_masked.py プロジェクト: mathause/netCDF4p
import unittest
import os
import tempfile
import numpy as NP
from numpy import ma
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.random.mtrand import uniform
import netCDF4p as netCDF4

# test automatic conversion of masked arrays, and
# packing/unpacking of short ints.

# create 1-D random arrays of length ndim.
FILE_NAME = tempfile.mktemp(".nc")
ndim = 10
ranarr = 100.*uniform(size=(ndim))
ranarr2 = 100.*uniform(size=(ndim))
# used for checking vector missing_values
arr3 = NP.linspace(0,9,ndim)
mask = NP.zeros(ndim, NP.bool_); mask[-1] = True; mask[-2] = True
marr3 = NP.ma.array(arr3, mask=mask, dtype=NP.int32)
packeddata = 10.*uniform(size=(ndim))
missing_value = -9999.
missing_value2 = NP.nan
missing_value3 = [8,9]
ranarr[::2] = missing_value
ranarr2[::2] = missing_value2
NP.seterr(invalid='ignore') # silence warnings from ma.masked_values
maskedarr = ma.masked_values(ranarr,missing_value)
maskedarr2 = ma.masked_values(ranarr2,missing_value2)
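# scale factor for packing the floats into short ints spanning roughly the int16 range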
scale_factor = (packeddata.max()-packeddata.min())/(2.*32766.)
コード例 #56
0
ファイル: tst_types.py プロジェクト: Unidata/netcdf4-python
import sys
import unittest
import os
import tempfile
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.random.mtrand import uniform
import netCDF4

# test primitive data types.

# create an n1dim by n2dim random ranarr.
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
n1dim = 5
n2dim = 10
ranarr = 100.*uniform(size=(n1dim,n2dim))
zlib = False; complevel = 0; shuffle = 0; least_significant_digit = None
datatypes = ['f8','f4','i1','i2','i4','i8','u1','u2','u4','u8','S1']
FillValue = 1.0
issue273_data = np.ma.array(['z']*10, dtype='S1',
                            mask=[False,False,False,False,False,True,False,False,False,False])

class PrimitiveTypesTestCase(unittest.TestCase):

    def setUp(self):
        self.file = FILE_NAME
        f = netCDF4.Dataset(self.file,'w')
        f.createDimension('n1', None)
        f.createDimension('n2', n2dim)
        for typ in datatypes:
            foo = f.createVariable('data_'+typ, typ, ('n1','n2',), zlib=zlib,
                                   complevel=complevel, shuffle=shuffle,
                                   least_significant_digit=least_significant_digit,
                                   fill_value=FillValue)