Example #1
    def read_samples(cls, filename_samples):
        """
        Read LALinference posterior_samples
        """
        import os
        if not os.path.isfile(filename_samples):
            raise ValueError("Sample file supplied does not exist")

        if "hdf5" in filename_samples:
            samples_out = h5py.File(filename_samples, 'r')
            # h5py keys() is not subscriptable in Python 3, so wrap in list();
            # descend three group levels to reach the posterior dataset
            key = list(samples_out.keys())[0]
            samples_out = samples_out[key]
            key = list(samples_out.keys())[0]
            samples_out = samples_out[key]
            key = list(samples_out.keys())[0]
            samples_out = samples_out[key]
            data_out = pd.DataFrame.from_records(np.array(samples_out))
            data_out.rename(columns={'mc': 'mchirp'}, inplace=True)

            data_out["eta"] = lightcurve_utils.q2eta(data_out["q"])
            data_out["m1"], data_out["m2"] = lightcurve_utils.mc2ms(
                data_out["mchirp"], data_out["eta"])
            data_out['q'] = 1.0 / data_out['q']

            data_out = Table.from_pandas(data_out)

        else:
            data_out = Table.read(filename_samples, format='ascii')

            if 'm1_detector_frame_Msun' in list(data_out.columns):
                data_out['m1'] = data_out['m1_detector_frame_Msun']
                print('setting m1 to m1_detector_frame_Msun')
            if 'm2_detector_frame_Msun' in list(data_out.columns):
                data_out['m2'] = data_out['m2_detector_frame_Msun']
                print('setting m2 to m2_detector_frame_Msun')

            if 'dlam_tilde' in list(data_out.columns):
                data_out['dlambdat'] = data_out['dlam_tilde']
                print('setting dlambdat to dlam_tilde')
            if 'lam_tilde' in list(data_out.columns):
                data_out['lambdat'] = data_out['lam_tilde']
                print('setting lambdat to lam_tilde')

            data_out['mchirp'], data_out['eta'], data_out['q'] = \
                lightcurve_utils.ms2mc(data_out['m1'], data_out['m2'])
            data_out['q'] = 1.0 / data_out['q']

        return KNTable(data_out)
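The helpers lightcurve_utils.ms2mc, q2eta and mc2ms used above implement the standard chirp-mass relations. A minimal, self-contained sketch of equivalent conversions (stand-in names; the convention q = m2/m1 <= 1 is an assumption here, and the snippets invert with q = 1/q where the other convention is wanted) could look like this:

import numpy as np

def ms2mc_sketch(m1, m2):
    # component masses -> (chirp mass, symmetric mass ratio, mass ratio)
    mchirp = (m1 * m2) ** (3.0 / 5.0) / (m1 + m2) ** (1.0 / 5.0)
    eta = m1 * m2 / (m1 + m2) ** 2
    q = m2 / m1                      # assumed convention: q <= 1 for m1 >= m2
    return mchirp, eta, q

def q2eta_sketch(q):
    # mass ratio -> symmetric mass ratio (same result for q or 1/q)
    return q / (1.0 + q) ** 2

def mc2ms_sketch(mchirp, eta):
    # chirp mass + symmetric mass ratio -> component masses, m1 >= m2
    mtot = mchirp / eta ** (3.0 / 5.0)
    delta = np.sqrt(np.maximum(1.0 - 4.0 * eta, 0.0))
    return 0.5 * mtot * (1.0 + delta), 0.5 * mtot * (1.0 - delta)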
Example #2
        data_out = data_out[opts.name]

    elif opts.doGoingTheDistance or opts.doMassGap:

        truths = {}
        if opts.doGoingTheDistance:
            data_out = lightcurve_utils.going_the_distance(
                opts.dataDir, opts.name)
        elif opts.doMassGap:
            data_out, truths = lightcurve_utils.massgap(
                opts.dataDir, opts.name)

        if "m1" in truths:
            eta = lightcurve_utils.q2eta(truths["q"])
            m1, m2 = truths["m1"], truths["m2"]
            mchirp, eta, q = lightcurve_utils.ms2mc(m1, m2)
            q = 1 / q
            chi_eff = truths["a1"]
            chi_eff = 0.75

            eta = lightcurve_utils.q2eta(np.mean(data_out["q"]))
            m1, m2 = lightcurve_utils.mc2ms(np.mean(data_out["mc"]), eta)
            mchirp, eta, q = lightcurve_utils.ms2mc(m1, m2)
            q = 1 / q

        else:
            eta = lightcurve_utils.q2eta(data_out["q"])
            m1, m2 = lightcurve_utils.mc2ms(data_out["mc"], eta)
            q = m2 / m1
            mc = data_out["mc"]
Example #3
    def initialize_object(cls,
                          input_samples,
                          Nsamples=1000,
                          twixie_flag=False):
        """
                Read low latency posterior_samples
                """
        names = ['weight', 'm1', 'm2', 'spin1', 'spin2', 'dist_mbta']
        data_out = Table(input_samples, names=names)

        data_out['mchirp'], data_out['eta'], data_out['q'] = \
            lightcurve_utils.ms2mc(data_out['m1'], data_out['m2'])

        data_out['chi_eff'] = ((data_out['m1'] * data_out['spin1'] +
                                data_out['m2'] * data_out['spin2']) /
                               (data_out['m1'] + data_out['m2']))

        # modify 'weight' using twixie information
        if twixie_flag:
            twixie_file = "/home/reed.essick/mass-dip/production/O1O2-ALL_BandpassPowerLaw-MassDistBeta/twixie-sample-emcee_O1O2-ALL_MassDistBandpassPowerLaw1D-MassDistBeta2D_CLEAN.hdf5"
            (data_twixie, logprob_twixie, params_twixie), \
                (massDist1D_twixie, massDist2D_twixie), \
                (ranges_twixie, fixed_twixie), \
                (posteriors_twixie, injections_twixie) = backends.load_emcee_samples(
                    twixie_file, backends.DEFAULT_EMCEE_NAME)
            nstp_twixie, nwlk_twixie, ndim_twixie = data_twixie.shape
            num_1D_params_twixie = len(
                distributions.KNOWN_MassDist1D[massDist1D_twixie]._params)
            # assumes 1D model params always come first, which should be OK
            mass_model_twixie = distributions.KNOWN_MassDist1D[massDist1D_twixie](
                *data_twixie[0, 0, :num_1D_params_twixie])
            mass_model_twixie = distributions.KNOWN_MassDist2D[massDist2D_twixie](
                mass_model_twixie, *data_twixie[0, 0, num_1D_params_twixie:])
            min_mass_twixie, max_mass_twixie = 1.0, 100.0
            m_grid_twixie = np.linspace(min_mass_twixie, max_mass_twixie, 100)
            ans_twixie = utils.qdist(data_twixie,
                                     mass_model_twixie,
                                     m_grid_twixie,
                                     np.median(data_out['q']),
                                     num_points=100)
            ans_twixie = np.array([list(item) for item in ans_twixie])
            twixie_func = interpolate.interp1d(ans_twixie[:, 0], ans_twixie[:, 1])
            data_out['weight'] = data_out['weight'] * twixie_func(data_out['q'])

        data_out['weight'] = data_out['weight'] / np.max(data_out['weight'])
        kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1)
        gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=0)
        params = np.vstack((data_out['mchirp'], data_out['q'],
                            data_out['chi_eff'], data_out['dist_mbta'])).T
        data = np.array(data_out['weight'])
        gp.fit(params, data)

        mchirp_min, mchirp_max = np.min(data_out['mchirp']), np.max(
            data_out['mchirp'])
        q_min, q_max = np.min(data_out['q']), np.max(data_out['q'])
        chi_min, chi_max = np.min(data_out['chi_eff']), np.max(
            data_out['chi_eff'])
        dist_mbta_min, dist_mbta_max = np.min(data_out['dist_mbta']), np.max(
            data_out['dist_mbta'])

        cnt = 0
        samples = []
        while cnt < Nsamples:
            mchirp = np.random.uniform(mchirp_min, mchirp_max)
            q = np.random.uniform(q_min, q_max)
            chi_eff = np.random.uniform(chi_min, chi_max)
            dist_mbta = np.random.uniform(dist_mbta_min, dist_mbta_max)
            samp = np.atleast_2d(np.array([mchirp, q, chi_eff, dist_mbta]))
            weight = gp.predict(samp)[0]
            thresh = np.random.uniform(0, 1)
            if weight > thresh:
                samples.append([mchirp, q, chi_eff, dist_mbta])
                cnt = cnt + 1
        samples = np.array(samples)
        data_out = Table(data=samples,
                         names=['mchirp', 'q', 'chi_eff', 'dist_mbta'])
        data_out["eta"] = lightcurve_utils.q2eta(data_out["q"])
        data_out["m1"], data_out["m2"] = lightcurve_utils.mc2ms(
            data_out["mchirp"], data_out["eta"])
        data_out["q"] = 1.0 / data_out["q"]

        return KNTable(data_out)
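The while-loop above is a plain accept/reject step against weights predicted by the Gaussian-process fit. A compact, self-contained sketch of the same pattern with a toy weight function (all names here are hypothetical stand-ins, not part of the snippet):

import numpy as np

def rejection_sample(weight_fn, bounds, nsamples, seed=None):
    # draw uniformly inside the box and keep points whose (normalised)
    # weight exceeds a uniform random threshold, as in the loop above
    rng = np.random.default_rng(seed)
    lo = np.array([b[0] for b in bounds])
    hi = np.array([b[1] for b in bounds])
    kept = []
    while len(kept) < nsamples:
        point = rng.uniform(lo, hi)
        if weight_fn(point) > rng.uniform(0.0, 1.0):
            kept.append(point)
    return np.array(kept)

# toy weight peaked at the centre of a 2-D box
samples = rejection_sample(lambda p: np.exp(-np.sum((p - 0.5) ** 2) / 0.02),
                           bounds=[(0.0, 1.0), (0.0, 1.0)], nsamples=200)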
Example #4
    def read_samples(cls, filename_samples):
        """
        Read LALinference posterior_samples
        """
        import os
        if not os.path.isfile(filename_samples):
            raise ValueError("Sample file supplied does not exist")

        if "hdf" in filename_samples:
            samples_out = h5py.File(filename_samples, 'r')
            samples_out = samples_out['lalinference']

            data_out = Table(samples_out)
            data_out['q'] = data_out['m1'] / data_out['m2']
            data_out['mchirp'] = (data_out['m1'] * data_out['m2'])**(
                3. / 5.) / (data_out['m1'] + data_out['m2'])**(1. / 5.)

            data_out['theta'] = data_out['iota']
            idx = np.where(data_out['theta'] > 90.)[0]
            data_out['theta'][idx] = 180 - data_out['theta'][idx]

            data_out["eta"] = lightcurve_utils.q2eta(data_out["q"])
            data_out["m1"], data_out["m2"] = lightcurve_utils.mc2ms(
                data_out["mchirp"], data_out["eta"])
            data_out['q'] = 1.0 / data_out['q']

        else:
            data_out = Table.read(filename_samples, format='ascii')

            if 'mass_1_source' in list(data_out.columns):
                data_out['m1'] = data_out['mass_1_source']
                print('setting m1 to m1_source')
            if 'mass_2_source' in list(data_out.columns):
                data_out['m2'] = data_out['mass_2_source']
                print('setting m2 to m2_source')

            if 'm1_detector_frame_Msun' in list(data_out.columns):
                data_out['m1'] = data_out['m1_detector_frame_Msun']
                print('setting m1 to m1_detector_frame_Msun')
            if 'm2_detector_frame_Msun' in list(data_out.columns):
                data_out['m2'] = data_out['m2_detector_frame_Msun']
                print('setting m2 to m2_detector_frame_Msun')

            if 'dlam_tilde' in list(data_out.columns):
                data_out['dlambdat'] = data_out['dlam_tilde']
                print('setting dlambdat to dlam_tilde')
            if 'lam_tilde' in list(data_out.columns):
                data_out['lambdat'] = data_out['lam_tilde']
                print('setting lambdat to lam_tilde')

            if 'delta_lambda_tilde' in list(data_out.columns):
                data_out['dlambdat'] = data_out['delta_lambda_tilde']
                print('setting dlambdat to delta_lambda_tilde')
            if 'lambda_tilde' in list(data_out.columns):
                data_out['lambdat'] = data_out['lambda_tilde']
                print('setting lambdat to lambda_tilde')

            if 'm1' not in list(data_out.columns):
                eta = lightcurve_utils.q2eta(data_out['mass_ratio'])
                m1, m2 = lightcurve_utils.mc2ms(data_out["chirp_mass"], eta)
                data_out['m1'] = m1
                data_out['m2'] = m2

            data_out['mchirp'], data_out['eta'], data_out['q'] = \
                lightcurve_utils.ms2mc(data_out['m1'], data_out['m2'])
            data_out['q'] = 1.0 / data_out['q']
            data_out['chi_eff'] = ((data_out['m1'] * data_out['spin1'] +
                                    data_out['m2'] * data_out['spin2']) /
                                   (data_out['m1'] + data_out['m2']))
            data_out["dist"] = data_out["luminosity_distance_Mpc"]

        return KNTable(data_out)
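The inclination handling above folds viewing angles onto [0, 90] degrees (any theta > 90 becomes 180 - theta). The same folding in standalone, vectorised form, with toy values:

import numpy as np

theta = np.array([10.0, 120.0, 170.0])                 # degrees, toy values
theta = np.where(theta > 90.0, 180.0 - theta, theta)   # fold onto [0, 90]
print(theta)                                           # [10. 60. 10.]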
Example #5
def run_EOS(EOS, m1, m2, thetas, type_set='None', N_EOS=100, model_set='Bu2019inc', chirp_q=False):
    chi = 1
    if type_set == 'None':
        sys.exit('Type is not defined')
    
    num_samples = N_EOS
    if type_set == 'BNS_chirp_q':
        type_set = 'BNS' 
    if not chirp_q:
        q = m2/m1
        mchirp = np.power((m1*m2), 3/5) / np.power((m1+m2), 1/5)
        eta = m1*m2/( (m1+m2)*(m1+m2) )
    if chirp_q:
        print('running chirp_q')
        m1 = np.random.normal(m1, .05, 1)
        m2 = np.random.normal(m2, .05, 1)
        #m1 = np.ones(100) * m1
        #m2 = np.ones(100) * m2
        q = m2 
        mchirp = m1
        eta = lightcurve_utils.q2eta(q) 
        m1, m2 = lightcurve_utils.mc2ms(mchirp, eta)

    #chi_eff = np.random.uniform(-1,1,100)
    chi_eff = np.ones(1)*chi
    Xlan_val = 1e-3
    Xlan =  Xlan_val
    
    #if lan_override:
    #    Xlan_val = lan_override_val 
    
    #Xlans = np.ones(1)*Xlan_val 
    c1 = np.ones(1)
    c2 = np.ones(1)
    mb1 = np.ones(1)
    mb2 = np.ones(1)
    
    data = np.vstack((m1, m2, chi_eff, mchirp, eta, q)).T
    samples = KNTable(data, names=('m1', 'm2', 'chi_eff', 'mchirp', 'eta', 'q'))

    #data = np.vstack((m1s,m2s,dists,lambda1s,lambda2s,chi_effs,Xlans,c1,c2,mb1,mb2,mchirps,etas,qs,mej,vej, dyn_mej, wind_mej, mbnss)).T
    #samples = KNTable((data), names = ('m1','m2','dist','lambda1','lambda2','chi_eff','Xlan','c1','c2','mb1','mb2','mchirp','eta','q','mej','vej', 'dyn_mej', 'wind_mej', 'mbns'))    


    lambda1s = []
    lambda2s = []
    m1s = []
    m2s = []
    #dists=[]
    chi_effs = []
    Xlans = []
    qs = []
    etas = []
    mchirps = []
    mbnss = []

    print('...running')

    nsamples = num_samples

    m1s, m2s, dists_mbta = [], [], []
    lambda1s, lambda2s, chi_effs = [], [], []
    mbnss = []
    if EOS == "gp":
        # read Phil + Reed's EOS files
        eospostdat = np.genfromtxt("/home/philippe.landry/nseos/eos_post_PSRs+GW170817+J0030.csv",names=True,dtype=None,delimiter=",")
        idxs = np.array(eospostdat["eos"])
        weights = np.array([np.exp(weight) for weight in eospostdat["logweight_total"]])
    elif EOS == "Sly":
        eosname = "SLy"
        eos = EOS4ParameterPiecewisePolytrope(eosname)

    Xlan_min, Xlan_max = -9, -1 
 
    for ii, row in enumerate(samples): 
        #m1, m2, dist_mbta, chi_eff = row["m1"], row["m2"], row["dist_mbta"], row["chi_eff"]
        m1, m2, chi_eff = row["m1"], row["m2"], row["chi_eff"]
        if EOS == "spec":
            indices = np.random.randint(0, 2396, size=nsamples)
        elif EOS == "gp":
            indices = np.random.choice(np.arange(0,len(idxs)), size=nsamples,replace=True,p=weights/np.sum(weights))
        for jj in range(nsamples):
            if (EOS == "spec") or (EOS == "gp"):
                index = indices[jj] 
                lambda1, lambda2 = -1, -1
                mbns = -1
            # samples lambda's from Phil + Reed's files
            if EOS == "spec":
                while (lambda1 < 0.) or (lambda2 < 0.) or (mbns < 0.):
                    eospath = "/home/philippe.landry/nseos/eos/spec/macro/macro-spec_%dcr.csv" % index
                    data_out = np.genfromtxt(eospath, names=True, delimiter=",")
                    marray, larray = data_out["M"], data_out["Lambda"]
                    f = interp.interp1d(marray, larray, fill_value=0, bounds_error=False)
                    if float(f(m1)) > lambda1: lambda1 = f(m1)
                    if float(f(m2)) > lambda2: lambda2 = f(m2)
                    if np.max(marray) > mbns: mbns = np.max(marray)

                    if (lambda1 < 0.) or (lambda2 < 0.) or (mbns < 0.):
                        index = int(np.random.randint(0, 2396, size=1)) # pick a different EOS if it returns negative Lambda or Mmax
                        lambda1, lambda2 = -1, -1
                        mbns = -1

            elif EOS == "gp":
                while (lambda1 < 0.) or (lambda2 < 0.) or (mbns < 0.):
                    phasetr = 0
                    eospath = "/home/philippe.landry/nseos/eos/gp/mrgagn/DRAWmod1000-%06d/MACROdraw-%06d/MACROdraw-%06d-%d.csv" % (idxs[index]/1000, idxs[index], idxs[index], phasetr)
                    while os.path.isfile(eospath):
                        data_out = np.genfromtxt(eospath, names=True, delimiter=",")
                        marray, larray = data_out["M"], data_out["Lambda"]
                        f = interp.interp1d(marray, larray, fill_value=0, bounds_error=False)
                        if float(f(m1)) > lambda1: lambda1 = f(m1) # pick lambda from least compact stable branch
                        if float(f(m2)) > lambda2: lambda2 = f(m2)
                        if np.max(marray) > mbns: mbns = np.max(marray) # get global maximum mass
                    
                        phasetr += 1 # check all stable branches
                        eospath = "/home/philippe.landry/nseos/eos/gp/mrgagn/DRAWmod1000-%06d/MACROdraw-%06d/MACROdraw-%06d-%d.csv" % (idxs[index]/1000, idxs[index], idxs[index], phasetr)
                    if (lambda1 < 0.) or (lambda2 < 0.) or (mbns < 0.):
                        index = int(np.random.choice(np.arange(0,len(idxs)), size=1,replace=True,p=weights/np.sum(weights))) # pick a different EOS if it returns negative Lambda or Mmax
                        lambda1, lambda2 = -1, -1
                        mbns = -1
                    
            elif EOS == "Sly":
                lambda1, lambda2 = eos.lambdaofm(m1), eos.lambdaofm(m2)
                mbns = eos.maxmass()

            m1s.append(m1)
            m2s.append(m2)
            #dists_mbta.append(dist_mbta)
            lambda1s.append(lambda1)
            lambda2s.append(lambda2)
            chi_effs.append(chi_eff)
            #Xlans.append(10**np.random.uniform(Xlan_min, Xlan_max))
            Xlans.append(Xlan)
            mbnss.append(mbns)
    
    #print(len(thetas))
    #check theta implementation for reproducibility
    #thetas = 180. * np.arccos(np.random.uniform(-1., 1., len(samples) * nsamples)) / np.pi
    idx_thetas = np.where(thetas > 90.)[0]
    thetas[idx_thetas] = 180. - thetas[idx_thetas]
    #thetas = list(thetas)
    Xlans = np.ones(np.array(m1s).shape) * Xlan_val
    #print(mbnss)
    #thetas = np.ones(100)
    # make final arrays of masses, distances, lambdas, spins, and lanthanide fractions
    data = np.vstack((m1s,m2s,lambda1s,lambda2s,Xlans,chi_effs,thetas,mbnss)).T
    samples = KNTable(data, names=('m1', 'm2', 'lambda1', 'lambda2','Xlan','chi_eff','theta', 'mbns'))       
 
    # limit masses
    #samples = samples.mass_cut(mass1=3.0,mass2=3.0)
     
    print("m1: %.5f +-%.5f"%(np.mean(samples["m1"]),np.std(samples["m1"])))
    print("m2: %.5f +-%.5f"%(np.mean(samples["m2"]),np.std(samples["m2"])))
       
    
    # Downsample 
    #samples = samples.downsample(Nsamples=100)
    print(data)
    print(samples) 
    samples = samples.calc_tidal_lambda(remove_negative_lambda=True)
    
    print(samples)
    print(samples['lambda1'], samples['lambda2'])
    # Calc compactness
    samples = samples.calc_compactness(fit=True)
    
    # Calc baryonic mass 
    print(samples)
    samples = samples.calc_baryonic_mass(EOS=None, TOV=None, fit=True)
    
       
    #----------------------------------------------------------------------------------
    if ('mej' not in samples.colnames) and ('vej' not in samples.colnames):
        #mbns = 2.1
        #idx1 = np.where((samples['m1'] < mbns) & (samples['m2'] < mbns))[0]
        #idx2 = np.where((samples['m1'] > mbns) | (samples['m2'] > mbns))[0]
        
        #1 BNS, 2 NSBH, 3 BBH    
        idx1 = np.where((samples['m1'] <= samples['mbns']) & (samples['m2'] <= samples['mbns']))[0]
        idx2 = np.where((samples['m1'] > samples['mbns']) & (samples['m2'] <= samples['mbns']))[0]
        idx3 = np.where((samples['m1'] > samples['mbns']) & (samples['m2'] > samples['mbns']))[0]

        mej, vej = np.zeros(samples['m1'].shape), np.zeros(samples['m1'].shape)
        wind_mej, dyn_mej = np.zeros(samples['m1'].shape), np.zeros(samples['m1'].shape)   
 
        #from gwemlightcurves.EjectaFits.CoDi2019 import calc_meje, calc_vej
        from gwemlightcurves.EjectaFits.PaDi2019 import calc_meje, calc_vej
        # calc the mass of ejecta
        mej1, dyn_mej1, wind_mej1 = calc_meje(samples['m1'], samples['c1'], samples['m2'], samples['c2'], split_mej=True)
        # calc the velocity of ejecta
        vej1 = calc_vej(samples['m1'],samples['c1'],samples['m2'],samples['c2'])
    
        samples['mchirp'], samples['eta'], samples['q'] = lightcurve_utils.ms2mc(samples['m1'], samples['m2'])
    
        #samples['q'] = 1.0 / samples['q']
    
        from gwemlightcurves.EjectaFits.KrFo2019 import calc_meje, calc_vave
        # calc the mass of ejecta
        mej2, dyn_mej2, wind_mej2 = calc_meje(samples['q'], samples['chi_eff'], samples['c2'], samples['m2'], split_mej=True)
        # calc the velocity of ejecta
        vej2 = calc_vave(samples['q'])

        # calc the mass of ejecta
        mej3 = np.zeros(samples['m1'].shape)

        dyn_mej3 = np.zeros(samples['m1'].shape)
        wind_mej3 = np.zeros(samples['m1'].shape)
        # calc the velocity of ejecta
        vej3 = np.zeros(samples['m1'].shape) + 0.2
            
        mej[idx1], vej[idx1] = mej1[idx1], vej1[idx1]
        mej[idx2], vej[idx2] = mej2[idx2], vej2[idx2]
        mej[idx3], vej[idx3] = mej3[idx3], vej3[idx3]
   
        wind_mej[idx1], dyn_mej[idx1] = wind_mej1[idx1], dyn_mej1[idx1]
        wind_mej[idx2], dyn_mej[idx2] = wind_mej2[idx2], dyn_mej2[idx2]
        wind_mej[idx3], dyn_mej[idx3] = wind_mej3[idx3], dyn_mej3[idx3]   
 
        samples['mej'] = mej
        samples['vej'] = vej
        samples['dyn_mej'] = dyn_mej
        samples['wind_mej'] = wind_mej
         
    
        # Add draw from a gaussian in the log of ejecta mass with 1-sigma size of 70%
        erroropt = 'none'
        if erroropt == 'none':
            print("Not applying an error to mass ejecta")
        elif erroropt == 'log':
            samples['mej'] = np.power(10.,np.random.normal(np.log10(samples['mej']),0.236))
        elif erroropt == 'lin':
            samples['mej'] = np.random.normal(samples['mej'],0.72*samples['mej'])
        elif erroropt == 'loggauss':
            samples['mej'] = np.power(10.,np.random.normal(np.log10(samples['mej']),0.312))
        #idx = np.where(samples['mej'] > 0)[0]
        #samples = samples[idx]
    
        idx = np.where(samples['mej'] <= 0)[0]
        samples['mej'][idx] = 1e-11
            
           
        if (model_set == "Bu2019inc"):  
                idx = np.where(samples['mej'] <= 1e-6)[0]
                samples['mej'][idx] = 1e-11
        elif (model_set == "Ka2017"):
                idx = np.where(samples['mej'] <= 1e-3)[0]
                samples['mej'][idx] = 1e-11
               
            
        print("Probability of having ejecta")
        print(100 * (len(samples) - len(idx)) /len(samples))
        return samples
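A hypothetical call to run_EOS, assuming the gwemlightcurves environment is set up; the "Sly" branch avoids the cluster-specific EOS file paths hard-coded above, and all input values below are placeholders:

import numpy as np

m1 = np.array([1.40])     # placeholder primary mass (Msun)
m2 = np.array([1.35])     # placeholder secondary mass (Msun)
# one viewing angle per (binary, EOS draw) pair; run_EOS folds them onto [0, 90]
thetas = 180.0 * np.arccos(np.random.uniform(-1.0, 1.0, 100)) / np.pi

samples = run_EOS("Sly", m1, m2, thetas, type_set='BNS',
                  N_EOS=100, model_set='Bu2019inc')
print(np.mean(samples['mej']), np.mean(samples['vej']))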
Example #6
            truths_mej_vej[0] = np.log10(truths_mej_vej[0])

            filename = os.path.join(dataDir,"truth.dat")
            truths = np.loadtxt(filename)

            if opts.doEjecta:
                mej_em = data[:,1]
                vej_em = data[:,2]

                mej_true = truths_mej_vej[0]
                vej_true = truths_mej_vej[1]

            elif opts.doMasses:
                if opts.model == "DiUj2017":
                    if opts.doEOSFit:
                        mchirp_em,eta_em,q_em = lightcurve_utils.ms2mc(data[:,1],data[:,3])
                        mchirp_true,eta_true,q_true = lightcurve_utils.ms2mc(truths[0],truths[2])
                    else:
                        mchirp_em,eta_em,q_em = lightcurve_utils.ms2mc(data[:,1],data[:,4])
                        mchirp_true,eta_true,q_true = lightcurve_utils.ms2mc(truths[0],truths[2])
                elif opts.model == "KaKy2016":
                    if opts.doEOSFit:
                        mchirp_em,eta_em,q_em = lightcurve_utils.ms2mc(data[:,1]*data[:,3],data[:,3])
                        mchirp_true,eta_true,q_true = lightcurve_utils.ms2mc(truths[0]*truths[4],truths[4])
                    else:
                        mchirp_em,eta_em,q_em = lightcurve_utils.ms2mc(data[:,1]*data[:,3],data[:,3])
                        mchirp_true,eta_true,q_true = lightcurve_utils.ms2mc(truths[0]*truths[4],truths[4])
                q_em = 1/q_em
                q_true = 1/q_true

        multifile = lightcurve_utils.get_post_file(plotDir)
    post[name][errorbudget] = {}
    if name == "KaKy2016":
        if opts.doMasses:
            if EOSFit:
                t0 = data[:, 0]
                q = data[:, 1]
                chi_eff = data[:, 2]
                mns = data[:, 3]
                c = data[:, 4]
                th = data[:, 5]
                ph = data[:, 6]
                zp = data[:, 7]
                loglikelihood = data[:, 8]

                mchirp, eta, q = lightcurve_utils.ms2mc(
                    data[:, 1] * data[:, 3], data[:, 3])

                post[name][errorbudget]["mchirp"] = mchirp
                post[name][errorbudget]["q"] = q
            else:
                t0 = data[:, 0]
                q = data[:, 1]
                chi_eff = data[:, 2]
                mns = data[:, 3]
                mb = data[:, 4]
                c = data[:, 5]
                th = data[:, 6]
                ph = data[:, 7]
                zp = data[:, 8]
                loglikelihood = data[:, 9]
Example #8
def Test(EOS,
         m1,
         m2,
         chi,
         type_set=Type,
         model_set='Bu2019inc',
         twixie=twixie_tf,
         lan_override=False,
         lan_override_val=None,
         chirp_q=False):

    if type_set == 'BNS_chirp_q':
        type_set = 'BNS'
    if not chirp_q:
        m1 = np.random.normal(m1, .05, 100)
        m2 = np.random.normal(m2, .05, 100)
        q = m2 / m1
        mchirp = np.power((m1 * m2), 3 / 5) / np.power((m1 + m2), 1 / 5)
        eta = m1 * m2 / ((m1 + m2) * (m1 + m2))
    if chirp_q:
        print('running chirp_q')
        m1 = np.random.normal(m1, .05, 100)
        m2 = np.random.normal(m2, .05, 100)
        #m1 = np.ones(100) * m1
        #m2 = np.ones(100) * m2
        q = m2
        mchirp = m1
        eta = lightcurve_utils.q2eta(q)
        m1, m2 = lightcurve_utils.mc2ms(mchirp, eta)

    dist = np.ones(100)
    #chi_eff = np.random.uniform(-1,1,100)
    chi_eff = np.ones(100) * chi
    Xlan = 1e-3 * np.ones(100)

    if lan_override:
        Xlan = lan_override_val * np.ones(100)

    c1 = np.ones(1000)
    c2 = np.ones(1000)
    mb1 = np.ones(1000)
    mb2 = np.ones(1000)
    mej = np.ones(1000)
    vej = np.ones(1000)

    data = np.vstack((m1, m2, dist, chi_eff, Xlan, mchirp, eta, q)).T
    samples_tmp = KNTable(
        (data),
        names=('m1', 'm2', 'dist', 'chi_eff', 'Xlan', 'mchirp', 'eta', 'q'))

    if twixie:
        samples_tmp = KNTable.read_mchirp_samples(opts.mchirp_samples,
                                                  Nsamples=100,
                                                  twixie_flag=twixie_tf)

    lambda1s = []
    lambda2s = []
    m1s = []
    m2s = []
    dists = []
    chi_effs = []
    Xlans = []
    qs = []
    etas = []
    mchirps = []
    mbnss = []
    term1_list, term2_list, term3_list, term4_list = [], [], [], []

    if EOS == "gp":
        # read Phil + Reed's EOS files
        filenames = glob.glob(
            "/home/philippe.landry/gw170817eos/gp/macro/MACROdraw-*-0.csv")
        idxs = []
        for filename in filenames:
            filenameSplit = filename.replace(".csv",
                                             "").split("/")[-1].split("-")
            idxs.append(int(filenameSplit[1]))
        idxs = np.array(idxs)
    elif EOS == "Sly":
        eosname = "SLy"
        eos = EOS4ParameterPiecewisePolytrope(eosname)

    for ii, row in enumerate(samples_tmp):
        if not twixie:
            m1, m2, dist, chi_eff = row["m1"], row["m2"], row["dist"], row["chi_eff"]
            q, mchirp, eta, Xlan = row["q"], row["mchirp"], row["eta"], row["Xlan"]
        if twixie:
            m1, m2, dist, chi_eff = row["m1"], row["m2"], row["dist_mbta"], row["chi_eff"]
            q, mchirp, eta = row["q"], row["mchirp"], row["eta"]
        nsamples = 10
        if EOS == "spec":
            indices = np.random.randint(0, 2395, size=nsamples)
        elif EOS == "gp":
            indices = np.random.randint(0, len(idxs), size=nsamples)
        for jj in range(nsamples):
            if (EOS == "spec") or (EOS == "gp"):
                index = indices[jj]

            # samples lambda's from Phil + Reed's files
            if EOS == "spec":
                eospath = "/home/philippe.landry/gw170817eos/spec/macro/macro-spec_%dcr.csv" % index
                data_out = np.genfromtxt(eospath, names=True, delimiter=",")
                marray, larray = data_out["M"], data_out["Lambda"]
                f = interp.interp1d(marray,
                                    larray,
                                    fill_value=0,
                                    bounds_error=False)
                lambda1, lambda2 = f(m1), f(m2)
                mbns = np.max(marray)

            elif EOS == "Sly":
                lambda1, lambda2 = eos.lambdaofm(m1), eos.lambdaofm(m2)
                mbns = eos.maxmass()
                #print(mbns)

            elif EOS == "gp":
                lambda1, lambda2 = 0.0, 0.0
                phasetr = 0
                while (lambda1 == 0.0) or (lambda2 == 0.0):
                    eospath = "/home/philippe.landry/gw170817eos/gp/macro/MACROdraw-%06d-%d.csv" % (
                        idxs[index], phasetr)
                    if not os.path.isfile(eospath):
                        break
                    data_out = np.genfromtxt(eospath,
                                             names=True,
                                             delimiter=",")
                    marray, larray = data_out["M"], data_out["Lambda"]
                    f = interp.interp1d(marray,
                                        larray,
                                        fill_value=0,
                                        bounds_error=False)
                    lambda1_tmp, lambda2_tmp = f(m1), f(m2)
                    if (lambda1_tmp > 0) and (lambda1 == 0.0):
                        lambda1 = lambda1_tmp
                    if (lambda2_tmp > 0) and (lambda2 == 0.0):
                        lambda2 = lambda2_tmp
                    phasetr = phasetr + 1
                    mbns = np.max(marray)

            lambda1s.append(lambda1)
            lambda2s.append(lambda2)
            m1s.append(m1)
            m2s.append(m2)
            dists.append(dist)
            chi_effs.append(chi_eff)
            Xlans.append(Xlan)
            qs.append(q)
            etas.append(eta)
            mchirps.append(mchirp)
            mbnss.append(mbns)

    if twixie:
        Xlans = np.ones(1000) * 1e-3

    data = np.vstack((m1s, m2s, dists, lambda1s, lambda2s, chi_effs, Xlans, c1,
                      c2, mb1, mb2, mchirps, etas, qs, mej, vej, mbnss)).T
    samples = KNTable((data),
                      names=('m1', 'm2', 'dist', 'lambda1', 'lambda2',
                             'chi_eff', 'Xlan', 'c1', 'c2', 'mb1', 'mb2',
                             'mchirp', 'eta', 'q', 'mej', 'vej', 'mbns'))

    #calc compactness
    samples = samples.calc_compactness(fit=True)

    #clac baryonic mass
    samples = samples.calc_baryonic_mass(EOS=None, TOV=None, fit=True)

    if type_set == 'BNS':

        from gwemlightcurves.EjectaFits.CoDi2019 import calc_meje, calc_vej
        #from gwemlightcurves.EjectaFits.PaDi2019 import calc_meje, calc_vej
        # calc the mass of ejecta
        mej = calc_meje(samples['m1'], samples['c1'], samples['m2'],
                        samples['c2'])
        # calc the velocity of ejecta
        vej = calc_vej(samples['m1'], samples['c1'], samples['m2'],
                       samples['c2'])

        samples['mchirp'], samples['eta'], samples['q'] = \
            lightcurve_utils.ms2mc(samples['m1'], samples['m2'])

        samples['q'] = 1.0 / samples['q']

        samples['mej'] = mej
        samples['vej'] = vej

        if model_set == 'Bu2019inc':
            idx = np.where(samples['mej'] <= 1e-6)[0]
            samples['mej'][idx] = 1e-6
            idx2 = np.where(samples['mej'] >= 1)[0]
            samples['mej'][idx2] = 1e-6
        print('mej = ' + str(samples['mej'][0]))

    if type_set == 'NSBH':

        from gwemlightcurves.EjectaFits.KrFo2019 import calc_meje, calc_vave
        # calc the mass of ejecta
        mej = calc_meje(samples['q'], samples['chi_eff'], samples['c2'],
                        samples['m2'])
        # calc the velocity of ejecta
        vej = calc_vave(samples['q'])

        samples['mej'] = mej
        samples['vej'] = vej

        if model_set == 'Bu2019inc':
            idx = np.where(samples['mej'] <= 1e-6)[0]
            samples['mej'][idx] = 1e-6
            idx2 = np.where(samples['mej'] >= 1)[0]
            samples['mej'][idx2] = 1e-6

        print('mej = ' + str(samples['mej'][0]))

    if type_set in ('BNS', 'NSBH'):
        # Add draw from a gaussian in the log of ejecta mass with 1-sigma size of 70%
        erroropt = 'none'
        if erroropt == 'none':
            print("Not applying an error to mass ejecta")
        elif erroropt == 'log':
            samples['mej'] = np.power(
                10., np.random.normal(np.log10(samples['mej']), 0.236))
        elif erroropt == 'lin':
            samples['mej'] = np.random.normal(samples['mej'],
                                              0.72 * samples['mej'])
        elif erroropt == 'loggauss':
            samples['mej'] = np.power(
                10., np.random.normal(np.log10(samples['mej']), 0.312))
        idx = np.where(samples['mej'] > 0)[0]
        samples = samples[idx]
    print(EOS + ' calculation finished')
    return samples
Example #9
 if twixie_tf:
     chi_list = [0]
 for chi in chi_list:
     if opts.analysisType == 'NSBH':
         m1 = np.arange(3, 5.8, .1)
         m2 = np.arange(1, 1.5, .05)
         m1 = np.arange(3, 8, .1)
         m2 = np.arange(1, 2.4, .05)
     #sample 3-10 for NSBH
     if opts.analysisType == 'BNS':
         m1 = np.arange(1, 2.4, .1)
         m2 = np.arange(1, 2.4, .1)
         m1 = np.arange(1, 3.1, .1)
         m2 = np.arange(1, 3.1, .1)
     if chirp_q_tf:
         chirp_min, xx, yy = lightcurve_utils.ms2mc(1, 1)
         chirp_max, xx, yy = lightcurve_utils.ms2mc(2.5, 2.5)
         print(chirp_min, chirp_max)
         #m2 becomes q
         #m1 becomes mchirp
         m1 = np.arange(chirp_min, chirp_max, .1)
         m2 = np.arange(1, 2, .1)
     medians, stds = [], []
     m1_plot, m2_plot = [], []
     lambdatildes = []
     term1_plot, term2_plot, term3_plot, term4_plot = [], [], [], []
     for m1m in m1:
         for m2m in m2:
             if m1m >= m2m or chirp_q_tf:
                 print('Initializing ' + str(m1m) + ' ' + str(m2m))
                 runType = 'gp'
Example #10
    def read_mchirp_samples(cls, filename_samples, Nsamples=100):
        """
                Read low latency posterior_samples
                """
        import os
        if not os.path.isfile(filename_samples):
            raise ValueError("Sample file supplied does not exist")

        try:
            names = [
                'SNRdiff', 'erf', 'weight', 'm1', 'm2', 'spin1', 'spin2',
                'dist'
            ]
            data_out = Table.read(filename_samples,
                                  names=names,
                                  format='ascii')
        except Exception:
            names = ['SNRdiff', 'erf', 'weight', 'm1', 'm2', 'dist']
            data_out = Table.read(filename_samples,
                                  names=names,
                                  format='ascii')
            data_out['spin1'] = 0.0
            data_out['spin2'] = 0.0

        data_out['mchirp'], data_out['eta'], data_out['q'] = \
            lightcurve_utils.ms2mc(data_out['m1'], data_out['m2'])

        data_out['chi_eff'] = ((data_out['m1'] * data_out['spin1'] +
                                data_out['m2'] * data_out['spin2']) /
                               (data_out['m1'] + data_out['m2']))
        data_out['weight'] = data_out['weight'] / np.max(data_out['weight'])
        kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1)
        gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=0)
        params = np.vstack((data_out['mchirp'], data_out['q'],
                            data_out['chi_eff'], data_out['dist'])).T
        data = np.array(data_out['weight'])
        gp.fit(params, data)

        mchirp_min, mchirp_max = np.min(data_out['mchirp']), np.max(
            data_out['mchirp'])
        q_min, q_max = np.min(data_out['q']), np.max(data_out['q'])
        chi_min, chi_max = np.min(data_out['chi_eff']), np.max(
            data_out['chi_eff'])
        dist_min, dist_max = np.min(data_out['dist']), np.max(data_out['dist'])

        cnt = 0
        samples = []
        while cnt < Nsamples:
            mchirp = np.random.uniform(mchirp_min, mchirp_max)
            q = np.random.uniform(q_min, q_max)
            chi_eff = np.random.uniform(chi_min, chi_max)
            dist = np.random.uniform(dist_min, dist_max)
            samp = np.atleast_2d(np.array([mchirp, q, chi_eff, dist]))
            weight = gp.predict(samp)[0]
            thresh = np.random.uniform(0, 1)
            if weight > thresh:
                samples.append([mchirp, q, chi_eff, dist])
                cnt = cnt + 1
        samples = np.array(samples)
        data_out = Table(data=samples,
                         names=['mchirp', 'q', 'chi_eff', 'dist'])
        data_out["eta"] = lightcurve_utils.q2eta(data_out["q"])
        data_out["m1"], data_out["m2"] = lightcurve_utils.mc2ms(
            data_out["mchirp"], data_out["eta"])
        data_out["q"] = 1.0 / data_out["q"]

        if 'm1_source' in list(data_out.columns):
            data_out['m1'] = data_out['m1_source']
            print('setting m1 to m1_source')
        if 'm2_source' in list(data_out.columns):
            data_out['m2'] = data_out['m2_source']
            print('setting m2 to m2_source')

        if 'dlam_tilde' in list(data_out.columns):
            data_out['dlambdat'] = data_out['dlam_tilde']
            print('setting dlambdat to dlam_tilde')
        if 'lam_tilde' in list(data_out.columns):
            data_out['lambdat'] = data_out['lam_tilde']
            print('setting lambdat to lam_tilde')

        return KNTable(data_out)
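The weight surrogate above is a standard scikit-learn Gaussian-process regression over the (mchirp, q, chi_eff, dist) parameters. A self-contained fit/predict sketch on toy data (the inputs and values below are illustrative only):

import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RationalQuadratic

rng = np.random.default_rng(0)
X = rng.uniform(0.0, 1.0, size=(50, 4))            # toy (mchirp, q, chi_eff, dist) rows
y = np.exp(-np.sum((X - 0.5) ** 2, axis=1) / 0.1)  # toy weights

kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1)
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=0)
gp.fit(X, y)

query = np.atleast_2d([0.5, 0.5, 0.5, 0.5])
print(gp.predict(query)[0])                        # interpolated weight at the query point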
Example #11
        mbns = 2.1
        idx1 = np.where((samples['m1'] < mbns) & (samples['m2'] < mbns))[0]
        idx2 = np.where((samples['m1'] > mbns) | (samples['m2'] > mbns))[0]

        mej, vej = np.zeros(samples['m1'].shape), np.zeros(samples['m1'].shape)

        from gwemlightcurves.EjectaFits.CoDi2019 import calc_meje, calc_vej
        # calc the mass of ejecta
        mej1 = calc_meje(samples['m1'], samples['c1'], samples['m2'],
                         samples['c2'])
        # calc the velocity of ejecta
        vej1 = calc_vej(samples['m1'], samples['c1'], samples['m2'],
                        samples['c2'])

        samples['mchirp'], samples['eta'], samples['q'] = \
            lightcurve_utils.ms2mc(samples['m1'], samples['m2'])

        samples['q'] = 1.0 / samples['q']

        from gwemlightcurves.EjectaFits.KrFo2019 import calc_meje, calc_vave
        # calc the mass of ejecta
        mej2 = calc_meje(samples['q'], samples['chi_eff'], samples['c1'],
                         samples['m2'])
        # calc the velocity of ejecta
        vej2 = calc_vave(samples['q'])

        mej[idx1], vej[idx1] = mej1[idx1], vej1[idx1]
        mej[idx2], vej[idx2] = mej2[idx2], vej2[idx2]

        samples['mej'] = mej
        samples['vej'] = vej
Example #12
    def calc_ejecta(self, model_KN):
        idx1 = np.where((self.samples['m1'] <= self.samples['mbns'])
                        & (self.samples['m2'] <= self.samples['mbns']))[0]
        idx2 = np.where((self.samples['m1'] > self.samples['mbns'])
                        & (self.samples['m2'] <= self.samples['mbns']))[0]
        idx3 = np.where((self.samples['m1'] > self.samples['mbns'])
                        & (self.samples['m2'] > self.samples['mbns']))[0]

        mej, vej = np.zeros(self.samples['m1'].shape), np.zeros(
            self.samples['m1'].shape)
        from gwemlightcurves.EjectaFits.CoDi2019 import calc_meje, calc_vej
        # calc the mass of ejecta
        mej1 = calc_meje(self.samples['m1'], self.samples['c1'],
                         self.samples['m2'], self.samples['c2'])
        # calc the velocity of ejecta
        vej1 = calc_vej(self.samples['m1'], self.samples['c1'],
                        self.samples['m2'], self.samples['c2'])

        self.samples['mchirp'], self.samples['eta'], self.samples['q'] = \
            lightcurve_utils.ms2mc(self.samples['m1'], self.samples['m2'])
        self.samples['q'] = 1.0 / self.samples['q']

        from gwemlightcurves.EjectaFits.KrFo2019 import calc_meje, calc_vave
        # calc the mass of ejecta

        mej2 = calc_meje(self.samples['q'], self.samples['chi_eff'],
                         self.samples['c2'], self.samples['m2'])
        # calc the velocity of ejecta
        vej2 = calc_vave(self.samples['q'])

        # calc the mass of ejecta
        mej3 = np.zeros(self.samples['m1'].shape)
        # calc the velocity of ejecta
        vej3 = np.zeros(self.samples['m1'].shape) + 0.2

        mej[idx1], vej[idx1] = mej1[idx1], vej1[idx1]
        mej[idx2], vej[idx2] = mej2[idx2], vej2[idx2]
        mej[idx3], vej[idx3] = mej3[idx3], vej3[idx3]

        print(
            "(mej[1284], self.samples[1284]['m1'], self.samples[1284]['m2'], self.samples[1284]['c1'], self.samples[1284]['c2'], self.samples[1284]['q'], self.samples[1284]['chi_eff'])"
        )
        print((mej[1284], self.samples[1284]['m1'], self.samples[1284]['m2'],
               self.samples[1284]['c1'], self.samples[1284]['c2'],
               self.samples[1284]['q'], self.samples[1284]['chi_eff']))

        self.samples['mej'] = mej
        self.samples['vej'] = vej

        # Add draw from a gaussian in the log of ejecta mass with 1-sigma size of 70%
        erroropt = 'none'
        if erroropt == 'none':
            print("Not applying an error to mass ejecta")
        elif erroropt == 'log':
            self.samples['mej'] = np.power(
                10., np.random.normal(np.log10(self.samples['mej']), 0.236))
        elif erroropt == 'lin':
            self.samples['mej'] = np.random.normal(self.samples['mej'],
                                                   0.72 * self.samples['mej'])
        elif erroropt == 'loggauss':
            self.samples['mej'] = np.power(
                10., np.random.normal(np.log10(self.samples['mej']), 0.312))

        idx = np.where(self.samples['mej'] <= 0)[0]
        self.samples['mej'][idx] = 1e-11

        if (model_KN == "Bu2019inc"):
            idx = np.where(self.samples['mej'] <= 1e-6)[0]
            self.samples['mej'][idx] = 1e-11
        elif (model_KN == "Ka2017"):
            idx = np.where(self.samples['mej'] <= 1e-3)[0]
            self.samples['mej'][idx] = 1e-11

        print("Probability of having ejecta")
        print(100 * (len(self.samples) - len(idx)) / len(self.samples))
        return self.samples['mej']
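For reference, the three index sets at the top of calc_ejecta amount to classifying each binary against the maximum stable neutron-star mass of its EOS draw (BNS, NSBH, BBH). A minimal standalone sketch of that split, with toy values and column names mirroring the snippet:

import numpy as np

m1 = np.array([1.4, 5.0, 10.0])       # toy primary masses (Msun)
m2 = np.array([1.3, 1.4, 8.0])        # toy secondary masses (Msun)
mbns = np.array([2.2, 2.2, 2.2])      # toy maximum NS mass per EOS draw

idx_bns = np.where((m1 <= mbns) & (m2 <= mbns))[0]   # both neutron stars
idx_nsbh = np.where((m1 > mbns) & (m2 <= mbns))[0]   # NS-BH
idx_bbh = np.where((m1 > mbns) & (m2 > mbns))[0]     # both black holes -> no ejecta
print(idx_bns, idx_nsbh, idx_bbh)                    # [0] [1] [2]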