Example #1
def myloglike(cube, ndim, nparams):
    off = 0
    Mg_mu = []
    Mg_sig = []
    frac = cube[off]
    off += 1
    for pop in range(gp.pops):
        Mg_mu.append(cube[off])
        off += 1
        Mg_sig.append(cube[off])
        off += 1
    if off != ndim:
        gh.LOG(1, 'wrong number of parameters in myloglike.cube')
        pdb.set_trace()
    gh.LOG(2, 'starting logev evaluation')
    p1_Mg = 1 / np.sqrt(2 * np.pi * (Mg_sig[0]**2 + Mg_err**2)) * np.exp(
        -(Mg - Mg_mu[0])**2 / (2 * (Mg_sig[0]**2 + Mg_err**2)))
    p2_Mg = 1 / np.sqrt(2 * np.pi * (Mg_sig[1]**2 + Mg_err**2)) * np.exp(
        -(Mg - Mg_mu[1])**2 / (2 * (Mg_sig[1]**2 + Mg_err**2)))
    p1 = frac * PM * p1_Mg
    p2 = (1 - frac) * PM * p2_Mg
    pcom = p1 + p2
    print('pcom (min, max) = ', min(pcom), max(pcom))
    print('fraction of pcom == 0 : ', sum(pcom == 0) / len(pcom))
    lpcom = np.log(pcom)
    logev = np.sum(lpcom)
    gh.LOG(1, 'logL:', logev)
    #if logev < -1e300:
    #    pdb.set_trace()
    return logev
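The likelihood above is a two-component Gaussian mixture over the Mg index, weighted by the membership probabilities PM, with the measurement error added in quadrature to each component width. A minimal self-contained sketch of that mixture (standalone, hypothetical names):

import numpy as np

def gaussian_mixture_loglike(x, x_err, mu, sig, frac, PM):
    # two components; observational error added in quadrature to each width
    var1 = sig[0]**2 + x_err**2
    var2 = sig[1]**2 + x_err**2
    p1 = frac * PM * np.exp(-(x - mu[0])**2 / (2 * var1)) / np.sqrt(2 * np.pi * var1)
    p2 = (1 - frac) * PM * np.exp(-(x - mu[1])**2 / (2 * var2)) / np.sqrt(2 * np.pi * var2)
    return np.sum(np.log(p1 + p2))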
Example #2
def calc_chi2(profs, gp):
    chi2 = 0.
    # calc chi^2 from overall baryonic tracers
    # is not needed, as photometric data has very small errors,
    # and the corresponding M/L uncertainty is automatically
    # penalized from wrong sigma_LOS

    # now run through the stellar tracers
    for pop in np.arange(1, gp.pops + 1):  # [1, 2, ... , pops]
        Sigdat = gp.dat.Sig[pop]  # [Munit/pc^2]
        Sigerr = gp.dat.Sigerr[pop]  # [Munit/pc^2]
        Sigmodel = profs.get_prof('Sig', pop)[gp.nexp:-gp.nexp]
        hyperSig = profs.hyperSig[pop - 1]
        chi2_Sig = chi2red(Sigmodel, Sigdat, Sigerr, hyperSig, gp.nipol)  # [1]
        chi2 += chi2_Sig  # [1]
        gh.LOG(2, ' chi2_Sig   = ', chi2_Sig)

        # use the following only if chi2_nu_converged used rather than Sig_converged
        #nudat   = gp.dat.nu[pop]      # [Munit/pc^2]
        #nuerr   = gp.dat.nuerr[pop]   # [Munit/pc^2]
        #numodel = profs.get_prof('nu', pop)[gp.nexp:-gp.nexp]
        #chi2_nu  = chi2red(numodel, nudat, nuerr, gp.nipol) # [1]
        #chi2 += chi2_nu                 # [1]
        #gh.LOG(1, ' chi2_nu   = ', chi2_nu)
        if gp.chi2_Sig_converged > 0:
            continue

        sigdat = gp.dat.sig[pop]  # [km/s]
        sigerr = gp.dat.sigerr[pop]  # [km/s]
        smodel = profs.get_prof('sig', pop)[gp.nexp:-gp.nexp]
        hypersig = profs.hypersig[pop - 1]
        chi2_sig = chi2red(smodel, sigdat, sigerr, hypersig, gp.nipol)  # [1]
        chi2 += chi2_sig  # [1]
        gh.LOG(2, '  chi2_sig  = ', chi2_sig)
        if gp.usekappa:
            kapdat = 1. * gp.dat.kap[pop]  # [1]
            kaperr = 1. * gp.dat.kaperr[pop]  # [1]
            chi2_kap = chi2red(profs.get_kap(pop), kapdat, kaperr,
                               gp.nipol)  # [1]
            chi2 += chi2_kap  # [1]

        if gp.usezeta:
            zetaadat = 1. * gp.dat.zetaadat[pop]
            zetabdat = 1. * gp.dat.zetabdat[pop]
            zetaaerr = 1. * gp.dat.zetaaerr[pop]
            zetaberr = 1. * gp.dat.zetaberr[pop]
            zetaa_model, zetab_model = profs.get_zeta(pop)
            chi2_zetaa = chi2red(zetaa_model, zetaadat, zetaaerr, 1)
            chi2_zetab = chi2red(zetab_model, zetabdat, zetaberr, 1)
            chi2 += (chi2_zetaa + chi2_zetab)

    if gp.chi2_Sig_converged > 0:
        chi2 *= 10  # overamplify chi2 to get better models after switch
        if chi2 < gp.chi2_switch:
            gp.chi2_Sig_converged -= 1
            gh.LOG(1, 'Sig finished burn-in, waiting to get stable, ',
                   gp.chi2_Sig_converged)
    if gp.checksig:
        pdb.set_trace()
    return chi2
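chi2red itself is not part of this example; a minimal sketch of a reduced chi-square, ignoring the hyperparameter arguments (hyperSig, hypersig) that the calls above pass for error rescaling, would be:

import numpy as np

def chi2red(model, data, err, nbins):
    # plain reduced chi-square over nbins data points (no hyperparameter term)
    model, data, err = map(np.asarray, (model, data, err))
    return np.sum(((model - data) / err)**2) / nbins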
Example #3
    def plot_profile(self, basename, prof, pop, gp):
        gh.LOG(1, 'prof '+str(prof)+', pop '+str(pop)+', run '+basename)
        fig = plt.figure()
        ax  = fig.add_subplot(111)
        if prof != 'chi2':
            ax.set_xscale('log')
        if prof == 'rho' or prof == 'J' or prof == 'Sig' or\
           prof == 'M' or prof == 'nu':
            ax.set_yscale('log')
        self.plot_labels(ax, prof, pop, gp)
        if len(self.profs)>0:
            if prof == 'chi2':
                goodchi = []
                for k in range(len(self.profs)):
                    # do include all chi^2 values for plot
                    goodchi.append(self.chis[k])
                print('plotting profile chi for '+str(len(goodchi))+' models')
                bins, edges = np.histogram(np.log10(goodchi), range=[-2, 6],
                                           bins=max(6, int(np.sqrt(len(goodchi)))),
                                           density=True)
                ax.step(edges[1:], bins, where='pre')
                plt.draw()
                self.write_chi2(basename, edges, bins)
                fig.savefig(basename+'output/prof_chi2_0.pdf')
                return

            self.fill_nice(ax, prof, pop, gp)
            # TODO: replace above with full distribution plot (has bugs,
            # File "programs/plotting/gi_collection.py", line 466, in plot_full_distro
            # ybins = np.linspace(min(y[:,:]), max(y[:,:]), num=Nvertbin)
            # ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
            #self.plot_full_distro(ax, prof, pop, gp)

            self.plot_N_samples(ax, prof, pop)
            if prof == 'Sig' or prof == 'sig':
                self.plot_data(ax, basename, prof, pop, gp)

            if ((gp.investigate == 'gaia' or gp.investigate == 'triax') and prof != 'sig') \
               or (gp.investigate == 'walk' and prof != 'sig' and prof != 'nu' and prof != 'Sig'):
                r0 = self.analytic.x0
                y0 = self.analytic.get_prof(prof, pop)
                self.broaden_lim(prof, pop, min(y0), max(y0))
                ax.plot(r0, y0, 'b--', lw=2)
            ax.set_ylim(self.ranges[prof+str(pop)])
            plt.draw()
        else:
            gh.LOG(1, 'empty self.profs')
        fig.savefig(basename+'output/pdf/prof_'+prof+'_'+str(pop)+'.pdf')
        return 1
Example #4
def calc_chi2(profs, gp):
    chi2 = 0.

    # include rho* in chi^2 calculation
    nudat = gp.dat.nu[0]
    nuerr = gp.dat.nuerr[0]
    numodel = profs.get_prof('nu', 0)
    chi2_nu = chi2red(numodel, nudat, nuerr, gp.nipol)
    gh.LOG(2, ' chi2_nu0 = ', chi2_nu)
    chi2 += chi2_nu

    for pop in np.arange(1, gp.pops + 1):  # look at pops 1, 2, ...
        nudat = gp.dat.nu[pop]
        nuerr = gp.dat.nuerr[pop]
        numodel = profs.get_prof('nu', pop)
        chi2_nu = chi2red(numodel, nudat, nuerr, gp.nipol)
        gh.LOG(2, ' chi2_nu[' + str(pop) + '] = ', chi2_nu)
        chi2 += chi2_nu
        # pdb.set_trace()  # debug breakpoint; would halt on every population if left active
        if not gp.chi2_nu_converged:
            continue  # with pop loop

        sigdat = gp.dat.sig[pop]  # [km/s]
        sigerr = gp.dat.sigerr[pop]  # [km/s]
        sigmodel = profs.get_prof('sig', pop)
        chi2_sig = chi2red(sigmodel, sigdat, sigerr, gp.nipol)  # [1]
        if chi2_sig == np.inf:
            print('chi2_sig has become infinite')
            pdb.set_trace()
        chi2 += chi2_sig  # [1]
        gh.LOG(1, '  chi2_sig  = ', chi2_sig)

        if gp.usekappa:
            kapdat = gp.dat.kap[pop]  # [1]
            kaperr = gp.dat.kaperr[pop]  # [1]
            chi2_kap = chi2red(profs.get_kap(pop), kapdat, kaperr,
                               gp.nipol)  # [1]
            chi2 += chi2_kap  # [1]

    # switch to chi2_sig calculation too, if converged on Sig
    if not gp.chi2_nu_converged:
        chi2 *= 10
        if chi2 < gp.chi2_switch:
            gh.LOG(1, 'nu burn-in finished, switching on sigma')
            gp.chi2_nu_converged = True

    return chi2
Example #5
def Mr(r0, gp):
    if gp.investigate == 'hern':
        return M_hern(r0, gp)
    elif gp.investigate == 'gaia':
        return M_gaia(r0, gp)
    else:
        gh.LOG(1, 'ga.Mr not defined')
        pdb.set_trace()
Example #6
def read_Sigdata(gp):
    gh.LOG(1, 'reading Sig converged parameters')
    Sigconvparamsfn = gp.files.modedir + str(gp.case) + '/Sig_conv.stats'
    # the stats file holds three rows: nupar_min, nupar_median, nupar_max
    gp.nupar_min, nupar_med, gp.nupar_max = np.loadtxt(Sigconvparamsfn)
    return
Example #7
def nr3Dtot_gaia(rad, gp):
    beta_star1, r_DM, gamma_star1, r_star1, r_a1, gamma_DM, rho0 = gp.files.params
    if gamma_DM == 0:
        nr = 3 * rad / (r_DM + rad)
    elif gamma_DM == 1:
        nr = 2 * rad / (r_DM + rad) + 1
    else:
        gh.LOG(1, 'unknown gamma_DM = ', gamma_DM)
        nr = 0. * rad
    return nr
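For a double power-law halo rho(r) ~ (r/r_DM)^(-gamma_DM) * (1 + r/r_DM)^(gamma_DM - 3), the logarithmic slope n(r) = -dln rho/dln r equals gamma_DM + (3 - gamma_DM) * r/(r + r_DM), which reproduces the two branches above for gamma_DM = 0 and 1. A quick numerical check of that identity (standalone sketch):

import numpy as np

def nr_double_powerlaw(rad, r_DM, gamma_DM):
    # n(r) = -dln rho / dln r for rho ~ (r/r_DM)^-gamma_DM * (1 + r/r_DM)^(gamma_DM - 3)
    return gamma_DM + (3 - gamma_DM) * rad / (r_DM + rad)

rad = np.logspace(-1, 2, 200)
rho = (rad / 10.)**-1 * (1 + rad / 10.)**-2           # gamma_DM = 1, r_DM = 10
n_numeric = -np.gradient(np.log(rho), np.log(rad))    # finite-difference log-slope
assert np.allclose(n_numeric, nr_double_powerlaw(rad, 10., 1.), atol=1e-2)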
Example #8
def rho(r0, gp):
    if gp.investigate == 'hern':
        return rho_hern(r0, gp)
    elif gp.investigate == 'gaia':
        return rho_gaia(r0, gp)
    elif gp.investigate == 'walk':
        return rho_walk(r0, gp)
    else:
        gh.LOG(1, 'ga.rho not defined')
        pdb.set_trace()
Example #9
def Sigma(r0, gp):
    if gp.investigate == 'hern':
        return Sig_hern(r0, gp)
    elif gp.investigate == 'gaia':
        return Sig_gaia(r0, gp)
    elif gp.investigate == 'walk':
        return Sig_walk(r0, gp)
    else:
        gh.LOG(1, 'ga.Sigma not defined')
        pdb.set_trace()
Example #10
def beta(rad, gp):
    if gp.investigate == 'hern':
        return beta_hern(rad)
    elif gp.investigate == 'gaia':
        return beta_gaia(rad, gp)
    elif gp.investigate == 'walk':
        return beta_walk(rad, gp)
    elif gp.investigate == 'triax':
        return beta_triax(rad)
    else:
        gh.LOG(1, 'ga.beta not defined')
        pdb.set_trace()
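The dispatchers in Examples #5 and #8-#10 all repeat the same if/elif switch on gp.investigate; a table-driven variant (a sketch reusing the per-model routines from these examples) keeps the lookup and the error handling in one place:

_BETA_DISPATCH = {
    'hern': lambda rad, gp: beta_hern(rad),
    'gaia': beta_gaia,
    'walk': beta_walk,
    'triax': lambda rad, gp: beta_triax(rad),
}

def beta(rad, gp):
    try:
        return _BETA_DISPATCH[gp.investigate](rad, gp)
    except KeyError:
        raise ValueError('ga.beta not defined for investigation ' + gp.investigate)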
Example #11
def Sig_sig_los_2_hern(r0, gp):
    # \sigma_p = \sigma_projected = \sigma_{LOS}
    if gp.investigate != 'hern':
        gh.LOG(1, 'wrong investigation')
        pdb.set_trace()
    s = r0 / gp.ana  # [1]
    G1 = 1
    return G1*gp.anM**2/(12.*np.pi*gp.ana**3)*(1./(2.*(1.-s**2)**3)\
                                          *(-3.*s**2*X(s)\
                                          *(8.*s**6-28.*s**4+35.*s**2-20.)\
                                          -24.*s**6+68.*s**4-65.*s**2+6.)\
                                          -6.*np.pi*s) # [(km/s)^2 * Munit/pc^2]
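This routine returns the product Sigma(R) * sigma_LOS^2(R) for the analytic Hernquist model (note the units in the trailing comment). Presumably the line-of-sight dispersion itself follows by dividing out the projected surface density; a hedged one-line sketch using the other analytic helpers from these examples:

import numpy as np

def sig_los_hern_sketch(r0, gp):
    # sigma_LOS(R) = sqrt( [Sigma * sigma_LOS^2](R) / Sigma(R) ); an assumption, not the project's sig_los_hern
    return np.sqrt(Sig_sig_los_2_hern(r0, gp) / Sig_hern(r0, gp))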
Example #12
def Sig_NORM_rho(R0, Sig, Sigerr, gp):
    rho = Sig_INT_rho(R0, Sig, gp)  # [Munit/lunit^3]
    if min(rho) < 0.:
        gh.LOG(1, '*** Sig_NORM_rho: got bin with negative 3D density! ***')
        for i in range(len(rho)):
            if rho[i] < 0.: rho[i] = rho[i - 1]
            gh.LOG(2, 'corrected iteratively to last valid value')
    # normalization: calc tot mass from 2D and 3D, set equal,
    # get center 3D density rho0 right
    # and convert it into [Munit/lunit^3]
    Mr = rho_SUM_Mr(R0, rho)  # [Munit]
    # R0 != r0, too bad
    r0 = R0  # use the same radii for 3D as for the 2D projection
    MR = Sig_SUM_MR(R0, Sig)  # [Munit]
    corr = MR[-1] / Mr[-1]
    gh.LOG(2, ' * Sig_NORM_rho:  no correction by ', corr)
    # rho *= corr                                      # [Munit/lunit^3]
    # fractional error propagation
    rhoerr = rho * Sigerr / Sig  # [Munit/lunit^3]
    # [pc], [Munit/lunit^3], [Munit/lunit^3]
    return r0, rho, rhoerr, Mr
Example #13
def myloglike(cube, ndim, nparams):
    off = 0
    split_mu = []
    split_sig = []
    frac = cube[off]
    off += 1
    for pop in range(2):
        split_mu.append(cube[off])
        off += 1
        split_sig.append(cube[off])
        off += 1
    gh.sanitize_vector(split_mu, 2, -10, 10, True)
    if off != ndim:
        gh.LOG(1, 'wrong number of parameters in myloglike.cube')
        pdb.set_trace()
    gh.LOG(2, 'starting logev evaluation')
    p1_split= 1/np.sqrt(2*np.pi*(split_sig[0]**2+e_split**2))*\
           np.exp(-(split-split_mu[0])**2/(2*(split_sig[0]**2+e_split**2)))
    p2_split= 1/np.sqrt(2*np.pi*(split_sig[1]**2+e_split**2))*\
           np.exp(-(split-split_mu[1])**2/(2*(split_sig[1]**2+e_split**2)))
    p1 = frac * PM * p1_split
    for i in range(0, len(p1)):
        if p1[i] == 0.0:
            p1[i] = 1e-30
    p2 = (1 - frac) * PM * p2_split
    for i in range(0, len(p2)):
        if p2[i] == 0.0:
            p2[i] = 1e-30
    pcom = p1 + p2

    #print('pcom (min, max) = ', min(pcom), max(pcom))
    #print('fraction of pcom == 0 : ', sum(pcom==0)/len(pcom))
    lpcom = np.log(pcom)
    logev = np.sum(lpcom)
    #print(logev)
    #gh.LOG(1, 'logL:',logev)
    if logev < -1e300:
        logev = -1e300
    #    pdb.set_trace()
    return logev
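The two explicit loops above only guard against taking log(0); inside myloglike the same protection can be written vectorized (a sketch with identical semantics):

    p1 = frac * PM * p1_split
    p2 = (1 - frac) * PM * p2_split
    # replace exact zeros by a tiny floor so that np.log never sees 0
    p1 = np.where(p1 == 0.0, 1e-30, p1)
    p2 = np.where(p2 == 0.0, 1e-30, p2)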
Example #14
def rho_gaia(rad, gp):
    if gp.investigate != 'gaia':
        raise Exception('wrong investigation!')
    alpha_star1 = 2.
    alpha_DM = 1.
    beta_DM = 3.
    if gp.case == 9 or gp.case == 10:
        alpha_star1 = 0.5
        beta_DM = 4.
    beta_star1, r_DM, gamma_star1, r_star1, r_a1, gamma_DM, rho0 = gp.files.params
    if gamma_star1 == 0.1:
        nu0 = 2.2e7 / r_star1**3
    elif gamma_star1 == 1.0:
        nu0 = 1.5e7 / r_star1**3
    else:
        raise Exception('rho_gaia: no nu0 defined for gamma_star1 = ' + str(gamma_star1))
    gh.LOG(2, '  analytic rho_gaia:')
    gh.LOG(2, '   rho0 = ', rho0)
    gh.LOG(2, '   r_DM = ', r_DM)
    gh.LOG(2, '   r_star1 = ', r_star1)
    rhodm = rho_general(rad, r_DM, rho0, alpha_DM, beta_DM, gamma_DM)
    rhostar1 = rho_general(rad, r_star1, nu0, alpha_star1, beta_star1,
                           gamma_star1)
    return rhodm, rhostar1
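rho_general is not listed here; given its (scale radius, normalization, alpha, beta, gamma) arguments it presumably implements the standard alpha-beta-gamma (Zhao) double power-law, for which a minimal sketch reads:

import numpy as np

def rho_general_sketch(r, r_s, rho0, alpha, beta, gamma):
    # rho(r) = rho0 * (r/r_s)^-gamma * [1 + (r/r_s)^alpha]^((gamma - beta)/alpha)
    x = np.asarray(r) / r_s
    return rho0 * x**(-gamma) * (1. + x**alpha)**((gamma - beta) / alpha)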
Example #15
    def convert_to_parameter_space(self, gp):
        # priors enter here
        off = 0
        pc = self.cube
        # DM density rho, set in parametrization of n(r)
        offstep = gp.nrho
        tmp_nr = map_nr(pc[0:offstep], 'rho', 0, gp)
        for i in range(offstep):
            pc[off + i] = tmp_nr[i]
        off += offstep

        # rho* only for observations
        if gp.investigate == 'obs':
            offstep = gp.nrho
            tmp_rhostar = map_nr_data(pc[off:off + offstep], 0, gp)
            for i in range(offstep):
                pc[off + i] = tmp_rhostar[i]
            off += offstep

            offstep = 1
            pc[off] = map_MtoL(pc[off], gp)
            off += offstep

        for pop in range(1, gp.pops + 1):  # nu1, nu2, and further
            offstep = gp.nrho
            tmp_nu = map_nr_data(pc[off:off + offstep], pop, gp)
            for i in range(offstep):
                pc[off + i] = tmp_nu[i]
            off += offstep

            offstep = 1
            tmp_hyperSig = map_hypersig(pc[off:off + offstep], 'Sig', pop, gp)
            pc[off] = tmp_hyperSig
            off += offstep

            offstep = 1
            tmp_hypersig = map_hypersig(pc[off:off + offstep], 'sig', pop, gp)
            pc[off] = tmp_hypersig
            off += offstep

            offstep = gp.nbeta
            tmp_betastar = map_betastar_sigmoid(pc[off:off + offstep], gp)
            for i in range(offstep):
                pc[off + i] = tmp_betastar[i]
            off += offstep

        if off != gp.ndim:
            gh.LOG(1, 'wrong subscripts in gi_class_cube')
            raise Exception('wrong subscripts in gi_class_cube')

        return pc
Example #16
    def calculate_J(self, gp):
        if len(self.profs) > 0:
            for i in range(len(self.profs)):
                Sigprof = gip.rho_INT_Sig(gp.xepol, self.profs[i].get_prof('rho', 0), gp)
                Jprof = gip.Jpar(gp.xepol, Sigprof, gp)
                # add 3 extension bins
                tck = splrep(np.log(gp.xepol[:-gp.nexp]), np.log(Jprof), k=1, s=0.1)
                Jext = np.exp(splev(np.log(gp.xepol[-gp.nexp:]), tck))
                Jfull = np.hstack([Jprof, Jext])
                for k in range(gp.nepol):
                    Jfull[k] = max(0, Jfull[k])
                self.profs[i].set_prof('J', Jfull, 0, gp)
        else:
            gh.LOG(1, 'len(self.profs) == 0, did not calculate self.profs.J')
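gip.Jpar is not shown in this example; the astrophysical J factor is conventionally the line-of-sight integral of the squared dark matter density, so a generic numerical sketch (not necessarily what Jpar computes internally) is:

import numpy as np

def J_los_sketch(R, r, rho, z_max=1e5, n_z=2000):
    # J(R) ~ 2 * int_0^z_max rho^2( sqrt(R^2 + z^2) ) dz for a spherical rho(r) given on a grid
    z = np.linspace(0., z_max, n_z)
    rho_los = np.interp(np.sqrt(R**2 + z**2), r, rho)
    return 2. * np.sum(rho_los**2) * (z[1] - z[0])  # simple rectangle rule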
Example #17
def myprior(cube, ndim, nparams):
    # convert to physical space
    off = 0
    cube[off] = cube[off]  # fraction of particles in part 1
    off += 1
    for pop in range(gp.pops):  # no. of pops goes in here, first MW, then 1, 2, ...
        cube[off] = cube[off] * (Mg_max - Mg_min) + Mg_min  # Mg_mu
        off += 1
        cube[off] = cube[off] * (Mg_max - Mg_min)  # Mg_sig
        off += 1
    if off != ndim:
        gh.LOG(1, 'wrong number of parameters in myprior.cube')
        pdb.set_trace()
    return cube
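myprior and myloglike follow the PyMultiNest callback convention: the prior maps the unit hypercube to physical parameters in place, and the likelihood then reads the transformed cube. A hedged sketch of how such a pair is typically handed to the sampler (dimension count and output basename are placeholders):

import pymultinest

n_dims = 1 + 2 * gp.pops  # frac + (Mg_mu, Mg_sig) per population
pymultinest.run(myloglike, myprior, n_dims,
                n_params=n_dims,
                outputfiles_basename='chains/metal_',  # placeholder path
                resume=False, verbose=True)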
Example #18
def myprior(cube, ndim, nparams):
    # convert to physical space
    off = 0
    cube[off] = cube[off] * 0.8 + 0.1  # fraction of particles in part 1, with min 0.1, max 0.9
    # such that each population has at least 10% of the total no. of stars
    off += 1
    for pop in range(2):  # 2 pops
        cube[off] = cube[off] * (split_max - split_min) + split_min  # split_mu
        off += 1
        cube[off] = cube[off] * (split_max - split_min)  # split_sig
        off += 1
    if off != ndim:
        gh.LOG(1, 'wrong number of parameters in myprior.cube')
        pdb.set_trace()
    return cube
Example #19
    def convert_to_parameter_space(self, gp):
        # if we want any priors, here they have to enter:
        pc = self.cube
        off = 0
        offstep = 1
        pc[off] = pc[off] * 200 - 100 + 17**2  # for normalization C
        off += offstep

        offstep = gp.nrho
        tmp = map_nr(pc[off:off + offstep], 'rho', 0, gp)
        for i in range(offstep):
            pc[off + i] = tmp[i]
        off += offstep

        # rho_baryons
        offstep = gp.nrho
        tmp_rho_baryons = map_nr(pc[off:off + offstep], 'nu', 0, gp)
        for i in range(offstep):
            pc[off + i] = tmp_rho_baryons[i]
        off += offstep

        offstep = 1
        pc[off] = map_MtoL(pc[off], gp)
        off += offstep

        for pop in range(1, gp.pops + 1):
            offstep = gp.nrho
            tmp = map_nr(pc[off:off + offstep], 'nu', pop, gp)
            for i in range(offstep):
                pc[off + i] = tmp[i]
            off += offstep

            offstep = gp.nbeta
            tmp = map_tiltstar(pc[off:off + offstep], gp)
            for i in range(offstep):
                pc[off + i] = tmp[i]
            off += offstep

        if off != gp.ndim:
            gh.LOG(1, 'wrong subscripts in gi_class_cube')
            raise Exception('wrong subscripts in gi_class_cube')

        return pc
Example #20
    def read_Sig(self, gp):
        for pop in np.arange(gp.pops + 1):
            print('read_Sig on file ', gp.files.Sigfiles[pop])
            Sigx, binmin, binmax, Sigdat, Sigerr = gh.readcol5(
                gp.files.Sigfiles[pop])
            # 3*[rscale], [Sig0], [Sig0]
            # switch to Munit (msun) and pc here
            Sigx = Sigx[:] * gp.Xscale[pop]  # [pc]
            Sigdat = Sigdat[:] * gp.Sig0pc[pop]  # [Munit/pc^2]
            Sigerr = Sigerr[:] * gp.Sig0pc[pop]  # [Munit/pc^2]
            # take the overall bins for rbin, binmin, binmax vals
            if pop == 0:
                self.rbin = Sigx  # [pc]
                self.binmin = binmin * gp.Xscale[pop]  # [pc]
                self.binmax = binmax * gp.Xscale[pop]  # [pc]
                gp.xipol = self.rbin  # [pc]
                minr = min(self.rbin)  # [pc]
                maxr = max(self.rbin)  # [pc]
                gp.xepol = np.hstack([
                    minr / 8., minr / 4., minr / 2., self.rbin, 2 * maxr,
                    4 * maxr, 8 * maxr
                ])  # [pc]
                gp.xfine = introduce_points_in_between(gp.xepol, gp)
            # deproject,
            # takes [pc], 2* [Munit/pc^2], gives [pc], 2* [Munit/pc^3],
            # already normalized to same total mass
            if gp.geom == 'sphere':
                Sigdatnu, Sigerrnu = gh.complete_nu(self.rbin, Sigdat, Sigerr,
                                                    gp.xfine)
                dummyx, nudatnu, nuerrnu, Mrnu = gip.Sig_NORM_rho(
                    gp.xfine, Sigdatnu, Sigerrnu, gp)
                self.nu_epol.append(gh.linipollog(gp.xfine, nudatnu, gp.xepol))
                self.nuerr_epol.append(
                    gh.linipollog(gp.xfine, nuerrnu, gp.xepol))
                nudat = gh.linipollog(gp.xfine, nudatnu, gp.xipol)
                nuerr = gh.linipollog(gp.xfine, nuerrnu, gp.xipol)
                Mr = gh.linipollog(gp.xfine, Mrnu, gp.xipol)
                self.Mr.append(Mr)  # [Munit]
                Mhalf = Mr[-1] / 2.  # [Munit]
                self.Mhalf.append(Mhalf)  # [Munit]
                # spline interpolation with M as x axis, to get half-mass of system:
                splpar_M = splrep(np.log(Mr), np.log(self.binmax), s=0.01)
                r_half = np.exp(splev(np.log(Mhalf), splpar_M))  # [pc]
                self.rhalf.append(r_half)  # [pc]
                # spline interpolation of nu at r_half:
                splpar_nu = splrep(np.log(gp.xipol), np.log(nudat), s=0.01)
                nuhalf = np.exp(splev(np.log(r_half), splpar_nu))  # [Munit/pc^3]
                self.nuhalf.append(nuhalf)  # [Munit/pc^3]
                # calculate n(r) parameters as used in gi_physics from the nu(r) profile
                rleft = gp.xfine[gp.xfine <= r_half]
                rleft = rleft[::-1]
                rright = gp.xfine[gp.xfine > r_half]
                nuleft = nudatnu[gp.xfine <= r_half]
                nuleft = nuleft[::-1]
                nuright = nudatnu[gp.xfine > r_half]
                rlast = 1. * r_half
                nulast = 1. * nuhalf
                sloperight = []
                for r0 in rright:
                    i = np.argmin(np.abs(rright - r0))
                    Deltanu = -(np.log(nuright[i]) - np.log(nulast))
                    Deltar = np.log(rright[i]) - np.log(rlast)
                    sloperight.append(Deltanu / Deltar)
                    nulast = nuright[i]
                    rlast = rright[i]
                rlast = 1. * r_half
                nulast = 1. * nuhalf
                slopeleft = []
                # work through the array from the left, from r_half
                for r0 in rleft:
                    i = np.argmin(np.abs(rleft - r0))
                    Deltanu = np.log(nuleft[i]) - np.log(nulast)
                    Deltar = np.log(rlast) - np.log(rleft[i])
                    slopeleft.append(Deltanu / Deltar)
                    nulast = nuleft[i]
                    rlast = rleft[i]
                # inverse order of slopeleft to have it sorted according increasing r
                slopeleft = slopeleft[::-1]
                slopes = np.hstack([slopeleft, sloperight])
                nrpar = 1. * slopes[:-1]
                # Deltalogr = np.log(gp.xfine[1:]) - np.log(gp.xfine[:-1])
                # nrpar = (slopes[1:]-slopes[:-1])/Deltalogr
                spl_nrpar = splrep(gp.xfine[:-1], nrpar, k=1)
                nre = splev(gp.xipol, spl_nrpar)
                extleft = splev(gp.xepol[0:3], spl_nrpar)   # np.array([nrpar[0], nrpar[0], nrpar[0]])
                extright = splev(gp.xepol[-3:], spl_nrpar)  # [nrpar[-1], nrpar[-1], nrpar[-1]]
                maxnre = max(nre)
                self.nrnu.append(
                    np.hstack(
                        [nuhalf, nre[0], extleft, nre, extright, nre[-1]]))

                # checking for correct n(r) profile, have to wait for pop==1
                # if pop==1:
                # from pylab import plot, xscale
                # import gi_analytic as ga
                # testnu = ga.rho_gaia(gp.xfine,gp)[pop]
                # testnrnu = -gh.derivipol(np.log(testnu), np.log(gp.xfine))
                # plot(gp.xfine, testnrnu, 'b-')
                # plot(gp.xfine[:-1], nrpar, 'r.-')
                # xscale('log')

                errnre = np.ones(1 + len(extleft) + len(nre) + len(extright) +
                                 1) * maxnre / 10.
                for k in np.arange(1, 4):
                    errnre[k - 1] *= 5
                    errnre[-k] *= 5
                self.nrnuerr.append(np.hstack([nuhalf / 3., errnre]))
                # import gi_physics as phys
                # from pylab import loglog, axvline, axhline, plot, xscale, clf
                # loglog(gp.xepol, self.nu_epol[0], 'b.-', lw=1)
                # axvline(r_half)
                # axhline(nuhalf)
                # rh = phys.rho(gp.xepol, self.nrnu, 0, gp)
                # rhmin = phys.rho(gp.xepol, self.nrnu - self.nrnuerr, 0, gp)
                # rhmax = phys.rho(gp.xepol, self.nrnu + self.nrnuerr, 0, gp)
                # loglog(gp.xepol, rh, 'r.-', linewidth=2)
                # loglog(gp.xepol, rhmin, 'g.-')
                # loglog(gp.xepol, rhmax, 'g--')
                # clf()
                # plot(gp.xfine[:-1], nrpar, '.-')
                # plot(gp.xipol, nre, '.-')
                # xscale('log')
                # axvline(r_half)
                # print(nrpar)
            else:
                gh.LOG(1, 'working in disc symmetry: reading nu directly')
                dummy1, dummy2, dummy3, nudat, nuerr = gh.readcol5(gp.files.nufiles[pop])
                self.nuhalf.append(nudat[round(len(nudat) / 2)])  # HS ToDo: check validity of this
            self.Sig.append(Sigdat)  # [Munit/pc^2]
            self.Sigerr.append(Sigerr)  # [Munit/pc^2]
            self.barSig.append(np.mean(Sigerr))
            self.nu.append(nudat)  # [Munit/pc^3]
            self.nuerr.append(nuerr)  # [Munit/pc^3]
        return
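The two slope loops in this reader build n(r) = -dln nu/dln r outwards and inwards from r_half; on a monotonic radius grid the same logarithmic slope can be evaluated in one line (a sketch, not bin-for-bin identical to the loop version):

import numpy as np

def log_slope(r, nu):
    # n(r) = -dln nu / dln r via second-order finite differences
    return -np.gradient(np.log(nu), np.log(r))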
Example #21
def geom_loglike(cube, ndim, nparams, gp):
    tmp_profs = Profiles(gp.pops, gp.nipol)
    off = 0
    offstep = 1
    norm = cube[off]
    off += offstep

    offstep = gp.nrho
    rhodmpar = np.array(cube[off:off+offstep])  #SS cube[1:1+nrho]
    tmp_rho = phys.rho(gp.xepol, rhodmpar, 0, gp)
    tmp_profs.set_prof('rho', tmp_rho[gp.nexp:-gp.nexp], 0, gp)
    off += offstep

    offstep = gp.nrho
    lbaryonpar = np.array(cube[off:off+offstep]) #SS cube[1+nrho:1+2*nrho]
    tmp_rhostar = phys.rho(gp.xepol, lbaryonpar, 0, gp)[gp.nexp:-gp.nexp]
    tmp_profs.set_prof('nu', tmp_rhostar, 0, gp) # [Munit/pc^3]
    Sigstar = phys.nu_SUM_Sig(gp.dat.binmin, gp.dat.binmax, tmp_rhostar) # [Munit/pc^2]
    tmp_profs.set_prof('Sig', Sigstar, 0, gp)
    off += offstep

    MtoL = cube[off]  #SS cube[1+2*nrho]
    off += 1

    for pop in np.arange(1, gp.pops+1):
        offstep = gp.nrho
        nupar = np.array(cube[off:off+offstep])  #SS 1 cube[2+2*nrho:2+3*nrho]
        tmp_nu = phys.rho(gp.xepol, nupar, pop, gp)[gp.nexp:-gp.nexp]
        tmp_profs.set_prof('nu', tmp_nu, pop, gp) # [Munit/pc^3]
        tmp_Sig = phys.nu_SUM_Sig(gp.dat.binmin, gp.dat.binmax, tmp_nu) # [Munit/pc^2]
        tmp_profs.set_prof('Sig', tmp_Sig, pop, gp)
        off += offstep

        if gp.checksig:
            pdb.set_trace()

        offstep = gp.nbeta
        if gp.chi2_nu_converged:
            tiltpar = np.array(cube[off:off+offstep])#SS cube[2+3*nrho:..+nbeta]
            tmp_tilt = phys.tilt(gp.xipol, tiltpar, gp)
            if check_tilt(tmp_tilt, gp):
                gh.LOG(1, 'tilt error')
                tmp_profs.chi2 = gh.err(2., gp)
                return tmp_profs
            tmp_profs.set_prof('tilt', tmp_tilt, pop, gp)
            sig = phys.sigz(gp.xepol, rhodmpar, lbaryonpar, MtoL, nupar, norm, tiltpar, pop, gp)
            tmp_profs.set_prof('sig', sig, pop, gp)
            # tmp_profs.set_prof('kap', kap, pop, gp)
        off += offstep # add also in case Sig has not yet converged
        # to get the right variables

    if off != gp.ndim:
        gh.LOG(1,'wrong subscripts in gi_class_cube')
        raise Exception('wrong subscripts in gi_class_cube')

    # determine log likelihood
    chi2 = calc_chi2(tmp_profs, gp)
    gh.LOG(1, '   log L = ', -chi2/2.)
    tmp_profs.chi2 = chi2

    return tmp_profs   # from   likelihood L = exp(-\chi^2/2), want log of that
Example #22
    #Sigdatnu, Sigerrnu = gh.complete_nu(Rbin, Sig_phot, Sig_phot/10., gp.xfine)
    #dummyx,nudatnu,nuerrnu,Mrnu = gip.Sig_NORM_rho(gp.xfine,Sigdatnu,Sigerrnu,gp)
    #nudat = gh.linipollog(gp.xfine, nudatnu, gp.xipol)
    #nuerr = gh.linipollog(gp.xfine, nuerrnu, gp.xipol)
    #loglog(gp.xipol, nudat, co)
    #axvline(Rhalf, color=co)
    #xlim([min(gp.xipol), max(gp.xipol)])
    #xlabel(r'$R$')
    #ylabel(r'$\nu(R)$')
    #plum = 100*gh.plummer(gp.xipol, Rhalf, len(R0))
    #loglog(gp.xipol, plum, color=co, linestyle='--')
    #ylim([min(plum), max(plum)])
    #pdb.set_trace()

    return


## \fn run(gp)
# run MultiNest
# @param gp global parameters defined in gi_params.py

if __name__ == "__main__":
    import gi_params
    global gp
    gp = gi_params.Params()
    if gp.pops < 2:
        gh.LOG(1, " population splitting needs 2 or more populations, corrected")
        gp.pops = 2
    run(gp)
Example #23
def Sig_sig_los_2(r0, gp):
    if gp.investigate == 'hern':
        return Sig_sig_los_2_hern(r0, gp)
    else:
        gh.LOG(1, 'ga.Sig_sig_los_2 not defined')
        return 0. * r0 - 1
Example #24
alpha_s = alpha_s[order]
delta_s = delta_s[order]
Vhel = Vhel[order]
Vhel_err = Vhel_err[order]
Fe = Fe[order]
Fe_err = Fe_err[order]
Mg = Mg[order]
Mg_err = Mg_err[order]

A = np.loadtxt(gp.files.dir + 'w_2.0.dat')
Rpt, wpt = A.T  # [arcmin], [1]
arcmin__pc = 2. * np.pi * DL / (360 * 60)  # [pc] at distance 138 kpc for Fornax
Rpt *= arcmin__pc  # [pc]

gh.LOG(1, 'starting MultiNest run:')
# works with investigation = 'obs', pops = 2, metalpop = True
# profile with python3 -m cProfile grd_split.py
#comm = MPI.COMM_SELF.Spawn(sys.executable,
#                           args=['grd_metalsplit.py'],
#                           maxprocs=3)

### start grd_metalsplit


def myprior(cube, ndim, nparams):
    # convert to physical space
    off = 0
    cube[off] = cube[off]  # fraction of particles in part 1
    off += 1
    for pop in range(
Example #25
def sig_los(r0, gp):
    if gp.investigate == 'hern':
        return sig_los_hern(r0, gp)
    else:
        gh.LOG(1, 'ga.sig_los not defined')
        return 0 * r0 - 1
Example #26
def geom_loglike(cube, ndim, nparams, gp):
    tmp_profs = Profiles(gp.pops, gp.nepol)
    off = 0
    offstep = gp.nrho
    if gp.chi2_Sig_converged <= 0:
        rhodmpar = np.array(cube[off:off + offstep])
        tmp_rho0 = phys.rho(gp.xepol, rhodmpar, 0, gp)
        # for J factor calculation (has been deferred to output routine)
        #tmp_rhofine = phys.rho(gp.xfine, rhodmpar, 0, gp)
        #tmp_Jfine = gip.Jpar(gp.xfine, tmp_rhofine, gp) #tmp_rhofine
        #tck = splrep(gp.xfine[:-3], tmp_Jfine)
        #tmp_J = splev(gp.xepol, tck)
        # rhodmpar hold [rho(rhalf), nr to be used for integration
        # from halflight radius, defined on gp.xepol]
        # (only calculate) M, check
        tmp_M0 = gip.rho_SUM_Mr(gp.xepol, tmp_rho0)
        # store profiles
        tmp_profs.set_prof('nr', 1. * rhodmpar[1 + 1:-1], 0, gp)
        tmp_profs.set_prof('rho', tmp_rho0, 0, gp)
        #tmp_profs.set_prof('J', tmp_J, 0, gp)
        tmp_profs.set_prof('M', tmp_M0, 0, gp)
    off += offstep  # anyhow, even if Sig not yet converged

    # get profile for rho*
    if gp.investigate == 'obs':
        offstep = gp.nrho
        lbaryonpar = np.array(cube[off:off + offstep])
        rhostar = phys.rho(gp.xepol, lbaryonpar, 0, gp)
        off += offstep
        Signu = gip.rho_param_INT_Sig(gp.xepol, lbaryonpar, 0, gp)  # [Munit/pc^2]
        MtoL = cube[off]
        off += 1
        # store these profiles every time
        tmp_profs.set_prof('nu', rhostar, 0, gp)
        tmp_profs.set_prof('Sig', Signu, 0, gp)
        tmp_profs.set_MtoL(MtoL)
    else:
        lbaryonpar = np.zeros(gp.nrho)
        MtoL = 0.
    for pop in np.arange(1, gp.pops + 1):  # [1, 2, ..., gp.pops]
        offstep = gp.nrho
        nupar = np.array(cube[off:off + offstep])
        tmp_nrnu = 1. * nupar[1 + 1:-1]

        tmp_nu = phys.rho(gp.xepol, nupar, pop, gp)
        tmp_Signu = gip.rho_param_INT_Sig(gp.xepol, nupar, pop, gp)
        #tmp_nu = pool.apply_async(phys.rho, [gp.xepol, nupar, pop, gp])
        #tmp_Signu = pool.apply_async(gip.rho_param_INT_Sig, [gp.xepol, nupar, pop, gp])
        off += offstep

        offstep = 1
        tmp_hyperSig = cube[off:off + offstep]
        off += offstep

        offstep = 1
        tmp_hypersig = cube[off:off + offstep]
        off += offstep

        offstep = gp.nbeta
        if gp.chi2_Sig_converged <= 0:
            betapar = np.array(cube[off:off + offstep])
            tmp_beta, tmp_betastar = phys.beta(gp.xepol, betapar, gp)
            if check_beta(tmp_beta, gp):
                gh.LOG(2, 'beta error')
                tmp_profs.chi2 = gh.err(1., gp)
                return tmp_profs
            try:
                #if True:
                if gp.checksig and gp.investigate == 'hern':
                    import gi_analytic as ga
                    anrho = ga.rho(gp.xepol, gp)[0]
                    rhodmpar_half = np.exp(
                        splev(gp.dat.rhalf[0], splrep(gp.xepol,
                                                      np.log(anrho))))
                    nr = -gh.derivipol(np.log(anrho), np.log(gp.xepol))
                    dlr = np.hstack([nr[0], nr, nr[-1]])
                    if gp.investigate == 'gaia':
                        dlr[-1] = 4
                        rhodmpar = np.hstack([rhodmpar_half, dlr])
                    lbaryonpar = 0.0 * rhodmpar
                    MtoL = 0.0
                    betapar = np.array([0, 0, 2,
                                        max(gp.xipol) / 2])  # for hern
                    annu = ga.rho(gp.xepol, gp)[1]
                    nupar_half = np.exp(
                        splev(gp.dat.rhalf[1], splrep(gp.xepol, np.log(annu))))
                    nrnu = -gh.derivipol(np.log(annu), np.log(gp.xepol))
                    dlrnu = np.hstack([nrnu[0], nrnu, nrnu[-1]])
                    if gp.investigate == 'gaia':
                        dlrnu[-1] = 6
                    nupar = np.hstack([nupar_half, dlrnu])
                elif gp.checkbeta and gp.investigate == 'gaia':
                    #                    rhodmpar = np.array([ 0.41586608, 0.38655515, 0.60898657, 0.50936769, 0.52601378, 0.54526758,  0.5755599, 0.57900806, 0.60252357, 0.60668445, 0.62252721, 0.63173754, 0.64555439, 0.65777175, 0.67083556, 0.68506606, 0.69139872, 0.66304763, 0.61462276, 0.70916575, 0.53287872])
                    rhodmpar = np.array([
                        0.18235821, 0.4719348, 0., 0., 0.10029569, 0.11309553,
                        0.25637863, 0.31815175, 0.40621336, 0.46247927,
                        0.53545415, 0.60874961, 0.68978141, 0.79781574,
                        0.91218048, 1.08482356, 1.36074895, 1.88041885,
                        2.31792908, 2.62089078, 3.001
                    ])

                    betapar = np.array([
                        1.23555034e-03, 9.89999994e-01, 2.03722518e+00,
                        5.85640906e+00
                    ])
                    nupar = np.array([
                        0.15649498, 6.65618254, 0.10293663, 0.1087109,
                        0.13849277, 0.24371261, 0.62633345, 1.05913181,
                        1.43774113, 1.82346043, 2.20091446, 2.60007997,
                        2.98745825, 3.423104, 3.80766658, 4.2089698,
                        4.62950843, 4.91166037, 4.97380638, 4.99718073,
                        5.2277589
                    ])
                    gp.dat.nrnu = [
                        np.array([
                            0.15476906, 0.85086798, 0.9342867, 0.88161169,
                            0.83254241, 0.85086798, 0.99930431, 1.22211638,
                            1.47184763, 1.78910057, 2.1987677, 2.51961046,
                            2.80345393, 3.10336133, 3.88504346, 4.52442727,
                            4.88817769, 5.07880404, 4.83455511, 6.32165657,
                            4.88817769
                        ]),
                        np.array([
                            0.15476906, 0.85086798, 0.9342867, 0.88161169,
                            0.83254241, 0.85086798, 0.99930431, 1.22211638,
                            1.47184763, 1.78910057, 2.1987677, 2.51961046,
                            2.80345393, 3.10336133, 3.88504346, 4.52442727,
                            4.88817769, 5.07880404, 4.83455511, 6.32165657,
                            4.88817769
                        ]),
                        np.array([
                            0.15476906, 0.85086798, 0.9342867, 0.88161169,
                            0.83254241, 0.85086798, 0.99930431, 1.22211638,
                            1.47184763, 1.78910057, 2.1987677, 2.51961046,
                            2.80345393, 3.10336133, 3.88504346, 4.52442727,
                            4.88817769, 5.07880404, 4.83455511, 6.32165657,
                            4.88817769
                        ]),
                        np.array([
                            0.15476906, 0.85086798, 0.9342867, 0.88161169,
                            0.83254241, 0.85086798, 0.99930431, 1.22211638,
                            1.47184763, 1.78910057, 2.1987677, 2.51961046,
                            2.80345393, 3.10336133, 3.88504346, 4.52442727,
                            4.88817769, 5.07880404, 4.83455511, 6.32165657,
                            4.88817769
                        ])
                    ]
                    gp.dat.nrnuerr = [
                        np.array([
                            0.05158969, 12.22044422, 2.44408884, 2.44408884,
                            2.44408884, 2.44408884, 0.48881777, 0.48881777,
                            0.48881777, 0.48881777, 0.48881777, 0.48881777,
                            0.48881777, 0.48881777, 0.48881777, 0.48881777,
                            0.48881777, 2.44408884, 2.44408884, 2.44408884,
                            2.44408884
                        ]),
                        np.array([
                            0.05158969, 12.22044422, 2.44408884, 2.44408884,
                            2.44408884, 2.44408884, 0.48881777, 0.48881777,
                            0.48881777, 0.48881777, 0.48881777, 0.48881777,
                            0.48881777, 0.48881777, 0.48881777, 0.48881777,
                            0.48881777, 2.44408884, 2.44408884, 2.44408884,
                            2.44408884
                        ]),
                        np.array([
                            0.05158969, 12.22044422, 2.44408884, 2.44408884,
                            2.44408884, 2.44408884, 0.48881777, 0.48881777,
                            0.48881777, 0.48881777, 0.48881777, 0.48881777,
                            0.48881777, 0.48881777, 0.48881777, 0.48881777,
                            0.48881777, 2.44408884, 2.44408884, 2.44408884,
                            2.44408884
                        ]),
                        np.array([
                            0.05158969, 12.22044422, 2.44408884, 2.44408884,
                            2.44408884, 2.44408884, 0.48881777, 0.48881777,
                            0.48881777, 0.48881777, 0.48881777, 0.48881777,
                            0.48881777, 0.48881777, 0.48881777, 0.48881777,
                            0.48881777, 2.44408884, 2.44408884, 2.44408884,
                            2.44408884
                        ])
                    ]

                    lbaryonpar = 0.0 * rhodmpar
                    MtoL = 0.0

                sig, kap, zetaa, zetab = phys.sig_kap_zet(
                    gp.xepol, rhodmpar, lbaryonpar, MtoL, nupar, betapar, pop,
                    gp)
                #fill_between(gp.xipol, gp.dat.sig[1]-gp.dat.sigerr[1], gp.dat.sig[1]+gp.dat.sigerr[1])
                #plot(gp.xepol, sig, 'r')
                #xscale('log')
                #ylim([0, 30])
                #xlabel('$r$ [pc]')
                #ylabel('$\sigma_{LOS}$ [km/s]')
                #savefig('siglos_gaia_2.pdf')
                #pdb.set_trace()
            except Exception:
                gh.LOG(1, 'sigma error')
                tmp_profs.chi2 = gh.err(2., gp)
                return tmp_profs
            # now store the profiles
            gh.sanitize_vector(tmp_beta, len(tmp_profs.x0), -200, 1, gp.debug)
            tmp_profs.set_prof('beta', tmp_beta, pop, gp)
            gh.sanitize_vector(tmp_betastar, len(tmp_profs.x0), -1, 1,
                               gp.debug)
            tmp_profs.set_prof('betastar', tmp_betastar, pop, gp)
            tmp_profs.set_prof('sig', sig, pop, gp)
            tmp_profs.hypersig = tmp_hypersig
            tmp_profs.set_prof('kap', kap, pop, gp)
            tmp_profs.set_zeta(zetaa, zetab, pop)

        tmp_profs.set_prof('nrnu', tmp_nrnu, pop, gp)
        tmp_profs.set_prof('nu', tmp_nu, pop, gp)  # pool: tmp_nu.get()

        # following profile needs to be stored at all times, to calculate chi
        tmp_profs.set_prof('Sig', tmp_Signu, pop, gp)
        tmp_profs.hyperSig = tmp_hyperSig

        off += offstep  # still do this even if gp.chi2_Sig_converged is False
    if off != gp.ndim:
        gh.LOG(1, 'wrong subscripts in gi_loglike')
        pdb.set_trace()

    # determine log likelihood
    chi2 = calc_chi2(tmp_profs, gp)
    gh.LOG(
        -1, gp.investigate + '/' + str(gp.case) + '/' + gp.files.timestamp +
        ':  ln L = ', gh.pretty(-chi2 / 2.))
    # x=gp.dat.rbin
    # linedat,=ax.loglog(x, gp.dat.Sig[1], 'b')
    # line,=ax.loglog(x, tmp_profs.get_prof("Sig", 1), 'r', alpha=0.1)
    # plt.draw()
    # plt.show()
    tmp_profs.chi2 = chi2

    # after some predefined wallclock time and Sig convergence, plot all profiles
    #if time.time() - gp.last_plot >= gp.plot_after and gp.chi2_Sig_converged <= 0:
    #    gp.last_plot = time.time()
    #    try:
    #        import plotting.plot_profiles
    #        plotting.plot_profiles.run(gp.files.timestamp, gp.files.outdir, gp)
    #    except:
    #        print('plotting error in gi_loglike!')
    # close pool automatically after with clause
    return tmp_profs
Example #27
        # TODO check: read in pickled values
        with open(basename+tt+'/pc', 'rb') as fn:
            pc = pickle.load(fn)
        pcall.merge(pc)

    # use last timestamp for output
    import import_path as ip
    ip.insert_sys_path(basename+tt+'/programs/')
    ip.insert_sys_path(basename+tt+'/programs/sphere')
    import gi_params as gip
    gp = gip.Params(tt)
    gp.pops = sr.get_pops(basename+tt+'/')
    print('working with ', gp.pops, ' populations')

    if len(pcall.chis) == 0:
        gh.LOG(1, 'no profiles found for plotting')
        sys.exit(1)
    # first plot all chi^2 values in histogram
    pcall.plot_profile(basename+tt+'/', 'chi2', 0, gp)
    # then select only the best models for plotting the profiles
    pcall.cut_subset()
    import gi_helper as gh
    read_scale(gp) # store half-light radii in  gp.Xscale
    Radii, Binmin, Binmax, Sigdat1, Sigerr1 = gh.readcol5(gp.files.Sigfiles[0])
    # [Xscale0], [Munit/Xscale0^2]
    # verified that indeed the stored files in the run directory are used
    gp.xipol = Radii * gp.Xscale[0]       # [pc]
    maxR = max(Radii)                     # [pc]
    minR = min(Radii)                     # [pc]
    Radii = np.hstack([minR/8, minR/4, minR/2, Radii, 2*maxR, 4*maxR, 8*maxR])
    gp.xepol = Radii * gp.Xscale[0]       # [pc]
Example #28
    def __init__(self, gp):
        from gi_class_files import Files
        # show plots during execution of data readout?
        # set automatically if gr_MCMCbin.py is called on the command line
        self.showplots = False
        self.n = 300  # number of iterations in gr_MCMCbin
        self.Rerr = 0.  # 0.01      # distance error in [Xscale]
        self.vrerr = 2.0  # [km/s] 2.0 # velocity error. raises sig_los, errors in it
        if gp.investigate == 'hern':
            self.repr = 1  # choose simulation representation
            self.Rcut = 1.e10  # [Rvir]
            self.Rmin = 0.  # [Rscale]i
            self.Rmax = 10.  # [Rscale]
            self.simname = gp.files.get_sim_name(
                gp)  # dir+'simulation/'+prename+'unit_hern_%i' %(repr)
            if gp.pops == 1:
                self.simpos = gp.files.dir + 'simulation/' + self.simname + 'pos.txt'
                self.simvel = gp.files.dir + 'simulation/' + self.simname + 'vel.txt'
            elif gp.pops == 2:
                self.simpos = gp.files.dir + 'simulation/' + self.simname + 'stars_pos.txt'
                self.simvel = gp.files.dir + 'simulation/' + self.simname + 'stars_vel.txt'
            else:
                gh.LOG(0, 'get data for more than 2 pops in Hernquist profile')
                pdb.set_trace()
        elif gp.investigate == 'walk':  # or just want to try some other generic pymc stuff:
            self.r_DM = 1000.
            self.fi = Files(gp)
            self.fi.set_walk(gp)
            self.dir = self.fi.dir
            self.fil = self.dir + 'mem2'

            self.pmsplit = 0.9  # minimum probability of membership required for analysis
            # use 0 if grw_* should be called from within gravimage
            self.fileposcartesian = self.dir + 'simulation/pos.txt'
            self.filevelcartesian = self.dir + 'simulation/vel_my.txt'

        elif gp.investigate == 'gaia':
            self.fi = Files(gp)
            self.fi.set_gaia(gp)
            self.dir = self.fi.dir
            self.fil = self.dir + 'dat'
            self.r_DM = 1000.

        elif gp.investigate == 'triax':
            self.fi = Files(gp)
            self.fi.set_triax(gp)
            self.dir = self.fi.dir
            self.fil = self.dir + 'dat'
            self.r_DM = 1500.  # [pc]

        elif gp.investigate == 'obs':
            self.fi = Files(gp)
            self.fi.set_obs(gp)
            self.dir = self.fi.dir
            self.fil = self.dir + 'mem2'
            self.pmsplit = 0.9

        elif gp.investigate == 'coll':
            self.fi = Files(gp)
            self.fi.set_coll(gp)
            self.dir = self.fi.dir
            self.fil = self.dir + 'dat'
Example #29
def run(gp):
    import gr_params
    gpr = gr_params.grParams(gp)
    gu.G1__pcMsun_1km2s_2 = 1.  # as per definition
    gp.anM = 1.  #
    gp.ana = 1.  #

    print('grh_com: input: ', gpr.simpos)
    xall, yall, zall = np.loadtxt(gpr.simpos, skiprows=1,
                                  unpack=True)  # 3*[gp.ana]
    vxall, vyall, vzall = np.loadtxt(gpr.simvel, skiprows=1,
                                     unpack=True)  # 3*[gp.ana]
    nall = len(xall)  # [1]

    # shuffle and restrict to ntracer random points
    ndm = int(min(gp.ntracer[0], nall - 1))
    trace = random.sample(range(nall), nall)
    if gp.pops > 1:
        gh.LOG(1, 'implement more than 2 pops for hern')
        pdb.set_trace()

    PM = [1. for i in trace]  # [1]=const, no prob. of membership info in dataset
    x = [xall[i] for i in trace]  # [gp.ana]
    y = [yall[i] for i in trace]  # [gp.ana]
    z = [zall[i] for i in trace]  # [gp.ana]
    vz = [vzall[i] for i in trace]  # [km/s]
    PM = np.array(PM)
    x = np.array(x)
    y = np.array(y)
    z = np.array(z)
    vz = np.array(vz)

    com_x, com_y, com_z, com_vz = com_shrinkcircle_v(
        x, y, z, vz, PM)  # 3*[gp.ana], [velocity]
    print('COM [gp.ana]: ', com_x, com_y, com_z, com_vz)

    xnew = (x - com_x)  #*gp.ana      # [pc]
    ynew = (y - com_y)  #*gp.ana      # [pc]
    #znew = (z-com_z) # *gp.ana      # [pc]
    vznew = vz - com_vz  # *1e3*np.sqrt(gu.G1__pcMsun_1km2s_2*gp.anM/gp.ana) # [km/s], from conversion from system with L=G=M=1

    R0 = np.sqrt(xnew**2 + ynew**2)  # [pc]
    Rhalf = np.median(R0)  # [pc]
    Rscale = Rhalf  # or gpr.r_DM # [pc]

    print('Rscale/pc = ', Rscale)

    # only for 0 (all) and 1 (first and only population)
    for pop in range(gp.pops + 1):
        crscale = open(gp.files.get_scale_file(pop), 'w')
        print('# Rscale in [pc],',' surfdens_central (=dens0) in [Munit/rscale**2],',\
              ' and totmass_tracers [Munit],',\
              ' and max(sigma_LOS) in [km/s]', file=crscale)
        print(Rscale, file=crscale)
        crscale.close()

        gh.LOG(2, 'grh_com: output: ', gp.files.get_com_file(pop))
        filepos = open(gp.files.get_com_file(pop), 'w')
        print('# x [Rscale]', 'y [Rscale]', 'vLOS [km/s]', file=filepos)
        for k in range(ndm):
            print(xnew[k] / Rscale, ynew[k] / Rscale, vznew[k], file=filepos)
        filepos.close()
        gh.LOG(2, '')
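com_shrinkcircle_v is not listed here; from its use it presumably implements a shrinking-circle centre finder: compute the membership-weighted centre, shrink the aperture, discard particles outside it, and repeat. A minimal 2D sketch of that idea (hypothetical helper, not the project's implementation):

import numpy as np

def com_shrinkcircle_sketch(x, y, vz, pm, shrink=0.95, min_frac=0.05):
    # iteratively recentre on the PM-weighted mean while shrinking the aperture
    sel = np.ones(len(x), dtype=bool)
    rmax = np.max(np.sqrt(x**2 + y**2))
    cx = cy = cvz = 0.0
    while sel.sum() > max(min_frac * len(x), 1):
        w = pm[sel]
        cx = np.average(x[sel], weights=w)
        cy = np.average(y[sel], weights=w)
        cvz = np.average(vz[sel], weights=w)
        rmax *= shrink
        sel = np.sqrt((x - cx)**2 + (y - cy)**2) < rmax
    return cx, cy, cvz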