Example 1
def sig_rz(z, zpars, tpars):
    # mirror prior: fold negative parameters onto positive values
    tparsu = abs(tpars)

    # dz = zpars[2]-zpars[1]
    # sig_Rz = np.zeros(gp.nipol)
    # sig_Rz[0] = tparsu[0] * dz / 2.
    # for i in range(1,gp.nipol):
    #   sig_Rz[i] = sig_Rz[i-1] + tparsu[i] * dz
    # f = gp.ipol(zpars,sig_Rz,z)

    # alternative: interpolate the parameters directly --> don't assume monotonicity!
    f = gh.ipol(zpars, tparsu, z)
    return f
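In both variants the heavy lifting is done by gh.ipol, which is not shown here. A minimal stand-in, assuming gh.ipol performs plain 1-D linear interpolation (the ipol name and the grids below are purely illustrative), makes the mirror prior easy to exercise on its own:

import numpy as np

def ipol(xgrid, ygrid, x):
    # hypothetical stand-in for gh.ipol: plain 1-D linear interpolation
    return np.interp(x, xgrid, ygrid)

zpars = np.linspace(100., 1300., 7)                 # [pc] support points
tpars = np.array([3., -2., 5., -1., 4., 2., 6.])    # signed tilt parameters
tparsu = abs(tpars)                                 # mirror prior folds the sign away
print(ipol(zpars, tparsu, np.array([150., 700.])))  # interpolated |tpars| at requested z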
Example 2
def sig_rz(z, zpars, tpars):
    # mirror prior: fold negative parameters onto positive values
    tparsu = abs(tpars)

    # dz = zpars[2]-zpars[1]
    # sig_Rz = np.zeros(gp.nipol)
    # sig_Rz[0] = tparsu[0] * dz / 2.
    # for i in range(1,gp.nipol):
    #   sig_Rz[i] = sig_Rz[i-1] + tparsu[i] * dz
    # f = gp.ipol(zpars,sig_Rz,z)

    # alternative: interpolate the parameters directly --> don't assume monotonicity!
    f = gh.ipol(zpars, tparsu, z)
    return f
Example 3
def disc_sim(gp):
    gp.zpmin = -1
    gp.zpmax = -1

    #import all data from files
    if gp.importdata:
        z_nu1_raw,nu1_dat_raw,nu1_dat_err_raw = gh.readcoln(gp.files.Sigfiles[0])
        z_sig1_raw,sig1_dat_raw,sig1_dat_err_raw = gh.readcoln(gp.files.sigfiles[0])
        #z_surf_raw,surftot_dat_raw,surftot_dat_err_raw = gh.readcoln(gp.files.surfdenfiles[0])
        z_surf_raw,surfbar_dat_raw,surfbar_dat_err_raw = gh.readcoln(gp.files.surfdenfiles[0])
        z_surf_raw,surfdm_dat_raw,surfdm_dat_err_raw = gh.readcoln(gp.files.surfdenfiles[1])

        selnu1  = (z_nu1_raw > 0)
        selsig1 = (z_sig1_raw > 0)
        selsurf = (z_surf_raw > 0)

        if gp.pops == 2:
            z_nu2_raw,nu2_dat_raw,nu2_dat_err_raw = gh.readcoln(gp.files.Sigfiles[1])
            z_sig2_raw,sig2_dat_raw,sig2_dat_err_raw = gh.readcoln(gp.files.sigfiles[1])

            selnu2 = (z_nu2_raw > 0)
            selsig2 = (z_sig2_raw > 0)

        # baryonic surface density
        gp.dat.Mx   = z_surf_raw[selsurf]*1000.      # [pc]
        gp.dat.Mrdat = surfbar_dat_raw[selsurf]      # [Munit/pc^2]
        gp.dat.Mrerr = surfbar_dat_err_raw[selsurf]  # [Munit/pc^2]

        # total surface density = baryons + dark matter
        gp.Mmodel = surfbar_dat_raw[selsurf] + surfdm_dat_raw[selsurf] # [Munit/pc^2]
        Kz_zstar = -gp.Mmodel * (2.*np.pi*gu.G1__pcMsun_1km2s_2)

        # should be kappa data (not sure whether this is necessary)
        gp.dat.densx     = z_surf_raw[selsurf]*1000.           # [pc]
        gp.dat.densdat   = phys.kappa(gp.dat.densx, Kz_zstar)  # should be the total kappa; the x array is uncertain
        gp.dat.denserr   = gp.dat.densdat  # placeholder error, not a real uncertainty

        gp.dat.nux1 = z_nu1_raw[selnu1]*1000.   # [pc]
        gp.dat.nu1 = nu1_dat_raw[selnu1]
        gp.dat.nuerr1 = nu1_dat_err_raw[selnu1]

        gp.dat.sigx1 = z_sig1_raw[selsig1]*1000.
        gp.dat.sig1 = sig1_dat_raw[selsig1]
        gp.dat.sigerr1 = sig1_dat_err_raw[selsig1]

        if gp.pops == 2:
            gp.dat.nux2 = z_nu2_raw[selnu2]*1000.
            gp.dat.nu2 = nu2_dat_raw[selnu2]
            gp.dat.nuerr2 = nu2_dat_err_raw[selnu2]

            gp.dat.sigx2 = z_sig2_raw[selsig2]*1000.
            gp.dat.sig2 = sig2_dat_raw[selsig2]
            gp.dat.sigerr2 = sig2_dat_err_raw[selsig2]

        gp.dat.output()
        return gp.dat



    else:
        # import simulation datapoints and calculate nu, sig
        zmin = 100. ; zmax = 1300.          # [pc]
        zbinbndry = np.linspace(zmin, zmax, gp.nipol+1)   # [pc] assuming linear spacing of bins
        zbinmin = zbinbndry[:-1]                          # [pc]
        zbinmax = zbinbndry[1:]                           # [pc]
        gp.xipol= zbinmin + (zbinmax-zbinmin)/2.          # [pc]

        # Read in the data:
        mass, x_dat,y_dat,z_dat, vx_dat,vy_dat,vz_dat, pot_dat = gh.readcoln(gp.files.posvelfiles[0])
        # assume units: Munit, 3*kpc, 3*km/s, [pot] <= last one not needed
        # [Dave] v is in units [100 km/s] <= not possible?!
        if max(mass) != min(mass):
            print('**** Multimass data not yet supported ****')
            exit(1)

        # change to [pc]
        x_dat *= 1000.;    y_dat *= 1000.;        z_dat *= 1000. # [pc]
        z_mean  = np.sum(mass*z_dat)/np.sum(mass)                  # [pc]
        vz_mean = np.sum(mass*vz_dat)/np.sum(mass)                 # [km/s]

        # center on coordinate, if also negative z values read in
        if min(z_dat)<0:
            z_dat = z_dat - z_mean        # [pc]
            vz_dat = vz_dat - vz_mean     # [km/s]

        # Add errors:
        if gp.adderrors:
            # Assume normal errors for now:
            xerrfac  = 10.0;  yerrfac = 10.0;  zerrfac = 10.0
            vxerrfac = 10.0; vyerrfac = 10.0; vzerrfac = 10.0
            x_dat_err = abs(x_dat/xerrfac)          # [pc]
            y_dat_err = abs(y_dat/yerrfac)          # [pc]
            z_dat_err = abs(z_dat/zerrfac)          # [pc]
            vx_dat_err = abs(vx_dat/vxerrfac)       # [km/s]
            vy_dat_err = abs(vy_dat/vyerrfac)       # [km/s]
            vz_dat_err = abs(vz_dat/vzerrfac)       # [km/s]

            x_dat = x_dat + npr.normal(-1.,1.,len(z_dat)) * x_dat_err # [pc]
            y_dat = y_dat + npr.normal(-1.,1.,len(z_dat)) * y_dat_err # [pc]
            z_dat = z_dat + npr.normal(-1.,1.,len(z_dat)) * z_dat_err # [pc]
            vx_dat = vx_dat + npr.normal(-1.,1.,len(z_dat)) * vx_dat_err # [km/s]
            vy_dat = vy_dat + npr.normal(-1.,1.,len(z_dat)) * vy_dat_err # [km/s]
            vz_dat = vz_dat + npr.normal(-1.,1.,len(z_dat)) * vz_dat_err # [km/s]

        # Cut on zmax; discard exactly zero velocities
        sel = (z_dat < zmax) * (abs(vz_dat) > 0.)   # [bool]
        z_dat  = z_dat[sel]               # [pc]
        vz_dat = vz_dat[sel]              # [km/s]

        # determine sigma_v
        sig_dat_bin = np.zeros(gp.nipol)
        sig_dat_err_bin = np.zeros(gp.nipol)
        for i in range(gp.nipol):
            sel = (z_dat > zbinmin[i])*(z_dat < zbinmax[i])   # select bin
            vtemp = np.array(vz_dat[sel])                     # [km/s]
            sig_dat_bin[i] = np.sqrt(np.mean(vtemp**2) - np.mean(vtemp)**2) # [km/s]
            sig_dat_err_bin[i] = sig_dat_bin[i]/(1.*np.sum(sel))   # [km/s]


        nu_dat_bin = np.zeros(gp.nipol)
        nu_dat_err_bin = np.zeros(gp.nipol)
        for i in range(gp.nipol):
            sel = (z_dat > zbinmin[i])*(z_dat < zbinmax[i])   # select bin
            nu_dat_bin[i] = 1.*np.sum(sel)/(1.*(zbinmax[i]-zbinmin[i])) # [1/tot. area/pc]
            nu_dat_err_bin[i] = nu_dat_bin[i] / np.sqrt(np.sum(sel)) # [1/tot. area/pc], Poisson distributed

        renorm = 1.*max(nu_dat_bin)       # [1/tot.area/pc]
        nu_dat_bin = nu_dat_bin / renorm  # [1]
        nu_dat_err_bin = nu_dat_err_bin / renorm   # [1]

        if gp.pops == 2:
            mass2, x_dat2,y_dat2,z_dat2, vx_dat2,vy_dat2,vz_dat2, pot_dat2 = gh.readcoln(gp.files.posvelfiles[1])
            if max(mass2) > min(mass2):
                print('**** Multimass data not yet supported ****')
                exit(1)

            # change to [pc]
            x_dat2 *= 1000.;    y_dat2 *= 1000.;        z_dat2 *= 1000. # [pc]
            z_mean2  = np.sum(mass2*z_dat2)/np.sum(mass2)                  # [pc]
            vz_mean2 = np.sum(mass2*vz_dat2)/np.sum(mass2)                 # [km/s]

            # center on coordinate, if also negative z values read in
            if min(z_dat2)<0:
                z_dat2 = z_dat2 - z_mean2        # [pc]
                vz_dat2 = vz_dat2 - vz_mean2     # [km/s]

            # Add errors:
            if gp.adderrors:
                # Assume normal errors for now:
                x_dat_err2 = abs(x_dat2/xerrfac)          # [pc]
                y_dat_err2 = abs(y_dat2/yerrfac)          # [pc]
                z_dat_err2 = abs(z_dat2/zerrfac)          # [pc]
                vx_dat_err2 = abs(vx_dat2/vxerrfac)       # [km/s]
                vy_dat_err2 = abs(vy_dat2/vyerrfac)       # [km/s]
                vz_dat_err2 = abs(vz_dat2/vzerrfac)       # [km/s]

                x_dat2 = x_dat2 + npr.normal(-1.,1.,len(z_dat2)) * x_dat_err2 # [pc]
                y_dat2 = y_dat2 + npr.normal(-1.,1.,len(z_dat2)) * y_dat_err2 # [pc]
                z_dat2 = z_dat2 + npr.normal(-1.,1.,len(z_dat2)) * z_dat_err2 # [pc]
                vx_dat2 = vx_dat2 + npr.normal(-1.,1.,len(z_dat2)) * vx_dat_err2 # [km/s]
                vy_dat2 = vy_dat2 + npr.normal(-1.,1.,len(z_dat2)) * vy_dat_err2 # [km/s]
                vz_dat2 = vz_dat2 + npr.normal(-1.,1.,len(z_dat2)) * vz_dat_err2 # [km/s]

            # Cut on zmax; discard exactly zero velocities
            sel = (z_dat2 < zmax) * (abs(vz_dat2) > 0.)   # [bool]
            z_dat2  = z_dat2[sel]               # [pc]
            vz_dat2 = vz_dat2[sel]              # [km/s]

            # determine sigma_v
            sig_dat_bin2 = np.zeros(gp.nipol)
            sig_dat_err_bin2 = np.zeros(gp.nipol)
            for i in range(gp.nipol):
                sel = (z_dat2 > zbinmin[i])*(z_dat2 < zbinmax[i])   # select bin
                vtemp = np.array(vz_dat2[sel])                     # [km/s]
                sig_dat_bin2[i] = np.sqrt(np.mean(vtemp**2) - np.mean(vtemp)**2) # [km/s]
                sig_dat_err_bin2[i] = sig_dat_bin2[i]/(1.*np.sum(sel))   # [km/s]


            nu_dat_bin2 = np.zeros(gp.nipol)
            nu_dat_err_bin2 = np.zeros(gp.nipol)
            for i in range(gp.nipol):
                sel = (z_dat2 > zbinmin[i])*(z_dat2 < zbinmax[i])   # select bin
                nu_dat_bin2[i] = 1.*np.sum(sel)/(1.*(zbinmax[i]-zbinmin[i])) # [1/tot. area/pc]
                nu_dat_err_bin2[i] = nu_dat_bin2[i] / np.sqrt(np.sum(sel)) # [1/tot. area/pc], Poisson distributed

            renorm = 1.*max(nu_dat_bin2)       # [1/tot.area/pc]
            nu_dat_bin2 /= renorm              # [1]
            nu_dat_err_bin2 /= renorm          # [1]


        # if gp.bprior:
        # Load the baryonic model:
        if gp.baryonmodel == 'silvia':
            zvis,sigexpvis,sigexpviserr,sigsecvis,sigsecviserr = gh.readcoln('/home/ast/user/jread/Data/Local_dm/Vis/Sigma_MM.txt')
            # [kpc, Munit/pc^2, Msun/pc^2, Msun/pc^2, Msun/pc^2]
            sigusevis    = sigsecvis      # [Munit/pc^2]
            siguseviserr = sigsecviserr   # [Munit/pc^2]
        elif gp.baryonmodel == 'sim':
            zvis, sigusevis, siguseviserr = gh.readcol3(gp.files.surfdenfiles[0])
            # [kpc, Munit/pc^2, Munit/pc^2]
            zvis *= 1000.                     # [pc]
            sigusevis    = gh.ipol(zvis, sigusevis, gp.xipol)   # interpolate to xipol radius array
            siguseviserr = gh.ipol(zvis, siguseviserr, gp.xipol)
            zvis = gp.xipol                          # [pc]

            # read in DM surface density
            zdm, sigusedm, sigusedmerr = gh.readcol3(gp.files.surfdenfiles[1])
            # [kpc, Munit/pc^2, Munit/pc^2]
            zdm *= 1000.                                # [pc]
            sigusedm = gh.ipol(zdm, sigusedm, gp.xipol) # interpolate to xipol radius array
            sigusedmerr = gh.ipol(zdm, sigusedmerr, gp.xipol)
            zdm = gp.xipol                    # [pc]
        elif gp.baryonmodel == 'simple':
            zvis = gp.xipol                   # [pc]
            D = 250.                          # [pc]
            K = 1.65
            sigusevis = K*zvis/np.sqrt(zvis**2.+D**2.) / (2.0*np.pi*gu.G1__pcMsun_1km2s_2)
            siguseviserr = sigusevis*0.01

        # baryonic surface density, really a Sig
        gp.dat.Mx   = gp.xipol                # [pc]
        gp.dat.Mrdat = sigusevis              # [Munit/pc^2]
        gp.dat.Mrerr = siguseviserr           # [Munit/pc^2]

        # total surface density (same z array as baryonic)
        gp.Mmodel = sigusevis + sigusedm         # [Munit/pc^2]
        Kz_zstar = -gp.Mmodel * (2.*np.pi*gu.G1__pcMsun_1km2s_2) # [1000/pc (km/s)^2]

        # should be kappa data (not sure whether this is necessary)
        gp.dat.densx     = gp.xipol                       # [pc]
        gp.dat.densdat   = phys.kappa(gp.dat.densx, Kz_zstar)
        gp.dat.denserr   = gp.dat.densdat/np.sqrt(len(Kz_zstar))

        gp.dat.nux1   = gp.xipol          # [pc]
        gp.dat.nu1 = nu_dat_bin           # [Munit/pc^3]
        gp.dat.nuerr1 = nu_dat_err_bin    # [Munit/pc^3]

        gp.dat.sigx1   = gp.xipol         # [pc]
        gp.dat.sig1 = sig_dat_bin         # [km/s]
        gp.dat.sigerr1 = sig_dat_err_bin  # [km/s]

        if gp.pops == 2:
            gp.dat.nux2 = gp.xipol               # [pc]
            gp.dat.nu2 = nu_dat_bin2             # [Munit/pc^3]
            gp.dat.nuerr2 = nu_dat_err_bin2      # [Munit/pc^3]

            gp.dat.sigx2 = gp.xipol              # [pc]
            gp.dat.sig2 = sig_dat_bin2           # [km/s]
            gp.dat.sigerr2 = sig_dat_err_bin2    # [km/s]

        gp.dat.output()
        return gp.dat
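The per-bin dispersion loop in disc_sim reduces to sigma = sqrt(<v^2> - <v>^2) over the stars falling in each bin. A self-contained sketch with synthetic data (all arrays below are made up for illustration) shows the same bookkeeping:

import numpy as np

rng = np.random.default_rng(0)
z = rng.uniform(100., 1300., 5000)      # [pc] mock heights
vz = rng.normal(0., 20., z.size)        # [km/s] mock vertical velocities
bndry = np.linspace(100., 1300., 13)    # 12 linear bins, as above

sig = np.empty(bndry.size - 1)
for i in range(sig.size):
    sel = (z > bndry[i]) & (z < bndry[i+1])
    v = vz[sel]
    sig[i] = np.sqrt(np.mean(v**2) - np.mean(v)**2)  # RMS spread about the bin mean
print(sig)  # each entry should scatter around the input 20 km/s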
Example 4
def disc_sim(gp):
    gp.zpmin = -1
    gp.zpmax = -1

    #import all data from files
    if gp.importdata:
        z_nu1_raw, nu1_dat_raw, nu1_dat_err_raw = gh.readcoln(
            gp.files.Sigfiles[0])
        z_sig1_raw, sig1_dat_raw, sig1_dat_err_raw = gh.readcoln(
            gp.files.sigfiles[0])
        #z_surf_raw,surftot_dat_raw,surftot_dat_err_raw = gh.readcoln(gp.files.surfdenfiles[0])
        z_surf_raw, surfbar_dat_raw, surfbar_dat_err_raw = gh.readcoln(
            gp.files.surfdenfiles[0])
        z_surf_raw, surfdm_dat_raw, surfdm_dat_err_raw = gh.readcoln(
            gp.files.surfdenfiles[1])

        selnu1 = (z_nu1_raw > 0)
        selsig1 = (z_sig1_raw > 0)
        selsurf = (z_surf_raw > 0)

        if gp.pops == 2:
            z_nu2_raw, nu2_dat_raw, nu2_dat_err_raw = gh.readcoln(
                gp.files.Sigfiles[1])
            z_sig2_raw, sig2_dat_raw, sig2_dat_err_raw = gh.readcoln(
                gp.files.sigfiles[1])

            selnu2 = (z_nu2_raw > 0)
            selsig2 = (z_sig2_raw > 0)

        # baryonic surface density
        gp.dat.Mx = z_surf_raw[selsurf] * 1000.  # [pc]
        gp.dat.Mrdat = surfbar_dat_raw[selsurf]  # [Munit/pc^2]
        gp.dat.Mrerr = surfbar_dat_err_raw[selsurf]  # [Munit/pc^2]

        # total surface density = baryons + dark matter
        gp.Mmodel = surfbar_dat_raw[selsurf] + surfdm_dat_raw[selsurf]  # [Munit/pc^2]
        Kz_zstar = -gp.Mmodel * (2. * np.pi * gu.G1__pcMsun_1km2s_2)

        # should be kappa data (not sure whether this is necessary)
        gp.dat.densx = z_surf_raw[selsurf] * 1000.  # [pc]
        gp.dat.densdat = phys.kappa(gp.dat.densx, Kz_zstar)  # should be the total kappa; the x array is uncertain
        gp.dat.denserr = gp.dat.densdat  # placeholder error, not a real uncertainty

        gp.dat.nux1 = z_nu1_raw[selnu1] * 1000.  # [pc]
        gp.dat.nu1 = nu1_dat_raw[selnu1]
        gp.dat.nuerr1 = nu1_dat_err_raw[selnu1]

        gp.dat.sigx1 = z_sig1_raw[selsig1] * 1000.
        gp.dat.sig1 = sig1_dat_raw[selsig1]
        gp.dat.sigerr1 = sig1_dat_err_raw[selsig1]

        if gp.pops == 2:
            gp.dat.nux2 = z_nu2_raw[selnu2] * 1000.
            gp.dat.nu2 = nu2_dat_raw[selnu2]
            gp.dat.nuerr2 = nu2_dat_err_raw[selnu2]

            gp.dat.sigx2 = z_sig2_raw[selsig2] * 1000.
            gp.dat.sig2 = sig2_dat_raw[selsig2]
            gp.dat.sigerr2 = sig2_dat_err_raw[selsig2]

        gp.dat.output()
        return gp.dat

    else:
        # import simulation datapoints and calculate nu, sig
        zmin = 100.
        zmax = 1300.  # [pc]
        zbinbndry = np.linspace(zmin, zmax, gp.nipol + 1)  # [pc] assuming linear spacing of bins
        zbinmin = zbinbndry[:-1]  # [pc]
        zbinmax = zbinbndry[1:]  # [pc]
        gp.xipol = zbinmin + (zbinmax - zbinmin) / 2.  # [pc]

        # Read in the data:
        mass, x_dat, y_dat, z_dat, vx_dat, vy_dat, vz_dat, pot_dat = gh.readcoln(
            gp.files.posvelfiles[0])
        # assume units: Munit, 3*kpc, 3*km/s, [pot] <= last one not needed
        # [Dave] v is in units [100 km/s] <= not possible?!
        if max(mass) != min(mass):
            print('**** Multimass data not yet supported ****')
            exit(1)

        # change to [pc]
        x_dat *= 1000.
        y_dat *= 1000.
        z_dat *= 1000.  # [pc]
        z_mean = np.sum(mass * z_dat) / np.sum(mass)  # [pc]
        vz_mean = np.sum(mass * vz_dat) / np.sum(mass)  # [km/s]

        # center on coordinate, if also negative z values read in
        if min(z_dat) < 0:
            z_dat = z_dat - z_mean  # [pc]
            vz_dat = vz_dat - vz_mean  # [km/s]

        # Add errors:
        if gp.adderrors:
            # Assume normal errors for now:
            xerrfac = 10.0
            yerrfac = 10.0
            zerrfac = 10.0
            vxerrfac = 10.0
            vyerrfac = 10.0
            vzerrfac = 10.0
            x_dat_err = abs(x_dat / xerrfac)  # [pc]
            y_dat_err = abs(y_dat / yerrfac)  # [pc]
            z_dat_err = abs(z_dat / zerrfac)  # [pc]
            vx_dat_err = abs(vx_dat / vxerrfac)  # [km/s]
            vy_dat_err = abs(vy_dat / vyerrfac)  # [km/s]
            vz_dat_err = abs(vz_dat / vzerrfac)  # [km/s]

            x_dat = x_dat + npr.normal(-1., 1., len(z_dat)) * x_dat_err  # [pc]
            y_dat = y_dat + npr.normal(-1., 1., len(z_dat)) * y_dat_err  # [pc]
            z_dat = z_dat + npr.normal(-1., 1., len(z_dat)) * z_dat_err  # [pc]
            vx_dat = vx_dat + npr.normal(-1., 1., len(z_dat)) * vx_dat_err  # [km/s]
            vy_dat = vy_dat + npr.normal(-1., 1., len(z_dat)) * vy_dat_err  # [km/s]
            vz_dat = vz_dat + npr.normal(-1., 1., len(z_dat)) * vz_dat_err  # [km/s]

        # Cut on zmax; discard exactly zero velocities
        sel = (z_dat < zmax) * (abs(vz_dat) > 0.)  # [bool]
        z_dat = z_dat[sel]  # [pc]
        vz_dat = vz_dat[sel]  # [km/s]

        # determine sigma_v
        sig_dat_bin = np.zeros(gp.nipol)
        sig_dat_err_bin = np.zeros(gp.nipol)
        for i in range(gp.nipol):
            sel = (z_dat > zbinmin[i]) * (z_dat < zbinmax[i])  # select bin
            vtemp = np.array(vz_dat[sel])  # [km/s]
            sig_dat_bin[i] = np.sqrt(np.mean(vtemp**2) - np.mean(vtemp)**2)  # [km/s]
            sig_dat_err_bin[i] = sig_dat_bin[i] / (1. * np.sum(sel))  # [km/s]

        nu_dat_bin = np.zeros(gp.nipol)
        nu_dat_err_bin = np.zeros(gp.nipol)
        for i in range(gp.nipol):
            sel = (z_dat > zbinmin[i]) * (z_dat < zbinmax[i])  # select bin
            nu_dat_bin[i] = 1. * np.sum(sel) / (1. * (zbinmax[i] - zbinmin[i]))  # [1/tot. area/pc]
            nu_dat_err_bin[i] = nu_dat_bin[i] / np.sqrt(np.sum(sel))  # [1/tot. area/pc], Poisson distributed

        renorm = 1. * max(nu_dat_bin)  # [1/tot.area/pc]
        nu_dat_bin = nu_dat_bin / renorm  # [1]
        nu_dat_err_bin = nu_dat_err_bin / renorm  # [1]

        if gp.pops == 2:
            mass2, x_dat2, y_dat2, z_dat2, vx_dat2, vy_dat2, vz_dat2, pot_dat2 = gh.readcoln(
                gp.files.posvelfiles[1])
            if max(mass2) > min(mass2):
                print('**** Multimass data not yet supported ****')
                exit(1)

            # change to [pc]
            x_dat2 *= 1000.
            y_dat2 *= 1000.
            z_dat2 *= 1000.  # [pc]
            z_mean2 = np.sum(mass2 * z_dat2) / np.sum(mass2)  # [pc]
            vz_mean2 = np.sum(mass2 * vz_dat2) / np.sum(mass2)  # [km/s]

            # center on coordinate, if also negative z values read in
            if min(z_dat2) < 0:
                z_dat2 = z_dat2 - z_mean2  # [pc]
                vz_dat2 = vz_dat2 - vz_mean2  # [km/s]

            # Add errors:
            if gp.adderrors:
                # Assume normal errors for now:
                x_dat_err2 = abs(x_dat2 / xerrfac)  # [pc]
                y_dat_err2 = abs(y_dat2 / yerrfac)  # [pc]
                z_dat_err2 = abs(z_dat2 / zerrfac)  # [pc]
                vx_dat_err2 = abs(vx_dat2 / vxerrfac)  # [km/s]
                vy_dat_err2 = abs(vy_dat2 / vyerrfac)  # [km/s]
                vz_dat_err2 = abs(vz_dat2 / vzerrfac)  # [km/s]

                x_dat2 = x_dat2 + npr.normal(-1., 1., len(z_dat2)) * x_dat_err2  # [pc]
                y_dat2 = y_dat2 + npr.normal(-1., 1., len(z_dat2)) * y_dat_err2  # [pc]
                z_dat2 = z_dat2 + npr.normal(-1., 1., len(z_dat2)) * z_dat_err2  # [pc]
                vx_dat2 = vx_dat2 + npr.normal(-1., 1., len(z_dat2)) * vx_dat_err2  # [km/s]
                vy_dat2 = vy_dat2 + npr.normal(-1., 1., len(z_dat2)) * vy_dat_err2  # [km/s]
                vz_dat2 = vz_dat2 + npr.normal(-1., 1., len(z_dat2)) * vz_dat_err2  # [km/s]

            # Cut on zmax; discard exactly zero velocities
            sel = (z_dat2 < zmax) * (abs(vz_dat2) > 0.)  # [bool]
            z_dat2 = z_dat2[sel]  # [pc]
            vz_dat2 = vz_dat2[sel]  # [km/s]

            # determine sigma_v
            sig_dat_bin2 = np.zeros(gp.nipol)
            sig_dat_err_bin2 = np.zeros(gp.nipol)
            for i in range(gp.nipol):
                sel = (z_dat2 > zbinmin[i]) * (z_dat2 < zbinmax[i])  # select bin
                vtemp = np.array(vz_dat2[sel])  # [km/s]
                sig_dat_bin2[i] = np.sqrt(np.mean(vtemp**2) - np.mean(vtemp)**2)  # [km/s]
                sig_dat_err_bin2[i] = sig_dat_bin2[i] / (1. * np.sum(sel))  # [km/s]

            nu_dat_bin2 = np.zeros(gp.nipol)
            nu_dat_err_bin2 = np.zeros(gp.nipol)
            for i in range(gp.nipol):
                sel = (z_dat2 > zbinmin[i]) * (z_dat2 < zbinmax[i])  # select bin
                nu_dat_bin2[i] = 1. * np.sum(sel) / (1. * (zbinmax[i] - zbinmin[i]))  # [1/tot. area/pc]
                nu_dat_err_bin2[i] = nu_dat_bin2[i] / np.sqrt(np.sum(sel))  # [1/tot. area/pc], Poisson distributed

            renorm = 1. * max(nu_dat_bin2)  # [1/tot.area/pc]
            nu_dat_bin2 /= renorm  # [1]
            nu_dat_err_bin2 /= renorm  # [1]

        # if gp.bprior:
        # Load the baryonic model:
        if gp.baryonmodel == 'silvia':
            zvis, sigexpvis, sigexpviserr, sigsecvis, sigsecviserr = gh.readcoln(
                '/home/ast/user/jread/Data/Local_dm/Vis/Sigma_MM.txt')
            # [kpc, Munit/pc^2, Msun/pc^2, Msun/pc^2, Msun/pc^2]
            sigusevis = sigsecvis  # [Munit/pc^2]
            siguseviserr = sigsecviserr  # [Munit/pc^2]
        elif gp.baryonmodel == 'sim':
            zvis, sigusevis, siguseviserr = gh.readcol3(
                gp.files.surfdenfiles[0])
            # [kpc, Munit/pc^2, Munit/pc^2]
            zvis *= 1000.  # [pc]
            sigusevis = gh.ipol(zvis, sigusevis, gp.xipol)  # interpolate to xipol radius array
            siguseviserr = gh.ipol(zvis, siguseviserr, gp.xipol)
            zvis = gp.xipol  # [pc]

            # read in DM surface density
            zdm, sigusedm, sigusedmerr = gh.readcol3(gp.files.surfdenfiles[1])
            # [kpc, Munit/pc^2, Munit/pc^2]
            zdm *= 1000.  # [pc]
            sigusedm = gh.ipol(zdm, sigusedm, gp.xipol)  # interpolate to xipol radius array
            sigusedmerr = gh.ipol(zdm, sigusedmerr, gp.xipol)
            zdm = gp.xipol  # [pc]
        elif gp.baryonmodel == 'simple':
            zvis = gp.xipol  # [pc]
            D = 250.  # [pc]
            K = 1.65
            sigusevis = K * zvis / np.sqrt(zvis**2. + D**2.) / (2.0 * np.pi * gu.G1__pcMsun_1km2s_2)
            siguseviserr = sigusevis * 0.01

        # baryonic surface density, really a Sig
        gp.dat.Mx = gp.xipol  # [pc]
        gp.dat.Mrdat = sigusevis  # [Munit/pc^2]
        gp.dat.Mrerr = siguseviserr  # [Munit/pc^2]

        # total surface density (same z array as baryonic)
        gp.Mmodel = sigusevis + sigusedm  # [Munit/pc^2]
        Kz_zstar = -gp.Mmodel * (2. * np.pi * gu.G1__pcMsun_1km2s_2)  # [1000/pc (km/s)^2]

        # should be kappa data (not sure whether this is necessary)
        gp.dat.densx = gp.xipol  # [pc]
        gp.dat.densdat = phys.kappa(gp.dat.densx, Kz_zstar)
        gp.dat.denserr = gp.dat.densdat / np.sqrt(len(Kz_zstar))

        gp.dat.nux1 = gp.xipol  # [pc]
        gp.dat.nu1 = nu_dat_bin  # [Munit/pc^3]
        gp.dat.nuerr1 = nu_dat_err_bin  # [Munit/pc^3]

        gp.dat.sigx1 = gp.xipol  # [pc]
        gp.dat.sig1 = sig_dat_bin  # [km/s]
        gp.dat.sigerr1 = sig_dat_err_bin  # [km/s]

        if gp.pops == 2:
            gp.dat.nux2 = gp.xipol  # [pc]
            gp.dat.nu2 = nu_dat_bin2  # [Munit/pc^3]
            gp.dat.nuerr2 = nu_dat_err_bin2  # [Munit/pc^3]

            gp.dat.sigx2 = gp.xipol  # [pc]
            gp.dat.sig2 = sig_dat_bin2  # [km/s]
            gp.dat.sigerr2 = sig_dat_err_bin2  # [km/s]

        gp.dat.output()
        return gp.dat
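Both branches turn a total surface density into the vertical force through Kz(z) = -2*pi*G*Sigma(z). A minimal numerical version, assuming gu.G1__pcMsun_1km2s_2 is Newton's constant in pc Msun^-1 (km/s)^2 (the value below is the standard one in those units, quoted here as an assumption about that constant):

import numpy as np

G1 = 4.301e-3                            # [pc Msun^-1 (km/s)^2], assumed value of gu.G1__pcMsun_1km2s_2
z = np.linspace(100., 1300., 12)         # [pc]
Sig = 50. * z / np.sqrt(z**2 + 250.**2)  # [Msun/pc^2] toy total surface density
Kz = -2. * np.pi * G1 * Sig              # [(km/s)^2/pc] vertical force per unit mass
print(Kz)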
Example 5
def disc_mock(gp):
    global K,C,D,F, zth, zp_kz, zmin, zmax, z0, z02
    # Set up simple population here using analytic formulae:
    zmin = 100.                               # [pc], first bin center
    zmax = 1300.                              # [pc], last bin center
    # get support points ("Stuetzpunkte") for the theoretical profiles (not yet stars; finer spacing in real space)
    nth = 3*gp.nipol
    zth = 1.* np.arange(nth) * (zmax-zmin)/(nth-1.) + zmin
    z0  = 240.                                # [pc], scaleheight of first population
    z02 = 200.                                # [pc], scaleheight of second population
    D   = 250.                                # [pc], scaleheight of all stellar tracers
    K   = 1.65
    F   = 1.65e-4
    C   = 17.**2.                             # [(km/s)^2] integration constant in sig

    # Draw mock data:
    nu_zth = np.exp(-zth/z0)                                 # [1]
    Kz_zth = -(K*zth/np.sqrt(zth**2.+D**2.) + 2.0 * F * zth)

    if gp.adddarkdisc:
        DD = 600                                         # [pc]
        KD = 0.15 * 1.650
        Kz_zth = Kz_zth - KD*zth/np.sqrt(zth**2. + DD**2.)

    # calculate sig_z^2
    inti = np.zeros(nth)
    for i in range(1, nth):
        inti[i] = simps(Kz_zth[:i]*nu_zth[:i], zth[:i])

    sigzth = np.sqrt((inti + C) / nu_zth)

    # project back to positions of stars
    ran = npr.uniform(size=int(gp.ntracer[1-1]))                 # [1]
    zstar = -z0 * np.log(1.0 - ran)           # [pc] stellar positions
    sigzstar = gh.ipol(zth, sigzth, zstar)
    # stellar velocity dispersion (> 0 by construction; after the IDL version by Justin)

    # draw Gaussian velocities with dispersion sigzstar
    ran2 = npr.normal(size=int(gp.ntracer[1-1]))  # [1]
    vzstar = ran2 * sigzstar                      # [km/s]

    # Add second population [thick-disc like]:
    if gp.pops == 2:
        nu_zth2 = gp.ntracer[2-1]/gp.ntracer[1-1]*np.exp(-zth/z02)
        # no normalization to 1
        inti    = np.zeros(nth)
        for i in range(1, nth):
            inti[i] = simps(Kz_zth[:i]*nu_zth2[:i], zth[:i])
        sigzth2 = np.sqrt((inti + C) / nu_zth2) # same integration constant
        ran = npr.uniform(size=int(gp.ntracer[2-1]))           # [1] uniform in [0,1)
        zstar2 = -z02 * np.log(1.0 - ran)                      # [pc]
        zstarobs = np.hstack([zstar, zstar2]) # concat pop1, pop2 for all stars
        sigzstar2 = gh.ipol(zth, sigzth2, zstar2)
        ran2 = npr.normal(size=int(gp.ntracer[2-1]))      # [1] standard normal draws
        vzstar2 = ran2 * sigzstar2                        # [km/s]

    # enforce observational cut on zmax:
    sel = (zstar < zmax)
    print('fraction of z<zmax selected elements: ', 1.*sum(sel)/(1.*len(sel)))
    z_dat  = zstar[sel]
    vz_dat = vzstar[sel]

    # throw away velocities of value zero (unstable?):
    sel = (abs(vz_dat) > 0)
    print('fraction of vz_dat>0 selected elements: ', 1.*sum(sel)/(1.*len(sel)))
    z_dat  = z_dat[sel]
    vz_dat = vz_dat[sel]

    # Calculate binned data (for plots/binned analysis). Old way: linear spacings, no constant #particles/bin
    binmin, binmax, z_dat_bin, sig_dat_bin, count_bin = gh.binsmooth(z_dat, vz_dat, \
                                                                     zmin, zmax, gp.nipol, 0.)
    sig_dat_err_bin = sig_dat_bin / np.sqrt(count_bin)

    nu_dat_bin, count_bin = gh.bincount(z_dat, binmax)
    nu_dat_err_bin = nu_dat_bin / np.sqrt(count_bin)
    renorm = max(nu_dat_bin)
    nu_dat_bin = nu_dat_bin / renorm
    nu_dat_err_bin = nu_dat_err_bin / renorm

    # if only 1 pop, use 0 for all components
    binmin0 = binmin; binmax0 = binmax; z_dat_bin0 = z_dat_bin
    sig_dat_bin0 = sig_dat_bin; sig_dat_err_bin0 = sig_dat_err_bin
    nu_dat_bin0  = nu_dat_bin;  nu_dat_err_bin0  = nu_dat_err_bin

    if gp.pops == 2:
        # enforce observational constraint on z<z_max
        sel = (zstar2 < zmax)
        z_dat2  = zstar2[sel]
        vz_dat2 = vzstar2[sel]

        # cut zero velocities:
        sel = (abs(vz_dat2) > 0)
        z_dat2  = z_dat2[sel]
        vz_dat2 = vz_dat2[sel]

        # Calculate binned data (for plots/binned analysis):
        binmin2, binmax2, z_dat_bin2, sig_dat_bin2, count_bin2 = gh.binsmooth(z_dat2, vz_dat2, \
                                                                              zmin, zmax, gp.nipol, 0.)
        sig_dat_err_bin2 = sig_dat_bin2 / np.sqrt(count_bin2)

        nu_dat_bin2, count_bin2 = gh.bincount(z_dat2, binmax2)
        nu_dat_err_bin2 = nu_dat_bin2 / np.sqrt(count_bin2)
        renorm2 = max(nu_dat_bin2) # normalize by the maximum bin density
        nu_dat_bin2 = nu_dat_bin2 / renorm2
        nu_dat_err_bin2 = nu_dat_err_bin2 / renorm2

        # CALCULATE PROPERTIES FOR ALL POP TOGETHER
        z_dat0 = np.hstack([z_dat, z_dat2])
        vz_dat0 = np.hstack([vz_dat, vz_dat2])

        # Calculate binned data (for plots/binned analysis). Old way: linear spacings, no constant #particles/bin
        binmin0, binmax0, z_dat_bin0, sig_dat_bin0, count_bin0 = gh.binsmooth(z_dat0, vz_dat0, \
                                                                              zmin, zmax, gp.nipol, 0.)
        sig_dat_err_bin0 = sig_dat_bin0 / np.sqrt(count_bin0)
        # binmin, binmax, z_dat_bin = gh.bin_r_const_tracers(z_dat, gp.nipol)
        nu_dat_bin0, count_bin0 = gh.bincount(z_dat0, binmax0)
        nu_dat_err_bin0 = nu_dat_bin0 / np.sqrt(count_bin0)
        renorm0 = max(nu_dat_bin0)
        nu_dat_bin0 = nu_dat_bin0 / renorm0
        nu_dat_err_bin0 = nu_dat_err_bin0 / renorm0


    xip = np.copy(z_dat_bin0)                        # [pc]
    gp.dat.binmin = binmin0
    gp.dat.rbin   = xip
    gp.xipol      = gp.dat.rbin
    gp.Rscale.append(D)  # [pc]
    gp.Rscale.append(z0) # [pc]
    maxr          = max(gp.dat.rbin)
    gp.xepol      = np.hstack([gp.dat.rbin, 2*maxr, 4*maxr, 8*maxr])
    gp.dat.binmax = binmax0
    gp.dat.Mrdat   = K*xip/np.sqrt(xip**2.+D**2.) / (2.0*np.pi*gu.G1__pcMsun_1km2s_2)
    gp.dat.Mrerr   = gp.dat.Mrdat*nu_dat_err_bin/nu_dat_bin

    gp.dat.nu.append(nu_dat_bin0)        # [Msun/pc^3], normalized to 1 at center
    gp.dat.nuerr.append(nu_dat_err_bin0) # [Msun/pc^3], normalized

    gp.dat.sig.append(sig_dat_bin0)       # [km/s]
    gp.dat.sigerr.append(sig_dat_err_bin0)# [km/s]

    gp.dat.nu.append(nu_dat_bin)         # [Msun/pc^3], normalized to 1
    gp.dat.nuerr.append(nu_dat_err_bin)  # [Msun/pc^3], normalized

    gp.dat.sig.append(sig_dat_bin)        # [km/s]
    gp.dat.sigerr.append(sig_dat_err_bin) # [km/s]

    if gp.pops == 2:
        gp.Rscale.append(z02)                 # [pc]
        gp.dat.nu.append(nu_dat_bin2)        # [Msun/pc^3], normalized to 1
        gp.dat.nuerr.append(nu_dat_err_bin2) # [Msun/pc^3], normalized

        gp.dat.sig.append(sig_dat_bin2)       # [km/s]
        gp.dat.sigerr.append(sig_dat_err_bin2)# [km/s]
    return gp.dat
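disc_mock draws the tracer heights by inverse-transform sampling: for u uniform on [0,1), z = -z0*ln(1-u) is exponentially distributed with scaleheight z0, matching nu(z) ~ exp(-z/z0). A quick standalone check of that identity:

import numpy as np
import numpy.random as npr

z0 = 240.                       # [pc] scaleheight, as in disc_mock
u = npr.uniform(size=100000)    # uniform on [0,1)
zstar = -z0 * np.log(1.0 - u)   # [pc] exponentially distributed heights
print(np.mean(zstar))           # should come out close to z0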
Example 6
def get_prof(prof, pop, gp):
    zmin = 100.                               # [pc], first bin center
    zmax = 1300.                              # [pc], last bin center
    # get support points ("Stuetzpunkte") for the theoretical profiles (not yet stars)
    nth = gp.nipol                            # [1] number of bins
    zth = 1.* np.arange(nth) * (zmax-zmin)/(nth-1.) + zmin # [pc] bin centers
    z0  = 240.                                # [pc], scaleheight of first population
    z02 = 200.                                # [pc], scaleheight of second population
    D   = 250.                                # [pc], scaleheight of all stellar tracers
    K   = 1.65
    F   = 1.65e-4
    C   = 17.**2.                             # [(km/s)^2] integration constant in sig

    # Draw mock data from exponential disk:
    nu_zth = np.exp(-zth/z0)                                 # [nu0] = [Msun/A/pc] 3D tracer density
    if prof == 'nu' and pop==1:
        return zth, nu_zth
    Kz_zth = -(K*zth/np.sqrt(zth**2.+D**2.) + 2.0 * F * zth)

    if gp.adddarkdisc:
        DD = 600                                         # [pc] scaleheight of dark disc
        KD = 0.15 * 1.650
        Kz_zth = Kz_zth - KD*zth/np.sqrt(zth**2. + DD**2.)

    # calculate sig_z^2
    inti = np.zeros(nth)
    for i in range(1, nth):
        inti[i] = simps(Kz_zth[:i]*nu_zth[:i], zth[:i])

    sigzth = np.sqrt((inti + C) / nu_zth)
    if prof == 'sig' and pop == 1:
        return zth, sigzth
    # project back to positions of stars
    ran = npr.uniform(size=int(gp.ntracer[1-1]))                 # [1]
    zstar = -z0 * np.log(1.0 - ran)           # [pc] stellar positions, exponential falloff

    sigzstar = gh.ipol(zth, sigzth, zstar)
    # stellar velocity dispersion (> 0 by construction; after the IDL version by Justin)

    # draw Gaussian velocities with dispersion sigzstar
    ran2 = npr.normal(size=int(gp.ntracer[1-1]))  # [1]
    vzstar = ran2 * sigzstar                      # [km/s]

    # Add second population [thick-disc like]:
    if gp.pops == 2:
        nu_zth2 = gp.ntracer[2-1]/gp.ntracer[1-1]*np.exp(-zth/z02)
        if prof == 'nu' and pop == 2:
            return zth, nu_zth2
        # [nu0,2] = [Msun/A/pc], 3D tracer density, exponentially falling
        # no normalization to 1 done here
        inti    = np.zeros(nth)
        for i in range(1, nth):
            inti[i] = simps(Kz_zth[:i]*nu_zth2[:i], zth[:i])
        sigzth2 = np.sqrt((inti + C) / nu_zth2) # same integration constant
        if prof == 'sig' and pop == 2:
            return zth, sigzth2
        ran = npr.uniform(size=int(gp.ntracer[2-1]))           # [1] uniform in [0,1)
        zstar2 = -z02 * np.log(1.0 - ran)                      # [pc]
        #zstarobs = np.hstack([zstar, zstar2]) # concat pop1, pop2 for all stars
        sigzstar2 = gh.ipol(zth, sigzth2, zstar2)
        ran2 = npr.normal(size=int(gp.ntracer[2-1]))      # [1] standard normal draws
        vzstar2 = ran2 * sigzstar2                        # [km/s]
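The sig branch of get_prof solves the one-dimensional Jeans equation, sig_z^2(z) = (integral of Kz*nu dz + C) / nu(z). A self-contained sketch of the same calculation, using scipy's cumulative_trapezoid in place of the simps loop (a swap made here for brevity, not the code's own routine):

import numpy as np
from scipy.integrate import cumulative_trapezoid

z = np.linspace(100., 1300., 30)                    # [pc]
z0, D, K, F, C = 240., 250., 1.65, 1.65e-4, 17.**2  # constants as above
nu = np.exp(-z / z0)                                # tracer density
Kz = -(K * z / np.sqrt(z**2 + D**2) + 2.0 * F * z)  # vertical force

inti = cumulative_trapezoid(Kz * nu, z, initial=0.)  # running integral of Kz*nu
sigz = np.sqrt((inti + C) / nu)                      # [km/s]
print(sigz[0], sigz[-1])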
Example 7
def run(gp):
    global K, C, D, F, zth, zp_kz, zmin, zmax, z0, z02
    # Set up simple population here using analytic formulae:
    zmin = 100.0  # [pc], first bin center
    zmax = 1300.0  # [pc], last bin center
    # get support points ("Stuetzpunkte") for the theoretical profiles (not yet stars)
    nth = gp.nipol  # [1] number of bins

    zth = 1.0 * np.arange(nth) * (zmax - zmin) / (nth - 1.0) + zmin  # [pc] bin centers

    z0 = 240.0  # [pc], scaleheight of first population
    z02 = 200.0  # [pc], scaleheight of second population
    D = 250.0  # [pc], scaleheight of all stellar tracers
    K = 1.65
    F = 1.65e-4
    C = 17.0 ** 2.0  # [(km/s)^2] integration constant in sig

    # Draw mock data from exponential disk:
    nu_zth = np.exp(-zth / z0)  # [nu0] = [Msun/A/pc] 3D tracer density
    Kz_zth = -(K * zth / np.sqrt(zth ** 2.0 + D ** 2.0) + 2.0 * F * zth)

    if gp.adddarkdisc:
        DD = 600  # [pc] scaleheight of dark disc
        KD = 0.15 * 1.650
        Kz_zth = Kz_zth - KD * zth / np.sqrt(zth ** 2.0 + DD ** 2.0)

    # calculate sig_z^2
    inti = np.zeros(nth)
    for i in range(1, nth):
        inti[i] = simps(Kz_zth[:i] * nu_zth[:i], zth[:i])

    sigzth = np.sqrt((inti + C) / nu_zth)

    # project back to positions of stars
    ran = npr.uniform(size=int(gp.ntracer[1 - 1]))  # [1]
    zstar = -z0 * np.log(1.0 - ran)  # [pc] stellar positions, exponential falloff

    sigzstar = gh.ipol(zth, sigzth, zstar)
    # stellar velocity dispersion (> 0 by construction; after the IDL version by Justin)

    # draw Gaussian velocities with dispersion sigzstar
    ran2 = npr.normal(size=int(gp.ntracer[1 - 1]))  # [1]
    vzstar = ran2 * sigzstar  # [km/s]

    # Add second population [thick-disc like]:
    if gp.pops == 2:
        nu_zth2 = gp.ntracer[2 - 1] / gp.ntracer[1 - 1] * np.exp(-zth / z02)
        # [nu0,2] = [Msun/A/pc], 3D tracer density, exponentially falling
        # no normalization to 1 done here
        inti = np.zeros(nth)
        for i in range(1, nth):
            inti[i] = simps(Kz_zth[:i] * nu_zth2[:i], zth[:i])
        sigzth2 = np.sqrt((inti + C) / nu_zth2)  # same integration constant
        ran = npr.uniform(size=int(gp.ntracer[2 - 1]))  # [1] uniform in [0,1)
        zstar2 = -z02 * np.log(1.0 - ran)  # [pc]
        # zstarobs = np.hstack([zstar, zstar2]) # concat pop1, pop2 for all stars
        sigzstar2 = gh.ipol(zth, sigzth2, zstar2)
        ran2 = npr.normal(size=int(gp.ntracer[2 - 1]))  # [1] standard normal draws
        vzstar2 = ran2 * sigzstar2  # [km/s]

    # enforce observational cut on zmax:
    sel = zstar < zmax
    print("fraction of z<zmax selected elements: ", 1.0 * sum(sel) / (1.0 * len(sel)))
    z_dat1 = zstar[sel]
    vz_dat1 = vzstar[sel]

    # throw away velocities of value zero (unstable?):
    sel = abs(vz_dat1) > 0
    print("fraction of vz_dat>0 selected elements: ", 1.0 * sum(sel) / (1.0 * len(sel)))
    z_dat1 = z_dat1[sel]
    vz_dat1 = vz_dat1[sel]

    # Calculate binned data (for plots/binned analysis). Old way: linear spacings, no constant #particles/bin
    binmin1, binmax1, z_dat_bin1, sig_dat_bin1, count_bin1 = gh.binsmooth(z_dat1, vz_dat1, zmin, zmax, gp.nipol, 0.0)
    sig_dat_err_bin1 = np.sqrt(sig_dat_bin1)  # Poisson errors

    nu_dat_bin1, nu_dat_err_bin1 = gh.bincount(z_dat1, binmax1)
    nu_dat_bin1 /= binmax1 - binmin1
    nu_dat_err_bin1 /= binmax1 - binmin1

    import gr_params

    gpr = gr_params.grParams(gp)
    if gpr.showplots:
        nuscaleb = nu_zth[np.argmin(np.abs(zth - z0))]
        plt.loglog(zth, nu_zth / nuscaleb, "b.-")
        nuscaler = nu_dat_bin1[np.argmin(np.abs(zth - z0))]
        plt.loglog(zth, nu_dat_bin1 / nuscaler, "r.-")
        # pdb.set_trace()

    Sig_dat_bin1 = np.cumsum(nu_dat_bin1)
    Sig_dat_err_bin1 = np.sqrt(Sig_dat_bin1)
    Mrdat1 = np.cumsum(Sig_dat_bin1)
    Mrerr1 = Mrdat1 * Sig_dat_err_bin1 / Sig_dat_bin1

    scales = [[], [], []]
    scales[1].append(z0)  # [pc]
    scales[1].append(Sig_dat_bin1[0])
    scales[1].append(Mrdat1[-1])
    scales[1].append(nu_dat_bin1[0])
    scales[1].append(max(sig_dat_bin1))

    # start analysis of "all stars" with only component 1,
    # append to it later if more populations required
    z_dat0 = z_dat1  # [pc]
    vz_dat0 = vz_dat1  # [km/s]

    if gp.pops == 2:
        # enforce observational constraints on z<z_max
        sel = zstar2 < zmax
        z_dat2 = zstar2[sel]
        vz_dat2 = vzstar2[sel]

        # cut zero velocities:
        sel = abs(vz_dat2) > 0
        z_dat2 = z_dat2[sel]
        vz_dat2 = vz_dat2[sel]

        # Calculate binned data (for plots/binned analysis):
        binmin2, binmax2, z_dat_bin2, sig_dat_bin2, count_bin2 = gh.binsmooth(
            z_dat2, vz_dat2, zmin, zmax, gp.nipol, 0.0
        )
        sig_dat_err_bin2 = np.sqrt(sig_dat_bin2)  # Poissonian errors

        nu_dat_bin2, nu_dat_err_bin2 = gh.bincount(z_dat2, binmax2)
        nu_dat_bin2 /= binmax2 - binmin2
        nu_dat_err_bin2 /= binmax2 - binmin2

        Sig_dat_bin2 = np.cumsum(nu_dat_bin2)
        Sig_dat_err_bin2 = np.sqrt(Sig_dat_bin2)
        Mrdat2 = np.cumsum(Sig_dat_bin2)
        Mrerr2 = Mrdat2 * Sig_dat_err_bin2 / Sig_dat_bin2

        scales[2].append(z02)  # [pc]
        scales[2].append(Sig_dat_bin2[0])
        scales[2].append(Mrdat2[-1])
        scales[2].append(nu_dat_bin2[0])  # density of the first bin, used for normalization
        scales[2].append(max(sig_dat_bin2))

        # calculate properties for all pop together with stacked values
        z_dat0 = np.hstack([z_dat1, z_dat2])
        vz_dat0 = np.hstack([vz_dat1, vz_dat2])

    # Calculate binned data (for plots/binned analysis). Old way: linear spacings, no constant #particles/bin
    binmin0, binmax0, z_dat_bin0, sig_dat_bin0, count_bin0 = gh.binsmooth(z_dat0, vz_dat0, zmin, zmax, gp.nipol, 0.0)
    sig_dat_err_bin0 = np.sqrt(sig_dat_bin0)
    # binmin, binmax, z_dat_bin = gh.bin_r_const_tracers(z_dat, gp.nipol)

    nu_dat_bin0, nu_dat_err_bin0 = gh.bincount(z_dat0, binmax0)
    nu_dat_bin0 /= binmax0 - binmin0
    nu_dat_err_bin0 /= binmax0 - binmin0

    Sig_dat_bin0 = np.cumsum(nu_dat_bin0)
    Sig_dat_err_bin0 = np.sqrt(Sig_dat_bin0)
    # renorm0 = max(nu_dat_bin0)

    xip = np.copy(z_dat_bin0)  # [pc]
    Mrdat0 = K * xip / np.sqrt(xip ** 2.0 + D ** 2.0) / (2.0 * np.pi * gu.G1__pcMsun_1km2s_2)
    Mrerr0 = Mrdat0 * nu_dat_err_bin0 / nu_dat_bin0

    scales[0].append(D)  # [pc]
    scales[0].append(Sig_dat_bin0[0])
    scales[0].append(Mrdat0[-1])
    scales[0].append(nu_dat_bin0[0])
    scales[0].append(max(sig_dat_bin0))

    rmin = binmin0 / scales[0][0]  # [pc]
    rbin = xip / scales[0][0]  # [pc]
    rmax = binmax0 / scales[0][0]  # [pc]

    # store parameters for output
    # normalized by scale values
    nudat = []
    nudat.append(nu_dat_bin0 / scales[0][3])  # [Msun/pc^3]
    nudat.append(nu_dat_bin1 / scales[1][3])
    if gp.pops == 2:
        nudat.append(nu_dat_bin2 / scales[2][3])

    nuerr = []
    nuerr.append(nu_dat_err_bin0 / scales[0][3])  # [Msun/pc^3]
    nuerr.append(nu_dat_err_bin1 / scales[1][3])
    if gp.pops == 2:
        nuerr.append(nu_dat_err_bin2 / scales[2][3])

    Mrdat = []
    Mrdat.append(Mrdat0 / scales[0][2])  # [Msun]
    Mrdat.append(Mrdat1 / scales[1][2])
    if gp.pops == 2:
        Mrdat.append(Mrdat2 / scales[2][2])

    Mrerr = []
    Mrerr.append(Mrerr0 / scales[0][2])  # [Msun]
    Mrerr.append(Mrerr1 / scales[1][2])
    if gp.pops == 2:
        Mrerr.append(Mrerr2 / scales[2][2])

    Sigdat = []
    Sigdat.append(Sig_dat_bin0 / scales[0][1])
    Sigdat.append(Sig_dat_bin1 / scales[1][1])
    if gp.pops == 2:
        Sigdat.append(Sig_dat_bin2 / scales[2][1])

    Sigerr = []
    Sigerr.append(Sig_dat_err_bin0 / scales[0][1])
    Sigerr.append(Sig_dat_err_bin1 / scales[1][1])
    if gp.pops == 2:
        Sigerr.append(Sig_dat_err_bin2 / scales[2][1])

    sigdat = []
    sigdat.append(sig_dat_bin0 / scales[0][4])  # [km/s]
    sigdat.append(sig_dat_bin1 / scales[1][4])
    if gp.pops == 2:
        sigdat.append(sig_dat_bin2 / scales[2][4])

    sigerr = []
    sigerr.append(sig_dat_err_bin0 / scales[0][4])  # [km/s]
    sigerr.append(sig_dat_err_bin1 / scales[1][4])
    if gp.pops == 2:
        sigerr.append(sig_dat_err_bin2 / scales[2][4])
    write_disc_output_files(rbin, rmin, rmax, nudat, nuerr, Sigdat, Sigerr, Mrdat, Mrerr, sigdat, sigerr, scales, gp)

    return gp.dat
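run() builds the projected surface density by chaining cumulative sums of the binned density. With the linear bins used here, a unit-consistent variant multiplies by the bin width first; the following is a sketch of that idea, not run()'s exact convention:

import numpy as np

edges = np.linspace(100., 1300., 13)  # [pc] linear bin edges
zc = 0.5 * (edges[:-1] + edges[1:])   # [pc] bin centers
dz = np.diff(edges)                   # [pc] bin widths
nu = np.exp(-zc / 240.)               # binned 3D density, arbitrary units
Sig = np.cumsum(nu * dz)              # running Sigma(<z): one cumulative sum
Mr = np.cumsum(Sig * dz)              # a second cumulative sum, as run() chains them
print(Sig[-1], Mr[-1])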
Example 8
def get_prof(prof, pop, gp):
    zmin = 100.  # [pc], first bin center
    zmax = 1300.  # [pc], last bin center
    # get support points ("Stuetzpunkte") for the theoretical profiles (not yet stars)
    nth = gp.nipol  # [1] number of bins
    zth = 1. * np.arange(nth) * (zmax - zmin) / (nth - 1.) + zmin  # [pc] bin centers
    z0 = 240.  # [pc], scaleheight of first population
    z02 = 200.  # [pc], scaleheight of second population
    D = 250.  # [pc], scaleheight of all stellar tracers
    K = 1.65
    F = 1.65e-4
    C = 17.**2.  # [(km/s)^2] integration constant in sig

    # Draw mock data from exponential disk:
    nu_zth = np.exp(-zth / z0)  # [nu0] = [Msun/A/pc] 3D tracer density
    if prof == 'nu' and pop == 1:
        return zth, nu_zth
    Kz_zth = -(K * zth / np.sqrt(zth**2. + D**2.) + 2.0 * F * zth)

    if gp.adddarkdisc:
        DD = 600  # [pc] scaleheight of dark disc
        KD = 0.15 * 1.650
        Kz_zth = Kz_zth - KD * zth / np.sqrt(zth**2. + DD**2.)

    # calculate sig_z^2
    inti = np.zeros(nth)
    for i in range(1, nth):
        inti[i] = simps(Kz_zth[:i] * nu_zth[:i], zth[:i])

    sigzth = np.sqrt((inti + C) / nu_zth)
    if prof == 'sig' and pop == 1:
        return zth, sigzth
    # project back to positions of stars
    ran = npr.uniform(size=int(gp.ntracer[1 - 1]))  # [1]
    zstar = -z0 * np.log(1.0 - ran)  # [pc] stellar positions, exponential falloff

    sigzstar = gh.ipol(zth, sigzth, zstar)
    # stellar velocity dispersion (> 0 by construction; after the IDL version by Justin)

    # draw Gaussian velocities with dispersion sigzstar
    ran2 = npr.normal(size=int(gp.ntracer[1 - 1]))  # [1]
    vzstar = ran2 * sigzstar  # [km/s]

    # Add second population [thick-disc like]:
    if gp.pops == 2:
        nu_zth2 = gp.ntracer[2 - 1] / gp.ntracer[1 - 1] * np.exp(-zth / z02)
        if prof == 'nu' and pop == 2:
            return zth, nu_zth2
        # [nu0,2] = [Msun/A/pc], 3D tracer density, exponentially falling
        # no normalization to 1 done here
        inti = np.zeros(nth)
        for i in range(1, nth):
            inti[i] = simps(Kz_zth[:i] * nu_zth2[:i], zth[:i])
        sigzth2 = np.sqrt((inti + C) / nu_zth2)  # same integration constant
        if prof == 'sig' and pop == 2:
            return zth, sigzth2
        ran = npr.uniform(size=int(gp.ntracer[2 - 1]))  # [1] uniform in [0,1)
        zstar2 = -z02 * np.log(1.0 - ran)  # [pc]
        #zstarobs = np.hstack([zstar, zstar2]) # concat pop1, pop2 for all stars
        sigzstar2 = gh.ipol(zth, sigzth2, zstar2)
        ran2 = npr.normal(size=int(gp.ntracer[2 - 1]))  # [1] standard normal draws
        vzstar2 = ran2 * sigzstar2  # [km/s]