Example #1
def wd1_pm_errors():
    cc = objects.Constants()

    times1 = np.array([2005.0, 2010.0, 2012.0, 2014.0])
    times2 = np.array([2010.0, 2012.0, 2014.0])
    
    poserr = 1.0 # mas

    pmerr1 = poserr / math.sqrt( ((times1 - times1.mean())**2).sum() )
    pmerr2 = poserr / math.sqrt( ((times2 - times2.mean())**2).sum() )

    # Conversion from mas/yr to km/s
    dist = 4000.0 # pc
    masyr_to_kms = dist * cc.cm_in_au / (10**5 * 10**3 * cc.sec_in_yr)
    print 'Conversion: 1 mas/yr = %.2f km/s' % masyr_to_kms

    print 'Proper Motion Errors:'
    print '  positional error  = %.1f mas' % poserr
    print '  epochs of obs.    =  ', times1
    print '  proper motion err = %.2f mas/yr (%.1f km/s)' % \
        (pmerr1, pmerr1 * masyr_to_kms)

    print 'Proper Motion Errors:'
    print '  positional error  = %.1f mas' % poserr
    print '  epochs of obs.    =  ', times2
    print '  proper motion err = %.2f mas/yr (%.1f km/s)' % \
        (pmerr2, pmerr2 * masyr_to_kms)
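
The error propagated here is the standard least-squares slope uncertainty for a straight-line fit of position against time with equal per-epoch errors, pmerr = poserr / sqrt(sum_i (t_i - t_mean)^2). Below is a minimal, self-contained check of that formula against a brute-force Monte Carlo; the epochs and the 1 mas positional error are taken from the example, everything else (number of trials) is illustrative.

import numpy as np

times = np.array([2005.0, 2010.0, 2012.0, 2014.0])
poserr = 1.0  # mas

# Analytic slope error for a straight-line fit with uniform errors.
pmerr_analytic = poserr / np.sqrt(((times - times.mean())**2).sum())

# Empirical check: fit many noisy realizations of a flat trajectory.
slopes = np.array([np.polyfit(times, np.random.normal(0.0, poserr, times.size), 1)[0]
                   for _ in range(20000)])

print('analytic  pm error: %.3f mas/yr' % pmerr_analytic)
print('empirical pm error: %.3f mas/yr' % slopes.std())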
Example #2
def distProperMotionError(yng, binsIn, r2d, ridx, colors, legend, outdir):
    """
    Return the distribution of proper motion errors in arcsec/yr.
    """
    cc = objects.Constants()
    binCnt = len(binsIn) - 1

    # Velocity Errors are taken from the observed distribution
    vxerrData = yng.getArray('vxerr') / cc.asy_to_kms
    vyerrData = yng.getArray('vyerr') / cc.asy_to_kms

    vxerrDist = []
    vyerrDist = []
    py.clf()
    for bb in range(binCnt):
        vxerrDist.append(np.array(vxerrData[ridx[bb]]))
        py.hist(vxerrDist[bb] * 10**3,
                bins=5,
                histtype='step',
                ec=colors[bb],
                linewidth=2)
    py.xlabel('X Proper Motion Errors (mas/yr)')
    py.ylabel('Number of Stars Observed')
    py.legend(legend)
    py.savefig(outdir + 'hist_vxerr_obs.png')

    py.clf()
    for bb in range(binCnt):
        vyerrDist.append(np.array(vyerrData[ridx[bb]]))
        py.hist(vyerrDist[bb] * 10**3,
                bins=5,
                histtype='step',
                ec=colors[bb],
                linewidth=2)
    py.xlabel('Y Proper Motion Errors (mas/yr)')
    py.ylabel('Number of Stars Observed')
    py.legend(legend)
    py.savefig(outdir + 'hist_vyerr_obs.png')

    py.clf()
    for bb in range(binCnt):
        py.plot(r2d[ridx[bb]],
                vxerrDist[bb] * 10**3,
                marker='s',
                color=colors[bb],
                linestyle='')
        py.plot(r2d[ridx[bb]],
                vyerrDist[bb] * 10**3,
                marker='o',
                color=colors[bb],
                linestyle='')
    py.xlabel('Projected Radius (arcsec)')
    py.ylabel('Proper Motion Errors (mas/yr)')
    py.savefig(outdir + 'plot_vxyerr_vs_r2d_obs.png')

    return (vxerrDist, vyerrDist)
Example #3
def distSemimajorAxisPeriod(N, outdir):
    """
    Generate semi-major axis in parsec and period in years.
    """
    cc = objects.Constants()

    # Stretch from 0.2" (1600 AU) out to 40" (1.6 pc)
    a = scipy.stats.reciprocal.rvs(0.008, 1.6, size=N)  # in pc

    # Generate period in years
    p = np.sqrt((a * cc.au_in_pc)**3 / 4.0e6)

    # Double check that this produces a volume density \propto 1/r^3
    py.clf()
    counts, bins, foo = py.hist(a, bins=100)

    xdata = bins[:-1] + ((bins[1:] - bins[:-1]) / 2.0)

    shellVolume = (4.0 / 3.0) * math.pi * (bins[1:]**3 - bins[:-1]**3)
    shellVolume /= 0.04**3
    ydata = np.array(counts, dtype=float) / shellVolume

    idx = np.where(counts <= 0)[0]
    if (len(idx) > 0):
        xdata = xdata[0:idx[0]]
        ydata = ydata[0:idx[0]]

    # Fit the data to check the powerlaw of the volume density.
    logx = np.log10(xdata)
    logy = np.log10(ydata)

    fitfunc = lambda p, x, y: y - (p[0] + p[1] * x)
    pinit = [1.0, -3.0]
    out = scipy.optimize.leastsq(fitfunc,
                                 pinit,
                                 args=(logx, logy),
                                 full_output=1)
    pfinal = out[0]
    amp = 10.0**pfinal[0]
    index = pfinal[1]
    print 'Semimajor-Axis: Amplitude = %5.2f   Index = %5.2f' % (amp, index)

    powerlaw = lambda x, amp, index: amp * (x**index)

    py.clf()
    py.loglog(xdata, ydata, 'bo')
    py.loglog(xdata, powerlaw(xdata, amp, index), 'k-')
    py.ylabel('Number Density (stars/arcsec^3)')
    py.xlabel('Semi-major axis (pc)')
    py.savefig(outdir + '/semimajor_axis_profile.png')

    return a, p
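
scipy.stats.reciprocal is the log-uniform distribution, so the draw above gives dN/da proportional to 1/a; spreading those stars over spherical shells then gives a volume density n(a) proportional to (dN/da) / (4 pi a^2), i.e. proportional to a^-3, which is what the power-law fit is meant to recover. A standalone sketch of the same check without the plotting (bin count and sample size are placeholders):

import numpy as np
import scipy.stats

# Log-uniform semi-major axes between the example's limits (pc).
a = scipy.stats.reciprocal.rvs(0.008, 1.6, size=100000)

counts, bins = np.histogram(a, bins=50)
centers = 0.5 * (bins[1:] + bins[:-1])
shellVolume = (4.0 / 3.0) * np.pi * (bins[1:]**3 - bins[:-1]**3)
density = counts / shellVolume

# The slope of log n(r) vs. log r should come out close to -3.
good = counts > 0
slope = np.polyfit(np.log10(centers[good]), np.log10(density[good]), 1)[0]
print('fitted power-law index: %.2f (expect roughly -3)' % slope)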
Example #4
def velocityAverage(alignRoot, polyRoot, magCut=15):
    """
    Calculate the mean (and error on mean) of the velocities from
    a align/polyfit.

    Input Parameters:
    -- the root name of the align output (e.g. 'align/align_d_rms_t')
    -- the root name of the polyfit output (e.g. 'polyfit_d/fit')
    """
    # This should be an absolute aligned data set.
    cc = objects.Constants()
    s = starset.StarSet(alignRoot)
    s.loadPolyfit(polyRoot, accel=0)

    vx = s.getArray('fitXv.v') * 10**3
    vy = s.getArray('fitYv.v') * 10**3
    vxerr = s.getArray('fitXv.verr') * 10**3
    vyerr = s.getArray('fitYv.verr') * 10**3

    mag = s.getArray('mag')
    idx = np.where(mag <= magCut)[0]

    py.clf()
    py.hist(vx[idx])
    py.hist(vy[idx])

    print 'Number of Stars: %d' % len(idx)
    print 'X Mean Velocity: %5.2f' % (vx[idx].mean())
    print 'X Error on Mean: %5.2f' % (vx[idx].std() / math.sqrt(len(idx)))
    print 'Y Mean Velocity: %5.2f' % (vy[idx].mean())
    print 'Y Error on Mean: %5.2f' % (vy[idx].std() / math.sqrt(len(idx)))

    # Plot distribution of velocity errors
    py.clf()
    binsIn = np.arange(0, max([max(vxerr), max(vyerr)]), 0.1)
    (bins, data) = histNofill.hist(binsIn, vxerr)
    py.plot(bins, data, 'r', linewidth=2)
    (bins, data) = histNofill.hist(binsIn, vyerr)
    py.plot(bins, data, 'b', linewidth=2)
    py.axis([0, 10, 0, 600])
    py.xlabel('Velocity Errors (mas/yr)')
    py.ylabel('N')
    py.savefig('plots/histVelErr.png')
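
The error on the mean reported above is the usual std / sqrt(N) of the magnitude-selected sample. A tiny illustration with made-up velocities and magnitudes (all values hypothetical):

import numpy as np

vx = np.random.normal(0.0, 3.0, size=500)     # mas/yr, synthetic
mag = np.random.uniform(9.0, 18.0, size=500)

idx = np.where(mag <= 15.0)[0]

# The denominator uses the size of the selected subset, not the full sample.
meanVx = vx[idx].mean()
errVx = vx[idx].std() / np.sqrt(len(idx))
print('X Mean Velocity: %5.2f +/- %5.2f' % (meanVx, errVx))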
Example #5
def plotRadial():
    # Constants taken from Bender et al. (2005)
    cc = objects.Constants()
    m31mass = 1.4e8
    m31dist = 760000.0

    # Construct an array of radii out to 3 arcsec in steps of 0.05''
    r = (na.arange(12 * 5) * 0.05) + 0.05
    r_au = r * m31dist
    r_pc = r_au / cc.au_in_pc
    r_cm = r_au * cc.cm_in_au

    # Determine the theoretical amount for a vs. r
    a_cm_s2 = cc.G * m31mass * cc.msun / r_cm**2
    a_km_s_yr = a_cm_s2 * cc.sec_in_yr / 1.0e5
    a_mas_yr2 = a_cm_s2 * pow(cc.sec_in_yr, 2) * 1000.0
    a_mas_yr2 /= (cc.cm_in_au * m31dist)

    # Plot circular velocity in both mas/yr and km/s
    v_cm_s = na.sqrt(cc.G * m31mass * cc.msun / r_cm)
    v_km_s = v_cm_s / 1.0e5
    v_mas_yr = v_cm_s * cc.sec_in_yr * 1000.0 / (cc.cm_in_au * m31dist)

    masyr_kms = (1.0 / 1000.0) * m31dist * cc.cm_in_au / (1.0e5 * cc.sec_in_yr)
    masyr2_kmsyr = (1.0 / 1000.0) * m31dist * cc.cm_in_au
    masyr2_kmsyr /= 1.0e5 * pow(cc.sec_in_yr, 2)

    ##########
    #
    # Calculate some useful quantities for Keck/TMT
    #
    ##########
    dKeck = 10.0
    dTMT = 10.0

    resKeckK = 1.0e3 * 0.25 * 2.2 / dKeck  # K (GC) on Keck has similar Strehl
    resTMTZ = 1.0e3 * 0.25 * 1.035 / dTMT  # to obs with Z on TMT (mas).

    posErrKeck = 0.15  # mas
    ratioKeck = resKeckK / posErrKeck

    posErrTMT = resTMTZ / ratioKeck
    print('Estimated positional error for TMT at Z-band: %5.3f' % (posErrTMT))

    # 1 years, 3 sigma
    velLo1 = 3.0 * posErrTMT
    velLoKms1 = velLo1 * masyr_kms

    # 3 years, 3 sigma
    velLo3 = posErrTMT
    velLoKms3 = velLo3 * masyr_kms

    print('Lowest detectable velocities in:')
    print('\t 1 year, 3 sigma -- low vel = %4.2f mas/yr = %4d km/s' % \
          (velLo1, velLoKms1))
    print('\t 3 year, 3 sigma -- low vel = %4.2f mas/yr = %4d km/s' % \
          (velLo3, velLoKms3))

    ##########
    #
    # Velocity vs. Radius
    #
    ##########
    pylab.figure(2, figsize=(7, 7))
    pylab.clf()

    #     pylab.plot(r, v_mas_yr, linewidth=2)
    #     pylab.xlabel('Distance from Mbh (arcsec)')
    #     pylab.ylabel('Circular Velocity (mas/yr)')
    pylab.plot(r, v_km_s, linewidth=2)
    pylab.xlabel('Distance from Mbh (arcsec)')
    pylab.ylabel('Circular Velocity (km/s)')

    # Detection limit
    #     pylab.plot([0, 10], [velLo1, velLo1], 'k--')
    #     pylab.plot([0, 10], [velLo3, velLo3], 'k--')
    #     pylab.text(2.5, velLo1, '1 year')
    #     pylab.text(2.5, velLo3, '3 years')
    pylab.plot([0, 10], [velLoKms1, velLoKms1], 'k--')
    pylab.plot([0, 10], [velLoKms3, velLoKms3], 'k--')
    pylab.plot([0, 10], [30.0, 30.0], 'k--')
    pylab.text(2.5, velLoKms1, '1 year')
    pylab.text(2.5, velLoKms3, '3 years')
    pylab.text(0.3, 35.0, 'Radial Vel.')

    arr1 = pylab.Arrow(2.4, velLoKms1, 0, 0.04 * masyr_kms, width=0.09)
    arr3 = pylab.Arrow(2.4, velLoKms3, 0, 0.04 * masyr_kms, width=0.09)
    arrRv = pylab.Arrow(0.2, 30.0, 0, 0.04 * masyr_kms, width=0.09)
    fig = pylab.gca()
    fig.add_patch(arr1)
    fig.add_patch(arr3)
    fig.add_patch(arrRv)

    str = '0.1 mas/yr = %4d km/s' % (0.1 * masyr_kms)
    pylab.axis([0.0, 3, 0.0, 0.5 * masyr_kms])
    pylab.text(1.3, 0.45 * masyr_kms, str)
    pylab.savefig('m31theory_vel.png')
    pylab.savefig('m31theory_vel.eps')

    pylab.clf()
    pylab.plot(r, a_mas_yr2)
    pylab.xlabel('Distance from Mbh (arcsec)')
    pylab.ylabel('Acceleration (mas/yr^2)')
    str = '1 mas/yr^2 = %5.2f km/s/yr' % (1.0 * masyr2_kmsyr)
    pylab.text(1.0e-3, 1.5, str)
    pylab.savefig('m31theory_acc.eps')
    pylab.savefig('m31theory_acc.png')
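
All of the unit conversions above follow from the fact that 1 mas/yr of proper motion at a distance d corresponds to a transverse displacement of d (expressed in AU) times 1e-3 AU per year. A self-contained sketch of the mas/yr to km/s factor and the circular-velocity estimate, using standard constant values in place of objects.Constants() (which is not shown here):

import numpy as np

cm_in_au = 1.495978707e13   # cm per AU
sec_in_yr = 3.15576e7       # seconds per Julian year
G = 6.674e-8                # cgs
msun = 1.989e33             # g

m31dist = 760000.0          # pc
m31mass = 1.4e8             # Msun

# 1 mas/yr at M31: the corresponding transverse speed in km/s.
masyr_kms = (1.0 / 1000.0) * m31dist * cm_in_au / (1.0e5 * sec_in_yr)
print('1 mas/yr at %.0f pc = %.1f km/s' % (m31dist, masyr_kms))

# Circular velocity around the black hole at 1 arcsec projected radius.
r_cm = 1.0 * m31dist * cm_in_au       # 1 arcsec -> AU -> cm
v_km_s = np.sqrt(G * m31mass * msun / r_cm) / 1.0e5
print('v_circ(1") = %.1f km/s' % v_km_s)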
Example #6
import numpy as np
import pylab as py
import pyfits, math
from gcreduce import gcutil
from gcwork import objects
from jlu.util import img_scale

datadir = '/u/jlu/data/m31/08oct/081021/SPEC/reduce/m31/ss/'
workdir = '/u/jlu/work/m31/nucleus/ifu_09_02_24/'
cuberoot = 'm31_08oct_Kbb_050'

cc = objects.Constants()

uvInNIRC2 = np.array([700.8, 582.7])


def plotWithHST(rotateFITS=True):
    # Load up the OSIRIS image
    cubeFile = datadir + cuberoot + '_img.fits'
    cube, cubehdr = pyfits.getdata(cubeFile, header=True)
    paCube = cubehdr['PA_SPEC']
    scaleCube = 0.05

    # Load up the NIRC2 image
    kFile = '/u/jlu/data/m31/05jul/combo/m31_05jul_kp.fits'
    #     kFile = '/u/jlu/data/m31/09sep/combo/mag09sep_m31_kp.fits'
    k, khdr = pyfits.getdata(kFile, header=True)
    m31K = np.array([751.986, 716.989])
    paK = 0.0
    scaleK = 0.00995
def m31_plotWithHST(rotateFITS=True):
    cc = objects.Constants()

    # Load up the images
    kFile = '/u/jlu/data/m31/09sep/combo/mag09sep_m31_kp.fits'
    f330File = '/u/jlu/work/m31/nucleus/align/m31_0330nm_rot.fits'
    f435File = '/u/jlu/work/m31/nucleus/align/m31_0435nm_rot.fits'

    # Load up the NIRC2 image
    k = pyfits.getdata(kFile)
    f330 = pyfits.getdata(f330File)
    f435 = pyfits.getdata(f435File)

    m31K = np.array([701.822, 583.696])
    scaleK = 0.00995

    img = np.zeros((k.shape[0], k.shape[1], 3), dtype=float)
    img[:, :, 0] = img_scale.linear(k, scale_min=10, scale_max=3800)
    img[:, :, 1] = img_scale.linear(f435, scale_min=0.01, scale_max=1.8)
    img[:, :, 2] = img_scale.linear(f330, scale_min=0, scale_max=0.16)

    # Axes
    xaxis = (np.arange(img.shape[0], dtype=float) - (m31K[0] - 1.0)) * scaleK
    yaxis = (np.arange(img.shape[1], dtype=float) - (m31K[1] - 1.0)) * scaleK

    py.clf()
    py.imshow(img,
              aspect='equal',
              extent=[xaxis[0], xaxis[-1], yaxis[0], yaxis[-1]],
              interpolation='nearest')
    py.plot([0], [0], 'c+', linewidth=2, ms=10, mew=2)
    py.xlabel('R.A. Offset from M31* (arcsec)', fontsize=16)
    py.ylabel('Dec. Offset from M31* (arcsec)', fontsize=16)
    py.title('Blue = F330W, Green = F435W, Red = NIRC2-K\'')

    # Overplot the OSIRIS fields of view.
    rec1xTmp = np.array([-1.6, -1.6, 1.6, 1.6, -1.6])
    rec1yTmp = np.array([-0.3, 0.5, 0.5, -0.3, -0.3])
    rec2xTmp = np.array([-1.6, -1.6, 1.6, 1.6, -1.6])
    rec2yTmp = np.array([-1.0, -0.2, -0.2, -1.0, -1.0])
    rec3xTmp = np.array([-1.6, -1.6, 1.6, 1.6, -1.6])
    rec3yTmp = np.array([0.4, 1.2, 1.2, 0.4, 0.4])

    # rotate
    pa = math.radians(-34.0)
    cospa = math.cos(pa)
    sinpa = math.sin(pa)
    rec1x = rec1xTmp * cospa - rec1yTmp * sinpa
    rec1y = rec1xTmp * sinpa + rec1yTmp * cospa
    rec2x = rec2xTmp * cospa - rec2yTmp * sinpa
    rec2y = rec2xTmp * sinpa + rec2yTmp * cospa
    rec3x = rec3xTmp * cospa - rec3yTmp * sinpa
    rec3y = rec3xTmp * sinpa + rec3yTmp * cospa

    py.plot(rec1x, rec1y, 'g-', linewidth=2)
    py.plot(rec2x, rec2y, 'g--', linewidth=2)
    py.plot(rec3x, rec3y, 'g--', linewidth=2)

    # Label
    py.text(0.1,
            0.1,
            'P3\nBH+A stars',
            color='cyan',
            fontweight='bold',
            verticalalignment='bottom',
            horizontalalignment='left',
            fontsize=16)
    py.text(0.5,
            -0.3,
            'P2\nperiapse',
            color='white',
            fontweight='bold',
            verticalalignment='top',
            horizontalalignment='center',
            fontsize=16)
    py.text(-0.7,
            0.5,
            'apoapse\nP1',
            color='white',
            fontweight='bold',
            verticalalignment='bottom',
            horizontalalignment='center',
            fontsize=16)

    limit = 2.0
    py.axis([-limit, limit, -limit, limit])
    py.savefig('m31_hst_nirc2_rgb.png')
    py.savefig('m31_hst_nirc2_rgb.eps')
    py.show()
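
The OSIRIS field-of-view overlays are rotated corner by corner with the usual 2D rotation x' = x cos(pa) - y sin(pa), y' = x sin(pa) + y cos(pa). The same operation written with a rotation matrix (a sketch; the -34 degree position angle and the corner list come from the example):

import numpy as np

pa = np.radians(-34.0)
R = np.array([[np.cos(pa), -np.sin(pa)],
              [np.sin(pa),  np.cos(pa)]])

# Corners of one OSIRIS field (arcsec), closed polygon, from the example.
rec = np.array([[-1.6, -1.6, 1.6, 1.6, -1.6],
                [-0.3,  0.5, 0.5, -0.3, -0.3]])

recRot = R.dot(rec)    # rows are the rotated x and y coordinates
print(recRot[0])       # rotated x
print(recRot[1])       # rotated y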
Example #8
def generateData(root, outdir, N=10**7):
    """
    Remember to calculate at least 2 times more stars than you want
    because many are dropped from our observing window.
    """
    cc = objects.Constants()

    yng = young.loadAllYoungStars(root)
    names = yng.getArray('name')

    # Get out the distributions we will need for our simulation.
    # Each relevant parameter will be plotted as a function of
    # radius, divided into the following radial bins:
    #    BIN #1:  r = 0.8" - 2.5"
    #    BIN #2:  r = 2.5" - 3.5"
    #    BIN #3:  r = 3.5" - 7.0"
    #    BIN #4:  r = 7.0" - 13.5"

    binsIn = np.array([0.8, 2.5, 3.5, 7.0, 13.5])
    colors = ['red', 'orange', 'green', 'blue']
    legend = ['0.8"-2.5"', '2.5"-3.5"', '3.5"-7.0"', '7.0"-13.5"']

    r2d = yng.getArray('r2d')

    py.figure(1)
    py.clf()
    py.hist(r2d, bins=binsIn, histtype='step', linewidth=2)
    py.xlabel('Projected Radius (arcsec)')
    py.ylabel('Number of Stars Observed')
    py.savefig(outdir + 'hist_r2d_obs.png')

    binCnt = len(binsIn) - 1

    ridx = []
    for bb in range(binCnt):
        idx = np.where((r2d >= binsIn[bb]) & (r2d < binsIn[bb + 1]))[0]
        ridx.append(idx)

    ##########
    #
    # For each star we will use error distributions from the
    # data itself. Set these up.
    #
    ##########
    # Positional Uncertainties in arcsec
    xerrDist, yerrDist = distPositionalError(yng, binsIn)

    # Proper motion uncertainties in arcsec/yr
    vxerrDist, vyerrDist = distProperMotionError(yng, binsIn, r2d, ridx,
                                                 colors, legend, outdir)

    # Radial velocity uncertainties in km/s
    vzerrDist = distRadialVelocityError(yng, binsIn, r2d, ridx, colors, legend,
                                        outdir)

    # Acceleration uncertainties in arcsec/yr^2
    axerrDist, ayerrDist = distAccelerationError(yng, binsIn, r2d, colors,
                                                 legend, outdir)

    # We will need a random number distribution for each of these.
    # These are all 0-1, but we will convert them into indices into
    # the above distributions later on when we know the radius of each
    # of the simulated stars.
    xerrRand = scipy.rand(N)
    yerrRand = scipy.rand(N)
    vxerrRand = scipy.rand(N)
    vyerrRand = scipy.rand(N)
    vzerrRand = scipy.rand(N)
    axerrRand = scipy.rand(N)
    ayerrRand = scipy.rand(N)

    ##########
    #
    # Generate simulated stars' orbital parameters
    #
    ##########

    # Inclination and Omega (degrees)
    i, o = distNormalVector(N, outdir)

    # Angle to the ascending node (degrees)
    w = scipy.rand(N) * 360.0

    # Semi-major axis (pc) and period (years)
    a, p = distSemimajorAxisPeriod(N, outdir)

    # Eccentricity
    e = scipy.stats.powerlaw.rvs(2, 0, size=N)

    # Cap the eccentricities just to prevent
    # infinite loops.
    edx = np.where(e > 0.98)[0]
    e[edx] = 0.98

    # Time of Periapse (yr)
    t0 = scipy.rand(N) * p

    ##########
    #
    # Determine x, y, vx, vy, vz, ax, ay
    # Assign errors accordingly.
    # Record star to file IF it falls within our projected
    # observing window.
    #
    ##########
    x = np.zeros(N, dtype=np.float32)
    y = np.zeros(N, dtype=np.float32)
    vx = np.zeros(N, dtype=np.float32)
    vy = np.zeros(N, dtype=np.float32)
    vz = np.zeros(N, dtype=np.float32)
    ax = np.zeros(N, dtype=np.float32)
    ay = np.zeros(N, dtype=np.float32)

    xerr = np.zeros(N, dtype=np.float32)
    yerr = np.zeros(N, dtype=np.float32)
    vxerr = np.zeros(N, dtype=np.float32)
    vyerr = np.zeros(N, dtype=np.float32)
    vzerr = np.zeros(N, dtype=np.float32)
    axerr = np.zeros(N, dtype=np.float32)
    ayerr = np.zeros(N, dtype=np.float32)

    skipped = np.zeros(N, dtype=np.int16)

    for nn in range(N):
        if (((nn % 10**4) == 0)):
            print 'Simulated Star %d: ' % nn, time.ctime(time.time())

        orb = orbits.Orbit()
        orb.w = w[nn]
        orb.o = o[nn]
        orb.i = i[nn]
        orb.e = e[nn]
        orb.p = p[nn]
        orb.t0 = t0[nn]

        # Remember r (arcsec), v (mas/yr), a (mas/yr^2)
        (pos, vel, acc) = orb.kep2xyz(np.array([0.0]), mass=4.0e6, dist=8.0e3)
        pos = pos[0]  # in arcsec
        vel = vel[0] / 10**3  # convert to arcsec/yr
        acc = acc[0] / 10**3  # convert to arcsec/yr^2

        r2d = math.sqrt(pos[0]**2 + pos[1]**2)

        # Impose our observing limits
        if (r2d < binsIn[0]) or (r2d > binsIn[-1]):
            skipped[nn] = 1
            continue

        tmp = np.where(binsIn > r2d)[0]
        errIdx = tmp[0] - 1

        # Set noisy positions
        xerr[nn] = xerrDist[errIdx]
        yerr[nn] = yerrDist[errIdx]
        x[nn] = np.random.normal(pos[0], xerr[nn])
        y[nn] = np.random.normal(pos[1], yerr[nn])

        # Set noisy velocities
        errCnt = len(vxerrDist)
        vxerr[nn] = vxerrDist[errIdx][int(math.floor(vxerrRand[nn] * errCnt))]
        vyerr[nn] = vyerrDist[errIdx][int(math.floor(vyerrRand[nn] * errCnt))]
        vzerr[nn] = vzerrDist[errIdx][int(math.floor(vzerrRand[nn] * errCnt))]

        # Convert radial velocity into km/s
        vel[2] *= 8.0 * cc.cm_in_au / (1e5 * cc.sec_in_yr)

        vx[nn] = np.random.normal(vel[0], vxerr[nn])
        vy[nn] = np.random.normal(vel[1], vyerr[nn])
        vz[nn] = np.random.normal(vel[2], vzerr[nn])

        # Set noisy accelerations ONLY for close in stars

        if (errIdx <= 1):
            # Close in stars have acceleration constraints
            axerr[nn] = axerrDist[errIdx][int(
                math.floor(axerrRand[nn] * errCnt))]
            ayerr[nn] = ayerrDist[errIdx][int(
                math.floor(ayerrRand[nn] * errCnt))]
            ax[nn] = np.random.normal(acc[0], axerr[nn])
            ay[nn] = np.random.normal(acc[1], ayerr[nn])
        else:
            axerr[nn] = 0.0
            ayerr[nn] = 0.0
            ax[nn] = 0.0
            ay[nn] = 0.0

    useIdx = np.where(skipped == 0)[0]
    finalTotal = len(useIdx)
    skipCount = N - finalTotal

    print 'Skipped %d, Final Total %d' % (skipCount, finalTotal)

    x = x[useIdx]
    y = y[useIdx]
    vx = vx[useIdx]
    vy = vy[useIdx]
    vz = vz[useIdx]
    ax = ax[useIdx]
    ay = ay[useIdx]

    xerr = xerr[useIdx]
    yerr = yerr[useIdx]
    vxerr = vxerr[useIdx]
    vyerr = vyerr[useIdx]
    vzerr = vzerr[useIdx]
    axerr = axerr[useIdx]
    ayerr = ayerr[useIdx]

    #print 'x  = ', x[-1], xerr[-1]
    #print 'y  = ', y[-1], yerr[-1]
    #print 'vx = ', vx[-1], vxerr[-1]
    #print 'vy = ', vy[-1], vyerr[-1]
    #print 'vz = ', vz[-1], vzerr[-1]
    #print 'ax = ', ax[-1], axerr[-1]
    #print 'ay = ', ay[-1], ayerr[-1]

    # Verify that we get the expected 2D distribution back out again
    checkSurfaceDensity(x, y, outdir)

    ##########
    #
    # Write to output file
    #
    ##########
    _out = open(outdir + '/isotropic_stars.dat', 'w')

    pickle.dump((x, xerr), _out)
    pickle.dump((y, yerr), _out)
    pickle.dump((vx, vxerr), _out)
    pickle.dump((vy, vyerr), _out)
    pickle.dump((vz, vzerr), _out)
    pickle.dump((ax, axerr), _out)
    pickle.dump((ay, ayerr), _out)

    _out.close()
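
The output file is a sequence of pickled (value, error) tuples, so it has to be read back in exactly the order it was written. A minimal sketch of a matching reader (the file layout is taken from the pickle.dump calls above; the function name is made up):

import pickle

def readIsotropicStars(filename):
    """Read back the sequentially pickled (value, error) arrays."""
    with open(filename, 'rb') as _in:
        x, xerr = pickle.load(_in)
        y, yerr = pickle.load(_in)
        vx, vxerr = pickle.load(_in)
        vy, vyerr = pickle.load(_in)
        vz, vzerr = pickle.load(_in)
        ax, axerr = pickle.load(_in)
        ay, ayerr = pickle.load(_in)
    return (x, xerr, y, yerr, vx, vxerr, vy, vyerr,
            vz, vzerr, ax, axerr, ay, ayerr)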
Example #9
def plotParamsProb(
        mcfile=root + '08_02_16/MCdir/bias_cent_vel3d/mc_zero.new.1e2.log',
        paramfile=root + '08_02_16/output/output.s02.vel3d.5b.param1'):
    """
    Plot the 1D PDFs for all the parameters generated in the
    efit Monte Carlo runs. 

    Input Parameters:
    mcfile -- the Monte Carlo results file.
    paramfile -- the param file from the chi2 analysis (single line)

    Output Files:
    plotParamsProb_**.eps -- plot of the probability distribution
    plotParamsProb_**.png    for each parameter.

    plotParamsProb_limits.txt -- File containing the best fit solution,
    the most probable solution, and the 1, 2, and 3 sigma limits.

    Here are all the variables (and associated limits) that are plotted:

    x0    = 'Sgr A* X Position (mas)'
    y0    = 'Sgr A* Y Position (mas)'
    r0    = 'Ro (pc)'
    p     = 'Period (yr)'
    a     = 'Semi-Major Axis (mas)' 
    e     = 'Eccentricity'
    t0    = 'Epoch of Periapse (yr)'
    w     = 'Argument of Periapse (deg)'
    i     = 'Inclination (deg)'
    o     = 'Angle of the Ascending Node (deg)'
    vx    = 'Sgr A* X Velocity (mas/yr)'
    vy    = 'Sgr A* Y Velocity (mas/yr)'
    vz    = 'Sgr A* Z Velocity (km/s)'
    m     = 'Mass (million solar masses)' 
    pdist = 'Periapse Distance (mpc)' 
    density = 'Density (10^15 Mo/pc^3)'

    """
    cc = objects.Constants()

    # param1
    table = asciidata.open(paramfile)

    # Make things into arrays of floats, etc.
    r0_fit = float(table[0][0])  # in pc
    x0_fit = float(table[1][0])  # in pix (working on abs, scale = 1)
    y0_fit = float(table[2][0])  # in pix (working on abs, scale = 1)
    a_fit = float(table[3][0])  # in mas
    p_fit = float(table[4][0])  # in yrs
    e_fit = float(table[5][0])
    t0_fit = float(table[6][0])
    w_fit = float(table[7][0])
    i_fit = float(table[8][0])
    o_fit = float(table[9][0])
    vx_fit = float(table[10][0])  # mas/yr
    vy_fit = float(table[11][0])  # mas/yr
    vz_fit = float(table[12][0])  # km/s

    # convert semi-major axis and period into mass
    m_fit = (a_fit * r0_fit / 1000.0)**3 / p_fit**2

    # calculate periapse distance (in mpc)
    pdist_fit = a_fit * r0_fit * (1.0 - e_fit) / (206265.)

    # calculate density (in Mo/pc^3)
    density_fit = m_fit / ((4.0 / 3.0) * math.pi * pdist_fit**3)

    # Set up axis labels and units
    x0_fit *= 1000.0  # in mas (assumed scale = 1)
    y0_fit *= 1000.0  # in mas (assumed scale = 1)
    m_fit /= 1e6  # in millions of solar masses
    density_fit /= 1e6  # in 10^15 Mo/pc^3

    # Read in the efit monte carlo output file. Reading in this way
    # organizes stuff by column (e.g. table[0] = first column)
    table = asciidata.open(mcfile)

    # Make things into arrays of floats, etc.
    r0 = table[0].tonumpy()  # in pc
    x0 = table[1].tonumpy()  # in pix (working on abs, scale = 1)
    y0 = table[2].tonumpy()  # in pix (working on abs, scale = 1)
    a = table[3].tonumpy()  # in mas
    p = table[4].tonumpy()  # in yrs
    e = table[5].tonumpy()
    t0 = table[6].tonumpy()
    w = table[7].tonumpy()
    i = table[8].tonumpy()
    o = table[9].tonumpy()
    vx = table[10].tonumpy()  # mas/yr
    vy = table[11].tonumpy()  # mas/yr
    vz = table[12].tonumpy()  # km/s

    # We need some cleanup... there is some junk in these results
    idx = (where((r0 > 1000) & (abs(x0) < 1) & (abs(y0) < 1) & (a > 0)))[0]
    r0 = r0[idx]
    x0 = x0[idx]
    y0 = y0[idx]
    a = a[idx]
    p = p[idx]
    e = e[idx]
    t0 = t0[idx]
    w = w[idx]
    i = i[idx]
    o = o[idx]
    vx = vx[idx]
    vy = vy[idx]
    vz = vz[idx]

    # convert semi-major axis and period into mass
    m = (a * r0 / 1000.0)**3 / p**2

    # calculate periapse distance (in mpc)
    pdist = a * r0 * (1.0 - e) / (206265.)

    # calculate density (in Mo/pc^3)
    density = m / ((4.0 / 3.0) * math.pi * pdist**3)

    # Set up axis labels and units
    x0 *= 1000.0  # in mas (assumed scale = 1)
    y0 *= 1000.0  # in mas (assumed scale = 1)
    m /= 1e6  # in millions of solar masses
    density /= 1e6  # in 10^15 Mo/pc^3

    axisLabel_x0 = 'Sgr A* X Position (mas)'
    axisLabel_y0 = 'Sgr A* Y Position (mas)'
    axisLabel_r0 = 'Ro (pc)'
    axisLabel_p = 'Period (yr)'
    axisLabel_a = 'Semi-Major Axis (mas)'
    axisLabel_e = 'Eccentricity'
    axisLabel_t0 = 'Epoch of Periapse (yr)'
    axisLabel_w = 'Argument of Periapse (deg)'
    axisLabel_i = 'Inclination (deg)'
    axisLabel_o = 'Angle of the Ascending Node (deg)'
    axisLabel_vx = 'Sgr A* X Velocity (mas/yr)'
    axisLabel_vy = 'Sgr A* Y Velocity (mas/yr)'
    axisLabel_vz = 'Sgr A* Z Velocity (km/s)'
    axisLabel_m = 'Mass (million solar masses)'
    axisLabel_pdist = 'Periapse Distance (mpc)'
    axisLabel_density = 'Density (10^15 Mo/pc^3)'

    _out = open('plotParamsProb_limits.txt', 'w')
    _out.write('%6s  %10s  %10s    ' % ('#Param', 'BestFit', 'PeakProb'))
    _out.write('%10s %10s   %10s %10s   ' % \
               ('1sigma_lo', '1sigma_hi', '1sig(-)', '1sig(+)'))
    _out.write('%10s %10s   %10s %10s   ' % \
               ('2sigma_lo', '2sigma_hi', '2sig(-)', '2sig(+)'))
    _out.write('%10s %10s   %10s %10s\n' % \
               ('3sigma_lo', '3sigma_hi', '3sig(-)', '3sig(+)'))

    def plotProb(var, label, suffix, bestFit):
        # Compute the probability distribution
        # (not the probability density function)
        (prob, bins) = matplotlib.mlab.hist(var, bins=40, normed=False)
        prob = array(prob, dtype=float) / prob.sum()  # normalize

        # Calculate the peak of the probability distribution
        # and the confidence intervals from the 1D Probs.
        sid = (prob.argsort())[::-1]  #  indices for a reverse sort
        probSort = prob[sid]

        peakPix = sid[0]
        peakVal = bins[peakPix]
        peakProb = prob[peakPix]

        # Make a cumulative distribution function starting from the
        # highest pixel value. This way we can find the level above
        # which 68% of the trials will fall.
        cdf = cumsum(probSort)

        # Determine point at which we reach XX confidence
        idx1 = (where(cdf > 0.6827))[0]  # 1 sigma
        idx2 = (where(cdf > 0.9545))[0]  # 2 sigma
        idx3 = (where(cdf > 0.9973))[0]  # 3 sigma

        if ((len(idx1) < 2) or (len(idx2) < 2) or (len(idx3) < 2)):
            clf()
            hist(var)
            print 'Min, Max = ', var.min(), var.max()
            print idx1
            print idx2
            print idx3

        level1 = probSort[idx1[0]]
        level2 = probSort[idx2[0]]
        level3 = probSort[idx3[0]]

        # Find the range of values
        idx1 = (where((prob > level1)))[0]
        idx2 = (where((prob > level2)))[0]
        idx3 = (where((prob > level3)))[0]

        # Parameter Range:
        range1 = array([bins[idx1[0]], bins[idx1[-1]]])
        range2 = array([bins[idx2[0]], bins[idx2[-1]]])
        range3 = array([bins[idx3[0]], bins[idx3[-1]]])

        # Plus/Minus Errors:
        pmErr1 = abs(range1 - peakVal)
        pmErr2 = abs(range2 - peakVal)
        pmErr3 = abs(range3 - peakVal)

        pmErr1_best = abs(range1 - bestFit)
        pmErr2_best = abs(range2 - bestFit)
        pmErr3_best = abs(range3 - bestFit)

        # Find the min and max values for each confidence
        print ''
        print 'Best Fit vs. Peak of Prob. Dist. for the %s' % label
        print '   %6s = %f   vs.   %f' % (suffix, bestFit, peakVal)
        print '1, 2, 3 Sigma Confidence Intervals for the %s' % label
        print '   68.27%% = [%10.4f -- %10.4f] or -/+ [%10.4f, %10.4f] [%10.4f, %10.4f]' % \
              (range1[0], range1[1], pmErr1_best[0], pmErr1_best[1], pmErr1[0], pmErr1[1])
        print '   95.45%% = [%10.4f -- %10.4f] or -/+ [%10.4f, %10.4f] [%10.4f, %10.4f]' % \
              (range2[0], range2[1], pmErr2_best[0], pmErr2_best[1], pmErr2[0], pmErr2[1])
        print '   99.73%% = [%10.4f -- %10.4f] or -/+ [%10.4f, %10.4f] [%10.4f, %10.4f]' % \
              (range3[0], range3[1], pmErr3_best[0], pmErr3_best[1], pmErr3[0], pmErr3[1])

        # Write in an output file:
        _out.write('%6s  %10.4f  %10.4f    ' % (suffix, bestFit, peakVal))
        _out.write('%10.4f %10.4f  %10.4f %10.4f    ' % \
                   (range1[0], range1[1], pmErr1[0], pmErr1[1]))
        _out.write('%10.4f %10.4f  %10.4f %10.4f    ' % \
                   (range2[0], range2[1], pmErr2[0], pmErr2[1]))
        _out.write('%10.4f %10.4f  %10.4f %10.4f\n' % \
                   (range3[0], range3[1], pmErr3[0], pmErr3[1]))

        clf()
        (pbins, pprob) = histNofill.convertForPlot(bins, prob)
        plot(pbins, pprob, color='black')
        xlabel(label)
        ylabel('Probability')

        # Plot the best-fit value
        #quiver([bestFit], [peakProb * 1.1], [0], [-peakProb*0.1])

        if (suffix == 't0'):
            gca().get_xaxis().set_major_formatter(FormatStrFormatter('%.2f'))

        savefig('plotParamsProb_' + suffix + '.png')
        savefig('plotParamsProb_' + suffix + '.eps')

        return (pbins, pprob)

    (x0bins, x0hist) = plotProb(x0, axisLabel_x0, 'x0', x0_fit)
    (y0bins, y0hist) = plotProb(y0, axisLabel_y0, 'y0', y0_fit)
    (r0bins, r0hist) = plotProb(r0, axisLabel_r0, 'r0', r0_fit)
    (pbins, phist) = plotProb(p, axisLabel_p, 'p', p_fit)
    (abins, ahist) = plotProb(a, axisLabel_a, 'a', a_fit)
    (ebins, ehist) = plotProb(e, axisLabel_e, 'e', e_fit)
    (t0bins, t0hist) = plotProb(t0, axisLabel_t0, 't0', t0_fit)
    (wbins, whist) = plotProb(w, axisLabel_w, 'w', w_fit)
    (ibins, ihist) = plotProb(i, axisLabel_i, 'i', i_fit)
    (obins, ohist) = plotProb(o, axisLabel_o, 'o', o_fit)
    (vxbins, vxhist) = plotProb(vx, axisLabel_vx, 'vx', vx_fit)
    (vybins, vyhist) = plotProb(vy, axisLabel_vy, 'vy', vy_fit)
    #  (vzbins, vzhist) = plotProb(vz, axisLabel_vz, 'vz', vz_fit)
    (mbins, mhist) = plotProb(m, axisLabel_m, 'm', m_fit)
    (pdbins, pdhist) = plotProb(pdist, axisLabel_pdist, 'pdist', pdist_fit)
    (densbins, denshist) = plotProb(density, axisLabel_density, 'density',
                                    density_fit)

    _out.close()

    # Now lets make 2 combined plots with panels for each parameter
    # Plot 1:
    #   x0  y0  r0
    #   vx  vy  vz
    #   m

    rc('axes', titlesize=10, labelsize=10)
    rc('xtick', labelsize=8)
    rc('ytick', labelsize=8)

    figure(2, figsize=(11, 13))
    clf()
    subplots_adjust(bottom=0.05, left=0.09, right=0.95, top=0.97)

    # Row 1
    subplot(5, 3, 1)
    plot(x0bins, x0hist, color='black')
    xlabel(axisLabel_x0)
    ylabel('Probability')

    subplot(5, 3, 2)
    plot(y0bins, y0hist, color='black')
    xlabel(axisLabel_y0)
    ylabel('Probability')

    subplot(5, 3, 3)
    plot(r0bins, r0hist, color='black')
    xlabel(axisLabel_r0)
    ylabel('Probability')

    # Row 2
    subplot(5, 3, 4)
    plot(vxbins, vxhist, color='black')
    xlabel(axisLabel_vx)
    ylabel('Probability')

    subplot(5, 3, 5)
    plot(vybins, vyhist, color='black')
    xlabel(axisLabel_vy)
    ylabel('Probability')

    subplot(5, 3, 6)
    #  plot(vzbins, vzhist, color='black')
    xlabel(axisLabel_vz)
    ylabel('Probability')

    # Row 3
    subplot(5, 3, 8)
    plot(mbins, mhist, color='black')
    xlabel(axisLabel_m)
    ylabel('Probability')

    # Row 4
    subplot(5, 3, 10)
    plot(pbins, phist, color='black')
    xlabel(axisLabel_p)
    ylabel('Probability')

    subplot(5, 3, 11)
    plot(t0bins, t0hist, color='black')
    xlabel(axisLabel_t0)
    ylabel('Probability')
    gca().get_xaxis().set_major_formatter(FormatStrFormatter('%.2f'))

    subplot(5, 3, 12)
    plot(ebins, ehist, color='black')
    xlabel(axisLabel_e)
    ylabel('Probability')

    # Row 5
    subplot(5, 3, 13)
    plot(ibins, ihist, color='black')
    xlabel(axisLabel_i)
    ylabel('Probability')

    subplot(5, 3, 14)
    plot(wbins, whist, color='black')
    xlabel(axisLabel_w)
    ylabel('Probability')

    subplot(5, 3, 15)
    plot(obins, ohist, color='black')
    xlabel(axisLabel_o)
    ylabel('Probability')

    savefig('plotParamsProb_all.png')
    savefig('plotParamsProb_all.eps')
    close(2)

    rc('axes', titlesize=14, labelsize=14)
    rc('xtick', labelsize=12)
    rc('ytick', labelsize=12)
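
plotProb builds its confidence intervals by sorting the histogram bins from most to least probable, accumulating their probability, and keeping every bin above the level at which the running sum first exceeds 68.27% (then 95.45% and 99.73%). The same idea in a compact, standalone form that works on raw samples rather than the efit columns (a sketch, not the author's helper):

import numpy as np

def highestProbRange(samples, frac=0.6827, nbins=40):
    """Return (lo, hi) spanned by the most probable bins that
    together enclose `frac` of the probability."""
    prob, edges = np.histogram(samples, bins=nbins)
    prob = prob.astype(float) / prob.sum()

    order = prob.argsort()[::-1]          # most probable bins first
    cdf = np.cumsum(prob[order])
    level = prob[order][np.where(cdf > frac)[0][0]]

    keep = np.where(prob > level)[0]
    return edges[keep[0]], edges[keep[-1] + 1]

samples = np.random.normal(8000.0, 300.0, size=100000)   # e.g. synthetic R0 draws in pc
print('68%% interval: %.1f - %.1f' % highestProbRange(samples))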
Example #10
def compareParams(column,
                  file1=root + '08_02_16/MC/mc_zero.log.a',
                  file2=root + '08_02_16/MCast/mc_zero.log'):
    """
    Compare parameters via their 1D PDFs. 

    Input Parameters:
    column - a string indicating what variable from the efit MC should
    be plotted. Here are the options:

    'r0' (column #1 of efit MC file)
    'x0' (column #2 of efit MC file)
    'y0' (column #3 of efit MC file)
    'a'  (column #4 of efit MC file)
    'p'  (column #5 of efit MC file)
    'e'  (column #6 of efit MC file)
    'm'  (combine columns #1, #4, #5 of efit MC file)
    'periapse' (Periapse distance)
    """
    cc = objects.Constants()

    # Read in the efit monte carlo output file. Reading in this way
    # organizes stuff by column (e.g. table[0] = first column)
    table = asciidata.open(file1)

    # Make things into arrays of floats, etc.
    r01 = table[0].tonumpy()  # in pc
    x01 = table[1].tonumpy()  # in pix (working on abs, scale = 1)
    y01 = table[2].tonumpy()  # in pix (working on abs, scale = 1)
    a1 = table[3].tonumpy()  # in mas
    p1 = table[4].tonumpy()  # in yrs
    e1 = table[5].tonumpy()

    # convert semi-major axis and period into mass
    m1 = (a1 * r01 / 1000.0)**3 / p1**2

    # calculate periapse distance (in pc)
    pdist1 = a1 * r01 * (1.0 - e1) / (cc.au_in_pc * 1000.0)

    # Read in the efit monte carlo output file. Reading in this way
    # organizes stuff by column (e.g. table[0] = first column)
    table = asciidata.open(file2)

    # Make things into arrays of floats, etc.
    r02 = table[0].tonumpy()  # in pc
    x02 = table[1].tonumpy()  # in pix
    y02 = table[2].tonumpy()  # in pix
    a2 = table[3].tonumpy()  # in mas
    p2 = table[4].tonumpy()  # in yrs
    e2 = table[5].tonumpy()

    # convert semi-major axis and period into mass
    m2 = (a2 * r02 / 1000.0)**3 / p2**2

    # calculate periapse distance (in pc)
    pdist2 = a2 * r02 * (1.0 - e2) / (cc.au_in_pc * 1000.0)

    if (column == 'r0'):
        var1 = r01
        var2 = r02
        axisLabel = 'Ro (pc)'
    if (column == 'x0'):
        var1 = x01 * 1000.0
        var2 = x02 * 1000.0
        axisLabel = 'Sgr A* X Position (mas)'
    if (column == 'y0'):
        var1 = y01 * 1000.0
        var2 = y02 * 1000.0
        axisLabel = 'Sgr A* Y Position (mas)'
    if (column == 'p'):
        var1 = p1
        var2 = p2
        axisLabel = 'Period (yr)'
    if (column == 'a'):
        var1 = a1
        var2 = a2
        axisLabel = 'Semi-Major Axis (mas)'
    if (column == 'e'):
        var1 = e1
        var2 = e2
        axisLabel = 'Eccentricity'
    if (column == 'm'):
        var1 = m1
        var2 = m2
        axisLabel = 'Mass (Msun)'
    if (column == 'periapse'):
        var1 = pdist1
        var2 = pdist2
        axisLabel = 'Periapse Distance (pc)'

    minVar = concatenate((var1, var2)).min()
    maxVar = concatenate((var1, var2)).max()
    binsIn = arange(minVar, maxVar, (maxVar - minVar) / 10.0)

    (bins1, data1) = histNofill.hist(binsIn, var1)
    (bins2, data2) = histNofill.hist(binsIn, var2)

    foo = stats.stats.ks_2samp(var1, var2)
    print 'KS Test: Distance Statistic (0 is same) = %5.2f' % (foo[0])

    clf()
    hist1 = plot(bins1, data1 / data1.sum(), color='red', linewidth=3)
    hist2 = plot(bins2, data2 / data2.sum(), color='blue', linewidth=3)
    legend((hist1, hist2), (file1, file2))
    xlabel(axisLabel)
    ylabel('Probability')

    savefig('compareParams_' + column + '.png')
    savefig('compareParams_' + column + '.eps')
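
scipy's two-sample KS test returns both the distance statistic and a p-value; the example prints only foo[0], while foo[1] gives the probability of getting a distance at least that large if the two samples came from the same parent distribution. A short usage sketch with synthetic inputs:

import numpy as np
from scipy import stats

var1 = np.random.normal(0.0, 1.0, size=2000)
var2 = np.random.normal(0.1, 1.0, size=2000)

D, pvalue = stats.ks_2samp(var1, var2)
print('KS distance = %5.3f, p-value = %5.3f' % (D, pvalue))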
Example #11
def plotMassDensity(mbincnt=50,
                    dbincnt=50,
                    showContour=True,
                    outfile='massDensity',
                    file=root + '07_05_18/mc_zero3e4.log'):
    """
    Plot a 2D histogram of mass and density values output by an efit monte
    carlo. The density is calculated from the mass and the periapse passage 
    of the star's orbit found in the log file (presumably S0-2).
    Saves the plot to a EPS file. You must pass in the file name
    containing the simulation results. Parameters are:

    outfile:  Root name of the file to save the plot to (def=massDensity)
    file:     Name of the file containing the efit output. By default
              this is set to
              /net/uni/Groups/ghez/ghez/analysis/Ro/07_05_18/mc_zero3e4.log
              as an example file.
    mbincnt:  Number of bins across the mass axis in the histogram (def=50)
    dbincnt: Number of bins across the density axis in the histogram (def=50)
    showContour:  Set to True (default value) to draw contours.
    """
    cc = objects.Constants()

    # Read in the efit monte carlo output file. Reading in this way
    # organizes stuff by column (e.g. table[0] = first column)
    table = asciidata.open(file)

    # Make things into arrays of floats, etc.
    r0 = table[0].tonumpy()  # in pc
    x0 = table[1].tonumpy()  # in pix
    y0 = table[2].tonumpy()  # in pix
    amas = table[3].tonumpy()  # in mas
    p = table[4].tonumpy()  # in yrs
    e = table[5].tonumpy()

    # convert semi-major axis into AU
    a = amas * r0 / 1000.0

    # convert semi-major axis and period into mass
    m = a**3 / p**2

    # calculate periapse distance (in pc)
    pdist = a * (1.0 - e) / cc.au_in_pc

    # determine density (solar masses per pc^3)
    density = m / ((4.0 / 3.0) * math.pi * pdist**3)

    ##########
    #
    # Make 2D histogram
    #
    ##########
    # Lets put everything in log scale first
    mlog = log10(m)
    dlog = log10(density)

    (hist, mbins, dbins) = h2d.histogram2d(mlog, dlog, bins=(mbincnt, dbincnt))

    # Need to convert the 2d histogram into floats
    probDist = array(hist, dtype=float)

    # We can turn the histogram into a probability distribution
    # just by dividing by the total number of trials
    probDist /= float(len(m))

    if (showContour == True):
        levels = getContourLevels(probDist)

    ##########
    #
    # Plotting
    #
    ##########
    rc('text', usetex=True)

    # Clear the plot
    clf()

    # Display the 2D histogram
    # Convert bins into x10^6 Msun and kpc
    imshow(probDist,
           cmap=cm.hot_r,
           origin='lower',
           aspect='auto',
           extent=[mbins[0], mbins[-1], dbins[0], dbins[-1]])

    # Make a colorbar
    #colorbar()

    # Draw contours
    if (showContour == True):
        contour(probDist,
                levels,
                origin=None,
                colors='black',
                extent=[mbins[0], mbins[-1], dbins[0], dbins[-1]])

    # Stretch axes
    axis('tight')

    # Draw labels using latex by putting "r" before the string
    font = {'fontname': 'Sans', 'fontsize': 20}
    xlabel(r'$\log_{10} M$ (M$_\odot$)', font)
    ylabel(r'$\log_{10} \rho$ (M$_\odot$/pc$^3$)', font)

    # Set the label axis formatting.
    thePlot = gca()
    setp(thePlot.get_xticklabels(), fontsize=16, fontweight='bold')
    setp(thePlot.get_yticklabels(), fontsize=16, fontweight='bold')

    savefig(outfile + '.eps')
    savefig(outfile + '.png')

    # Turn off LaTeX processing
    rc('text', usetex=False)
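
getContourLevels is not defined in this example. A common implementation, offered here only as a guess at what the helper does, sorts the 2D probability pixels from high to low, accumulates them, and records the pixel value at which 68.3%, 95.4%, and 99.7% of the probability is enclosed:

import numpy as np

def getContourLevels(probDist, fractions=(0.6827, 0.9545, 0.9973)):
    """Probability levels whose contours enclose the requested fractions
    of the total probability (a sketch of the assumed helper)."""
    flat = np.sort(probDist.ravel())[::-1]   # highest pixels first
    cdf = np.cumsum(flat)
    cdf /= cdf[-1]                           # normalize in case probDist does not sum to 1
    return [flat[np.where(cdf >= f)[0][0]] for f in fractions]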
Example #12
def compareAlignVels(alignRoot, polyRoot):
    """
    Compare the velocities that are derived in align and in polyfit.
    There must be the same number of stars (and in the same order)
    in the align and polyfit output.

    Input Parameters:
    -- the root name of the align output (e.g. 'align/align_d_rms_t')
    -- the root name of the polyfit output (e.g. 'polyfit_d/fit')
    """
    cc = objects.Constants()
    s = starset.StarSet(alignRoot, absolute=0)
    s.loadPolyfit(polyRoot, arcsec=0)

    pixyr2kms = 0.00995 * cc.asy_to_kms
    #pixyr2kms = cc.asy_to_kms

    # Need to get rid of the central arcsecond sources
    r = s.getArray('r2d')
    idx = (py.where(r > 0.5))[0]
    s.stars = [s.stars[ii] for ii in idx]
    r = s.getArray('r2d')

    # Align Fit in Pixels
    vxa = s.getArray('fitpXalign.v') * pixyr2kms
    vya = s.getArray('fitpYalign.v') * pixyr2kms
    vxa_err = s.getArray('fitpXalign.verr') * pixyr2kms
    vya_err = s.getArray('fitpYalign.verr') * pixyr2kms
    vxp = s.getArray('fitpXv.v') * pixyr2kms
    vyp = s.getArray('fitpYv.v') * pixyr2kms
    vxp_err = s.getArray('fitpXv.verr') * pixyr2kms
    vyp_err = s.getArray('fitpYv.verr') * pixyr2kms

    # Plot differences in Sigmas but can't combine (not independent)
    vxdiff = vxa - vxp
    vydiff = vya - vyp

    vxsig1 = vxdiff / vxa_err
    vysig1 = vydiff / vya_err
    vxsig2 = vxdiff / vxp_err
    vysig2 = vydiff / vyp_err

    #####
    # Crude velocity plot (absolute velocity differences)
    #####
    py.clf()
    py.subplot(2, 1, 1)
    py.plot(vxa, vxp, 'k.')
    py.xlabel('Align vx (km/s)')
    py.ylabel('Polyfit vx (km/s)')
    py.title('Align vs. Polyfit Vel')

    py.subplot(2, 1, 2)
    py.plot(vya, vyp, 'k.')
    py.xlabel('Align vy (km/s)')
    py.ylabel('Polyfit vy (km/s)')
    py.savefig('plots/align_vs_poly_v.png')

    #####
    # Compare velocity errors
    #####
    py.clf()
    py.plot(vxa_err, vxp_err, 'r.')
    py.plot(vya_err, vyp_err, 'b.')
    py.plot([0, 30], [0, 30], 'k--')
    py.axis('equal')
    py.axis([0, 30, 0, 30])
    py.xlabel('Align Vel Error (km/s)')
    py.ylabel('Poly Vel Error (km/s)')
    py.legend(('X', 'Y'))
    py.title('Align vs. Polyfit Vel. Errors')
    py.savefig('plots/align_vs_poly_verr.png')

    #####
    # Absolute velocity differences
    #####
    py.clf()
    py.plot(vxa, vxdiff, 'r.')
    py.plot(vya, vydiff, 'b.')
    py.legend(('X', 'Y'))
    py.xlabel('Align v (km/s)')
    py.ylabel('Align - Poly (km/s)')
    py.title('Align - Polyfit Vel.')
    py.savefig('plots/align_vs_poly_vdiff.png')

    #####
    # Velocity difference in sigmas
    #####
    py.clf()
    py.plot(vxa, vxsig1, 'r.')
    py.plot(vya, vysig1, 'b.')
    py.legend(('X', 'Y'))
    py.title('Diff over align error')
    py.xlabel('Align v (km/s)')
    py.ylabel('Align - Poly (sigma)')
    py.title('(Align - Polyfit) / Align Err')
    py.savefig('plots/align_vs_poly_vsig_alignerr.png')

    py.clf()
    py.plot(vxa, vxsig2, 'r.')
    py.plot(vya, vysig2, 'b.')
    py.legend(('X', 'Y'))
    py.title('Diff over poly error')
    py.xlabel('Align v (km/s)')
    py.ylabel('Align - Poly (sigma)')
    py.title('(Align - Polyfit) / Polyfit Err')
    py.savefig('plots/align_vs_poly_vsig_polyerr.png')

    #####
    # Histogram of Sigmas
    #####
    binsIn = py.arange(-6, 6, 0.5)
    (bins, vxhist1) = histNofill.hist(binsIn, vxsig1, normed=True)
    (bins, vxhist2) = histNofill.hist(binsIn, vxsig2, normed=True)
    (bins, vyhist1) = histNofill.hist(binsIn, vysig1, normed=True)
    (bins, vyhist2) = histNofill.hist(binsIn, vysig2, normed=True)

    # Make a gaussian for what is expected
    gg = stats.distributions.norm()
    gaussian = gg.pdf(bins)

    py.clf()
    py.plot(bins, vxhist1, 'r-')
    py.plot(bins, vyhist1, 'b-')
    py.plot(bins, gaussian, 'k--')
    py.legend(('X', 'Y'))
    py.xlabel('Align - Poly (sigma)')
    py.title('Vel Diff Significance (Align Err)')
    py.savefig('plots/align_vs_poly_hist_alignerr.png')

    py.clf()
    py.plot(bins, vxhist2, 'r-')
    py.plot(bins, vyhist2, 'b-')
    py.plot(bins, gaussian, 'k--')
    py.legend(('X', 'Y'))
    py.xlabel('Align - Poly (sigma)')
    py.title('Vel Diff Significance (Poly Err)')
    py.savefig('plots/align_vs_poly_hist_polyerr.png')
Example #13
def byf73_properMotions():
    """
    Calculate the range of proper motions for different possible
    distances, assuming a circular rotation curve normalized to
    Theta0 = 254 km/s at the solar circle (Reid et al. 2009).

    The cluster's galactic longitude (galacticLat, in degrees) and
    assumed distance are set at the top of the function.
    """
    galacticLat = 286.0
    distance = 2.5 # kpc
    clusterName = 'BYF73'

    cc = objects.Constants()

    # R0 - distance from Earth to the GC (Ghez et al. 2009; Reid et al. 2009)
    # d  - distance from Earth to the object. 
    # R  - distance from the object to the GC. Need this to get velocity.
    # Theta0 - rotational velocity at solar circle (Reid et al. 2009)
    #
    # l  - galacticLat in radians
    # 
    l = math.radians(galacticLat)
    R0 = 8.4        # kpc
    Theta0 = 254.0  # km/s
    
    d = np.arange(1.0, 10, 0.05) # distance in kpc

    cosl = math.cos(l)
    sinl = math.sin(l)

    R = np.sqrt( d**2 + R0**2 - (2.0 * d * R0 * math.cos(l)) )

    x = R0 - d * cosl
    y = d * sinl

    # Assume a rotation curve based on a reasonable potential.
    # Pulled from Brunetti and Pfenniger (2010)
    vc = Theta0 * R / np.sqrt(1 + R**2)


    oneR_2 = np.sqrt(1.0 + R**2)
    oneR0_2 = math.sqrt(1.0 + R0**2)

    vt = (x*R0 - R**2) / (d * oneR_2) + (x*R0 - R0**2) / (d * oneR0_2)
    vt *= Theta0

    vr = ((-y * R0) / d) * ((1.0 / oneR_2) - (1.0 / oneR0_2))
    vr *= Theta0

    
    # Proper motion (mas/yr)
    kms_masyr = 1.0e5 * cc.sec_in_yr / (d * cc.cm_in_au)
    pm = vt * kms_masyr
    pm_hi = (vt + 5.0) * kms_masyr
    pm_lo = (vt - 5.0) * kms_masyr

    py.clf()
    py.figure(linewidth=2)
    py.plot(d, pm, 'r-', linewidth=2)
    py.plot(d, pm_hi, 'r--', linewidth=2)
    py.plot(d, pm_lo, 'r--', linewidth=2)
    py.xlabel('Distance (kpc)', fontsize=22, fontweight='bold')
    py.ylabel('Proper Motion (mas/yr)', fontsize=22, fontweight='bold')
    title = '%s (l = %d)' % (clusterName, galacticLat)
    py.title(title, fontsize=22, fontweight='bold')
    

    # Calculate Proper Motion Errors
    # Positional Errors for each measurements
    posErr = 1.0 # mas

    # Times of measurements
    t2 = np.arange(2011, 2013.001, 2.0)
    t4 = np.arange(2011, 2015.001, 2.0)

    # Proper Motion error
    pmErr2 = posErr / math.sqrt( ((t2 - t2.mean())**2).sum() )
    pmErr4 = posErr / math.sqrt( ((t4 - t4.mean())**2).sum() )

    print 'Proper Motion Error Calculation:'
    print '   Positional Error: %3.1f mas' % posErr
    print ''
    print '   Time of Measurements: ', t2
    print '   Proper Motion Errors: %4.2f mas/yr' % pmErr2
    print ''
    print '   Time of Measurements: ', t4
    print '   Proper Motion Errors: %4.2f mas/yr' % pmErr4

    diff = np.abs(d - distance)
    ii = diff.argmin()
    pm_max2 = pm - pm + pm[ii] + (pmErr2/2.0)
    pm_min2 = pm - pm + pm[ii] - (pmErr2/2.0)
    pm_max4 = pm - pm + pm[ii] + (pmErr4/2.0)
    pm_min4 = pm - pm + pm[ii] - (pmErr4/2.0)

    py.fill_between(d, pm_min2, pm_max2, color='grey', alpha=0.3)
    py.fill_between(d, pm_min4, pm_max4, color='grey', alpha=0.5)
    py.plot([distance], [pm[ii]], 'k*', ms=10)

    py.ylim(-9, 0)
    
    ax = py.gca()
    for tick in ax.xaxis.get_major_ticks(): 
        tick.label1.set_fontsize(16) 
        tick.label1.set_fontweight('bold') 
    for tick in ax.yaxis.get_major_ticks(): 
        tick.label1.set_fontsize(16) 
        tick.label1.set_fontweight('bold') 

    outfile = 'hst_clusterPropMot_%s' % clusterName
    py.savefig(outfile + '.png')
    py.savefig(outfile + '.eps')
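
The conversion kms_masyr = 1e5 * sec_in_yr / (d * cm_in_au), with d in kpc, is the familiar relation mu[mas/yr] = v[km/s] / (4.74 d[kpc]) written out in cgs, because 1 mas at d kpc subtends d AU. A quick standalone check using standard constant values rather than objects.Constants():

import numpy as np

cm_in_au = 1.495978707e13   # cm per AU
sec_in_yr = 3.15576e7       # seconds per Julian year

d = np.array([1.0, 2.5, 5.0, 10.0])   # kpc
kms_masyr = 1.0e5 * sec_in_yr / (d * cm_in_au)

# Compare with the textbook factor 1 / (4.74 d).
for dd, f in zip(d, kms_masyr):
    print('d = %4.1f kpc: 1 km/s = %.3f mas/yr (1/(4.74 d) = %.3f)'
          % (dd, f, 1.0 / (4.74 * dd)))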
Example #14
def fit(root=None, silent=False, label=False, outsuffix=''):
    """Fit the disk of young stars (clockwise) using our latest
    data and the indirect velocity method of Levin & Beloborodov (2003).
    
    @keyword root: The root directory for align stuff (default is
        '/u/jlu/work/gc/proper_motion/align/08_03_26/').
    @type root: string

    @keyword silent: Set to True for no printed output.
    @type silent: boolean
    """

    def fitfunc(n_in, fjac=None, yng=None):
        """The fit function that is passed to mpfit."""

        # Normalize
        n = n_in / sqrt(vdot(n_in, n_in))

        vx = yng.getArray('vx')
        vy = yng.getArray('vy')
        vz = yng.getArray('vz')
        vxerr = yng.getArray('vxerr')
        vyerr = yng.getArray('vyerr')
        vzerr = yng.getArray('vzerr')
        r = yng.getArray('r2d')
        jz = yng.getArray('jz')

        # Chi-squared formula from Levin's paper
        num = len(yng.stars)
        devs = arange(num, dtype=float)
        bias = arange(num, dtype=float)

        for i in range(num):
            v = array([vx[i], vy[i], vz[i]])
            verr = array([vxerr[i], vyerr[i], vzerr[i]])

            top = vdot(n, v)
            bott = sqrt(((n * verr)**2).sum())

            #devs[i] = (1.0 / num) * pow(top, 2) / pow(bott, 2)
            devs[i] = top / bott
            bias[i] = 2.0 * log(bott)
            #print '  %2d  %7.3f  %7.3f' % (i, devs[i], bias[i])

        devsUnbias = sqrt(devs**2 + bias)

        status = 0
        return [status, devs / sqrt(num - 1)]



    ##########
    #
    # Load up young star sample
    #
    ##########

    if root is None:
        root = '/u/jlu/work/gc/proper_motion/align/08_03_26/'

    yng = young.loadYoungStars(root)
    cc = objects.Constants()

    # Decide which young stars we are including in our fits.
    x = yng.getArray('x')
    y = yng.getArray('y')
    vX = yng.getArray('vx')
    vY = yng.getArray('vy')
    vZ = yng.getArray('vz')
    sigx = yng.getArray('vxerr')
    sigy = yng.getArray('vyerr')
    sigz = yng.getArray('vzerr')
    r = yng.getArray('r2d')
    jz = yng.getArray('jz')
    names = yng.getArray('name')

    # Read a table of sources + disk membership probability
    diskTabFile = root + 'tables/disk_membership_prob.dat'
    diskTab = asciidata.open(diskTabFile)
    diskNames = [diskTab[0][i].strip() for i in range(diskTab.nrows)]
    diskProbs = diskTab[1].tonumpy()

    indisk = zeros(len(names))
    indiskNames = []
    for i in range(len(names)):
        try:
            idx = diskNames.index(names[i])

            if (diskProbs[idx] > 2.7e-3):
                indisk[i] = 1
                indiskNames.append(names[i])
        except ValueError, e:
            print 'Failed'
            foo = 0
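
The fit function above follows the Levin & Beloborodov (2003) approach: for a trial disk normal n, each star whose velocity lies in the disk plane should have v . n consistent with zero, so its deviation is (v . n) / sqrt(sum((n * verr)^2)). A small standalone sketch of that per-star statistic with made-up velocities (not the survey data):

import numpy as np

def diskDeviations(n, vel, velerr):
    """Per-star deviations of velocities from the plane with normal n."""
    n = n / np.sqrt(np.dot(n, n))                  # normalize the trial normal
    top = vel.dot(n)                               # v . n for each star
    bottom = np.sqrt(((n * velerr)**2).sum(axis=1))
    return top / bottom

# Hypothetical velocities (km/s) and errors for a handful of stars.
vel = np.random.normal(0.0, 300.0, size=(10, 3))
velerr = np.full((10, 3), 20.0)
n_trial = np.array([0.1, -0.9, 0.4])

print(diskDeviations(n_trial, vel, velerr))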