def __init__(self, prop,
                 sim='L0025N0376',
                 run='REFERENCE',
                 snapnum=28):
        print 'Initialising box for imaging...'

        tag = snapdict[str(snapnum)][0]

        if prop == 'xrays':
            print 'Loading particle X-ray luminosities'
            xray_data = h5.File('/hpcdata7/arijdav1/Lx_matching/'+sim+'_'+run+'/'+tag+'.hdf5','r')
            self.xrays = np.array(xray_data['Xray_luminosity']) / 1e30
            self.xray_pids = np.array(xray_data['ParticleIDs'])

        self.sim_path = '/data5/simulations/EAGLE/' + sim + '/' + run + '/data/'

        # Get volume information
        boxsize = E.readAttribute('SNAP', self.sim_path, tag, "/Header/BoxSize")
        self.h = E.readAttribute('SNAP', self.sim_path, tag, "/Header/HubbleParam")
        self.a_0 = E.readAttribute('SNAP', self.sim_path, tag, "/Header/ExpansionFactor")

        self.sim = sim
        self.run = run
        self.tag = tag
        self.property = prop
        self.boxsize = boxsize * self.a_0/self.h
        self.snapnum = snapnum
    def __init__(self,
                 prop,
                 sim='L0025N0376',
                 run='REFERENCE',
                 snapnum=28,
                 tag=None,
                 quiet=False,
                 ion=None):

        if not quiet:
            print 'Initialising box for imaging...'

        if sim == 'L0100N1504':
            storage_loc = '/hpcdata6'
        else:
            storage_loc = '/hpcdata5'

        if tag is None:
            tag = snapdict[str(snapnum)][0]
        else:
            snapnum = int(tag[:3])

        if prop == 'xrays':
            if not quiet:
                print 'Loading particle X-ray luminosities'
            xray_data = h5.File(
                '/hpcdata7/arijdav1/Lx_matching/' + sim + '_' + run + '/' +
                tag + '.hdf5', 'r')
            self.xrays = np.array(xray_data['Xray_luminosity']) / 1e30
            self.xray_pids = np.array(xray_data['ParticleIDs'])

        self.sim_path = storage_loc + '/simulations/EAGLE/' + sim + '/' + run + '/data/'

        # Get volume information
        boxsize = E.readAttribute('SNAP', self.sim_path, tag,
                                  "/Header/BoxSize")
        self.h = E.readAttribute('SNAP', self.sim_path, tag,
                                 "/Header/HubbleParam")
        self.a_0 = E.readAttribute('SNAP', self.sim_path, tag,
                                   "/Header/ExpansionFactor")
        self.z = (1. / self.a_0) - 1.
        self.sim = sim
        self.run = run
        self.tag = tag
        self.property = prop
        self.boxsize = boxsize * self.a_0 / self.h
        self.snapnum = snapnum
        self.quiet = quiet
        self.storage_loc = storage_loc
        self.ion = ion
Example No. 3
def load_attribute(att_label,
                   array_type='SNAP',
                   run='L0100N1504',
                   model='REFERENCE',
                   tag='028_z000p000'):
    sim = '/data5/simulations/EAGLE/' + run + '/' + model + '/data'
    return E.readAttribute(array_type, sim, tag, '/Header/' + att_label)
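
# A couple of hedged usage sketches; 'BoxSize' and 'HubbleParam' are the same
# EAGLE header attributes read elsewhere in these examples.
h100 = load_attribute('HubbleParam')
boxsize = load_attribute('BoxSize', run='L0025N0376', tag='028_z000p000')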
Example No. 4
    def __init__(self,
                 sim='L0100N1504',
                 run='REFERENCE',
                 tag='028_z000p000',
                 pdata_type='SNAPSHOT',
                 data_location='/hpcdata5/simulations/EAGLE/'):

        # Initialises everything and loads in information about the entire simulation volume

        if run == 'EagleVariation_NOAGN':
            self.incBH = False
        else:
            self.incBH = True

        self.sim = sim
        self.run = run
        self.tag = tag

        sim_path = data_location + sim + '/' + run + '/data/'

        # Name of one file from the snapshot
        self.snapfile = sim_path + 'snapshot_' + tag + '/snap_' + tag + '.0.hdf5'

        # Get volume information
        boxsize = E.readAttribute(pdata_type, sim_path, tag, "/Header/BoxSize")
        self.h = E.readAttribute(pdata_type, sim_path, tag,
                                 "/Header/HubbleParam")
        self.Omega0 = E.readAttribute(pdata_type, sim_path, tag,
                                      "/Header/Omega0")
        self.OmegaLambda = E.readAttribute(pdata_type, sim_path, tag,
                                           "/Header/OmegaLambda")
        self.OmegaBaryon = E.readAttribute(pdata_type, sim_path, tag,
                                           "/Header/OmegaBaryon")
        self.a_0 = E.readAttribute(pdata_type, sim_path, tag,
                                   "/Header/ExpansionFactor")
        self.physical_boxsize = boxsize * self.a_0 / self.h
        self.masstable = E.readAttribute(pdata_type, sim_path, tag,
                                         "/Header/MassTable") / self.h
        self.r200s = E.readArray("SUBFIND_GROUP", sim_path, tag,
                                 "/FOF/Group_R_Crit200")
Example No. 5
    def __init__(self,
                 centers,
                 side_sizes,
                 bulkvels,
                 central_gnums,
                 sgnums,
                 align_radius=0.03,
                 run='L0100N1504',
                 model='REFERENCE',
                 tag='028_z000p000',
                 part_types=['0', '4'],
                 pdata_type='SNAPSHOT',
                 overwrite=True,
                 path='/scratch/eagle/sav/',
                 halotype='any_halos',
                 timing_flag=False):
        vol_file = path + run + '_' + model + '/' + tag + '/FOF' + str(
            int(central_gnums[0])) + '_volume.hdf5'

        if overwrite or not os.path.exists(vol_file):
            sim = '/data5/simulations/EAGLE/' + run + '/' + model + '/data'
            #get boxsize and particle positions
            boxsize = E.readAttribute(pdata_type, sim, tag, "/Header/BoxSize")
            h = E.readAttribute(pdata_type, sim, tag, "/Header/HubbleParam")
            Omega0 = E.readAttribute(pdata_type, sim, tag, "/Header/Omega0")
            OmegaLambda = E.readAttribute(pdata_type, sim, tag,
                                          "/Header/OmegaLambda")
            OmegaBaryon = E.readAttribute(pdata_type, sim, tag,
                                          "/Header/OmegaBaryon")
            a_0 = E.readAttribute(pdata_type, sim, tag,
                                  "/Header/ExpansionFactor")
            boxsize = boxsize / h
            masstable = E.readAttribute(pdata_type, sim, tag,
                                        "/Header/MassTable") / h

            self.mask_type = []
            self.pos_type = []
            self.vel_type = []
            self.mass_type = []
            self.density_type = []
            self.PID_type = []
            self.abund_type = []
            self.smooths = []
            self.gnum_type = []
            self.sfr_type = []
            self.metal = []
            self.temp = []

            num_subs = E.readArray("SUBFIND_GROUP", sim, tag,
                                   "/FOF/NumOfSubhalos")
            r200 = E.readArray("SUBFIND_GROUP", sim, tag,
                               "/FOF/Group_R_Crit200")[num_subs > 0]
            gns = central_gnums - 1
            self.r200 = r200[gns]

            #re-center to 'center' and remove particles outside box
            for t in part_types:

                masks = []
                pos = load_array('Coordinates',
                                 t,
                                 array_type=pdata_type,
                                 run=run,
                                 model=model,
                                 tag=tag)
                grouppos = []

                # Create a list of COPs where each COP is given in the co-ordinate system of the previous COP.
                shift_centres = np.zeros(np.shape(centers))
                shift_centres[0, :] = centers[0, :]
                for c in range(len(centers) - 1):
                    shift_centres[c + 1] = centers[c + 1] - centers[c]

                inds = np.arange(len(pos[:, 0]))
                npart = len(inds)

                print 'Creating apertures...'
                for ii in tqdm(range(len(shift_centres))):
                    centre = shift_centres[ii]

                    if ii == 0:
                        current_centre = np.array([0., 0., 0.])
                    else:
                        # the current centre in the true box co-ordinate system
                        current_centre = centers[ii - 1]

                    #if (np.absolute(centre)+side_sizes[ii]/2.).any() > boxsize/2.:

                    # compare element-wise first, then .any(), so the wrap is
                    # triggered when any coordinate spills over the box edge
                    if (((boxsize / 2. + np.absolute(current_centre)) -
                         (np.absolute(centre) + side_sizes[ii] / 2.)) <
                            0.).any():  # Is the group actually on the edge?
                        pos = ne.evaluate("pos-centre")
                        pos[pos[:, 0] < (-1. * boxsize / 2.), 0] += boxsize
                        pos[pos[:, 1] < (-1. * boxsize / 2.), 1] += boxsize
                        pos[pos[:, 2] < (-1. * boxsize / 2.), 2] += boxsize
                        '''
                        pos -= (centre - boxsize/2.)
                        pos = np.mod(pos,boxsize)
                        pos -= boxsize/2.
                        '''
                        #if timing_flag:
                        print 'Wrapped box'

                    else:  # Don't bother doing the wrapping if it doesn't affect the current group
                        s = time()
                        pos = ne.evaluate("pos-centre")
                        transform_time = time() - s

                    rmax = side_sizes[ii] / 2.

                    s = time()
                    # Chop along x
                    xs = pos[:, 0]
                    mask = ne.evaluate(
                        'where(abs(xs)<rmax,True,False)').nonzero()[0]
                    cut_pos = pos[mask, :]
                    cut_inds = inds[mask]
                    # Along y
                    ys = cut_pos[:, 1]
                    mask = ne.evaluate(
                        'where(abs(ys)<rmax,True,False)').nonzero()[0]
                    cut_pos = cut_pos[mask, :]
                    cut_inds = cut_inds[mask]
                    # Along z
                    zs = cut_pos[:, 2]
                    mask = ne.evaluate(
                        'where(abs(zs)<rmax,True,False)').nonzero()[0]
                    cut_pos = cut_pos[mask, :]
                    cut_inds = cut_inds[mask]
                    chop_time = time() - s

                    s = time()
                    r2 = np.einsum('...j,...j->...', cut_pos,
                                   cut_pos)  # get the radii from the centre
                    radial_calculation = time() - s

                    s = time()
                    mask = np.where(r2 < rmax**2)[0]  # make the mask
                    radial_masking = time() - s

                    masks.append(cut_inds[mask])
                    grouppos.append(cut_pos[mask])

                    if timing_flag:
                        print 'Time to transform box: ', transform_time
                        print 'Time to chop box up: ', chop_time
                        print 'Time to calculate radial distances: ', radial_calculation
                        print 'Time to mask by radius: ', radial_masking

                self.pos_type.append(grouppos)
                del pos
                del inds

                velarr = load_array('Velocity',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                groupvel = []
                for ii, mask in enumerate(masks):
                    vel = velarr[mask]
                    vel -= bulkvels[ii]
                    groupvel.append(vel)
                self.vel_type.append(groupvel)
                del velarr

                groupmass = []
                if t != '1':
                    massarr = load_array('Mass',
                                         t,
                                         array_type=pdata_type,
                                         run=run,
                                         model=model,
                                         tag=tag)
                if t == '1':
                    massarr = np.ones(npart) * masstable[1]
                for ii, mask in enumerate(masks):
                    groupmass.append(massarr[mask])
                self.mass_type.append(groupmass)
                del massarr

                if t in ['0', '4', '5']:
                    groupdensity = []
                    if t == '0':
                        densityarr = load_array('Density',
                                                t,
                                                array_type=pdata_type,
                                                run=run,
                                                model=model,
                                                tag=tag)
                    if t == '4':
                        densityarr = load_array('BirthDensity',
                                                t,
                                                array_type=pdata_type,
                                                run=run,
                                                model=model,
                                                tag=tag)
                    if t == '5':
                        densityarr = load_array('BH_Density',
                                                t,
                                                array_type=pdata_type,
                                                run=run,
                                                model=model,
                                                tag=tag)
                    for ii, mask in enumerate(masks):
                        groupdensity.append(densityarr[mask])
                    self.density_type.append(groupdensity)
                    del densityarr

                grouppids = []
                PIDs = load_array('ParticleIDs',
                                  t,
                                  array_type=pdata_type,
                                  run=run,
                                  model=model,
                                  tag=tag)
                for ii, mask in enumerate(masks):
                    grouppids.append(PIDs[mask])
                self.PID_type.append(grouppids)
                del PIDs

                groupgnums = []
                gnums = load_array('GroupNumber',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                for ii, mask in enumerate(masks):
                    groupgnums.append(gnums[mask])
                self.gnum_type.append(groupgnums)
                del gnums

                if t == '0':
                    groupsfrs = []
                    sfr = load_array('StarFormationRate',
                                     t,
                                     array_type=pdata_type,
                                     run=run,
                                     model=model,
                                     tag=tag)
                    for ii, mask in enumerate(masks):
                        groupsfrs.append(sfr[mask])
                    self.sfr_type = groupsfrs

                    grouptemps = []
                    temps = load_array('Temperature',
                                       t,
                                       array_type=pdata_type,
                                       run=run,
                                       model=model,
                                       tag=tag)
                    for ii, mask in enumerate(masks):
                        grouptemps.append(temps[mask])
                    self.temp = grouptemps

                if t == '4':
                    groupsftimes = []
                    sftimes = load_array('StellarFormationTime',
                                         t,
                                         array_type=pdata_type,
                                         run=run,
                                         model=model,
                                         tag=tag)
                    for ii, mask in enumerate(masks):
                        groupsftimes.append(sftimes[mask])
                    self.sftimes = groupsftimes

                if t in ['0', '4']:
                    H = load_array('SmoothedElementAbundance/Hydrogen',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                    He = load_array('SmoothedElementAbundance/Helium',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    C = load_array('SmoothedElementAbundance/Carbon',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                    N = load_array('SmoothedElementAbundance/Nitrogen',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                    O = load_array('SmoothedElementAbundance/Oxygen',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                    Ne = load_array('SmoothedElementAbundance/Neon',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    Mg = load_array('SmoothedElementAbundance/Magnesium',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    Si = load_array('SmoothedElementAbundance/Silicon',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    #S = load_array('SmoothedElementAbundance/Sulphur', t, array_type=pdata_type, run=run, model=model, tag=tag)
                    #Ca = load_array('SmoothedElementAbundance/Calcium', t, array_type=pdata_type, run=run, model=model, tag=tag)
                    Fe = load_array('SmoothedElementAbundance/Iron',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    abunds = np.dstack((H, He, C, N, O, Ne, Mg, Si, Fe))[0]
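                    # abunds has shape (n_particles, 9): smoothed mass fractions of H, He, C, N, O, Ne, Mg, Si and Fe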
                    del H, He, C, N, O, Ne, Mg, Si, Fe
                    groupabunds = []
                    for ii, mask in enumerate(masks):
                        groupabunds.append(abunds[mask])
                    self.abund_type.append(groupabunds)
                    del abunds

                    groupmetal = []
                    metal = load_array('SmoothedMetallicity',
                                       t,
                                       array_type=pdata_type,
                                       run=run,
                                       model=model,
                                       tag=tag)
                    for ii, mask in enumerate(masks):
                        groupmetal.append(metal[mask])
                    self.metal.append(groupmetal)
                    del metal

                if t in ['0', '4', '5']:
                    groupsmooths = []
                    smooths = load_array('SmoothingLength',
                                         t,
                                         array_type=pdata_type,
                                         run=run,
                                         model=model,
                                         tag=tag)
                    for ii, mask in enumerate(masks):
                        groupsmooths.append(smooths[mask])
                    self.smooths.append(groupsmooths)
                    del smooths
            self.mask_type.append(masks)

            for n in range(len(part_types)):
                if part_types[n] == '4':
                    print 'Stars are part index ', n
                    si = n
                else:
                    continue
                self.transform = []
                for ii in range(0, len(self.pos_type[0])):
                    #perform alignment to Jz of star particles in a given R
                    Rs = np.sqrt(self.pos_type[si][ii][:, 0]**2 +
                                 self.pos_type[si][ii][:, 1]**2 +
                                 self.pos_type[si][ii][:, 2]**2)
                    radmask = Rs < align_radius
                    starj = np.cross(
                        self.pos_type[si][ii], self.vel_type[si][ii] *
                        self.mass_type[si][ii][:, np.newaxis])
                    r200j = starj[radmask]
                    tot_ang_mom = np.sum(r200j, axis=0)
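                    # Rodrigues' rotation formula below: R = I + [v]_x + [v]_x^2 / (1 + cos(theta)),
                    # rotating the net stellar angular momentum vector onto the z-axis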
                    a = np.matrix([
                        tot_ang_mom[0], tot_ang_mom[1], tot_ang_mom[2]
                    ]) / np.linalg.norm(
                        [tot_ang_mom[0], tot_ang_mom[1], tot_ang_mom[2]])
                    b = np.matrix([0, 0, 1])
                    v = np.cross(a, b)
                    s = np.linalg.norm(v)
                    c = np.dot(a, b.T)
                    vx = np.matrix([[0, -v[0, 2], v[0, 1]],
                                    [v[0, 2], 0, -v[0, 0]],
                                    [-v[0, 1], v[0, 0], 0]])
                    transform = np.eye(3, 3) + vx + (vx * vx) * (1 /
                                                                 (1 + c[0, 0]))
                    self.transform.append(transform)
            self.run = run
            self.model = model
            self.tag = tag
            self.halotype = halotype
            self.centers = centers
            self.side_sizes = side_sizes
            self.bulkvels = bulkvels
            self.central_gnums = central_gnums
            self.part_types = part_types

            print 'Applying transformations...'
            for ii, ptype in enumerate(part_types):
                for jj, transform in tqdm(enumerate(self.transform)):
                    try:
                        self.pos_type[ii][jj] = np.array([
                            np.dot(transform, self.pos_type[ii][jj][i].T)
                            for i in range(0, len(self.pos_type[ii][jj]))
                        ])[:, 0]
                        self.vel_type[ii][jj] = np.array([
                            np.dot(transform, self.vel_type[ii][jj][i].T)
                            for i in range(0, len(self.vel_type[ii][jj]))
                        ])[:, 0]
                    except IndexError:
                        continue
#center = np.array([15.2974, 10.9540,  9.0412])
center = np.array([xcoord, ycoord, zcoord])

lgrid = lgrid / 1e+03
lgridz = lgrid * 4  # Four times longer than Lgrid in zdirection.

path = "snapshot_%s/snap_%s.0.hdf5" % (input_filename_base,
                                       input_filename_base)
print path

#filein = h5py.File(path)
#redshift = filein['Header'].attrs['Redshift']
#aex = 1/(1+redshift)
#center = center*aex

aex = eagle.readAttribute("SNAP", sim, input_filename_base,
                          "/Header/ExpansionFactor")
hubble_param = eagle.readAttribute("SNAP", sim, input_filename_base,
                                   "/Header/HubbleParam")

center = center / hubble_param * aex
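# center is now in physical Mpc (assuming the input coordinates were comoving h^-1 Mpc)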

coords = eagle.readArray("SNAP",
                         sim,
                         input_filename_base,
                         "/PartType0/Coordinates",
                         numThreads=1)
hsmooth = eagle.readArray("SNAP",
                          sim,
                          input_filename_base,
                          "/PartType0/SmoothingLength",
                          numThreads=1)
Example No. 7
    def __init__(self,
                 centers,
                 side_sizes,
                 bulkvels,
                 central_gnums,
                 sgnums,
                 align_radius=0.03,
                 run='L0100N1504',
                 model='REFERENCE',
                 tag='028_z000p000',
                 part_types=['0', '4'],
                 pdata_type='SNAPSHOT',
                 overwrite=True,
                 path='/scratch/eagle/sav/',
                 halotype='any_halos'):
        vol_file = path + run + '_' + model + '/' + tag + '/FOF' + str(
            int(central_gnums[0])) + '_volume.hdf5'

        if overwrite or not os.path.exists(vol_file):
            sim = '/data5/simulations/EAGLE/' + run + '/' + model + '/data'
            #get boxsize and particle positions
            boxsize = E.readAttribute(pdata_type, sim, tag, "/Header/BoxSize")
            h = E.readAttribute(pdata_type, sim, tag, "/Header/HubbleParam")
            Omega0 = E.readAttribute(pdata_type, sim, tag, "/Header/Omega0")
            OmegaLambda = E.readAttribute(pdata_type, sim, tag,
                                          "/Header/OmegaLambda")
            OmegaBaryon = E.readAttribute(pdata_type, sim, tag,
                                          "/Header/OmegaBaryon")
            a_0 = E.readAttribute(pdata_type, sim, tag,
                                  "/Header/ExpansionFactor")
            boxsize = boxsize / h
            masstable = E.readAttribute(pdata_type, sim, tag,
                                        "/Header/MassTable") / h

            self.mask_type = []
            self.pos_type = []
            self.vel_type = []
            self.mass_type = []
            self.density_type = []
            self.PID_type = []
            self.abund_type = []
            self.smooths = []
            self.gnum_type = []
            self.sfr_type = []
            self.metal = []
            self.temp = []

            num_subs = E.readArray("SUBFIND_GROUP", sim, tag,
                                   "/FOF/NumOfSubhalos")
            r200 = E.readArray("SUBFIND_GROUP", sim, tag,
                               "/FOF/Group_R_Crit200")[num_subs > 0]
            gns = central_gnums - 1
            self.r200 = r200[gns]

            #re-center to 'center' and remove particles outside box
            for t in part_types:

                posarr = load_array('Coordinates',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                '''
                masks = []
                grouppos = []
                print 'Creating apertures...'
                for ii,center in tqdm(enumerate(centers)):
                    if (np.absolute(center)+side_sizes[ii]/2.).any() > boxsize/2.: # Is the group actually on the edge?
                        pos = posarr - (center - boxsize/2.)
                        pos = np.mod(pos,boxsize)
                        pos -= boxsize/2.

                    else: # Don't bother doing the wrapping if it doesn't affect the current group
                        pos = posarr - center
                        
                    r = np.sqrt(np.einsum('...j,...j->...',pos,pos)) # get the radii from the centre
                    mask = np.where(r<side_sizes[ii]/2.)[0] # make the mask
                    masks.append(mask)
                    grouppos.append(pos[mask])
                '''

                grouppos, masks = create_apertures(posarr, centers, side_sizes,
                                                   boxsize)

                npart = len(posarr)
                self.pos_type.append(grouppos)
                del posarr

                print len(masks)

                velarr = load_array('Velocity',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                groupvel = []
                for ii, mask in enumerate(masks):
                    vel = velarr[mask]
                    vel -= bulkvels[ii]
                    groupvel.append(vel)
                self.vel_type.append(groupvel)
                del velarr

                groupmass = []
                if t != '1':
                    massarr = load_array('Mass',
                                         t,
                                         array_type=pdata_type,
                                         run=run,
                                         model=model,
                                         tag=tag)
                if t == '1':
                    # DM particles all share the mass-table value; build a full-length array
                    massarr = np.ones(npart) * masstable[1]
                for ii, mask in enumerate(masks):
                    groupmass.append(massarr[mask])
                self.mass_type.append(groupmass)
                del massarr

                if t in ['0', '4', '5']:
                    groupdensity = []
                    if t == '0':
                        densityarr = load_array('Density',
                                                t,
                                                array_type=pdata_type,
                                                run=run,
                                                model=model,
                                                tag=tag)
                    if t == '4':
                        densityarr = load_array('BirthDensity',
                                                t,
                                                array_type=pdata_type,
                                                run=run,
                                                model=model,
                                                tag=tag)
                    if t == '5':
                        densityarr = load_array('BH_Density',
                                                t,
                                                array_type=pdata_type,
                                                run=run,
                                                model=model,
                                                tag=tag)
                    for ii, mask in enumerate(masks):
                        groupdensity.append(densityarr[mask])
                    self.density_type.append(groupdensity)
                    del densityarr

                grouppids = []
                PIDs = load_array('ParticleIDs',
                                  t,
                                  array_type=pdata_type,
                                  run=run,
                                  model=model,
                                  tag=tag)
                for ii, mask in enumerate(masks):
                    grouppids.append(PIDs[mask])
                self.PID_type.append(grouppids)
                del PIDs

                groupgnums = []
                gnums = load_array('GroupNumber',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                for ii, mask in enumerate(masks):
                    groupgnums.append(gnums[mask])
                self.gnum_type.append(groupgnums)
                del gnums

                if t == '0':
                    groupsfrs = []
                    sfr = load_array('StarFormationRate',
                                     t,
                                     array_type=pdata_type,
                                     run=run,
                                     model=model,
                                     tag=tag)
                    for ii, mask in enumerate(masks):
                        groupsfrs.append(sfr[mask])
                    self.sfr_type = groupsfrs

                    grouptemps = []
                    temps = load_array('Temperature',
                                       t,
                                       array_type=pdata_type,
                                       run=run,
                                       model=model,
                                       tag=tag)
                    for ii, mask in enumerate(masks):
                        grouptemps.append(temps[mask])
                    self.temp = grouptemps

                if t == '4':
                    groupsftimes = []
                    sftimes = load_array('StellarFormationTime',
                                         t,
                                         array_type=pdata_type,
                                         run=run,
                                         model=model,
                                         tag=tag)
                    for ii, mask in enumerate(masks):
                        groupsftimes.append(sftimes[mask])
                    self.sftimes = groupsftimes

                if t in ['0', '4']:
                    H = load_array('SmoothedElementAbundance/Hydrogen',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                    He = load_array('SmoothedElementAbundance/Helium',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    C = load_array('SmoothedElementAbundance/Carbon',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                    N = load_array('SmoothedElementAbundance/Nitrogen',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                    O = load_array('SmoothedElementAbundance/Oxygen',
                                   t,
                                   array_type=pdata_type,
                                   run=run,
                                   model=model,
                                   tag=tag)
                    Ne = load_array('SmoothedElementAbundance/Neon',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    Mg = load_array('SmoothedElementAbundance/Magnesium',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    Si = load_array('SmoothedElementAbundance/Silicon',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    #S = load_array('SmoothedElementAbundance/Sulphur', t, array_type=pdata_type, run=run, model=model, tag=tag)
                    #Ca = load_array('SmoothedElementAbundance/Calcium', t, array_type=pdata_type, run=run, model=model, tag=tag)
                    Fe = load_array('SmoothedElementAbundance/Iron',
                                    t,
                                    array_type=pdata_type,
                                    run=run,
                                    model=model,
                                    tag=tag)
                    abunds = np.dstack((H, He, C, N, O, Ne, Mg, Si, Fe))[0]
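                    # abunds has shape (n_particles, 9): smoothed mass fractions of H, He, C, N, O, Ne, Mg, Si and Fe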
                    del H, He, C, N, O, Ne, Mg, Si, Fe
                    groupabunds = []
                    for ii, mask in enumerate(masks):
                        groupabunds.append(abunds[mask])
                    self.abund_type.append(groupabunds)
                    del abunds

                    groupmetal = []
                    metal = load_array('SmoothedMetallicity',
                                       t,
                                       array_type=pdata_type,
                                       run=run,
                                       model=model,
                                       tag=tag)
                    for ii, mask in enumerate(masks):
                        groupmetal.append(metal[mask])
                    self.metal.append(groupmetal)
                    del metal

                if t in ['0', '4', '5']:
                    groupsmooths = []
                    smooths = load_array('SmoothingLength',
                                         t,
                                         array_type=pdata_type,
                                         run=run,
                                         model=model,
                                         tag=tag)
                    for ii, mask in enumerate(masks):
                        groupsmooths.append(smooths[mask])
                    self.smooths.append(groupsmooths)
                    del smooths
            self.mask_type.append(masks)

            for n in range(len(part_types)):
                if part_types[n] == '4':
                    print 'Stars are part index ', n
                    si = n
                else:
                    continue
                self.transform = []
                for ii in range(0, len(self.pos_type[0])):
                    #perform alignment to Jz of star particles in a given R
                    Rs = np.sqrt(self.pos_type[si][ii][:, 0]**2 +
                                 self.pos_type[si][ii][:, 1]**2 +
                                 self.pos_type[si][ii][:, 2]**2)
                    radmask = Rs < align_radius
                    starj = np.cross(
                        self.pos_type[si][ii], self.vel_type[si][ii] *
                        self.mass_type[si][ii][:, np.newaxis])
                    r200j = starj[radmask]
                    tot_ang_mom = np.sum(r200j, axis=0)
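                    # Rodrigues' rotation formula below: R = I + [v]_x + [v]_x^2 / (1 + cos(theta)),
                    # rotating the net stellar angular momentum vector onto the z-axis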
                    a = np.matrix([
                        tot_ang_mom[0], tot_ang_mom[1], tot_ang_mom[2]
                    ]) / np.linalg.norm(
                        [tot_ang_mom[0], tot_ang_mom[1], tot_ang_mom[2]])
                    b = np.matrix([0, 0, 1])
                    v = np.cross(a, b)
                    s = np.linalg.norm(v)
                    c = np.dot(a, b.T)
                    vx = np.matrix([[0, -v[0, 2], v[0, 1]],
                                    [v[0, 2], 0, -v[0, 0]],
                                    [-v[0, 1], v[0, 0], 0]])
                    transform = np.eye(3, 3) + vx + (vx * vx) * (1 /
                                                                 (1 + c[0, 0]))
                    self.transform.append(transform)
            self.run = run
            self.model = model
            self.tag = tag
            self.halotype = halotype
            self.centers = centers
            self.side_sizes = side_sizes
            self.bulkvels = bulkvels
            self.central_gnums = central_gnums
            self.part_types = part_types

            print 'Applying transformations...'
            for ii, ptype in enumerate(part_types):
                for jj, transform in tqdm(enumerate(self.transform)):
                    try:
                        self.pos_type[ii][jj] = np.array([
                            np.dot(transform, self.pos_type[ii][jj][i].T)
                            for i in range(0, len(self.pos_type[ii][jj]))
                        ])[:, 0]
                        self.vel_type[ii][jj] = np.array([
                            np.dot(transform, self.vel_type[ii][jj][i].T)
                            for i in range(0, len(self.vel_type[ii][jj]))
                        ])[:, 0]
                    except IndexError:
                        continue
Example No. 8
thresh = 0.2

outfile = 'voidCatalogue.dat'

sim = '/cosma5/data/Eagle/ScienceRuns/Planck1/'\
'L0025N0376/PE/REFERENCE/data/'

tag = '028_z000p000'

start = time.time()

print('Starting void search.')
print('Reading: ' + sim + tag)
print('')

boxsize = eagle.readAttribute("SNAPSHOT", sim, tag, "/Header/BoxSize")
numpart_pertype = eagle.readAttribute("SNAPSHOT", sim, tag, "/Header/NumPart_Total")
npart_total = numpy.sum(numpart_pertype)
ngas, ndm, nstar, nbh = numpart_pertype[0], numpart_pertype[1], numpart_pertype[4], numpart_pertype[5]

maxvoid_radius = boxsize / 2.

print('BoxSize = ' + repr(boxsize))
print('Total number of particles: ' + repr(npart_total))
print('ngas: ' + repr(ngas))
print('ndm: ' + repr(ndm))
print('nstar: ' + repr(nstar))
print('nbh: ' + repr(nbh))
print('')
print('Proceeding to read data...')
Example No. 9
def halo_matcher(ref_group_numbers, ref_ordered_GNs, ref_most_bound, match_ordered_GNs, sim_match, SN_i, n_bound, verbose, redshift_tracker):

    '''Main function, called to return a matched halo catalogue from sim 1
    to sim 2.

    Arguments:
    ref_group_numbers -- group numbers from sim 1 for which a match will be
                         searched for in sim 2.
    ref_ordered_GNs -- numpy array of length n_part giving the group number
                       that each particle in sim 1 belongs to.
    ref_most_bound -- numpy array of length n_bound*len(ref_group_numbers)
                      containing particle IDs ordered from most to least
                      bound.
    match_ordered_GNs -- numpy array of length n_part giving the group number
                         that each particle in sim 2 belongs to.
    sim_match -- simulation in which matches for the sim 1 halos are searched.
    SN_i -- snapshot of the simulation being matched to; this allows matching
            back in redshift.
    n_bound -- number of most-bound particles used when matching halos
               (default 50).
    verbose -- output information while reading in particle data.
    redshift_tracker -- flag indicating that one simulation is being matched
                        backwards through redshift. To minimise reading time,
                        the function then also returns the match_ordered_GNs
                        array, which is used as ref_ordered_GNs in the next
                        iteration.'''

    # Check if Sim2 is hydro
    match_hydro = E.readAttribute('SNAPSHOT', sim_match, SN_i, '/Header/NumPart_Total')[0] > 0
    n_part = E.readAttribute('SNAPSHOT', sim_match, SN_i, '/Header/NumPart_Total')[1]

    # Create array to store current Group Numbers matching from Sim1 to Sim2
    temp_halo_catalogue_ref = np.ones((np.max(ref_group_numbers)+1, 2), dtype=int_type) * -1
    temp_halo_catalogue_ref[:,0] = np.arange(0,np.max(ref_group_numbers)+1)

    # Read in the particle IDs and Group Numbers of Sim2
    match_GNs = np.abs(E.readArray('PARTDATA', sim_match, SN_i, '/PartType1/GroupNumber', verbose=verbose)) - 1
    match_IDs = E.readArray('PARTDATA', sim_match, SN_i, '/PartType1/ParticleIDs', verbose=verbose).astype(int_type) - 1

    # Remove hydro particle IDs and normalise the dm particle IDs 
    if match_hydro:
        match_IDs -= n_part
        index_pos = match_IDs > 0
        match_ordered_GNs[match_IDs[index_pos]] = match_GNs[index_pos] # Ordered by ID
    else: 
        match_ordered_GNs[match_IDs] = match_GNs # Ordered by ID

    # Match halos from Sim1 to Sim2 and store matches in temporary catalogue
    matches = GN_match(match_ordered_GNs, ref_most_bound, n_bound)
    temp_halo_catalogue_ref[ref_group_numbers,1] = matches[:,0]

    # Now match from Sim2 to Sim1
    # First read in particle information for Sim2
    match_IDs = E.readArray('SUBFIND_PARTICLES', sim_match, SN_i, '/IDs/ParticleID', verbose=verbose).astype(int_type) - 1
    match_group_length = E.readArray('SUBFIND_GROUP', sim_match, SN_i, '/FOF/GroupLength', verbose=verbose).astype(int_type)

    # Remove hydro particle IDs from SUBFIND and correct group lengths
    if match_hydro:
        match_IDs = match_IDs.astype(np.int64)
        match_IDs -= n_part
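        # recount each group's length using only the remaining (positive-ID) DM particles:
        # reduceat sums the boolean mask over each group's contiguous slice of the ID list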
        match_group_length = np.add.reduceat(match_IDs>0, np.hstack([0,np.cumsum(match_group_length)[:-1]]))
        match_IDs = match_IDs[match_IDs>0]

    # Find halos which contain at least 50 particles in Sim2
    match_group_numbers = np.where(match_group_length>=n_bound)[0]

    # Create array to store current Group numbers and matched Group Numbers from Sim2 to Sim1
    temp_halo_catalogue_match = np.ones((np.max(match_group_numbers) + 1, 2), dtype=int_type) * -1
    temp_halo_catalogue_match[:,1] = np.arange(0,np.max(match_group_numbers)+1)

    # Make n_bound most bound particle array for halos in Sim2
    match_most_bound = most_bound(match_IDs, match_group_length, n_bound)

    # Match halos from Sim2 to Sim1 using most bound particles
    bi_matches = GN_match(ref_ordered_GNs.astype(np.int64), match_most_bound, n_bound)

    # Store matches in temporary halo catalogue
    temp_halo_catalogue_match[match_group_numbers,0] = bi_matches[:,0]

    # Match between the halo catalogues of Sim1 and Sim2
    # Remove all halos which do not have a match straight away
    temp_halo_catalogue_ref = temp_halo_catalogue_ref[temp_halo_catalogue_ref[:,1] != -1]
    temp_halo_catalogue_match = temp_halo_catalogue_match[temp_halo_catalogue_match[:,0] != -1]

    # Now find which of the reference halos are in both sets of matched halos
    index_both = np.isin(temp_halo_catalogue_match[:,0], temp_halo_catalogue_ref[:,0])
    temp_halo_catalogue_match = temp_halo_catalogue_match[index_both]

    # Construct a common set of halos across the two simulations
    index_array = np.searchsorted(temp_halo_catalogue_ref[:,0], temp_halo_catalogue_match[:,0])
    temp_halo_catalogue_ref = temp_halo_catalogue_ref[index_array]
    true_match = temp_halo_catalogue_ref[:,1] == temp_halo_catalogue_match[:,1]
    temp_halo_catalogue = temp_halo_catalogue_ref[true_match]

    if redshift_tracker:
        return temp_halo_catalogue, match_ordered_GNs, match_IDs, match_group_length
    else:
        return temp_halo_catalogue
    def __init__(self, prop,
                 sim='L0100N1504',
                 run='REFERENCE',
                 snapnum=28):
        print 'Initialising box for imaging...'

        tag = snapdict[str(snapnum)][0]

        sim_path = '/data5/simulations/EAGLE/' + sim + '/' + run + '/data/'

        # Get volume information
        boxsize = E.readAttribute('SNAP', sim_path, tag, "/Header/BoxSize")
        h = E.readAttribute('SNAP', sim_path, tag, "/Header/HubbleParam")
        a_0 = E.readAttribute('SNAP', sim_path, tag, "/Header/ExpansionFactor")

        # Point read_eagle to the data
        snapfile = sim_path + 'snapshot_' + tag + '/snap_' + tag + '.0.hdf5'
        comm = MPI.COMM_WORLD
        comm_rank = comm.Get_rank()
        comm_size = comm.Get_size()
        # Open snapshot
        snap = read.EagleSnapshot(snapfile)
        # Select region of interest
        snap.select_region(0.,boxsize,0.,boxsize,0.,boxsize)
        # Split selection between processors
        # This assigns an equal number of hash cells to each processor.
        snap.split_selection(comm_rank,comm_size)

        if prop == 'stars':
            #pos = load_array('Coordinates', 4, sim=sim, run=run, tag=tag).T
            #smoothing_length = load_array('SmoothingLength', 4, sim=sim, run=run, tag=tag)
            #quantity = load_array('Mass', 4, sim=sim, run=run, tag=tag) * 1e10

            pos = snap.read_dataset(4,'Coordinates') * a_0/h
            pos = pos.T
            smoothing_length = snap.read_dataset(4, 'SmoothingLength') * a_0 / h
            quantity = snap.read_dataset(4, 'Mass') / h * 1e10
        else:
            #pos = load_array('Coordinates', 0, sim=sim, run=run, tag=tag).T
            #smoothing_length = load_array('SmoothingLength', 0, sim=sim, run=run, tag=tag)

            pos = snap.read_dataset(0, 'Coordinates') * a_0 / h
            print pos
            pos = pos.T
            smoothing_length = snap.read_dataset(0, 'SmoothingLength') * a_0 / h

            if prop == 'gas':
                quantity = snap.read_dataset(0, 'Mass') / h / 1e10
                print quantity
            elif prop == 'xrays':
                pids = snap.read_dataset(0, 'ParticleIDs')
                print 'Matching x-rays to particles'

                xray_data = h5.File('/data6/arijdav1/Lx_matching/'+sim+'_'+run+'/'+tag+'.hdf5','r')
                xrays = np.array(xray_data['Xray_luminosity']) / 1e30
                xray_pids = np.array(xray_data['ParticleIDs'])
                #match_sort = np.argsort(xray_pids)
                #xrays = xrays[match_sort]
                #xray_pids = xray_pids[match_sort]
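                # note: np.searchsorted assumes xray_pids is sorted; if it is not,
                # re-enable the argsort block above before indexing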

                quantity = xrays[np.searchsorted(xray_pids,pids)]


            else:
                raise IOError('Plot options are "gas","stars" or "xrays"')

        N = len(quantity)

        pos *= 1e3  # convert to kpc
        smoothing_length *= 1e3

        print N

        Particles = sphviewer.Particles(pos, quantity, hsml=smoothing_length)

        print Particles.get_pos()
        print Particles.get_mass()
        print Particles.get_hsml()

        self.Scene = sphviewer.Scene(Particles)

        self.sim = sim
        self.run = run
        self.tag = tag
        self.property = prop
        self.boxsize = boxsize / h
Example No. 11
center = np.array([xcoord, ycoord, zcoord])

lgrid = lgrid / 1e+03
lgridz = lgrid * 4  # Four times longer than Lgrid in zdirection.

if (snip == '0'):
    path = "snapshot_%s/snap_%s.0.hdf5" % (input_filename_base,
                                           input_filename_base)
    sniptag = "SNAP"
else:
    path = "snipshot_%s/snip_%s.0.hdf5" % (input_filename_base,
                                           input_filename_base)
    sniptag = "SNIP"

redshift = eagle.readAttribute(sniptag, sim, input_filename_base,
                               "/Header/Redshift")
aex = eagle.readAttribute(sniptag, sim, input_filename_base,
                          "/Header/ExpansionFactor")
hubble_param = eagle.readAttribute(sniptag, sim, input_filename_base,
                                   "/Header/HubbleParam")
center = center / hubble_param * aex
print "center= ", center
coords = eagle.readArray(sniptag,
                         sim,
                         input_filename_base,
                         "/PartType1/Coordinates",
                         numThreads=1)
mass_table = eagle.readAttribute(sniptag, sim, input_filename_base,
                                 "/Header/MassTable")

DM_mass = eagle.readArray(
Example No. 12
'''

test_snaps = [0, 249, 499, 749, 999]

if 'setrange' in argv:
    # Make some test images for setting the dynamic range
    vmax_vals = []
    vmin_vals = []

    print 'Running dynamic range tests... '

    for ts in test_snaps:

        tag = snaps[ts]
        snapnum = int(tag[:3])
        a_exp = E.readAttribute('SNAP', root_dir, tag,
                                "/Header/ExpansionFactor")

        gas = region(prop, sim=sim, run=run, tag=tag, quiet=True)
        gas.select(centre * a_exp, regionsize * a_exp)

        gas.image(groupnum,
                  centre * a_exp,
                  extent=extent * a_exp,
                  resolution=resolution,
                  save=False,
                  show=True)

        while True:
            vmax_temp = np.float32(raw_input('Enter vmax: '))
            vmin_temp = np.float32(raw_input('Enter vmin: '))
Example No. 13
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import gridspec
from scipy.stats import gaussian_kde
import numpy as np
import eagle as E
import os
import math
import csv

default_run = "L0050N0752"
default_sim = "/data5/simulations/EAGLE/"+default_run+"/REFERENCE/data"
default_tag = "028_z000p000"

boxsize = E.readAttribute("SUBFIND", default_sim, default_tag, "/Header/BoxSize")
h = E.readAttribute("SUBFIND", default_sim, default_tag, "/Header/HubbleParam")
boxsize = boxsize/h
masstable = E.readAttribute("SUBFIND", default_sim, default_tag, "/Header/MassTable") / h

def ensure_dir(f):
	""" Ensure the directory for a file path exists, creating it if necessary """
	d = os.path.dirname(f)
	if not os.path.exists(d):
		os.makedirs(d)
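
# a minimal usage sketch (the path below is illustrative):
# ensure_dir('./plots/halo_0/profile.png')  # creates ./plots/halo_0/ if it does not already exist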

def correctwrap(rel_pos):
	""" Correct the periodic wrap in EAGLE """
	for i in range(0,len(rel_pos)):
		if abs(rel_pos[i][0]) > (boxsize/2):
			if np.sign(rel_pos[i][0]) == -1:
Example No. 14
def savesubhalo(halo,subgroup,parttype,path="/data5/astjmack/halos/"):
	good_types = [0,4,5]
	boxsize = E.readAttribute("SUBFIND", sim, tag, "/Header/BoxSize")
	h = E.readAttribute("SUBFIND", sim, tag, "/Header/HubbleParam")
	masstable = E.readAttribute("SUBFIND", sim, tag, "/Header/MassTable") / h
	boxsize = boxsize/h
	groupnum = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/GroupNumber")
	subgroupnum = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/SubGroupNumber")
	subgroupnum = subgroupnum[groupnum == halo]
	r_200 = E.readArray("SUBFIND_GROUP", sim, tag, "/FOF/Group_R_Crit200")[halo-1]
	fsid = E.readArray("SUBFIND_GROUP", sim, tag, "FOF/FirstSubhaloID")
	pos = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/Coordinates")[groupnum == halo, :]
	pos = pos[subgroupnum == subgroup, :]
	if parttype in good_types:
		mass = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/Mass")[groupnum == halo]
		mass = mass[subgroupnum == subgroup]
	elif parttype == 1:
		mass = np.ones(len(pos))*masstable[1]
		
	vel = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/Velocity")[groupnum == halo, :]
	vel = vel[subgroupnum == subgroup, :]
	if parttype == 4:
		stars_h = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/SmoothedElementAbundance/Hydrogen")[groupnum == halo]
		stars_fe = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/SmoothedElementAbundance/Iron")[groupnum == halo]
		stars_o = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/SmoothedElementAbundance/Oxygen")[groupnum == halo]
		stars_mg = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/SmoothedElementAbundance/Magnesium")[groupnum == halo]
		starformtime = E.readArray("PARTDATA", sim, tag, "/PartType"+str(parttype)+"/StellarFormationTime")[groupnum == halo]
		stars_h = stars_h[subgroupnum == subgroup]
		stars_fe = stars_fe[subgroupnum == subgroup]
		stars_o = stars_o[subgroupnum == subgroup]
		stars_mg = stars_mg[subgroupnum == subgroup]
		starformtime = starformtime[subgroupnum == subgroup]
		solar_h = 0.706498
		solar_fe = 0.00110322
		solar_mg = 0.000590706
		solar_o = 0.00549262
		solar_fe_h = np.log10(solar_fe/solar_h)
		solar_mg_fe = np.log10(solar_mg/solar_h)-(solar_fe_h)
		solar_o_fe = np.log10(solar_o/solar_h)-(solar_fe_h)
		stars_fe_h = np.log10(stars_fe/stars_h)
		stars_mg_fe = np.log10(stars_mg/stars_h)-(stars_fe_h)
		stars_o_fe = np.log10(stars_o/stars_h)-(stars_fe_h)
		fe_h = np.array([str_fe_h - solar_fe_h for str_fe_h in stars_fe_h])
		mg_fe = np.array([str_a_fe - solar_mg_fe for str_a_fe in stars_mg_fe])
		o_fe = np.array([str_o_fe - solar_o_fe for str_o_fe in stars_o_fe])

	subhaloindex = fsid[halo-1]+subgroup
	CoP = E.readArray("SUBFIND", sim, tag, "/Subhalo/CentreOfPotential")[subhaloindex, :]
	subhalovel = E.readArray("SUBFIND", sim, tag, "/Subhalo/Velocity")[subhaloindex, :]

	rel_pos = [[pos[0]-CoP[0],pos[1]-CoP[1],pos[2]-CoP[2]] for pos in pos] #Relative positions

	#re-position overlapped particles
	for i in range(0,len(rel_pos)):
		if abs(rel_pos[i][0]) > (boxsize/2):
			if np.sign(rel_pos[i][0]) == -1:
				rel_pos[i][0] = rel_pos[i][0] + boxsize
			else:
				rel_pos[i][0] = rel_pos[i][0] - boxsize
		if abs(rel_pos[i][1]) > (boxsize/2):
			if np.sign(rel_pos[i][1]) == -1:
				rel_pos[i][1] = rel_pos[i][1] + boxsize
			else:
				rel_pos[i][1] = rel_pos[i][1] - boxsize
		if abs(rel_pos[i][2]) > (boxsize/2):
			if np.sign(rel_pos[i][2]) == -1:
				rel_pos[i][2] = rel_pos[i][2] + boxsize
			else:
				rel_pos[i][2] = rel_pos[i][2] - boxsize
	rel_pos = np.array(rel_pos)

	#Make a mask for R_Crit200 and reduce arrays to contain only these values.
	r_crit_mask =[]
	for i in range(0,len(rel_pos)):
		if np.sqrt(rel_pos[i][0]**2+rel_pos[i][1]**2+rel_pos[i][2]**2) <= 0.15*r_200:
			r_crit_mask.append(True)
		else:
			r_crit_mask.append(False)
	r_crit_mask = np.array(r_crit_mask, dtype='bool')
	rel_pos_1 = rel_pos[r_crit_mask]
	mass = mass[r_crit_mask]
	vel = vel[r_crit_mask]
	if parttype == 4:
		fe_h = fe_h[r_crit_mask]
		mg_fe = mg_fe[r_crit_mask]
		o_fe = o_fe[r_crit_mask]
		fe_h = np.array(fe_h)
		mg_fe = np.array(mg_fe)
		o_fe = np.array(o_fe)
	
	"""
	nanmask = np.zeros(len(fe_h))
	for i in range(0, len(fe_h)):
		if (np.isnan(fe_h[i]) == True) | (np.isinf(fe_h[i]) == True) | (np.isnan(mg_fe[i]) == True) | (np.isinf(mg_fe[i]) == True) | (np.isnan(o_fe[i]) == True) | (np.isinf(o_fe[i]) == True):
			nanmask[i] = False
		else:
			nanmask[i] = True

	nanmask = np.array(nanmask, dtype='bool')

	rel_pos_1 = rel_pos_1[nanmask]
	mass = mass[nanmask]
	vel = vel[nanmask]
	fe_h = fe_h[nanmask]
	mg_fe = mg_fe[nanmask]
	o_fe = o_fe[nanmask]
	starformtime = starformtime[nanmask]
	"""
	#Remove galaxy bulk motion from velocities
	vel = [bulkvel-subhalovel for bulkvel in vel]

	#Perform angular momentum calculation
	mv = [m*v for m,v in zip(mass,vel)]	
	ang_mom = [np.cross(rpos,mv) for rpos,mv in zip(rel_pos_1,mv)]
	tot_ang_mom = map(sum, zip(*ang_mom))
	tot_ang_mom = E.readArray("SUBFIND", sim, tag, "/Subhalo/Stars/Spin")[subhaloindex, :]
	print str(tot_ang_mom)
	yaw = np.arccos(tot_ang_mom[1]/(np.sqrt(tot_ang_mom[0]**2+tot_ang_mom[1]**2)))
	pitch = np.arccos(tot_ang_mom[1]/(np.sqrt(tot_ang_mom[1]**2+tot_ang_mom[2]**2)))
	roll = np.arccos(tot_ang_mom[0]/(np.sqrt(tot_ang_mom[0]**2+tot_ang_mom[2]**2)))
	cos = np.cos
	sin = np.sin
	yaw_tran = np.matrix([[cos(yaw), -sin(yaw), 0],[sin(yaw), cos(yaw), 0],[0,0,1]])
	pitch_tran = np.matrix([[cos(pitch), 0, sin(pitch)],[0,1,0],[-sin(pitch), 0, cos(pitch)]])
	roll_tran = np.matrix([[1,0,0],[0,cos(roll),-sin(roll)],[0,sin(roll),cos(roll)]])
	trans = np.array(roll_tran*pitch_tran*yaw_tran)

	#Transform positions and velocities
	r_tran = np.array([np.array([np.dot(i, trans[0]), np.dot(i, trans[1]), np.dot(i,trans[2])]) for i in rel_pos_1])
	vel_tran = np.array([np.array([np.dot(j, trans[0]), np.dot(j, trans[1]), np.dot(j, trans[2])]) for j in vel])

	#Calculate radial position
	R_pos = np.array([np.sqrt(rpos[0]**2 + rpos[2]**2) for rpos in r_tran])
	z_pos = abs(np.array(zip(*r_tran)[1]))
	
	#vertical and Circular angular momentum
	#mv = [m*v for m,v in zip(mass,vel)]
	ang_mom = [np.cross(rpos,v) for rpos,v in zip(r_tran,vel)]

	#Calculate star formation ages
	Mpc = 3.08567758e22
	t_0 = 13.8
	H_0 = h  * 100
	#t_a = [(2*a**(3/2))/(3*H_0)/(1e9*365*24*60*60) for a in starformtime]
	if parttype == 4:
		ages = starformtime #[t_0 - t for t in t_a]
	
	if parttype == 4:
		partarray = np.array([zip(*r_tran)[0], zip(*r_tran)[2], zip(*r_tran)[1],
			zip(*vel_tran)[0], zip(*vel_tran)[2], zip(*vel_tran)[1],
			mass, R_pos, z_pos, fe_h, mg_fe, r_200, zip(*ang_mom)[1], ages, o_fe])
	else:
		partarray = np.array([zip(*r_tran)[0], zip(*r_tran)[2], zip(*r_tran)[1],
			zip(*vel_tran)[0], zip(*vel_tran)[2], zip(*vel_tran)[1],
			mass, R_pos, z_pos, r_200, zip(*ang_mom)[1]])
	ensure_dir(path+run+"_FOF"+str(halo)+"_SUB"+str(subgroup)+"/")
	np.save(path+run+"_FOF"+str(halo)+"_SUB"+str(subgroup)+"/part"+str(parttype)+"dat", partarray)
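
A hedged usage sketch for the routine above, assuming the module-level sim, tag and run globals it relies on have been set elsewhere in the script (they are not defined inside this snippet); the group/subgroup numbers below are illustrative only:

sim = "/data5/simulations/EAGLE/L0050N0752/REFERENCE/data"  # assumed global read by savesubhalo
tag = "028_z000p000"                                        # assumed global
run = "L0050N0752"                                          # assumed global, used in the output folder name
savesubhalo(1, 0, 4)  # save the star particles (PartType4) of FOF group 1, subgroup 0
stars = np.load("/data5/astjmack/halos/" + run + "_FOF1_SUB0/part4dat.npy")  # reload the saved array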
Example #15
def Read_MainProp_OnlyDM_V01():
    boxSize = E.readAttribute("SUBFIND", sim_DM, tag, "/Header/BoxSize")
    z = E.readAttribute("SUBFIND", sim_DM, tag, "/Header/Redshift")
    h = E.readAttribute("SUBFIND", sim_DM, tag, "/Header/HubbleParam")
    return boxSize, 1 / (1 + z), h
Example #16
if (runlabel == 'halo'):
    haloname = snapname.split("halo")[1]
    halo_id = haloname.split("_")[1]
    mh = haloname.split("_")[2]
    ms = haloname.split("_")[3]
    sfr = haloname.split("_")[4]
    haloinfostr = 'lg M$_{\mathrm{halo}}=' + mh + '$, lg M$_{*}=' + ms + '$, SFR$=' + sfr + '$'
else:
    haloinfostr = runlabel

center = np.array([xcoord, ycoord, zcoord])

lgrid = lgrid / 1e+03
##lgridz = lgrid*0.01 # *2 #Now 2 times as of 12/30/14 # Four times longer than Lgrid in zdirection.

redshift = eagle.readAttribute("SNAP", sim, input_filename_base_pos,
                               "/Header/Redshift")
aex = eagle.readAttribute("SNAP", sim, input_filename_base_pos,
                          "/Header/ExpansionFactor")
hubble_param = eagle.readAttribute("SNAP", sim, input_filename_base_pos,
                                   "/Header/HubbleParam")
boxsize = eagle.readAttribute("SNAP", sim, input_filename_base_pos,
                              "/Header/BoxSize")

boxsize = boxsize / hubble_param * aex
print "boxsize=", boxsize
center = center / hubble_param * aex
print "center= ", center

coords_pos = eagle.readArray("SNAP",
                             sim,
                             input_filename_base_pos,
Example #17
def Halo_Matcher(sims, SN='033', n_bound = 50, tag='', output_dir='./', redshift_tracker = False, verbose=False):

    '''Return an array of group numbers which have been matched across sims.

    Arguments:
    sims -- List of simulation directories to produce the matched halo catalogue for.
            If redshift_tracker is True, a single simulation directory is expected instead.

    Keyword arguments:
    SN -- Snapshot tag required by readEagle when reading in simulation output. The default
          corresponds to redshift 0 for a BAHAMAS simulation. Note: if redshift_tracker is True,
          SN is expected to be of the form [start_SN, end_SN] and is used to create a range of
          snaps to match across, e.g. an input of ['025', '030'] creates a redshift catalogue
          for halos across snaps ['025', '026', '027', '028', '029', '030'].
    n_bound -- Number of most bound particles used when matching halos. Default is 50.
    tag -- Name for the simulation suite. Used when saving the output matched halo catalogue.
    output_dir -- Directory where the resultant halo catalogue will be written.
    redshift_tracker -- If True, match halos in a single simulation across the range of snapshots
          given by SN, rather than across different simulations.
    verbose -- Passed through to the readEagle routines to control their output.

    Return:
    catalogues -- A dictionary mapping each snap in SN to the halo catalogue generated for that
          redshift. Each halo catalogue is a numpy array of all matched halo numbers with
          shape (-1, len(sims)).

    Method:
    This halo matcher uses a bijective matching technique to match halos across simulations.
    It does this using particle IDs, which encode the initial Lagrangian positions of the particles.
    It matches all halos containing at least n_bound particles from the reference simulation
    to each simulation in sims; the reference simulation is assumed to be the first element of
    the sims list. It then matches back from these simulations to the reference simulation and
    keeps all halos which could be matched both backwards and forwards.
    The algorithm works primarily by setting up two arrays: temp_halo_catalogue_ref and
    temp_halo_catalogue_match. The former holds halo numbers which have been matched from the
    reference simulation to some simulation in sims; the latter holds halo numbers when matching
    back from that simulation to the reference simulation. The columns in these arrays are:
    column 0: all halo numbers in the reference simulation.
    column 1: all halo numbers in the matched simulation.
    '''
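    # Toy illustration (hypothetical numbers): with n_bound = 3, if halo 7 in the reference run
    # has most-bound particle IDs [12, 40, 55] and those IDs mostly fall in halo 3 of a matched
    # run, then 7 -> 3 is a forward match; it is kept only if halo 3's own most-bound particles
    # also point back to halo 7 (the bijective check described above).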

    snaps = np.array([])
    snaps = np.append(snaps,SN)

    if redshift_tracker is False:
        # Read in number of particles in the simulation
        n_part = E.readAttribute('SNAPSHOT', sims[0], snaps[0], '/Header/NumPart_Total')[1]
    elif redshift_tracker is True:
        n_part = E.readAttribute('SNAPSHOT', sims, snaps[0], '/Header/NumPart_Total')[1]

    global int_type
    if n_part >= np.power(1024,3):
        int_type = np.int64
    else:
        int_type = np.int32


    if redshift_tracker is False:
        catalogues = {}

        assert len(sims) > 1, 'Need at least 2 simulations to form a match'
        sims = np.array(sims)
        sim_ref = sims[0]
        sims = sims[1:]

        for SN_i in snaps:
            #Read in redshift of Snapshot
            redshift = np.around(E.readAttribute('SNAPSHOT', sim_ref, SN_i, '/Header/Redshift'), 3)

            # Check to see if this halo catalogue already exists:
            try:
                halo_catalogue = np.load('{}Halo_Catalogue_{}_z_{}.npy'.format(output_dir, tag, str(redshift).replace('.','p')))
                catalogues[SN_i] = halo_catalogue
                print('Existing halo catalogue found: {}Halo_Catalogue_{}_z_{}.npy'.format(output_dir, tag, str(redshift).replace('.','p')))
                continue
            except(FileNotFoundError):
                if sims is None:
                    raise ValueError('Please provide a list of simulation directories')
                else:
                    print('No halo catalogue found with tag: {}, at redshift: {}'.format(tag, redshift))
                    print('Generating new halo catalogue')

            # Set up arrays to be filled in with ordered group numbers
            ref_ordered_GNs = np.ones(n_part, dtype=int_type) * -1
            match_ordered_GNs = np.ones(n_part, dtype=int_type) * -1

            GNs = np.abs(E.readArray('PARTDATA', sim_ref, SN_i, '/PartType1/GroupNumber', verbose=verbose)) -1
            IDs = E.readArray('PARTDATA', sim_ref, SN_i, '/PartType1/ParticleIDs', verbose=verbose).astype(int_type) -1

            ref_hydro = E.readAttribute('SNAPSHOT', sim_ref, SN_i, '/Header/NumPart_Total')[0] > 0
            ref_IDs = E.readArray('SUBFIND_PARTICLES', sim_ref, SN_i, '/IDs/ParticleID', verbose=verbose).astype(int_type) - 1
            ref_group_length = E.readArray('SUBFIND_GROUP', sim_ref, SN_i, '/FOF/GroupLength', verbose=verbose).astype(int_type)

            # Remove hydro particle IDs and normalise the dm particle IDs 
            if ref_hydro:
                IDs -= n_part
                ref_ordered_GNs[IDs[IDs > 0]] = GNs[IDs > 0]

                # Remove hydro particle IDs from SUBFIND and correct group lengths
                ref_IDs -= n_part
                ref_group_length = np.add.reduceat(ref_IDs>0, np.hstack([0,np.cumsum(ref_group_length)[:-1]]))
                ref_IDs = ref_IDs[ref_IDs>0]
            else: 
                ref_ordered_GNs[IDs] = GNs

            # Find halos which contain at least n_bound particles in the reference simulation
            ref_group_numbers = np.where(ref_group_length >= n_bound)[0]

            # Make array of the n_bound most bound particles per halo in the reference simulation
            ref_most_bound = most_bound(ref_IDs, ref_group_length, n_bound)

            halo_catalogue = np.ones((np.max(ref_group_numbers)+1, len(sims)+1), dtype=int_type) * - 1
            halo_catalogue[:,0] = np.arange(0, np.max(ref_group_numbers)+1)

            for i, sim in enumerate(tqdm(sims)):

                temp_halo_catalogue = halo_matcher(ref_group_numbers, ref_ordered_GNs, \
                    ref_most_bound, match_ordered_GNs, sim, SN_i, n_bound, verbose, redshift_tracker)
                halo_catalogue[temp_halo_catalogue[:,0],i+1] = temp_halo_catalogue[:,1]

            halo_catalogue = np.delete(halo_catalogue, np.where(halo_catalogue==-1)[0],axis=0)
            np.save('{}Halo_Catalogue_{}_z_{}'.format(output_dir, tag, str(redshift).replace('.','p')), halo_catalogue)

            catalogues[SN_i] = halo_catalogue

        return catalogues

    elif redshift_tracker is True:
        assert len(snaps) == 2, ('redshift_tracker is True, please input a start and end snapshot '
                                 'in order to create a redshift catalogue')
        SN_ref = snaps[1] # will match to the end snapshot, so tracking back in redshift
        snaps = np.array(["%03d" %i for i in range(int(snaps[0]), int(snaps[1]))])[::-1]

        # Check to see if this halo catalogue already exists:
        try:
            redshift_catalogue = np.load('{}Redshift_Catalogue_{}_SN_{}-{}.npy'.format(output_dir, tag, snaps[0], SN_ref))
            return redshift_catalogue
        except(FileNotFoundError):
            print('No redshift catalogue found with tag: {}, for sim: {}'.format(tag, sims))
            print('Generating new redshift catalogue')

        for i, SN_i in enumerate(tqdm(snaps)):

            match_ordered_GNs = np.ones(n_part, dtype=int_type) * -1

            if i == 0:
                '''For the first element, need to define the reference IDs and GNs which are going
                to be used to make the matches. These correspond to the first snapshot.'''

                # Set up arrays to be filled in with ordered group numbers
                ref_ordered_GNs = np.ones(n_part, dtype=int_type) * -1

                # Read in the reference particles information, which are going to be matched to
                GNs = np.abs(E.readArray('PARTDATA', sims, SN_ref, '/PartType1/GroupNumber', verbose=verbose)) - 1
                IDs = E.readArray('PARTDATA', sims, SN_ref, '/PartType1/ParticleIDs', verbose=verbose).astype(int_type) - 1

                ''' Read in the reference particles information. This reads particles in so that they are ordered
                by their group number, i.e. biggest group to smallest group, and from most bound to least bound
                inside a group'''
                ref_hydro = E.readAttribute('SNAPSHOT', sims, SN_ref, '/Header/NumPart_Total')[0] > 0
                ref_IDs = E.readArray('SUBFIND_PARTICLES', sims, SN_ref, '/IDs/ParticleID', verbose=verbose).astype(int_type) - 1
                ref_group_length = E.readArray('SUBFIND_GROUP', sims, SN_ref, '/FOF/GroupLength', verbose=verbose).astype(int_type)

                # Remove hydro particle IDs and normalise the dm particle IDs 
                if ref_hydro:
                    IDs -= n_part
                    ref_ordered_GNs[IDs[IDs > 0]] = GNs[IDs > 0]

                    # Remove hydro particle IDs from SUBFIND and correct group lengths
                    ref_IDs -= n_part
                    ref_group_length = np.add.reduceat(ref_IDs>0, np.hstack([0,np.cumsum(ref_group_length)[:-1]]))
                    ref_IDs = ref_IDs[ref_IDs>0]
                else: 
                    ref_ordered_GNs[IDs] = GNs

                # Find halos which contain at least n_bound particles in the reference snapshot
                ref_group_numbers = np.where(ref_group_length >= n_bound)[0]

                # Make array of the n_bound most bound particles per halo in the reference snapshot
                ref_most_bound = most_bound(ref_IDs, ref_group_length, n_bound)

                ''' Set up the redshift catalogue, which will be updated upon each iteration.
                This will have length equal to the maximum group number in the reference simulation.'''
                redshift_catalogue = np.ones((np.max(ref_group_numbers)+1, len(snaps)+1), dtype=int_type) * - 1
                redshift_catalogue[:,0] = np.arange(0, np.max(ref_group_numbers)+1)

                temp_redshift_catalogue, ref_ordered_GNs, ref_IDs, ref_group_length = halo_matcher(ref_group_numbers, \
                    ref_ordered_GNs, ref_most_bound, match_ordered_GNs, sims, SN_i, n_bound, verbose, redshift_tracker)
                redshift_catalogue[temp_redshift_catalogue[:,0],i+1] = temp_redshift_catalogue[:,1]
                '''Note, this returns the matched_ordered_GNs array, and replaces the
                ref_ordered_GNs to be given to the matcher for next iteration'''
                continue

            '''if not first snapshot update the reference group numbers to groups
            successfully matched at this redshift.'''
            ref_group_numbers = redshift_catalogue[:,i]

            # Make array of the n_bound most bound particles per halo from the previous reference snapshot
            ref_most_bound = most_bound(ref_IDs, ref_group_length, n_bound)

            # Now only extract the particles of the groups we matched from the previous iteration
            index = ref_group_numbers[:, None]*n_bound + np.arange(n_bound)[None, :]
            ref_most_bound = np.concatenate(ref_most_bound[index])

            temp_redshift_catalogue, ref_ordered_GNs, ref_IDs, ref_group_length = halo_matcher(ref_group_numbers, \
                ref_ordered_GNs, ref_most_bound, match_ordered_GNs, sims, SN_i, n_bound, verbose, redshift_tracker)

            '''Now fill in the redshift catalogue. To do this, need to find which halos have been matched
            and where in the catalogue they are positioned.'''
            group_numbers_cur = temp_redshift_catalogue[:,0]
            index = np.where(np.isin(ref_group_numbers, group_numbers_cur))[0]

            redshift_catalogue[index,i+1] = temp_redshift_catalogue[:,1]

        np.save('{}Redshift_Catalogue_{}_SN_{}-{}'.format(output_dir, tag, snaps[0], SN_ref), redshift_catalogue)

    return redshift_catalogue
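
A hedged usage sketch of the matcher above; the simulation directories are placeholders, and the helper routines most_bound and halo_matcher called in the body are assumed to be defined elsewhere in the module:

if __name__ == '__main__':
    # Hypothetical BAHAMAS-style run directories; the first entry is treated as the reference simulation
    sims = ['/hpcdata5/simulations/BAHAMAS/RUN_REF/data/',
            '/hpcdata5/simulations/BAHAMAS/RUN_VAR/data/']
    catalogues = Halo_Matcher(sims, SN='033', n_bound=50, tag='EXAMPLE', output_dir='./')
    matched = catalogues['033']  # one row per matched halo, one column per simulation
    print(matched.shape)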