def main_worker(haloi,ffo, stardata,snapshot,min_snap):
    """Advance one halo's star-formation/BH state by one snapshot.

    Dispatches on ffo['key']:
        0 -- halo is empty: compute the Lyman-Werner (LW) background and
             decide whether Pop III stars form here,
        1 -- DCBH case: just keep the black-hole switch on,
        2 -- Pop II stars already exist: continue Pop II formation,
        3 -- Pop III formed here previously: transition to Pop II.

    Parameters (duck-typed, dict-like):
        haloi    -- halo record with 'snapshot', 'mvir', 'vmax', 'rvir'
        ffo      -- per-halo output state with 'key', 'bh_switch', 'coldgas',
                    'hotgas', 'blowout', 'mstar', 'mvir_prog'
        stardata -- star catalogue forwarded to makelw.lwgal
        snapshot -- current snapshot number
        min_snap -- first snapshot of the run (no LW lookback possible there)

    Returns:
        (key, bh_switch, coldgas, hotgas, blowout, mstar) read from ffo.
    """
    # NOTE(review): hpath is hard-coded to a single Caterpillar run;
    # presumably it should be shared with the module-level hpath -- confirm.
    hpath = "/bigbang/data/AnnaGroup/caterpillar/halos/middle_mass_halos/H1387186/H1387186_EB_Z127_P7_LN7_LX14_O4_NV4/"
    z_current = htils.get_z_snap(hpath,snapshot)[0]
    #print "Current redshift",z_current
#---# case empty
    if ffo['key'] == 0 :

        # At min_snap there is no previous snapshot to look back to,
        # so the LW computation (and any Pop III formation) is skipped.
        if snapshot != min_snap:
        # LW to be computed here
        # Way it is done now is to lookback one snapshot, then find galaxies with stars in them, track them to rhe start
        # and make arrays of star mass and ages for that 'one' position at the last snapshot. Thus you create an SED
        # accordingly and then compute ht ekde/kdi/Jlw

            gal_mass, gal_age, pos_gal  = makelw.lwgal(snapshot, stardata)
            # kde/kdi are returned but only jlw (local LW intensity) is used below.
            kde , kdi , jlw = lwlib.sedcompute(gal_mass,gal_age,pos_gal)

            jlw_global = lwlib.lwglobal(z_current)
            # Global J is computed as a fit

            total_lw = jlw + jlw_global

            #step2: Pop III or no Pop III
            # NOTE(review): 'mvir' is not defined in this function --
            # presumably haloi['mvir'] or a module global; confirm, else
            # this line raises NameError.
            make_PopIII = popiii.checkpopiii_lw(mvir, total_lw)

            #step3: pass final output
            if make_PopIII == 'yes':
                # NOTE(review): 'niii' is also undefined here -- TODO confirm
                # where the Pop III star count/mass argument comes from.
                smass_iii = popiii.makepopiii(niii)
                mstar = smass_iii
                # NEED TO PRINT THIS TO A FILE: GLOBALSTARCAT

                # NOTE(review): 'mstar' and 'key_update' are assigned but
                # never written back into ffo, so the return value below
                # does not reflect them -- looks like a bug; confirm.
                print "MADE IT"
                key_update = 3

#---# case DCBH
    elif ffo['key'] == 1 :

        #do nothing here
        ffo['bh_switch'] = 1

#---# case Pop II stars already exist
    # here is where the prog_info comes in handy

    elif ffo['key'] == 2 :
        popii.makepopii(haloi['snapshot'],haloi['mvir'],haloi['vmax'],haloi['rvir'],ffo['mvir_prog'],ffo['coldgas'],ffo['hotgas'],ffo['blowout'],ffo['mstar'])
        # NEED TO PRINT THIS TO A FILE: GLOBALSTARCAT
        ffo['key'] = 2
#---# case Pop III formed here at some point , make Pop II now
    elif ffo['key'] == 3 :
        popiii.makepopiii(haloi['snapshot'],haloi['mvir'],haloi['vmax'],haloi['rvir'],ffo['mvir_prog'],ffo['coldgas'],ffo['hotgas'],ffo['blowout'],ffo['mstar'])
        # NEED TO PRINT THIS TO A FILE: GLOBALSTARCAT
        # A key-3 halo transitions to key 2 (Pop II) from here on.
        ffo['key'] = 2

    # The BH switch is only kept on in the DCBH case.
    if ffo['key'] != 1:
        ffo['bh_switch'] = 0

    #return the output fields
    return ffo['key'], ffo['bh_switch'], ffo['coldgas'], ffo['hotgas'], ffo['blowout'], ffo['mstar']
Example #2
0
            'key': o_key,
            'bh_switch': o_bh_swtich,
            'coldgas': o_coldgas,
            'hotgas': o_hotgas,
            'blowout': 0,
            'mstar': o_mstar
        },
        index=first_data.index)

    first_data_with_ffo = first_data.join(ffo_data)
    first_data_with_ffo.to_csv(first_out_filename, sep=' ')

# Main snapshot loop: for every snapshot after the first, read that
# snapshot's halo input catalogue together with the PREVIOUS snapshot's
# output catalogue (written by the preceding iteration / the first-snapshot
# setup above).
# NOTE: these names (input_data, output_cat_data, ...) are module-level and
# presumably used by the matching code further down the file -- do not rename.
# NOTE(review): np.arange(min_snap + 1, max_snap) is half-open, so snapshot
# max_snap itself is never processed -- confirm this is intentional.
print "> Now looping through snapshots (%i > %i)" % (min_snap + 1, max_snap)

for snapi in np.arange(min_snap + 1, max_snap):
    # Redshift of the current snapshot (hpath is a module-level path).
    z_current = htils.get_z_snap(hpath, snapi)[0]

    print " >> Snapshot: %i | %3.2f" % (snapi, z_current)
    print " >> Reading input_data..."
    # Input catalogues are whitespace-separated, headerless; column names
    # come from the module-level input_cols.
    input_file_name = input_cat_path + input_cat_file_prefix + str(snapi)
    input_data = pd.read_csv(input_file_name,
                             delim_whitespace=True,
                             names=input_cols)

    snapi_prev = snapi - 1
    print " >> Reading output_catalogue..."
    # Previous snapshot's output catalogue carries a header row (no names=).
    output_cat_file_name = output_cat_path + output_cat_file_prefix + str(
        snapi_prev)
    output_cat_data = pd.read_csv(output_cat_file_name, delim_whitespace=True)

    #sys.exit()
def convert_tree(hpath, tree, treeoutputpath, verbose=True):
    """Convert a merger tree into per-redshift branch arrays and save them.

    A "branch" is a multi-snapshot segment of the tree with no mergers:
    it starts at a node that is a branching point (num_prog != 1) and is
    followed down through single-progenitor descendants until the next
    merger (or the tree root).

    Parameters:
        hpath          -- Caterpillar halo path, used to look up snapshot
                          times and redshifts
        tree           -- merger-tree object exposing .data, ["snap"],
                          .get_desc_map(), .getDesc(irow, desc_map) and
                          row indexing (tree[irow])
        treeoutputpath -- destination path for np.save
        verbose        -- print timing diagnostics to stdout when True

    Side effects:
        Writes one .npy file at treeoutputpath containing the branch
        lists, the redshift/time grids, the root node id and br_is_sub.

    Raises:
        ValueError if the number of nodes visited during traversal does
        not equal the number of nodes in the tree (sanity check that
        every node was processed exactly once).
    """
    # Maps to go up and down trees
    # desc_map is analogous to merger_tree_ytree.add_descendants()
    # mmp = "most massive progenitor" flag
    start = time.time()
    desc_map = tree.get_desc_map()
    if verbose:
        print "  Time to precompute tree maps: {:.1f}".format(time.time() -
                                                              start)
        sys.stdout.flush()
    num_nodes_in_tree = len(tree.data)
    num_nodes_processed = 0

    ## This gets the scale factors etc for Caterpillar
    snaps = np.unique(tree["snap"])
    times = haloutils.get_t_snap(hpath, snaps) * 1e9  #Gyr
    redshifts = haloutils.get_z_snap(hpath, snaps)
    ## Convert some things to lists for list.index() method
    snaps = list(snaps)

    # NOTE: a branch is a multi-snapshot segment of the tree that experiences no mergers
    # Declare the arrays (one sub-list per redshift, filled below)
    br_halo_ID = []  # List of connected halo IDs (in redshift order)
    br_age = []  # Age of the branch
    br_z = []  # Redshift of the branch
    br_t_merge = [
    ]  # Duration of the branches (delay between formation and merger)
    br_ID_merge = []  # Last halo ID of the branch (once it has merged)
    br_m_halo = []  # Array of dark matter halo masses
    br_r_vir = []  # Array of dark matter halo radii
    br_is_sub = [
    ]  # Array of True if the halo is a subhalo, False if is a host halo
    br_is_prim = []  # True or False depending whether the branch is primordial

    # Create an entry for each redshift
    for i_z in range(0, len(redshifts)):
        br_halo_ID.append([])
        br_age.append([])
        br_z.append([])
        br_t_merge.append([])
        br_ID_merge.append([])
        br_m_halo.append([])
        br_r_vir.append([])
        br_is_sub.append([])
        br_is_prim.append([])

    start = time.time()
    ## Loop through all nodes of the tree that are branch points
    for irow, row in enumerate(tree.data):
        i_z = snaps.index(row["snap"])
        ## If exactly one progenitor, this is not a branching point, so skip it
        ## (such nodes are visited and counted later, inside the while loop
        ## that walks down a branch).
        if row["num_prog"] == 1: continue
        # Create a new branch for the considered redshift
        br_halo_ID[i_z].append([])
        br_age[i_z].append([])
        br_z[i_z].append([])
        br_t_merge[i_z].append(0.0)
        br_ID_merge[i_z].append(0.0)
        br_m_halo[i_z].append([])
        br_r_vir[i_z].append([])
        br_is_sub[i_z].append([])

        ## Assign whether or not this is a primordial branch
        ## (a leaf: no progenitors at all)
        br_is_prim[i_z].append(row["num_prog"] == 0)

        ## Fill the start of the branch
        ## Fill the halo ID, age, mass, and radius
        # Note: the ID is the mtid!!! not sure if this is what we actually want
        # To access that object in the halo catalogs, we need origid and snapshot
        # (fill_branch_info is defined elsewhere in this module.)
        fill_branch_info(br_halo_ID, br_age, br_z, br_m_halo, br_r_vir,
                         row["id"], i_z, i_z, times, redshifts, row, br_is_sub)
        num_nodes_processed += 1
        ## Step down the tree
        desc_irow = tree.getDesc(irow, desc_map)
        if desc_irow is None: continue  # reached the root
        desc_row = tree[desc_irow]
        ## Loop through subsequent parts of the branch, defined as single-progenitor
        while desc_row["num_prog"] == 1:
            ## Fill next part of the branch
            i_z_cur = snaps.index(desc_row["snap"])
            fill_branch_info(br_halo_ID, br_age, br_z, br_m_halo, br_r_vir,
                             desc_row["id"], i_z, i_z_cur, times, redshifts,
                             desc_row, br_is_sub)
            num_nodes_processed += 1
            ## Step down the tree
            desc_irow = tree.getDesc(desc_irow, desc_map)
            if desc_irow is None: break  # reached the root
            desc_row = tree[desc_irow]

        # Calculate the time before merger
        # (desc_row is now the first node past the branch: either a merger
        # point or the last node before the root was reached)
        i_z_last = snaps.index(desc_row["snap"])
        br_t_merge[i_z][-1] = times[i_z_last] - times[i_z]
        # Copy the last halo ID (when the branch has merged)
        br_ID_merge[i_z][-1] = desc_row["id"]
    if verbose:
        print "  Time to convert: {:.1f}".format(time.time() - start)
        sys.stdout.flush()
    if num_nodes_processed != num_nodes_in_tree:
        raise ValueError(
            "ERROR! num nodes processed != num nodes in tree ({} != {})".
            format(num_nodes_processed, num_nodes_in_tree))

    start = time.time()
    # Heterogeneous payload saved as a single object array; load with
    # np.load(..., allow_pickle=True) on the reading side.
    np.save(treeoutputpath,[br_halo_ID, br_age, br_z, br_t_merge, br_ID_merge, \
                            br_m_halo, br_r_vir, br_is_prim, redshifts, times, tree[0]['id'], br_is_sub])
    if verbose:
        print "  Time to save: {:.1f}".format(time.time() - start)
        sys.stdout.flush()
        # Bare Python-2 print: emits a trailing blank line for readability.
        print
Example #4
0
    first_out_filename = output_cat_path + output_cat_file_prefix + str(min_snap)

    ffo_data = pd.DataFrame({'key':o_key, 
                             'bh_switch':o_bh_swtich,
                             'coldgas':o_coldgas, 'hotgas':o_hotgas, 
                             'blowout':0, 
                             'mstar':o_mstar},index=first_data.index)
    
    first_data_with_ffo = first_data.join(ffo_data)
    first_data_with_ffo.to_csv(first_out_filename,sep=' ')

print "> Now looping through snapshots (%i > %i)" % (min_snap+1,max_snap)

for snapi in np.arange(min_snap + 1, max_snap):
    z_current = htils.get_z_snap(hpath,snapi)[0]

    print " >> Snapshot: %i | %3.2f" % (snapi,z_current)
    print " >> Reading input_data..."
    input_file_name = input_cat_path + input_cat_file_prefix + str(snapi)
    input_data = pd.read_csv(input_file_name, delim_whitespace=True, names=input_cols)

    snapi_prev = snapi - 1
    print " >> Reading output_catalogue..."
    output_cat_file_name = output_cat_path + output_cat_file_prefix + str(snapi_prev)
    output_cat_data = pd.read_csv(output_cat_file_name, delim_whitespace=True)

    #sys.exit()
    #z_current = zl.zreturn(snapi)
    
    # matching has to happen here for each halo in the current input file