def moustakas_sham(sham_subhalo_property, scatter):
    """
    Abundance match the input subhalo property against the module-level
    ``moustakas_af`` abundance function, deconvolving the scatter first.
    """
    # box side of 250 Mpc/h converted to Mpc, assuming h = 0.7
    Lbox_h0p7 = 250./0.7
    # deconvolve the scatter (20 iterations); the remainder is unused here
    _remainder = moustakas_af.deconvolute(scatter, 20)
    return moustakas_af.match(calc_number_densities(sham_subhalo_property, Lbox_h0p7),
                              scatter=scatter, do_add_scatter=True, do_rematch=True)
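For context, a minimal sketch of how the module-level moustakas_af used above might be constructed, assuming the AbundanceMatching package that all of these examples rely on; smf_mstar and smf_phi are toy placeholders for a tabulated stellar mass function, not real data:

import numpy as np
from AbundanceMatching import AbundanceFunction, calc_number_densities

# toy stellar mass function: log10(M*/Msun) vs. number density [1/Mpc^3]
smf_mstar = np.linspace(8.0, 12.0, 50)
smf_phi = 1e-2 * 10**(-0.5 * (smf_mstar - 8.0))
moustakas_af = AbundanceFunction(smf_mstar, smf_phi, ext_range=(7.5, 12.5))

# e.g. catalog = moustakas_sham(subhalos['vpeak'], scatter=0.2)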
Example 2
def abundanceMatchSnapshot(proxy,
                           scatter,
                           lf,
                           box_size,
                           minmag=-25.,
                           maxmag=10.,
                           debug=False,
                           figname=None):

    af = AbundanceFunction(lf['mag'],
                           lf['phi'],
                           ext_range=(minmag, maxmag),
                           nbin=2000,
                           faint_end_fit_points=6)

    # check the abundance function
    if debug:
        plt.clf()
        plt.semilogy(lf['mag'], lf['phi'])
        x = np.linspace(minmag, maxmag, 101)
        plt.semilogy(x, af(x))
        plt.savefig('abundance_fcn.png')

    # deconvolution and check results (it's a good idea to always check this)
    remainder = af.deconvolute(scatter * LF_SCATTER_MULT, 40)
    x, nd = af.get_number_density_table()

    if debug:
        plt.clf()
        plt.semilogy(x, np.abs(remainder / nd))
        plt.savefig('nd_remainder.png')

    # get number densities of the halo catalog
    nd_halos = calc_number_densities(proxy, box_size)

    # do abundance matching with some scatter
    catalog_sc = af.match(nd_halos, scatter * LF_SCATTER_MULT)

    if debug:
        plt.clf()
        c, e = np.histogram(catalog_sc[~np.isnan(catalog_sc)],
                            bins=np.linspace(minmag, maxmag, 101))
        c = c / box_size**3 / (e[1:] - e[:-1])
        me = (e[:-1] + e[1:]) / 2
        plt.semilogy(me, c)
        plt.semilogy(me, af(me))
        plt.savefig('lf_in_v_out.png')

    return catalog_sc
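A hypothetical usage sketch for the function above, assuming a luminosity function table with 'mag' and 'phi' columns; the toy arrays below are placeholders, and with debug=True the check figures are written to fixed filenames:

import numpy as np

mags = np.linspace(-24.0, -16.0, 33)
lf = {'mag': mags, 'phi': 1e-3 * 10**(0.4 * (mags + 20.0))}  # toy LF
vpeak = np.random.lognormal(np.log(200.0), 0.3, 100000)      # stand-in proxy
catalog = abundanceMatchSnapshot(vpeak, scatter=0.15, lf=lf,
                                 box_size=250.0, debug=True)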
Example 3
    def abundance_match(self, alpha, scatter, Niter, repeat=20):
        """
        Returns a list of abundance matching catalogs of given alpha and scatter.

        Args:
            alpha : (float) halo matching proxy parameter
            scatter : (float) halo matching proxy parameter
            min_galmass: (float) minimum galactic mass in the catalog in log10
            Niter : (int) number of catalogs to be calculated
        
        Note:
            This function is single core only. Might bottleneck some calculations
        """
        # Halo matching proxy
        plist = self.halos['vvir']*(self.halos['vmax']/self.halos['vvir'])**alpha
        # Calculate the number densities
        nd_halos = calc_number_densities(plist, self.boxsize)
        
        def gen_catalog(i):
            af = self.__getAbundanceFunc(self.MFobj)
            af.deconvolute(scatter, repeat)
            cat_this = af.match(nd_halos, scatter)
            # Eliminate NaNs and galaxies with mass lower cut
            mask = (~np.isnan(cat_this)) & (cat_this > self.logMlim)
            N = np.count_nonzero(mask)
            cat_out = np.zeros(N, dtype={'names':('mvir', 'cat', 'x', 'y', 'z', 'gbins'),
                              'formats':('float64', 'float64', 'float64', 'float64', 'float64', 'int64')})
            cat_out['mvir'] = self.halos['mvir'][mask]
            cat_out['cat'] = cat_this[mask]
            cat_out['x'] = self.halos['x'][mask]
            cat_out['y'] = self.halos['y'][mask]
            cat_out['z'] = self.halos['z'][mask]
            cat_out['gbins'] = self.halos['gbins'][mask]
            
            return cat_out


        pool = ProcessPool(ncore)  # ncore is assumed to be defined at module level
        catalogs = pool.map(gen_catalog, np.arange(Niter))

        pool.close()
        pool.join()
        pool.terminate()

        return catalogs
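A hypothetical call of the method above, assuming sim is an instance of the surrounding class with its halo catalog, mass function object MFobj, boxsize, logMlim, and the module-level ncore already set up:

catalogs = sim.abundance_match(alpha=0.7, scatter=0.16, Niter=50)
# pool the matched masses across realizations
masses = np.concatenate([cat['cat'] for cat in catalogs])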
Example 4
    def assign_stellar_mass(self, **kwargs):
        """
        Assign stellar masses to (sub)halos by abundance matching.
        """

        if 'table' in kwargs.keys():
            table = kwargs['table']
            haloprop = table[self.prim_haloprop]
        else:
            # without a table the halo property must be passed in directly;
            # the 'prim_haloprop' key follows the halotools convention and is
            # an assumption here
            haloprop = kwargs['prim_haloprop']
        try:
            Lbox = kwargs['Lbox']
        except KeyError:
            Lbox = self._Lbox

        Lbox = np.atleast_1d(Lbox)
        if len(Lbox) == 3:
            # reduce a 3-vector box to the side of an equal-volume cube
            Lbox = (np.prod(Lbox))**(1.0/3.0)
        else:
            Lbox = Lbox[0]

        nd_halos = calc_number_densities(haloprop, Lbox)
        mstar = self.af.match(nd_halos, self.param_dict['scatter'])

        # halos with no counterpart in the abundance function get the floor mass
        mask = np.isnan(mstar)
        mstar[mask] = self.gal_min_mass

        if self.gal_log_prop is True:
            mstar = 10.0**mstar

        if 'table' in kwargs.keys():
            table[self.prim_galprop] = mstar
            return table
        else:
            return mstar
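A hypothetical usage sketch in the halotools-like style the method appears to follow; model, halo_table, and vpeak_array are placeholders:

# with a table: stellar masses are written into the prim_galprop column
halo_table = model.assign_stellar_mass(table=halo_table, Lbox=250.0)

# without a table: pass the halo property directly and get masses back
# (the 'prim_haloprop' keyword is the assumption noted in the method above)
mstar = model.assign_stellar_mass(prim_haloprop=vpeak_array, Lbox=250.0)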
Example 5
    def compute_wprp(self, params, ret_log_likelihood=False, verbose=False):
        """ Calculate the wprp (and loglikelihood) for the specific parameter 
			configuration that was passed in.
			Parameters:
				params: A vector containing [scatter,mu_cut] to be tested. Both
					parameters are assumed to be in log space.
				ret_log_likelihood: A boolean specifying if the log likelihood
					should also be returned.
				verbose: Whether or not to print wprp calcualtion outputs.
		"""
        # Load the parameters
        scatter = np.exp(params[0])
        mu_cut = np.exp(params[1])

        # We assume here that the maximum mass is stored as mvir and the
        # current mass as mvir_now. This needs to be changed if the
        # dictionary changes (or be made more general).
        halos_post_cut = self.halos['mvir_now'] / self.halos['mvir'] > mu_cut

        # Calculate what to remove due to k_nearest_neighbors
        if self.wp_keep is not None:
            wp_post_cut_keep = self.wp_keep[halos_post_cut]
        else:
            wp_post_cut_keep = np.ones(np.sum(halos_post_cut), dtype=bool)

        nd_halos = calc_number_densities(
            self.halos[self.af_criteria][halos_post_cut], self.box_size)
        # Deconvolve the scatter and generate catalogs for each mag_cut
        catalog_list = []
        for af in self.af_list:
            af.deconvolute(scatter * LF_SCATTER_MULT, self.deconv_repeat)
            catalog_list.append(
                af.match(nd_halos, scatter * LF_SCATTER_MULT,
                         do_rematch=False))

        if ret_log_likelihood:
            log_like = 0
        wp_saved_results = []
        for c_i in range(len(catalog_list)):
            catalog = catalog_list[c_i]
            sub_catalog = catalog[wp_post_cut_keep] < self.mag_cuts[c_i]

            # Extract positions of halos in our catalog
            x = self.halos['px'][halos_post_cut]
            x = x[wp_post_cut_keep]
            x = x[sub_catalog]
            y = self.halos['py'][halos_post_cut]
            y = y[wp_post_cut_keep]
            y = y[sub_catalog]
            z = self.halos['pz'][halos_post_cut]
            z = z[wp_post_cut_keep]
            z = z[sub_catalog]

            # Get the wp for the catalog
            wp_results = wp(self.box_size,
                            self.pimax,
                            self.nthreads,
                            self.rbins,
                            x,
                            y,
                            z,
                            verbose=verbose,
                            output_rpavg=True)
            # column 3 of each Corrfunc result row holds w_p
            wp_binned = np.zeros(len(wp_results))
            for i in range(len(wp_results)):
                wp_binned[i] = wp_results[i][3]
            wp_saved_results.append(wp_binned)

            if ret_log_likelihood:
                dif_vector = wp_binned - self.wp_data_list[c_i]
                log_like += -0.5 * np.dot(
                    np.dot(dif_vector, np.linalg.inv(self.wp_cov_list[c_i])),
                    dif_vector)

        if ret_log_likelihood and math.isnan(log_like):
            log_like = -np.inf

        wp_saved_results = np.array(wp_saved_results)

        # Return the log likelihood if requested
        if ret_log_likelihood:
            return wp_saved_results, log_like

        return wp_saved_results
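The Gaussian log-likelihood accumulated in the loop above inverts each covariance matrix explicitly; an equivalent but numerically safer form solves the linear system instead. A minimal sketch:

import numpy as np

def gaussian_log_like(dif_vector, cov):
    # -0.5 * d^T C^{-1} d, without forming the explicit inverse
    return -0.5 * dif_vector @ np.linalg.solve(cov, dif_vector)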
Example 6
def generate_wp(lf_list,
                halos,
                af_criteria,
                r_p_data,
                box_size,
                mag_cuts,
                pimax=40.0,
                nthreads=1,
                scatters=None,
                deconv_repeat=20,
                verbose=False):
    """	Generate the projected 2D correlation by abundance matching galaxies
		Parameters:
			lf_list: A list of luminosity functions for each mag_cut. The first 
				column is the magnitudes and thesecond column is the density in 
				units of 1/Mpc^3.
			halos: A catalog of the halos in the n-body sim that can be indexed
				into using the quantity name.
			af_criteria: The galaxy property (i.e. vpeak) to use for abundance 
				matching.
			r_p_data: The positions at which to calculate the 2D correlation
				function.
			box_size: The size of the box (box length not volume)
			mag_cuts: The magnitude cuts for w_p(r_p) (must be a list)
			pimax: The maximum redshift seperation to use in w_p(r_p) calculation
			nthreads: The number of threads to use for CorrFunc
			scatters: The scatters to deconvolve / re-introduce in the am (must
				be a list)
			deconv_repeat: The number of deconvolution steps to conduct
			verbose: If set to true, will generate plots for visual inspection
				of am outputs.
		Returns:
			w_p(r_p) at the r_p values specified by r_p_data.
	"""
    # Repeat once for each magnitude cut
    wp_binneds = []
    for mag_cut_i in range(len(mag_cuts)):
        mag_cut = mag_cuts[mag_cut_i]
        lf = lf_list[mag_cut_i]

        # Initialize abundance function and calculate the number density of the
        # halos in the box
        af = AbundanceFunction(lf[:, 0], lf[:, 1], (-25, -5))
        nd_halos = calc_number_densities(halos[af_criteria], box_size)
        if scatters is not None:
            remainders = []
            for scatter in scatters:
                remainders.append(
                    af.deconvolute(scatter * LF_SCATTER_MULT, deconv_repeat))

        # If verbose output the match between abundance function and input data
        if verbose:
            matplotlib.rcParams.update({'font.size': 18})
            plt.figure(figsize=(10, 8))
            plt.plot(lf[:, 0], lf[:, 1], lw=7, c=custom_blues[1])
            x = np.linspace(np.min(lf[:, 0]) - 2, np.max(lf[:, 0]) + 2, 101)
            plt.semilogy(x, af(x), lw=3, c=custom_blues[4])
            plt.xlim([np.max(lf[:, 0]) + 2, np.min(lf[:, 0])])
            plt.ylim([1e-5, 1])
            plt.xlabel('Magnitude (M - 5 log h)')
            plt.ylabel('Number Density (1/ (Mpc^3 h))')
            plt.legend(['Input', 'Fit'])
            plt.title('Luminosity Function')
            plt.yscale('log')
            plt.show()

        # Plot remainder to ensure the deconvolution returned reasonable results
        if verbose and scatters is not None:
            f, ax = plt.subplots(2,
                                 1,
                                 sharex='col',
                                 sharey='row',
                                 figsize=(15, 12),
                                 gridspec_kw={'height_ratios': [2, 1]})

            x, nd = af.get_number_density_table()
            ax[0].plot(x, nd, lw=3, c=custom_blues[4])
            legend = []
            for scatter in scatters:
                # _x_deconv is a private cache on AbundanceFunction holding the
                # deconvolved x-grid, keyed by the (scaled) scatter value
                ax[0].plot(af._x_deconv[float(scatter * LF_SCATTER_MULT)],
                           nd,
                           lw=3,
                           c=custom_blues_complement[2 * len(legend)])
                legend.append('Scatter = %.2f' % (scatter))
            ax[0].set_xlim([np.max(lf[:, 0]) + 2, np.min(lf[:, 0])])
            ax[0].set_ylim([1e-5, 1])
            ax[0].set_ylabel('Number Density (1/ (Mpc^3 h))')
            ax[0].legend(['Fit'] + legend)
            ax[0].set_title('Deconvolved Luminosity Function')
            ax[0].set_yscale('log')
            ax[1].set_xlabel('Magnitude (M - 5 log h)')
            ax[1].set_ylabel(r'(LF (deconv $\Rightarrow$ conv) - LF) / LF')
            ax[1].set_xlim([np.max(lf[:, 0]) + 2, np.min(lf[:, 0])])
            y_max = 0
            for r_i in range(len(remainders)):
                remainder = remainders[r_i] / nd
                ax[1].plot(x,
                           remainder,
                           lw=3,
                           c=custom_blues_complement[2 * r_i])
                y_max = max(y_max, np.max(remainder[x > np.min(lf[:, 0])]))
            ax[1].set_ylim([-1.2, y_max * 1.2])
            plt.show()

        # Conduct the abundance matching
        catalogs = []
        if scatters is not None:
            for scatter in scatters:
                catalogs.append(
                    af.match(nd_halos,
                             scatter * LF_SCATTER_MULT,
                             do_rematch=False))
        else:
            catalogs = [af.match(nd_halos)]

        wp_scatts = []
        for catalog in catalogs:
            # Apply the luminosity cutoff for the correlation function.
            sub_catalog = catalog < mag_cut
            if scatters is not None:
                print('Scatter %.2f catalog has %d galaxies' %
                      (scatters[len(wp_scatts)], np.sum(sub_catalog)))
            x = halos['px'][sub_catalog]
            y = halos['py'][sub_catalog]
            z = halos['pz'][sub_catalog]

            # Generate rbins so that the average falls at r_p_data
            rbins = np.zeros(len(r_p_data) + 1)
            rbins[1:-1] = 0.5 * (r_p_data[:-1] + r_p_data[1:])
            rbins[0] = 2 * r_p_data[0] - rbins[1]
            rbins[-1] = 2 * r_p_data[-1] - rbins[-2]

            # Calculate the projected correlation function
            wp_results = wp(box_size,
                            pimax,
                            nthreads,
                            rbins,
                            x,
                            y,
                            z,
                            verbose=False,
                            output_rpavg=True)

            # Extract the results (column 3 of each Corrfunc row holds w_p)
            wp_binned = np.zeros(len(wp_results))
            for i in range(len(wp_results)):
                wp_binned[i] = wp_results[i][3]
            wp_scatts.append(wp_binned)
        wp_binneds.append(wp_scatts)

    return wp_binneds
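A hypothetical end-to-end driver for generate_wp with toy inputs; the structured halos array and the two-column luminosity function below are placeholders, not real simulation data:

import numpy as np

rng = np.random.default_rng(0)
n = 100000
halos = np.zeros(n, dtype=[('vpeak', 'f8'), ('px', 'f8'),
                           ('py', 'f8'), ('pz', 'f8')])
halos['vpeak'] = rng.lognormal(np.log(200.0), 0.3, n)
for axis in ('px', 'py', 'pz'):
    halos[axis] = rng.uniform(0.0, 250.0, n)

mags = np.linspace(-24.0, -16.0, 33)
lf = np.column_stack([mags, 1e-3 * 10**(0.4 * (mags + 20.0))])  # toy LF

r_p = np.logspace(-1.0, 1.3, 15)
wp_binneds = generate_wp([lf], halos, 'vpeak', r_p, box_size=250.0,
                         mag_cuts=[-20.0], scatters=[0.15])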