Example #1
    def hash_to_grid(self, group_centers):
        group_centers_grid = defaultdict(list)

        gc_rad_hash = np.int32(
            mf.bin_hash(3e5 * group_centers['redshift'],
                        self.grid.radial_grid))
        gc_pol_hash = np.int32(
            mf.bin_hash(group_centers['weighted_mean_pol'],
                        self.grid.polar_grid))

        # azimuthal coordinate distance corresponding to an angular separation of
        # 'spacing' at polar angle phi, from the spherical law of cosines:
        #   cos(spacing) = cos(phi)**2 + sin(phi)**2 * cos(delta_az)
        delta_az = lambda spacing, phi: np.arccos(
            (np.cos(spacing) - np.cos(phi)**2) / (np.sin(phi)**2))

        gc_az_hash = -np.ones(gc_pol_hash.shape).astype('int')
        for p, phi_range in enumerate(
                zip(self.grid.polar_grid[:-1], self.grid.polar_grid[1:])):

            sel = gc_pol_hash == p

            if p == 0 or p == len(self.grid.polar_grid) - 2:
                # the polar caps are covered by a single azimuthal cell
                gc_az_hash[sel] = 0
            else:
                phi = phi_range[np.argmin(np.sin(phi_range))]
                azimuthal_spacing = (2 * np.pi) / np.floor(
                    (2 * np.pi) / delta_az(self.grid.angular_spacing, phi))
                azimuthal_grid = np.arange(0, 2 * np.pi + azimuthal_spacing,
                                           azimuthal_spacing)
                gc_az_hash[sel] = np.int32(
                    mf.bin_hash(group_centers['weighted_mean_az'][sel],
                                azimuthal_grid))

        # bucket each group centre by its (pol, az, rad) cell index
        for index, point_hash in enumerate(
                zip(gc_pol_hash, gc_az_hash, gc_rad_hash)):
            group_centers_grid[point_hash].append(index)

        # freeze the defaultdict so missing cells raise KeyError rather than
        # silently creating empty entries
        group_centers_grid.default_factory = None

        return gc_pol_hash, gc_az_hash, gc_rad_hash
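
Every example here leans on mf.bin_hash(values, edges) to map coordinates onto grid-cell indices, but the mf module itself is not shown. The code below is a hypothetical stand-in, assuming bin_hash returns the 0-based index of the bin each value falls into for a monotonically increasing edge array (which is what the np.int32 casts above suggest):

import numpy as np

def bin_hash_sketch(values, edges):
    # hypothetical stand-in for mf.bin_hash: 0-based bin index of each value
    # with respect to the ascending edge array `edges`
    values = np.asarray(values)
    # np.digitize returns 1-based bin numbers for ascending edges; shift to 0-based
    return np.digitize(values, edges) - 1

# toy check: radial edges every 100 km/s, three redshifts converted to cz = 3e5 * z
edges = np.arange(0.0, 1000.0, 100.0)
cz = 3e5 * np.array([0.0005, 0.0012, 0.0029])   # 150, 360, 870 km/s
print(bin_hash_sketch(cz, edges))               # -> [1 3 8] under this assumption
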
Example #2
def smooth1d(x, y, xs, func):
    """
    Sliding-window smoothing: for each sample x[i], apply `func` to the y-values of
    all points whose x lies within roughly xs/2 of x[i], and return the result in
    the original (unsorted) order of x.
    """
    x, y = np.array(x), np.array(y)

    # precompute the sort order; the original ordering is restored at the end
    x_0 = np.copy(x)

    x, I = np.sort(x), np.argsort(x)
    y = y[I]
    xs = xs / 2  # xs is the full window width; work with the half-width internally
    f = np.zeros(np.shape(x))

    # inverse permutation: I_inv undoes the sort so the output matches the input order
    dum = np.arange(len(x))
    dum = dum[I]
    I_inv = np.argsort(dum)

    assert all(x[I_inv] == x_0)

    x_min = x[0]
    x_max = x[-1]

    x_edges = np.arange(min(x) - xs, max(x) + xs, xs)
    hx = np.histogram(x, x_edges)[0]

    x_hash = np.array(mf.bin_hash(x, x_edges)).astype(int)

    # indices bounding the smoothing window [x[i] - xs, x[i] + xs] in the sorted array;
    # right_ind starts from a histogram-based guess, clamped to a valid index
    left_ind = 0
    right_ind = min(1 + hx[x_hash[0]] + hx[x_hash[0] + 1], len(x) - 1)

    for i in range(len(x)):
        if x[i] - x[left_ind] > xs:
            while x[i] - x[left_ind] > xs:
                left_ind += 1
        elif x[i] - x[left_ind] < xs:
            while x[i] - x[left_ind] < xs and left_ind > 0:
                left_ind -= 1

        if x[right_ind] - x[i] > xs:
            while x[right_ind] - x[i] > xs:
                right_ind -= 1
        elif x[right_ind] - x[i] < xs:
            while x[right_ind] - x[i] < xs and right_ind < len(x) - 1:
                right_ind += 1

        f[i] = func(y[left_ind:right_ind + 1])

    f = f[I_inv]

    return f
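
A minimal usage sketch for smooth1d, assuming numpy is imported as np and that mf.bin_hash behaves like the stand-in sketched after Example #1. Passing np.median gives a running median whose window half-width is xs/2 (the function halves xs internally), and the result comes back in the original, unsorted order of x:

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0.0, 10.0, size=500)             # unsorted sample positions
y = np.sin(x) + rng.normal(scale=0.3, size=x.size)

y_med = smooth1d(x, y, xs=1.0, func=np.median)   # running median, aligned with x
y_avg = smooth1d(x, y, xs=1.0, func=np.mean)     # running mean over the same window
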
Example #3
 def __init__(self,points,spacing):
     """
     var points: list of points, where each point is a tuple of its coordinates
     var spacing: the spacing of the grid. A number, or a tuple of one spacing per dimension
     """
     super(grid, self).__init__(list)
     #inits self as an instance of defaultdict, 
     #with the default grid element being an empty list
     
     self.points = np.array(points).astype(float)
     self.spacing = spacing
     myHash = lambda dim,space: np.int32(mf.bin_hash(
                                                 dim, 
                                                 np.arange(
                                                             np.floor(min(dim)/space)*space,
                                                             np.ceil(max(dim)/space)*space+space,
                                                             space
                                                           )
                                               ))
     
     if np.ndim(self.spacing) == 1 and len(self.spacing) == len(self.points[0]):
         # one spacing per dimension
         dims_hash = [ myHash(dim,space) for dim,space in zip( zip(*self.points), self.spacing ) ]
     elif np.ndim(self.spacing) == 0:
         # a single spacing shared by every dimension
         dims_hash = [ myHash(dim,self.spacing) for dim in zip(*self.points) ]
     else:
         raise Exception(
             "spacing must either be a number, or a tuple with one spacing per dimension"
             )
     
     self.ndims = len(dims_hash)
     self.bounds = tuple( (min(dim),max(dim)) for dim in dims_hash )
     
     point_hash = zip( *dims_hash )
         
     for index,point in enumerate(point_hash):
         self[point].append(index)
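
A hypothetical usage sketch for the generic grid above. It assumes the snippet belongs to a class declared as class grid(defaultdict) (implied by the super(grid, self).__init__(list) call) and that mf.bin_hash returns 0-based bin indices, so the exact cell keys shown depend on that assumption:

import numpy as np

points = [(0.1, 0.2), (0.15, 0.25), (3.7, 4.2)]
g = grid(points, spacing=1.0)   # unit cells; a per-dimension spacing tuple is also accepted

# points are bucketed by integer cell index; nearby points share a cell
print([tuple(map(int, k)) for k in sorted(g.keys())])   # expected: [(0, 0), (3, 4)]
print(g[(0, 0)])                                        # expected: [0, 1] -- the two nearby points

Example #4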
    def run(self, l_p, l_z, mhalo_model, B=10, max_iter=5):
        def proj_sep_lite(angular_separation, R_comoving):
            """
            converts angular separation on sky to projected separation, assumes a flat cosmology.
            """
            projected_separation = angular_separation * R_comoving  #comoving rproj
            return projected_separation

        # 1: Run simple group finder
        #P = self.link_pairs(l_p,l_z,density='redshift')
        P = self.link_pairs(l_p, l_z, density='global')
        groupIDs, groups = self.merge_links(P)

        iter_results = [[] for i in range(max_iter)]
        iter_results[0].append((groupIDs, groups))

        weights = 10**self.stellarmass

        for N_iter in range(max_iter):
            #define Mh-M* relation
            if N_iter == 0:
                #TotalMs = halo_mass_model.SimpleModel(load_params=False).get_features(self.sample,groups)
                #features = np.array([TotalMs,TotalMs**2]).T.reshape(-1,2)

                Model = halo_mass_model.SimpleModel(
                    load_params=False
                )  #.fit( np.array([Ms,Ms**2]).T.reshape(-1,2), Mh )
                Model.Model.intercept_ = 25.020301549565737
                Model.Model.coef_ = np.array([-3.34185887, 0.20127182])
            else:
                #assign halo masses to groups via abundance matching

                #src = '/scratch/Documents/Conformity2019/SLH2020/models/abundance_matching/complete/'
                #AM = halo_mass_model.AbundanceMatching(src=src)

                #src='/scratch/Documents/Conformity2019/SLH2020/models/z_abundance_matching/'
                #AM = halo_mass_model.RedshiftDependentAbundanceMatching(src=src)

                AbundanceMh = mhalo_model.predict(self.sample, groups)
                TotalMs = mhalo_model.get_features(self.sample,
                                                   groups,
                                                   training=False)

                if len(TotalMs) == 2:
                    TotalMs = TotalMs[0]

                #define Mh-M* relation
                Model = halo_mass_model.Interpolator(TotalMs, AbundanceMh)

            Converged = [0]
            inner_iter = 0
            while np.mean(Converged) < 0.98 and inner_iter < 20:
                #while not all(Converged) and inner_iter<20:
                # For the current Mh-M* relation, iterate until group memberships are converged
                TrueMh = Model.predict(self.sample, groups)
                Mh = 10**(TrueMh - 14 + np.log10(0.673)
                          )  # units of 10**14 h**-1 Msun

                #should be luminosity weighted, I'll use mass
                GroupsRedshift = np.array([
                    np.average(self.redshift[group], weights=weights[group])
                    for group in groups.values()
                ])
                GroupComDist = np.array([
                    np.average(self.R_comoving[group], weights=weights[group])
                    for group in groups.values()
                ])

                r180 = (1.26 / 0.673) * Mh**(1 / 3) * (1 + GroupsRedshift)**(
                    -1)  #Mpc
                vel_disp = 397.9 * Mh**0.3214  #units of km/s

                #concentration = 10**( 1.02 - 0.109*(TrueMh-12) ) #Maccio 2007
                concentration = 10**(1.071 - 0.098 * (TrueMh - 12)
                                     )  #Maccio 2007
                r_scale = r180 / concentration

                PossibleMemberOf = {
                    idx: []
                    for idx in range(self.sample.count)
                }

                # 4: Update group memberships using tentative halo information
                weighted_mean_az = np.array(
                    list(
                        map(
                            lambda members: np.average(
                                self.az[members], weights=weights[members]),
                            list(groups.values()))))
                weighted_mean_pol = np.array(
                    list(
                        map(
                            lambda members: np.average(
                                self.pol[members], weights=weights[members]),
                            list(groups.values()))))
                xgc, ygc, zgc = mf.sphere2cart(weighted_mean_az,
                                               weighted_mean_pol)

                group_centers_IDs = list(groups.keys())
                group_centers_grid = defaultdict(list)
                gc_rad_hash = np.int32(
                    mf.bin_hash(3e5 * GroupsRedshift, self.grid.radial_grid))
                gc_pol_hash = np.int32(
                    mf.bin_hash(weighted_mean_pol, self.grid.polar_grid))

                #compute delta_az (azimuthal coordinate distance) corresponding to 'spacing'
                delta_az = lambda spacing, phi: np.arccos(
                    (np.cos(spacing) - np.cos(phi)**2) / (np.sin(phi)**2))

                gc_az_hash = -np.ones(gc_pol_hash.shape).astype('int')
                for p, phi_range in enumerate(
                        zip(self.grid.polar_grid[:-1],
                            self.grid.polar_grid[1:])):

                    sel = gc_pol_hash == p

                    if p == 0 or p == len(self.grid.polar_grid) - 2:
                        gc_az_hash[sel] = 0
                    else:
                        phi = phi_range[np.argmin(np.sin(phi_range))]
                        azimuthal_spacing = (2 * np.pi) / np.floor(
                            (2 * np.pi) /
                            delta_az(self.grid.angular_spacing, phi))
                        azimuthal_grid = np.arange(
                            0, 2 * np.pi + azimuthal_spacing,
                            azimuthal_spacing)
                        gc_az_hash[sel] = np.int32(
                            mf.bin_hash(weighted_mean_az[sel], azimuthal_grid))

                for index, point_hash in enumerate(
                        zip(gc_pol_hash, gc_az_hash, gc_rad_hash)):
                    group_centers_grid[point_hash].append(index)

                group_centers_grid.default_factory = None

                t = time.time()
                for elem, gcs in group_centers_grid.items():
                    neighbours = np.array(
                        list(
                            chain(*list(
                                map(self.grid.__getitem__,
                                    self.grid.neighbours(elem))))))
                    for gc in gcs:
                        angular_separation = np.arccos(
                            self.x[neighbours] * xgc[gc] +
                            self.y[neighbours] * ygc[gc] +
                            self.z[neighbours] * zgc[gc])
                        angular_separation[np.isnan(angular_separation)] = 0
                        mean_R = (self.R_comoving[neighbours] +
                                  GroupComDist[gc]) / 2

                        rp = angular_separation * mean_R
                        dz = self.redshift[neighbours] - GroupsRedshift[gc]

                        # membership probability: (H0 / c) * Sigma(rp) * p(dz),
                        # with H0 = 67.3 km/s/Mpc and c = 3e5 km/s
                        P_M = ((67.3 / 3e5) *
                               self.Sigma(rp, r_scale[gc], concentration[gc]) *
                               self.p(dz, vel_disp[gc], GroupsRedshift[gc]))

                        for idx in np.where(P_M > B)[0]:
                            PossibleMemberOf[neighbours[idx]].append(
                                (group_centers_IDs[gc], P_M[idx]))

                print('Inner iteration number ', inner_iter + 1)
                print(time.time() - t, ' seconds')

                newgroupIDs, newgroups = self.Assign(self.ResolveMergers,
                                                     PossibleMemberOf)
                print(
                    'Scores: ',
                    MyEvaluate(self.sample, self.sample.data['FOFCentralGal'],
                               newgroupIDs))

                Converged = groupIDs == newgroupIDs
                print(np.mean(Converged), ' fraction of group IDs unchanged')

                print(' ')

                if sum(map(len, newgroups.values())) != self.sample.count:
                    raise Exception(
                        'Total number of galaxies not preserved, something\'s wrong.'
                    )

                groupIDs, groups = newgroupIDs, newgroups
                iter_results[N_iter].append((groupIDs, groups))
                inner_iter += 1

            print('Iteration {} complete'.format(N_iter + 1))

        return iter_results


#class group:
#    def __init__(self,gf,ID,members):
#        self.parent = gf
#        self.id,self.members = ID, members
#
#        self.HaloMass = self.parent.HaloMassModel.predict(self.parent.sample,{self.id:self.members})
#
#        Mh = 10**(self.HaloMass - 14 + np.log10(0.673)) #units of 10**14 h**-1 Msun, for convenience
#
#        self.az, self.pol = np.mean(self.parent.az[members]), np.mean(self.parent.pol[members])
#        self.redshift = np.mean(self.parent.redshift[members])
#        self.R_comoving = np.mean(self.parent.R_comoving[members])
#        self.r180 = (1.26/0.673) * Mh**(1/3) * (1+self.redshift)**(-1) #Mpc
#        self.vel_disp = 397.9 * Mh**0.3214 #units of km/s
#        self.concentration = 10**( 1.02 - 0.109*(self.HaloMass-12) ) #Maccio 2007
#        self.r_scale = self.r180/self.concentration
#
#    def __repr__(self):
#        return '{}: {}'.format(self.id, self.members)
#
#    def rp(self):
#        def proj_sep_lite( angular_separation, R_comoving):
#            """
#            converts angular separation on sky to projected separation, assumes a flat cosmology.
#            """
#            projected_separation = angular_separation*R_comoving #comoving rproj
#            return projected_separation
#
#        xgc,ygc,zgc = sphere2cart( self.az, self.pol )
#
#        angular_separation = np.arccos( self.parent.x*xgc +
#                                        self.parent.y*ygc +
#                                        self.parent.z*zgc )
#        angular_separation[np.isnan(angular_separation)] = 0
#
#        mean_R = ( self.parent.R_comoving + self.R_comoving )/2
#
#        rp = proj_sep_lite( angular_separation, mean_R )
#
#        return rp
#
#    def dz(self):
#        return self.parent.redshift - self.redshift
#
#    def Sigma(self):
#        return self.parent.Sigma( self.rp(), self.r_scale, self.concentration )
#
#    def pz(self):
#        return self.parent.p( self.dz(), self.vel_disp, self.redshift )
#
#    def Pm(self):
#        Sigma = self.parent.Sigma( self.rp(), self.r_scale, self.concentration )
#        pz = self.parent.p( self.dz(), self.vel_disp, self.redshift )
#
#        return (67.3/3e5) * Sigma * pz
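
Example #4 above is one outer loop of a halo-based group finder: link pairs, estimate group halo masses, derive r180, velocity dispersion and concentration, then re-test every nearby galaxy against each group via P_M = (H0/c) * Sigma(rp) * p(dz) and keep it when P_M > B. Below is a hypothetical driver; the class name, constructor and linking-length values are assumptions (only run() is shown above), and mhalo_model must expose the predict and get_features methods used inside run():

# assumed constructor for the class that owns run(); not part of the listing above
finder = GroupFinder(sample)
# any halo-mass model with .predict(sample, groups) and
# .get_features(sample, groups, training=False); assumed here to be pre-trained
mhalo_model = halo_mass_model.SimpleModel(load_params=True)

iter_results = finder.run(l_p=0.06,          # projected linking length (illustrative)
                          l_z=6.0,           # line-of-sight linking length (illustrative)
                          mhalo_model=mhalo_model,
                          B=10,              # membership threshold, as in P_M > B
                          max_iter=5)

# each outer iteration stores one (groupIDs, groups) pair per inner iteration;
# the final catalogue is the last entry of the last outer iteration
groupIDs, groups = iter_results[-1][-1]
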
Example #5
    def __init__(self,points,spacing):
        """
        var points: list of points, given as (polar, azimuthal, radial) in ([rad],[rad],[arbitrary])
        var spacing: the spacing of the grid, given as (angular_spacing,radial_spacing)
        """
        super(spherical_grid, self).__init__(list)
        #inits self as an instance of defaultdict, 
        #with the default grid element being an empty list
        
        if len(spacing) != 2:
            raise Exception("spacing must be a tuple of (angular_spacing,radial_spacing)")
        
        pol,az,rad = zip(*points)
        az = np.array(az)
            
        self.ndims = 3
        self.points = points
        self.angular_spacing,self.radial_spacing = spacing
        
        self.radial_grid = np.arange(
                                       np.floor(min(rad)/self.radial_spacing)*self.radial_spacing,
                                       np.ceil(max(rad)/self.radial_spacing)*self.radial_spacing+self.radial_spacing,
                                       self.radial_spacing
                                     )
        self.rad_hash = np.int32(mf.bin_hash(rad, self.radial_grid))
        self.rad_bounds=(min(self.rad_hash),max(self.rad_hash))
        
        if min(az)<0 or max(az)>2*np.pi or min(pol)<0 or max(pol)>np.pi:
            raise Exception("az must be [0,2pi], pol must be [0,pi]")
        
        self.polar_spacing = np.pi/np.floor(np.pi/self.angular_spacing)
        self.polar_grid = np.arange( 0, np.pi+self.polar_spacing, self.polar_spacing )
        self.pol_hash = np.int32( mf.bin_hash(pol,self.polar_grid) )
        
        #compute delta_az (azimuthal coordinate distance) corresponding to 'spacing'
        delta_az = lambda spacing,phi: np.arccos(( np.cos(spacing)-np.cos(phi)**2 )/( np.sin(phi)**2 ) )
        
        self.az_hash = -np.ones(self.pol_hash.shape).astype('int')
        for p,phi_range in enumerate( zip(self.polar_grid[:-1],self.polar_grid[1:]) ):
            
            sel=self.pol_hash==p
            
            if p==0 or p==len(self.polar_grid)-2:
                self.az_hash[sel] = 0
            else:
                phi = phi_range[np.argmin( np.sin(phi_range) )]
                azimuthal_spacing = (2*np.pi)/np.floor((2*np.pi)/delta_az(self.angular_spacing,phi))
                azimuthal_grid = np.arange( 0, 2*np.pi+azimuthal_spacing, azimuthal_spacing )
                self.az_hash[sel] = np.int32( mf.bin_hash(az[sel],azimuthal_grid) )
        
        
        
        for index,point_hash in enumerate(zip( self.pol_hash,self.az_hash,self.rad_hash )):
            self[point_hash].append(index)
        
        
        # number of azimuthal cells in each polar ring
        self.f = dict()
        for p, phi_range in enumerate(zip(self.polar_grid[:-1], self.polar_grid[1:])):
            if p == 0 or p == len(self.polar_grid) - 2:
                # the polar caps hold a single azimuthal cell
                self.f[p] = 1
            else:
                # evaluate delta_az only away from the caps, where sin(phi) > 0
                phi = phi_range[np.argmin(np.sin(phi_range))]
                delta_az = np.arccos((np.cos(self.angular_spacing) - np.cos(phi)**2) / (np.sin(phi)**2))
                self.f[p] = np.floor((2 * np.pi) / delta_az)
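
A hypothetical usage sketch for spherical_grid, assuming it subclasses defaultdict (implied by the super(...) call) and that mf.bin_hash matches the stand-in after Example #1. Points enter as (polar, azimuthal, radial) with angles in radians; indexing the grid with a (pol, az, rad) cell tuple returns the indices of the points in that cell. The neighbours() method used by Example #4 is not part of this snippet:

import numpy as np

rng = np.random.default_rng(1)
pol = rng.uniform(0.0, np.pi, 100)         # polar angle [rad]
az = rng.uniform(0.0, 2 * np.pi, 100)      # azimuthal angle [rad]
cz = 3e5 * rng.uniform(0.01, 0.05, 100)    # radial coordinate, here cz in km/s

sgrid = spherical_grid(list(zip(pol, az, cz)),
                       spacing=(np.radians(2.0), 500.0))  # (angular, radial) spacing

# the per-point hash arrays line up with the input, so a point's cell can be read back
cell = (sgrid.pol_hash[0], sgrid.az_hash[0], sgrid.rad_hash[0])
print(cell, sgrid[cell])   # the first point's cell and every point index stored in it
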