Example #1
def launch(sampler, prior, alpha, eps, ratio_min=1e-2, surveypath=None, pool=None, plotting=False):
    """ Launches pools
     Could become implemented in abc-pmc itself"""
    
    pools = []
    for pool in sampler.sample(prior, eps, pool):
        eps_str = ", ".join(["{0:>.4f}".format(e) for e in pool.eps])
        print("T: {0}, eps: [{1}], ratio: {2:>.4f}".format(pool.t, eps_str, pool.ratio))

        for i, (mean, std) in enumerate(zip(*abcpmc.weighted_avg_and_std(pool.thetas, pool.ws, axis=0))):
            print("    theta[{0}]: {1:>.4f} +- {2:>.4f}".format(i, mean, std))

        eps.eps = np.percentile(pool.dists, alpha, axis=0)  # shrink eps to the alpha-th percentile of the accepted distances
        pools.append(pool)
        
        iom.pickleObject(pools, surveypath, 'launch_pools', append=False)

        """ Creates plots on the fly """
        if plotting:
            plot_abctraces(pools, surveypath)
 
        if pool.ratio < ratio_min:
            print('Ended abc-pmc because the acceptance ratio fell below ratio_min = %.3e' % (ratio_min))
            break
    sampler.close()
    return pools
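
A hedged usage sketch for launch() above, assuming the public abcpmc API (Sampler, TophatPrior, ConstEps); data, simulate, and distance are user-supplied placeholders:

import abcpmc

prior = abcpmc.TophatPrior([0.0, 0.0], [1.0, 1.0])    # flat prior over two parameters
sampler = abcpmc.Sampler(N=100, Y=data, postfn=simulate, dist=distance)
eps = abcpmc.ConstEps(20, [1e3])                      # generous start; launch() shrinks it via np.percentile
pools = launch(sampler, prior, alpha=75, eps=eps, surveypath='abc_run/')
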
Example #2
def mupro_Output_NicePickleClusters(in_queue, output):
    # Just pickles the cluster realisations for the highest efficiency; mechanism: it asks for the image

    for well in in_queue:
        (Clrealisations, Rmodel) = well
        outputMod = output  # + '_%05i/' % (Rmodel.id)
        iom.check_mkdir(outputMod + '/pickled')
        for GCl in Clrealisations:
            filename = 'MonsterPickle-Snap%02i' % (GCl.mockobs.snap)
            iom.pickleObject((GCl, Rmodel), outputMod + '/pickled/', filename, append=True)
    return
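
A minimal, hypothetical driver for the function above; models and GClrealisations stand in for objects produced elsewhere in the pipeline:

wells = [(GClrealisations, Rmodel) for Rmodel in models]  # one queue item per radio model
mupro_Output_NicePickleClusters(wells, output='/data/ClusterBuster-Output/run')
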
Example #3
def AddFilesToSurvey(survey, savefolder, verbose=True, clusterwise=False):
    """
    Adds galaxy cluster in a specified folder to an survey.
    
    Currently clusterwise=True is the case for the normal Run() and clusterwise=False is the case for the ABC-routine.
    How about CW?
    """
    """ This replaces tha galaxy clusters of a survey object with all pickled galaxy clusters in an particular 'savefolder' """
    minsize = 1
    location = savefolder + '/pickled/'
    location = location.replace('//', '/')
    GCls = []

    if clusterwise:
        fn = 'GCl'
    else:
        fn = 'MonsterPickle'

    if verbose: print('%s%s*.pickle' % (location, fn))

    for filename in glob.glob('%s%s*.pickle' % (location, fn)):  # glob.glob('%srelics/*.pickle' % (location)):
        if verbose:
            print('surveyutil::AddFilesToSurvey()::', filename)
        if os.path.getsize(filename) > minsize:
            if verbose:
                print('surveyutil::AddFilesToSurvey()::filename', filename)
            items = iom.unpickleObjectS(filename)
            for (Cluster, Rmodel) in items:
                GCls.append(Cluster)
            os.remove(filename)

    GCls = sorted(GCls, key=iom.Object_natural_keys)
    survey.GCls = GCls

    if len(GCls) == 0:
        print('surveyutil::AddFilesToSurvey() len(GCls)', len(GCls))

    iom.pickleObject(survey, location, 'Survey')
    return survey
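
For context: the files consumed here are the ones Example #2 writes with append=True. A sketch of the same round trip using plain pickle instead of the iom helpers (the folder layout is inferred from the calls above):

import glob, os, pickle

def load_and_clear(savefolder, prefix='MonsterPickle'):
    """ Collect all pickled (cluster, Rmodel) tuples, then delete the files """
    clusters = []
    for path in glob.glob(os.path.join(savefolder, 'pickled', prefix + '*.pickle')):
        with open(path, 'rb') as f:
            while True:  # append=True stores several objects per file
                try:
                    gcl, rmodel = pickle.load(f)
                    clusters.append(gcl)
                except EOFError:
                    break
        os.remove(path)
    return clusters
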
Example #4
def ABC_dist_severalMetrices(SurveyA, SurveyB, metrics=['number'], outpath='', delal=True,
                             verbose=False, stochdrop=True, phoenixdrop=False):
    """ 
    Returns the distance within the MUSIC-2/NVSS metric
    you have: data,model
    
    SurveyA: realworld
    SurveyB: model
    
    """

    print('ABC_dist_severalMetrices', metrics)
    if verbose:
        print(SurveyA.name, SurveyB.name)

    if phoenixdrop:
        SurveyB.relic_filter_kwargs.update({"alpha_tresh":-1.65})
    if stochdrop:
        SurveyB.set_seed_dropout()

    distances = []
    SurveyA.FilterCluster(**SurveyA.cluster_filter_kwargs)
    SurveyB.FilterCluster(**SurveyB.cluster_filter_kwargs)

    if len(SurveyB.filteredClusters) < 3:
        distances = [1e9 for m in metrics]
        return distances

    print('SurveyA.GCls', len(SurveyA.GCls), '-->', 'SurveyA.filteredClusters', len(SurveyA.filteredClusters))
    print('SurveyB.GCls', len(SurveyB.GCls), '-->', 'SurveyB.filteredClusters', len(SurveyB.filteredClusters))

    relicsA = SurveyA.fetch_totalRelics()
    A = np.array([min(-1, relic.alpha()) for relic in relicsA])  # note: computed but not used below
    for metric in metrics:
        print('metric:', metric)
        if metric == 'number':
            distance = ABC_summaryStatistics_number_relics([SurveyA,SurveyB], verbose=verbose)
        elif metric == 'number_cluster':
            distance = ABC_summaryStatistics_number_cluster([SurveyA,SurveyB], verbose=verbose)
        elif metric == 'flux_kolm':
            distance = ABC_summaryStatistics_flux_komogorov([SurveyA,SurveyB])
        elif metric == 'polarHisto':
            distance = ABC_summaryStatistics_polarHisto([SurveyA,SurveyB])
        elif metric == 'polarHisto_simple':
            distance = ABC_summaryStatistics_polarHisto_simple([SurveyA,SurveyB])
        elif metric == 'logMach':
            distance = ABC_summaryStatistics_logMach([SurveyA,SurveyB])
        elif metric == 'alpha':
            distance = ABC_summaryStatistics_alpha([SurveyA,SurveyB])
        elif metric == '2DKS':
            distance = ABC_summaryStatistics_2DKS([SurveyA,SurveyB])
        elif metric == 'PCA':
            distance = ABC_summaryStatistics_PCA([SurveyA,SurveyB])
        else:
            print(metric, 'is not an implemented metric!')
            distance = 1e9  # sentinel, so an unknown metric cannot reuse the previous loop's distance
        distances.append(distance)

    print('surveymetrics::ABC_dist_severalMetrices::', SurveyA.name, 'VS', SurveyB.name, 'metric dissimilarity:',
          ['%s: %.3e' % (m,d) for (m,d) in zip(metrics,distances)])


    """ This puts the survey to a bagged survey folder and increases the counter.
    It might be interesting to know if this number is also the number of the runs.
    """
    if delal:
        file_path = "%s/pickled/Survey.pickle" % (SurveyB.outfolder)
        if os.path.isfile(file_path):
            n = 0
            while n < 10:
                try:
                    with open('%s/count.txt' % (outpath), 'r') as f:
                        SURVEYCOUNT = int(float(f.readline()))
                    print('SURVEYCOUNT', SURVEYCOUNT)  # Implemented to write the correct survey output files for the ABC approach
                    with open('%s/count.txt' % (outpath), 'w') as f:
                        f.write(str(SURVEYCOUNT+1))

                    SurveyB.name = "%s_%05i" % (SurveyB.name_short, SURVEYCOUNT)
                    outfolder_old = SurveyB.outfolder
                    SurveyB.outfolder = '/data/ClusterBuster-Output/%s' % (SurveyB.name)

                    if verbose:
                        print('surveymetrics::ABC_dist_severalMetrices:: SurveyB.name, surveyB.outfolder:',
                              SurveyB.name, SurveyB.outfolder)
                        print('surveymetrics::ABC_dist_severalMetrices:: file_path, os.path.isfile(file_path)',
                              file_path, os.path.isfile(file_path))

                    iom.check_mkdir(outpath + '/surveys/')
                    iom.pickleObject(SurveyB, "%s/surveys/" % outpath, "Survey_%05i" % SURVEYCOUNT)  #obj, location, oname, append = False
                    print("shutil.rmtree:", outfolder_old)
                    shutil.rmtree(outfolder_old)
                    n = 10
                except Exception:
                    n += 1
                    time.sleep(2)
                    print('surveymetrics::ABC_dist_severalMetrices:: Could not write counter.')
                    print("___ cp -rf %s/pickled/Survey.pickle %s/surveys/Survey_unknown.pickle" % (SurveyB.outfolder, outpath))

        """ We increment the current logfile number by one ... just to show how much we have progressed """
        with open("%s/logfile.txt" % outpath, "a") as f:
            Rm  = SurveyB.Rmodel
            Sm  = SurveyB.surmodel
            eff = SurveyB.Rmodel.effList[0]

            line = ''
            for dist in distances:
                line += "%8.5e " % dist
            line += '%7i %+.4e %+.4e %+.4e' % (SURVEYCOUNT, eff, Rm.B0, Rm.kappa)

            if isinstance(Rm, cbclass.PreModel_Hoeft):
                line += ' %+.4e %+.4e %+.4e %+.4e %+.4e' % (Rm.t0, Rm.t1, Rm.ratio, Rm.n0, Rm.n1)
            if isinstance(Rm, cbclass.PreModel_Gelszinnis):
                line += ' %+.4e %+.4e %+.4e %+.4e' % (Rm.p0, Rm.p_sigma, Rm.sigmoid_0, Rm.sigmoid_width)
            if Sm is not None:
                line += ' %+.4e %+.4e' % (Sm.relic_filter_pca_a, Sm.relic_filter_pca_b)
            line += '\n'

            f.write(line)
    
    return distances
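
The if/elif chain above maps metric names to summary-statistic functions; the same dispatch can be sketched with a dict, assuming the ABC_summaryStatistics_* functions used above:

METRIC_FUNCS = {
    'number': ABC_summaryStatistics_number_relics,
    'number_cluster': ABC_summaryStatistics_number_cluster,
    'flux_kolm': ABC_summaryStatistics_flux_komogorov,
    'polarHisto': ABC_summaryStatistics_polarHisto,
    'polarHisto_simple': ABC_summaryStatistics_polarHisto_simple,
    'logMach': ABC_summaryStatistics_logMach,
    'alpha': ABC_summaryStatistics_alpha,
    '2DKS': ABC_summaryStatistics_2DKS,
    'PCA': ABC_summaryStatistics_PCA,
}

def metric_distance(metric, surveys):
    func = METRIC_FUNCS.get(metric)
    if func is None:
        print(metric, 'is not an implemented metric!')
        return 1e9  # same sentinel as above
    return func(surveys)
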
Example #5
def Run_MockObs(bulked,
                GClrealisations,
                CASAmock=False,
                saveFITS=False,
                writeClusters=False,
                savewodetect=False,
                log=False,
                side_effects=False,
                filter_sp_phase=False,
                extract_subtracted=True):
    """ Runs a mock observation
        side_effects: put   True if you want the input galaxy cluster to be changed,
                            False if you want only a copy to be influenced """
    (snap, Rmodel, emptySurvey) = bulked
    savefolder = emptySurvey.outfolder
    iom.check_mkdir(savefolder)

    #Variant B: Clean mask and .fits --> Source parameters; like variant A from step 4 on
    if CASAmock:
        import drivecasa as drica
        casa = drica.Casapy()

    smt = iom.SmartTiming(
        rate=5e4
    )  # logf=outf+'smt.log'  #;  print( '###==== Step2a:  Loading configuration files ====###'  )

    #  Units, conversion factors, and input variables
    fac_rho = loadsnap.conversion_fact_gadget_rho_to_nb(
        snap.head) * loadsnap.conversion_fact_ne_per_nb()  #electrons/cm^-3
    fac_T = loadsnap.conversion_fact_gadget_U_to_keV(snap.head)  # in [keV]
    fac_T2 = loadsnap.conversion_fact_gadget_U_to_keV(
        snap.head) / 8.61732814974056e-08  # to K
    """ determines if you want to change the galaxy cluster or not """
    if side_effects:
        GClrealisations_used = GClrealisations
    else:
        GClrealisations_used = copy.deepcopy(GClrealisations)

    for jj, gcl in enumerate(GClrealisations_used):
        #  Load variables and setting survey parameters
        mockobs = gcl.mockobs
        z = gcl.z.value
        hsize = mockobs.hsize
        dinf = gcl.dinfo  # Some parameters of dinfo could change, because of adaptive pixelsize etc.
        fac_x = loadsnap.comH_to_phys(snap.head, z)
        eff = Rmodel.effList[0]

        #  Units, conversion factors, and input variables
        radiounit = myu.radiounit_A * eff  # erg/s/Hz    --- Unit of particle luminosity in .radio snaps
        rot = mathut.Kep3D_seb(Omega=mockobs.theta,
                               i=mockobs.phi,
                               omega=mockobs.psi)
        posrot = np.dot(snap.pos, rot) * fac_x
        #velrot   = np.dot(snap.vel, rot)  Taken out, as long as we don't need to plot the velocity vectors

        smt(task='Bin_radio_[sub]')
        #print( '###==== Step 3b:  Binning cluster data cube  (radio) ====###'
        # Parameters implied
        # See Nuza+ 2012 Equ (1)
        relativistics = (1 + z)
        s_radio_SI = radiounit / myu.Jy_SI / (
            4 * np.pi * (gcl.cosmoDL * 1e-2)**2
        ) * relativistics  # radiounit*s_radio_SI is Jy/particle (unit conversion)
        nbins = int(2 * hsize / (gcl.cosmoPS * dinf.spixel))
        if nbins > mockobs.binmax:
            binsold = nbins
            spixelold = dinf.spixel
            dinf.spixel = dinf.spixel * np.power(
                float(nbins) / float(mockobs.binmax), 0.5)
            mockobs.hsize = mockobs.hsize * np.power(
                float(nbins) / float(mockobs.binmax), -0.5)
            dinf.update_Abeam()
            nbins = mockobs.binmax
            if log:
                print(
                    'At z=%.3f with a pixel size of %.1f arcsec, the number of pixels per image is %i^2. Therefore the pixel scale was increased to %.1f arcsec and the binned box size decreased to %i kpc.'
                    % (z, spixelold, binsold, dinf.spixel, mockobs.hsize))
            hsize = mockobs.hsize
        dinf.pcenter = [nbins / 2, nbins / 2]

        if filter_sp_phase:
            """ Filteres the cooled particles that no longer belong to the hot-ICM"""
            iL = np.where((
                np.sqrt(snap.pos[:, 0]**2 + snap.pos[:, 1]**2 +
                        snap.pos[:, 2]**2) * fac_x < 2.0 * gcl.R200())
                          & ((8.9 + 3.3 - np.log(snap.u * fac_T2) -
                              0.65 * np.log10(snap.rho * fac_rho)) < 0)
                          & ((8.9 - 11.0 - np.log(snap.u * fac_T2) -
                              3.50 * np.log10(snap.rho * fac_rho)) < 0)
                          & ((8.9 + 6.9 - np.log(snap.u * fac_T2) +
                              0.50 * np.log10(snap.rho * fac_rho)) < 0)
                          & (snap.mach < 10))[0]
        else:
            iL = np.where(
                np.sqrt(snap.pos[:, 0]**2 + snap.pos[:, 1]**2 +
                        snap.pos[:, 2]**2) * fac_x < 2.0 * gcl.R200())[0]
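        # iL: particles within 2*R200; with filter_sp_phase, three additional cuts
        # in the log T-log rho plane plus Mach < 10 select the hot ICM phase.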

        if hasattr(snap, 'radiPre'):
            if log:
                print('Run_MockObs:: Ratio of PREs to total emission',
                      (np.sum(snap.radiPre[iL])) /
                      (np.sum(snap.radi[iL]) + np.sum(snap.radiPre[iL])))

        H1, xedges, yedges = np.histogram2d(-posrot[iL, 0],
                                            -posrot[iL, 1],
                                            weights=s_radio_SI * snap.radi[iL],
                                            range=[[-hsize, hsize],
                                                   [-hsize, hsize]],
                                            bins=nbins)
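        # H1 is the raw mock radio map in Jy per pixel: particle flux densities
        # (s_radio_SI * snap.radi) binned onto the rotated image grid.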
        """ Difference of gaussians method - accomplishing a simple subtraction of compact sources"
        
        We do this iteratively three times to also remove those particles that where shadowed by other 
        bright particles before
        
        This method is defines by
        
        thresh: A threshold for masking
        scale_1: Smaller scale in kpc
        scale_2: Larger  scale in kpc        
        """
        thresh = 0.75
        scale_1 = 20
        scale_2 = 60

        DoG1_filter = copy.deepcopy(dinf)
        DoG1_filter.beam = [scale_1 / gcl.cosmoPS, scale_1 / gcl.cosmoPS, 0]
        DoG1_filter.update_Abeam()

        DoG2_filter = copy.deepcopy(dinf)
        DoG2_filter.beam = [scale_2 / gcl.cosmoPS, scale_2 / gcl.cosmoPS, 0]
        DoG2_filter.update_Abeam()

        DoG_mask = np.ones_like(H1)
        for _ in range(2):
            convolved_sigma1 = DoG1_filter.convolve_map(
                H1 * DoG_mask)  ## gaussian convolution
            convolved_sigma2 = DoG2_filter.convolve_map(
                H1 * DoG_mask)  ## gaussian convolution
            DoG_rel = np.divide(
                np.abs(convolved_sigma2 - convolved_sigma1) + 1e-20,
                convolved_sigma2 + 1e-20)
            DoG_mask[np.where(
                DoG_rel < thresh)] = 0.2 * DoG_mask[np.where(DoG_rel < thresh)]
        #convolved_sigma1 = DoG1_filter.convolve_map(H1)  ## gaussian convolution
        #convolved_sigma2 = DoG2_filter.convolve_map(H1)  ## gaussian convolution

        H2 = dinf.convolve_map(H1 * DoG_mask)
        #            print('____ Masked/Unmasked flux (mJy):  %6.3f %6.3f' % (np.sum(H2)/dinf.Abeam[0]*1000,s_radio_SI*np.sum(snap.radi[iL])*1000))

        smt(task='WriteDilMask_[sub]')
        #print( '###==== -- 4b:  Writing dilated .mask  ====###'
        #        mask       =  maput.numpy2mask (H2, dinf.limit, Ndil) # outfile = outfile.replace('.fits','') + '_mask.fits',

        #img = bdsm.process_image(filename= outfile+'_simple_conv.fits', thresh_isl=args.tIs, thresh_pix=args.tPi, mean_map = 'zero', beam = (0.0125,0.0125,0), rms_map = False, rms_value = 0.00045, thresh = 'hard')
        #img.export_image(outfile= outfile+'_simple_conv.ismk.fits'    , img_type='island_mask', img_format='fits', mask_dilation=5, clobber=True)
        #img.export_image(outfile= outfile+'_simple_conv.ismk.mask'    , img_type='island_mask', img_format='casa', mask_dilation=5, clobber=True)

        if CASAmock:
            """ Removed implementation; original idea:
            #1. Python: Create convolved perfect simulational output image
            #2. PyBDSM/Python: Create clean mask on that with some dilation
            #3. Casa/Python: Create constant rms and beam-corrected image (clean)
            #4. Casa/Python: Apply this clean mask with immath on constant-rms image
            #5. PyBDSM/python: Use pybdsm with detection_image= 'masked_constant rms_image'
            #6. Python. Create masked .fits mock imagename
            #7  Python. Extract radio relics """
        else:
            if log:
                print(
                    '###====          - Using the simple convolved image ====###'
                )
            IM0 = H2  #(fits.open(simpleconv))[0].data

        smt(task='CreateMask_[sub]')
        #print( '###==== Step 6:  Create masked .fits mock image ====###'
        IM1 = np.squeeze(
            IM0)  #!!! Here unmasked! ... np.multiply(np.squeeze(IM0), mask)
        """Outdated saveFITS, please update and put to end of procedure """
        if saveFITS and CASAmock:
            maput.numpy2FITS(IM1, 'sim.vla.d.masked.fits', dinf.spixel)

        smt(task='RelicExtr_[sub]')

        relics = relex.RelicExtraction(
            IM1,
            z,
            GCl=gcl,
            dinfo=dinf,
            rinfo=cbclass.RelicRegion(
                '', [], rtype=1))  #, faintexcl=0.4, Mach=Hmach, Dens=Hdens,

        smt(task='RelicHandling_[sub]')
        relics = sorted(relics, key=lambda x: x.flux, reverse=True)
        gcl.add_relics(relics)
        if savewodetect or len(relics) > 0:

            if log and len(relics) > 0:
                print(
                    '  ++The brightest relic found has a flux density of %f mJy'
                    % (relics[0].flux)
                )  # guarded: with savewodetect=True the relic list may be empty
            iom.check_mkdir(savefolder + '/maps/z%04.f' %
                            (gcl.mockobs.z_snap * 1000))
            """ Part to derive additional relic information like average and  mach number and alpha. We also get the 
            emission weighted density, as this works only on the bright parts it is fine to work with the subset of 
            particles
            """
            alpha_help = (snap.mach[iL]**2 + 1) / (snap.mach[iL]**2 - 1)

            Hmach = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.mach[iL])
            Halpha = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * alpha_help)
            Hrho_up = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.rup[iL] * fac_rho)
            Htemp = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.u[iL] * fac_T)
            Harea = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.area[iL])
            Hmag = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.B[iL])
            Hpre = SPH_binning(snap,
                               posrot,
                               dinf,
                               iL,
                               immask=DoG_mask,
                               HSize_z=hsize,
                               nbins=nbins,
                               weights=lambda x: s_radio_SI * x.radiPre[iL])

            allflux = np.asarray([])
            for relic in relics:
                relic.wMach = Hmach[relic.pmask]
                relic.wT = Htemp[relic.pmask]
                relic.wArea = Harea[relic.pmask]
                relic.wAlpha = Halpha[relic.pmask]
                relic.wB = Hmag[relic.pmask]
                relic.wPre = Hpre[relic.pmask]
                relic.wRho_up = Hrho_up[relic.pmask]
                #                    relic.wRho        =  Hrho [relic.pmask]
                #                    relic.wRho_down   =  Hrho_down[relic.pmask]
                #                    relic.wT_up       =  Htemp_up[relic.pmask]
                #                    relic.wT_down     =  Htemp_down[relic.pmask]

                relic.wDoG_rel = DoG_rel[relic.pmask]
                allflux = np.concatenate((relic.sparseW, allflux), axis=0)
                relic.averages_quantities()
            """Save maps"""
            allflux = allflux.flatten()
            """ I couldn't come up with something better to take the inverse """
            mask = np.ones(snap.rho.shape, dtype=bool)
            mask[iL] = 0
            Subtracted, xedges, yedges = np.histogram2d(
                -posrot[mask, 0],
                -posrot[mask, 1],
                weights=s_radio_SI * snap.radi[mask],
                range=[[-hsize, hsize], [-hsize, hsize]],
                bins=nbins)
            Subtracted += H1 * (1 - DoG_mask)
            Subtracted_conv = dinf.convolve_map(Subtracted)
            if extract_subtracted:
                relics_subtracted = relex.RelicExtraction(
                    Subtracted_conv,
                    z,
                    GCl=gcl,
                    dinfo=dinf,
                    rinfo=cbclass.RelicRegion(
                        '', [],
                        rtype=1))  # , faintexcl=0.4, Mach=Hmach, Dens=Hdens,
                for relic in relics_subtracted:
                    relic.wMach = Hmach[relic.pmask]
                    relic.wT = Htemp[relic.pmask]
                    relic.wArea = Harea[relic.pmask]
                    relic.wAlpha = Halpha[relic.pmask]
                    relic.wB = Hmag[relic.pmask]
                    relic.wPre = Hpre[relic.pmask]
                    relic.wRho_up = Hrho_up[relic.pmask]
                    #                    relic.wRho        =  Hrho [relic.pmask]
                    #                    relic.wRho_down   =  Hrho_down[relic.pmask]
                    #                    relic.wT_up       =  Htemp_up[relic.pmask]
                    #                    relic.wT_down     =  Htemp_down[relic.pmask]

                    relic.wDoG_rel = DoG_rel[relic.pmask]
                    relic.averages_quantities()
                gcl.compacts = relics_subtracted

            if saveFITS:
                """ Here the maps are already masked with the detection region """

                smt(task='WriteFits_[writes,sub]')
                if log:
                    print(
                        '###==== Step 4:  Preparing FITS file & folders ====###'
                    )

                parlist = (savefolder, gcl.mockobs.z_snap * 1000, gcl.name,
                           Rmodel.id)
                gcl.maps_update(H1, 'Raw',
                                '%s/maps/z%04.f/%s-%04i_native.fits' % parlist)
                gcl.maps_update(IM1, 'Diffuse',
                                '%s/maps/z%04.f/%s-%04i.fits' % parlist)
                gcl.maps_update(
                    Subtracted, 'CompModell',
                    '%s/maps/z%04.f/%s-%04i_compact.fits' % parlist)
                gcl.maps_update(
                    Subtracted_conv, 'Subtracted',
                    '%s/maps/z%04.f/%s-%04i_compactObserved.fits' % parlist)
                if len(relics) > 0:
                    gcl.maps_update(
                        gcl.Mask_Map(Hmach, normalize=allflux), 'Mach',
                        '%s/maps/z%04.f/%s-%04i_mach.fits' % parlist)
                    gcl.maps_update(
                        gcl.Mask_Map(Hrho_up, normalize=allflux), 'RhoUp',
                        '%s/maps/z%04.f/%s-%04i_rhoup.fits' % parlist)
                    gcl.maps_update(
                        gcl.Mask_Map(Htemp, normalize=allflux), 'Temp',
                        '%s/maps/z%04.f/%s-%04i_temp.fits' % parlist)
                    gcl.maps_update(gcl.Mask_Map(Hmag, normalize=allflux), 'B',
                                    '%s/maps/z%04.f/%s-%04i_B.fits' % parlist)
                    gcl.maps_update(
                        gcl.Mask_Map(Hpre, normalize=allflux), 'PreRatio',
                        '%s/maps/z%04.f/%s-%04i_prerat.fits' % parlist)
                gcl.maps_update(
                    DoG_rel, 'DoG_rel',
                    '%s/maps/z%04.f/%s-%04i_DoG_rel.fits' % parlist)
                gcl.maps_update(
                    DoG_mask, 'DoG_mask',
                    '%s/maps/z%04.f/%s-%04i_DoG_mask.fits' % parlist)
            """ PhD feature --> plot the DoF images in a subplot
##                import matplotlib.pyplot as plt   
##                with np.errstate(divide='ignore', invalid='ignore'):
##                    DoG_rel            = np.divide(np.abs(convolved_sigma2-convolved_sigma1)+1e-20,convolved_sigma2+1e-20)
##                pixR200 =        gcl.R200()/(gcl.cosmoPS*dinf.spixel)     
##                bou     =  gcl.R200()*1.5   # pixR200*1
##                f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, figsize=(30,12)) #, sharey='row', sharex='col', sharey='row'
##                ax1.imshow( np.power((np.abs(convolved_sigma1-convolved_sigma2)), 1/1 )     , extent=(-hsize, hsize, -hsize, hsize)) #-bou+cen
##                im2 = ax2.imshow( DoG_rel                                 , extent=(-hsize, hsize, -hsize, hsize), vmin=0.2, vmax=1 ) #-bou+cen           
##                XX,YY = np.meshgrid(xedges[0:-1]+0.5*gcl.cosmoPS*dinf.spixel,yedges[0:-1][::-1]+0.5*gcl.cosmoPS*dinf.spixel) #yedges[-1:0]
##                ax2.contour(XX, YY, DoG_mask,  colors='r', levels=[0.5])
##                ax3.imshow( np.power(dinf.convolve_map(H1*DoG_mask), 1/1 ), extent=(-hsize, hsize, -hsize, hsize)  ) #-bou+cen
##                ax1.set_xlim(-bou, bou)
##                ax1.set_ylim(-bou, bou)
##                ax2.set_xlim(-bou, bou)
##                ax2.set_ylim(-bou, bou) 
##                ax3.set_xlim(-bou, bou)
##                ax3.set_ylim(-bou, bou)
##                
##                ax1.set_title('DoG')
##                ax2.set_title('DoG/LowResImage + mask (contours)')
##                ax3.set_title('Filtered NVSS')
##                
##                print('CreateMokObs',  pixR200, gcl.R200(),dinf.spixel, gcl.cosmoPS)
##                circle1 = plt.Circle((0, 0), gcl.R200(), fill=False, color='w', ls='-')
##                circle2 = plt.Circle((0, 0), gcl.R200(), fill=False, color='w', ls='-')      
##                circle3 = plt.Circle((0, 0), gcl.R200(), fill=False, color='w', ls='-')   
##
##                ax1.add_artist(circle1)
##                ax2.add_artist(circle2)
##                ax3.add_artist(circle3)
##                
##                cax2 = f.add_axes([0.42, 0.12, 0.2, 0.03]) 
##                cb2  = f.colorbar(im2, format='%.2f', ticks=[0.0, 0.25, 0.5, 0.75, 1.0], cax = cax2, orientation="horizontal")  #label='average Mach', 
##                
##                plt.savefig('%s/%s-%04i_joined.png'        % (savefolder, gcl.name, Rmodel.id)) #dpi=400
##                plt.savefig('%s/%s-%04i_joined.pdf'        % (savefolder, gcl.name, Rmodel.id)) #dpi=400          
            
            gcl.add_relics(relics) 
            PhD feature end """
    if writeClusters:
        """This is here because some outputs get lost in a multiprocessing heavy input/output queue process"""
        for gcl in GClrealisations_used:
            filename = 'GCl-%05i' % (gcl.mockobs.id)
            iom.pickleObject((gcl, Rmodel),
                             savefolder + '/pickled/',
                             filename,
                             append=False)

    if log: print('Finished with all efficiency values')
    return True, smt, GClrealisations_used, Rmodel
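
The difference-of-Gaussians masking inside Run_MockObs can be isolated into a short, self-contained sketch. scipy's gaussian_filter stands in for the DoG1_filter/DoG2_filter convolutions, and the sigmas are given in pixels rather than kpc (the original converts kpc scales via the cluster's angular scale):

import numpy as np
from scipy.ndimage import gaussian_filter

def dog_mask(image, sigma_small=3.0, sigma_large=9.0, thresh=0.75, passes=2, damp=0.2):
    """ Iteratively damp pixels whose small- and large-scale convolutions stay similar """
    mask = np.ones_like(image)
    for _ in range(passes):
        conv_small = gaussian_filter(image * mask, sigma_small)
        conv_large = gaussian_filter(image * mask, sigma_large)
        dog_rel = (np.abs(conv_large - conv_small) + 1e-20) / (conv_large + 1e-20)
        mask[dog_rel < thresh] *= damp
    return mask
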
Example #6
def survey_run(surveys,
               infolder='',
               outfoldertop='/data/ClusterBuster-Output/',
               plot=True):
    """ Extracts survey relics from an real world survey
    """

    for survey in surveys:
        print(
            '###==== Step 0b: Initialize internal variables/objects for survey: %s ====###'
            % survey)
        smt = iom.SmartTiming()

        ClList = []
        Excluded = []
        subtract = ['slist', 'fits']  # im, fits, slist

        Jy_SI = 1e-26  # W/Hz/m^2
        outfolder = '%s%s' % (outfoldertop, survey)
        topfolder = os.getcwd()  # '/home/jakobg/lib/ClusterBuster/Relics_Surveys/'
        iom.check_mkdir(outfolder)  # create folder if necessary

        print('###==== Step 1: Load data and analyse it   ====###')
        # np.genfromtxt('Analysis_RORRS/ClusterRelics.csv'', delimiter=';')
        ClusterFile = infolder + 'ClusterList/ClusterAfterNuza2017_clusters.csv'
        RegionFile = infolder + 'ClusterList/ClusterAfterNuza2017_regions.csv'

        Clusters = pd.read_csv(ClusterFile,
                               comment='#',
                               delimiter=',',
                               quotechar='"')
        Clusters = Clusters.where(Clusters.notnull(), 0)
        """ Part of development: replace NaN values with values that can be handled by ClusterBuster """

        for strings in ['REF_LX', 'REF_M200', 'REF_M500', 'REF_F']:
            Clusters[strings] = Clusters[strings].replace(np.nan,
                                                          '',
                                                          regex=True)

        for values in ['M200', 'M500', 'LX_500_0.1-2.4']:
            Clusters[values] = Clusters[values].replace(np.nan, 0, regex=True)

        n = 0
        for index, CL in Clusters.iterrows():
            if CL['Cluster'] and CL['Cluster'] not in [
                    o.name for o in ClList
            ] and CL['Cluster'] not in ['']:
                """I did this to remove unfinished, but recent additions to the relic database"""
                #print(type(CL['Discovery']))
                #if math.isnan(CL['Discovery']):
                #    pass
                #elif int(CL['Discovery']) >= 2018:
                #    continue

                try:
                    """I did this to remove unfinished, but recent additions to the relic database"""
                    if CL['Cluster'] == '#':
                        continue
                    if int(CL['Discovery']) >= 2018:
                        continue
                except (ValueError, TypeError):
                    pass

                n += 1
                #                if n > 5:
                #                     continue
                Cl_name = CL['Cluster']
                status = CL['FLAG_INCLUDED']

                print(CL)

                RA_host = float(CL['RA'])  #float(CL[2])
                Dec_host = float(CL['Dec'])

                diff = np.sqrt((float(CL['RA_Xmax']) - RA_host)**2 +
                               (float(CL['Dec_Xmax']) - Dec_host)**2) * 3600
                if not math.isnan(diff):
                    print(
                        'Two different centre positions given for cluster %s. The offset was %.1f arcsec'
                        % (Cl_name, diff))
                    RA_host = float(CL['RA_Xmax'])
                    Dec_host = float(CL['Dec_Xmax'])

                z = float(CL['z'])
                M200 = float(CL['M200']) * 1e14
                M500 = float(CL['M500']) * 1e14
                Lx = float(CL['LX_500_0.1-2.4']) * 1e44
                flux_lit = float(CL['F_lit'])

                halo = CL['FLAG_Halo']

                try:
                    ClassFlag = ('true' in CL['Type_Flag'].lower())
                except (AttributeError, TypeError):
                    ClassFlag = False

                #create Class object
                GCl = cbclass.Galaxycluster(name=Cl_name,
                                            RA=RA_host,
                                            Dec=Dec_host,
                                            z=z,
                                            M200=M200,
                                            M500=M500,
                                            Lx=Lx,
                                            Lx_lit=Lx,
                                            flux_lit=flux_lit,
                                            ClassFlag=ClassFlag,
                                            halo=halo,
                                            status=status)

                # add further references
                GCl.Lx.ref = cdb.reference(CL['REF_LX'],
                                           rtype='text',
                                           page=None,
                                           nr=None)
                GCl.M200.ref = cdb.reference(CL['REF_M200'],
                                             rtype='text',
                                             page=CL['REFPAGE_M200'],
                                             nr=None)
                GCl.M500.ref = cdb.reference(CL['REF_M500'],
                                             rtype='text',
                                             page=CL['REFPAGE_M500'],
                                             nr=None)
                GCl.flux_lit.ref = cdb.reference(CL['REF_F'],
                                                 rtype='text',
                                                 page=CL['REFPAGE_F'],
                                                 nr=None)

                #============= Load  survey (NVSS) image  =============#
                if GCl.status not in ['TRUE']:
                    ClList.append(GCl)
                    continue
                fitsimage = infolder + 'Images_%s/%s-%s.fits' % (
                    survey, survey, Cl_name)
                image, center, spixel = fitsut.fits2numpy(fitsimage)

                if survey == 'NVSS':
                    s_pixel = [
                        spixel[1] * GCl.cosmoPS * 3600, spixel[1] * 3600
                    ]
                    NVSSbeam = [45., 45. / s_pixel[1]]
                    NVSS_rms = 4.5e-4  # in Jy/beam
                    NVSSlimit = 2 * NVSS_rms
                    NVSSnu = 1.4
                    telescope = 'VLA-D'
                    GCl.dinfo = cbclass.DetInfo(
                        beam=[NVSSbeam[0], NVSSbeam[0], 0],
                        spixel=s_pixel[1],
                        rms=NVSS_rms,
                        limit=NVSSlimit,
                        telescope=telescope,
                        nucen=NVSSnu,
                        center=center[0],
                        pcenter=center[1])
                if survey == 'TGSS':
                    s_pixel = [
                        spixel[1] * GCl.cosmoPS * 3600, spixel[1] * 3600
                    ]
                    TGSSbeam = [25., 25. / s_pixel[1]]
                    TGSS_rms = 3.0e-3  # in Jy/beam
                    TGSSlimit = 2 * TGSS_rms
                    beamrec = 1. if GCl.Dec > 19 else 1. / np.cos(
                        np.radians(GCl.Dec - 19))
                    TGSSnu = 0.1475
                    telescope = 'GMRT'
                    GCl.dinfo = cbclass.DetInfo(
                        beam=[TGSSbeam[0] * beamrec, TGSSbeam[0], 0],
                        spixel=s_pixel[1],
                        rms=TGSS_rms,
                        limit=TGSSlimit,
                        telescope=telescope,
                        nucen=TGSSnu,
                        center=center[0],
                        pcenter=center[1])
                dinfo_survey = GCl.dinfo
                #============= Load relic search region  =============#
                # Make in np.image
                regfile = infolder + 'Regions/RR_%s.reg' % (Cl_name)
                GCl.regions = ioclass.readDS9relics(regfile, spixel, center[0],
                                                    center[1])

                #============= Subtract Sources  =============#
                # in Sources folder
                #try load folder:
                #img = bdsm.process_image(args.file+args.ft, thresh_isl=args.tIs, thresh_pix=args.tPi, mean_map = 'zero', beam = (0.0125,0.0125,0), rms_map = False, rms_value = 0.00045, thresh = 'hard')
                #except:

                #pybdsm.catalog_type
                #--< create .fits image ut of that, which you subtract from your image ....
                smt(task='subtraction')
                model = np.zeros(image.shape)
                model_conv = np.zeros(image.shape)
                use_list, use_im = (False, False)
                if 'slist' in subtract:
                    slist = infolder + 'Sources/slist/%s.slist' % Cl_name
                    if os.path.isfile(slist):
                        scL = iom.read_para_list(slist)

                        for sc in scL:

                            if sc['shape'] == 'Gaussian':
                                g_size = [
                                    float(sc['majoraxis']) / s_pixel[1] * 60,
                                    float(sc['minoraxis']) / s_pixel[1] * 60
                                ]
                            else:
                                g_size = [
                                    GCl.dinfo.beam[0] / GCl.dinfo.spixel,
                                    GCl.dinfo.beam[1] / GCl.dinfo.spixel
                                ]
                            freq_factor = (GCl.dinfo.nucen / 1.4)**(-0.7)
                            COOp = iom.CoordinateToPixel(
                                iom.J2000ToCoordinate(sc['dir']), spixel,
                                center[0], center[1])

                            GCl.compacts.append(sc)
                            #This is not good --> better create an unconvolved model and convolve it with the desired beam
                            model += maput.ImageGaussian_inv(
                                model.shape,
                                sc['flux'] * 1e-3 * freq_factor,
                                g_size, [COOp[0] - 1, COOp[1] - 1],
                                theta=sc['theta'],
                                FWHM=True)  #*gaussian_area
                            #model_conv += maput.ImageGaussian_inv(model_conv.shape, sc['flux']*1e-3*freq_factor, g_size, [COOp[0]-1.,COOp[1]-1], theta = sc['theta'], FWHM=True)  #*gaussian_area
                        model_conv = model
                        use_list = True

                if 'fits' in subtract:
                    highres_image_path = infolder + 'Images_%s/%s-%s.fits' % (
                        "FIRST", "FIRST", Cl_name)
                    if os.path.isfile(highres_image_path):

                        # regrid

                        # http://reproject.readthedocs.io/en/stable/  --> works on fits files
                        hdu_raw = fits.open(fitsimage)[0]
                        image_HR, center_HR, spixel_HR = fitsut.fits2numpy(
                            highres_image_path)
                        s_pixel_HR = [
                            spixel_HR[1] * GCl.cosmoPS * 3600,
                            spixel_HR[1] * 3600
                        ]
                        fitsut.numpy2fits(
                            image_HR, infolder + 'Images_%s/%s-%s_test.fits' %
                            ("FIRST", "FIRST", Cl_name), s_pixel_HR[1],
                            center_HR[0], center_HR[1])
                        hdu_HR = fits.open(infolder +
                                           'Images_%s/%s-%s_test.fits' %
                                           ("FIRST", "FIRST", Cl_name))[0]
                        hdu_HR.data = hdu_HR.data.squeeze()
                        hdu_HR.data[np.isnan(
                            hdu_HR.data
                        )] = 0.  # For contour-masked NVSS images some values were NaN
                        hdu_HR.data[np.where(hdu_HR.data < 6e-4)] = 0.

                        pad = 50
                        hdu_HR.data = np.lib.pad(hdu_HR.data, pad,
                                                 maput.padwithtens)

                        FWHM2sigma = 1 / 2.354
                        FWHM_FIRST = 5.4
                        FWHM_conv = np.sqrt(GCl.dinfo.beam[0]**2 -
                                            FWHM_FIRST**2)
                        gaussian_2D_kernel = Gaussian2DKernel(
                            FWHM_conv / s_pixel_HR[1] * FWHM2sigma)
                        A_beam_old = 1.133 * (
                            (FWHM_FIRST / s_pixel_HR[1])**2)  # FIRST-beam
                        A_beam = 1.133 * (
                            (GCl.dinfo.beam[0] / s_pixel_HR[1])**2)
                        """
                        The copy action is very dangerous, because the corresponding .header object is cloned, so that
                        any change in hdu_HR_conv.header also influences hdu_HR.header
                        deepcopy() is not possible. Because of the we remove hdu_HR_conv from any changes in the header
                        """
                        hdu_HR_conv = copy(hdu_HR)
                        hdu_HR_conv.data = A_beam / A_beam_old * convolve(
                            hdu_HR.data,
                            gaussian_2D_kernel,
                            normalize_kernel=True)
                        for hdu in [hdu_HR]:
                            #                            hdu.data = np.expand_dims(hdu.data, axis=0)
                            #                            hdu.data = np.expand_dims(hdu.data, axis=0)
                            hdu.header['CRPIX1'] = hdu.header['CRPIX1'] + pad
                            hdu.header['CRPIX2'] = hdu.header['CRPIX2'] + pad
                            hdu.header['NAXIS1'] = hdu.header['NAXIS1'] + pad * 2
                            hdu.header['NAXIS2'] = hdu.header['NAXIS2'] + pad * 2


#
#                        from astropy.io import fits
#                        from astropy.utils.data import get_pkg_data_filename
#                        hdu_raw = fits.open(get_pkg_data_filename('galactic_center/gc_2mass_k.fits'))[0]
#                        hdu2 = fits.open(get_pkg_data_filename('galactic_center/gc_msx_e.fits'))[0]
###
                        for hdu in [hdu_raw, hdu_HR]:
                            try:
                                hdu.data = hdu.data[0, 0, :, :]  # drop degenerate frequency/Stokes axes
                            except IndexError:
                                print('Data dimensions already match (2D)')
                            hdu.header['NAXIS'] = 2

                            keylist = [
                                'PC01_01', 'PC02_01', 'PC03_01', 'PC04_01',
                                'PC01_02', 'PC02_02', 'PC03_02', 'PC04_02',
                                'PC01_03', 'PC02_03', 'PC03_03', 'PC04_03',
                                'PC01_04', 'PC02_04', 'PC03_04', 'PC04_04',
                                'NAXIS3', 'NAXIS4', 'CTYPE3', 'CRVAL3',
                                'CDELT3', 'CRPIX3', 'CUNIT3', 'CROTA3',
                                'CTYPE4', 'CRVAL4', 'CDELT4', 'CRPIX4',
                                'CUNIT4', 'CROTA4'
                            ]
                            for key in keylist:
                                try:
                                    del hdu.header[key]
                                    print(
                                        '[%s] removed from the .fits header' %
                                        (key))
                                except:
                                    print(
                                        '[%s] not found, so we cannot delete it from the .fits header'
                                        % (key))

                            hdu.header['EPOCH'] = 2e3
                            hdu.header['EQUINOX'] = 2e3
                            print('====================')

                        hdu_HR_conv.writeto(infolder +
                                            'Images_%s/%s-%s_test2b.fits' %
                                            ("FIRST", "FIRST", Cl_name),
                                            overwrite=True)
                        fitsut.numpy2fits(
                            hdu_HR_conv.data.squeeze(),
                            infolder + 'Images_%s/%s-%s_test2.fits' %
                            ("FIRST", "FIRST", Cl_name), s_pixel_HR[1],
                            center_HR[0], [c + pad for c in center_HR[1]])

                        print(
                            'WCS(hdu_raw.header).wcs.naxis, WCS(hdu2.header).wcs.naxis',
                            WCS(hdu_raw.header).wcs.naxis,
                            WCS(hdu_HR_conv.header).wcs.naxis)
                        array, footprint = reproject_interp(
                            hdu_HR_conv, hdu_raw.header
                        )  # resample hdu_HR_conv onto the pixel grid defined by hdu_raw.header

                        print('_______', np.sum(array), footprint)
                        print('_______________________________', array.shape,
                              image.shape)
                        array = array.squeeze()  #could be removed
                        array[np.isnan(array)] = 0.

                        fitsut.map2fits(
                            array, GCl.dinfo,
                            infolder + 'Images_%s/%s-%s_test3.fits' %
                            ("FIRST", "FIRST", Cl_name))

                        model_conv = array.squeeze()  # add up  OR replace!
                        print('fits_subtraction: np.sum(model_conv):',
                              np.sum(model_conv))
                        use_im = True

                residuum = image - model_conv
                """ Development: Only get the flux within the search region """
                extreme_res = True
                residuum = maput.ContourMasking(
                    residuum, [region.cnt[0] for region in GCl.regions])

                print('%30s source subtraction;  list: %5r; image: %5r' %
                      (Cl_name, use_list, use_im))
                GCl.maps_update(
                    residuum, 'Diffuse',
                    infolder + '%s/Images_%s/diffuse/%s-%s.fits' %
                    (topfolder, survey, survey, Cl_name))
                if np.sum(model_conv) != 0 or extreme_res:
                    GCl.maps_update(
                        image, 'Raw',
                        infolder + '%s/Images_%s/raw/%s-%s_res.fits' %
                        (topfolder, survey, survey, Cl_name))
                    GCl.maps_update(
                        model, 'Modell',
                        infolder + '%s/Images_%s/subtracted/%s-%s.fits' %
                        (topfolder, survey, survey, Cl_name))
                    GCl.maps_update(
                        model_conv, 'Subtracted',
                        infolder + '%s/Images_%s/subtracted/%s-%s_conv.fits' %
                        (topfolder, survey, survey, Cl_name))
                smt()

                #============= impose relic.search  =============#
                for ii, region in enumerate(GCl.regions):
                    smt(task='RelicExtr')
                    relics = relex.RelicExtraction(
                        residuum,
                        z,
                        GCl=GCl,
                        dinfo=GCl.dinfo,
                        rinfo=region,
                        Imcenter=center,
                        subtracted=model)[0:2]  # faintexcl=3.6
                    smt()
                    relics = sorted(relics, key=lambda x: x.flux, reverse=True)

                    for relic in relics:
                        relic.alpha.value = region.alpha
                        print(region.alpha_err)
                        if region.alpha_err is None:
                            relic.alpha.set_std(0)
                        else:
                            relic.alpha.set_std(region.alpha_err)

                    GCl.add_relics(relics)

                # Add galaxy cluster to the list
                ClList.append(GCl)

            #============= Report why certain clusters are excluded  =============#
            else:
                RL_name = CL['Cluster']
                if CL['Identifier']:
                    RL_name += '_' + CL['Identifier']

                if CL['FLAG_INCLUDED'] in ['noMAP']:
                    string = RL_name + ' excluded because the corresponding region is not mapped by the survey.'
                else:
                    string = RL_name + ' excluded because of: ' + CL[
                        'FLAG_INCLUDED']

                Excluded.append(string)

            with open("%s/Excluded.dat" % outfolder, "w") as mf:
                for ex in Excluded:
                    mf.write(ex + '\n')

        ClList = sorted(ClList, key=iom.Object_natural_keys)

        print('#=====  Last Step: Output is produced ====#')
        smt(task='output')
        """ This is an intervening step: Update and ... the missing clusters, in the future this might done at the beginning at an first step """
        ClList = updateClusters_missingRegions(
            ClList, RegionFile)  # topfolder+RegionFile

        print('#=====  A: Pickle Objects ====#')
        iom.pickleObject(ClList, outfolder + '/pickled/', 'ClList')

        print('#=====  B: Create the Survey and pickle it ====#')
        cnt_levels = [9e-4, 1.8e-3, 3.6e-3, 7.2e-3, 1.44e-2]

        synonyms = [
            ('1RXS J060313.4+421231', '1RXS J06+42'),
            ('ACT-CLJ0102-4915', 'ACT-CLJ01-49'),
            ('CIZA J0649.3+1801', 'CIZA J0649'),  # CIZA J0649+18
            ('CIZA J0107.7+5408', 'CIZA J0107'),
            ('CIZA J2242.8+5301', 'CIZA J2243'),  # CIZA J2243+53
            ('MACS J0025-1222', 'MACS J0025'),
            ('MACS J0152.5-2852', 'MACS J0152'),  # MCS J0717+37
            ('MACS J0717.5+3745', 'MACS J0717'),  # MCS J0717+37
            ('MACS J1149.5+2223', 'MACS J1149'),  # J1149+22
            ('MACS J1752.0+4440', 'MACS J1752'),  # MCS J1752+44
            ('MACS J2243.3-0935', 'MACS J2243'),  # MCS J1752+44
            ('MaxBCG 138.91895+25.19876', 'MaxBCG 138+25'),
            ('MaxBCG 217.95869+13.53470', 'MaxBCG 217+13'),
            ('PSZ1 G004.5-19.5', 'PSZ1 G004'),  # PSZ1 G004-19
            ('PSZ1 G096.89+24.17', 'PSZ1 G097'),  # PSZ1 G097+24
            ('PSZ1 G108.18-11.53', 'PSZ1 G108'),  # PSZ1 G108-12
            ('PLCK G200.9-28.2', 'PLCK G200'),  # PSZ1 G108-12
            ('PLCK G287.0+32.9', 'PLCK G287'),  # PLCK G287+33
            ('RXC J0225.1-2928', 'RXC J0225'),
            ('RXC J1053.7+5452', 'RXC J1054'),  # RXC J1054+55
            ('RXC J1053.7+5452 ', 'RXC J1053'),  # RXC J1054+55
            ('RXC J1234.2+0947', 'RXC J1234'),  # RXC J1054+55
            ('RXC J1314.4-2515', 'RXC J1314'),  # RXC J1314-25
            ('ZwCl 0008+5215', 'ZwCl 0008'),  # ZwCl 0008+52
            ('ZwCl 1447.2+2619', 'ZwCl 1447'),  # ZwCl 0008+52
            ('ZwCl 2341+0000', 'ZwCl 2341'),  # ZwCl 2341+00
            ('[KMA2007] 217.95869+13.53470', 'KMA2007'),  # ZwCl 2341+00
        ]

        #        synonyms_lit = [('2017A&A...597A..15D', '2017A+A_deGasperin+Intema+'),
        #                        ('2017arXiv170801718K', '2017arXiv_Kale+Wik+')]
        for GCl in ClList:

            for syn in synonyms:
                if GCl.name.replace('_', ' ') == syn[0] or GCl.name == syn[0]:
                    print('! Name replacement:', syn)
                    GCl.name = syn[1]
            GCl.name = GCl.name.replace('_', ' ')
            GCl.updateInformation()

        norm = cdb.norm('R200', Nexp=1)
        Histo = cdb.Histogram2D(
            nbins=(64, 46), fromto=[[0, 2. * np.pi], [0, 1.5]],
            norm=norm)  # angle_projected(rad), D_proj(R200)

        Survey = cbclass.Survey(
            ClList,
            survey,
            cnt_levels=cnt_levels,
            synonyms=synonyms,
            dinfo=dinfo_survey,
            hist_main=Histo,
            surshort=survey
        )  # 'NVSS' should be replaced with a real survey class
        Survey.emi_max = 2e-2
        Survey.scatterkwargs = {"alpha": 0.7, "fmt": "o", "markersize": 10}
        Survey.histkwargs = {"alpha": 0.4}
        Survey.relic_filter_kwargs = {
            "Filter": True,
            "shape": False,
            "minrms": 8
        }
        iom.pickleObject(Survey, outfolder + '/pickled/', 'Survey')

        for GCl in Survey.GCls:
            print(GCl.name, GCl.status)

    smt(forced=True)
    return True
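
The FIRST-to-NVSS beam matching above (FWHM_conv plus the A_beam ratio) condenses into a hedged, self-contained sketch; beam FWHMs and the pixel scale are in arcsec, and the input map is assumed to be in Jy/beam:

import numpy as np
from astropy.convolution import Gaussian2DKernel, convolve

def match_beam(image_jy_per_beam, fwhm_source, fwhm_target, pix_arcsec):
    """ Convolve a Jy/beam map from a small beam to a larger one and rescale """
    fwhm2sigma = 1 / 2.354                                # FWHM -> Gaussian sigma
    fwhm_conv = np.sqrt(fwhm_target**2 - fwhm_source**2)  # kernel bridging the two beams
    kernel = Gaussian2DKernel(fwhm_conv / pix_arcsec * fwhm2sigma)
    beam_ratio = (fwhm_target / fwhm_source)**2           # Jy/beam scales with beam area
    return beam_ratio * convolve(image_jy_per_beam, kernel, normalize_kernel=True)

# e.g. FIRST (5.4 arcsec) to NVSS (45 arcsec): matched = match_beam(img, 5.4, 45.0, 1.8)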