Example #1
def RadioCuts(val, compradio=False):
    smt = iom.SmartTiming()
    smt(task='RadioAndMock_initialization')

    (pase, realisations, survey) = val

    Rmodel = survey.Rmodel
    snapMF, strSn, smt_add = LoadSnap_multiprocessing(pase, realisations, Rmodel, getstring=True)
    (snap, MF) = snapMF  # unpack here, so that MF is available as a cutmask in both branches below
    if compradio:
        (snap, subsmt) = radiomodel.CreateRadioCube(snapMF, Rmodel, realisations[0].mockobs.z_snap, nuobs=pase['nu_obs'])[0:2]
        smt.MergeSMT_simple(subsmt, silent=True)
        
        smt(task='UpdateHeader')  
        realisations[0].z.value    = realisations[0].mockobs.z_snap
        realisations[0].Mvir.value = snap.head['Mvir']*snap.head['hubble']
        realisations[0].updateInformation()
        snap.head['M200'] = realisations[0].M200.value
                     
        """ Here we compute the volume weighted radio emission """         
        radiosum      = np.sum(snap.radi)    
        borders       = 2*realisations[0].R200*snap.head ['hubble']/snap.head ['aexpan']
        whereR200     = np.where( np.sqrt(np.power(snap.pos[:,0],2) + np.power(snap.pos[:,1],2) + np.power(snap.pos[:,2],2) ) < borders/2 )
        radiosum_R200 = np.sum(snap.radi[whereR200])

        # update information on the total radio power (at the rest-frame frequency) in the simulation volume
        for kk, real in enumerate(realisations):
            realisations[kk].P_rest.value    = radiosum       # computed for a frequency differing from 1.4 GHz and an efficiency of 1; a further correction is applied in PostProcessing.py
            realisations[kk].Prest_vol.value = radiosum_R200  # computed for a frequency differing from 1.4 GHz and an efficiency of 1; a further correction is applied in PostProcessing.py

    smt(task='Shed_DoMockObs_misc')
    if suut.TestPar(pase['redSnap']):
         # This is all part of reducing the data load: a shrunken version of the snapshot with fewer particles is saved in a custom format that is a fork of the gadget format.
         redsnap = radiomodel.PiggyBagSnap_cut(snap, snap, float(pase['cutradio']), cutmask=MF)
         strSn   = strSn.replace('cluster.', 'clusterSUBSET.')
         strSn   = strSn.replace(pase['snapfolder'], pase['outf'])
         print(strSn)
         redsnap.savedata(strSn)    # save the reduced snapshot
         if compradio:
              print('np.sum(radiocube[0].radi)', np.sum(snap.radi), '-->', np.sum(redsnap.radi), 'i.e.', radiosum, radiosum_R200, 'pickled to', strSn)
         
    return (realisations, Rmodel), smt
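# --- Hedged sketch: the volume-weighted radio sum computed in RadioCuts().
# snap.radi is summed over all particles and over the subset inside a sphere of
# radius R200 (note that borders/2 plays the role of R200 in the snapshot's
# comoving h-units). Minimal numpy stand-in; 'pos', 'radi' and 'r200' are toy
# assumptions, not the real snapshot attributes or units.
import numpy as np

rng = np.random.default_rng(0)
pos = rng.uniform(-2.0, 2.0, size=(1000, 3))  # particle positions [arbitrary units]
radi = rng.random(1000)                       # radio luminosity per particle
r200 = 1.0                                    # cut radius, playing the role of borders/2

radiosum = np.sum(radi)                                   # total emission in the box
whereR200 = np.where(np.linalg.norm(pos, axis=1) < r200)  # same cut as in RadioCuts
radiosum_R200 = np.sum(radi[whereR200])
print(radiosum, radiosum_R200)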
Example #2
def LoadSnap_multiprocessing(pase, realisations, Rmodel, getstring=False, verbose=False):
    smt = iom.SmartTiming()
    smt(task='LoadSnaps')   
           
    gcl = realisations[0]

    """ We load the radio cubes """

    strSn = (pase['snapfolder'] + 'SHOCKS_%05i/cluster.%05i.snap.%03i.shocks') % (gcl.mockobs.clid, gcl.mockobs.clid, gcl.mockobs.snap)


    if suut.TestPar(pase['useMiniCube']):   # use the reduced instead of the original snapshot?
         strSn = strSn.replace('cluster.', 'clusterSUBSET.')
    if verbose: print('Loading snapshot:', strSn)
    snap = loadsnap.Loadsnap(strSn, headerc=pase['headerC'])

    """psi and mach files could become shared (or global) arrays, but as they are quite small (< 1 MB), the impact on performance should be small"""
    PreSnap = radiomodel.PrepareRadioCube(snap, psiFile=pase['miscdata']+pase['PSItable'], machFile=pase['miscdata']+pase['DSAtable'])
    PreSnap = ( radiomodel.PiggyBagSnap(PreSnap[0]), PreSnap[1] )

    if getstring:
        return PreSnap, strSn, smt
    else:
        return PreSnap, smt
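# --- Hedged sketch: how the snapshot path template in LoadSnap_multiprocessing
# expands. strSn is built with printf-style formatting from the cluster id and
# the snapshot number; the folder and numbers below are made up for illustration.
snapfolder = '/data/snaps/'  # hypothetical stand-in for pase['snapfolder']
clid, snapnr = 17, 12
strSn = (snapfolder + 'SHOCKS_%05i/cluster.%05i.snap.%03i.shocks') % (clid, clid, snapnr)
print(strSn)  # -> /data/snaps/SHOCKS_00017/cluster.00017.snap.012.shocks
# with pase['useMiniCube'] set, the reduced snapshot is loaded instead:
print(strSn.replace('cluster.', 'clusterSUBSET.'))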
Example #3
def main(parfile, workdir=None, ABC=None, verbose=False, survey=None, index=None, Clfile='clusterCSV/MUSIC2-AGN',
         processTasks=True):

    """
    input: parameter file:
           ABC  : None or a list of parameters (what a pity that a dictionary is not possible with the current abcpmc or ABCpmc module)

    output: some strings to track down the output directories

    once a survey is not None all the default survey values will be used;
    i.e. the parfile will be considered much
    For future: - delete survey findings (or add for different detection parameter)
                or make the Rmodel changeable

    If pase['default'] is False then a run will be made with one shell and one single cluster realisation per snapshot

    For abcpmc MUSIC-2 vanilla use MUSIC2-AGN for clusterCSV and MUSIC2_NVSS02_SSD.parset for parset
               MUSIC-2cooling  use MUSIC2-AGN for clusterCSV and MUSIC2COOL_NVSS.parset         for parset
    """
    RModelID = os.getpid()  # get process id

    seed = random.randrange(4294967295)
    np.random.seed(seed=seed)

    """also possible : np.random.seed(seed=None) 
    ... this here is only to save the seed, which is not needed, because all the random stuff can be reconstructed from the cluster statistics

    processes from random are well seeded even without this, somehow numpy needs this kind of seed
    but only in abcpmc and not ABC

    import random
    random.randrange(sys.maxsize)
    """

    # === Read the parset, then extract the fundamental parameters. The parset MUST be saved as 'parsets/*.parset'
    if workdir is None: workdir = os.path.dirname(__file__) + '/'
    pase, Z = suut.interpret_parset(parfile, repository=workdir + '/parsets/')
    # TODO: workdir should be the same as pase['miscdir'] yet the issue is that it is unknown before miscdir is known.

    if survey is None:
        surveyN = parfile.replace('.parset', '')
        savefolder = pase['outf'] + surveyN
        logfolder = pase['outf'] + surveyN
    else:
        surveyN = survey.name
        savefolder = survey.outfolder
        logfolder = survey.logfolder

    # Constants
    # ==== Cosmological Parameter - MUSIC-2
    Omega_M = 0.27  # Matter density parameter

    # === Create folder if needed
    iom.check_mkdir(savefolder)
    iom.check_mkdir(logfolder)

    smt = iom.SmartTiming(rate=pase['smarttime_sub'], logf=savefolder + '/smt')
    smt(task='Prepare_Clusters')

    # === Create detection information
    dinfo = cbclass.DetInfo(beam=[float(pase['S_beam']), float(pase['S_beam']), 0], spixel=float(pase['S_pixel']),
                            rms=float(pase['RMSnoise']) * 1e-6,
                            limit=float(pase['RMSnoise']) * float(pase['Detthresh']) * 1e-6,
                            nucen=float(pase['nu_obs']), center=(0, 0), survey='UVcoverage')

    if survey is None:
        """Create all galaxy clusters:
            All these steps are to decide which clusters to use. Better placement: in SurveyUtils"""

        """ Read the cluster lists from MUSIC-2 for all snapshots """
        all_clusters = pd.read_csv('%s%s_allclusters.csv' % (pase['miscdata'], Clfile))
        if verbose:
            all_clusters.info()
        zsnap_list = pd.Series(all_clusters['redshift'].unique())
        snapidlist = pd.Series(all_clusters['snapID'].unique())
        clusterIDs = list(all_clusters['clID'].unique())
        NclusterIDs = [len(all_clusters[all_clusters['redshift'] == z]) for z in zsnap_list]
        misslist = np.loadtxt(pase['miscdata'] + pase['missFile'])
        """ e.g. cluster 10# was not (re)simulated, in neither of the MUSIC-2 simulations (also 7 has some issues?)"""

        GClList = []

        if pase['snaplistz'] != 'None':

            snaplistz = [float(z) for z in iom.str2list(pase['snaplistz'])]
            snapidlist = [float(z) for z in iom.str2list(pase['snapidlist'].replace(' ', ''))]

            zsnap_list = (snaplistz[::-1])[0:11]   # list of available snapshot redshifts, restricted to the first 11 entries
            snapidlist = (snapidlist[::-1])[0:11]  # corresponding snapshot IDs, same restriction

            NclusterIDs = [len(all_clusters['clID'].unique().tolist())] * len(snaplistz)
            if verbose: print('NclusterIDs', NclusterIDs[0])

        use_list = [True] * len(zsnap_list)  # Consider setting some entries to False; you don't really want to use the z=4.0 snapshots!
        Vsimu = (1.0 / (myu.H0 / 100.)) ** 3  # Gpc**3 comoving volume

        """ Iterate trough each shell of your z-onion and attribute clusters to them
            with z-range and percentage of covered sky, we have z=0.1
        """
        if not suut.TestPar(pase['default']):
            N_shells = 1
        else:
            N_shells = int(pase['N_shells'])  # np.linspace below needs an integer num

        shells_z, delta_z = np.linspace(Z[0], Z[1], num=N_shells + 1, retstep=True)
        cosmo = FlatLambdaCDM(H0=myu.H0, Om0=Omega_M)
        DCMRs = cosmo.comoving_volume(shells_z).value / 1e9
        count = 0

        for (zlow, zhigh, VCMlow, VCMhigh) in zip(shells_z[0:-1], shells_z[1:], DCMRs[0:-1], DCMRs[1:]):

            """ Iterate through each shell of the observed volume 
                and assign clusters
            """

            boundaries_z = (zlow, zhigh)
            VCM = (VCMlow, VCMhigh)
            z_central = np.mean(boundaries_z)
            if not suut.TestPar(pase['default']):
                z_central = 0.051
            chosen = suut.assign_snaps(zsnap_list, boundaries_z, VCM[1] - VCM[0], NclusterIDs,
                                       sigma_z=float(pase['sigma_z']),
                                       skycoverage=float(pase['surv_compl']), Vsimu=Vsimu, use_list=use_list,
                                       fake=(not suut.TestPar(pase['default'])), logmode=None)

            for (snap, kk) in chosen:
                l = all_clusters[(all_clusters["clID"] == clusterIDs[kk])
                                 & (all_clusters["snapID"] == snapidlist[snap])]
                """ It would be good if you could directly access the element like in an ordered list, 
                    as this would dramatically speed up the process
                """

                """ Skips missing snapshots --> they will also miss in the .csv"""
                if len(l) == 0:
                    if verbose:
                        print('__ Missing snapshot:', clusterIDs[kk], snapidlist[snap])
                    continue

                ids = int(l["clID"])
                M200 = float(l["M200"])

                # Filter for the cluster masses. Please mind that this filtering step is also redshift dependent
                if suut.TestPar(pase['empicut']) and np.log10(M200) < (13.6 + 2 * z_central):
                    continue

                count += 1

                # Decide on the projection of the cluster.
                # It would be great if a random initializer between 0 and 1 had been saved here, for reproducibility.
                if suut.TestPar(pase['rotation']):
                    theta = np.arccos(uniform(0, 2) - 1)
                    phi = uniform(0, 2 * np.pi)
                    psi = uniform(0, 2 * np.pi)
                else:
                    theta = 0
                    phi = 0
                    psi = 0

                # Create mockObs and the galaxyCluster_simulation
                mockObs = cbclass.MockObs(count, theta=theta, phi=phi, psi=psi, snap=snapidlist[snap],
                                          z_snap=zsnap_list[snap], clid=ids,
                                          snapfolder=pase['xrayfolder'], xrayfolder=pase['xrayfolder'],
                                          headerc=pase['headerC'])
                GClList.append(
                    cbclass.Galaxycluster_simulation("MUSIC2%05i-%06i-%06i" % (ids, snapidlist[snap], count), count,
                                                     z=z_central, M200=M200, dinfo=dinfo,
                                                     mockobs=mockObs))  # , **addargs

        # Also create a list of the chosen clusters for later lookup
        GClList = sorted(GClList, key=lambda x: (x.mockobs.clid, -x.mockobs.snap))

        if verbose: print('Length of GClList:', len(GClList))
        """ New approach: Create a list of modes for the radio emission (Rmodels)
        """
        surmodel = None
        if ABC is None:
            """CAVEAT: The issue with the currently used ABC routines is that you have to give them arrays. Which is why 
               the corresponding model associated has to be defined at this layer.
               Giving the procedure a function which would create this array would allow all of this to be defined 
               in the top layer of ABC
            """

            RModel = cbclass.RModel(RModelID, effList=[float(pase['eff'])], B0=float(pase['B0']),
                                    kappa=float(pase['kappa']), compress=float(pase['compress']))

            if suut.TestPar(pase['redSnap']):
                RModel.effList = RModel.effList[0]
        elif len(ABC) == 1:
            """ Vary only efficiency """
            (lgeff) = ABC
            RModel = cbclass.RModel(RModelID, effList=[10 ** lgeff], B0=1, kappa=0.5, compress=float(pase['compress']))
        elif len(ABC) == 2:
            """ Vary efficiency and B0"""
            (lgeff, lgB0) = ABC
            RModel = cbclass.RModel(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=0.5,
                                    compress=float(pase['compress']))
        elif len(ABC) == 3:
            """ Varies the standard model """
            (lgeff, lgB0, kappa) = ABC
            RModel = cbclass.RModel(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=kappa,
                                    compress=float(pase['compress']))
        elif len(ABC) == 4:
            """ Varies the standard model + detection probability """
            (lgeff, lgB0, kappa, survey_filter_pca_b) = ABC
            RModel = cbclass.RModel(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=kappa,
                                    compress=float(pase['compress']))
            surmodel = cbclass.SurModel(b=survey_filter_pca_b)
        elif len(ABC) == 6:
            (lgeff, lgB0, kappa, lgt0, lgt1, lgratio) = ABC
            RModel = cbclass.PreModel_Hoeft(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=kappa,
                                            compress=float(pase['compress']), t0=10**lgt0, t1=10**lgt1, ratio=10**lgratio)
        elif len(ABC) == 7:
            (lgeff, lgB0, kappa, survey_filter_pca_b, lgratio, lgt0, lgt1) = ABC
            RModel = cbclass.PreModel_Hoeft(RModelID, effList=[10 ** lgeff], B0=10 ** lgB0, kappa=kappa,
                                            compress=float(pase['compress']), t0=10 ** lgt0, t1=10 ** lgt1,
                                            ratio=10 ** lgratio)
            surmodel = cbclass.SurModel(b=survey_filter_pca_b)
        else:
            print('RunSurvey::main: model unknown')
            return


        """ Create survey """
        outfolder = '%s_%05i/' % (logfolder, RModelID)
        survey = cbclass.Survey(GClList, survey='%s' % (parfile.replace('.parset', '')),
                                emi_max=float(pase['RMSnoise']) * 1e-3 * 200,
                                cnt_levels=[float(pase['RMSnoise']) * 2 ** i for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]],
                                saveFITS=(ABC is None), savewodetect=suut.TestPar(pase['savewodetect']), dinfo=dinfo,
                                surshort='MUSIC2', Rmodel=RModel, outfolder=outfolder, logfolder=logfolder)
        survey.set_surmodel(surmodel)
        survey.set_seed_dropout()
    else:
        """ If you directly loaded a survey, just use its internal Rmodel """
        RModel = survey.Rmodel

    if verbose: print('Outfolder:', survey.outfolder)

    """=== Create a Task Cube, modify it & save it 
    The function of this Taskcube formerly was to keep track of all the computations that were already done in my multiproccecing version of ClusterBuster 
    Now it is outdated and isn't maintained anymore. After a BUG I didn't want to fix I decommisened this functionality
    """

    Taskcube = np.zeros((len(survey.GCls), len([RModel])))
    # A cube of all possible entries , efficiency is always fully computed and thus not in the Taskcube

    if suut.TestPar(pase['reCube']):
        Taskcube = np.load(logfolder + '/TaskCube.npy')
        smt.MergeSMT_simple(iom.unpickleObject(logfolder + '/smt'))
    if int(pase['reCL']) + int(pase['reRM']) > 0:
        for (GCl_ii, RModelID), status in np.ndenumerate(Taskcube[:, :]):
            if verbose: print('GCl_ii, RModelID:', GCl_ii, RModelID)
            if GClList[GCl_ii].mockobs.clid > int(pase['reCL']) and int(pase['reRM']):
                break
            else:
                Taskcube[GCl_ii, RModelID] = 1
    np.save(logfolder + '/TaskCube',
            Taskcube)  # also to be pickled/saved: Levels Cluster&TaskID --> B0, kappa, (z) -->  eff0
    """"""

    if verbose: print('#== Begin Processing task')

    """ This is the most important task! """
    while processTasks:
        processTasks, smt = DoRun((pase, survey), smt, verbose=verbose)

    print('RModelID %i of run %s finished' % (RModelID, surveyN))
    return survey
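# --- Hedged sketch: the z-onion shell volumes computed in main(). The survey
# volume is sliced into N_shells redshift shells whose boundaries are converted
# to comoving volumes with astropy; H0=70 and Omega_M=0.27 mirror the MUSIC-2
# values used above, while the shell boundaries are illustrative.
import numpy as np
from astropy.cosmology import FlatLambdaCDM

N_shells = 5
shells_z, delta_z = np.linspace(0.0, 0.5, num=N_shells + 1, retstep=True)
cosmo = FlatLambdaCDM(H0=70, Om0=0.27)
DCMRs = cosmo.comoving_volume(shells_z).value / 1e9  # Mpc^3 -> Gpc^3

for zlow, zhigh, Vlow, Vhigh in zip(shells_z[:-1], shells_z[1:], DCMRs[:-1], DCMRs[1:]):
    # each shell contributes (Vhigh - Vlow) Gpc^3 of comoving survey volume
    print('z in [%.2f, %.2f]: %.4f Gpc^3' % (zlow, zhigh, Vhigh - Vlow))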
Example #4
def RadioAndMock(val, verbose=True):
    smt = iom.SmartTiming()
    smt(task='RadioAndMock_initialization')

    (pase, realisations, survey) = val
    Rmodel = survey.Rmodel

    PreSnap, smt_add = LoadSnap_multiprocessing(pase, realisations, Rmodel)

    # PreSnap is a (snap, MF) tuple; without it we cannot proceed, and the code below would hit an undefined radiosnap
    if len(PreSnap) == 0:
        raise ValueError('LoadSnap_multiprocessing() returned an empty snapshot')
    snapMF = PreSnap
    (radiosnap, subsmt) = radiomodel.CreateRadioCube(snapMF, Rmodel, realisations[0].mockobs.z_snap, nuobs=pase['nu_obs'])[0:2]
    smt.MergeSMT_simple(subsmt, silent=True)

    """ This weird interresult comes from
        output.put( outp + (Rmodel,)) #Rmodel is added to the tuple
        (radiocube, subsmt, Rmodel) = stage1_out.get()
        stage1_list.append( ( radiocube, Rmodel, survey) )
    """
    radiocube = (radiosnap, Rmodel, survey) #Rmodel is added to the tuple


    ##=== Stage II - DoMockObs
    if verbose: print('Start compiling MockObservations for further models of cluster #%5i and snap #%3i with %i realisations in total.' % (realisations[0].mockobs.clid, realisations[0].mockobs.snap, len(realisations)))


    smt(task='Shed_DoMockObs')
    # The result of this computation is independent of rotation, which is why it was put here
    for kk, real in enumerate(realisations):  # update information on the total radio power (at the rest-frame frequency) in the simulation volume
        """ This is wrong and has to be fixed in the future!!!! """

        """ also possible: realisations[kk].Rvir = radiocube[0].head['Rvir'] """
        if realisations[kk].M200.value == 0:
            try:
                realisations[kk].M200.value = radiocube[0].head['M200']
            except KeyError:
                realisations[kk].Mvir.value = radiocube[0].head['Mvir']
            realisations[kk].updateInformation(massproxis=True)
             
    """ Here we compute the volume weighted radio emission """         
    radiosum  = np.sum(radiocube[0].radi)
    borders   = 2*realisations[0].R200*radiocube[0].head ['hubble']/radiocube[0].head ['aexpan']
    whereR200 = np.where( np.sqrt(np.power(radiocube[0].pos[:,0],2) + np.power(radiocube[0].pos[:,1],2) + np.power(radiocube[0].pos[:,2],2) ) < borders/2 )
    radiosum_R200 = np.sum(radiocube[0].radi[whereR200])

    # update information on the total radio power (at the rest-frame frequency) in the simulation volume
    for kk, real in enumerate(realisations):
        realisations[kk].P_rest.value    = radiosum       # computed for a frequency differing from 1.4 GHz and an efficiency of 1; a further correction is applied in PostProcessing.py
        realisations[kk].Prest_vol.value = radiosum_R200  # computed for a frequency differing from 1.4 GHz and an efficiency of 1; a further correction is applied in PostProcessing.py

    smt(task='Shed_DoMockObs_misc')
    locations = [survey.outfolder]
    
    if not suut.TestPar(pase['cutradio']):
        radiocubeUse = radiocube
    else:
        print('Beware: this is slow and should not be parallelised! This part is not implemented.')
        radiocubeUse = None  # radiomodel.PiggyBagSnap_cut(snap, radiocube[0], float(pase['cutradio'])), Rmodel, survey)]
    (nouse, subsmt, GClrealisations_used, Rmodel) = mockobs.Run_MockObs(radiocubeUse, realisations, saveFITS=survey.saveFITS, savewodetect=survey.savewodetect, writeClusters=True) #Mach=pase['Mach'], Dens=pase['Dens'],
    smt.MergeSMT_simple(subsmt, silent=True)

    return (GClrealisations_used,  Rmodel), smt
Example #5
def RadioAndMock_loaded(val, verbose=True):
    smt = iom.SmartTiming()
    smt(task='RadioAndMock_initialization')                
    (snapMF,  pase, realisations, survey) = val
    Rmodel = survey.Rmodel

    ##=== Stage II - DoMockObs
    if verbose:
        print('Start compiling MockObservations for further models of cluster #%5i snap #%3i with %i realisations in total.'
              % (realisations[0].mockobs.clid, realisations[0].mockobs.snap, len(realisations)))

    GClrealisations_return = []
    smt(task='Shed_DoMockObs_misc') 
    # The result of this computation is independent of rotation, which is why it was put here
    
    for kk, realisation in enumerate(realisations):  # update information on the total radio power (at the rest-frame frequency) in the simulation volume
        """ This is wrong and has to be fixed in the future!!!!
        Currently, this code is really SMELLY and hard to understand.
        """
        (radiosnap, subsmt, poisson_factor) = radiomodel.CreateRadioCube(snapMF, Rmodel, realisation.mockobs.z_snap,
                                                         nuobs=pase['nu_obs'], logging=False)[0:3]
        smt.MergeSMT_simple(subsmt, silent=True)

        """ also possible: realisations[kk].Rvir       = radiocube[0].head['Rvir']"""
        if realisations[kk].M200.value == 0:
            try:
                realisations[kk].M200.value = radiosnap.head['M200']
            except:
                realisations[kk].Mvir.value = radiosnap.head['Mvir']
            realisations[kk].updateInformation(massproxis=True)

        """ Here we add the radio emission due to pre-existing electrons """
        if isinstance(Rmodel, cbclass.PreModel_Gelszinnis):
            randfactor = 10**np.random.normal(0, Rmodel.p_sigma, 1)
            realisation.PreNorm = randfactor*Rmodel.p0
            radiosnap.radiPre += realisations.PreNorm * radiosnap.radiPre
        elif isinstance(Rmodel, cbclass.PreModel_Hoeft):
            realisation.poisson_factor = poisson_factor

        """ Here we compute the volume weighted radio emission """   
        radiosum      = np.sum(radiosnap.radi)
        borders       = 2*realisations[0].R200*radiosnap.head ['hubble']/radiosnap.head ['aexpan']
        whereR200     = np.where(np.sqrt(np.power(radiosnap.pos[:,0], 2) + np.power(radiosnap.pos[:,1], 2)
                                         + np.power(radiosnap.pos[:,2], 2)) < borders/2)
        radiosum_R200 = np.sum(radiosnap.radi[whereR200])
        # update information on the total radio power (at the rest frame frequency) in the simulational volume
        realisations[kk].P_rest.value    = radiosum       # This is for a frequency differing from 1.4 GHz and an efficiency of 1, in PostProcessing.py we apply a further correction
        realisations[kk].Prest_vol.value = radiosum_R200  # This is for a frequency differing from 1.4 GHz and an efficiency of 1, in PostProcessing.py we apply a further correction

        if not suut.TestPar(pase['cutradio']):

            radiosnapUse = copy.deepcopy(radiosnap)
            if hasattr(radiosnapUse, 'radiPre'):
                radiosnapUse.radi += radiosnapUse.radiPre
                #print('Run_MockObs:: Ratio of PREs to total emission', (np.sum(radiosnapUse.radiPre)) / (np.sum(radiosnapUse.radi) + np.sum(radiosnapUse.radiPre)))

            radiocube = (radiosnapUse, Rmodel, survey)  # Rmodel is added to the tuple
        else:
            print('Beware: this is slow and should not be parallelised! This part is not implemented.')
            radiocube = (radiosnap, Rmodel, survey)  # Rmodel is added to the tuple

        smt(task='Shed_DoMockObs')
        (nouse, subsmt, GClrealisation_used, Rmodel) = mockobs.Run_MockObs(radiocube, [realisation],
                                                                           saveFITS=survey.saveFITS,  savewodetect=survey.savewodetect,
                                                                           side_effects=True)
        GClrealisations_return += GClrealisation_used
        smt.MergeSMT_simple(subsmt, silent=True)
        
    return (GClrealisations_return, Rmodel), smt
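# --- Hedged sketch: the log-normal scaling applied above for the
# PreModel_Gelszinnis pre-existing electrons. Each realisation draws a factor
# 10**N(0, p_sigma), so radiPre is boosted log-normally around p0. All numbers
# below are toys, not the real model parameters.
import numpy as np

p0, p_sigma = 1.0, 0.3
radiPre = np.full(5, 2.0)                        # toy pre-existing radio emission
randfactor = 10 ** np.random.normal(0, p_sigma)  # log-normal scatter
PreNorm = randfactor * p0
radiPre += PreNorm * radiPre                     # same update as in the loop above
print(PreNorm, radiPre)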
Example #6
def Run_MockObs_XRay(gcl,
                     savefolder,
                     log=False,
                     PCA=[],
                     saveFITS=True,
                     headerc='Mpc',
                     verbose=True):
    ''' Creates maps of projected quantities in the galaxy cluster, like
          X-ray emission
          Mass
          Velocity field
    '''

    mockobs = gcl.mockobs
    z = gcl.z.value

    smt = iom.SmartTiming(rate=5e4)  # logf=outf+'smt.log'  #;  print '###==== Step2a:  Loading configuration files ====###'

    strSn = ('%s/SHOCKS_%05i/cluster.%05i.snap.%03i.shocks') % (
        gcl.mockobs.xrayfolder, gcl.mockobs.clid, gcl.mockobs.clid,
        gcl.mockobs.snap)
    if verbose: print('Loading snapshot:', strSn)
    snap = LSconv.Loadsnap(strSn, headerc=gcl.mockobs.headerc)

    hSize = mockobs.hsize  # kpc
    HSize = mockobs.hsize * snap.head['hubble']  # kpc/h, i.e. multiplied with the reduced Hubble constant h=0.7 used in MUSIC-2

    rot = maut.Kep3D_seb(Omega=mockobs.theta, i=mockobs.phi, omega=mockobs.psi)
    posrot = np.dot(snap.pos, rot)
    velrot = np.dot(snap.vel, rot)

    smt(task='PCA_[sub]')
    xSize = 0.8 * hSize
    XSize = 0.8 * HSize
    XSize_z = XSize * (1 + z)  # factor (1+z): presumably converts the physical size to the comoving frame of the positions

    #    if xPixel*gcl.cosmoPS < 10:    # If the cluster is very nearby, use a less resolved X-ray/mass image for the analysis
    #      xPixel    = 10/gcl.cosmoPS
    '''DEVELOPMENT: uses a very well resolved X-ray image '''
    xPixel = 10 / gcl.cosmoPS  # size of a pixel in arcsec, chosen so that the resolution is 10 kpc

    nbins_x = int(2 * xSize / (gcl.cosmoPS * xPixel))
    #            x_pixel   = [xPixel*gcl.cosmoPS, xPixel]
    Xbeam = [4 * xPixel, 4]
    A_Xbeam = 1.133 * Xbeam[1] * Xbeam[1]
    mapinfo = cbclass.DetInfo(beam=[0, 0, 0],
                              spixel=xPixel,
                              rms=0,
                              limit=0,
                              telescope='MUSIC-2 simulation',
                              nucen=0,
                              center=[0, 0],
                              pcenter=[nbins_x / 2, nbins_x / 2])

    if verbose:
        print('  ###==== Step 3a:  Binning cluster data cube  (X-Ray) ====###')

    #    iL  =  np.where( snap.rho[:] >  -1 )[0] #snap.xray[:] >  -1

    # iL  =  np.where( ( -hThick < posrot[:,2] ) & ( posrot[:,2] < hThick )  &  ( snap.radi[:] > -1e60 ) )

    # bremsstrahlung, compare https://www.mrao.cam.ac.uk/~kjbg1/lectures/lect3.pdf

    fac = LSconv.conversion_fact_gadget_rho_to_nb(snap.head) * LSconv.conversion_fact_ne_per_nb()
    brems = np.power(snap.rho * fac, 2) * np.power(snap.u, 1 / 2)  # mimics the total bremsstrahlung emissivity; neglects metallicity and that MUSIC-2 doesn't have cooling
    iL = np.where(((np.log10(snap.rho * fac) + 5) - (np.log10(snap.u) - 2.2) * (3.8 / 2.8)) < 0)[0]  # snap.rho < 5e-4 with inverse color scaling
    '''  BREMSSTRAHLUNG & MASS
         (snap.rho < 3e-1) & (snap.u > 3e4) to remove the galaxies
         (snap.rho < 5e-4) for a nice contrast image

         np.where( (snap.rho < 3e-1) & (snap.u > 3e4) )[0] for galaxy exclusion

         np.where( (snap.rho > 3e-1) & (snap.u < 3e4) )[0] for galaxies only


       For the division between galaxies and ICM, ...
       log10(rho)/log10(u)
       -5/2.2
       3.8 2.8
       -1.2/5

        np.where( ( (np.log10(snap.rho)+5) - (np.log10(snap.u)-2.2)*(3.8/2.8) ) < 0)[0] for galaxies only
    '''
    H_bems, xedges, yedges = np.histogram2d(-posrot[iL, 0],
                                            -posrot[iL, 1],
                                            weights=brems[iL],
                                            range=[[-XSize_z, XSize_z],
                                                   [-XSize_z, XSize_z]],
                                            bins=nbins_x)  #
    H_mass, xedges, yedges = np.histogram2d(
        -posrot[iL, 0],
        -posrot[iL, 1],
        range=[[-XSize_z, XSize_z], [-XSize_z, XSize_z]],
        bins=nbins_x)  #weights=np.ones( (iL.shape[0])),
    H_bems_conv = A_Xbeam * ndi.gaussian_filter(
        H_bems, (myu.FWHM2sigma * Xbeam[1], myu.FWHM2sigma * Xbeam[1])
    )  ## Gaussian convolution; the FWHM2sigma factor stems from the fact that the beam width (FWHM) is about two times the Gaussian standard deviation
    H_mass_conv = A_Xbeam * ndi.gaussian_filter(
        H_mass, (myu.FWHM2sigma * Xbeam[1], myu.FWHM2sigma * Xbeam[1])
    )  ## Gaussian convolution, see above
    ''' VELOCITY FIELD
    Here we derive the quantities needed to plot a mass weighted velocity field
    '''
    H_gas_x, xedges, yedges = np.histogram2d(-posrot[iL, 0],
                                             -posrot[iL, 1],
                                             weights=velrot[iL, 0],
                                             range=[[-XSize_z, XSize_z],
                                                    [-XSize_z, XSize_z]],
                                             bins=nbins_x)
    H_gas_x_conv = A_Xbeam * ndi.gaussian_filter(
        H_gas_x, (myu.FWHM2sigma * Xbeam[1], myu.FWHM2sigma * Xbeam[1]))

    # Get the real average velocity, Subtract the average central velocity
    H_gas_x = H_gas_x / H_mass - H_gas_x_conv[
        int(nbins_x / 2), int(nbins_x / 2)] / H_mass_conv[int(nbins_x / 2),
                                                          int(nbins_x / 2)]
    H_gas_x_conv = H_gas_x_conv / H_mass_conv - H_gas_x_conv[
        int(nbins_x / 2), int(nbins_x / 2)] / H_mass_conv[int(nbins_x / 2),
                                                          int(nbins_x / 2)]

    H_gas_y, xedges, yedges = np.histogram2d(-posrot[iL, 0],
                                             -posrot[iL, 1],
                                             weights=velrot[iL, 1],
                                             range=[[-XSize_z, XSize_z],
                                                    [-XSize_z, XSize_z]],
                                             bins=nbins_x)
    H_gas_y_conv = A_Xbeam * ndi.gaussian_filter(
        H_gas_y, (myu.FWHM2sigma * Xbeam[1], myu.FWHM2sigma * Xbeam[1]))

    H_gas_y = H_gas_y / H_mass - H_gas_y_conv[
        int(nbins_x / 2), int(nbins_x / 2)] / H_mass_conv[int(nbins_x / 2),
                                                          int(nbins_x / 2)]
    H_gas_y_conv = H_gas_y_conv / H_mass_conv - H_gas_y_conv[
        int(nbins_x / 2), int(nbins_x / 2)] / H_mass_conv[int(nbins_x / 2),
                                                          int(nbins_x / 2)]

    #    H_gas_speed             = np.sqrt(H_gas_x     **2 + H_gas_y     **2)
    H_gas_speed_conv = np.sqrt(H_gas_x_conv**2 + H_gas_y_conv**2)
    H_gas_speed_conv[np.where(H_mass_conv < 1e2)] = 0

    H_gas_angle = np.arctan(H_gas_y / H_gas_x) * 180 / np.pi
    H_gas_angle_conv = np.arctan(H_gas_y_conv / H_gas_x_conv) * 180 / np.pi

    if saveFITS:
        iom.check_mkdir(savefolder)
        fitstypes = ['Brems', 'MassSpeed', 'MassAngle', 'Mass']
        fitsarray = [
            np.log10(H_bems_conv).clip(min=-6.5) + 6.5, H_gas_speed_conv,
            H_gas_angle_conv,
            np.log10(H_mass_conv)
        ]  # np.clip(np.log10(H_bems_conv), 0, -9)

        for IM, fitstype in zip(fitsarray, fitstypes):
            fitsname = '%s/maps/z%04.f/%s-%s.fits' % (
                savefolder, gcl.mockobs.z_snap * 1000, gcl.name, fitstype)
            if verbose: print('Gonna save', fitsname)
            gcl.maps_update(IM, fitstype, fitsname, dinfo=mapinfo)

        H_x = H_gas_angle  # placeholder, formerly indarr[0,:] + 1
        H_y = H_gas_angle  # placeholder, formerly indarr[1,:] + 1

        fitstypes = [
            'Ra',
            'Dec',
            'dx',
            'dy',
        ]
        fitsarray = [H_x, H_y, H_gas_x_conv,
                     H_gas_y_conv]  # np.clip(np.log10(H_bems_conv), 0, -9)

        print(gcl.mapdic['Brems'])

        for IM, fitstype in zip(fitsarray, fitstypes):
            fitsname = '%s/maps/z%04.f/%s-%s.fits' % (
                savefolder, gcl.mockobs.z_snap * 1000, gcl.name, fitstype)
            if verbose: print('Gonna save', fitsname)
            gcl.maps_update(IM, fitstype, fitsname, dinfo=mapinfo)
    return
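# --- Hedged sketch: the weighted projection + beam convolution pattern used in
# Run_MockObs_XRay. Particle quantities are binned into 2D maps with
# np.histogram2d (weights = emissivity, mass, velocity, ...) and a beam is
# mimicked with a Gaussian filter; a mass-weighted mean divides two histograms.
# All data and beam numbers below are toys.
import numpy as np
import scipy.ndimage as ndi

rng = np.random.default_rng(1)
x, y = rng.normal(size=(2, 5000))  # projected particle positions
w = rng.random(5000)               # e.g. bremsstrahlung weights ~ (rho*fac)^2 * sqrt(u)
H, xedges, yedges = np.histogram2d(x, y, weights=w, range=[[-3, 3], [-3, 3]], bins=128)
H_conv = ndi.gaussian_filter(H, 2.0)  # 'observed' map, beam sigma of 2 pixels
M, _, _ = np.histogram2d(x, y, range=[[-3, 3], [-3, 3]], bins=128)
with np.errstate(divide='ignore', invalid='ignore'):
    mean_w = H / M  # weighted mean per pixel (NaN where no particle falls)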
Example #7
def Run_MockObs(bulked,
                GClrealisations,
                CASAmock=False,
                saveFITS=False,
                writeClusters=False,
                savewodetect=False,
                log=False,
                side_effects=False,
                filter_sp_phase=False,
                extract_subtracted=True):
    """ Runs a mock observation
        side_effects: put   True if you want the input galaxy cluster to be changed,
                            False if you want only a copy to be influenced """
    (snap, Rmodel, emptySurvey) = bulked
    savefolder = emptySurvey.outfolder
    iom.check_mkdir(savefolder)

    # Variant B: clean mask and .fits --> source parameters; like variant A from step 4 onwards
    if CASAmock:
        import drivecasa as drica
        casa = drica.Casapy()

    smt = iom.SmartTiming(rate=5e4)  # logf=outf+'smt.log'  #;  print( '###==== Step2a:  Loading configuration files ====###' )

    #  Units, conversion factors, and input variables
    fac_rho = loadsnap.conversion_fact_gadget_rho_to_nb(
        snap.head) * loadsnap.conversion_fact_ne_per_nb()  #electrons/cm^-3
    fac_T = loadsnap.conversion_fact_gadget_U_to_keV(snap.head)  # in [keV]
    fac_T2 = loadsnap.conversion_fact_gadget_U_to_keV(
        snap.head) / 8.61732814974056e-08  # to K
    """ determines if you want to change the galaxy cluster or not """
    if side_effects:
        GClrealisations_used = GClrealisations
    else:
        GClrealisations_used = copy.deepcopy(GClrealisations)

    for jj, gcl in enumerate(GClrealisations_used):
        #  Load variables and setting survey parameters
        mockobs = gcl.mockobs
        z = gcl.z.value
        hsize = mockobs.hsize
        dinf = gcl.dinfo  # Some parameters of dinfo could change, because of adaptive pixelsize etc.
        fac_x = loadsnap.comH_to_phys(snap.head, z)
        eff = Rmodel.effList[0]

        #  Units, conversion factors, and input variables
        radiounit = myu.radiounit_A * eff  # erg/s/Hz --- unit of particle luminosity in .radio snaps
        rot = mathut.Kep3D_seb(Omega=mockobs.theta,
                               i=mockobs.phi,
                               omega=mockobs.psi)
        posrot = np.dot(snap.pos, rot) * fac_x
        #velrot   = np.dot(snap.vel, rot)  # taken out, as long as we don't need to plot the velocity vectors

        smt(task='Bin_radio_[sub]')
        #print( '###==== Step 3b:  Binning cluster data cube  (radio) ====###'
        # Parameters implied
        # See Nuza+ 2012 Equ (1)
        relativistics = (1 + z)
        s_radio_SI = radiounit / myu.Jy_SI / (
            4 * np.pi * (gcl.cosmoDL * 1e-2)**2
        ) * relativistics  # radiounit*s_radio_SI is Jy/particle  (unit conversion)
        nbins = int(2 * hsize / (gcl.cosmoPS * dinf.spixel))
        if nbins > mockobs.binmax:
            binsold = nbins
            spixelold = dinf.spixel
            dinf.spixel = dinf.spixel * np.power(
                float(nbins) / float(mockobs.binmax), 0.5)
            mockobs.hsize = mockobs.hsize * np.power(
                float(nbins) / float(mockobs.binmax), -0.5)
            dinf.update_Abeam()
            nbins = mockobs.binmax
            if log:
                print(
                    'At z=%.3f with a pixel size of %.1f arcsec, the number of pixels per image is %i^2. Therefore the pixel scale was increased to %.1f arcsec and the binned box size decreased to %i kpc.'
                    % (z, spixelold, binsold, dinf.spixel, mockobs.hsize))
            hsize = mockobs.hsize
        dinf.pcenter = [nbins / 2, nbins / 2]

        if filter_sp_phase:
            """ Filters out the cooled particles that no longer belong to the hot ICM """
            iL = np.where((
                np.sqrt(snap.pos[:, 0]**2 + snap.pos[:, 1]**2 +
                        snap.pos[:, 2]**2) * fac_x < 2.0 * gcl.R200())
                          & ((8.9 + 3.3 - np.log(snap.u * fac_T2) -
                              0.65 * np.log10(snap.rho * fac_rho)) < 0)
                          & ((8.9 - 11.0 - np.log(snap.u * fac_T2) -
                              3.50 * np.log10(snap.rho * fac_rho)) < 0)
                          & ((8.9 + 6.9 - np.log(snap.u * fac_T2) +
                              0.50 * np.log10(snap.rho * fac_rho)) < 0)
                          & (snap.mach < 10))[0]
        else:
            iL = np.where(
                np.sqrt(snap.pos[:, 0]**2 + snap.pos[:, 1]**2 +
                        snap.pos[:, 2]**2) * fac_x < 2.0 * gcl.R200())[0]

        if hasattr(snap, 'radiPre'):
            if log:
                print('Run_MockObs:: Ratio of PREs to total emission',
                      (np.sum(snap.radiPre[iL])) /
                      (np.sum(snap.radi[iL]) + np.sum(snap.radiPre[iL])))

        H1, xedges, yedges = np.histogram2d(-posrot[iL, 0],
                                            -posrot[iL, 1],
                                            weights=s_radio_SI * snap.radi[iL],
                                            range=[[-hsize, hsize],
                                                   [-hsize, hsize]],
                                            bins=nbins)
        """ Difference of gaussians method - accomplishing a simple subtraction of compact sources"
        
        We do this iteratively three times to also remove those particles that where shadowed by other 
        bright particles before
        
        This method is defines by
        
        thresh: A threshold for masking
        scale_1: Smaller scale in kpc
        scale_2: Larger  scale in kpc        
        """
        thresh = 0.75
        scale_1 = 20
        scale_2 = 60

        DoG1_filter = copy.deepcopy(dinf)
        DoG1_filter.beam = [scale_1 / gcl.cosmoPS, scale_1 / gcl.cosmoPS, 0]
        DoG1_filter.update_Abeam()

        DoG2_filter = copy.deepcopy(dinf)
        DoG2_filter.beam = [scale_2 / gcl.cosmoPS, scale_2 / gcl.cosmoPS, 0]
        DoG2_filter.update_Abeam()

        DoG_mask = np.ones_like(H1)
        for _ in range(2):
            convolved_sigma1 = DoG1_filter.convolve_map(
                H1 * DoG_mask)  ## gaussian convolution
            convolved_sigma2 = DoG2_filter.convolve_map(
                H1 * DoG_mask)  ## gaussian convolution
            DoG_rel = np.divide(
                np.abs(convolved_sigma2 - convolved_sigma1) + 1e-20,
                convolved_sigma2 + 1e-20)
            DoG_mask[np.where(
                DoG_rel < thresh)] = 0.2 * DoG_mask[np.where(DoG_rel < thresh)]
        #convolved_sigma1 = DoG1_filter.convolve_map(H1)  ## gaussian convolution
        #convolved_sigma2 = DoG2_filter.convolve_map(H1)  ## gaussian convolution

        H2 = dinf.convolve_map(H1 * DoG_mask)
        #            print('____ Masked/Unmasked flux (mJy):  %6.3f %6.3f' % (np.sum(H2)/dinf.Abeam[0]*1000,s_radio_SI*np.sum(snap.radi[iL])*1000))

        smt(task='WriteDilMask_[sub]')
        #print( '###==== -- 4b:  Writing dilated .mask  ====###'
        #        mask       =  maput.numpy2mask (H2, dinf.limit, Ndil) # outfile = outfile.replace('.fits','') + '_mask.fits',

        #img = bdsm.process_image(filename= outfile+'_simple_conv.fits', thresh_isl=args.tIs, thresh_pix=args.tPi, mean_map = 'zero', beam = (0.0125,0.0125,0), rms_map = False, rms_value = 0.00045, thresh = 'hard')
        #img.export_image(outfile= outfile+'_simple_conv.ismk.fits'    , img_type='island_mask', img_format='fits', mask_dilation=5, clobber=True)
        #img.export_image(outfile= outfile+'_simple_conv.ismk.mask'    , img_type='island_mask', img_format='casa', mask_dilation=5, clobber=True)

        if CASAmock:
            """ Removed implementation; original idea:
            #1. Python: Create convolved perfect simulational output image
            #2. PyBDSM/Python: Create clean mask on that with some dilation
            #3. Casa/Python: Create constant rms and beam-corrected image (clean)
            #4. Casa/Python: Apply this clean mask with immath on constant-rms image
            #5. PyBDSM/python: Use pybdsm with detection_image= 'masked_constant rms_image'
            #6. Python. Create masked .fits mock imagename
            #7  Python. Extract radio relics """
        else:
            if log:
                print(
                    '###====          - Using the simple convolved image ====###'
                )
            IM0 = H2  #(fits.open(simpleconv))[0].data

        smt(task='CreateMask_[sub]')
        #print( '###==== Step 6:  Create masked .fits mock image ====###'
        IM1 = np.squeeze(IM0)  # !!! here unmasked! ... np.multiply(np.squeeze(IM0), mask)
        """Outdated saveFITS, please update and put to end of procedure """
        if saveFITS and CASAmock:
            maput.numpy2FITS(IM1, 'sim.vla.d.masked.fits', dinf.spixel)

        smt(task='RelicExtr_[sub]')

        relics = relex.RelicExtraction(
            IM1,
            z,
            GCl=gcl,
            dinfo=dinf,
            rinfo=cbclass.RelicRegion(
                '', [], rtype=1))  #, faintexcl=0.4, Mach=Hmach, Dens=Hdens,

        smt(task='RelicHandling_[sub]')
        relics = sorted(relics, key=lambda x: x.flux, reverse=True)
        gcl.add_relics(relics)
        if savewodetect or len(relics) > 0:

            if log and len(relics) > 0:
                print('  ++The brightest relic found has a flux density of %f mJy' % (relics[0].flux))
            iom.check_mkdir(savefolder + '/maps/z%04.f' %
                            (gcl.mockobs.z_snap * 1000))
            """ Part to derive additional relic information like average and  mach number and alpha. We also get the 
            emission weighted density, as this works only on the bright parts it is fine to work with the subset of 
            particles
            """
            alpha_help = (snap.mach[iL]**2 + 1) / (snap.mach[iL]**2 - 1)

            Hmach = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.mach[iL])
            Halpha = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * alpha_help)
            Hrho_up = SPH_binning(snap,
                                  posrot,
                                  dinf,
                                  iL,
                                  immask=DoG_mask,
                                  HSize_z=hsize,
                                  nbins=nbins,
                                  weights=lambda x: s_radio_SI * x.radi[iL] * x
                                  .rup[iL] * fac_rho)
            Htemp = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.u[iL] * fac_T)
            Harea = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.area[iL])
            Hmag = SPH_binning(
                snap,
                posrot,
                dinf,
                iL,
                immask=DoG_mask,
                HSize_z=hsize,
                nbins=nbins,
                weights=lambda x: s_radio_SI * x.radi[iL] * x.B[iL])
            Hpre = SPH_binning(snap,
                               posrot,
                               dinf,
                               iL,
                               immask=DoG_mask,
                               HSize_z=hsize,
                               nbins=nbins,
                               weights=lambda x: s_radio_SI * x.radiPre[iL])

            allflux = np.asarray([])
            for relic in relics:
                relic.wMach = Hmach[relic.pmask]
                relic.wT = Htemp[relic.pmask]
                relic.wArea = Harea[relic.pmask]
                relic.wAlpha = Halpha[relic.pmask]
                relic.wB = Hmag[relic.pmask]
                relic.wPre = Hpre[relic.pmask]
                relic.wRho_up = Hrho_up[relic.pmask]
                #                    relic.wRho        =  Hrho [relic.pmask]
                #                    relic.wRho_down   =  Hrho_down[relic.pmask]
                #                    relic.wT_up       =  Htemp_up[relic.pmask]
                #                    relic.wT_down     =  Htemp_down[relic.pmask]

                relic.wDoG_rel = DoG_rel[relic.pmask]
                allflux = np.concatenate((relic.sparseW, allflux), axis=0)
                relic.averages_quantities()
            """Save maps"""
            allflux = allflux.flatten()
            """ I couldn't come up with something better to take the inverse """
            mask = np.ones(snap.rho.shape, dtype=bool)
            mask[iL] = 0
            Subtracted, xedges, yedges = np.histogram2d(
                -posrot[mask, 0],
                -posrot[mask, 1],
                weights=s_radio_SI * snap.radi[mask],
                range=[[-hsize, hsize], [-hsize, hsize]],
                bins=nbins)
            Subtracted += H1 * (1 - DoG_mask)
            Subtracted_conv = dinf.convolve_map(Subtracted)
            if extract_subtracted:
                relics_subtracted = relex.RelicExtraction(
                    Subtracted_conv,
                    z,
                    GCl=gcl,
                    dinfo=dinf,
                    rinfo=cbclass.RelicRegion(
                        '', [],
                        rtype=1))  # , faintexcl=0.4, Mach=Hmach, Dens=Hdens,
                for relic in relics_subtracted:
                    relic.wMach = Hmach[relic.pmask]
                    relic.wT = Htemp[relic.pmask]
                    relic.wArea = Harea[relic.pmask]
                    relic.wAlpha = Halpha[relic.pmask]
                    relic.wB = Hmag[relic.pmask]
                    relic.wPre = Hpre[relic.pmask]
                    relic.wRho_up = Hrho_up[relic.pmask]
                    #                    relic.wRho        =  Hrho [relic.pmask]
                    #                    relic.wRho_down   =  Hrho_down[relic.pmask]
                    #                    relic.wT_up       =  Htemp_up[relic.pmask]
                    #                    relic.wT_down     =  Htemp_down[relic.pmask]

                    relic.wDoG_rel = DoG_rel[relic.pmask]
                    relic.averages_quantities()
                gcl.compacts = relics_subtracted

            if saveFITS:
                """ Here the maps are already masked with the detection region """

                smt(task='WriteFits_[writes,sub]')
                if log:
                    print(
                        '###==== Step 4:  Preparing FITS file & folders ====###'
                    )

                parlist = (savefolder, gcl.mockobs.z_snap * 1000, gcl.name,
                           Rmodel.id)
                gcl.maps_update(H1, 'Raw',
                                '%s/maps/z%04.f/%s-%04i_native.fits' % parlist)
                gcl.maps_update(IM1, 'Diffuse',
                                '%s/maps/z%04.f/%s-%04i.fits' % parlist)
                gcl.maps_update(
                    Subtracted, 'CompModell',
                    '%s/maps/z%04.f/%s-%04i_compact.fits' % parlist)
                gcl.maps_update(
                    Subtracted_conv, 'Subtracted',
                    '%s/maps/z%04.f/%s-%04i_compactObserved.fits' % parlist)
                if len(relics) > 0:
                    gcl.maps_update(
                        gcl.Mask_Map(Hmach, normalize=allflux), 'Mach',
                        '%s/maps/z%04.f/%s-%04i_mach.fits' % parlist)
                    gcl.maps_update(
                        gcl.Mask_Map(Hrho_up, normalize=allflux), 'RhoUp',
                        '%s/maps/z%04.f/%s-%04i_rhoup.fits' % parlist)
                    gcl.maps_update(
                        gcl.Mask_Map(Htemp, normalize=allflux), 'Temp',
                        '%s/maps/z%04.f/%s-%04i_temp.fits' % parlist)
                    gcl.maps_update(gcl.Mask_Map(Hmag, normalize=allflux), 'B',
                                    '%s/maps/z%04.f/%s-%04i_B.fits' % parlist)
                    gcl.maps_update(
                        gcl.Mask_Map(Hpre, normalize=allflux), 'PreRatio',
                        '%s/maps/z%04.f/%s-%04i_prerat.fits' % parlist)
                gcl.maps_update(
                    DoG_rel, 'DoG_rel',
                    '%s/maps/z%04.f/%s-%04i_DoG_rel.fits' % parlist)
                gcl.maps_update(
                    DoG_mask, 'DoG_mask',
                    '%s/maps/z%04.f/%s-%04i_DoG_mask.fits' % parlist)
            """ PhD feature --> plot the DoF images in a subplot
##                import matplotlib.pyplot as plt   
##                with np.errstate(divide='ignore', invalid='ignore'):
##                    DoG_rel            = np.divide(np.abs(convolved_sigma2-convolved_sigma1)+1e-20,convolved_sigma2+1e-20)
##                pixR200 =        gcl.R200()/(gcl.cosmoPS*dinf.spixel)     
##                bou     =  gcl.R200()*1.5   # pixR200*1
##                f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, figsize=(30,12)) #, sharey='row', sharex='col', sharey='row'
##                ax1.imshow( np.power((np.abs(convolved_sigma1-convolved_sigma2)), 1/1 )     , extent=(-hsize, hsize, -hsize, hsize)) #-bou+cen
##                im2 = ax2.imshow( DoG_rel                                 , extent=(-hsize, hsize, -hsize, hsize), vmin=0.2, vmax=1 ) #-bou+cen           
##                XX,YY = np.meshgrid(xedges[0:-1]+0.5*gcl.cosmoPS*dinf.spixel,yedges[0:-1][::-1]+0.5*gcl.cosmoPS*dinf.spixel) #yedges[-1:0]
##                ax2.contour(XX, YY, DoG_mask,  colors='r', levels=[0.5])
##                ax3.imshow( np.power(dinf.convolve_map(H1*DoG_mask), 1/1 ), extent=(-hsize, hsize, -hsize, hsize)  ) #-bou+cen
##                ax1.set_xlim(-bou, bou)
##                ax1.set_ylim(-bou, bou)
##                ax2.set_xlim(-bou, bou)
##                ax2.set_ylim(-bou, bou) 
##                ax3.set_xlim(-bou, bou)
##                ax3.set_ylim(-bou, bou)
##                
##                ax1.set_title('DoG')
##                ax2.set_title('DoG/LowResImage + mask (contours)')
##                ax3.set_title('Filtered NVSS')
##                
##                print('CreateMokObs',  pixR200, gcl.R200(),dinf.spixel, gcl.cosmoPS)
##                circle1 = plt.Circle((0, 0), gcl.R200(), fill=False, color='w', ls='-')
##                circle2 = plt.Circle((0, 0), gcl.R200(), fill=False, color='w', ls='-')      
##                circle3 = plt.Circle((0, 0), gcl.R200(), fill=False, color='w', ls='-')   
##
##                ax1.add_artist(circle1)
##                ax2.add_artist(circle2)
##                ax3.add_artist(circle3)
##                
##                cax2 = f.add_axes([0.42, 0.12, 0.2, 0.03]) 
##                cb2  = f.colorbar(im2, format='%.2f', ticks=[0.0, 0.25, 0.5, 0.75, 1.0], cax = cax2, orientation="horizontal")  #label='average Mach', 
##                
##                plt.savefig('%s/%s-%04i_joined.png'        % (savefolder, gcl.name, Rmodel.id)) #dpi=400
##                plt.savefig('%s/%s-%04i_joined.pdf'        % (savefolder, gcl.name, Rmodel.id)) #dpi=400          
#                """ """
            
            gcl.add_relics(relics) 
            PhD feature end """
    if writeClusters:
        """This is here because some outputs get lost in a multiprocessing heavy input/output queue process"""
        for gcl in GClrealisations_used:
            filename = 'GCl-%05i' % (gcl.mockobs.id)
            iom.pickleObject((gcl, Rmodel),
                             savefolder + '/pickled/',
                             filename,
                             append=False)

    if log: print('Finished with all efficiency values')
    return True, smt, GClrealisations_used, Rmodel
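# --- Hedged sketch: the difference-of-Gaussians masking from Run_MockObs. The
# raw map is convolved at two scales; pixels where the relative difference
# |G2 - G1|/G2 stays below thresh are damped in two passes, and the damped map
# is then convolved with the beam. Scales are in pixels here (kpc above); the
# map is a toy, and the final convolution stands in for dinf.convolve_map.
import numpy as np
import scipy.ndimage as ndi

rng = np.random.default_rng(2)
H1 = rng.random((128, 128)) * 1e-3
H1[40, 40] = 5.0  # a bright, compact feature
thresh, sigma_1, sigma_2 = 0.75, 2.0, 6.0

DoG_mask = np.ones_like(H1)
for _ in range(2):  # two passes, as in the loop above
    c1 = ndi.gaussian_filter(H1 * DoG_mask, sigma_1)
    c2 = ndi.gaussian_filter(H1 * DoG_mask, sigma_2)
    DoG_rel = (np.abs(c2 - c1) + 1e-20) / (c2 + 1e-20)
    DoG_mask[DoG_rel < thresh] *= 0.2  # damp where the two scales nearly agree
H2 = ndi.gaussian_filter(H1 * DoG_mask, sigma_1)  # 'observed' masked map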
Example #8
def PrepareRadioCube(snap,
                     psiFile='Hoeft_radio/mach_psi_table.txt',
                     machFile='Hoeft_radio/q_mach_machr_table.txt',
                     log=False,
                     machmin=1.5):
    """ 
    This does some preparation steps that should be same, no matter, what model is used.
    I strongly recommend to use interpolated files like Hoeft_radio/mach_psi_tablefine(10,3).txt & Hoeft_radio/q_mach_machr_tablefine(10,3).txt
    - finner interpolation steps will slow down the computation."""

    f = 6.5  # shock area fraction following Tab. 1 in Hoeft+2008; also consider M -> M*1.045 due to Hoeft+2008
    N_k = 64.  # smoothing kernel for shock detection (not equal to the smoothing kernel for other properties)

    smt = io.SmartTiming(rate=5e4)
    smt(task='PrepareRadioCube')
    if log: print('###==== Step 0a:  Prepare cluster data cubes ====###')
    #==== Load psiFile   for psi factor & machfile for mach-numbers conversion factors
    H_mach = np.loadtxt(machFile, skiprows=0)
    H_psi = np.loadtxt(psiFile, skiprows=0)[:, 1::]  # this drops the temperature values ... which is why we read them separately

    psi_x, psi_y = interpolate.LoadFile_psi(psiFile)

    # First: apply a filter for points with mach number > psi_y[0] = 1.23, the minimal value in the mach list.
    # A lower cut of machmin=1.5 is implemented just because it decreases the number of computed particles quite significantly.
    MF = np.where(snap.mach >= machmin)

    #==== Gets conversion factors
    rho_to_ne = loadsnap.conversion_fact_gadget_rho_to_nb(
        snap.head) * loadsnap.conversion_fact_ne_per_nb(
        )  # [Msol parsec-3] com+h--> [electrons cm-3] physical
    U_to_keV = loadsnap.conversion_fact_gadget_U_to_keV(snap.head)

    # Derive the temperature (in a subarray)
    T = U_to_keV * snap.udow[MF]  # in [keV]
    """ Finding the closest corresponding value to the table entries
        --> x is for the temperature, y stands for the mach number """
    results_x = math.find_closest(psi_x, T)
    results_y = math.find_closest(psi_y, snap.mach[MF] * 1.045)

    s = H_mach[results_y, 4]  # electron energy distribution spectral index as a function of mach number
    """ ... get an idea of the corresponding illuminated area """
    h = snap.hsml[MF] * 1e-3  # com+h [kpc] --> com+h [Mpc]; hydrodynamical smoothing length, i.e. size of the kernel, determined as a measure of density. IMPORTANT: the term h^-1 * expansion factor (because of the comoving frame) is added later in CreateRadioCube!
    factor_A = loadsnap.comH_to_phys(snap.head)  # smoothing kernel part B; 0.7 stems from h=0.7; 1/(1+z) is the expansion factor; second part of computing A_i
    A_i = f * h * h / N_k * factor_A**2  # in [Mpc]^2 ... inspired by Equ. 18 of Hoeft+2008 (http://mnras.oxfordjournals.org/content/391/4/1511.full.pdf)

    #    """ DEBUGGING to infer the internal smoothing length and to compare it with other equations """
    #    for luck in zip(snap.hsml[::30000], snap.rho[::30000]):
    #             print(luck[0]*factor_A,luck[1]*rho_to_ne*1.e4)

    # Get other values for radio emission formula
    rho_e = snap.rdow[MF] * rho_to_ne * 1.e4  # in [electrons 10^-4 cm^-3] at z=0
    Xi_e = 1.  # Xi_e: energy fraction of suprathermal electrons, around 10^(-6...-4); set to one by default and rescaled later on
    """ If the Gelszinnis model of PREs is not used, this can be omitted """
    snap.DSAPsi = snap.rdow * 0
    snap.DSAPsi = H_psi[results_y, results_x]
    """==="""

    #=== Compute Radio Emission: Using Eq. 32 from Hoeft-Brueggen 2007
    snap.radi = np.array(snap.rdow * 0, dtype='float64')  # initialise a numpy array of the same size and shape as the others; float64 is needed because the radio emissivity values can become very large
    if log:
        print(
            "PrepareRadioCube (np.min(A_i),np.max(rho_e),np.max((Xi_e/0.05)),np.max((T/7)),np.max(H_psi[results_y,results_x]): %10.2e %10.2e %10.2e %10.2e %10.2e"
            % (np.min(A_i), np.max(rho_e), np.max(
                (Xi_e / 0.05)), np.max(
                    (T / 7)), np.max(H_psi[results_y, results_x])))
    #    print(':::', np.sum(A_i), np.sum(rho_e), np.sum(Xi_e), np.sum(np.power(T/7.,1.5)), np.max(H_psi[results_y,results_x]) )
    snap.radi[MF] = 6.4e34 * A_i * rho_e * (Xi_e / 0.05) * np.power(
        T / 7.,
        1.5) * snap.DSAPsi  # in [erg/s/Hz], divided by (factor_nu*factor_B)
    #    print(':_:_:', np.sum(snap.radi[MF]) )

    # The spectral index of the electron energies, s: equals two times the injection index and is defined > 0 in the table
    snap.s = snap.rdow * 0
    snap.s[MF] = s

    #=== Add the area
    snap.area = np.array(snap.rdow * 0, dtype='float64')  # initialise a numpy array of the same size and shape as the others
    snap.area[MF] = A_i

    # The spectral index of the (downstream-integrated) radio emission is (1 - s)/2, SO THAT ALPHA FOR RADIO RELICS SHOULD BE NEGATIVE
    #snap.alpha =   snap.rdow*0
    #snap.alpha  = -(s-1.)/2.

    return snap, MF  # better: a subarray like snap.radi and snap.alpha
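# --- Hedged sketch (editor's addition, not part of the original pipeline) ---
# Isolates part A of Hoeft & Brueggen (2007) Eq. 32 as applied above: the
# frequency-independent emissivity per particle, before the nu- and B-field
# factors are applied in CreateRadioCube. The function name and signature are
# illustrative assumptions; the constants mirror the code above.
import numpy as np

def emissivity_partA(A_i, rho_e, T, psi, Xi_e=1.0):
    """Per-particle radio emissivity in [erg/s/Hz], part A of Eq. 32.

    A_i   : illuminated shock area in [Mpc^2]
    rho_e : downstream electron density in [1e-4 cm^-3]
    T     : downstream temperature in [keV]
    psi   : Mach- and temperature-dependent efficiency Psi(M, T)
    Xi_e  : energy fraction of suprathermal electrons (rescaled later)
    """
    return 6.4e34 * A_i * rho_e * (Xi_e / 0.05) * np.power(T / 7., 1.5) * psi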
Example #9
0
def CreateRadioCube(snapred, Rmodel, z, nuobs=1.4, logging=True):
    """ The returned radio luminousity is the radio luminousity in the restframe; Compare Nuza+ 2012 Equ (1)
        Also see Section 5 Araya-Melo 2012 et al.  
     Please mind that the density (-->B) estimate is set to the redshift of the cluster snap, but Bcmb and observing frequency to the redshift of the mockobs
    """

    compress = Rmodel.compress
    kappa = Rmodel.kappa
    B0 = Rmodel.B0

    snap, MF = snapred
    smt = io.SmartTiming(rate=5e4)
    smt(task='CreateRadioCube_[sub]')

    # Get the density right
    nurest = float(nuobs) * (1 + z)  # [GHz]; differs from the observing frequency!
    z_snap = 1 / snap.head['aexpan'] - 1
    f_cor = (1. + z) / (1. + z_snap)  # correction factor so that the cube's properties are computed at the correct redshift

    rho_conv = f_cor**3 * loadsnap.conversion_fact_gadget_rho_to_nb(
        snap.head) * loadsnap.conversion_fact_ne_per_nb(
        ) * 1e4  # in [electrons 10^-4 cm^-3] at z=z_obs
    T_conv = loadsnap.conversion_fact_gadget_U_to_keV(
        snap.head) / 8.61732814974056e-08  # K
    # Compute magnetic field

    if compress == -1:
        """Uses the scaling by Nuza+2016"""
        R = 1
        rho_e = rho_conv * snap.rdow[MF]  # downstream density
        B = B0 * np.power(rho_e, kappa)  # in [muG] - using the formula for the B-field strength
    else:
        R = np.divide(snap.rdow[MF], snap.rup[MF])**compress  # compress=1 shows the same behaviour as the Nuza model, just with a lower magnetic field
        rho_e = rho_conv * snap.rup[MF]  # upstream density
        """ DEVELOPMENT """
        if Rmodel.B_para == 'press':
            B = B0 * np.power(rho_e * snap.u[MF] / 1e6, kappa) * R  # in [muG] - B-field strength from pressure, including compression
        elif Rmodel.B_para == 'dens':
            """ Nuza+2017 parametrisation """
            B = B0 * np.power(rho_e, kappa) * R  # in [muG] - B-field strength from density, including compression
        else:
            raise ValueError('Parametrization of the magnetic field is unknown. Choose BFieldModel.B_para.')
        B[B > 1e5] = 1e5  # cap the magnetic field; for odd magnetic values the field strength can otherwise overflow

    snap.B = np.array(snap.u * 0, dtype='float64')  # initialise a numpy array of the same size and shape as the others
    snap.B[MF] = B
    Bcmb = 3.25 * (1 + z)**2.  # in [muG] - from the equation shortly before Eq. 2.43 in 'Galactic and Intergalactic Magnetic Fields' by Ulrich Klein & Andrew Fletcher (2014)
    # Compute additional factors needed for radio flux computations
    factor_nu = np.power((nurest / 1.4), (-snap.s[MF] / 2.))  # dimensionless; 1.4 GHz is the reference frequency
    factor_B = np.power(B, 1. + snap.s[MF] / 2.) / (B**2. + Bcmb**2.)  # magnetic field strength dependence
    factor_fudge = f_cor**3 / f_cor**2 / (1 + z)**0.0  # first term for the density factor, second for the area; the third is just to make it fit. It is set to 1 (no correction), since the radio emissivity in Sebastian's cube is interpreted as the one at the observer's frequency

    f_boost = np.array(snap.rdow * 0, dtype='float64')
    """Is based on the idea of a boosting-factor, and is related to more recent discussion of our working group.
       The presumption is that an additional CR population exists. In the following model it is assumed that
       the time since the last shock is density dependent. M.Hoeft derived a boosting factor 'f_boosed' based on this assumption.
       
       remark:
       Some authours say, that merger shocks put more energy (and even more highly relativistic particles) in the cluster. This model is not considered here
    """
    poisson_factor = 1
    if Rmodel.pre:

        if isinstance(Rmodel, cbclass.PreModel_Hoeft):

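            # Shocked-time model: a power-law interpolation in log-log space
            # between the anchor points (n0, t0) and (n1, t1) gives the time
            # since the last shock as a function of density, clipped outside
            # the anchors; a single draw from an exponential distribution
            # (scale 1, mean 1) - the 'poisson_factor' - adds stochastic
            # scatter to the boosting factor below.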
            delta_n = np.log10(Rmodel.n0) - np.log10(Rmodel.n1)  # should be 5
            delta_t = np.log10(Rmodel.t0) - np.log10(
                Rmodel.t1)  # should be < 0
            slope = delta_t / delta_n
            t_shocked = Rmodel.t0 * (rho_e / Rmodel.n0)**slope
            t_shocked[rho_e < Rmodel.n0] = Rmodel.t0
            t_shocked[rho_e > Rmodel.n1] = Rmodel.t1

            poisson_factor = np.random.exponential(1, 1)
            gamma_boost = 2.4e4 / t_shocked * poisson_factor / (
                (B / R)**2. + Bcmb**2.
            )  #Magnetic field before enhancement through compression
            f_boost[MF] = Rmodel.ratio * gamma_boost**(snap.s[MF] - 2)

        elif isinstance(Rmodel, cbclass.PreModel_Gelszinnis):
            """modify xhi,
                 #add a certain fraction (due to amount of preexisting electrons) to emission
                 #Add exponential term to emissivity of xhi --> strengthens the emission of low mach number shocks
                 #Also (log?)normal normalization of plasma --> should saturate at a certain fraction
                 #Influence for high mach number shocks should roughly add the same amount of emission as the thermal pool in average.
            """
            f_expid = expit(
                (Rmodel.sigmoid_0 - np.log10(snap.rdow[MF] * snap.u[MF] *
                                             (rho_conv * T_conv))) /
                Rmodel.sigmoid_width)
            s = 1
            if Rmodel.p_sigma > 0:
                s = np.random.normal(0, Rmodel.p_sigma)
            f_boost[MF] = s * np.power(snap.DSAPsi,
                                       Rmodel.PREexpo - 1) * f_expid
        else:
            print(
                'The model of pre-existing electrons is unknown. Choose the inheriting class of ObjectClasses::Rmodel.'
            )

    #=== Compute Radio Emission: Using Eq. 32 from Hoeft-Brueggen 2007, part B
    ergSI = 1.e-7  # [W per erg/s]: 1 erg/s = 1e-7 W
    snap.radi[MF] = ergSI * snap.radi[
        MF] * factor_nu * factor_B * factor_fudge  # in  [W/Hz]
    snap.radiPre = snap.radi * f_boost  # in  [W/Hz]
    #print 'snap.s[MF]', snap.s[MF] DEBUGGING CHEAT here!
    if logging:
        print(
            '  Total radio power in cube: %5.2e W/Hz at %5.2f GHz rest frame frequency'
            % (np.sum(snap.radi[MF]), nurest))
    radiofolder = 'ToImplement'  #strSn.replace('shocks','radio')

    return (snap, smt, poisson_factor, radiofolder
            )  # A tuple, only [0:2] is relevant
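# --- Hedged sketch (editor's addition, not part of the original pipeline) ---
# Part B of Eq. 32 as applied in CreateRadioCube: the frequency scaling and
# the magnetic-field factor weighing synchrotron against inverse-Compton
# losses. Self-contained (numpy only); names are illustrative assumptions.
import numpy as np

def eq32_partB(s, B, z, nuobs=1.4):
    """Dimensionless factors applied to the part-A emissivity.

    s     : electron energy spectral index (> 0, as tabulated)
    B     : magnetic field strength in [muG]
    z     : redshift of the mock observation
    nuobs : observing frequency in [GHz]
    """
    nurest = nuobs * (1.0 + z)                        # rest-frame frequency [GHz]
    Bcmb = 3.25 * (1.0 + z)**2                        # CMB-equivalent field [muG]
    factor_nu = (nurest / 1.4)**(-s / 2.0)            # spectral scaling, 1.4 GHz reference
    factor_B = B**(1.0 + s / 2.0) / (B**2 + Bcmb**2)  # synchrotron vs. inverse-Compton losses
    return factor_nu * factor_B

# e.g. eq32_partB(3.2, 1.0, 0.2) ~ 3e-2 for a ~1 muG field at z = 0.2.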
Example #10
0
def survey_run(surveys,
               infolder='',
               outfoldertop='/data/ClusterBuster-Output/',
               plot=True):
    """ Extracts survey relics from an real world survey
    """

    for survey in surveys:
        print(
            '###==== Step 0b: Initialize internal variables/objects for survey: %s ====###'
            % survey)
        smt = iom.SmartTiming()

        ClList = []
        Excluded = []
        subtract = ['slist', 'fits']  # im, fits, slist

        Jy_SI = 1e-26  # W/Hz/m^2
        outfolder = '%s%s' % (outfoldertop, survey)
        topfolder = os.getcwd()  # e.g. '/home/jakobg/lib/ClusterBuster/Relics_Surveys/'
        iom.check_mkdir(outfolder)  # create the folder if necessary

        print('###==== Step 1: Load data and analyse it   ====###')
        # np.genfromtxt('Analysis_RORRS/ClusterRelics.csv'', delimiter=';')
        ClusterFile = infolder + 'ClusterList/ClusterAfterNuza2017_clusters.csv'
        RegionFile = infolder + 'ClusterList/ClusterAfterNuza2017_regions.csv'

        Clusters = pd.read_csv(ClusterFile,
                               comment='#',
                               delimiter=',',
                               quotechar='"')
        Clusters = Clusters.where(Clusters.notnull(), 0)
        """ Part of development: replace NaN values with values that can be handled by ClusterBuster """

        for strings in ['REF_LX', 'REF_M200', 'REF_M500', 'REF_F']:
            Clusters[strings] = Clusters[strings].replace(np.nan,
                                                          '',
                                                          regex=True)

        for values in ['M200', 'M500', 'LX_500_0.1-2.4']:
            Clusters[values] = Clusters[values].replace(np.nan, 0, regex=True)

        n = 0
        for index, CL in Clusters.iterrows():
            if CL['Cluster'] and CL['Cluster'] not in [
                    o.name for o in ClList
            ] and CL['Cluster'] not in ['']:
                """I did this to remove unfinished, but recent additions to the relic database"""
                #print(type(CL['Discovery']))
                #if math.isnan(CL['Discovery']):
                #    pass
                #elif int(CL['Discovery']) >= 2018:
                #    continue

                try:
                    """I did this to remove unfinished but recent additions to the relic database"""
                    if CL['Cluster'] == '#':
                        continue
                    if int(CL['Discovery']) >= 2018:
                        continue
                except (ValueError, TypeError):
                    pass

                n += 1
                #                if n > 5:
                #                     continue
                Cl_name = CL['Cluster']
                status = CL['FLAG_INCLUDED']

                print(CL)

                RA_host = float(CL['RA'])  #float(CL[2])
                Dec_host = float(CL['Dec'])

                diff = np.sqrt((float(CL['RA_Xmax']) - RA_host)**2 +
                               (float(CL['Dec_Xmax']) - Dec_host)**2) * 3600
                if not math.isnan(diff):
                    print(
                        'Two different centre positions given for cluster %s. The offset was %.1f arcsec'
                        % (Cl_name, diff))
                    RA_host = float(CL['RA_Xmax'])
                    Dec_host = float(CL['Dec_Xmax'])

                z = float(CL['z'])
                M200 = float(CL['M200']) * 1e14
                M500 = float(CL['M500']) * 1e14
                Lx = float(CL['LX_500_0.1-2.4']) * 1e44
                flux_lit = float(CL['F_lit'])

                halo = CL['FLAG_Halo']

                try:
                    ClassFlag = ('true' in CL['Type_Flag'].lower())
                except (AttributeError, TypeError):
                    ClassFlag = False

                #create Class object
                GCl = cbclass.Galaxycluster(name=Cl_name,
                                            RA=RA_host,
                                            Dec=Dec_host,
                                            z=z,
                                            M200=M200,
                                            M500=M500,
                                            Lx=Lx,
                                            Lx_lit=Lx,
                                            flux_lit=flux_lit,
                                            ClassFlag=ClassFlag,
                                            halo=halo,
                                            status=status)

                # add further references
                GCl.Lx.ref = cdb.reference(CL['REF_LX'],
                                           rtype='text',
                                           page=None,
                                           nr=None)
                GCl.M200.ref = cdb.reference(CL['REF_M200'],
                                             rtype='text',
                                             page=CL['REFPAGE_M200'],
                                             nr=None)
                GCl.M500.ref = cdb.reference(CL['REF_M500'],
                                             rtype='text',
                                             page=CL['REFPAGE_M500'],
                                             nr=None)
                GCl.flux_lit.ref = cdb.reference(CL['REF_F'],
                                                 rtype='text',
                                                 page=CL['REFPAGE_F'],
                                                 nr=None)

                #============= Load  survey (NVSS) image  =============#
                if GCl.status not in ['TRUE']:
                    ClList.append(GCl)
                    continue
                fitsimage = infolder + 'Images_%s/%s-%s.fits' % (
                    survey, survey, Cl_name)
                image, center, spixel = fitsut.fits2numpy(fitsimage)

                if survey == 'NVSS':
                    s_pixel = [
                        spixel[1] * GCl.cosmoPS * 3600, spixel[1] * 3600
                    ]
                    NVSSbeam = [45., 45. / s_pixel[1]]
                    NVSS_rms = 4.5e-4  # in Jy/beam
                    NVSSlimit = 2 * NVSS_rms
                    NVSSnu = 1.4
                    telescope = 'VLA-D'
                    GCl.dinfo = cbclass.DetInfo(
                        beam=[NVSSbeam[0], NVSSbeam[0], 0],
                        spixel=s_pixel[1],
                        rms=NVSS_rms,
                        limit=NVSSlimit,
                        telescope=telescope,
                        nucen=NVSSnu,
                        center=center[0],
                        pcenter=center[1])
                if survey == 'TGSS':
                    s_pixel = [
                        spixel[1] * GCl.cosmoPS * 3600, spixel[1] * 3600
                    ]
                    TGSSbeam = [25., 25. / s_pixel[1]]
                    TGSS_rms = 3.0e-3  # in Jy/beam
                    TGSSlimit = 2 * TGSS_rms
                    beamrec = 1. if GCl.Dec > 19 else 1. / np.cos(
                        np.radians(GCl.Dec - 19))
                    TGSSnu = 0.1475
                    telescope = 'GMRT'
                    GCl.dinfo = cbclass.DetInfo(
                        beam=[TGSSbeam[0] * beamrec, TGSSbeam[0], 0],
                        spixel=s_pixel[1],
                        rms=TGSS_rms,
                        limit=TGSSlimit,
                        telescope=telescope,
                        nucen=TGSSnu,
                        center=center[0],
                        pcenter=center[1])
                dinfo_survey = GCl.dinfo
                #============= Load relic search region  =============#
                # Make in np.image
                regfile = infolder + 'Regions/RR_%s.reg' % (Cl_name)
                GCl.regions = ioclass.readDS9relics(regfile, spixel, center[0],
                                                    center[1])

                #============= Subtract Sources  =============#
                # sources are kept in the Sources folder
                # try loading the folder:
                #img = bdsm.process_image(args.file+args.ft, thresh_isl=args.tIs, thresh_pix=args.tPi, mean_map = 'zero', beam = (0.0125,0.0125,0), rms_map = False, rms_value = 0.00045, thresh = 'hard')
                #except:

                #pybdsm.catalog_type
                #--> create a .fits image out of that, which is then subtracted from the image ...
                smt(task='subtraction')
                model = np.zeros(image.shape)
                model_conv = np.zeros(image.shape)
                use_list, use_im = (False, False)
                if 'slist' in subtract:
                    slist = infolder + 'Sources/slist/%s.slist' % Cl_name
                    if os.path.isfile(slist):
                        scL = iom.read_para_list(slist)

                        for sc in scL:

                            if sc['shape'] == 'Gaussian':
                                g_size = [
                                    float(sc['majoraxis']) / s_pixel[1] * 60,
                                    float(sc['minoraxis']) / s_pixel[1] * 60
                                ]
                            else:
                                g_size = [
                                    GCl.dinfo.beam[0] / GCl.dinfo.spixel,
                                    GCl.dinfo.beam[1] / GCl.dinfo.spixel
                                ]
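                            # assumed: catalogued compact-source fluxes refer
                            # to 1.4 GHz; rescale to the survey frequency with
                            # a canonical spectral index of -0.7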
                            freq_factor = (GCl.dinfo.nucen / 1.4)**(-0.7)
                            COOp = iom.CoordinateToPixel(
                                iom.J2000ToCoordinate(sc['dir']), spixel,
                                center[0], center[1])

                            GCl.compacts.append(sc)
                            #This is not ideal --> better to create an unconvolved model and convolve it with the desired beam
                            model += maput.ImageGaussian_inv(
                                model.shape,
                                sc['flux'] * 1e-3 * freq_factor,
                                g_size, [COOp[0] - 1, COOp[1] - 1],
                                theta=sc['theta'],
                                FWHM=True)  #*gaussian_area
                            #model_conv += maput.ImageGaussian_inv(model_conv.shape, sc['flux']*1e-3*freq_factor, g_size, [COOp[0]-1.,COOp[1]-1], theta = sc['theta'], FWHM=True)  #*gaussian_area
                        model_conv = model
                        use_list = True

                if 'fits' in subtract:
                    highres_image_path = infolder + 'Images_%s/%s-%s.fits' % (
                        "FIRST", "FIRST", Cl_name)
                    if os.path.isfile(highres_image_path):

                        # regrid

                        # http://reproject.readthedocs.io/en/stable/  --> works on fits files
                        hdu_raw = fits.open(fitsimage)[0]
                        image_HR, center_HR, spixel_HR = fitsut.fits2numpy(
                            highres_image_path)
                        s_pixel_HR = [
                            spixel_HR[1] * GCl.cosmoPS * 3600,
                            spixel_HR[1] * 3600
                        ]
                        fitsut.numpy2fits(
                            image_HR, infolder + 'Images_%s/%s-%s_test.fits' %
                            ("FIRST", "FIRST", Cl_name), s_pixel_HR[1],
                            center_HR[0], center_HR[1])
                        hdu_HR = fits.open(infolder +
                                           'Images_%s/%s-%s_test.fits' %
                                           ("FIRST", "FIRST", Cl_name))[0]
                        hdu_HR.data = hdu_HR.data.squeeze()
                        hdu_HR.data[np.isnan(
                            hdu_HR.data
                        )] = 0.  # for contour-masked NVSS images some values were NaN
                        hdu_HR.data[np.where(hdu_HR.data < 6e-4)] = 0.

                        pad = 50
                        hdu_HR.data = np.lib.pad(hdu_HR.data, pad,
                                                 maput.padwithtens)

                        FWHM2sigma = 1 / 2.354
                        FWHM_FIRST = 5.4
                        FWHM_conv = np.sqrt(GCl.dinfo.beam[0]**2 -
                                            FWHM_FIRST**2)
                        gaussian_2D_kernel = Gaussian2DKernel(
                            FWHM_conv / s_pixel_HR[1] * FWHM2sigma)
                        A_beam_old = 1.133 * (
                            (FWHM_FIRST / s_pixel_HR[1])**2)  # FIRST-beam
                        A_beam = 1.133 * (
                            (GCl.dinfo.beam[0] / s_pixel_HR[1])**2)
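                        # Matching beams: convolving with a Gaussian of
                        # FWHM_conv = sqrt(FWHM_target^2 - FWHM_FIRST^2)
                        # degrades FIRST to the survey resolution, and the
                        # ratio of beam areas (1.133 * FWHM_pix^2 each)
                        # rescales Jy/beam to the new beam. FWHM2sigma
                        # approximates 1/(2*sqrt(2*ln 2)) = 1/2.3548.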
                        """
                        The copy action is very dangerous, because the corresponding .header object is cloned, so that
                        any change in hdu_HR_conv.header also influences hdu_HR.header
                        deepcopy() is not possible. Because of the we remove hdu_HR_conv from any changes in the header
                        """
                        hdu_HR_conv = copy(hdu_HR)
                        hdu_HR_conv.data = A_beam / A_beam_old * convolve(
                            hdu_HR.data,
                            gaussian_2D_kernel,
                            normalize_kernel=True)
                        for hdu in [hdu_HR]:
                            #                            hdu.data = np.expand_dims(hdu.data, axis=0)
                            #                            hdu.data = np.expand_dims(hdu.data, axis=0)
                            hdu.header['CRPIX1'] = hdu.header['CRPIX1'] + pad
                            hdu.header['CRPIX2'] = hdu.header['CRPIX2'] + pad
                            hdu.header[
                                'NAXIS1'] = hdu.header['NAXIS1'] + pad * 2
                            hdu.header[
                                'NAXIS2'] = hdu.header['NAXIS2'] + pad * 2


#
#                        from astropy.io import fits
#                        from astropy.utils.data import get_pkg_data_filename
#                        hdu_raw = fits.open(get_pkg_data_filename('galactic_center/gc_2mass_k.fits'))[0]
#                        hdu2 = fits.open(get_pkg_data_filename('galactic_center/gc_msx_e.fits'))[0]
###
                        for hdu in [hdu_raw, hdu_HR]:
                            try:
                                hdu.data = hdu.data[0, 0, :, :]
                            except IndexError:
                                print('Test ... data dimensions already match')
                            hdu.header['NAXIS'] = 2

                            keylist = [
                                'PC01_01', 'PC02_01', 'PC03_01', 'PC04_01',
                                'PC01_02', 'PC02_02', 'PC03_02', 'PC04_02',
                                'PC01_03', 'PC02_03', 'PC03_03', 'PC04_03',
                                'PC01_04', 'PC02_04', 'PC03_04', 'PC04_04',
                                'NAXIS3', 'NAXIS4', 'CTYPE3', 'CRVAL3',
                                'CDELT3', 'CRPIX3', 'CUNIT3', 'CROTA3',
                                'CTYPE4', 'CRVAL4', 'CDELT4', 'CRPIX4',
                                'CUNIT4', 'CROTA4'
                            ]
                            for key in keylist:
                                try:
                                    del hdu.header[key]
                                    print(
                                        '[%s] removed from the .fits header' %
                                        (key))
                                except:
                                    print(
                                        '[%s] not found, so we cannot delete it from the .fits header'
                                        % (key))

                            hdu.header['EPOCH'] = 2e3
                            hdu.header['EQUINOX'] = 2e3
                            print('====================')

                        hdu_HR_conv.writeto(infolder +
                                            'Images_%s/%s-%s_test2b.fits' %
                                            ("FIRST", "FIRST", Cl_name),
                                            overwrite=True)
                        fitsut.numpy2fits(
                            hdu_HR_conv.data.squeeze(),
                            infolder + 'Images_%s/%s-%s_test2.fits' %
                            ("FIRST", "FIRST", Cl_name), s_pixel_HR[1],
                            center_HR[0], [c + pad for c in center_HR[1]])

                        print(
                            'WCS(hdu_raw.header).wcs.naxis, WCS(hdu_HR_conv.header).wcs.naxis',
                            WCS(hdu_raw.header).wcs.naxis,
                            WCS(hdu_HR_conv.header).wcs.naxis)
                        array, footprint = reproject_interp(
                            hdu_HR_conv, hdu_raw.header
                        )  # hdu_HR_conv supplies the image and its coordinate system; hdu_raw.header gives just the target system

                        print('_______', np.sum(array), footprint)
                        print('_______________________________', array.shape,
                              image.shape)
                        array = array.squeeze()  #could be removed
                        array[np.isnan(array)] = 0.

                        fitsut.map2fits(
                            array, GCl.dinfo,
                            infolder + 'Images_%s/%s-%s_test3.fits' %
                            ("FIRST", "FIRST", Cl_name))

                        model_conv = array.squeeze()  # add up  OR replace!
                        print('fits_subtraction: np.sum(model_conv):',
                              np.sum(model_conv))
                        use_im = True

                residuum = image - model_conv
                """ Development: Only get the flux within the search region """
                extreme_res = True
                residuum = maput.ContourMasking(
                    residuum, [region.cnt[0] for region in GCl.regions])

                print('%30s source subtraction;  list: %5r; image: %5r' %
                      (Cl_name, use_list, use_im))
                GCl.maps_update(
                    residuum, 'Diffuse',
                    infolder + '%s/Images_%s/diffuse/%s-%s.fits' %
                    (topfolder, survey, survey, Cl_name))
                if np.sum(model_conv) != 0 or extreme_res:
                    GCl.maps_update(
                        image, 'Raw',
                        infolder + '%s/Images_%s/raw/%s-%s_res.fits' %
                        (topfolder, survey, survey, Cl_name))
                    GCl.maps_update(
                        model, 'Modell',
                        infolder + '%s/Images_%s/subtracted/%s-%s.fits' %
                        (topfolder, survey, survey, Cl_name))
                    GCl.maps_update(
                        model_conv, 'Subtracted',
                        infolder + '%s/Images_%s/subtracted/%s-%s_conv.fits' %
                        (topfolder, survey, survey, Cl_name))
                smt()

                #============= impose relic.search  =============#
                for ii, region in enumerate(GCl.regions):
                    smt(task='RelicExtr')
                    relics = relex.RelicExtraction(
                        residuum,
                        z,
                        GCl=GCl,
                        dinfo=GCl.dinfo,
                        rinfo=region,
                        Imcenter=center,
                        subtracted=model)[0]  # faintexcl=3.6
                    smt()
                    relics = sorted(relics, key=lambda x: x.flux, reverse=True)

                    for relic in relics:
                        relic.alpha.value = region.alpha
                        print(region.alpha_err)
                        if region.alpha_err is None:
                            relic.alpha.set_std(0)
                        else:
                            relic.alpha.set_std(region.alpha_err)

                    GCl.add_relics(relics)

                # Add galaxy cluster to the list
                ClList.append(GCl)

            #============= Report why certain clusters are excluded  =============#
            else:
                RL_name = CL['Cluster']
                if CL['Identifier']:
                    RL_name += '_' + CL['Identifier']

                if CL['FLAG_INCLUDED'] in ['noMAP']:
                    string = RL_name + ' excluded because the corresponding region is not mapped by the survey.'
                else:
                    string = RL_name + ' excluded because of: ' + CL[
                        'FLAG_INCLUDED']

                Excluded.append(string)

        with open("%s/Excluded.dat" % outfolder, "w") as mf:
            for ex in Excluded:
                mf.write(ex + '\n')

        ClList = sorted(ClList, key=iom.Object_natural_keys)

        print('#=====  Last Step: Output is produced ====#')
        smt(task='output')
        """ This is an intervening step: Update and ... the missing clusters, in the future this might done at the beginning at an first step """
        ClList = updateClusters_missingRegions(
            ClList, RegionFile)  # topfolder+RegionFile

        print('#=====  A: Pickle Objects ====#')
        iom.pickleObject(ClList, outfolder + 'pickled/', 'ClList')

        print('#=====  B: Create the Survey and pickle it ====#')
        cnt_levels = [9e-4, 1.8e-3, 3.6e-3, 7.2e-3, 1.44e-2]

        synonyms = [
            ('1RXS J060313.4+421231', '1RXS J06+42'),
            ('ACT-CLJ0102-4915', 'ACT-CLJ01-49'),
            ('CIZA J0649.3+1801', 'CIZA J0649'),  # CIZA J0649+18
            ('CIZA J0107.7+5408', 'CIZA J0107'),
            ('CIZA J2242.8+5301', 'CIZA J2243'),  # CIZA J2243+53
            ('MACS J0025-1222', 'MACS J0025'),
            ('MACS J0152.5-2852', 'MACS J0152'),  # MCS J0717+37
            ('MACS J0717.5+3745', 'MACS J0717'),  # MCS J0717+37
            ('MACS J1149.5+2223', 'MACS J1149'),  # J1149+22
            ('MACS J1752.0+4440', 'MACS J1752'),  # MCS J1752+44
            ('MACS J2243.3-0935', 'MACS J2243'),  # MCS J1752+44
            ('MaxBCG 138.91895+25.19876', 'MaxBCG 138+25'),
            ('MaxBCG 217.95869+13.53470', 'MaxBCG 217+13'),
            ('PSZ1 G004.5-19.5', 'PSZ1 G004'),  # PSZ1 G004-19
            ('PSZ1 G096.89+24.17', 'PSZ1 G097'),  # PSZ1 G097+24
            ('PSZ1 G108.18-11.53', 'PSZ1 G108'),  # PSZ1 G108-12
            ('PLCK G200.9-28.2', 'PLCK G200'),  # PSZ1 G108-12
            ('PLCK G287.0+32.9', 'PLCK G287'),  # PLCK G287+33
            ('RXC J0225.1-2928', 'RXC J0225'),
            ('RXC J1053.7+5452', 'RXC J1054'),  # RXC J1054+55
            ('RXC J1053.7+5452 ', 'RXC J1053'),  # RXC J1054+55
            ('RXC J1234.2+0947', 'RXC J1234'),  # RXC J1054+55
            ('RXC J1314.4-2515', 'RXC J1314'),  # RXC J1314-25
            ('ZwCl 0008+5215', 'ZwCl 0008'),  # ZwCl 0008+52
            ('ZwCl 1447.2+2619', 'ZwCl 1447'),  # ZwCl 0008+52
            ('ZwCl 2341+0000', 'ZwCl 2341'),  # ZwCl 2341+00
            ('[KMA2007] 217.95869+13.53470', 'KMA2007'),  # ZwCl 2341+00
        ]

        #        synonyms_lit = [('2017A&A...597A..15D', '2017A+A_deGasperin+Intema+'),
        #                        ('2017arXiv170801718K', '2017arXiv_Kale+Wik+')]
        for GCl in ClList:

            for syn in synonyms:
                if GCl.name.replace('_', ' ') == syn[0] or GCl.name == syn[0]:
                    print('! Name replacement:', syn)
                    GCl.name = syn[1]
            GCl.name = GCl.name.replace('_', ' ')
            GCl.updateInformation()

        norm = cdb.norm('R200', Nexp=1)
        Histo = cdb.Histogram2D(
            nbins=(64, 46), fromto=[[0, 2. * np.pi], [0, 1.5]],
            norm=norm)  # angle_projected(rad), D_proj(R200)

        Survey = cbclass.Survey(
            ClList,
            survey,
            cnt_levels=cnt_levels,
            synonyms=synonyms,
            dinfo=dinfo_survey,
            hist_main=Histo,
            surshort=survey
        )  # 'NVSS' should be replaced with a real survey class
        Survey.emi_max = 2e-2
        Survey.scatterkwargs = {"alpha": 0.7, "fmt": "o", "markersize": 10}
        Survey.histkwargs = {"alpha": 0.4}
        Survey.relic_filter_kwargs = {
            "Filter": True,
            "shape": False,
            "minrms": 8
        }
        iom.pickleObject(Survey, outfolder + '/pickled/', 'Survey')

        for GCl in Survey.GCls:
            print(GCl.name, GCl.status)

    smt(forced=True)
    return True
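# --- Hedged usage sketch (editor's addition): how survey_run is presumably
# invoked. The survey names select the image and region folders
# ('Images_NVSS/', 'Regions/', ...) relative to 'infolder'; all paths here
# are illustrative assumptions, not the pipeline's actual defaults.
if __name__ == '__main__':
    survey_run(['NVSS'],
               infolder='/data/Surveys/',  # assumed input layout
               outfoldertop='/data/ClusterBuster-Output/',
               plot=False)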