Example No. 1
def find_time_interval(fits):
    """
    find time interval of the fits file
    input:  fits            --- fits file name
    output: [tmin, tmax]    --- start and stop time in seconds from 1998.1.1
    """
    cmd = 'dmstat "' + fits + '[cols time]" centroid=no >' + zspace
    scf.run_ascds(cmd)

    out = scf.read_file(zspace, remove=1)

    chk = 0
    for val in out:
        mc1 = re.search('min', val)
        mc2 = re.search('max', val)

        if mc1 is not None:
            atemp = re.split('\s+', val)
            tmin  = int(float(atemp[1]))
            chk  += 1

        elif mc2 is not None:
            atemp = re.split('\s+', val)
            tmax  = int(float(atemp[1]))
            chk  += 1

        if chk > 1:
            break

    return [tmin, tmax]
Example No. 2
def find_time_interval(fits):
    """
    find time interval of the fits file
    input:  fits            --- fits file name
    output: [tmin, tmax]    --- start and stop time in seconds from 1998.1.1
    """
    cmd = 'dmstat "' + fits + '[cols time]" centroid=no >' + zspace
    try:
        scf.run_ascds(cmd)
    except:
        return [0, 0]

    out = scf.read_file(zspace, remove=1)

    chk = 0
    for val in out:
        mc1 = re.search('min', val)
        mc2 = re.search('max', val)

        if mc1 is not None:
            atemp = re.split('\s+', val)
            tmin = int(float(atemp[1]))
            chk += 1

        elif mc2 is not None:
            atemp = re.split('\s+', val)
            tmax = int(float(atemp[1]))
            chk += 1

        if chk > 1:
            break

    return [tmin, tmax]
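Both variants above shell out to dmstat and scrape its text output for the minimum and maximum of the time column. As a point of comparison, here is a minimal hedged sketch that reads the time column directly with astropy (an assumption: astropy is not used by these scripts, and the event table is taken to live in the first extension):

from astropy.io import fits as pyfits

def find_time_interval_direct(fname):
    """Return [tmin, tmax] of the time column (seconds from 1998.1.1)."""
    with pyfits.open(fname) as hdul:
        t = hdul[1].data['time']          # column lookup is case-insensitive
        return [int(t.min()), int(t.max())]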
Example No. 3
def sib_corr_comb(start, stop, lev):
    """
    combine fits files into one per ccd
    input:  start   --- start time of the interval <yyyy>:<ddd>:<hh>:<mm>:<ss>
            stop    --- stop time of the interval  <yyyy>:<ddd>:<hh>:<mm>:<ss>
            lev     --- data level "Lev1" or "Lev2"
    output: combined data: lres_ccd<ccd>_merged.fits in Data directory
    """
#
#--- convert the time to seconds from 1998.1.1
#
    tstart = tcnv.axTimeMTA(start)
    tstop  = tcnv.axTimeMTA(stop)
#
#--- make a list of data fits files
#
    lres = s_dir + lev + '/Outdir/lres/'
    cmd  = 'ls ' + lres + '*fits > ' + zspace
    os.system(cmd)
    data = scf.read_file(zspace, remove=1)
#
#--- initialize ccd_list<ccd>
#
    for ccd in range(0, 10):
        exec 'ccd_list%s = []' % (str(ccd))

    for ent in data:
#
#--- check whether the data are inside of the specified time period
#
        [tmin, tmax] = find_time_interval(ent)
        if tmin >= tstart and tmax <= tstop:
            btemp = re.split('_acis', ent)
            head  = btemp[0]
#
#--- add the fits file to ccd_list 
#
            for ccd in range(0, 10):
                chk = 'acis' + str(ccd)
                mc = re.search(chk, ent)
                if mc is not None:
                    line = str(ent)
                    exec "ccd_list%s.append('%s')" % (str(ccd), line)
                    break
#
#--- combine all fits files of a specific ccd into one fits file
#
    for ccd in range(0, 10):
        exec "alist = ccd_list%s"  % (str(ccd))
        if len(alist) > 0:
#
#--- the first of the list is simply copied to temp.fits
#
            cmd = 'cp ' + alist[0] + ' temp.fits'
            os.system(cmd)

            for k in range(1, len(alist)):

                cmd = 'dmmerge "' + alist[k] + ',temp.fits" outfile=zmerged.fits outBlock=""'
                cmd = cmd + ' columnList="" clobber="yes"'
                scf.run_ascds(cmd)

                cmd = 'mv ./zmerged.fits ./temp.fits'
                os.system(cmd)

            cmd = 'mv ./temp.fits ' + s_dir + lev +  '/Data/lres_ccd' + str(ccd) + '_merged.fits'
            os.system(cmd)
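The merge loop above calls dmmerge once per file, shuttling the running result through temp.fits. A hedged alternative sketch, assuming dmmerge accepts the usual CIAO stack syntax ("@listfile") and reusing the scf.run_ascds wrapper from these examples (merge_list.txt is a hypothetical scratch file name):

def merge_ccd_files(alist, outfile):
    """Merge all fits files in alist into outfile with a single dmmerge call."""
    with open('merge_list.txt', 'w') as fo:
        fo.write('\n'.join(alist) + '\n')

    cmd = 'dmmerge "@merge_list.txt" outfile=' + outfile
    cmd = cmd + ' outBlock="" columnList="" clobber="yes"'
    scf.run_ascds(cmd)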
Example No. 4
def find_excess_file(lev = 'Lev2'):
    """
    find data with extremely high radiation and remove it. 
    this is done mainly in Lev2; Lev1 moves the files corresponding to those removed in Lev2
    input:  lev --- level. default Lev2 (other option is Lev1)
    output: excess radiation data fits files in ./lres/Save/.
    """

    if lev == 'Lev2':
        lres = s_dir + lev + '/Outdir/lres/'

        cmd  = 'ls ' + lres + 'mtaf*fits > ' + zspace
        os.system(cmd)
        data = scf.read_file(zspace, remove=1)
    
        cmd  = 'mkdir ' + lres + 'Save'
        os.system(cmd)

        for ent in data:
            cmd = 'dmlist ' + ent + ' opt=data > ' + zspace
            scf.run_ascds(cmd)

            out = scf.read_file(zspace, remove=1)
            ssoft   = 0.0
            soft    = 0.0
            med     = 0.0
            hard    = 0.0
            harder  = 0.0
            hardest = 0.0
            tot     = 0
            for val in out:
                atemp    = re.split('\s+', val)
                if mcf.chkNumeric(atemp[0]):
                    ssoft   += float(atemp[6])
                    soft    += float(atemp[7])
                    med     += float(atemp[8])
                    hard    += float(atemp[9])
                    harder  += float(atemp[10])
                    hardest += float(atemp[11])
                    tot     += 1
                else:
                    continue

            if tot > 1:
                ssoft   /= tot
                soft    /= tot
                med     /= tot
                hard    /= tot
                harder  /= tot
                hardest /= tot

            mc = re.search('acis6', ent)
            chk = 0
            if mc is not None:
                if (med > 200):
                    chk = 1
            else:
                if (soft > 500) or (med > 150):
                    chk = 1

            if chk > 0:
                cmd = 'mv ' + ent + ' ' + lres + 'Save/.'
                os.system(cmd)

    else:
#
#--- for Lev1, we move the files which were removed in Lev2. we assume that this
#--- function has already been run on Lev2
#
        epath =  s_dir + '/Lev2/Outdir/lres/Save/'
        if os.listdir(epath) != []:

            cmd = 'ls ' + s_dir + '/Lev2/Outdir/lres/Save/*fits > ' + zspace
            os.system(cmd)
            data = scf.read_file(zspace, remove=1)
    
            l1_lres =  s_dir + '/Lev1/Outdir/lres/'
            l1_dir  =  l1_lres  + '/Save/'
            cmd     = 'mkdir ' + l1_dir
            os.system(cmd)
     
            for ent in data:
                atemp = re.split('mtaf', ent)
                btemp = re.split('N', atemp[1])
                mc = re.search('_', btemp[0])
                if mc is not None:
                    ctemp = re.split('_', btemp[0])
                    obsid = ctemp[0]
                else:
                    obsid = btemp[0]
    
                atemp = re.split('acis', ent)
                btemp = re.split('lres', atemp[1])
                ccd   = btemp[0]
                cid   = 'acis' + str(ccd) + 'lres_sibkg.fits'
    
                cmd = 'mv ' + l1_lres + 'mtaf' + obsid + '*' + cid + '  '  + l1_dir + '/.'
                os.system(cmd)
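The thresholds above (med > 200 for acis6, soft > 500 or med > 150 for the other ccds) are applied to means scraped from dmlist text output, with fields 7 through 12 of each row read as the SSoft through Hardest rates. A hedged sketch of the same means computed directly from the table with astropy (an assumption: the mtaf*lres files carry SSoft, Soft, Med, Hard, Harder, and Hardest columns, as the dmtcalc expression in the next example implies):

from astropy.io import fits as pyfits

def band_means(fname):
    """Return the mean SSoft through Hardest rates of an lres fits table."""
    bands = ['SSoft', 'Soft', 'Med', 'Hard', 'Harder', 'Hardest']
    with pyfits.open(fname) as hdul:
        data = hdul[1].data
        return {band: float(data[band].mean()) for band in bands}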
Example No. 5
def correct_factor(lev):
    """
    adjust lres results files for the area removed when the sources were excluded
    input:  lev --- level 1 or 2  
    output: adjusted fits files in lres 
    """
#
#--- read all correction factor information
#
    file = s_dir + lev + '/Reg_files/ratio_table'
    data = scf.read_file(file)

    ratio    = {}
    for ent in data:
        #atemp = re.split('\s+', ent)
        atemp = re.split(':', ent)
        rate  = float(atemp[1].strip())

        btemp = re.split('N',  atemp[0])
        mc    = re.search('_', btemp[0])
        if mc is not None:
            ctemp = re.split('_', btemp[0])
            msid  = ctemp[0]
        else:
            msid  = btemp[0]

        ctemp = re.split('ccd', atemp[0])
        dtemp = re.split('_',   ctemp[1])
        ccd   = dtemp[0]

        ind   = str(msid) + '.' + str(ccd)
        ratio[ind] = rate
#
#--- find all fits file names processed
#
    cmd = 'ls ' + s_dir + lev + '/Outdir/lres/mtaf*.fits > ' + zspace
    os.system(cmd)
    data = scf.read_file(zspace, remove=1)

    for fits in data:
        atemp = re.split('N', fits)
        btemp = re.split('mtaf', atemp[0])
        msid  = btemp[1]

        mc = re.search('_', msid)
        if mc is not None:
            ctemp = re.split('_', msid)
            msid  = ctemp[0]

        atemp = re.split('acis', fits)
        btemp = re.split('lres', atemp[1])
        ccd   = btemp[0]

        ind   = str(msid) + '.' + str(ccd)
        try:
            div   = ratio[ind]
        except:
            continue 

        if div >= 1:
            continue
#
#--- correct the observation rate by dividing by the ratio (all sources removed area)/(original area)
#
        elif div > 0:
            line  = 'SSoft=SSoft/' + str(div) + ',Soft=Soft/' + str(div) + ',Med=Med/' + str(div) + ','
            line  = line + 'Hard=Hard/' + str(div) + ',Harder=Harder/' + str(div) + ',Hardest=Hardest/' + str(div)

            cmd   = 'dmtcalc infile=' + fits + ' outfile=out.fits expression="' + line + '" clobber=yes'
            scf.run_ascds(cmd)

            cmd   = 'mv out.fits ' + fits
            os.system(cmd)

        else:
            print("Warning!!! div < 0 for " + str(fits))
            continue
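The dmtcalc call above scales each band rate up by 1/ratio to compensate for the excluded source regions. A hedged sketch of the same correction done in place with astropy instead of dmtcalc (same column-name assumption as above; an integer-typed column would need an explicit cast before the in-place division):

from astropy.io import fits as pyfits

def apply_area_correction(fname, div):
    """Divide each band rate by the area ratio div (0 < div < 1)."""
    with pyfits.open(fname, mode='update') as hdul:
        for col in ['SSoft', 'Soft', 'Med', 'Hard', 'Harder', 'Hardest']:
            hdul[1].data[col] /= div     # written back when the file closes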
Example No. 6
def sib_corr_comb(start, stop, lev):
    """
    combine fits files into one per ccd
    input:  start   --- start time of the interval <yyyy>:<ddd>:<hh>:<mm>:<ss>
            stop    --- stop time of the interval  <yyyy>:<ddd>:<hh>:<mm>:<ss>
            lev     --- data level "Lev1" or "Lev2"
    output: combined data: lres_ccd<ccd>_merged.fits in Data directory
    """
    #
    #--- convert the time to seconds from 1998.1.1
    #
    tstart = Chandra.Time.DateTime(start).secs
    tstop = Chandra.Time.DateTime(stop).secs
    #
    #--- make a list of data fits files
    #
    lres = cor_dir + lev + '/Outdir/lres/'
    cmd = 'ls ' + lres + '*fits > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)
    #
    #--- initialize ccd_list
    #
    ccd_list = [[] for x in range(0, 10)]
    for ent in data:
        #
        #--- check whether the data are inside of the specified time period
        #
        [tmin, tmax] = find_time_interval(ent)
        if tmin >= tstart and tmax <= tstop:
            btemp = re.split('_acis', ent)
            head = btemp[0]
            #
            #--- add the fits file to ccd_list
            #
            for ccd in range(0, 10):
                chk = 'acis' + str(ccd)
                mc = re.search(chk, ent)
                if mc is not None:
                    ccd_list[ccd].append(str(ent))
                    break
#
#--- combine all fits files of a specific ccd into one fits file
#
    for ccd in range(0, 10):
        if len(ccd_list[ccd]) > 0:
            #
            #--- the first of the list is simply copied to temp.fits
            #
            cmd = 'cp ' + ccd_list[ccd][0] + ' temp.fits'
            os.system(cmd)

            for k in range(1, len(ccd_list[ccd])):
                cmd = 'dmmerge "' + ccd_list[ccd][k]
                cmd = cmd + ',temp.fits" outfile=zmerged.fits outBlock=""'
                cmd = cmd + ' columnList="" clobber="yes"'
                try:
                    scf.run_ascds(cmd)
                except:
                    continue

                cmd = 'mv ./zmerged.fits ./temp.fits'
                os.system(cmd)

            cmd = 'mv ./temp.fits ' + cor_dir + lev + '/Data/lres_ccd'
            cmd = cmd + str(ccd) + '_merged.fits'
            os.system(cmd)
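A hypothetical invocation of the Python 3 version above, assuming the module-level globals (cor_dir, zspace) and imports (Chandra.Time, mcf, scf) are already set up by the surrounding script; the dates are purely illustrative:

# merge every lres fits file that falls inside January 2023, per ccd, for Lev2
sib_corr_comb('2023:001:00:00:00', '2023:032:00:00:00', 'Lev2')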
Example No. 7
def find_excess_file(lev='Lev2'):
    """
    find data with extremely high radiation and remove it. 
    this is done mainly in Lev2; Lev1 moves the files corresponding to those removed in Lev2
    input:  lev --- level. default Lev2 (other option is Lev1)
    output: excess radiation data fits files in ./lres/Save/.
    """
    if lev == 'Lev2':
        lres = cor_dir + lev + '/Outdir/lres/'

        cmd = 'ls ' + lres + 'mtaf*fits > ' + zspace
        os.system(cmd)
        data = mcf.read_data_file(zspace, remove=1)

        cmd = 'mkdir ' + lres + 'Save'
        os.system(cmd)

        for ent in data:
            cmd = 'dmlist ' + ent + ' opt=data > ' + zspace
            try:
                scf.run_ascds(cmd)
            except:
                continue

            out = mcf.read_data_file(zspace, remove=1)
            ssoft = 0.0
            soft = 0.0
            med = 0.0
            hard = 0.0
            harder = 0.0
            hardest = 0.0
            tot = 0.0
            for val in out:
                atemp = re.split('\s+', val)
                try:
                    chk = float(atemp[0])

                    ssoft += float(atemp[6])
                    soft += float(atemp[7])
                    med += float(atemp[8])
                    hard += float(atemp[9])
                    harder += float(atemp[10])
                    hardest += float(atemp[11])
                    tot += 1.0
                except:
                    continue

            if tot > 1:
                ssoft /= tot
                soft /= tot
                med /= tot
                hard /= tot
                harder /= tot
                hardest /= tot

            mc = re.search('acis6', ent)
            chk = 0
            if mc is not None:
                if (med > 200):
                    chk = 1
            else:
                if (soft > 500) or (med > 150):
                    chk = 1

            if chk > 0:
                cmd = 'mv ' + ent + ' ' + lres + 'Save/.'
                os.system(cmd)

    else:
        #
        #--- for Lev1, we move the files which were removed in Lev2. we assume that this
        #--- function has already been run on Lev2
        #
        epath = cor_dir + '/Lev2/Outdir/lres/Save/'
        if os.listdir(epath) != []:

            cmd = 'ls ' + cor_dir + '/Lev2/Outdir/lres/Save/*fits > ' + zspace
            os.system(cmd)
            data = mcf.read_data_file(zspace, remove=1)

            l1_lres = cor_dir + '/Lev1/Outdir/lres/'
            l1_dir = l1_lres + '/Save/'
            cmd = 'mkdir ' + l1_dir
            os.system(cmd)

            for ent in data:
                atemp = re.split('mtaf', ent)
                btemp = re.split('N', atemp[1])
                mc = re.search('_', btemp[0])
                if mc is not None:
                    ctemp = re.split('_', btemp[0])
                    obsid = ctemp[0]
                else:
                    obsid = btemp[0]

                atemp = re.split('acis', ent)
                btemp = re.split('lres', atemp[1])
                ccd = btemp[0]
                cid = 'acis' + str(ccd) + 'lres_sibkg.fits'

                cmd = 'mv ' + l1_lres + 'mtaf' + obsid + '*' + cid + '  ' + l1_dir + '/.'
                os.system(cmd)
Example No. 8
def correct_factor(lev):
    """
    adjust lres results files for the area removed when the sources were excluded
    input:  lev --- level 1 or 2  
    output: adjusted fits files in lres 
    """
    #
    #--- read all correction factor information
    #
    ifile = cor_dir + lev + '/Reg_files/ratio_table'
    data = mcf.read_data_file(ifile)

    ratio = {}
    for ent in data:
        atemp = re.split(':', ent)
        rate = float(atemp[1].strip())

        btemp = re.split('N', atemp[0])
        mc = re.search('_', btemp[0])
        if mc is not None:
            ctemp = re.split('_', btemp[0])
            msid = ctemp[0]
        else:
            msid = btemp[0]

        ctemp = re.split('ccd', atemp[0])
        dtemp = re.split('_', ctemp[1])
        ccd = dtemp[0]

        ind = str(msid) + '.' + str(ccd)
        ratio[ind] = rate
#
#--- find all fits file names processed
#
    cmd = 'ls ' + cor_dir + lev + '/Outdir/lres/mtaf*.fits > ' + zspace
    os.system(cmd)
    data = mcf.read_data_file(zspace, remove=1)

    for fits in data:
        atemp = re.split('N', fits)
        btemp = re.split('mtaf', atemp[0])
        msid = btemp[1]

        mc = re.search('_', msid)
        if mc is not None:
            ctemp = re.split('_', msid)
            msid = ctemp[0]

        atemp = re.split('acis', fits)
        btemp = re.split('lres', atemp[1])
        ccd = btemp[0]

        ind = str(msid) + '.' + str(ccd)
        try:
            div = ratio[ind]
        except:
            continue

        if div >= 1:
            continue
#
#--- correct the observation rate by dividing by the ratio
#--- (all sources removed area)/(original area)
#
        elif div > 0:
            line = 'SSoft=SSoft/' + str(div) + ',Soft=Soft/'
            line = line + str(div) + ',Med=Med/' + str(div) + ','
            line = line + 'Hard=Hard/' + str(div) + ',Harder=Harder/'
            line = line + str(div) + ',Hardest=Hardest/' + str(div)

            cmd = 'dmtcalc infile=' + fits + ' outfile=out.fits expression="'
            cmd = cmd + line + '" clobber=yes'
            scf.run_ascds(cmd)

            cmd = 'mv out.fits ' + fits
            os.system(cmd)

        else:
            print("Warning!!! div < 0 for " + str(fits))
            continue
Example No. 9
def exclude_sources(fits):
    """
    remove the area around the main source and all point sources from data
    input:  fits        --- input fits file name
    output: out_name    --- source removed fits file (<header>_ccd<ccd>_cleaned.fits)
    """
    #
    #--- read fits header
    #
    fout = pyfits.open(fits)
    #
    #--- find which ccds used
    #
    ccd_list = []
    for k in range(0, 10):
        bname = 'BIASFIL' + str(k)
        try:
            val = fout[1].header[bname]
            ccd_list.append(k)
        except:
            continue

    ccd_list.sort()
    #
    #--- create key word dictionary
    #
    v_dict = {}
    for name in [
            'SIM_X', 'SIM_Y', 'SIM_Z', 'RA_NOM', 'DEC_NOM', 'ROLL_NOM',
            'RA_TARG', 'DEC_TARG'
    ]:
        lname = name.lower()
        try:
            out = fout[1].header[name]
            v_dict[lname] = str(out)
        except:
            v_dict[lname] = 'NA'
#
#--- guess a source center position in sky coordinates from the information extracted from the header
#
    cmd = ' dmcoords none none opt=cel '
    cmd = cmd + ' ra=' + v_dict['ra_targ'] + ' dec=' + v_dict['dec_targ']
    cmd = cmd + ' sim="' + v_dict['sim_x'] + ' ' + v_dict[
        'sim_y'] + ' ' + v_dict['sim_z'] + '" '
    cmd = cmd + ' detector=acis celfmt=deg ' + ' ra_nom=' + v_dict['ra_nom']
    cmd = cmd + ' dec_nom=' + v_dict['dec_nom'] + ' roll_nom=' + v_dict[
        'roll_nom'] + ' '
    cmd = cmd + ' ra_asp=")ra_nom" dec_asp=")dec_nom" verbose=1 >' + zspace

    scf.run_ascds(cmd)

    data = mcf.read_data_file(zspace, remove=1)

    for ent in data:
        mc = re.search('SKY', ent)
        if mc is not None:
            atemp = re.split('\s+', ent)
            skyx = atemp[1]
            skyy = atemp[2]
            break
#
#-- keep a record of the source position for later use (e.g. used for evt1 processing)
#
    o_fits = fits.replace('.gz', '')
    coord_file = o_fits.replace('.fits', '_source_coord')
    ofile = './Reg_files/' + coord_file
    line = str(skyx) + ':' + str(skyy) + '\n'

    with open(ofile, 'w') as fo:
        fo.write(line)
#
#-- remove the 200 pix radius area around the source
#
    cmd = ' dmcopy "' + fits + '[exclude sky=circle(' + skyx + ',' + skyy + ',200)]" '
    cmd = cmd + ' outfile=source_removed.fits clobber="yes"'
    scf.run_ascds(cmd)
    #
    #--- get a file size: will be used to measure the size of removed area later.
    #--- the assumption here is that the x-rays hit the ccd evenly; of course they do not,
    #--- but this is the best guess we can get
    #
    size = {}
    for ccd in ccd_list:
        cmd = ' dmcopy "' + fits + '[ccd_id=' + str(ccd)
        cmd = cmd + ']" outfile=test.fits clobber=yes'
        scf.run_ascds(cmd)

        cmd = 'ls -l test.fits > ' + zspace
        os.system(cmd)

        data = mcf.read_data_file(zspace, remove=1)

        for line in data:
            atemp = re.split('\s+', line)
            try:
                size[ccd] = int(float(atemp[4]))
            except:
                size[ccd] = int(float(atemp[3]))

        mcf.rm_files('test.fits')
#
#--- now separate observations to individual ccds
#
    file_list = []
    for ccd in ccd_list:
        tail = '_ccd' + str(ccd) + '.fits'
        out = o_fits.replace('.fits', tail)
        file_list.append(out)

        cmd = ' dmcopy "source_removed.fits[ccd_id=' + str(ccd)
        cmd = cmd + ']" outfile= ' + out + ' clobber=yes'
        scf.run_ascds(cmd)

    mcf.rm_files('source_removed.fits')
    #
    #--- process each ccd
    #
    for pfits in file_list:
        reg_file = pfits.replace('.fits', '_block_src.reg')
        #
        #--- find point sources
        #
        cmd = ' celldetect infile=' + pfits
        cmd = cmd + ' fixedcell=9 outfile=acisi_block_src.fits regfile=acisi_block_src.reg clobber=yes'
        scf.run_ascds(cmd)

        data = mcf.read_data_file('acisi_block_src.reg')

        exclude = []
        for ent in data:
            atemp = re.split('\,', ent)
            #
            #--- increase the area covered around the sources 3 times to make sure leaks
            #--- from a bright source are minimized
            #
            val2 = float(atemp[2]) * 3
            val3 = float(atemp[3]) * 3
            line = atemp[0] + ',' + atemp[1] + ',' + str(val2) + ',' + str(
                val3) + ',' + atemp[4]
            exclude.append(line)

        out_name = pfits.replace('.gz', '')
        out_name = out_name.replace('.fits', '_cleaned.fits')
        #
        #--- if we actually found point sources, remove them from the ccds
        #
        e_cnt = len(exclude)
        if e_cnt > 0:
            cnt = 0
            chk = 0
            round = 0
            line = ''
            while cnt < e_cnt:
                #
                #--- remove 6 sources at a time so that it won't tax memory too much
                #
                for i in range(cnt, cnt + 6):
                    if i >= e_cnt:
                        chk += 1
                        break

                    if line == '':
                        line = exclude[i]
                    else:
                        line = line + '+' + exclude[i]

                cnt += 6
                if round == 0:
                    cmd = ' dmcopy "' + pfits + '[exclude sky=' + line
                    cmd = cmd + ']" outfile=out.fits clobber="yes"'
                    scf.run_ascds(cmd)
                    round += 1
                else:
                    cmd = 'mv out.fits temp.fits'
                    os.system(cmd)
                    cmd = ' dmcopy "temp.fits[exclude sky=' + line
                    cmd = cmd + ']" outfile=out.fits clobber="yes"'
                    scf.run_ascds(cmd)
                    round += 1

                if chk > 0:
                    break
                else:
                    line = ''

            mcf.rm_files('temp.fits')
            cmd = 'mv out.fits ' + out_name
            os.system(cmd)
        else:
            cmd = 'cp ' + pfits + ' ' + out_name
            os.system(cmd)
#
#--- find the size of the cleaned-up file
#
        cmd = 'ls -l ' + out_name + '>' + zspace
        os.system(cmd)

        data = mcf.read_data_file(zspace, remove=1)

        for line in data:
            atemp = re.split('\s+', line)
            try:
                asize = float(atemp[4])
            except:
                asize = float(atemp[3])

        for pccd in range(0, 10):
            check = 'ccd' + str(pccd)
            mc = re.search(check, out_name)
            if mc is not None:
                break
#
#--- compute the ratio of the cleaned to the original file;
#--- 1 - ratio is the portion that we removed from the original data
#
#ratio = asize / float(size[str(pccd)])
        ratio = asize / float(size[pccd])
        #
        #--- record the ratio for later use
        #
        with open('./Reg_files/ratio_table', 'a') as fo:
            line = reg_file + ': ' + str(ratio) + '\n'
            fo.write(line)

        cmd = 'mv acisi_block_src.reg ./Reg_files/' + reg_file
        os.system(cmd)
        mcf.rm_files('acisi_block_src.fits')
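The while loop above feeds the exclusion regions to dmcopy six at a time so a single call does not tax memory too much. The same chunking can be written as a small generator; this is a hedged, hypothetical helper rather than part of the original script:

def chunked_filters(exclude, chunk=6):
    """Yield '+'-joined region strings, at most <chunk> regions per string."""
    for i in range(0, len(exclude), chunk):
        yield '+'.join(exclude[i:i + chunk])

# usage sketch:
#   for filt in chunked_filters(exclude):
#       run dmcopy with '[exclude sky=' + filt + ']' as above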
Example No. 10
def exclude_sources(fits):
    """
    remove the area around the main source and all point sources from data
    input:  fits        --- input fits file name
    output: out_name    --- source removed fits file (<header>_ccd<ccd>_cleaned.fits)
    """
#
#--- read which ccds are used and several other info from fits header
#
    cmd = ' dmlist ' + fits + ' opt=head > ' + zspace
    scf.run_ascds(cmd)

    data = scf.read_file(zspace, remove=1)

    ccd_list = []
    for ent in data:
        mc = re.search('bias file used', ent)
        if mc is not None:
            atemp = re.split('CCD', ent)
            val   = atemp[1].strip()
            ccd_list.append(val)
            continue

        for name in ['SIM_X', 'SIM_Y', 'SIM_Z', 'RA_NOM', 'DEC_NOM', 'ROLL_NOM', 'RA_TARG', 'DEC_TARG']:
            mc = re.search(name, ent)
            if mc is not None:
                lname = name.lower()
                atemp = re.split('\s+', ent)
                val   = atemp[2].strip()
                exec "%s = %s" % (lname, val)

                break
#
#--- sort ccd list
#
    ccd_list.sort()
#
#--- guess a source center position in sky coordinates from the information extracted from the header
#
    cmd = ' dmcoords none none opt=cel '
    cmd = cmd + ' ra=' + str(ra_targ)  + ' dec=' + str(dec_targ )
    cmd = cmd + ' sim="' + str(sim_x) + ' ' +  str(sim_y) + ' ' + str(sim_z) + '" ' 
    cmd = cmd + ' detector=acis celfmt=deg '
    cmd = cmd + ' ra_nom=' + str(ra_nom) + ' dec_nom=' + str(dec_nom) + ' roll_nom=' + str(roll_nom) + ' ' 
    cmd = cmd + ' ra_asp=")ra_nom" dec_asp=")dec_nom" verbose=1 >' + zspace 

    scf.run_ascds(cmd)

    data = scf.read_file(zspace, remove=1)

    for ent in data:
        mc = re.search('SKY', ent)
        if mc is not None:
            atemp = re.split('\s+', ent)
            skyx  = atemp[1]
            skyy  = atemp[2]
            break
#
#-- keep a record of the source position for later use (e.g. used for evt1 processing)
#
    o_fits     = fits.replace('.gz', '')
    coord_file = o_fits.replace('.fits', '_source_coord')
    ofile      = './Reg_files/' + coord_file
    line       = str(skyx) + ':' + str(skyy) + '\n'

    fo         = open(ofile, 'w')
    fo.write(line)
    fo.close()
#
#-- remove the 200 pix radius area around the source
#
    cmd = ' dmcopy "' + fits + '[exclude sky=circle(' + skyx + ',' + skyy + ',200)]" '
    cmd = cmd + ' outfile=source_removed.fits clobber="yes"'
    scf.run_ascds(cmd)
#
#--- get a file size: will be used to measure the size of removed area later.
#--- the assumption here is that the x-rays hit the ccd evenly; of course they do not,
#--- but this is the best guess we can get
#
    size = {}
    for ccd in ccd_list:
        cmd = ' dmcopy "' + fits + '[ccd_id=' + str(ccd) + ']" outfile=test.fits clobber=yes'
        scf.run_ascds(cmd)
        
        cmd  = 'ls -l test.fits > ' + zspace
        os.system(cmd)

        data = scf.read_file(zspace, remove=1)

        for line in data:
            atemp = re.split('\s+', line)
            if mcf.chkNumeric(atemp[4]):
                size[ccd] = int(float(atemp[4]))
            else:
                size[ccd] = int(float(atemp[3]))

        mcf.rm_file('test.fits')
#
#--- now separate observations to individual ccds
#
    file_list = []
    for ccd in ccd_list:
        tail = '_ccd' + str(ccd) + '.fits'
        out  = o_fits.replace('.fits', tail)
        file_list.append(out)

        cmd = ' dmcopy "source_removed.fits[ccd_id=' + ccd + ']" outfile= ' + out + ' clobber=yes'
        scf.run_ascds(cmd)

    mcf.rm_file('source_removed.fits')
#
#--- process each ccd
#
    for pfits in file_list:
        reg_file = pfits.replace('.fits', '_block_src.reg')
#
#--- find point sources
#
        cmd = ' celldetect infile=' + pfits 
        cmd = cmd + ' outfile=acisi_block_src.fits regfile=acisi_block_src.reg clobber=yes'
        scf.run_ascds(cmd)

        data = scf.read_file('acisi_block_src.reg')
        
        exclude = []
        for ent in data:
            atemp =  re.split('\,', ent)
#
#--- increase the area covered around the sources 3 times to make sure leaks from a bright source are minimized
#
            val2 = float(atemp[2]) * 3
            val3 = float(atemp[3]) * 3
            line = atemp[0] + ',' + atemp[1] + ',' + str(val2) + ',' + str(val3) +',' + atemp[4]
            exclude.append(line)

        out_name = pfits.replace('.gz','')
        out_name = out_name.replace('.fits', '_cleaned.fits')
#
#--- if we actually found point sources, remove them from the ccds
#
        e_cnt = len(exclude)
        if e_cnt  > 0:
            cnt   = 0
            chk   = 0
            round = 0
            line  = ''
            while cnt < e_cnt:
#
#--- remove 6 sources at a time so that it won't tax memory too much
#
                for i in range(cnt, cnt + 6):
                    if i >= e_cnt:
                        chk += 1
                        break

                    if line == '':
                        line = exclude[i]
                    else:
                        line = line + '+' + exclude[i]

                cnt += 6
                if round == 0:
                    cmd = ' dmcopy "' + pfits + '[exclude sky=' + line +']" outfile=out.fits clobber="yes"'
                    scf.run_ascds(cmd)
                    round += 1
                else:
                    cmd = 'mv out.fits temp.fits'
                    os.system(cmd)
                    cmd = ' dmcopy "temp.fits[exclude sky=' + line +']" outfile=out.fits clobber="yes"'
                    scf.run_ascds(cmd)
                    round += 1

                if chk > 0:
                    break 
                else:
                    line = ''

            mcf.rm_file('temp.fits')
            cmd = 'mv out.fits ' + out_name
            os.system(cmd)
        else:
            cmd = 'cp ' + pfits + ' ' + out_name
            os.system(cmd)
#
#--- find the size of the cleaned-up file
#
        cmd = 'ls -l ' + out_name + '>' + zspace
        os.system(cmd)

        data = scf.read_file(zspace, remove=1)

        for line in data:
            atemp = re.split('\s+', line)
            if mcf.chkNumeric(atemp[4]):
                asize = float(atemp[4])
            else:
                asize = float(atemp[3])
    
        for pccd in range(0, 10):
            check = 'ccd' + str(pccd)
            mc  = re.search(check,  out_name)
            if mc is not None:
                break
#
#--- compute the ratio of the cleaned to the original file; 1 - ratio is the portion that we removed
#--- from the original data
#
        ratio = asize / float(size[str(pccd)])
#
#--- record the ratio for later use
#
        fo   = open('./Reg_files/ratio_table', 'a')
        line = reg_file + ': ' + str(ratio) + '\n'
        fo.write(line)
        fo.close()
                    
        cmd = 'mv acisi_block_src.reg ./Reg_files/' + reg_file
        os.system(cmd)
        mcf.rm_file('acisi_block_src.fits')