Example #1
def add_dummy(inlsm):
    dummylsm = 'dummy.lsm.html'
    gi('Adding dummy:  ' + inlsm)
    dsrc = Tigger.load(dummylsm).sources
    model = Tigger.load(inlsm)
    model.sources.append(dsrc[0])
    model.save(inlsm)
Example #2
def transfer_tags(fromlsm="$LSMREF",
                  lsm="$LSM",
                  output="$LSM",
                  tags="dE",
                  tolerance=60 * ARCSEC):
    """Transfers tags from a reference LSM to the given LSM. That is, for every tag
  in the given list, finds all sources with those tags in 'fromlsm', then applies 
  these tags to all nearby sources in 'lsm' (within a radius of 'tolerance'). 
  Saves the result to an LSM file given by 'output'.
  """
    fromlsm, lsm, output, tags = interpolate_locals("fromlsm lsm output tags")
    # now, set dE tags on sources
    tagset = frozenset(tags.split())
    info("Transferring tags %s from %s to %s (%.2f\" tolerance)" %
         (",".join(tagset), fromlsm, lsm, tolerance / ARCSEC))
    import Tigger
    refmodel = Tigger.load(fromlsm)
    model = Tigger.load(lsm)
    # for each dE-tagged source in the reference model, find all nearby sources
    # in our LSM, and tag them
    for src0 in refmodel.getSourceSubset(",".join(["=" + x for x in tagset])):
        for src in model.getSourcesNear(src0.pos.ra,
                                        src0.pos.dec,
                                        tolerance=tolerance):
            for tag in tagset:
                tagval = src0.getTag(tag, None)
                if tagval is not None:
                    if src.getTag(tag, None) != tagval:
                        src.setTag(tag, tagval)
                        info(
                            "setting tag %s=%s on source %s (from reference source %s)"
                            % (tag, tagval, src.name, src0.name))
    model.save(output)
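
The recipe above pulls its arguments through Pyxis helpers (interpolate_locals, "$LSM"-style globals). The same tag transfer can be driven directly through the Tigger API outside Pyxis; a minimal standalone sketch, assuming Tigger is installed and using hypothetical file names:

import math
import Tigger

ARCSEC = math.pi / (180.0 * 3600.0)  # radians per arcsecond

# Hypothetical inputs: a reference model with dE-tagged sources and a
# target model whose nearby counterparts should inherit the tag.
refmodel = Tigger.load("reference.lsm.html")
model = Tigger.load("target.lsm.html")

for src0 in refmodel.getSourceSubset("=dE"):
    for src in model.getSourcesNear(src0.pos.ra, src0.pos.dec,
                                    tolerance=60 * ARCSEC):
        src.setTag("dE", src0.getTag("dE", True))

model.save("target.lsm.html")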
Example #3
File: lsm.py Project: brianwelman2/kal-cal
def sel_LSM(n_dir):

    if n_dir == 1:
        return Tigger.load(MODEL_1)
    elif n_dir == 4:
        return Tigger.load(MODEL_4)
    else:
        raise ValueError("Only two sky-models present.")
Example #4
def compare_models(image, icatalog, pcatalog, R_th=None):
    """ Compares the initial simulated model to the model
     obtained from REDDSIT
    """
    model1 = Tigger.load(icatalog, verbose=False)
    model2 = Tigger.load(pcatalog, verbose=False)
    hdu = pyfits.open(image)
    hdr = hdu[0].header
    bmaj = hdr["BMAJ"]

    tolerance = numpy.deg2rad(bmaj)
    T, FP, FN, F = 0, 0, 0, 0

    for i, src in enumerate(model2.sources):
        ra_r = src.pos.ra  #in radians
        dec_r = src.pos.dec
        Rel = src.rel
        if Rel > R_th:
            within = model1.getSourcesNear(ra_r, dec_r, tolerance)
            length = len(within)
            if length > 0:
                #src.setAttribute('real', True)
                src.setTag("t", True)
                T += 1
            else:
                #src.setAttribute('real', False)
                src.setTag("fp", True)
                FP += 1
        else:
            within = model1.getSourcesNear(ra_r, dec_r, tolerance)
            length = len(within)
            if length > 0:
                src.setTag("fn", True)
                FN += 1

            else:
                F += 1
                src.setTag("f", True)

    output = pcatalog.replace('.lsm.html', '.txt')
    summary_detections = open(output, 'w')
    model2.save(pcatalog)
    summary_detections.write("\t\tSUMMARY\t\t\n")
    summary_detections.write('\nTotal number of detections = %d\n' %
                             len(model2))
    summary_detections.write('True Source Detection = %d\n' % T)
    summary_detections.write('False Positive Detections = %d\n' % FP)
    summary_detections.write('False Negative Detections = %d\n' % FN)
    summary_detections.write('False Detection (artifacts) = %d\n' % F)
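
The four counters follow the usual confusion-matrix convention, so completeness and purity fall out directly. A small helper working from the counts above (not part of the original code):

def detection_metrics(T, FP, FN):
    """Completeness (recall) and purity (precision) from the match counts."""
    completeness = T / float(T + FN) if (T + FN) else 0.0
    purity = T / float(T + FP) if (T + FP) else 0.0
    return completeness, purity

# e.g. detection_metrics(90, 5, 10) -> (0.9, 0.947...)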
Example #6
def combine_sources(lsm, beam, outlsm):

    model = Tigger.load(lsm, verbose=False)
    tolerance = beam

    sources = model.sources

    for src in sources:
        source_max = []

        srcnear = model.getSourcesNear(src.pos.ra, src.pos.dec, tolerance)
        flux = 0
        err_flux = 0
        err_ra = 0
        err_dec = 0
        if not srcnear:
            # no nearby sources; leave this source in the model as-is
            continue
        else:
            for srs in srcnear:
                if len(srcnear) > 1:
                    srs_f = srs.flux.I
                    srs_ferr = srs.getTag("_pybdsm_E_Peak_flux")
                    flux += srs_f  # adding the flux
                    err_flux += (srs_ferr)**2
                    source_max.append(srs_f)

            err_flux = (err_flux)**0.5
            if len(srcnear) > 1:
                # select the brightest neighbour as the surviving component
                ind = int(numpy.argmax(source_max))
                srcs = srcnear.pop(ind)
                srcs.flux.I = flux
                srcs.setAttribute("_pybdsm_E_Peak_flux", err_flux)
                for srcss in srcnear:
                    model.sources.remove(srcss)
    model.save(outlsm)
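
On the index fix above: the original line ind = numpy.where(max(source_max))[0][0] passes the scalar maximum to numpy.where as a truth value, which yields index 0 regardless of where the maximum lies, so the brightest neighbour was never actually selected. numpy.argmax returns the intended index:

import numpy

source_max = [0.2, 1.5, 0.7]
numpy.where(max(source_max))[0][0]  # 0: where() sees only a nonzero scalar
int(numpy.argmax(source_max))       # 1: index of the brightest source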
Example #7
def RemoveSourcesWithoutSPI(lsmname_in, lsmname_out):
    model = Tigger.load(lsmname_in)
    sources = [src for src in model.sources]
    for src in sources:
        if not src.spectrum:
            model.sources.remove(src)
    model.save(lsmname_out)
Example #9
    def __init__(self, lsm, phase_center, dde_tag='dE'):
        """
        Initialises this source provider.

        Args:
            lsm (str):
                Filename containing the sky model
            phase_center (tuple):
                Observation phase centre, as a RA, Dec tuple
            dde_tag (str or None):
                If set, sources are grouped into multiple directions using the specified tag.

        """

        self.filename = lsm
        self._sm = Tigger.load(lsm)
        self._phase_center = phase_center
        self._use_ddes = bool(dde_tag)
        self._dde_tag = dde_tag

        self._freqs = None

        self._clusters = cluster_sources(self._sm, dde_tag)
        self._cluster_keys = list(self._clusters.keys())
        self._nclus = len(self._cluster_keys)

        self._target_key = 0
        self._target_cluster = self._cluster_keys[self._target_key]

        self._pnt_sources = self._clusters[self._target_cluster]["pnt"]
        self._npsrc = len(self._pnt_sources)
        self._gau_sources = self._clusters[self._target_cluster]["gau"]
        self._ngsrc = len(self._gau_sources)
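
cluster_sources here comes from the surrounding project rather than from Tigger itself. Judging by how its result is indexed, it groups sources into directions by their cluster tag and splits each direction by source type; a rough sketch of that behaviour (hypothetical reimplementation, not the project's code):

from collections import defaultdict

def cluster_sources_sketch(sky_model, dde_tag):
    """Group sources into {cluster: {"pnt": [...], "gau": [...]}}."""
    clusters = defaultdict(lambda: {"pnt": [], "gau": []})
    for src in sky_model.sources:
        # dde_tag-tagged sources get their own direction per cluster;
        # everything else lands in a single direction-independent group.
        key = src.getTag("cluster") if (dde_tag and src.getTag(dde_tag)) else "die"
        kind = "pnt" if src.typecode.lower() == "pnt" else "gau"
        clusters[key][kind].append(src)
    return dict(clusters)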
Example #10
def main():

	
	infile = glob.glob('*.gaul')[0]

#	infile = sys.argv[1]

	regionfile = infile.replace('.gaul','.reg')


	tiggerConvert(infile)


	model = Tigger.load(infile.replace('gaul','lsm.html'))

	nodes = []

	for src in model.sources:
		lead = src.getTag('cluster_lead')
		if lead:
			cluster_flux = src.getTag('cluster_flux')
			if cluster_flux > 0.08:
				ra = rad2deg(src.pos.ra)
				dec = rad2deg(src.pos.dec)
				nodes.append((ra,dec))
#				print src.name,ra,dec,'1.0'

	writeDS9(nodes,regionfile)
Example #11
def stackem(start=1, stop=100):
    
    v.MS = MS or II("${LSM:BASE}.MS")
    
    DO_STEP = lambda step: step>=float(start) and step<=float(stop)

    if not exists(MS) or MS_REDO:
        ms.create_empty_ms(tel=OBSERVATORY, pos=ANTENNAS, direction=DIRECTION, synthesis=SYNTHESIS, dtime=DTIME, 
                           freq0=FREQ0, dfreq=DFREQ)
        reload(ms)
        start = 1

    if DO_STEP(1):
        simsky()
        im.make_image()
     
    if DO_STEP(2):
        model = Tigger.load(LSM)
        radec = []
        flux = 0
        for src in model.sources:
            ra = src.pos.ra
            dec = src.pos.dec
            radec.append( map(numpy.rad2deg, [ra,dec]))
            flux += src.flux.I

        flux /=len(model.sources)
        
        sflux = stacker.stackem(im.DIRTY_IMAGE, radec, width=WIDTH)
        _add("%.4g %.4g"%(flux, sflux), STACKFILE, delimeter="\n")
Example #12
def model_in_tigger(ilsm, plsm, tolerance=None):
    """Tags sources as real and artefacts
    Takes in initial LSM and sourcefinder LSM
    Tolerance in radians
    """

    imodel = Tigger.load(ilsm)
    pmodel = Tigger.load(plsm)
    for src in pmodel.sources:
        sources_within = imodel.getSourcesNear(src.pos.ra, src.pos.dec,
                                               tolerance)
        l = len(sources_within)
        if l > 0:
            src.setTag("tr", True)
        else:
            src.setTag("ar", True)
    pmodel.save(plsm)
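
A usage sketch for the tagger above (hypothetical file names; as the docstring notes, the tolerance must be supplied in radians):

import numpy
model_in_tigger("simulated.lsm.html", "pybdsm.lsm.html",
                tolerance=numpy.deg2rad(0.003))  # ~10.8 arcsec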
Example #13
def do_crossmatching(icatalog, pcatalog, Gaul, names, beam_size=None):

    """  Crossmatch sources and save to the source finder LSM and Gaul fiel.

    icatalog: The initial/simulated model.
    pcatalog: The source finder model
    Gaul: The gaul file 
    """

    # changing the initial_gaul is important
    model = Tigger.load(icatalog) # recentered model
    test_model = Tigger.load(pcatalog)    

    data = Gaul
       
    dtype = ['float']*len(names)
    gaul = np.genfromtxt(data, names=names, dtype=dtype)

    tolerance = rad(beam_size) # degrees to radians

    #comparing the positions of the sources in .gaul and the recenter file
    eo = np.zeros(len(gaul), dtype=int)

    for i, src in enumerate(test_model.sources):

        ra = src.pos.ra
        dec = src.pos.dec
        within = model.getSourcesNear(ra, dec, tolerance)
        length = len(within)

        if length > 0:
            src.setAttribute('real', True)
            eo[i] = True
        else:
            eo[i] = False
            src.setAttribute('real', False)

    gaul_modified = open(icatalog.replace('.lsm.html','_tagged.gaul'), 'w')

    names = names + ['Tag']
    gaul_modified.write("%s"%[ x for x in names ])
    gaul_modified.write('\n')
    test_model.save(pcatalog)
    for dt,tag in zip(gaul,eo):
        gaul_modified.write('   '.join(map(str,dt))+' %d\n'%tag)
    return model, tolerance
Example #14
def remove_sources(lsmname="$LSM"):
    lsmname = interpolate_locals("lsmname")

    model = Tigger.load(lsmname)
    zeroflux = filter(lambda a: a.flux.I ==0, model.sources)
    for s in zeroflux:
        model.sources.remove(s)
    model.save(lsmname)
Example #15
File: utils.py Project: lowks/sourcery
def verifyModel(lsm):
    ##TODO temporary
    model = Tigger.load(lsm)
    zeroflux = filter(lambda a: (a.flux.I or a.brightness())==0,
                      model.sources)
    for s in zeroflux:
        model.sources.remove(s)
    model.save(lsm)
Example #16
File: lsm.py Project: griffinfoster/pyxis
def pointify (lsm="$LSM",output="$LSM",name=""):
  """Replaces names sources with point sources""";
  lsm,output,name = interpolate_locals("lsm output name");
  model = Tigger.load(lsm);
  src = model.findSource(name);
  info("Setting source $name in model $lsm to point source, saving to $output");
  src.shape = None;
  model.save(output);
Example #20
def dEtags(inputLsm, namelist):
    print '     Applying tags'
    cluster_leads = []
    skymodel = Tigger.load(inputLsm, verbose=False)
    for src in skymodel.sources:
        name = src.name
        cluster_size = src.getTag('cluster_size')
        if name in namelist:
            print '    ', name, 'tagged'
            src.setAttribute('dE', True)
            if cluster_size > 1:
                print '    ', name, 'is part of a cluster of size', cluster_size
                cluster_leads.append(src.getTag('cluster'))
    for src in skymodel.sources:
        cluster = src.getTag('cluster')
        if cluster in cluster_leads:
            print '          Tagging', src.name, 'as part of cluster', cluster
            src.setAttribute('dE', True)
    Tigger.save(skymodel, inputLsm, verbose=False)
Example #21
def tagsources(inlsm):
    model = Tigger.load(inlsm)
    gi('Reading LSM:   ' + inlsm)
    srcs = model.sources
    counter = 0
    for src in srcs:
        src.setTag('dE', True)
        counter += 1
    gi('Tagged:        ' + str(counter) + ' source(s)')
    model.save(inlsm)
Example #22
    def number_negatives(self):
        
        pmodel = Tigger.load(self.poscatalog, verbose=self.loglevel)
        nmodel = Tigger.load(self.negcatalog, verbose=self.loglevel)
        psources = pmodel.sources 
        sources = filter(lambda src: src.getTag(self.high_corr_tag), psources)

        tolerance = numpy.deg2rad(self.negdetec_region * self.bmaj_deg)

        if self.phasecenter_excl_radius:
            radius = numpy.deg2rad(self.phasecenter_excl_radius * self.bmaj_deg)
            
        for srs in sources:
            ra, dec = srs.pos.ra, srs.pos.dec
            within = nmodel.getSourcesNear(ra, dec, tolerance)    
            if len(within) >= self.negatives_thresh:
                if self.phasecenter_excl_radius:
                    if dist( self.ra0, dec, ra, self.dec0)[0] > radius: 
                        srs.setTag(self.dd_tag, True)
                else:
                    srs.setTag(self.dd_tag, True)
        pmodel.save(self.poscatalog)
Example #23
def CombineSourcesInCluster(lsmname_in, lsmname_out):
    model = Tigger.load(lsmname_in)
    for src in model.sources:
        if src.cluster_size > 1 and rad2arcsec(src.r) > 30:
            cluster_sources = [
                src1 for src1 in model.sources if src1.cluster is src.cluster]
            flux_sources = [src1.flux.I for src1 in cluster_sources]
            max_flux_index = flux_sources.index(max(flux_sources))
            cluster_sources[max_flux_index].flux.I = sum(
                [src1.flux.I for src1 in cluster_sources])
            for src2 in cluster_sources:
                if src2 is not cluster_sources[max_flux_index]:
                    model.sources.remove(src2)
            cluster_sources[max_flux_index].cluster_size = 1
    model.save(lsmname_out)
Example #24
  def remove_sources_within(self, catalog, rel_excl_src=None):
 
      model = Tigger.load(catalog)
      sources = model.sources
      
      if rel_excl_src:
          for i in range(len(rel_excl_src)):
              ra, dec, tolerance = rel_excl_src[i].split(",")
              ra, dec, tolerance = map(numpy.deg2rad, (float(ra),
                                       float(dec), float(tolerance)))
              within = model.getSourcesNear(ra, dec, tolerance)   
              for src in sorted(sources):
                  if src in within:
                       sources.remove(src)
          model.save(catalog)
Example #25
File: lsm.py Project: griffinfoster/pyxis
def transfer_tags (fromlsm="$LSMREF",lsm="$LSM",output="$LSM",tags="dE",tolerance=60*ARCSEC):
  """Transfers tags from a reference LSM to the given LSM. That is, for every tag
  in the given list, finds all sources with those tags in 'fromlsm', then applies 
  these tags to all nearby sources in 'lsm' (within a radius of 'tolerance'). 
  Saves the result to an LSM file given by 'output'.
  """
  fromlsm,lsm,output,tags = interpolate_locals("fromlsm lsm output tags");
  # now, set dE tags on sources
  tagset = frozenset(tags.split());
  info("Transferring tags %s from %s to %s"%(",".join(tagset),fromlsm,lsm));
  import Tigger
  refmodel = Tigger.load(fromlsm);
  model = Tigger.load(lsm);
  # for each dE-tagged source in the reference model, find all nearby sources
  # in our LSM, and tag them
  for src0 in refmodel.getSourceSubset(",".join(["="+x for x in tagset])):
    for src in model.getSourcesNear(src0.pos.ra,src0.pos.dec,tolerance=tolerance):
      for tag in tagset:
        tagval = src0.getTag(tag,None);
        if tagval is not None:
          if src.getTag(tag,None) != tagval:
            src.setTag(tag,tagval);
            info("setting tag %s=%s on source %s (from reference source %s)"%(tag,tagval,src.name,src0.name))
  model.save(output);
Example #26
def plot_local_variance(modellsm, noise, prefix, threshold):

    model = Tigger.load(modellsm, verbose=False)
    savefig = prefix + "_variance.png"
    local = [(src.l/1.0e-6) for src in model.sources]
    pylab.figure()
    pylab.plot([noise/1.0e-6] * len(local))
    x = numpy.arange(len(local))
    pylab.plot(x, local)
    for i, src in enumerate(model.sources):
        if local[i] > threshold * noise:
            pylab.plot(x[i], local[i], "rD")
            pylab.annotate(src.name, xy=(x[i], local[i]))

    pylab.ylabel("Local variance[$\mu$]")
    pylab.savefig(savefig)
Example #27
def tag_lsm(lsm,
            stokes_cube,
            tagged_regions,
            hdu_id=0,
            regionsfn = "dE.srcs.reg",
            taggedlsm_fn="tagged.catalog.lsm.html",
            de_tag="dE",
            store_only_dEs=False):
    with fits.open(stokes_cube) as img:
        cube = img[hdu_id].data
        hdr = img[hdu_id].header
        w = wcs.WCS(hdr)

    with open(regionsfn, "w+") as f:
        f.write("# Region file format: DS9 version 4.0\n")
        f.write("global color=green font=\"helvetica 6 normal roman\" edit=1 move=1 delete=1 highlite=1 include=1 wcs=wcs\n")

        mod = Tigger.load(lsm)
        for ireg, reg in enumerate(tagged_regions):
            print>>log, "Tagged sources in Region {0:d}:".format(ireg), str(reg)
            encircled_sources = filter(lambda s: s in reg, mod.sources)
            encircled_fluxes = [s.flux.I for s in encircled_sources]
            for s in encircled_sources:
                s.setTag(de_tag, True)
                s.setTag("cluster", reg.name) #recluster sources
            if len(encircled_fluxes) > 0:
                argmax = np.argmax(encircled_fluxes)
                s = encircled_sources[argmax]
                s.setTag("cluster_lead", True)
                ra = np.rad2deg(s.pos.ra)
                dec = np.rad2deg(s.pos.dec)
                x, y, _, _ = w.all_world2pix([[ra, dec, 0, 0]], 1)[0]
                x = int(x)
                y = int(y)
                f.write("physical;circle({0:d}, {1:d}, 20) # select=1 text={2:s}\n".format(x, y,
                        "{%.2f mJy}" % (s.flux.I * 1.0e3)))
                print>>log, "\t - {0:s} tagged as '{1:s}' cluster lead".format(s.name, de_tag)
        print>>log, "Writing tagged leads to DS9 regions file {0:s}".format(regionsfn)
    if store_only_dEs:
        print>>log, "Removing direction independent components from catalog before writing LSM"
        ncomp_di_dies = len(mod.sources)
        mod.sources = filter(lambda s: de_tag in s.getTagNames(), mod.sources)
        ncomp_des = len(mod.sources)
        print>>log, "\t - Removed {0:d} direction independent sources from catalog".format(ncomp_di_dies - ncomp_des)
    print>>log, "Writing tagged LSM to {0:s}".format(taggedlsm_fn)
    mod.save(taggedlsm_fn)
    return mod.sources
Example #29
def load_sky_model(model_name):
    """
    Loads the sky model and extracts all the necessary information.

    :param model_name: The name of the sky model to be loaded
    :type model_name: str

    :returns: The point sources from the loaded sky model, the l coordinates of
              the points, the m coordinates of the points, the center declination,
              the flux sources, and the center right ascension
    """
    model = Tigger.load(model_name)
    RA_sources = []
    DEC_sources = []
    Flux_sources = []

    for val in model.sources:
        RA_sources.append(val.pos.ra)
        DEC_sources.append(val.pos.dec)
        Flux_sources.append(val.flux.I)

    RA_sources = np.array(RA_sources)
    DEC_sources = np.array(DEC_sources)
    Flux_sources = np.array(Flux_sources)

    ra_0 = model.ra0
    dec_0 = model.dec0
    ra_0_rad = ra_0 * (np.pi / 12)
    dec_0_rad = dec_0 * (np.pi / 180)

    RA_rad = RA_sources * (np.pi / 12)
    DEC_rad = DEC_sources * (np.pi / 180)
    RA_delta_rad = RA_rad - ra_0_rad

    l = np.cos(DEC_rad) * np.sin(RA_delta_rad)
    m = (np.sin(DEC_rad) * np.cos(dec_0_rad) -
         np.cos(DEC_rad) * np.sin(dec_0_rad) * np.cos(RA_delta_rad))

    point_sources = np.zeros((len(RA_sources), 3))
    point_sources[:, 0] = Flux_sources
    point_sources[:, 1] = l[0:]
    point_sources[:, 2] = m[0:]
    dec = dec_0

    return point_sources, l, m, dec_0, Flux_sources, ra_0_rad
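
Note the unit assumptions baked into the conversion above: RA values are scaled by pi/12 (hours to radians) and declinations by pi/180 (degrees to radians), so this helper expects a model whose positions are stored in hours and degrees rather than radians. The l, m formula itself can be sanity-checked in isolation (pure numpy, hypothetical offsets):

import numpy as np

# A source 1 degree east of the phase centre at the same declination.
dec0 = np.deg2rad(-30.0)
ra_delta = np.deg2rad(1.0)
dec = dec0

l = np.cos(dec) * np.sin(ra_delta)
m = np.sin(dec) * np.cos(dec0) - np.cos(dec) * np.sin(dec0) * np.cos(ra_delta)
# l ~ 0.0151, m ~ 6.6e-5: about 1 degree in l and essentially 0 in m.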
Example #30
    def signal_to_noise(self):
        
        model = Tigger.load(self.poscatalog, verbose=self.loglevel)
        sources = model.sources
            
        if self.noise == 0:
            self.log.error("Division by 0. Aborting")

        snr = [src.flux.I/self.noise for src in sources]
            
        thresh = self.snr_thresh * min(snr)
        n = 0
        for srs, s in zip(sources,snr):
            if s > thresh:
                srs.setTag(self.snr_tag, True)
                n += 1
        self.log.info("There are %d with high SNR"%n)
        model.save(self.poscatalog)
Example #31
def make_clean_model(image="${im.RESTORED_IMAGE}",
                     psf_image="${im.PSF_IMAGE}",
                     lsm0="$LSM0",
                     threshold=7):

    image, psf_image, lsm0 = interpolate_locals("image psf_image lsm0")
    lsm.pybdsm_search(image, output=lsm0, threshold=threshold)
    catalog = Tigger.load(lsm0)
    src = catalog.sources
    cs = []
    for i in range(len(src)):
        position = [deg(src[i].pos.ra), deg(src[i].pos.dec)]
        c = correlation_factor(src[i],
                               psf=psf_image,
                               img=image,
                               pos_sky=position,
                               step=120)
        cs.append(c)
    for i in range(len(src)):
        if cs[i] > 0.6 * max(cs):
            src[i].setTag('dE', True)

    catalog.save(lsm0)
Example #32
def calibrate(args, jones, alphas):
    # simple calibration to test if simulation went as expected.
    # Note do not run on large data set

    # load data
    ms = table(args.ms)
    time = ms.getcol('TIME')
    _, tbin_idx, tbin_counts = chunkify_rows(time, args.utimes_per_chunk)
    n_time = tbin_idx.size
    ant1 = ms.getcol('ANTENNA1')
    ant2 = ms.getcol('ANTENNA2')
    n_ant = np.maximum(ant1.max(), ant2.max()) + 1
    uvw = ms.getcol('UVW').astype(np.float64)
    data = ms.getcol(args.out_col)  # this is where we put the data
    # we know it is pure Stokes I so we can solve using diagonals only
    data = data[:, :, (0, 3)].astype(np.complex128)
    n_row, n_freq, n_corr = data.shape
    flag = ms.getcol('FLAG')
    flag = flag[:, :, (0, 3)]

    # get phase dir
    radec0 = table(args.ms + '::FIELD').getcol('PHASE_DIR').squeeze().astype(
        np.float64)

    # get freqs
    freq = table(args.ms + '::SPECTRAL_WINDOW').getcol('CHAN_FREQ')[0].astype(
        np.float64)
    assert freq.size == n_freq

    # now get the model
    # get source coordinates from lsm
    lsm = Tigger.load(args.sky_model)
    radec = []
    stokes = []
    spi = []
    ref_freqs = []

    for source in lsm.sources:
        radec.append([source.pos.ra, source.pos.dec])
        stokes.append([source.flux.I])
        tmp_spec = source.spectrum
        spi.append([tmp_spec.spi if tmp_spec is not None else 0.0])
        ref_freqs.append([tmp_spec.freq0 if tmp_spec is not None else 1.0])

    n_dir = len(stokes)
    radec = np.asarray(radec)
    lm = radec_to_lm(radec, radec0)

    # get model visibilities
    model = np.zeros((n_row, n_freq, n_dir, 2), dtype=np.complex128)
    stokes = np.asarray(stokes)
    ref_freqs = np.asarray(ref_freqs)
    spi = np.asarray(spi)
    for d in range(n_dir):
        Stokes_I = stokes[d] * (freq / ref_freqs[d])**spi[d]
        model[:, :, d, 0:1] = im_to_vis(Stokes_I[None, :, None], uvw,
                                        lm[d:d + 1], freq)
        model[:, :, d, 1] = model[:, :, d, 0]

    # set weights to unity
    weight = np.ones_like(data, dtype=np.float64)

    # initialise gains
    jones0 = np.ones((n_time, n_ant, n_freq, n_dir, n_corr),
                     dtype=np.complex128)

    # calibrate
    ti = timeit()
    jones_hat, jhj, jhr, k = gauss_newton(tbin_idx,
                                          tbin_counts,
                                          ant1,
                                          ant2,
                                          jones0,
                                          data,
                                          flag,
                                          model,
                                          weight,
                                          tol=1e-5,
                                          maxiter=100)
    print("%i iterations took %fs" % (k, timeit() - ti))

    # verify result
    for p in range(2):
        for q in range(p):
            diff_true = np.angle(jones[:, p] * jones[:, q].conj())
            diff_hat = np.angle(jones_hat[:, p] * jones_hat[:, q].conj())
            try:
                assert_array_almost_equal(diff_true, diff_hat, decimal=2)
            except Exception as e:
                print(e)
Example #33
def parse_sky_model(filename, chunks):
    """
    Parses a Tigger sky model

    Parameters
    ----------
    filename : str
        Sky Model filename
    chunks : tuple of ints or int
        Source chunking strategy

    Returns
    -------
    source_data : dict
        Dictionary of source data,
        :code:`{'point': (...), 'gauss': (...) }`
    """
    sky_model = Tigger.load(filename, verbose=False)

    _empty_spectrum = object()

    point_radec = []
    point_stokes = []
    point_spi = []
    point_ref_freq = []

    gauss_radec = []
    gauss_stokes = []
    gauss_spi = []
    gauss_ref_freq = []
    gauss_shape = []

    shapelet_radec = []
    shapelet_stokes = []
    shapelet_spi = []
    shapelet_ref_freq = []
    shapelet_beta = []
    shapelet_coeffs = []

    for source in sky_model.sources:
        ra = source.pos.ra
        dec = source.pos.dec
        typecode = source.typecode.lower()

        I = source.flux.I  # noqa
        Q = source.flux.Q
        U = source.flux.U
        V = source.flux.V

        spectrum = (getattr(source, "spectrum", _empty_spectrum)
                    or _empty_spectrum)
        try:
            # Extract reference frequency
            ref_freq = spectrum.freq0
        except AttributeError:
            ref_freq = sky_model.freq0

        try:
            # Extract SPI for I.
            # Zero Q, U and V to get 1 on the exponential
            spi = [[spectrum.spi, 0, 0, 0]]
        except AttributeError:
            # Default I SPI to -0.7
            spi = [[-0.7, 0, 0, 0]]

        if typecode == "gau":
            emaj = source.shape.ex
            emin = source.shape.ey
            pa = source.shape.pa

            gauss_radec.append([ra, dec])
            gauss_stokes.append([I, Q, U, V])
            gauss_spi.append(spi)
            gauss_ref_freq.append(ref_freq)
            gauss_shape.append([emaj, emin, pa])

        elif typecode == "pnt":
            point_radec.append([ra, dec])
            point_stokes.append([I, Q, U, V])
            point_spi.append(spi)
            point_ref_freq.append(ref_freq)

        elif typecode == "sha":
            beta_l = source.shape.sbetal
            beta_m = source.shape.sbetam
            coeffs = source.shape.shapelet_coeffs

            shapelet_radec.append([ra, dec])
            shapelet_stokes.append([I, Q, U, V])
            shapelet_spi.append(spi)
            shapelet_ref_freq.append(ref_freq)
            shapelet_beta.append([beta_l, beta_m])
            shapelet_coeffs.append(np.array(coeffs))
        else:
            raise ValueError("Unknown source morphology %s" % typecode)

    Point = namedtuple("Point", ["radec", "stokes", "spi", "ref_freq"])
    Gauss = namedtuple("Gauss",
                       ["radec", "stokes", "spi", "ref_freq", "shape"])
    Shapelet = namedtuple(
        "Shapelet", ["radec", "stokes", "spi", "ref_freq", "beta", "coeffs"])

    source_data = {}

    if len(point_radec) > 0:
        source_data["point"] = Point(
            da.from_array(point_radec, chunks=(chunks, -1)),
            da.from_array(point_stokes, chunks=(chunks, -1)),
            da.from_array(point_spi, chunks=(chunks, 1, -1)),
            da.from_array(point_ref_freq, chunks=chunks),
        )
    if len(gauss_radec) > 0:
        source_data["gauss"] = Gauss(
            da.from_array(gauss_radec, chunks=(chunks, -1)),
            da.from_array(gauss_stokes, chunks=(chunks, -1)),
            da.from_array(gauss_spi, chunks=(chunks, 1, -1)),
            da.from_array(gauss_ref_freq, chunks=chunks),
            da.from_array(gauss_shape, chunks=(chunks, -1)),
        )
    if len(shapelet_radec) > 0:
        source_data["shapelet"] = Shapelet(
            da.from_array(shapelet_radec, chunks=(chunks, -1)),
            da.from_array(shapelet_stokes, chunks=(chunks, -1)),
            da.from_array(shapelet_spi, chunks=(chunks, 1, -1)),
            da.from_array(shapelet_ref_freq, chunks=(chunks)),
            da.from_array(shapelet_beta, chunks=(chunks, -1)),
            da.from_array(shapelet_coeffs, chunks=(chunks, 1, -1)),
        )

    return source_data
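
A usage sketch for the parser above (hypothetical file name; assumes dask is installed):

source_data = parse_sky_model("skymodel.lsm.html", chunks=100)

for morphology, fields in source_data.items():
    # every field is a dask array chunked along the source axis
    print(morphology, fields.radec.shape, fields.radec.chunks)

if "point" in source_data:
    radec = source_data["point"].radec.compute()  # materialise positions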
Example #34
def new(ms, sky_model, gains, **kwargs):
    """Generate model visibilties per source (as direction axis)
    for stokes I and Q and generate relevant visibilities."""

    # Options to attributed dictionary
    if kwargs["yaml"] is not None:
        options = ocf.load(kwargs["yaml"])
    else:
        options = ocf.create(kwargs)

    # Set to struct
    ocf.set_struct(options, True)

    # Change path to sky model if chosen
    try:
        sky_model = sky_models[sky_model.lower()]
    except:
        # Own sky model reference
        pass

    # Set thread count to cpu count
    if options.ncpu:
        from multiprocessing.pool import ThreadPool
        import dask
        dask.config.set(pool=ThreadPool(options.ncpu))
    else:
        import multiprocessing
        options.ncpu = multiprocessing.cpu_count()

    # Load gains to corrupt with
    with open(gains, "rb") as file:
        jones = np.load(file)

    # Load dimensions
    n_time, n_ant, n_chan, n_dir, n_corr = jones.shape
    n_row = n_time * (n_ant * (n_ant - 1) // 2)

    # Load ms
    MS = xds_from_ms(ms)[0]

    # Get time-bin indices and counts
    row_chunks, tbin_indices, tbin_counts = chunkify_rows(
        MS.TIME, options.utime)

    # Close and reopen with chunked rows
    MS.close()
    MS = xds_from_ms(ms, chunks={"row": row_chunks})[0]

    # Get antenna arrays (dask ignored for now)
    ant1 = MS.ANTENNA1.data
    ant2 = MS.ANTENNA2.data

    # Adjust UVW based on phase-convention
    if options.phase_convention.upper() == 'CASA':
        uvw = -MS.UVW.data.astype(np.float64)
    elif options.phase_convention.upper() == 'CODEX':
        uvw = MS.UVW.data.astype(np.float64)
    else:
        raise ValueError("Unknown sign convention for phase.")

    # MS dimensions
    dims = ocf.create(dict(MS.sizes))

    # Close MS
    MS.close()

    # Build source model from lsm
    lsm = Tigger.load(sky_model)

    # Check if dimensions match jones
    assert n_time * (n_ant * (n_ant - 1) // 2) == dims.row
    assert n_time == len(tbin_indices)
    assert n_ant == np.max((np.max(ant1), np.max(ant2))) + 1
    assert n_chan == dims.chan
    assert n_corr == dims.corr

    # If gains are DIE
    if options.die:
        assert n_dir == 1
        n_dir = len(lsm.sources)
    else:
        assert n_dir == len(lsm.sources)

    # Get phase direction
    radec0_table = xds_from_table(ms + '::FIELD')[0]
    radec0 = radec0_table.PHASE_DIR.data.squeeze().compute()
    radec0_table.close()

    # Get frequency column
    freq_table = xds_from_table(ms + '::SPECTRAL_WINDOW')[0]
    freq = freq_table.CHAN_FREQ.data.astype(np.float64)[0]
    freq_table.close()

    # Get feed orientation
    feed_table = xds_from_table(ms + '::FEED')[0]
    feeds = feed_table.POLARIZATION_TYPE.data[0].compute()

    # Create initial model array
    model = np.zeros((n_dir, n_chan, n_corr), dtype=np.float64)

    # Create initial coordinate array and source names
    lm = np.zeros((n_dir, 2), dtype=np.float64)
    source_names = []

    # Cycle coordinates creating a source with flux
    print("==> Building model visibilities")
    for d, source in enumerate(lsm.sources):
        # Extract name
        source_names.append(source.name)

        # Extract position
        radec_s = np.array([[source.pos.ra, source.pos.dec]])
        lm[d] = radec_to_lm(radec_s, radec0)

        # Get flux - Stokes I
        if source.flux.I:
            I0 = source.flux.I

            # Get spectrum (only spi currently supported)
            tmp_spec = source.spectrum
            spi = [tmp_spec.spi if tmp_spec is not None else 0.0]
            ref_freq = [tmp_spec.freq0 if tmp_spec is not None else 1.0]

            # Generate model flux
            model[d, :, 0] = I0 * (freq / ref_freq)**spi

        # Get flux - Stokes Q
        if source.flux.Q:
            Q0 = source.flux.Q

            # Get spectrum
            tmp_spec = source.spectrum
            spi = [tmp_spec.spi if tmp_spec is not None else 0.0]
            ref_freq = [tmp_spec.freq0 if tmp_spec is not None else 1.0]

            # Generate model flux
            model[d, :, 1] = Q0 * (freq / ref_freq)**spi

        # Get flux - Stokes U
        if source.flux.U:
            U0 = source.flux.U

            # Get spectrum
            tmp_spec = source.spectrum
            spi = [tmp_spec.spi if tmp_spec is not None else 0.0]
            ref_freq = [tmp_spec.freq0 if tmp_spec is not None else 1.0]

            # Generate model flux
            model[d, :, 2] = U0 * (freq / ref_freq)**spi

        # Get flux - Stokes V
        if source.flux.V:
            V0 = source.flux.V

            # Get spectrum
            tmp_spec = source.spectrum
            spi = [tmp_spec.spi if tmp_spec is not None else 0.0]
            ref_freq = [tmp_spec.freq0 if tmp_spec is not None else 1.0]

            # Generate model flux
            model[d, :, 3] = V0 * (freq / ref_freq)**spi

    # Close sky-model
    del lsm

    # Build dask graph
    tbin_indices = da.from_array(tbin_indices, chunks=(options.utime))
    tbin_counts = da.from_array(tbin_counts, chunks=(options.utime))
    lm = da.from_array(lm, chunks=lm.shape)
    model = da.from_array(model, chunks=model.shape)
    jones = da.from_array(jones, chunks=(options.utime, ) + jones.shape[1::])

    # Apply image to visibility for each source
    sources = []
    for s in range(n_dir):
        source_vis = im_to_vis(model[s].reshape((1, n_chan, n_corr)),
                               uvw,
                               lm[s].reshape((1, 2)),
                               freq,
                               dtype=np.complex64,
                               convention='fourier')

        sources.append(source_vis)
    model_vis = da.stack(sources, axis=2)

    # Sum over direction?
    if options.die:
        model_vis = da.sum(model_vis, axis=2, keepdims=True)
        n_dir = 1
        source_names = [options.mname]

    # Select schema based on feed orientation
    if (feeds == ["X", "Y"]).all():
        out_schema = [["XX", "XY"], ["YX", "YY"]]
    elif (feeds == ["R", "L"]).all():
        out_schema = [['RR', 'RL'], ['LR', 'LL']]
    else:
        raise ValueError("Unknown feed orientation implementation.")

    # Convert Stokes to Correlations
    in_schema = ['I', 'Q', 'U', 'V']
    model_vis = convert(model_vis, in_schema, out_schema).reshape(
        (n_row, n_chan, n_dir, n_corr))

    # Apply gains to model_vis
    print("==> Corrupting visibilities")

    data = corrupt_vis(tbin_indices, tbin_counts, ant1, ant2, jones, model_vis)

    # Reopen MS
    MS = xds_from_ms(ms, chunks={"row": row_chunks})[0]

    # Assign model visibilities
    out_names = []
    for d in range(n_dir):
        MS = MS.assign(
            **{
                source_names[d]: (("row", "chan", "corr"),
                                  model_vis[:, :, d].astype(np.complex64))
            })

        out_names += [source_names[d]]

    # Assign noise free visibilities to 'CLEAN_DATA'
    MS = MS.assign(
        **{
            'CLEAN_' + options.dname: (("row", "chan", "corr"),
                                       data.astype(np.complex64))
        })

    out_names += ['CLEAN_' + options.dname]

    # Get noise realisation
    if options.std > 0.0:

        # Noise matrix
        print(f"==> Applying noise (std={options.std}) to visibilities")
        noise = []
        for i in range(2):
            real = da.random.normal(loc=0.0,
                                    scale=options.std,
                                    size=(n_row, n_chan),
                                    chunks=(row_chunks, n_chan))
            imag = 1.0j * (da.random.normal(loc=0.0,
                                            scale=options.std,
                                            size=(n_row, n_chan),
                                            chunks=(row_chunks, n_chan)))
            noise.append(real + imag)

        # Zero matrix for off-diagonals
        zero = da.zeros((n_row, n_chan), chunks=(row_chunks, n_chan))

        noise.insert(1, zero)
        noise.insert(2, zero)

        # NP to Dask
        noise = da.stack(noise, axis=2).rechunk((row_chunks, n_chan, n_corr))

        # Assign noise to 'NOISE'
        MS = MS.assign(
            **{'NOISE': (("row", "chan", "corr"), noise.astype(np.complex64))})

        out_names += ['NOISE']

        # Add noise to data and assign to 'DATA'
        noisy_data = data + noise

        MS = MS.assign(
            **{
                options.dname: (("row", "chan", "corr"),
                                noisy_data.astype(np.complex64))
            })

        out_names += [options.dname]

    # Create a write to the table
    write = xds_to_table(MS, ms, out_names)

    # Submit all graph computations in parallel
    print(f"==> Executing `dask-ms` write to `{ms}` for the following columns: "\
            + f"{', '.join(out_names)}")

    with ProgressBar():
        write.compute()

    print(f"==> Completed.")
Example #35
pyfits = Kittens.utils.import_pyfits();
startup_dprint(1,"imported pyfits");

DEG = math.pi/180;

startup_dprint(1,"importing WCS");

# If we're being imported outside the main app (e.g. a script is trying to read a Tigger model,
# whether TDL or otherwise), then pylab may be needed by that script for decent God-fearing
# purposes. Since WCS is going to pull it in anyway, we try to import it here, and if that
# fails, replace it by dummies.
if not Tigger.matplotlib_nuked:
  try:
    import pylab;
  except:
    Tigger.nuke_matplotlib();

# some locales cause WCS to complain that "." is not the decimal separator, so reset it to "C"
import locale
locale.setlocale(locale.LC_NUMERIC, 'C')
      

try:
  from astLib.astWCS import WCS
  import PyWCSTools.wcs
except ImportError:
  print "Failed to import the astLib.astWCS and/or PyWCSTools module. Please install the astLib package (http://astlib.sourceforge.net/)."
  raise;

startup_dprint(1,"imported WCS");
Example #36
    def params(self, modelfits):
     
        # reads in source finder output             
        with pyfits.open(modelfits) as hdu:
            data = hdu[1].data

        tfile = tempfile.NamedTemporaryFile(suffix=".txt")
        tfile.flush() 

        # writes a catalogue to a temporary txt file
        with open(tfile.name, "w") as std:
            std.write("#format:name ra_rad dec_rad i emaj_r emin_r pa_r\n")

        model = Tigger.load(tfile.name) # open a tmp. file
    
        peak, total, area, loc, corr = [], [], [], [], []
        for i in range(len(data)):
            flux = data["Total_flux"][i] 
            dc_emaj, dc_emin = data["DC_Maj"][i], data["DC_Min"][i]
            ra, dec = data["RA"][i], data["DEC"][i]
            pa = data["DC_PA"][i]
            name = "SRC%d"%i
            peak_flux = data["Peak_flux"][i]

            posrd =  ModelClasses.Position(numpy.deg2rad(ra), numpy.deg2rad(dec))
            flux_I = ModelClasses.Polarization(flux, 0, 0, 0)
            if dc_emaj == 0 and dc_emin == 0: 
                shape = None
            else:
                shape = ModelClasses.Gaussian(numpy.deg2rad(dc_emaj), numpy.deg2rad(dc_emin),
                                         numpy.deg2rad(pa))
            
            srs = SkyModel.Source(name, posrd, flux_I, shape=shape)
            
            # using convolved maj and min for reliability estimate
            emaj, emin = data["Maj"][i], data["Min"][i]

            # area: if emaj or emin is 0, assign the beam size
            if emaj == 0 or emin == 0:
                srcarea = math.pi * (numpy.rad2deg(self.bmaj)) * pow(3600.0, 2) *\
                       (numpy.rad2deg(self.bmin))
            if emaj > 0 and emin > 0:
                srcarea = emaj * emin * math.pi * pow(3600.0, 2)  # arcseconds
            
            # only accepts sources with flux > 0 and not nan RA and DEC
            # and local variance
            pos = [self.wcs.wcs2pix(*(ra, dec))][0] #positions from deg to pixel

            with pyfits.open(self.negimage) as hdu:
                negdata = utils.image_data( hdu[0].data )

            if flux > 0 and peak_flux > 0 and not math.isnan(float(ra))\
                and not math.isnan(float(dec)):

                  local = utils.compute_local_variance(negdata,
                            pos, self.locstep)

                  srs.setAttribute("local_variance", local)

                  
                  if not math.isnan(float(local)) or local  > 0:
                      if self.psfname:
                          pdata, psf = utils.compute_psf_correlation(self.imagename,
                                         self.psfname, pos, self.cfstep)

                          if len(pdata) == len(psf):
                              c_region = numpy.corrcoef((pdata, psf))
                              cf =  (numpy.diag((numpy.rot90(c_region))**2)
                                           .sum())**0.5/2**0.5

                              srs.setAttribute("correlation_factor", cf)
                              corr.append(cf)
                              model.sources.append(srs) 
                              peak.append(peak_flux)
                              total.append(flux)
                              area.append(srcarea)
                              loc.append(local)
                      else:
                          model.sources.append(srs) 
                          peak.append(peak_flux)
                          total.append(flux)
                          area.append(srcarea)
                          loc.append(local)
    
        labels = dict(size=(0, "Log$_{10}$(Source area)"), 
                      peak=(1, "Log$_{10}$( Peak flux [Jy] )"), 
                      tot=(2, "Log$_{10}$( Total flux [Jy] )"))

        if self.do_psf_corr:
            labels.update( {"coeff":(len(labels),
                            "Log$_{10}$ (CF)")})
        if self.do_local_var:
            labels.update( {"local": (len(labels),
                            "Log$_{10}$(Local Variance)")})
        if self.nearsources:
            labels.update( {"near": (len(labels),
                            "Log$_{10}$(Near Sources)")})

        nsrc = len(model.sources)
        out = numpy.zeros([nsrc, len(labels)])         
         
        # returning parameters
        for i, src in enumerate(model.sources):

            ra, dec = src.pos.ra, src.pos.dec
            near = model.getSourcesNear(ra, dec, 5 * self.bmaj)
            nonear = len(near) 
            if self.nearsources:
                src.setAttribute("neibours", nonear)

            if self.do_psf_corr and self.do_local_var and self.nearsources:
                 out[i,...] =  area[i], peak[i], total[i], corr[i], loc[i], nonear

            elif self.do_psf_corr and self.do_local_var and not self.nearsources:
                 out[i,...] =   area[i], peak[i], total[i] , corr[i], loc[i]
        
            elif self.do_psf_corr and self.nearsources and not self.do_local_var:
                out[i,...] =   area[i], peak[i], total[i] , corr[i], nonear
            
            elif not self.do_psf_corr and self.do_local_var and self.nearsources:
                out[i,...] =   area[i], peak[i], total[i] , loc[i], nonear
            
            elif self.do_psf_corr and not self.do_local_var and not self.nearsources:
                out[i,...] =   area[i], peak[i], total[i] , corr[i]
            
            elif not self.do_psf_corr and self.do_local_var and not self.nearsources:
                out[i,...] =   area[i], peak[i], total[i] , loc[i]
            
            elif not self.do_psf_corr and not self.do_local_var and self.nearsources:
                out[i,...] =   area[i], peak[i], total[i] , nonear

            else:
                out[i,...] =   area[i], peak[i], total[i]


        # removes the rows with 0s
        removezeros = (out == 0).sum(1)
        output = out[removezeros <= 0, :]
                  
        return model, numpy.log10(output), labels 
Example #37
File: utils.py Project: lowks/sourcery
def psf_image_correlation(catalog, psfimage, imagedata, header, wcs=None ,
                     pixelsize=None, corr_region=5, thresh=0.4, tags=None,
                     coefftag='high_corr', setatr=True, do_high=False, 
                     prefix=None):


    """ Computes correlation of the image and PSF image

    catalog : Source model, Tigger format.
    psfimage : Instrument's Point spread functions Fits data.
    imagedata : Fits data
    header : Fits header e.g., img=pyfits.open("test.fits")
        header=img[0].header
    wcs :This class provides methods
        for accessing information from the World  Coordinate System
        (WCS) contained in the header of a FITS image. Conversions 
        between pixel and WCS coordinates can also be performed.
        If not provided it is directly obtained from the Fits header
        provided.
    pixelsize: float, Default is None.
         If not provided then it is directly obtained form a Fits header.
    corr_region : int, optional. A default value of 5.
        The size of the region to correlate given in beam sizes.
    thresh : float, optional. A default value of 0.4. 
        Correlation threshold. Sources with correlation > threshold
        are sources with high correlation.
    tags: str, optional. Default is None.
        If specified only sources with 'Tag' will be evaluated.
    coefftag: str, optional. A Default string is 'high_corr'.
        If provided sources with correlation > thresh will be tagged
        using the user specified tag.
    setatr : bool, optional. Default is True.
        If True all sources will be tagged with 'cf'
        giving each detection an extra correlation with PSF parameter.
    do_high: bool, optional.  Default is False.
        If True, sources of high correlation are tagged using 'coefftag',
        if False no tagging will be made.
    """

    model = Tigger.load(catalog)
   
    image_data = imagedata 
    beam = header["BMAJ"]
    psf_data, wcs_psf, psf_hdr, psf_pix = reshape_data(image=psfimage)
    
    shape = image_data.shape
    log = logger(level=0, prefix=prefix)

    if pixelsize is None:
        pixelsize = abs(header["CDELT1"])
    if wcs is None:
        wcs = WCS(header, mode="pyfits")

    bmaj = int(round(beam/pixelsize))
    log.info("Beam major= %d degrees"%bmaj)

    if bmaj == 0:
        log.debug("Beam major axis was read as 0, setting it to 1")
        bmaj = 1.0

    if not isinstance(corr_region, int):
        if isinstance(corr_region, float):
            corr_region = int(round(corr_region))
            log.debug("Float is provided and int is required,"
                      "arounding off to the nearest integer")
            if corr_region == 0:
                log.error("Rounding off to 0. Provide an integer."
                          "Aborting")
        else:
            log.error("corr_region must be an integer. Abort")
    
    step = corr_region * bmaj
    
    sources = []

    if tags:
        log.debug("Only sources with tag %s will be correlated"
                  "with the PSF"%tags)
        sources = filter(lambda src: src.getTag(tags),model.sources) 
    else:
         for src in model.sources:
             sources.append(src)
    
    positions_sky = [map(lambda rad: numpy.rad2deg(rad),
                    (src.pos.ra, src.pos.dec))  for src in sources]
    pos = [wcs.wcs2pix(*pos) for pos in positions_sky]

    step = [step,step]
    ndim = len(shape)
    if ndim == 4:
        image_data = image_data[0,0,...]
    if ndim == 3:
        image_data = image_data[0,...]

    pdim = len(psf_data.shape)
    if pdim == 4:
        psf_data = psf_data[0,0,...]
    if pdim == 3:
        psf_data = psf_data[0,...]
  
    
    m = 0
    for i, (p, src) in enumerate(zip(pos, sources)):      
        x,y = p
        if x > shape[-2] or y > shape[-1] or (numpy.array(p) < 0).any():
            pos.remove(p)
            sources.remove(src)
            model.sources.remove(src)
            m += 1

        if (y+step[1] > shape[-1]) or (y-step[1] < 0):
            if p in pos:
                pos.remove(p)
                model.sources.remove(src)
                sources.remove(src)
            m += 1
        if (x+step[0] > shape[-2]) or (x-step[0] < 0):
            if p in pos:
                pos.remove(p)
                model.sources.remove(src)
                sources.remove(src)
            m += 1
    if m > 0:
        log.debug("It will be useful to increase the image size,"
                  "sources with ra+step or dec+step > image size"
                  "are removed")

    central = psf_hdr["CRPIX2"]
    psf_region = psf_data[central-step[0] : central+step[0],
                 central-step[1] : central+step[1]]
    psf_region = psf_region.flatten()
     
    corr = []
    n = 0
    for src, (ra, dec) in zip(sources, pos): 
        data_region = image_data[dec-step[0] : dec+step[0],
                                ra-step[1] : ra+step[1]].flatten()
        norm_data = (data_region-data_region.min())/(data_region.max()-
                                                     data_region.min())
        c_region = numpy.corrcoef((norm_data, psf_region))
        cf_region =  (numpy.diag((numpy.rot90(c_region))**2)
                                  .sum())**0.5/2**0.5
        cf = cf_region

        if math.isnan(float(cf)) or cf == 0:
            model.sources.remove(src)
            sources.remove(src)
            n += 1
        else:
            corr.append(cf)

            if setatr:
                src.setAttribute("cf",cf)

    if n > 0:
        log.debug("%d sources were removed due to 0/nan correlation"%n)

    if do_high:
        for src, crr in zip(sources, corr):
            if crr > thresh:
               src.setTag(coefftag, True)
    model.save(catalog)     

    return corr
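
The correlation factor computed above, (diag(rot90(C)**2).sum())**0.5 / 2**0.5, reduces to the magnitude of the off-diagonal Pearson coefficient: rot90 moves the two equal cross terms C[0,1] and C[1,0] onto the diagonal, so the sum is 2*C[0,1]**2 and the square roots cancel the factor of 2. A quick equivalence check on hypothetical data:

import numpy

a = numpy.random.rand(100)
b = 0.5 * a + 0.1 * numpy.random.rand(100)

c = numpy.corrcoef((a, b))
cf = (numpy.diag(numpy.rot90(c) ** 2).sum()) ** 0.5 / 2 ** 0.5
assert numpy.isclose(cf, abs(c[0, 1]))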
Example #38
import numpy
import Tigger
import os
import sys

prefix = sys.argv[1]

fitsfile = prefix + '.fits'
lsmfile = prefix + '.pybdsm.lsm.html'
#pngname = '/nfs/wwwpeople/hey036/ASKAP/pcal_models/'+prefix+'.png'
pngname = prefix + '.png'

srcs = Tigger.load(lsmfile).sources

syscall = 'mViewer -ct 0 -color yellow -grid equatorial -color cyan '


def r2d(x):
    return 180.0 * x / numpy.pi


for src in srcs:
    ra_d = str(r2d(src.pos.ra))
    dec_d = str(r2d(src.pos.dec))
    dE = src.getTag('dE')
    if dE:
        syscall += '-color red '
    else:
        syscall += '-color yellow '
    syscall += '-symbol 0.50 circle -mark ' + ra_d + ' ' + dec_d + ' '
Example #39
def srcthumbs(inputFits, inputLsm):
    padding = 30
    srclist = []

    input_hdu = pyfits.open(inputFits)[0]
    hdr = input_hdu.header
    WCS = astWCS.WCS(hdr, mode='pyfits')
    if len(input_hdu.data.shape) == 2:
        image = numpy.array(input_hdu.data[:, :])
    elif len(input_hdu.data.shape) == 3:
        image = numpy.array(input_hdu.data[0, :, :])
    else:
        image = numpy.array(input_hdu.data[0, 0, :, :])

    pitch = 0.9
    offsets = pitch * square_6x6()
    beam = getBeam(inputFits)
    offset = offsets[beam]

    ra_ptg = hdr.get('CRVAL1')
    dec_ptg = hdr.get('CRVAL2')

    ra0 = ra_ptg + offset[0]
    dec0 = dec_ptg + offset[1]

    print ra_ptg, dec_ptg, ra0, dec0, beam

    tmp_x = []
    tmp_y = []
    for src in Tigger.load(inputLsm, verbose=False).sources:
        tmp_x.append(src.pos.ra * 180.0 / numpy.pi)
        tmp_y.append(src.pos.dec * 180.0 / numpy.pi)
    mx = numpy.mean(numpy.array(tmp_x))
    my = numpy.mean(numpy.array(tmp_y))
    print 'mx,my =', mx, my

    for src in Tigger.load(inputLsm, verbose=False).sources:
        name = src.name
        flux = src.flux.I
        ra_d = src.pos.ra * 180.0 / numpy.pi
        dec_d = src.pos.dec * 180.0 / numpy.pi

        ra_pix, dec_pix = WCS.wcs2pix(ra_d, dec_d)
        x0, x1 = int(ra_pix - padding), int(1 + ra_pix + padding)
        y0, y1 = int(dec_pix - padding), int(1 + dec_pix + padding)
        if x0 < 1:
            x0 = 0
        if y0 < 1:
            y0 = 0
        if x1 > image.shape[1] - 1:
            x1 = image.shape[1]
        if y1 > image.shape[0] - 1:
            y1 = image.shape[0]
        thumbnail = image[y0:y1, x0:x1]
        rms = numpy.std(thumbnail)

        # This probably works for anything other than ASKAP
        # r = src.r

        dx = ra_d - ra0
        dy = dec_d - dec0
        r = ((dx**2.0) + (dy**2.0))**0.5
        print(r)

        srclist.append((name, ra_d, dec_d, flux, rms, r))
    return srclist
Example #40

def rad2deg(xx):
    return 180.0 * xx / numpy.pi


rf = open('setup_dE_runfile.sh', 'w')

count = 0
limit = 4

for lsm in xx:  # xx: list of input LSM filenames, defined earlier in the original script
    opdirs = lsm + '_collapsed.txt'
    print('python setup_dE_model.py ' + opdirs, file=rf)
    f = open(opdirs, 'w')
    srcs = Tigger.load(lsm, verbose=False).sources
    for src in srcs:
        dE = src.getTag('dE')
        lead = src.getTag('cluster_lead')
        if dE and lead:
            name = src.name
            ra = rad2deg(src.pos.ra)
            dec = rad2deg(src.pos.dec)
            if ra < 0.0:
                ra += 360.0
            if count < limit:
                print(name, ra, dec, file=f)
                count += 1
    f.close()

rf.close()
Example #41
def reliability(lsm, tag="ar"):
    """Return the highest reliability value among sources carrying `tag`."""
    model = Tigger.load(lsm, verbose=False)
    sources = [src for src in model.sources if src.getTag(tag)]
    rel = [src.rel for src in sources]
    return max(rel)
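A quick usage sketch (the catalog name is hypothetical):

# prints the best reliability among "ar"-tagged sources
best = reliability("image-0.lsm.html", tag="ar")
print("max reliability: %.3f" % best)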
Example #42
    #text = "Peak=%.4g  : Tot=%s"%(peak, tot)
    #pylab.title(text)
    x,y = [numpy.linspace(0,width,width)]*2
    xx,yy = numpy.meshgrid(x,y)
    #pylab.contour( gaussfitter2.twodgaussian(p,0,1,1)(xx,yy))

    pylab.savefig(prefix+"2d.png")
    pylab.clf()

    pylab.plot(numpy.diagonal(stacked)*1e3, "b-", label="stacked")
    noise = data.std()
    pylab.plot( [noise*1e3],"r--", label="noise")
    pylab.ylabel("mJy/beam")
    pylab.legend()
    pylab.title("Noise = %.3g mJy"%(noise*1e3))
    pylab.savefig(prefix+"1d.png")
    pylab.clf()

    return stacked.max() 


if __name__ == "__main__":
    imagename = sys.argv[1]
    lsmname = sys.argv[2]
    
    import Tigger
    model = Tigger.load(lsmname)
    positions = [[numpy.rad2deg(src.pos.ra), numpy.rad2deg(src.pos.dec)]
                 for src in model.sources]

    stackem(imagename, positions, 100, prefix="3c147-stacked-V")
Example #43
      lsms = [ filename for filename in os.listdir(".") if filename.endswith(".lsm.html") ];
      if not lsms:
        parser.error("No LSMs found. Use --lsm to specify one explicitly.");
      lsm_filename = lsms[0];
    else:
      lsm_filename = options.lsm;
    #
    # find tigger
    try:
      import Tigger;
    except ImportError:
      # make plot of average dE to model
      sys.path.append(os.getenv('HOME'));
      import Tigger
    print "Using LSM file %s"%lsm_filename;
    model = Tigger.load(lsm_filename);
    lsm_timestamp = os.path.getmtime(lsm_filename);

    # if MS file not specified, use first one found
    ms,msant = load_ms();
    nant = msant.nrows();
    # read UVWs
    uvw0 = ms.query('ANTENNA1==0 && ANTENNA2==%d'%(nant-1)).getcol('UVW');
    uvw = uvw0[30::60];
    # read phase center
    import pyrap.tables
    radec0 = pyrap.tables.table(ms.getkeyword('FIELD')).getcol('PHASE_DIR')[0][0];
    print "Phase center is at",radec0;
  else:
    model = None;
Example #44
  def source_list (self,ns,max_sources=None,**kw):
    """Reads LSM and returns a list of Meow objects.
    ns is node scope in which they will be created.
    Keyword arguments may be used to indicate which of the source attributes are to be
    created as Parms, use e.g. I=Meow.Parm(tags="flux") for this.
    The use_parms option may override this.
    """;
    if self.filename is None:
      return [];
    # load the sky model
    if self.lsm is None:
      self.lsm = Tigger.load(self.filename);

    # sort by brightness
    sources = sorted(self.lsm.sources, key=lambda src: src.brightness(), reverse=True);

    # extract subset, if specified
    sources = SourceSubsetSelector.filter_subset(self.lsm_subset,sources,self._getTagValue);
    # get nulls subset
    if self.null_subset:
      nulls = set([src.name for src in SourceSubsetSelector.filter_subset(self.null_subset,sources)]);
    else:
      nulls = set();
    parm = Meow.Parm(tags="source solvable");
    # make copy of kw dict to be used for sources not in solvable set
    kw_nonsolve = dict(kw);
    # and update kw dict to be used for sources in solvable set
    # this will be a dict of lists of solvable subgroups
    parms = [];
    subgroups = {};
    if self.solvable_sources:
      subgroup_order = [];
      for sgname in _SubgroupOrder:
        if getattr(self,'solve_%s'%sgname):
          sg = subgroups[sgname] = [];
          subgroup_order.append(sgname);

    # make Meow list
    source_model = []

    for src in sources:
      is_null = src.name in nulls;
      # this will be True if this source has solvable parms
      solvable = self.solvable_sources and not is_null and ( not self.lsm_solvable_tag
                  or getattr(src,self.lsm_solvable_tag,False) );
      if solvable:
        # independent groups?
        if self.lsm_solve_group_tag:
          independent_sg = sgname = "%s:%s"%(self.lsm_solve_group_tag,getattr(src,self.lsm_solve_group_tag,"unknown"));
        else:
          independent_sg = "";
          sgname = 'source:%s'%src.name;
        if sgname in subgroups:
          sgsource = subgroups[sgname];
        else:
          sgsource = subgroups[sgname] = [];
          subgroup_order.append(sgname);
      # make dict of source parameters: for each parameter we have a value,subgroup pair
      if is_null:
        attrs = dict(ra=src.pos.ra,dec=src.pos.dec,I=0,Q=None,U=None,V=None,RM=None,spi=None,freq0=None);
      else:
        attrs = dict(
          ra=     src.pos.ra,
          dec=    src.pos.dec,
          I=      src.flux.I,
          Q=      getattr(src.flux,'Q',None),
          U=      getattr(src.flux,'U',None),
          V=      getattr(src.flux,'V',None),
          RM=     getattr(src.flux,'rm',None),
          freq0=  getattr(src.flux,'freq0',None) or (src.spectrum and getattr(src.spectrum,'freq0',None)),
          spi=    src.spectrum and getattr(src.spectrum,'spi',None)
        );
      if not is_null and isinstance(src.shape,ModelClasses.Gaussian):
        attrs['lproj'] = src.shape.ex*math.sin(src.shape.pa);
        attrs['mproj'] = src.shape.ex*math.cos(src.shape.pa);
        attrs['ratio'] = src.shape.ey/src.shape.ex;
      # construct parms or constants for source attributes, depending on whether the source is solvable or not
      # If source is solvable and this particular attribute is solvable, replace
      # value in attrs dict with a Meq.Parm.
      if solvable:
        for parmname,value in attrs.items():
          sgname = _Subgroups.get(parmname,None);
          if sgname in subgroups:
            solvable = True;
            parm = attrs[parmname] = ns[src.name](parmname) << Meq.Parm(value or 0,
                                                                tags=["solvable",sgname],solve_group=independent_sg);
            subgroups[sgname].append(parm);
            sgsource.append(parm);
            parms.append(parm);

      # construct a direction
      direction = Meow.Direction(ns,src.name,attrs['ra'],attrs['dec'],static=not solvable or not self.solve_pos);

      # construct a point source or gaussian or FITS image, depending on source shape class
      if src.shape is None or is_null:
        msrc = Meow.PointSource(ns,name=src.name,
                I=attrs['I'],Q=attrs['Q'],U=attrs['U'],V=attrs['V'],
                direction=direction,
                spi=attrs['spi'],freq0=attrs['freq0'],RM=attrs['RM']);
      elif isinstance(src.shape,ModelClasses.Gaussian):
        msrc = Meow.GaussianSource(ns,name=src.name,
                I=attrs['I'],Q=attrs['Q'],U=attrs['U'],V=attrs['V'],
                direction=direction,
                spi=attrs['spi'],freq0=attrs['freq0'],
                lproj=attrs['lproj'],mproj=attrs['mproj'],ratio=attrs['ratio']);
        if solvable and 'shape' in subgroups:
          subgroups['pos'] += direction.get_solvables();
      elif isinstance(src.shape,ModelClasses.FITSImage):
        msrc = Meow.FITSImageComponent(ns,name=src.name,
                    filename=src.shape.filename,
                    direction=direction);
        msrc.set_options(fft_pad_factor=(src.shape.pad or 2));

      msrc.solvable = solvable;

      # copy standard attributes from sub-objects
      for subobj in src.flux,src.shape,src.spectrum:
        if subobj:
          for attr,val in subobj.getAttributes():
            msrc.set_attr(attr,val);
      # copy all extra attrs from source object
      for attr,val in src.getExtraAttributes():
        msrc.set_attr(attr,val);

      # make sure Iapp exists (init with I if it doesn't)
      if msrc.get_attr('Iapp',None) is None:
        msrc.set_attr('Iapp',src.flux.I);

      source_model.append(msrc);

    # if any solvable parms were made, make a parmgroup and solve job for them
    if parms:
      if os.path.isdir(self.filename):
        table_name = os.path.join(self.filename,"sources.fmep");
      else:
        table_name = os.path.splitext(self.filename)[0]+".fmep";
      # make list of Subgroup objects for every non-empty subgroup
      sgs = [];
      for sgname in subgroup_order:
        sglist = subgroups.get(sgname,None);
        if sglist:
          sgs.append(Meow.ParmGroup.Subgroup(sgname,sglist));
      # make main parm group
      pg_src = Meow.ParmGroup.ParmGroup("source parameters",parms,
                  subgroups=sgs,
                  table_name=table_name,table_in_ms=False,bookmark=True);
      # now make a solvejobs for the source
      Meow.ParmGroup.SolveJob("cal_source","Solve for source parameters",pg_src);


    return source_model;
Example #45
rec_dates = [20140110,20140112,20140501,20140521,20140524,20140603,20140614,20140622,20140625] #observation dates
xdates = [datetime.datetime.strptime(str(int(date)),'%Y%m%d') for date in rec_dates]

#clusternames contains the list of our cluster catalog
clusternames = ['recluster-0.lsm.html','recluster-1.lsm.html','recluster-2.lsm.html','recluster-3.lsm.html','recluster-4.lsm.html','recluster-5.lsm.html','recluster-6.lsm.html','recluster-7.lsm.html','recluster-8.lsm.html']

#textnames contains the gaul files produced by the catalog
textnames = ['image-0.lsm.gaul','image-1.lsm.gaul','image-2.lsm.gaul','image-3.lsm.gaul','image-4.lsm.gaul','image-5.lsm.gaul','image-6.lsm.gaul','image-7.lsm.gaul','image-8.lsm.gaul']

#filenames contains the catalog lsm.html files obtained directly after the PyBDSM search
filenames = ['image-0.lsm.html','image-1.lsm.html','image-2.lsm.html','image-3.lsm.html','image-4.lsm.html','image-5.lsm.html','image-6.lsm.html','image-7.lsm.html','image-8.lsm.html']

#rawmodels contains the catalogs in filenames, which will be used to retrieve errors
rawmodels = []
for i in range(len(filenames)):
	rawmodels.append(Tigger.load(filenames[i]))

# loading the gaul files which will be used for extracting the errors
data = []
for f in textnames:
	data.append(np.loadtxt(f,dtype = 'str'))


R = len(filenames)  # number of catalogs, used when building our source table
N = 10  # number of sources we are manipulating

def get_error(A,i):
	''' Takes a cluster intensity A and a model number i, then walks through the model and the text data to get the error associated with each source in the cluster. '''

	errors = []
	for src in rawmodels[i].sources:
Example #46
startup_dprint(1, "imported pyfits")

DEG = math.pi / 180

startup_dprint(1, "importing WCS")

# If we're being imported outside the main app (e.g. a script is trying to read a Tigger model,
# whether TDL or otherwise), then pylab may be needed by that script for decent God-fearing
# purposes. Since WCS is going to pull it in anyway, we try to import it here, and if that
# fails, replace it by dummies.
if not Tigger.matplotlib_nuked:
    try:
        import pylab
    except:
        Tigger.nuke_matplotlib()

# some locales cause WCS to complain that "." is not the decimal separator, so reset it to "C"
import locale

locale.setlocale(locale.LC_NUMERIC, 'C')

from astropy.wcs import WCS, FITSFixedWarning
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs import utils
import PyWCSTools.wcs

startup_dprint(1, "imported WCS")
warnings.simplefilter('ignore', category=FITSFixedWarning)
Example #47
    def get_reliability(self):


        # finding sources 
        self.source_finder(image=self.imagename, lsmname=self.poslsm, 
                           thresh=self.pos_smooth, **self.opts_pos)

        self.source_finder(image=self.negativeimage, lsmname=self.neglsm,
                           thresh=self.neg_smooth, **self.opts_neg)

        # removing sources within a specified radius
        self.remove_sources_within(catalog=self.poslsm, rel_excl_src=
                                   self.rel_excl_src)
        self.remove_sources_within(catalog=self.neglsm, rel_excl_src=
                                   self.rel_excl_src)

        # add local variance as a parameter
        if self.do_local_var:
            utils.local_variance(self.imagedata, self.header, 
                              catalog=self.poslsm, wcs=self.wcs, 
                              pixelsize=self.pixelsize, local_region=
                              self.local_var_region, savefig=False,
                              highvariance_factor=None, prefix=self.prefix,
                              neg_side=True)

            utils.local_variance(self.imagedata, self.header,
                              catalog=self.neglsm, wcs=self.wcs,
                              pixelsize=self.pixelsize, local_region=
                              self.local_var_region, savefig=False,
                              highvariance_factor=None, prefix=self.prefix, neg_side=True)
        # compute correlation if only do_psf_corr = True 
        #and the psf is provided 
        if self.do_psf_corr and self.psfname:
            utils.psf_image_correlation(
                 catalog=self.poslsm, psfimage=self.psfname,
                 imagedata=self.imagedata, header=self.header,
                 wcs=self.wcs, pixelsize=self.pixelsize,
                 corr_region=self.psf_corr_region, prefix= self.prefix)
            utils.psf_image_correlation(
                 catalog=self.neglsm, psfimage=self.psfname, 
                 imagedata=self.imagedata, header=self.header,
                 wcs=self.wcs, pixelsize=self.pixelsize, 
                 corr_region=self.psf_corr_region, prefix=self.prefix)
      
        ##TODO verbose vs. logging
        pmodel = Tigger.load(self.poslsm, verbose=self.loglevel)
        nmodel = Tigger.load(self.neglsm, verbose=self.loglevel)
        
        posSources = pmodel.sources
        negSources = nmodel.sources

        npsrc = len(posSources)
        nnsrc = len(negSources)      
 
        positive, labels = self.params(posSources, pmodel)
        negative, labels = self.params(negSources, nmodel)

        # setting up a kernel, Gaussian kernel
        bandwidth = []

        for plane in negative.T:
            bandwidth.append(plane.std())



        nplanes = len(labels)
        cov = numpy.zeros([nplanes, nplanes])


        for i in range(nplanes):
            for j in range(nplanes):
                if i == j:
                    cov[i, j] = bandwidth[i]*((4.0/((nplanes+2)*
                                  npsrc))**(1.0/(nplanes+4.0)))

        pcov = utils.gaussian_kde_set_covariance(positive.T, cov)
        ncov = utils.gaussian_kde_set_covariance(negative.T, cov)
    

        # get number densities
        nps = pcov(positive.T) * npsrc
        nns = ncov(positive.T) * nnsrc

        # define reliability of positive catalog
        rel = (nps-nns)/nps

        for src, rf in zip(posSources, rel):
            src.setAttribute("rel", rf)
        out_lsm = self.poslsm
        pmodel.save(out_lsm)

        if self.makeplots:
            savefig = self.prefix + "_planes.png"
            utils.plot(positive, negative, rel=rel, labels=labels,
                        savefig=savefig, prefix=self.prefix)

        return  self.poslsm, self.neglsm      
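The reliability definition above, rel = (nps - nns) / nps, compares the number densities of positive and negative detections in parameter space. A self-contained toy version, using scipy.stats.gaussian_kde in place of utils.gaussian_kde_set_covariance (so the bandwidth handling differs), could look like this:

import numpy
from scipy.stats import gaussian_kde

# stand-in parameter vectors: rows are detections, columns are features
positive = numpy.random.randn(200, 2) + 1.0
negative = numpy.random.randn(50, 2)

pkde = gaussian_kde(positive.T)
nkde = gaussian_kde(negative.T)

nps = pkde(positive.T) * len(positive)   # positive number density
nns = nkde(positive.T) * len(negative)   # negative number density
rel = (nps - nns) / nps                  # ~1: real source, <=0: likely artefact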
Example #48
File: utils.py Project: lowks/sourcery
def local_variance(imagedata, header, catalog, wcs=None,
                   pixelsize=None, tag=None,local_region=5,
                   noise=None, savefig=True, highvariance_factor=0.8,
                   high_local_tag=None, neg_side=False, 
                   setatr=True, do_high_loc=False, prefix=None):

    """ Calculates the local varience (lv) around a source on 
        one side of interest. 
 
    imagedata : Reshaped FITS data.
    header : FITS header, e.g. img = pyfits.open("test.fits");
        header = img[0].header
    catalog : Source catalog, in Tigger format.
        Source model to compute local variance around.

    tag : str, optional.
        if specified then the local variance will be
        computed for only a subset of sources with a tag,
        e.g., 'tag=snr'.
    wcs :This class provides methods
        for accessing information from the World  Coordinate System
        (WCS) contained in the header of a FITS image. Conversions 
        between pixel and WCS coordinates can also be performed.
        If not provided it is directly obtained from the Fits header
        provided.
    pixelsize: float, Default is None.
         If not provided then it is directly obtained form a Fits header.
    local_region: int, optional. A default value of 5. 
        Gives a region to compute the local variance in
        psf sizes, e.g, 'local_region = 2',
        then a region (= 2 * beam size) around a source is used.

    highvariance_factor: float, optional. A default value of 0.8. 
        If highvariance_factor=0.8 is given this means that
        the sources with local variance greater than  
        0.8*image_noise will be tagged 'high_var' if
        high_local_tag=None.

    high_local_tag : str, optional. A default tag None.
        Tag assigned to sources of high local variance.
        If None is provided the default tag will be used
        i.e 'high_var'. Else it uses the specified tag.

    setatr : bool, optional. Default is True.
        If True all sources will be tagged with 'l'
        giving each detection an extra local variance
        parameter.

    do_high_loc : bool, optional. Default is False.
        If True sources with high local variance will be tagged
        using 'localvariance_tag' (see above).
    prefix :  str, optional. Default is None.
    """

    data = imagedata
    beam = header["BMAJ"]

    if pixelsize is None:
        pixelsize = abs(header["CDELT1"])
    if wcs is None:
        wcs = WCS(header, mode="pyfits")

    bmaj = int(round(beam/pixelsize)) # beam size in pixels

    log = logger(level=0, prefix=prefix)
    if not isinstance(local_region, int):
        if isinstance(local_region, float):
            local_region = int(round(local_region))
            log.debug("Float is provided and int is required,"
                       "arounding offto the nearest integer")
            if local_region == 0:
                log.error("It rounded off to zero now,"
                          "change local_region into an integer."
                          "Aborting")
        else:
            log.error("local_region must be an integer. Abort")
    
    step = local_region * bmaj

    noise = noise or negative_noise(data)
    
    model = Tigger.load(catalog)
    sources = []

    if tag:
        log.debug("Local variance is computed only "
                  "for sources tagged '%s'" % tag)
        sources = [src for src in model.sources if src.getTag(tag)]
    else:
        sources = list(model.sources)
    
    positions_sky = [[numpy.rad2deg(src.pos.ra), numpy.rad2deg(src.pos.dec)]
                     for src in sources]
    positions = [wcs.wcs2pix(*pos) for pos in positions_sky]

    shape = data.shape 
    ndim = len( data.shape)
    if ndim == 4:
        data = data[0,0,...]
    if ndim == 3:
        data = data[0,...]
 
    step = [step, step]
    
    m = 0 
    # NB: this filters by mutating the lists being iterated over;
    # iterating over copies (list(...)) would be safer
    for i, (pos, src) in enumerate(zip(positions, sources)):
        x, y = pos
        if x > shape[-2] or y > shape[-1] or (numpy.array(pos) < 0).any():
            positions.remove(pos)
            model.sources.remove(src)
            sources.remove(src)
            m += 1

        if (y+step[1] > shape[-1]) or (y-step[1] < 0):
            if pos in positions:
                positions.remove(pos)
                model.sources.remove(src)
                sources.remove(src)
            m += 1

        if (x+step[0] > shape[-2]) or (x-step[0] < 0):
            if pos in positions:
                positions.remove(pos)
                model.sources.remove(src)
                sources.remove(src)
            m += 1
    if m > 0:
        log.debug("It may be useful to increase the image size: "
                  "sources with ra+step or dec+step > image size "
                  "were removed")
        
    _std = []
    
    if neg_side:
        data = -data
        log.debug("Using the negative side of the provided image.")
 
    n = 0
    for (x, y), srs in zip(positions, sources):
        pos = [x,y]
        subrgn = data[y-step[0] : y+step[0], x-step[1] : x+step[1]]
        subrgn = subrgn[subrgn > 0]
        std = subrgn.std()

        if math.isnan(float(std)) or std == 0:
            sources.remove(srs)
            model.sources.remove(srs)
            positions.remove(pos)
            n += 1
        else:
            _std.append(std)
            if setatr:
                srs.setAttribute("l", std)
        
    if n > 0:
        log.debug("NaN encountered %d times. Increase the size of the "
                  "region or check the image; sources with 0 or NaN "
                  "variance were removed." % n)


    def high_variance_sources(
            pos, local_variance, noise, model, threshold,
            savefig=savefig, prefix=None, localtag=None):

        if savefig:
            save_fig = (prefix + "_variance.png") if prefix else \
                catalog.replace(".lsm.html", ".png")

        local = [l/1.0e-6 for l in local_variance]
        x = numpy.arange(len(pos))
        pylab.figure()
        pylab.plot(x, local)
        pylab.plot([noise/1.0e-6] * len(local_variance))

        for i, (pos, src) in enumerate(zip( pos, model.sources)):
            if _std[i] > threshold:
                src.setTag(localtag, True)
                pylab.plot(x[i], local[i], "rD")
                pylab.annotate(src.name, xy=(x[i],local[i]))
        if savefig:
            pylab.ylabel("local variance[$\mu$]")
            pylab.savefig(save_fig)

    if high_local_tag is None:
        high_local_tag = "high_var"
    if do_high_loc:
        threshold = highvariance_factor * noise
        high_variance_sources(positions, _std, noise, model,
                              threshold=threshold, savefig=savefig,
                              prefix=prefix, localtag=high_local_tag)
    model.save(catalog)   

    return _std 
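A usage sketch for the function above (file names hypothetical):

import pyfits  # or: from astropy.io import fits as pyfits

img = pyfits.open("restored.fits")
lv = local_variance(img[0].data, img[0].header,
                    catalog="restored.lsm.html",
                    local_region=3, savefig=False, prefix="restored")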
Example #49
#This script compares all the cluster catalogs and identifies all unique sources, which are stored in a new catalog. This new catalog, cluster_catalog.lsm.html, is used as our reference catalog.

import Tigger
import numpy as np

BMAJ = 0.054613333808050002*np.pi/180  #smallest synthesised beam width, in radians, used as the matching tolerance
filenames = ['cluster-0.lsm.html','cluster-1.lsm.html','cluster-2.lsm.html','cluster-3.lsm.html','cluster-4.lsm.html','cluster-5.lsm.html','cluster-6.lsm.html','cluster-7.lsm.html','cluster-8.lsm.html']

unique = [] # list used to accumulate the unique sources for the reference catalog


#load all the catalogs with Tigger into a list called models
models = []
for f in filenames:                  
	models.append(Tigger.load(f))

def coor_compare(model1, model2, a, b, tolerance=BMAJ):
	''' Compares the source coordinates across two catalogs to find sources common to both. '''
	
	for src in model1.sources:
		issource = False 
		coord1 = [src.pos.ra,src.pos.dec]
		for src2 in model2.sources:
			coord2 = [src2.pos.ra,src2.pos.dec]
			if (np.sqrt((coord1[0] -coord2[0])**2 + (coord1[1] -coord2[1])**2)) < tolerance:
				for src3 in unique:
					coord3 = [src3.pos.ra,src3.pos.dec]
					if (np.sqrt((coord1[0] -coord3[0])**2 + (coord1[1] -coord3[1])**2)) < tolerance :
						 issource = True
				if not issource:
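The listing is cut off above; a minimal, self-contained sketch of the tolerance-based matching it implements (the same flat-sky Euclidean distance in ra/dec radians) might read:

def is_near(coord, srcs, tol=BMAJ):
	''' True if any source in srcs lies within tol radians of coord (flat sky). '''
	for s in srcs:
		if np.sqrt((coord[0] - s.pos.ra)**2 + (coord[1] - s.pos.dec)**2) < tol:
			return True
	return False

# keep a model1 source only if model2 confirms it and it is not yet in unique
for src in models[0].sources:
	c = (src.pos.ra, src.pos.dec)
	if is_near(c, models[1].sources) and not is_near(c, unique):
		unique.append(src)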
Example #50
def abs_ex(cfg_par):
    '''
        Extract spectra from all lines of sight extracted using a catalog of sources or a source finder

        WARNING:
            source finder, or extraction of sources from a catalog (cont_src module) must be run first

        INPUT:
            dictionary of parameter file

        OUTPUT:

            spectra are saved in ascii format in cfg_par['general']['workdir']+/spec/
            spectra have the following columns: 
            frequency, flux, noise (MADFM), optical depth, optical depth noise, mean noise
        
        OPTIONS:
            chromatic aberration correction
            continuum subtraction
            hanning smoothing
        '''

    verb = cfg_par['general']['verbose']

    cubename = cfg_par['general'].get('cubename', None)
    cubefile = fits.open(cubename)  # read input

    src_list_csv = cfg_par['general']['absdir'] + 'mir_src_sharp.csv'

    hdr = cubefile[0].header

    if 'NAXIS4' in hdr:
        del hdr['NAXIS4']
    if 'CRVAL4' in hdr:
        del hdr['CRVAL4']
    if 'CDELT4' in hdr:
        del hdr['CDELT4']
    if 'CRPIX4' in hdr:
        del hdr['CRPIX4']
    if 'CTYPE4' in hdr:
        del hdr['CTYPE4']
    if 'CROTA4' in hdr:
        del hdr['CROTA4']
    if 'CUNIT4' in hdr:
        del hdr['CUNIT4']

    sci = cubefile[0].data
    sci = sci.squeeze()
    x = hdr['NAXIS1']
    y = hdr['NAXIS2']
    z = hdr['NAXIS3']
    cen_imx = hdr['CRPIX1']
    cen_imy = hdr['CRPIX2']
    freq0 = hdr['CRVAL3']
    freq_del = hdr['CDELT3']
    key = 'source_catalog'
    if cfg_par['source_catalog'].get('enable', False):

        catalogName = cfg_par[key].get('catalog', 'NVSS')
        if catalogName == 'NVSS':
            catalog_table = str(cfg_par['general'].get('absdir')) + str(
                cfg_par[key].get('catalog_file'))
            tab = ascii.read(catalog_table)
            J2000_name = tab['NVSS']
            ra = tab['RAJ2000']
            dec = tab['DEJ2000']
            flux_cont = tab['S1.4']

            src_id = np.arange(0, len(ra) + 1, dtype=int)

        elif catalogName == 'PYBDSF':
            J2000_name, ra, dec, flux_cont = [], [], [], []
            import Tigger
            from astropy import units as u
            from astropy.coordinates import Angle
            catalog_table = '{:s}{:s}'.format(
                cfg_par['general'].get('workdir'),
                cfg_par['source_catalog'].get('catalog_file'))
            model = Tigger.load(catalog_table)
            sources = model.sources
            for source in sources:
                ra_deg_angle = Angle(np.rad2deg(source.pos.ra) * u.deg)
                dec_deg_angle = Angle(np.rad2deg(source.pos.dec) * u.deg)
                ra_hms = ra_deg_angle.to_string(unit=u.hourangle, sep=':')
                dec_dms = dec_deg_angle.to_string(unit=u.degree, sep=':')
                J2000_name.append('{:s}{:s}{:s}'.format(
                    ra_hms.replace(':', ''),
                    '+' if source.pos.dec > 0.0 else '-',
                    dec_dms.replace(':', '')))
                ra.append(ra_hms)
                dec.append(dec_dms)
                flux_cont.append(source.flux.I)

            src_id = np.arange(0, len(ra) + 1, 1, dtype=int)

    elif os.path.exists(src_list_csv):
        # open file
        src_list_vec = ascii.read(src_list_csv)
        J2000_name = np.array(src_list_vec['J2000'], dtype=str)
        ra = np.array(src_list_vec['ra'], dtype=str)
        dec = np.array(src_list_vec['dec'], dtype=str)
        flux_cont = np.array(src_list_vec['peak'], dtype=float)
        src_id = np.array(src_list_vec['ID'], dtype=int) - 1

    else:
        print(
            "\n\t!!!! catalog of sources does not exist. Enable source_catalog or source_finder first\n"
        )
        sys.exit(0)

    pixels = conv_units.coord_to_pix(cubename, ra, dec, verbose=False)

    key = 'spec_ex'
    freq = cubef.zaxis(cubename)
    abs_mean_rms = np.zeros(pixels.shape[0])
    abs_los_rms = np.zeros(pixels.shape[0])
    tau_los_rms = np.zeros(pixels.shape[0])
    outnames = []
    count_thresh = 0
    count_fov = 0
    count_blanks = 0
    average_noise = []
    for i in range(0, pixels.shape[0]):

        # extract spectrum from each line of sight
        flux = np.zeros(freq.shape[0])
        madfm = np.zeros(freq.shape[0])

        if np.isnan(pixels[i, 0]) or np.isnan(pixels[i, 1]):
            count_thresh += 1
            pass

        elif (0 < int(pixels[i, 0]) < x and 0 < int(pixels[i, 1]) < y):
            pix_x_or = int(pixels[i, 0])
            pix_y_or = int(pixels[i, 1])
            for j in range(0, z):
                chrom_aber = cfg_par[key].get('chrom_aberration', False)
                # correct for chromatic aberration
                if chrom_aber:

                    if (cfg_par[key].get('zunit', 'Hz') == 'm/s'):
                        freq_real = freq * 1e2
                        freq_real = (kk.C * kk.HI) / (freq_real + kk.C)
                        freq_real0 = (kk.C * kk.HI) / (hdr['CRVAL3'] * 1e2 +
                                                       kk.C)
                        freq_del = (freq_real0 -
                                    freq_real[-1]) / len(freq_real)
                        # depends on whether the cube is in velocity or frequency
                        scale = (freq_real0 - j * freq_del) / freq_real0
                    elif (cfg_par[key].get('zunit', 'Hz') == 'Hz'):
                        freq_del = (hdr['CRVAL3'] - freq[-1]) / len(freq)
                        scale = (hdr['CRVAL3'] - j * freq_del) / hdr['CRVAL3']

                    pix_x = (pix_x_or - hdr['CRPIX1']) * scale + hdr['CRPIX1']
                    pix_y = (pix_y_or - hdr['CRPIX2']) * scale + hdr['CRPIX2']
                    #print('before rounding: x={0:.3f}, y={1:.3f}'.format(pix_x, pix_y))
                    pix_x = int(round(pix_x, 0))
                    pix_y = int(round(pix_y, 0))
                else:
                    pix_x = pix_x_or
                    pix_y = pix_y_or

                if (0 < pix_x < x and 0 < pix_y < y):
                    flux[j] = sci[j, pix_y, pix_x]
                else:
                    flux[j] = 0.0

                #print('x={0:d}, y={1:d}, flux={2:.5f}'.format(pix_x, pix_y, flux[j]))

                # determine the noise of the spectrum [Whiting 2012 et al.] in each channel
                # MADMF: median absolute deviation from the median
                # extract a region were to determine the noise: rectangular ring around the l.o.s.
                rInt = cfg_par['spec_ex']['noise_delta_skip']
                rExt = cfg_par['spec_ex']['noise_delta_pix']

                yExtDown = pix_y - rInt - rExt
                yIntDown = pix_y - rInt
                yIntUp = pix_y + rInt
                yExtUp = pix_y + rInt + rExt

                xExtLeft = pix_x - rInt - rExt
                xIntLeft = pix_x - rInt
                xIntRight = pix_x + rInt
                xExtRight = pix_x + rInt + rExt

                if (xExtRight < hdr['NAXIS1'] and xExtLeft > 0
                        and yExtUp < hdr['NAXIS2'] and yExtDown > 0):
                    valueTmp = sci[j, pix_y, pix_x]

                    corona_1 = sci[j, yIntDown:yExtUp, xExtLeft:xIntLeft]
                    corona_2 = sci[j, yIntUp:yExtUp, xIntLeft:xExtRight]
                    corona_3 = sci[j, yIntDown:yIntUp, xIntRight:xExtRight]
                    corona_4 = sci[j, yExtDown:yIntDown, xExtLeft:xExtRight]
                    corona = np.concatenate((corona_1.flat, corona_2.flat,
                                             corona_3.flat, corona_4.flat))

                    rms = np.nanmedian(corona)
                    if rms != 0.0:
                        med2 = np.abs(corona - rms)
                        madfm[j] = np.nanmedian(med2) / 0.6744888
                    else:
                        madfm[j] = 0.0
                else:
                    madfm[j] = 0.0

                abs_mean_rms[i] = np.nanmean(madfm)

            if np.nansum(flux) == 0.:
                count_blanks += 1
                if verb:
                    print('# Blank spectrum:\t' + str(src_id[i]) + ' ' +
                          J2000_name[i] + ' #')
                continue

            # measure noise in the spectrum outside of the line
            end_spec = float(sci.shape[0])
            end_spec_th = int(end_spec / 3.)
            end_spec = int(end_spec)
            mean_rms = (np.std(flux[0:end_spec_th]) +
                        np.std(flux[end_spec - end_spec_th:end_spec])) / 2.
            mean_rms_arr = np.zeros(sci.shape[0]) + mean_rms

            average_noise.append(mean_rms)

            tau = hi.optical_depth(flux, flux_cont[i])
            if np.nansum(madfm) != 0.0:
                tau_noise = hi.optical_depth(madfm, flux_cont[i])
            else:
                tau_noise = np.zeros(sci.shape[0])

            #write spectrum
            #out_spec = str(cfg_par['general']['specdir']+str(src_id[i])+'_J'+J2000_name[i])+'.txt'
            out_spec = "{0:s}{1:1d}_J{2:s}.txt".format(
                cfg_par['general']['specdir'], src_id[i], J2000_name[i])
            outnames.append(out_spec)

            flag_chans = cfg_par[key].get('flag_chans', None)
            if flag_chans is not None:
                index_flags_l = (np.abs(freq - flag_chans[0])).argmin()
                for k in range(1, len(flag_chans)):
                    index_flags = (np.abs(freq - flag_chans[k])).argmin()
                    flux[index_flags_l:index_flags] = np.nan
                    index_flags_l = index_flags

            if cfg_par[key].get('zunit', 'Hz') == 'm/s':
                xcol = 'Velocity [m/s]'
            elif cfg_par[key].get('zunit', 'Hz') == 'km/s':
                xcol = 'Velocity [km/s]'
            elif cfg_par[key].get('zunit', 'Hz') == 'MHz':
                xcol = 'Frequency [MHz]'
            else:
                xcol = 'Frequency [Hz]'

            t = Table([freq, flux, madfm, tau, tau_noise, mean_rms_arr],
                      names=(xcol, 'Flux [Jy]', 'Noise [Jy]', 'Optical depth',
                             'Noise optical depth', 'Mean noise [Jy]'),
                      meta={'name': 'Spectrum'})
            ascii.write(t, out_spec, overwrite=True)
            if verb:
                print('# Extracted spectrum: \t' + str(src_id[i]) + ' ' +
                      J2000_name[i] + ' #')

            polysub = cfg_par['polynomial_subtraction'].get('enable', False)
            if polysub:

                deg = cfg_par['polynomial_subtraction'].get('degree_pol', 3)
                sub_flux = poly_sub(cfg_par, freq, flux, deg)
                sub_madfm = madfm.copy()
                sub_od = tau.copy()
                sub_noise_od = tau_noise.copy()

                out_spec_polysub = out_spec.split('.')[0]
                out_spec_polysub = out_spec_polysub + '_psub.txt'

                t = Table([
                    freq, sub_flux, sub_madfm, sub_od, sub_noise_od,
                    mean_rms_arr
                ],
                          names=(xcol, 'Flux [Jy]', 'Noise [Jy]',
                                 'Optical depth', 'Noise optical depth',
                                 'Mean noise [Jy]'),
                          meta={'name': 'Spectrum'})
                ascii.write(t, out_spec_polysub, overwrite=True)

            dohan = cfg_par['hanning'].get('enable', False)
            if dohan and not polysub:

                window = cfg_par['hanning'].get('window', 1)
                han_flux = hanning_spec(flux)
                han_madfm = hanning_spec(madfm)
                han_od = hanning_spec(tau)
                han_noise_od = hanning_spec(tau_noise)

                out_spec_han = out_spec.split('.')[0]
                out_spec_han = out_spec_han + '_han.txt'

                t = Table([
                    freq, han_flux, han_madfm, han_od, han_noise_od,
                    mean_rms_arr
                ],
                          names=(xcol, 'Flux [Jy]', 'Noise [Jy]',
                                 'Optical depth', 'Noise optical depth',
                                 'Mean noise [Jy]'),
                          meta={'name': 'Spectrum'})
                ascii.write(t, out_spec_han, overwrite=True)

            elif polysub and dohan:

                han_sub_flux = hanning_spec(sub_flux)
                han_sub_madfm = hanning_spec(sub_madfm)
                han_sub_od = hanning_spec(sub_od)
                han_sub_noise_od = hanning_spec(sub_noise_od)

                out_spec_han = out_spec.split('.')[0]
                out_spec_han = out_spec_han + '_psub_han.txt'

                t = Table([
                    freq, han_sub_flux, han_sub_madfm, han_sub_od,
                    han_sub_noise_od, mean_rms_arr
                ],
                          names=(xcol, 'Flux [Jy]', 'Noise [Jy]',
                                 'Optical depth', 'Noise optical depth',
                                 'Mean noise [Jy]'),
                          meta={'name': 'Spectrum'})
                ascii.write(t, out_spec_han, overwrite=True)

    # close fits file
    cubefile.close()

    print('# Sources flagged: \t\t' + str(count_thresh))
    print('# Blank spectra:\t\t' + str(count_blanks))
    print('# Total number of spectra: \t' +
          str(pixels.shape[0] - count_thresh - count_fov - count_blanks))
    print('# Average noise in spectra: \t' +
          str(round(np.nanmean(average_noise) * 1e3, 1)) + ' mJy/beam')

    return 0
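The per-channel noise estimator used above is the MADFM (median absolute deviation from the median), scaled by 0.6744888 so that it matches the rms of Gaussian noise. In isolation:

import numpy as np

def madfm(a):
    # median absolute deviation from the median, scaled to Gaussian rms
    med = np.nanmedian(a)
    return np.nanmedian(np.abs(a - med)) / 0.6744888

print(madfm(np.random.randn(100000)))  # ~1.0 for unit-variance noise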
Example #51
    def params(self, modelfits):

        # reads in source finder output
        with pyfits.open(modelfits) as hdu:
            data = hdu[1].data

        tfile = tempfile.NamedTemporaryFile(suffix=".txt")
        tfile.flush()

        # writes a catalogue header into a temporary txt file
        with open(tfile.name, "w") as std:
            std.write("#format:name ra_rad dec_rad i emaj_r emin_r pa_r\n")

        model = Tigger.load(tfile.name)  # open a tmp. file

        peak, total, area, loc, corr = [], [], [], [], []

        # read the negative image once, before looping over sources
        with pyfits.open(self.negimage) as hdu:
            negdata = utils.image_data(hdu[0].data)

        for i in range(len(data)):
            flux = data["Total_flux"][i]
            dc_emaj, dc_emin = data["DC_Maj"][i], data["DC_Min"][i]
            ra, dec = data["RA"][i], data["DEC"][i]
            pa = data["DC_PA"][i]
            name = "SRC%d" % i
            peak_flux = data["Peak_flux"][i]

            posrd = ModelClasses.Position(numpy.deg2rad(ra),
                                          numpy.deg2rad(dec))
            flux_I = ModelClasses.Polarization(flux, 0, 0, 0)
            if dc_emaj == 0 and dc_emin == 0:
                shape = None
            else:
                shape = ModelClasses.Gaussian(numpy.deg2rad(dc_emaj),
                                              numpy.deg2rad(dc_emin),
                                              numpy.deg2rad(pa))

            srs = SkyModel.Source(name, posrd, flux_I, shape=shape)

            # using convolved maj and min for reliability estimate
            emaj, emin = data["Maj"][i], data["Min"][i]

            # area: use the fitted ex/ey; if either is 0, assign the beam size
            if emaj == 0 or emin == 0:
                srcarea = math.pi * (numpy.rad2deg(self.bmaj)) * pow(3600.0, 2) *\
                       (numpy.rad2deg(self.bmin))
            elif emaj > 0 and emin > 0:
                srcarea = emaj * emin * math.pi * pow(3600.0, 2)  # arcsecond^2

            # only accepts sources with flux > 0 and not nan RA and DEC
            # and local variance
            pos = self.wcs.wcs2pix(ra, dec)  # position from deg to pixels

            if flux > 0 and peak_flux > 0 and not math.isnan(float(ra))\
                and not math.isnan(float(dec)):

                local = utils.compute_local_variance(negdata, pos,
                                                     self.locstep)

                srs.setAttribute("local_variance", local)

                if not math.isnan(float(local)) or local > 0:
                    if self.psfname:
                        pdata, psf = utils.compute_psf_correlation(
                            self.imagename, self.psfname, pos, self.cfstep)

                        if len(pdata) == len(psf):
                            c_region = numpy.corrcoef((pdata, psf))
                            cf = (numpy.diag((numpy.rot90(c_region))**
                                             2).sum())**0.5 / 2**0.5

                            srs.setAttribute("correlation_factor", cf)
                            corr.append(cf)
                            model.sources.append(srs)
                            peak.append(peak_flux)
                            total.append(flux)
                            area.append(srcarea)
                            loc.append(local)
                    else:
                        model.sources.append(srs)
                        peak.append(peak_flux)
                        total.append(flux)
                        area.append(srcarea)
                        loc.append(local)

        labels = dict(size=(0, "Log$_{10}$(Source area)"),
                      peak=(1, "Log$_{10}$( Peak flux [Jy] )"),
                      tot=(2, "Log$_{10}$( Total flux [Jy] )"))

        if self.do_psf_corr:
            labels.update({"coeff": (len(labels), "Log$_{10}$ (CF)")})
        if self.do_local_var:
            labels.update(
                {"local": (len(labels), "Log$_{10}$(Local Variance)")})
        if self.nearsources:
            labels.update({"near": (len(labels), "Log$_{10}$(Near Sources)")})

        nsrc = len(model.sources)
        out = numpy.zeros([nsrc, len(labels)])

        # returning parameters
        for i, src in enumerate(model.sources):

            ra, dec = src.pos.ra, src.pos.dec
            near = model.getSourcesNear(ra, dec, 5 * self.bmaj)
            nonear = len(near)
            if self.nearsources:
                src.setAttribute("neibours", nonear)

            # assemble the feature vector in the same order as `labels`
            row = [area[i], peak[i], total[i]]
            if self.do_psf_corr:
                row.append(corr[i])
            if self.do_local_var:
                row.append(loc[i])
            if self.nearsources:
                row.append(nonear)
            out[i, ...] = row

        # removes the rows with 0s
        removezeros = (out == 0).sum(1)
        output = out[removezeros <= 0, :]

        return model, numpy.log10(output), labels
Example #52
def addSPI(fitsname_alpha=None, fitsname_alpha_error=None, lsmname=None,
           outfile=None, freq0=None, beam=None, spitol=(-10, 10)):
    """
        Add spectral index to a tigger lsm from a spectral index map (fits format)
        takes in a spectral index map, input lsm and output lsm name.
    """
#    import pylab as plt
    if beam is None:
        raise RuntimeError("the beam option must be specified")

    print "INFO: Getting fits info from: %s, %s" % (fitsname_alpha, fitsname_alpha_error)

    fits_alpha = fitsInfo(fitsname_alpha)    # Get fits info
    image_alpha = fits_alpha['image']     # get image data

    if fitsname_alpha_error:
        fits_alpha_error = fitsInfo(fitsname_alpha_error)
        image_alpha_error = fits_alpha_error['image']
    else:
        fits_alpha_error = fitsInfo(fitsname_alpha)
        image_alpha_error = fits_alpha_error['image']
        image_alpha_error[...] = 1.0

    # may supply FITS file for freq0, in which case just pull ref frequency from FITS file,
    # else explicit frequency, else get frequency from alpha image
    if type(freq0) is str:
        freq0 = fitsInfo(freq0)['freq0']
    else:
        freq0 = freq0 or fits_alpha['freq0']

    model = Tigger.load(lsmname)    # load output sky model
    def rad(a): return a*(180/np.pi)  # convert radians to degrees

    for src in model.sources:
        ra = rad(src.pos.ra)
        dec = rad(src.pos.dec)

        # Cater for point sources and assume source extent equal to the
        # Gaussian major axis along both ra and dec axis
        dra = rad(src.shape.ex) if src.shape else beam[0]
        ddec = rad(src.shape.ey) if src.shape else beam[1]
        pa = rad(src.shape.pa) if src.shape else beam[2]

        emin, emaj = sorted([dra, ddec])
        # Determine region of interest
        rgn = sky2px(fits_alpha["wcs"], ra, dec, dra,
                     ddec, fits_alpha["dra"], beam[1])
        imslice = slice(rgn[2], rgn[3]), slice(rgn[0], rgn[1])
        alpha = image_alpha[imslice]
        xpix, ypix = alpha.shape
        xx, yy = np.ogrid[-xpix:xpix, -ypix:ypix]

        emajPix = emaj/fits_alpha["dra"]
        eminPix = emin/fits_alpha["dra"]
        # Create elliptcal mask which has same shape as source
        mask = ((xx/emajPix)**2 + (yy/eminPix)**2 <=
                1)[xpix*2-xpix:xpix*2+xpix, ypix*2-ypix:ypix*2+ypix]
        mask = ndimage.rotate(mask, angle=pa, order=0, reshape=False)

        draPix = dra/fits_alpha["dra"]
        ddPix = ddec/fits_alpha["ddec"]

        alpha *= mask
        alpha_error = image_alpha_error[imslice]*mask
        good = np.where(np.logical_and(alpha != 0, ~np.isnan(alpha)))
        alpha = alpha[good]
        alpha_error = alpha_error[good]
        good = np.where(np.logical_and(
            ~np.isnan(alpha_error), ~np.isinf(alpha_error)))

        alpha = alpha[good]
        alpha_error = alpha_error[good]

        subIm_weight = 1/alpha_error
        subIm_weighted = alpha*subIm_weight

        if len(subIm_weighted) > 0:
            subIm_normalization = np.sum(subIm_weight)
            spi = float(np.sum(subIm_weighted)/subIm_normalization)
            spi_error = 1/float(subIm_normalization)
            if spitol[0] < spi < spitol[-1]:
                sys.stdout.write("INFO: Adding spi: %.3f (at %.3g MHz) to source %s" % (
                                 spi, freq0/1e6, src.name))
                src.spectrum = Tigger.Models.ModelClasses.SpectralIndex(
                    spi, freq0)
                src.setAttribute('spi_error', spi_error)
        else:
            sys.stdout.write("ALERT: no spi info found in %s for source %s" % (
                             fitsname_alpha, src.name))

    model.save(outfile)
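The per-source spectral index above is an inverse-error weighted mean over the masked pixels (note the weights are 1/sigma, not 1/sigma**2); stripped down to a toy numpy sketch:

import numpy as np

alpha = np.array([-0.70, -0.80, -0.75])   # spi pixels inside the mask
alpha_err = np.array([0.10, 0.30, 0.20])  # their per-pixel errors

w = 1.0 / alpha_err
spi = np.sum(alpha * w) / np.sum(w)       # weighted mean, as in addSPI
spi_error = 1.0 / np.sum(w)
print(spi, spi_error)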
Example #53
def simulate(args):
    # get full time column and compute row chunks
    ms = table(args.ms)
    time = ms.getcol('TIME')
    row_chunks, tbin_idx, tbin_counts = chunkify_rows(time,
                                                      args.utimes_per_chunk)
    # convert to dask arrays
    tbin_idx = da.from_array(tbin_idx, chunks=(args.utimes_per_chunk))
    tbin_counts = da.from_array(tbin_counts, chunks=(args.utimes_per_chunk))
    n_time = tbin_idx.size
    ant1 = ms.getcol('ANTENNA1')
    ant2 = ms.getcol('ANTENNA2')
    n_ant = np.maximum(ant1.max(), ant2.max()) + 1
    flag = ms.getcol("FLAG")
    n_row, n_freq, n_corr = flag.shape
    if n_corr == 4:
        model_corr = (2, 2)
        jones_corr = (2, )
    elif n_corr == 2:
        model_corr = (2, )
        jones_corr = (2, )
    elif n_corr == 1:
        model_corr = (1, )
        jones_corr = (1, )
    else:
        raise RuntimeError("Invalid number of correlations")
    ms.close()

    # get phase dir
    radec0 = table(args.ms + '::FIELD').getcol('PHASE_DIR').squeeze()

    # get freqs
    freq = table(args.ms + '::SPECTRAL_WINDOW').getcol('CHAN_FREQ')[0].astype(
        np.float64)
    assert freq.size == n_freq

    # get source coordinates from lsm
    lsm = Tigger.load(args.sky_model)
    radec = []
    stokes = []
    spi = []
    ref_freqs = []

    for source in lsm.sources:
        radec.append([source.pos.ra, source.pos.dec])
        stokes.append([source.flux.I])
        tmp_spec = source.spectrum
        spi.append([tmp_spec.spi if tmp_spec is not None else 0.0])
        ref_freqs.append([tmp_spec.freq0 if tmp_spec is not None else 1.0])

    n_dir = len(stokes)
    radec = np.asarray(radec)
    lm = radec_to_lm(radec, radec0)

    # load in the model file
    model = np.zeros((n_freq, n_dir) + model_corr)
    stokes = np.asarray(stokes)
    ref_freqs = np.asarray(ref_freqs)
    spi = np.asarray(spi)
    for d in range(n_dir):
        Stokes_I = stokes[d] * (freq / ref_freqs[d])**spi[d]
        if n_corr == 4:
            model[:, d, 0, 0] = Stokes_I
            model[:, d, 1, 1] = Stokes_I
        elif n_corr == 2:
            model[:, d, 0] = Stokes_I
            model[:, d, 1] = Stokes_I
        else:
            model[:, d, 0] = Stokes_I

    # append antenna columns
    cols = []
    cols.append('ANTENNA1')
    cols.append('ANTENNA2')
    cols.append('UVW')

    # load in gains
    jones, alphas = make_screen(lm, freq, n_time, n_ant, jones_corr[0])
    jones = jones.astype(np.complex128)
    jones_shape = jones.shape
    jones_da = da.from_array(jones,
                             chunks=(args.utimes_per_chunk, ) +
                             jones_shape[1::])

    freqs = da.from_array(freq, chunks=(n_freq))
    lm = da.from_array(np.tile(lm[None], (n_time, 1, 1)),
                       chunks=(args.utimes_per_chunk, n_dir, 2))
    # change model to dask array
    tmp_shape = (n_time, )
    for i in range(len(model.shape)):
        tmp_shape += (1, )
    model = da.from_array(np.tile(model[None], tmp_shape),
                          chunks=(args.utimes_per_chunk, ) + model.shape)

    # load data in in chunks and apply gains to each chunk
    xds = xds_from_ms(args.ms, columns=cols, chunks={"row": row_chunks})[0]
    ant1 = xds.ANTENNA1.data
    ant2 = xds.ANTENNA2.data
    uvw = xds.UVW.data

    # apply gains
    data = compute_and_corrupt_vis(tbin_idx, tbin_counts, ant1, ant2, jones_da,
                                   model, uvw, freqs, lm)

    # Assign visibilities to args.out_col and write to ms
    xds = xds.assign(
        **{
            args.out_col: (("row", "chan", "corr"),
                           data.reshape(n_row, n_freq, n_corr))
        })
    # Create a write to the table
    write = xds_to_table(xds, args.ms, [args.out_col])

    # Submit all graph computations in parallel
    with ProgressBar():
        write.compute()

    return jones, alphas
Example #54
    write_opts['format'] = 'fits'

if not port2tigger:
    sys.exit(0)

# convert to data file to Tigger LSM
# First make dummy tigger model
tfile = tempfile.NamedTemporaryFile(suffix='.txt')
tfile.flush()

prefix = os.path.splitext(outfile)[0]
tname_lsm = prefix + ".lsm.html"
with open(tfile.name, "w") as stdw:
    stdw.write("#format:name ra_d dec_d i emaj_s emin_s pa_d\n")

model = Tigger.load(tfile.name)
tfile.close()


def tigger_src(src, idx):

    name = "SRC%d" % idx
    flux = ModelClasses.Polarization(float(src["int_flux"]),
                                     0,
                                     0,
                                     0,
                                     I_err=float(src["err_int_flux"]))
    ra, ra_err = map(numpy.deg2rad, (float(src["ra"]), float(src["err_ra"])))
    dec, dec_err = map(numpy.deg2rad,
                       (float(src["dec"]), float(src["err_dec"])))
    pos = ModelClasses.Position(ra, dec, ra_err=ra_err, dec_err=dec_err)
Example #55
#!/usr/bin/python

#This script takes all the cluster catalogs and compares each with the reference catalog (cluster_catalog.lsm.html), renaming each matching source to the name it has in the reference catalog. The new catalogs are stored under the name recluster-i.lsm.html.

import Tigger
import numpy as np

BMAJ = 0.054613333808050002 * np.pi/180 #converting to radians
filenames = ['cluster-0.lsm.html','cluster-1.lsm.html','cluster-2.lsm.html','cluster-3.lsm.html','cluster-4.lsm.html','cluster-5.lsm.html','cluster-6.lsm.html','cluster-7.lsm.html','cluster-8.lsm.html']

models = []
for f in filenames:
	models.append(Tigger.load(f))
unique = Tigger.load('cluster_catalog.lsm.html') #load the unique-source reference catalog

def coor_compare(model2, a, model1=unique, tolerance=BMAJ):
	''' Compares the source coordinates of a catalog against the reference to find matching sources. '''
	count = 0
	for src in model1.sources:
		coord1 = [src.pos.ra, src.pos.dec]
		for src2 in model2.sources:
			coord2 = [src2.pos.ra, src2.pos.dec]
			if np.sqrt((coord1[0] - coord2[0])**2 + (coord1[1] - coord2[1])**2) < tolerance:
				src2.name = src.name
				src.setAttribute('src' + src.name + '--' + str(a), True)
				count = count + 1

	# save once, after all matching sources have been renamed
	model2.save('recluster-' + str(a) + '.lsm.html')
	model1.save('renamed_catalog.lsm.html')
	print(count)
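The function is defined but never called in the listing; a driver loop consistent with the filenames above (an assumption) would be:

for a, model2 in enumerate(models):
	coor_compare(model2, a)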