Example #1
    def maxpos_im(self, im):
        """Find the position of the maximum in an image.
        Helper function that returns the position and value of the maximum
        in the image, with the position given as an [x,y] list in 0-based
        pixel coordinates.

        Parameters
        ----------
        im : str
            CASA image name.

        Returns
        -------
        tuple
            ([x,y], maxval): the 0-based pixel position of the maximum and
            the maximum value itself.
        """
        # 2D images don't store maxpos/maxval yet, so we need to grab them
        # imstat on a 512^2 image is about 0.032 sec
        # ia.getchunk is about 0.008, about 4x faster.
        # we're going to assume 2D images fit in memory and always use getchunk
        # @todo  review the use of the new casautil.getdata() style routines
        if True:
            taskinit.ia.open(im)
            plane = taskinit.ia.getchunk(blc=[0,0,0,-1],trc=[-1,-1,-1,-1],dropdeg=True)
            v = ma.masked_invalid(plane)
            taskinit.ia.close()
            mp = np.unravel_index(v.argmax(), v.shape)
            maxval = v[mp[0],mp[1]]
            maxpos = [int(mp[0]),int(mp[1])]
        else:
            imstat0 = casa.imstat(im)
            maxpos = imstat0["maxpos"][:2].tolist()
            maxval = imstat0["max"][0]
        #print "MAXPOS_IM:::",maxpos,maxval,type(maxpos[0])
        return (maxpos,maxval)
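
A minimal, CASA-free sketch of the same peak-finding idea on a synthetic array, assuming only NumPy: masked_invalid plus unravel_index ignores blanked (NaN) pixels, just as maxpos_im does above.

import numpy as np
import numpy.ma as ma

# synthetic 2-D "image" with a blanked pixel and a known peak
plane = np.zeros((8, 8))
plane[3, 5] = 7.5           # the peak we expect to find
plane[0, 0] = np.nan        # a blanked pixel that must be ignored

v = ma.masked_invalid(plane)                # mask NaN/Inf, as maxpos_im does
mp = np.unravel_index(v.argmax(), v.shape)  # flat argmax -> (row, col)
maxpos = [int(mp[0]), int(mp[1])]
maxval = float(v[mp[0], mp[1]])
print(maxpos)   # expected: [3, 5]
print(maxval)   # expected: 7.5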
Example #2
    def maxpos_im(self, im):
        """Find the position of the maximum in an image.
        Helper function that returns the position and value of the maximum
        in the image, with the position given as an [x,y] list in 0-based
        pixel coordinates.

        Parameters
        ----------
        im : str
            CASA image name.

        Returns
        -------
        tuple
            ([x,y], maxval): the 0-based pixel position of the maximum and
            the maximum value itself.
        """
        # 2D images don't store maxpos/maxval yet, so we need to grab them
        # imstat on a 512^2 image is about 0.032 sec
        # ia.getchunk is about 0.008, about 4x faster.
        # we're going to assume 2D images fit in memory and always use getchunk
        # @todo  review the use of the new casautil.getdata() style routines
        if True:
            taskinit.ia.open(im)
            plane = taskinit.ia.getchunk(blc=[0, 0, 0, -1],
                                         trc=[-1, -1, -1, -1],
                                         dropdeg=True)
            v = ma.masked_invalid(plane)
            taskinit.ia.close()
            mp = np.unravel_index(v.argmax(), v.shape)
            maxval = v[mp[0], mp[1]]
            maxpos = [int(mp[0]), int(mp[1])]
        else:
            imstat0 = casa.imstat(im)
            maxpos = imstat0["maxpos"][:2].tolist()
            maxval = imstat0["max"][0]
        #print "MAXPOS_IM:::",maxpos,maxval,type(maxpos[0])
        return (maxpos, maxval)
Example #3
def printProperties(filenames):

    from casa import imstat
    from casa import image as ia
    from casa import imager as im

    imageDir = './'

    for i, image in enumerate(filenames):
        flux = imstat(imageDir + image + '.mom0.blk.image')['flux'][0]
        ia.open(imageDir + image + '.mom0.blk.image')
        beammaj = ia.restoringbeam(channel=0)['major']['value']
        beammin = ia.restoringbeam(channel=0)['minor']['value']
        beamsizeUnit = ia.restoringbeam(channel=0)['major']['unit']
        ia.close()

        ia.open(imageDir + image + '.image')
        noise = ia.statistics()['sigma'][0]
        ia.close()

        if True: mosaic,res = 'Multiscale clean', i
        #else: mosaic,res = 'Mosaic', i*3 -3
        print '\nImage: '+mosaic+' resolution #' + str(res) + \
                '\nBeamsize: ' + str(beammaj) + '" X ' + str(beammin) + '"' + \
                '\nStd: ' + str(noise) + ' Jy/Bm' + \
                '\nFlux: ' + str(flux) + ' Jy km/s'
Example #4
def printProperties(filenames):

    from casa import imstat
    from casa import image as ia
    from casa import imager as im

    imageDir = './'

    for i, image in enumerate(filenames):
        flux = imstat(imageDir + image + '.mom0.blk.image')['flux'][0]
        ia.open(imageDir + image + '.mom0.blk.image')
        beammaj = ia.restoringbeam(channel=0)['major']['value']
        beammin = ia.restoringbeam(channel=0)['minor']['value']
        beamsizeUnit = ia.restoringbeam(channel=0)['major']['unit']
        ia.close()

        ia.open(imageDir + image + '.image')
        noise = ia.statistics()['sigma'][0]
        ia.close()

        if True: mosaic, res = 'Multiscale clean', i
        #else: mosaic,res = 'Mosaic', i*3 -3
        print '\nImage: '+mosaic+' resolution #' + str(res) + \
                '\nBeamsize: ' + str(beammaj) + '" X ' + str(beammin) + '"' + \
                '\nStd: ' + str(noise) + ' Jy/Bm' + \
                '\nFlux: ' + str(flux) + ' Jy km/s'
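
A hypothetical call to printProperties above; the base names are placeholders, and the corresponding '<name>.mom0.blk.image' and '<name>.image' CASA images are assumed to already exist in imageDir ('./').

# hypothetical image base names
filenames = ['target_robust0', 'target_robust2']
printProperties(filenames)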
Example #5
def nppb(image):
    """Work out the flux correction (number of points per beam)."""
    if True:
        # more expensive, but works for non-ALMA data
        # needs to be done on the MOM0 map
        s = casa.imstat(image)
        if s.has_key('flux'):
            beamarea = s['sum'][0]/s['flux'][0]
        else:
            beamarea = 1.0
        return beamarea
    else:
        h = casa.imhead(image, mode='list')
        # @todo should we not use the casa units for this?
        try:
            bmin = h['beamminor']['value']    # beam in arcsec (not always true)
            bmaj = h['beammajor']['value']
            cdelt1 = h['cdelt1'] * 206265.0   # cdelt is in radians; convert to arcsec
            cdelt2 = h['cdelt2'] * 206265.0
            return abs(1.13309 * bmaj * bmin / (cdelt1 * cdelt2))
        except:
            return 1.0
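
The 1.13309 constant in the else branch above is pi/(4 ln 2), the area factor of a Gaussian beam. A small plain-Python sketch of the points-per-beam arithmetic, with hypothetical beam and pixel sizes:

import math

# Gaussian beam area in pixels: pi/(4 ln 2) * bmaj * bmin / pixel area
bmaj = 2.0      # beam major-axis FWHM in arcsec (hypothetical)
bmin = 1.5      # beam minor-axis FWHM in arcsec (hypothetical)
cdelt = 0.5     # pixel size in arcsec (hypothetical)
factor = math.pi / (4.0 * math.log(2.0))   # ~1.13309
points_per_beam = factor * bmaj * bmin / (cdelt * cdelt)
print(points_per_beam)   # ~13.6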
Example #6
def nppb(image):
    """Work out the flux correction (number of points per beam)."""
    if True:
        # more expensive, but works for non-ALMA data
        # needs to be done on the MOM0 map
        s = casa.imstat(image)
        if s.has_key('flux'):
            beamarea = s['sum'][0]/s['flux'][0]
        else:
            beamarea = 1.0
        return beamarea
    else:
        h = casa.imhead(image, mode='list')
        # @todo should we not use the casa units for this?
        try:
            bmin = h['beamminor']['value']    # beam in arcsec (not always true)
            bmaj = h['beammajor']['value']
            cdelt1 = h['cdelt1'] * 206265.0   # cdelt is in radians; convert to arcsec
            cdelt2 = h['cdelt2'] * 206265.0
            return abs(1.13309 * bmaj * bmin / (cdelt1 * cdelt2))
        except:
            return 1.0
Example #7
    def run(self):
        """Runs the task.

           Parameters
           ----------
           None

           Returns
           -------
           None
        """

        self._summary = {}
        dt = utils.Dtime("CubeSpectrum")

        # our BDP's
        # b1  = input BDP
        # b1s = optional input CubeSpectrum
        # b1m = optional input Moment
        # b1p = optional input SourceList for positions
        # b2  = output BDP

        b1 = self._bdp_in[0]  # check input SpwCube (or LineCube)
        fin = b1.getimagefile(bt.CASA)
        if self._bdp_in[0]._type == bt.LINECUBE_BDP:
            use_vel = True
        else:
            use_vel = False

        sources = self.getkey("sources")
        pos = []  # blank it first, then try and grab it from the optional bdp_in's
        cmean = 0.0
        csigma = 0.0
        smax = []  # accumulate max in each spectrum for regression
        self.spec_description = []  # for summary()

        if self._bdp_in[1] != None:  # check if CubeStats_BDP
            #print "BDP[1] type: ",self._bdp_in[1]._type
            if self._bdp_in[1]._type != bt.CUBESTATS_BDP:
                raise Exception, "bdp_in[1] not a CubeStats_BDP, should never happen"
            # a table (cubestats)
            b1s = self._bdp_in[1]
            pos.append(b1s.maxpos[0])
            pos.append(b1s.maxpos[1])
            logging.info('CubeStats::maxpos,val=%s,%f' %
                         (str(b1s.maxpos), b1s.maxval))
            cmean = b1s.mean
            csigma = b1s.sigma
            dt.tag("CubeStats-pos")

        if self._bdp_in[2] != None:  # check if Moment_BDP (probably from CubeSum)
            #print "BDP[2] type: ",self._bdp_in[2]._type
            if self._bdp_in[2]._type != bt.MOMENT_BDP:
                raise Exception, "bdp_in[2] not a Moment_BDP, should never happen"
            b1m = self._bdp_in[2]
            fim = b1m.getimagefile(bt.CASA)
            pos1, maxval = self.maxpos_im(self.dir(fim))  # compute maxpos, since it is not in bdp (yet)
            logging.info('CubeSum::maxpos,val=%s,%f' % (str(pos1), maxval))
            pos.append(pos1[0])
            pos.append(pos1[1])
            dt.tag("Moment-pos")

        if self._bdp_in[3] != None:  # check if SourceList
            #print "BDP[3] type: ",self._bdp_in[3]._type
            # a table (SourceList)
            b1p = self._bdp_in[3]
            ra = b1p.table.getFullColumnByName("RA")
            dec = b1p.table.getFullColumnByName("DEC")
            peak = b1p.table.getFullColumnByName("Peak")
            if sources == []:
                # use the whole SourceList
                for (r, d, p) in zip(ra, dec, peak):
                    rdc = convert_sexa(r, d)
                    pos.append(rdc[0])
                    pos.append(rdc[1])
                    logging.info('SourceList::maxpos,val=%s,%f' %
                                 (str(rdc), p))
            else:
                # select specific ones from the source list
                for ipos in sources:
                    if ipos < len(ra):
                        radec = convert_sexa(ra[ipos], dec[ipos])
                        pos.append(radec[0])
                        pos.append(radec[1])
                        logging.info('SourceList::maxpos,val=%s,%f' %
                                     (str(radec), peak[ipos]))
                    else:
                        logging.warning('Skipping illegal source number %d' %
                                        ipos)

            dt.tag("SourceList-pos")

        # if pos[] still blank, use the AT keyword.
        if len(pos) == 0:
            pos = self.getkey("pos")

        # if still none, try the map center
        if len(pos) == 0:
            # @todo  this could result in a masked pixel and cause further havoc
            # @todo  could also take the reference pixel, but that could be outside image
            taskinit.ia.open(self.dir(fin))
            s = taskinit.ia.summary()
            pos = [int(s['shape'][0]) / 2, int(s['shape'][1]) / 2]
            logging.warning("No input positions supplied, map center chosen: %s" %
                            str(pos))
            dt.tag("map-center")

        # exhausted all sources where pos[] can be set; if still zero, bail out
        if len(pos) == 0:
            raise Exception, "No positions found from input BDP's or pos="

        # convert this regular list to a list of tuples with duplicates removed
        # sadly the order is lost.
        pos = list(set(zip(pos[0::2], pos[1::2])))
        npos = len(pos)

        dt.tag("open")

        bdp_name = self.mkext(fin, "csp")
        b2 = CubeSpectrum_BDP(bdp_name)
        self.addoutput(b2)

        imval = range(npos)  # spectra, one for each pos (placeholder)
        planes = range(npos)  # labels for the tables (placeholder)
        images = {}  # png's accumulated

        for i in range(npos):  # loop over pos, they can have mixed types now
            sd = []
            caption = "Spectrum"
            xpos = pos[i][0]
            ypos = pos[i][1]
            if type(xpos) != type(ypos):
                print "POS:", xpos, ypos
                raise Exception, "position pair not of the same type"
            if type(xpos) == int:
                # for integers, boxes are allowed, even multiple
                box = '%d,%d,%d,%d' % (xpos, ypos, xpos, ypos)
                # convention for summary is (box)
                cbox = '(%d,%d,%d,%d)' % (xpos, ypos, xpos, ypos)
                # use extend here, not append, we want individual values in a list
                sd.extend([xpos, ypos, cbox])
                caption = "Average Spectrum at %s" % cbox
                if False:
                    # this will fail on 3D cubes (see CAS-7648)
                    imval[i] = casa.imval(self.dir(fin), box=box)
                else:
                    # work around that CAS-7648 bug
                    # another approach is the ia.getprofile(), see CubeStats, this will
                    # also integrate over regions, imval will not (!!!)
                    region = 'centerbox[[%dpix,%dpix],[1pix,1pix]]' % (xpos, ypos)
                    caption = "Average Spectrum at %s" % region
                    imval[i] = casa.imval(self.dir(fin), region=region)
            elif type(xpos) == str:
                # this is tricky: stay under 1 pixel, or you get a 2x2 region back
                region = 'centerbox[[%s,%s],[1pix,1pix]]' % (xpos, ypos)
                caption = "Average Spectrum at %s" % region
                sd.extend([xpos, ypos, region])
                imval[i] = casa.imval(self.dir(fin), region=region)
            else:
                print "Data type: ", type(xpos)
                raise Exception, "Data type for region not handled"
            dt.tag("imval")

            flux = imval[i]['data']
            if len(flux.shape) > 1:  # rare case if we step on a boundary between cells?
                logging.warning("source %d has spectrum shape %s: averaging the spectra" %
                                (i, repr(flux.shape)))
                flux = np.average(flux, axis=0)
            logging.debug('minmax: %f %f %d' %
                          (flux.min(), flux.max(), len(flux)))
            smax.append(flux.max())
            if i == 0:  # for first point record few extra things
                if len(imval[i]['coords'].shape) == 2:    # normal case: 1 pixel
                    freqs = imval[i]['coords'].transpose()[2] / 1e9      # convert to GHz  @todo: input units ok?
                elif len(imval[i]['coords'].shape) == 3:  # rare case if > 1 point in imval()
                    freqs = imval[i]['coords'][0].transpose()[2] / 1e9   # convert to GHz  @todo: input units ok?
                else:
                    logging.fatal("bad shape %s in freq return from imval - SHOULD NEVER HAPPEN"
                                  % imval[i]['coords'].shape)
                chans = np.arange(len(freqs))  # channels 0..nchans-1
                unit = imval[i]['unit']
                restfreq = casa.imhead(self.dir(fin), mode="get",
                                       hdkey="restfreq")['value'] / 1e9  # in GHz
                dt.tag("imhead")
                vel = (1 - freqs / restfreq) * utils.c  # @todo: use a function (and what about relativistic?)

            # construct the Table for CubeSpectrum_BDP
            # @todo note data needs to be a tuple, later to be column_stack'd
            labels = ["channel", "frequency", "flux"]
            units = ["number", "GHz", unit]
            data = (chans, freqs, flux)

            if i == 0:
                # plane 0 : we are allowing a multiplane table, so the first plane is special
                table = Table(columns=labels,
                              units=units,
                              data=np.column_stack(data),
                              planes=["0"])
            else:
                # planes 1,2,3.... are stacked onto the previous one
                table.addPlane(np.column_stack(data), "%d" % i)

            # example plot , one per position for now
            if use_vel:
                x = vel
                xlab = 'VLSR (km/s)'
            else:
                x = chans
                xlab = 'Channel'
            y = [flux]
            sd.append(xlab)
            if type(xpos) == int:
                # grab the RA/DEC... kludgy
                h = casa.imstat(self.dir(fin), box=box)
                ra = h['blcf'].split(',')[0]
                dec = h['blcf'].split(',')[1]
                title = '%s %d @ %d,%d = %s,%s' % (bdp_name, i, xpos, ypos, ra, dec)
            else:
                # or use box, once we allow non-points
                title = '%s %d @ %s,%s' % (bdp_name, i, xpos, ypos)

            myplot = APlot(ptype=self._plot_type,
                           pmode=self._plot_mode,
                           abspath=self.dir())
            ylab = 'Flux (%s)' % unit
            p1 = "%s_%d" % (bdp_name, i)
            myplot.plotter(x,
                           y,
                           title,
                           p1,
                           xlab=xlab,
                           ylab=ylab,
                           thumbnail=True)
            # Why not use p1 as the key?
            ii = images["pos%d" % i] = myplot.getFigure(figno=myplot.figno,
                                                        relative=True)
            thumbname = myplot.getThumbnail(figno=myplot.figno, relative=True)
            sd.extend([ii, thumbname, caption, fin])
            self.spec_description.append(sd)

        logging.regression("CSP: %s" % str(smax))

        image = Image(images=images, description="CubeSpectrum")
        b2.setkey("image", image)
        b2.setkey("table", table)
        b2.setkey("sigma", csigma)  # TODO: not always available
        b2.setkey("mean", cmean)  # TODO: not always available

        if True:
            #       @todo     only first plane due to limitation in exportTable()
            islash = bdp_name.find('/')
            if islash < 0:
                tabname = self.dir("testCubeSpectrum.tab")
            else:
                tabname = self.dir(bdp_name[:islash] + "/testCubeSpectrum.tab")
            table.exportTable(tabname, cols=["frequency", "flux"])
        dt.tag("done")
        # For a single spectrum this is
        # SummaryEntry([[data for spec1]], "CubeSpectrum_AT",taskid)
        # For multiple spectra this is
        # SummaryEntry([[data for spec1],[data for spec2],...], "CubeSpectrum_AT",taskid)
        self._summary["spectra"] = SummaryEntry(self.spec_description,
                                                "CubeSpectrum_AT",
                                                self.id(True))
        taskargs = "pos=" + str(pos)
        taskargs += '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; <span style="background-color:white">&nbsp;' + fin.split('/')[0] + '&nbsp;</span>'
        for v in self._summary:
            self._summary[v].setTaskArgs(taskargs)
        dt.tag("summary")
        dt.end()
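
The velocity axis above comes from vel = (1 - freqs/restfreq) * utils.c, the non-relativistic radio convention. A NumPy-only restatement with hypothetical frequencies, assuming utils.c is the speed of light in km/s:

import numpy as np

c = 299792.458                            # speed of light in km/s (assumed unit of utils.c)
restfreq = 115.271                        # GHz, hypothetical rest frequency
freqs = np.linspace(115.20, 115.34, 8)    # GHz, hypothetical channel frequencies
vel = (1.0 - freqs / restfreq) * c        # radio-convention velocity in km/s
chans = np.arange(len(freqs))             # channels 0..nchan-1
for ch, v in zip(chans, vel):
    print("chan %d  vel %8.1f km/s" % (ch, v))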
Example #8
    def run(self):
        """ The run method creates the BDP

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        dt = utils.Dtime("SFind2D")  # tagging time
        self._summary = {}
        # get key words that user input
        nsigma = self.getkey("numsigma")
        sigma = self.getkey("sigma")
        region = self.getkey("region")
        robust = self.getkey("robust")
        snmax = self.getkey("snmax")
        nmax = self.getkey("nmax")
        ds9 = True  # writes a "ds9.reg" file
        mpl = True  # aplot.map1() plot
        dynlog = 20.0  # above this dynamic range the finder chart is log intensity-scaled
        bpatch = True  # patch units to Jy/beam for ia.findsources()

        # get the input casa image from bdp[0]
        bdpin = self._bdp_in[0]
        infile = bdpin.getimagefile(bt.CASA)
        if mpl:
            data = np.flipud(np.rot90(casautil.getdata(self.dir(infile)).data))

        # check if there is a 2nd image (which will be a PB)
        for i in range(len(self._bdp_in)):
            print 'BDP', i, type(self._bdp_in[i])

        if self._bdp_in[2] != None:
            bdpin_pb = self._bdp_in[1]
            bdpin_cst = self._bdp_in[2]
            print "Need to process PB"
        else:
            bdpin_pb = None
            bdpin_cst = self._bdp_in[1]
            print "No PB given"

        # get the output bdp basename
        slbase = self.mkext(infile, 'sl')

        # make sure it's a 2D map
        if not casautil.mapdim(self.dir(infile), 2):
            raise Exception, "Input map dimension not 2: %s" % infile

        # arguments for imstat call if required
        args = {"imagename": self.dir(infile)}
        if region != "":
            args["region"] = region
        dt.tag("start")

        # The following code sets the sigma level for searching for sources using
        # the sigma and snmax keyword as appropriate
        # if no CubeStats BDP was given and no sigma was specified:
        # find a noise level via casa.imstat()
        # if a CubeStat_BDP is given get it from there.
        if bdpin_cst == None:
            # get statistics from input image with imstat because no CubeStat_BDP
            stat = casa.imstat(**args)
            dmin = float(stat["min"][0])  # these would be wrong if robust were used already
            dmax = float(stat["max"][0])
            args.update(casautil.parse_robust(robust))  # only now add robust keywords for the sigma
            stat = casa.imstat(**args)
            if sigma <= 0.0:
                sigma = float(stat["sigma"][0])
            dt.tag("imstat")
        else:
            # get statistics from CubeStat_BDP
            sigma = bdpin_cst.get("sigma")
            dmin = bdpin_cst.get("minval")
            dmax = bdpin_cst.get("maxval")

        self.setkey("sigma", sigma)
        # calculate cutoff based either on RMS or dynamic range limitation
        drange = dmax / (nsigma * sigma)
        if snmax < 0.0:
            snmax = drange
        if drange > snmax:
            cutoff = 1.0 / snmax
        else:
            cutoff = 1.0 / drange
        logging.info("sigma, dmin, dmax, snmax, cutoff %g %g %g %g %g" %
                     (sigma, dmin, dmax, snmax, cutoff))
        # define arguments for call to findsources
        args2 = {"cutoff": cutoff}
        args2["nmax"] = nmax
        if region != "":
            args2["region"] = region
        #args2["mask"] = ""
        args2["point"] = False
        args2["width"] = 5
        args2["negfind"] = False
        # set-up for SourceList_BDP
        slbdp = SourceList_BDP(slbase)

        # connect to casa image and call casa ia.findsources tool
        ia = taskinit.iatool()
        ia.open(self.dir(infile))

        # findsources() cannot deal with  'Jy/beam.km/s' ???
        # so for the duration of findsources() we patch it
        bunit = ia.brightnessunit()
        if bpatch and bunit != 'Jy/beam':
            logging.warning("Temporarily patching your %s units to Jy/beam for ia.findsources()"
                            % bunit)
            ia.setbrightnessunit('Jy/beam')
        else:
            bpatch = False
        atab = ia.findsources(**args2)
        if bpatch:
            ia.setbrightnessunit(bunit)

        taskargs = "nsigma=%4.1f sigma=%g region=%s robust=%s snmax=%5.1f nmax=%d" % (
            nsigma, sigma, str(region), str(robust), snmax, nmax)
        dt.tag("findsources")
        nsources = atab["nelements"]
        xtab = []
        ytab = []
        logscale = False
        sumflux = 0.0
        if nsources > 0:
            # @TODO: Why are Xpix, YPix not stored in the table?
            #        -> PJT: I left them out since they are connected to an image which may not be available here
            #                but we should store the frequency of the observation here for later bandmerging
            logging.debug("%s" % str(atab['component0']['shape']))
            logging.info(
                "Right Ascen.  Declination   X(pix)   Y(pix)      Peak       Flux    Major   Minor    PA    SNR"
            )
            funits = atab['component0']['flux']['unit']
            if atab['component0']['shape'].has_key('majoraxis'):
                sunits = atab['component0']['shape']['majoraxis']['unit']
                aunits = atab['component0']['shape']['positionangle']['unit']
            else:
                sunits = "n/a"
                aunits = "n/a"
            punits = ia.summary()['unit']
            logging.info(
                "                                               %s       %s    %s   %s   %s"
                % (punits, funits, sunits, sunits, aunits))
            #
            # @todo future improvement is to look at image coordinates and control output appropriately
            #
            if ds9:
                # @todo variable name
                regname = self.mkext(infile, 'ds9.reg')
                fp9 = open(self.dir(regname), "w")
            sn0 = -1.0
            for i in range(nsources):
                c = "component%d" % i
                name = "%d" % (i + 1)
                r = atab[c]['shape']['direction']['m0']['value']
                d = atab[c]['shape']['direction']['m1']['value']
                pixel = ia.topixel([r, d])
                xpos = pixel['numeric'][0]
                ypos = pixel['numeric'][1]
                rd = ia.toworld([xpos, ypos], 's')
                ra = rd['string'][0][:12]
                dec = rd['string'][1][:12]
                flux = atab[c]['flux']['value'][0]
                sumflux = sumflux + flux
                if atab[c]['shape'].has_key('majoraxis'):
                    smajor = atab[c]['shape']['majoraxis']['value']
                    sminor = atab[c]['shape']['minoraxis']['value']
                    sangle = atab[c]['shape']['positionangle']['value']
                else:
                    smajor = 0.0
                    sminor = 0.0
                    sangle = 0.0
                peakstr = ia.pixelvalue([xpos, ypos, 0, 0])
                if len(peakstr) == 0:
                    logging.warning("Problem with source %d @ %d,%d" %
                                    (i, xpos, ypos))
                    continue
                peakf = peakstr['value']['value']
                snr = peakf / sigma
                if snr > dynlog:
                    logscale = True
                if snr > sn0:
                    sn0 = snr
                logging.info(
                    "%s %s %8.2f %8.2f %10.3g %10.3g %7.3f %7.3f %6.1f %6.1f" %
                    (ra, dec, xpos, ypos, peakf, flux, smajor, sminor, sangle,
                     snr))

                xtab.append(xpos)
                ytab.append(ypos)
                slbdp.addRow(
                    [name, ra, dec, flux, peakf, smajor, sminor, sangle])
                if ds9:
                    ras = ra
                    des = dec.replace('.', ':', 2)
                    msg = 'ellipse(%s,%s,%g",%g",%g) # text={%s}' % (
                        ras, des, smajor, sminor, sangle + 90.0, i + 1)
                    fp9.write("%s\n" % msg)
            if ds9:
                fp9.close()
                logging.info("Wrote ds9.reg")
            dt.tag("table")
        logging.regression("CONTFLUX: %d %g" % (nsources, sumflux))

        summary = ia.summary()
        beammaj = summary['restoringbeam']['major']['value']
        beammin = summary['restoringbeam']['minor']['value']
        beamunit = summary['restoringbeam']['minor']['unit']
        beamang = summary['restoringbeam']['positionangle']['value']
        angunit = summary['restoringbeam']['positionangle']['unit']
        # @todo add to table comments?
        logging.info(" Fitted Gaussian size; NOT deconvolved source size.")
        logging.info(
            " Restoring Beam: Major axis: %10.3g %s , Minor axis: %10.3g %s , PA: %5.1f %s"
            % (beammaj, beamunit, beammin, beamunit, beamang, angunit))
        # form into a xml table

        # output is a table_bdp
        self.addoutput(slbdp)

        # instantiate a plotter for all plots made herein
        myplot = APlot(ptype=self._plot_type,
                       pmode=self._plot_mode,
                       abspath=self.dir())

        # make output png with circles marking sources found
        if mpl:
            circles = []
            nx = data.shape[1]  # data[] array was already flipud(rot90)'d
            ny = data.shape[0]  #
            for (x, y) in zip(xtab, ytab):
                circles.append([x, y, 1])
            # @todo variable name
            if logscale:
                logging.warning("LogScaling applied")
                data = data / sigma
                data = np.where(data < 0, -np.log10(1 - data),
                                +np.log10(1 + data))
            if nsources == 0:
                title = "SFind2D: 0 sources above S/N=%.1f" % (nsigma)
            elif nsources == 1:
                title = "SFind2D: 1 source (%.1f < S/N < %.1f)" % (nsigma, sn0)
            else:
                title = "SFind2D: %d sources (%.1f < S/N < %.1f)" % (
                    nsources, nsigma, sn0)
            myplot.map1(data,
                        title,
                        slbase,
                        thumbnail=True,
                        circles=circles,
                        zoom=self.getkey("zoom"))

        #---------------------------------------------------------
        # Get the figure and thumbnail names and create a caption
        #---------------------------------------------------------
        imname = myplot.getFigure(figno=myplot.figno, relative=True)
        thumbnailname = myplot.getThumbnail(figno=myplot.figno, relative=True)
        caption = "Image of input map with sources found by SFind2D overlaid in green."
        slbdp.table.description = "Table of source locations and sizes (not deconvolved)"

        #---------------------------------------------------------
        # Add finder image to the BDP
        #---------------------------------------------------------
        image = Image(images={bt.PNG: imname},
                      thumbnail=thumbnailname,
                      thumbnailtype=bt.PNG,
                      description=caption)
        slbdp.image.addimage(image, "finderimage")

        #-------------------------------------------------------------
        # Create the summary entry for the table and image
        #-------------------------------------------------------------
        self._summary["sources"] = SummaryEntry(
            [slbdp.table.serialize(),
             slbdp.image.serialize()], "SFind2D_AT", self.id(True), taskargs)

        dt.tag("done")
        dt.end()
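
A standalone restatement of the cutoff arithmetic used above before ia.findsources(): the search depth is numsigma*sigma expressed as a fraction of the peak, limited by the snmax dynamic-range cap. The numbers are hypothetical.

def sn_cutoff(dmax, sigma, nsigma, snmax):
    # dynamic range of the map relative to the requested significance level
    drange = dmax / (nsigma * sigma)
    if snmax < 0.0:        # negative snmax means "no cap"
        snmax = drange
    # fractional cutoff; 1/snmax is the floor
    return 1.0 / snmax if drange > snmax else 1.0 / drange

# hypothetical: peak 0.1 Jy/beam, sigma 1 mJy/beam, 5-sigma search
print(sn_cutoff(0.1, 0.001, 5.0, snmax=35.0))   # 0.05 = 5*sigma/peak
print(sn_cutoff(0.1, 0.001, 5.0, snmax=10.0))   # 0.1, capped at 1/snmax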
Example #9
    def run(self):
        """ The run method, calculates the moments and creates the BDP(s)

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        self._summary = {}
        momentsummary = []
        dt = utils.Dtime("Moment")

        # variable to track if we are using a single cutoff for all moment maps
        allsame = False
        moments = self.getkey("moments")
        numsigma = self.getkey("numsigma")
        mom0clip = self.getkey("mom0clip")
        # determine if there is only 1 cutoff or if there is a cutoff for each moment
        if len(moments) != len(numsigma):
            if len(numsigma) != 1:
                raise Exception("Length of numsigma and moment lists do not match. They must be the same length or the length of the cutoff list must be 1.")
            allsame = True
        # default moment file extensions, this is information copied from casa.immoments()
        momentFileExtensions = {-1: ".average",
                                 0: ".integrated",
                                 1: ".weighted_coord",
                                 2: ".weighted_dispersion_coord",
                                 3: ".median",
                                 4: "",
                                 5: ".standard_deviation",
                                 6: ".rms",
                                 7: ".abs_mean_dev",
                                 8: ".maximum",
                                 9: ".maximum_coord",
                                10: ".minimum",
                                11: ".minimum_coord",
                                }

        logging.debug("MOMENT: %s %s %s" %  (str(moments), str(numsigma), str(allsame)))

        # get the input casa image from bdp[0]
        # also get the channels the line actually covers (if any)
        bdpin = self._bdp_in[0]
        infile = bdpin.getimagefile(bt.CASA)
        chans = self.getkey("chans")
        # the basename of the moments, we will append _0, _1, etc.
        basename = self.mkext(infile, "mom")
        fluxname = self.mkext(infile, "flux")
        # beamarea = nppb(self.dir(infile))
        beamarea = 1.0  # until we have it from the MOM0 map

        sigma0 = self.getkey("sigma")
        sigma  = sigma0

        ia = taskinit.iatool()

        dt.tag("open")

        # if no CubeStats BDP was given and no sigma was specified, find a
        # noise level via casa.imstat()
        if self._bdp_in[1] is None and sigma <= 0.0:
            raise Exception("A sigma or a CubeStats_BDP must be input to calculate the cutoff")
        elif self._bdp_in[1] is not None:
            sigma = self._bdp_in[1].get("sigma")

        # immoments is a bit peculiar. If you give one moment, it will use
        # exactly the outfile you picked; for multiple moments, it will pick
        # extensions such as .integrated [0], .weighted_coord [1], etc.
        # We loop over the moments and use the numeric extension instead.
        # This might be a laborious loop for big input cubes.
        #
        # arguments for immoments
        args = {"imagename" : self.dir(infile),
                "moments"   : moments,
                "outfile"   : self.dir(basename)}

        # set the channels if given
        if chans != "":
            args["chans"] = chans
        # error check the mom0clip input
        if mom0clip > 0.0 and 0 not in moments:
            logging.warning("mom0clip given, but no moment0 map was requested. One will be generated anyway.")
            # add moment0 to the list of computed moments, but it has to be first
            moments.insert(0,0)
            if not allsame:
                numsigma.insert(0, 2.0*sigma)

        if allsame:
            # this is only executed now if len(moments) > 1 and len(cutoff)==1
            args["excludepix"] = [-numsigma[0] * sigma, numsigma[0] * sigma]
            casa.immoments(**args)
            dt.tag("immoments-all")
        else:
            # this is executed if len(moments)==len(cutoff), even when len=1
            for i in range(len(moments)):
                args["excludepix"] = [-numsigma[i] * sigma, numsigma[i] * sigma]
                args["moments"] = moments[i]
                args["outfile"] = self.dir(basename + momentFileExtensions[moments[i]])
                casa.immoments(**args)
                dt.tag("immoments-%d" % moments[i])

        taskargs = "moments=%s numsigma=%s" % (str(moments), str(numsigma)) 
        if sigma0 > 0:
            taskargs = taskargs + " sigma=%.2f" % sigma0
        if mom0clip > 0:
            taskargs = taskargs + " mom0clip=%g" % mom0clip
        if chans == "": 
            taskargs = taskargs + " chans=all"
        else:
            taskargs = taskargs + " chans=%s" % str(chans)
        taskargs += '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; <span style="background-color:white">&nbsp;' + basename.split('/')[0] + '&nbsp;</span>'

        # generate the mask to be applied to all but moment 0
        if mom0clip > 0.0:
            # get the statistics from mom0 map
            # this is usually a very biased map, so unclear if mom0sigma is all that reliable
            args = {"imagename": self.dir(infile)}
            stat = casa.imstat(imagename=self.dir(basename + momentFileExtensions[0]))
            mom0sigma = float(stat["sigma"][0])
            # generate a temporary masked file, mask will be copied to other moments
            args = {"imagename" : self.dir(basename + momentFileExtensions[0]),
                    "expr"      : 'IM0[IM0>%f]' % (mom0clip * mom0sigma),
                    "outfile"   : self.dir("mom0.masked")
                    }
            casa.immath(**args)
            # get the default mask name
            ia.open(self.dir("mom0.masked"))
            defmask = ia.maskhandler('default')
            ia.close()
            dt.tag("mom0clip")

        # loop over moments to rename them to _0, _1, _2 etc.
        # apply a mask as well for proper histogram creation
        map = {}
        myplot = APlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())
        implot = ImPlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())

        for mom in moments:
            figname = imagename = "%s_%i" % (basename, mom)
            tempname = basename + momentFileExtensions[mom]
            # rename and remove the old one if there is one
            utils.rename(self.dir(tempname), self.dir(imagename))
            # copy the moment0 mask if requested; this depends on that mom0 was done before
            if mom0clip > 0.0 and mom != 0:
                #print "PJT: output=%s:%s" % (self.dir(imagename), defmask[0])
                #print "PJT: inpmask=%s:%s" % (self.dir("mom0.masked"),defmask[0])
                makemask(mode="copy", inpimage=self.dir("mom0.masked"),
                         output="%s:%s" % (self.dir(imagename), defmask[0]),
                         overwrite=True, inpmask="%s:%s" % (self.dir("mom0.masked"),
                                                            defmask[0]))
                ia.open(self.dir(imagename))
                ia.maskhandler('set', defmask)
                ia.close()
                dt.tag("makemask")
            if mom == 0:
                beamarea = nppb(self.dir(imagename))
            implot.plotter(rasterfile=imagename,figname=figname,
                           colorwedge=True,zoom=self.getkey("zoom"))
            imagepng  = implot.getFigure(figno=implot.figno,relative=True)
            thumbname = implot.getThumbnail(figno=implot.figno,relative=True)
            images = {bt.CASA : imagename, bt.PNG  : imagepng}
            thumbtype=bt.PNG
            dt.tag("implot")

            # get the data for a histogram (ia access is about 1000-2000x faster than imval())
            map[mom] = casautil.getdata(self.dir(imagename))
            data = map[mom].compressed()
            dt.tag("getdata")

            # make the histogram plot

            # get the label for the x axis
            bunit = casa.imhead(imagename=self.dir(imagename), mode="get", hdkey="bunit")
            # object for the caption
            objectname = casa.imhead(imagename=self.dir(imagename), mode="get", hdkey="object")

            # Make the histogram plot
            # Since we give abspath in the constructor, figname should be relative
            auxname = imagename + '_histo'
            auxtype = bt.PNG
            myplot.histogram(columns = data,
                             figname = auxname,
                             xlab    = bunit,
                             ylab    = "Count",
                             title   = "Histogram of Moment %d: %s" % (mom, imagename), thumbnail=True)

            casaimage = Image(images    = images,
                                    auxiliary = auxname,
                                    auxtype   = auxtype,
                                    thumbnail = thumbname,
                                    thumbnailtype = thumbtype)
            auxname = myplot.getFigure(figno=myplot.figno,relative=True)
            auxthumb = myplot.getThumbnail(figno=myplot.figno,relative=True)

            if hasattr(self._bdp_in[0], "line"):   # SpwCube doesn't have Line
                line = deepcopy(getattr(self._bdp_in[0], "line"))
                if not isinstance(line, Line):
                    line = Line(name="Unidentified")
            else:
                # fake a Line if there wasn't one
                line = Line(name="Unidentified")
            # add the BDP to the output array
            self.addoutput(Moment_BDP(xmlFile=imagename, moment=mom,
                           image=deepcopy(casaimage), line=line))
            dt.tag("ren+mask_%d" % mom)

            imcaption = "%s Moment %d map of Source %s" % (line.name, mom, objectname)
            auxcaption = "Histogram of %s Moment %d of Source %s" % (line.name, mom, objectname)
            thismomentsummary = [line.name, mom, imagepng, thumbname, imcaption,
                                 auxname, auxthumb, auxcaption, infile]
            momentsummary.append(thismomentsummary)

        if map.has_key(0) and map.has_key(1) and map.has_key(2):
            logging.debug("MAPs present: %s" % (map.keys()))

            # m0 needs a new mask, inherited from the more restricted m1 (and m2)
            m0 = ma.masked_where(map[1].mask,map[0])
            m1 = map[1]
            m2 = map[2]
            m01 = m0*m1
            m02 = m0*m1*m1
            m22 = m0*m2*m2
            sum0 = m0.sum()
            vmean = m01.sum()/sum0
            # lacking the full 3D cube, get two estimates; currently only sig1 is used
            sig1  = math.sqrt(m02.sum()/sum0 - vmean*vmean)
            sig2  = m2.max()
            #vsig = max(sig1,sig2)
            vsig = sig1
            
            # consider clipping in the masked array (mom0clip)
            # @todo   i can't use info from line, so just borrow basename for now for grepping
            #         this also isn't really the flux, the points per beam is still in there
            loc = basename.rfind('/')
            sum1 = ma.masked_less(map[0],0.0).sum()   # mom0clip
            # print out:   LINE,FLUX1,FLUX0,BEAMAREA,VMEAN,VSIGMA for regression
            # the linechans parameter in bdpin is not useful to print out here, it's local to the LineCube
            s_vlsr = admit.Project.summaryData.get('vlsr')[0].getValue()[0]
            s_rest = admit.Project.summaryData.get('restfreq')[0].getValue()[0]/1e9
            s_line = line.frequency
            if loc>0:
                if basename[:loc][0:2] == 'U_':
                    # for U_ lines we'll reference the VLSR w.r.t. RESTFREQ in that band
                    if abs(vmean) > vsig:
                        vwarn = '*'
                    else:
                        vwarn = ''
                    vlsr = vmean + (1.0-s_line/s_rest)*utils.c
                    msg = "MOM0FLUX: %s %g %g %g %g %g %g" % (basename[:loc],map[0].sum(),sum0,beamarea,vmean,vlsr,vsig)
                else:
                    # for identified lines we'll assume the ID was correct and not bother with RESTFREQ
                    msg = "MOM0FLUX: %s %g %g %g %g %g %g" % (basename[:loc],map[0].sum(),sum0,beamarea,vmean,vmean,vsig)
            else:
                msg = "MOM0FLUX: %s %g %g %g %g %g %g" % ("SPW_FULL"    ,map[0].sum(),sum0,beamarea,vmean,vmean,vsig)
            logging.regression(msg)
            dt.tag("mom0flux")

            # create a histogram of flux per channel

            # grab the X coordinates for the histogram, we want them in km/s
            # restfreq should also be in summary
            restfreq = casa.imhead(self.dir(infile),mode="get",hdkey="restfreq")['value']/1e9    # in GHz
            # print "PJT  %.10f %.10f" % (restfreq,s_rest)
            imval0 = casa.imval(self.dir(infile))
            freqs = imval0['coords'].transpose()[2]/1e9
            x = (1-freqs/restfreq)*utils.c
            # 
            h = casa.imstat(self.dir(infile), axes=[0,1])
            if h.has_key('flux'):
                flux0 = h['flux']
            else:
                flux0 = h['sum']/beamarea
            flux0sum = flux0.sum() * abs(x[1]-x[0])
            # @todo   make a flux1 with fluxes derived from a good mask
            flux1 = flux0 
            # construct histogram
            title = 'Flux Spectrum (%g)' % flux0sum
            xlab = 'VLSR (km/s)'
            ylab = 'Flux (Jy)'
            myplot.plotter(x,[flux0,flux1],title=title,figname=fluxname,xlab=xlab,ylab=ylab,histo=True)
            dt.tag("flux-spectrum")
            
        self._summary["moments"] = SummaryEntry(momentsummary, "Moment_AT", 
                                                self.id(True), taskargs)
        # get rid of the temporary mask
        if mom0clip > 0.0: 
            utils.rmdir(self.dir("mom0.masked"))

        dt.tag("done")
        dt.end()
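
A NumPy-only restatement of the intensity-weighted mean velocity and dispersion derived from the moment maps above, on tiny hypothetical arrays (m0 = integrated intensity, m1 = mean velocity, with m0 inheriting the stricter m1 mask as in the code):

import numpy as np
import numpy.ma as ma

m0 = ma.masked_invalid(np.array([[1.0, 2.0], [3.0, np.nan]]))   # hypothetical mom0
m1 = ma.masked_invalid(np.array([[-5.0, 0.0], [5.0, np.nan]]))  # hypothetical mom1

m0 = ma.masked_where(m1.mask, m0)   # m0 gets the stricter m1 mask
sum0 = m0.sum()
vmean = (m0 * m1).sum() / sum0                               # intensity-weighted mean velocity
vsig = np.sqrt((m0 * m1 * m1).sum() / sum0 - vmean * vmean)  # intensity-weighted dispersion
print(vmean)   # ~1.67
print(vsig)    # ~3.73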
Example #10
def blankcube(image,
              dummyMS,
              smooth=True,
              verbose=True,
              region='centerbox[[10h21m45s,18.05.14.9],[15arcmin,15arcmin]]',
              ruthless=False,
              extension='.image',
              beamround='int',
              blankThreshold=2.5,
              moments=[0]):
    '''
    Parameters
    ----------
    image : string
        Base name of image. If target image has extension other than '.image',
        change the extension keyword.

    dummyMS : string
        MS file required for blanking process in CASA

    smooth : bool, optional
        Smooth the image to a circular beam before blanking?
        Default True

    verbose : bool, optional

    region : string, optional
        region parameter featured in CASA

    ruthless : bool, optional
        Delete previous outputs from blankcube

    extension : string, optional
        Extension of the target image. Must include the '.', e.g., '.restored'
        Default '.image'

    beamround : str or float
        If a float, the cube is smoothed to beamround * beamsize; if 'int'
        (the default), the beam size is rounded up to the nearest arcsec.

    blankThreshold : float, optional
        Blanking threshold: pixels below blankThreshold times the standard
        deviation of the (smoothed) cube are blanked.
        Default = 2.5 (Walter et al. 2008)

    moments : list, optional
        Moments to calculate from cube. Options are 0,1,2.
        Example: [0,1,2]
        Default: [0]

    Returns
    -------
    out : null

    Examples
    --------

    '''

    from casa import immath, imsmooth, immoments
    from casa import image as ia
    from casa import imager as im
    from casa import quanta as qa
    import os
    import numpy as np

    # Delete files associated with previous runs
    if ruthless:
        os.system('rm -rf ' + image + '.smooth.blk.image')
        os.system('rm -rf ' + image + '.smooth.image')
        os.system('rm -rf ' + image + '.blk.image')
        os.system('rm -rf ' + image + '.mask')

    imageDir = './'

    # Create moment maps
    mom0, mom1, mom2 = False, False, False
    if len(moments) > 0:
        for i, moment in enumerate(moments):
            if moment == 0:
                mom0 = True
            if moment == 1:
                mom1 = True
            if moment == 2:
                mom2 = True
    if mom1 or mom2:
        beamScale = 1.01
    elif mom0:
        beamScale = 2.

    # determine beamsize of cube
    ia.open(imageDir + image + extension)
    beamsizes = np.zeros(ia.shape()[2])
    for i in range(ia.shape()[2]):
        beamsizes[i] = ia.restoringbeam(channel=i)['major']['value']
    beamsizeUnit = ia.restoringbeam(channel=i)['major']['unit']
    beamsize = qa.convert(str(beamsizes.max()) + beamsizeUnit,
                          'arcsec')['value']
    if type(beamround) == float:
        beamsize_smooth = beamround * beamsize
    else:
        beamsize_smooth = np.ceil(beamsize)  #beamsize_smooth = 1.01 * beamsize
    ia.close()

    if verbose:
        print 'Max beamsize is ' + str(beamsize) + '"'

    if not os.path.exists(image + '.blk.image'):
        # create cube for blanking
        if smooth:  # smooth to a larger beam if the user desires
            if verbose:
                print 'Convolving to ' + str(beamsize_smooth) + '"'

            imsmooth(imagename=image + extension,
                     outfile=image + '.blk.image',
                     major=str(beamsize_smooth) + 'arcsec',
                     minor=str(beamsize_smooth) + 'arcsec',
                     region=region,
                     pa='0deg',
                     targetres=True)
        else:  # do not smooth
            immath(imagename=image + extension,
                   outfile=image + '.blk.image',
                   mode='evalexpr',
                   region=region,
                   expr='IM0')

    if not os.path.exists(image + '.smooth.image'):
        # convolve cube to 2X beam for blanking
        imsmooth(imagename=image + extension,
                 outfile=image + '.smooth.image',
                 major=str(beamsize * beamScale) + 'arcsec',
                 minor=str(beamsize * beamScale) + 'arcsec',
                 pa='0deg',
                 region=region,
                 targetres=True)

    # determine threshold of cube
    ia.open(image + '.smooth.image')
    threshold = ia.statistics()['sigma'][0] * blankThreshold
    ia.close()

    # blank the cube at threshold*sigma
    ia.open(image + '.smooth.image')
    ia.calcmask(mask=image + '.smooth.image > ' + str(threshold), name='mask1')
    wait = 'waits for calcmask to close'
    ia.close()

    # hand blank the cube
    im.open(dummyMS)
    pause = None
    while pause is None:
        im.drawmask(image=image + '.smooth.image', mask=image + '.mask')
        pause = 0
    im.close()

    # mask contains values of 0 and 1, change to a mask with only values of 1
    ia.open(image + '.mask')
    ia.calcmask(image + '.mask' + '>0.5')
    ia.close()

    # apply mask on smoothed image
    immath(imagename=image + '.smooth.image',
           outfile=image + '.smooth.blk.image',
           mode='evalexpr',
           mask='mask(' + image + '.mask)',
           expr='IM0')

    # apply mask on image
    ia.open(imageDir + image + '.blk.image')
    ia.maskhandler('copy', [image + '.smooth.blk.image:mask0', 'newmask'])
    ia.maskhandler('set', 'newmask')
    ia.done()

    cube = '.blk.image'  # specify name of cube for moment calculation

    # create moment 0 map
    if mom0:
        if ruthless:
            os.system('rm -rf ' + image + '.mom0.image')
        immoments(imagename=image + cube,
                  moments=[0],
                  axis='spectra',
                  chans='',
                  mask='mask(' + image + cube + ')',
                  outfile=image + '.mom0.image')

    # create moment 1 map
    if mom1:
        if ruthless:
            os.system('rm -rf ' + image + '.mom1.image')
        immoments(imagename=image + cube,
                  moments=[1],
                  axis='spectra',
                  chans='',
                  mask='mask(' + image + cube + ')',
                  outfile=image + '.mom1.image')

    # create moment 2 map
    if mom2:
        if ruthless:
            os.system('rm -rf ' + image + '.mom2.image')
        immoments(imagename=image + cube,
                  moments=[2],
                  axis='spectra',
                  chans='',
                  mask='mask(' + image + cube + ')',
                  outfile=image + '.mom2.image')

    if verbose and mom0:
        from casa import imstat
        flux = imstat(image + '.mom0.image')['flux'][0]
        ia.open(image + '.mom0.image')
        beammaj = ia.restoringbeam(channel=0)['major']['value']
        beammin = ia.restoringbeam(channel=0)['minor']['value']
        beamsizeUnit = ia.restoringbeam(channel=0)['major']['unit']
        ia.close()
        print 'Moment Image: ' + str(image) + '.mom0.image'
        print 'Beamsize: ' + str(beammaj) + '" X ' + str(beammin) + '"'
        print 'Flux: ' + str(flux) + ' Jy km/s'
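
A hypothetical call to blankcube above; 'ngc1234' and 'dummy.ms' are placeholder names for an existing image base (with the '.image' extension) and a measurement set that the interactive im.drawmask() step can open.

blankcube('ngc1234',          # expects ngc1234.image on disk (hypothetical name)
          'dummy.ms',         # any MS usable by im.drawmask() (hypothetical name)
          smooth=True,
          ruthless=True,      # clear products from earlier runs
          blankThreshold=2.5,
          moments=[0, 1, 2])  # write mom0, mom1 and mom2 maps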
Example #11
    def run(self):
        dt = utils.Dtime("PVCorr")
        self._summary = {}

        numsigma = self.getkey("numsigma")
        mode = 1                                            # PV corr mode (1,2,3)
        normalize = True
        # normalize = False

        b1 = self._bdp_in[0]                                # PVSlice_BDP
        fin = b1.getimagefile(bt.CASA)                      # CASA image
        data = casautil.getdata_raw(self.dir(fin))          # grab the data as a numpy array
        self.myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode,abspath=self.dir())
        #print 'DATA[0,0]:',data[0,0]
        #print 'pv shape: ',data.shape
        npos = data.shape[0]
        nvel = data.shape[1]
        dt.tag("getdata")

        b2 = self._bdp_in[1]                                # CubeStats_BDP
        sigma = b2.sigma                                    # global sigma in the cube
        cutoff = numsigma * sigma
        freq =  b2.table.getColumnByName("frequency")

        chans = self.getkey("range")                        # range of channels, if used
        if len(chans) > 0:
            if len(chans) != 2:
                logging.fatal("range=%s" % chans)
                raise Exception,"range= needs two values, left and right (inclusive) channel"
            ch0 = chans[0]
            ch1 = chans[1]
        else:
            nchan = self.getkey("nchan")
            imstat0 = casa.imstat(self.dir(fin))         # @todo   can use data[] now
            xmaxpos = imstat0['maxpos'][0]
            ymaxpos = imstat0['maxpos'][1]
            logging.info("MAXPOS-VEL %s %g" % (str(imstat0['maxpos']),imstat0['max'][0]))
            if nchan > 0:
                # expand around it, later ch0,ch1 will be checked for running off the edge
                ch0 = ymaxpos - nchan/2
                ch1 = ymaxpos + nchan/2
            else:
                # watershed down to find ch0 and ch1 ?
                # this doesn't work well in crowded areas
                ch0 = ymaxpos
                ch1 = ymaxpos
                spmax = data.max(axis=0)
                k = spmax.argmax()
                n = len(spmax)
                logging.debug('spmax %s %d %g' % (str(spmax.shape),k,spmax[k]))
                # find lower cutoff
                for i in range(n):
                    ch0 = ymaxpos - i
                    if ch0<0: break
                    if spmax[ch0] < cutoff: break
                ch0 = ch0 + 1
                # find higher cutoff
                for i in range(n):
                    ch1 = ymaxpos + i
                    if ch1==n: break
                    if spmax[ch1] < cutoff: break
                ch1 = ch1 - 1
            dt.tag("imstat")

        
        bdp_name = self.mkext(fin,"pvc")                    # output PVCorr_BDP
        b3 = PVCorr_BDP(bdp_name)
        self.addoutput(b3)

        if ch0<0 or ch1>=nvel:
            # this probably only happens to small cubes (problematic for PVCorr)
            # or when the strongest line is really close to the edge of the band
            # (which is probably ok)
            if ch0<0 and ch1>=nvel:
                logging.warning("Serious issues with the size of this cube")
            if ch0<0: 
                logging.warning("Resetting ch0 edge to 0")
                ch0=0
            if ch1>=nvel: 
                ch1=nvel-1
                logging.warning("Resetting ch1 edge to the maximum")

        if ch0 > ch1:
            logging.warning("Sanity swapping ch0,1 due to likely noisy data")
            ch0,ch1 = ch1,ch0

        if mode == 1:
            out,rms = mode1(data, ch0, ch1, cutoff, normalize)
            corr = out
        elif mode == 2:
            out,rms = mode2(data, ch0, ch1, cutoff)             # slower 2D version
            corr = out[npos/2,:]                                # center cut, but could also try feature detection
        elif mode == 3:
            out,rms = self.mode3(data, ch0, ch1, cutoff)        # Doug's faster 2D version
            # get the peak of each column
            corr = np.amax(out,axis=0)
        # print "PVCORR SHAPE ",corr.shape," mode", mode
        if len(corr) > 0:
            # print "SHAPE out:",out.shape,corr.shape,npos/2
            ch  = range(len(corr))
            if len(corr) != len(freq):
                logging.fatal("ch (%d) and freq (%d) do not have same size" % (len(corr),len(freq)))
                raise Exception,"ch and freq do not have same dimension"
            dt.tag("mode")
            labels = ["channel",   "frequency",  "pvcorr"]
            units  = ["number",    "GHz",        "N/A"]
            data   = (ch,          freq,         corr)
            table = Table(columns=labels,units=units,data=np.column_stack(data))
        else:
            # still construct a table, but with no rows
            labels = ["channel",   "frequency",  "pvcorr"]
            units  = ["number",    "GHz",        "N/A"]
            table = Table(columns=labels,units=units)
        b3.setkey("table",table)
        b3.setkey("sigma",float(rms))
        dt.tag("table")
        if len(corr) > 0:
            table.exportTable(self.dir("testPVCorr.tab"),cols=['frequency','pvcorr'])
            test_single(ch,freq,corr)

            logging.regression("PVC: %f %f" % (corr.min(),corr.max()))

            title = 'PVCorr mode=%d [%d,%d] %g' % (mode,ch0,ch1,cutoff)
            x = ch
            xlab = 'Channel'
            y = [corr]
            ylab = 'PV Correlation'
            p1 = "%s_%d" % (bdp_name,0)
            segp = []
            segp.append( [0,len(ch),0.0,0.0] )
            segp.append( [0,len(ch),3.0*rms, 3.0*rms] )
            # @todo:   in principle we know with given noise and  size of box, what the sigma in pvcorr should be
            self.myplot.plotter(x,y,title,figname=p1,xlab=xlab,ylab=ylab,segments=segp, thumbnail=True)

            #out1 = np.rot90 (data.reshape((nvel,npos)) )
            if mode > 1:
                self.myplot.map1(data=out,title="testing PVCorr_AT:  mode%d"%mode,figname='testPVCorr', thumbnail=True)

            taskargs = "numsigma=%.1f range=[%d,%d]" % (numsigma,ch0,ch1)
            caption = "Position-velocity correlation plot"
            thumbname = self.myplot.getThumbnail(figno=self.myplot.figno,relative=True)
            figname   = self.myplot.getFigure(figno=self.myplot.figno,relative=True)
            image = Image(images={bt.PNG: figname}, thumbnail=thumbname, thumbnailtype=bt.PNG,
                description=caption)
            b3.image.addimage(image, "pvcorr")

            self._summary["pvcorr"] = SummaryEntry([figname,thumbname,caption,fin],"PVCorr_AT",self.id(True),taskargs)
        else:
            self._summary["pvcorr"] = None
            logging.warning("No summary")
            logging.regression("PVC: -1")

        dt.tag("done")
        dt.end()
Exemplo n.º 12
0
def blankcube(image,
              dummyMS,
              smooth=True,
              verbose=True,
              region='centerbox[[10h21m45s,18.05.14.9],[15arcmin,15arcmin]]',
              ruthless=False,
              extension='.image',
              beamround='int',
              blankThreshold=2.5,
              moments=[0]):

    '''
    Parameters
    ----------
    image : string
        Base name of image. If target image has extension other than '.image',
        change the extension keyword.

    dummyMS : string
        MS file required for blanking process in CASA

    smooth : bool, optional
        Smooth the image to a circular beam before blanking?
        Default True

    verbose : bool, optional

    region : string, optional
        region parameter featured in CASA

    ruthless : bool, optional
        Delete previous outputs from blankcube

    extension : string, optional
        Extension of the target image. Must include the '.', e.g., '.restored'
        Default '.image'

    beamround : string or float, optional
        Controls the beam used for the blanking cube: if a float, the cube is
        smoothed to beamround * beamsize; otherwise the beamsize is rounded up
        to the next integer arcsecond.
        Default: 'int'

    blankThreshold : float, optional
        Initial blanking threshold: pixels below blankThreshold times the
        standard deviation of the smoothed cube are masked.
        Default = 2.5 (Walter et al. 2008)

    moments : list, optional
        Moments to calculate from cube. Options are 0,1,2.
        Example: [0,1,2]
        Default: [0]

    Returns
    -------
    out : null

    Examples
    --------
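    A minimal usage sketch (the image base name and measurement set below are
    hypothetical placeholders, not taken from the code above):

    >>> blankcube('ngc1234', 'ngc1234_dummy.ms', smooth=True, ruthless=True,
    ...           extension='.image', moments=[0, 1])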

    '''

    from casa import immath,imsmooth,immoments
    from casa import image as ia
    from casa import imager as im
    from casa import quanta as qa
    import os
    import numpy as np

    # Delete files associated with previous runs
    if ruthless:
        os.system('rm -rf ' + image + '.smooth.blk.image')
        os.system('rm -rf ' + image + '.smooth.image')
        os.system('rm -rf ' + image + '.blk.image')
        os.system('rm -rf ' + image + '.mask')

    imageDir = './'

    # Create moment maps
    mom0,mom1,mom2 = False,False,False
    if len(moments) > 0:
        for i, moment in enumerate(moments):
            if moment == 0:
                mom0 = True
            if moment == 1:
                mom1 = True
            if moment == 2:
                mom2 = True
    if mom1 == True or mom2 == True:
        beamScale = 1.01
    elif mom0 == True:
        beamScale = 2.

    # determine beamsize of cube
    ia.open(imageDir + image + extension)
    beamsizes = np.zeros(ia.shape()[2])
    for i in range(ia.shape()[2]):
        beamsizes[i] = ia.restoringbeam(channel=i)['major']['value']
    beamsizeUnit = ia.restoringbeam(channel=i)['major']['unit']
    beamsize = qa.convert(str(beamsizes.max()) + beamsizeUnit,'arcsec')['value']
    if type(beamround) == float:
        beamsize_smooth = beamround*beamsize
    else:
        beamsize_smooth = np.ceil(beamsize)   #beamsize_smooth = 1.01 * beamsize
    ia.close()
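    # illustrative (hypothetical) numbers: a 12.3" beam is smoothed to 13"
    # with the default beamround='int' (next integer arcsecond), or to
    # 18.45" with, e.g., beamround=1.5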

    if verbose:
        print 'Max beamsize is ' + str(beamsize) + '"'

    if not os.path.exists(image + '.blk.image'):
        # create cube for blanking
        if smooth: # smooth to a larger beam if the user desires
            if verbose:
                print 'Convolving to ' + str(beamsize_smooth) + '"'

            imsmooth(imagename=image + extension,
                     outfile=image + '.blk.image',
                     major=str(beamsize_smooth) + 'arcsec',
                     minor=str(beamsize_smooth) + 'arcsec',
                     region=region,
                     pa='0deg',
                     targetres=True)
        else: # do not smooth
            immath(imagename=image + extension,
                   outfile=image + '.blk.image',
                   mode='evalexpr',
                   region=region,
                   expr='IM0')

    if not os.path.exists(image + '.smooth.image'):
        # convolve cube to 2X beam for blanking
        imsmooth(imagename=image + extension,
             outfile=image + '.smooth.image',
             major=str(beamsize*beamScale) + 'arcsec',
             minor=str(beamsize*beamScale) + 'arcsec',
             pa='0deg',
             region=region,
             targetres=True)

    # determine threshold of cube
    ia.open(image + '.smooth.image')
    threshold = ia.statistics()['sigma'][0] * blankThreshold
    ia.close()

    # blank the cube at threshold*sigma
    ia.open(image + '.smooth.image')
    ia.calcmask(mask=image + '.smooth.image > ' + str(threshold),
             name='mask1')
    wait = 'waits for calcmask to close'
    ia.close()

    # hand blank the cube
    im.open(dummyMS)
    pause = None
    while pause is None:
        im.drawmask(image=image + '.smooth.image',
                           mask=image + '.mask')
        pause = 0
    im.close()

    # mask contains values of 0 and 1, change to a mask with only values of 1
    ia.open(image + '.mask')
    ia.calcmask(image + '.mask' + '>0.5')
    ia.close()

    # apply mask on smoothed image
    immath(imagename=image + '.smooth.image',
       outfile=image + '.smooth.blk.image',
       mode='evalexpr',
       mask='mask(' + image + '.mask)',
       expr='IM0')

    # apply mask on image
    ia.open(imageDir + image + '.blk.image')
    ia.maskhandler('copy',[image + '.smooth.blk.image:mask0', 'newmask'])
    ia.maskhandler('set','newmask')
    ia.done()

    cube = '.blk.image' # specify name of cube for moment calculation


    # create moment 0 map
    if mom0:
        if ruthless:
            os.system('rm -rf ' + image + '.mom0.image')
        immoments(imagename=image + cube,
                  moments=[0],
                  axis='spectra',
                  chans='',
                  mask='mask(' + image + cube + ')',
                  outfile=image + '.mom0.image')

    # create moment 1 map
    if mom1:
        if ruthless:
            os.system('rm -rf ' + image + '.mom1.image')
        immoments(imagename=image + cube,
                  moments=[1],
                  axis='spectra',
                  chans='',
                  mask='mask(' + image + cube + ')',
                  outfile=image + '.mom1.image')

    # create moment 2 map
    if mom2:
        if ruthless:
            os.system('rm -rf ' + image + '.mom2.image')
        immoments(imagename=image + cube,
                  moments=[2],
                  axis='spectra',
                  chans='',
                  mask='mask(' + image + cube + ')',
                  outfile=image + '.mom2.image')

    if verbose and mom0:
        from casa import imstat
        flux = imstat(image + '.mom0.image')['flux'][0]
        ia.open(image + '.mom0.image')
        beammaj = ia.restoringbeam(channel=0)['major']['value']
        beammin = ia.restoringbeam(channel=0)['minor']['value']
        beamsizeUnit = ia.restoringbeam(channel=0)['major']['unit']
        ia.close()
        print 'Moment Image: ' + str(image) + '.mom0.image'
        print 'Beamsize: ' + str(beammaj) + '" X ' + str(beammin) + '"'
        print 'Flux: ' + str(flux) + ' Jy km/s'
Exemplo n.º 13
0
    def run(self):
        """ The run method creates the BDP

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        dt = utils.Dtime("SFind2D")               # tagging time
        self._summary = {}
        # get key words that user input
        nsigma = self.getkey("numsigma")
        sigma  = self.getkey("sigma")
        region = self.getkey("region")
        robust = self.getkey("robust")
        snmax  = self.getkey("snmax")
        ds9 = True                                     # writes a "ds9.reg" file
        mpl = True                                     # aplot.map1() plot
        dynlog = 20.0                                  # above this dynamic range the finder chart is log intensity-scaled
        bpatch = True                                  # patch units to Jy/beam for ia.findsources()
        
        # get the input casa image from bdp[0]
        bdpin = self._bdp_in[0]
        infile = bdpin.getimagefile(bt.CASA)
        if mpl:
            data = np.flipud(np.rot90(casautil.getdata(self.dir(infile)).data))

        # check if there is a 2nd image (which will be a PB)
        for i in range(len(self._bdp_in)):
            print 'BDP',i,type(self._bdp_in[i])

        if self._bdp_in[2] != None:
            bdpin_pb  = self._bdp_in[1]            
            bdpin_cst = self._bdp_in[2]
            print "Need to process PB"
        else:
            bdpin_pb  = None
            bdpin_cst = self._bdp_in[1]
            print "No PB given"
            

        # get the output bdp basename
        slbase = self.mkext(infile,'sl')

        # make sure it's a 2D map
        if not casautil.mapdim(self.dir(infile),2):
            raise Exception,"Input map dimension not 2: %s" % infile

        # arguments for imstat call if required
        args = {"imagename" : self.dir(infile)}
        if region != "":
            args["region"] = region
        dt.tag("start")

        # The following code sets the sigma level for searching for sources using
        # the sigma and snmax keyword as appropriate
        # if no CubeStats BDP was given and no sigma was specified:
        # find a noise level via casa.imstat()
        # if a CubeStat_BDP is given get it from there.
        if bdpin_cst == None:
            # get statistics from input image with imstat because no CubeStat_BDP
            stat  = casa.imstat(**args)
            dmin  = float(stat["min"][0])                 # these would be wrong if robust were used already
            dmax  = float(stat["max"][0])
            args.update(casautil.parse_robust(robust))    # only now add robust keywords for the sigma
            stat  = casa.imstat(**args)            
            if sigma <= 0.0 :
                sigma = float(stat["sigma"][0])
            dt.tag("imstat")
        else:
            # get statistics from CubeStat_BDP 
            sigma = bdpin_cst.get("sigma")
            dmin  = bdpin_cst.get("minval")
            dmax  = bdpin_cst.get("maxval")

        self.setkey("sigma",sigma)
        # calculate cutoff based either on RMS or dynamic range limitation
        drange = dmax/(nsigma*sigma)
        if snmax < 0.0 :
            snmax = drange
        if drange > snmax :
            cutoff = 1.0/snmax
        else:
            cutoff = 1.0/drange
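        # illustrative (hypothetical) numbers: with sigma=0.002 Jy/beam, nsigma=5
        # and dmax=1.0 Jy/beam, drange = 1.0/(5*0.002) = 100; with snmax=35 that
        # exceeds snmax, so cutoff = 1/35 ~ 0.029, i.e. ia.findsources() is asked
        # to keep components down to roughly 3% of the peak rather than all the
        # way down to nsigma*sigma.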
        logging.info("sigma, dmin, dmax, snmax, cutoff %g %g %g %g %g" % (sigma, dmin, dmax, snmax, cutoff))
        # define arguments for call to findsources
        args2 = {"cutoff" : cutoff}
        args2["nmax"] = 30
        if region != "" :
            args2["region"] = region
        #args2["mask"] = ""
        args2["point"] = False
        args2["width"] = 5
        args2["negfind"] = False
        # set-up for SourceList_BDP
        slbdp = SourceList_BDP(slbase)

        # connect to casa image and call casa ia.findsources tool
        taskinit.ia.open(self.dir(infile))

        # findsources() apparently cannot deal with 'Jy/beam.km/s' units,
        # so for the duration of findsources() we patch them to Jy/beam
        bunit = taskinit.ia.brightnessunit()
        if bpatch and bunit != 'Jy/beam':
            logging.warning("Temporarily patching your %s units to Jy/beam for ia.findsources()" % bunit)
            taskinit.ia.setbrightnessunit('Jy/beam')
        else:
            bpatch = False
        atab = taskinit.ia.findsources(**args2)
        if bpatch:
            taskinit.ia.setbrightnessunit(bunit)
        
        taskargs = "nsigma=%4.1f sigma=%g region=%s robust=%s snmax=%5.1f" % (nsigma,sigma,str(region),str(robust),snmax)
        dt.tag("findsources")
        nsources = atab["nelements"] 
        xtab = []
        ytab = []
        logscale = False
        sumflux = 0.0
        if nsources > 0:
            # @TODO: Why are Xpix, YPix not stored in the table?
            #        -> PJT: I left them out since they are connected to an image which may not be available here
            #                but we should store the frequency of the observation here for later bandmerging
            logging.debug("%s" % str(atab['component0']['shape']))
            logging.info("Right Ascen.  Declination   X(pix)   Y(pix)      Peak       Flux    Major   Minor    PA    SNR")
            funits = atab['component0']['flux']['unit']
            if atab['component0']['shape'].has_key('majoraxis'):
                sunits = atab['component0']['shape']['majoraxis']['unit']
                aunits = atab['component0']['shape']['positionangle']['unit']
            else:
                sunits = "n/a"
                aunits = "n/a"
            punits = taskinit.ia.summary()['unit']
            logging.info("                                               %s       %s    %s   %s   %s" % (punits,funits,sunits,sunits,aunits))
            #
            # @todo future improvement is to look at image coordinates and control output appropriately
            #
            if ds9:
                # @todo variable name
                regname = self.mkext(infile,'ds9.reg')
                fp9 = open(self.dir(regname),"w!")
            for i in range(nsources):
                c = "component%d" % i
                name = "%d" % (i+1)
                r = atab[c]['shape']['direction']['m0']['value']
                d = atab[c]['shape']['direction']['m1']['value']
                pixel = taskinit.ia.topixel([r,d])
                xpos = pixel['numeric'][0]
                ypos = pixel['numeric'][1]
                rd = taskinit.ia.toworld([xpos,ypos],'s')
                ra = rd['string'][0][:12]
                dec = rd['string'][1][:12]
                flux = atab[c]['flux']['value'][0]
                sumflux = sumflux + flux
                if atab[c]['shape'].has_key('majoraxis'):
                    smajor = atab[c]['shape']['majoraxis']['value']
                    sminor = atab[c]['shape']['minoraxis']['value']
                    sangle = atab[c]['shape']['positionangle']['value']
                else:
                    smajor = 0.0
                    sminor = 0.0
                    sangle = 0.0
                peakstr = taskinit.ia.pixelvalue([xpos,ypos,0,0])
                if len(peakstr) == 0:
                    logging.warning("Problem with source %d @ %d,%d" % (i,xpos,ypos))
                    continue
                peakf = peakstr['value']['value']
                snr = peakf/sigma
                if snr > dynlog:
                    logscale = True
                logging.info("%s %s %8.2f %8.2f %10.3g %10.3g %7.3f %7.3f %6.1f %6.1f" % (ra,dec,xpos,ypos,peakf,flux,smajor,sminor,sangle,snr))
                
                xtab.append(xpos)
                ytab.append(ypos)
                slbdp.addRow([name,ra,dec,flux,peakf,smajor,sminor,sangle])
                if ds9:
                    ras = ra
                    des = dec.replace('.',':',2)
                    msg = 'ellipse(%s,%s,%g",%g",%g) # text={%s}' % (ras,des,smajor,sminor,sangle+90.0,i+1)
                    fp9.write("%s\n" % msg)
            if ds9:
                fp9.close()
                logging.info("Wrote ds9.reg")
            dt.tag("table")
        logging.regression("CONTFLUX: %d %g" % (nsources,sumflux))
        

        summary = taskinit.ia.summary()
        beammaj = summary['restoringbeam']['major']['value']
        beammin = summary['restoringbeam']['minor']['value']
        beamunit = summary['restoringbeam']['minor']['unit']
        beamang = summary['restoringbeam']['positionangle']['value']
        angunit = summary['restoringbeam']['positionangle']['unit']
        # @todo add to table comments?
        logging.info(" Fitted Gaussian size; NOT deconvolved source size.")
        logging.info(" Restoring Beam: Major axis: %10.3g %s , Minor axis: %10.3g %s , PA: %5.1f %s" % (beammaj, beamunit, beammin, beamunit, beamang, angunit))
        # form into a xml table
        
        # output is a table_bdp
        self.addoutput(slbdp)

        # instantiate a plotter for all plots made herein
        myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode,abspath=self.dir())

        # make output png with circles marking sources found
        if mpl:
            circles=[]
            nx = data.shape[1]             # data[] array was already flipud(rot90)'d
            ny = data.shape[0]             # 
            for (x,y) in zip(xtab,ytab):
                circles.append([x,y,1])
            # @todo variable name
            if logscale:
                logging.warning("LogScaling applied")
                data = data/sigma
                data = np.where(data<0,-np.log10(1-data),+np.log10(1+data))
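                # i.e. a signed log stretch: data is first scaled to S/N units,
                # then mapped to sign(x)*log10(1+|x|), which keeps zero at zero,
                # preserves the sign of negative (bowl) pixels, and compresses
                # the bright end so faint sources remain visible in the finder chart.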
            title = "SFind2D: %d sources" % nsources
            myplot.map1(data,title,slbase,thumbnail=True,circles=circles)

        #---------------------------------------------------------
        # Get the figure and thumbmail names and create a caption
        #---------------------------------------------------------
        imname = myplot.getFigure(figno=myplot.figno,relative=True)
        thumbnailname = myplot.getThumbnail(figno=myplot.figno,relative=True)
        caption = "Image of input map with sources found by SFind2D overlayed in green."
        slbdp.table.description="Table of source locations and sizes (not deconvolved)"
 
        #---------------------------------------------------------
        # Add finder image to the BDP
        #---------------------------------------------------------
        image = Image(images={bt.PNG: imname}, 
                      thumbnail=thumbnailname, 
                      thumbnailtype=bt.PNG, description=caption)
        slbdp.image.addimage(image, "finderimage")

        #-------------------------------------------------------------
        # Create the summary entry for the table and image
        #-------------------------------------------------------------
        self._summary["sources"] = SummaryEntry([slbdp.table.serialize(),
                                                 slbdp.image.serialize()],
                                                "SFind2D_AT", 
                                                self.id(True), taskargs)
        
        dt.tag("done")
        dt.end()
Exemplo n.º 14
0
    def run(self):
        """Runs the task.

           Parameters
           ----------
           None

           Returns
           -------
           None
        """

        self._summary = {}
        dt = utils.Dtime("CubeStats")

        #maxvrms = 2.0      # maximum variation in rms allowed (hardcoded for now)
        #maxvrms = -1.0     # turn maximum variation in rms allowed off
        maxvrms = self.getkey("maxvrms")

        psample = -1
        psample = self.getkey("psample")        

        # BDP's used :
        #   b1 = input BDP
        #   b2 = output BDP

        b1 = self._bdp_in[0]
        fin = b1.getimagefile(bt.CASA)

        bdp_name = self.mkext(fin,'cst')
        b2 = CubeStats_BDP(bdp_name)
        self.addoutput(b2)

        # PeakPointPlot 
        use_ppp = self.getkey("ppp")

        # peakstats: not enabled for mortal users yet
        # peakstats = (psample=1, numsigma=4, minchan=3, maxgap=2, peakfit=False)
        pnumsigma = 4
        minchan   = 3
        maxgap    = 2
        peakfit   = False             # True will enable a true gaussian fit
        
        # numsigma:  adding all signal > numsigma ; not user enabled;   for peaksum.
        numsigma = -1.0
        numsigma = 3.0

        # grab the new robust statistics. If this is used, 'rms' will be the RMS,
        # else we will use RMS = 1.4826*MAD (MAD handles outliers well), which
        # was the only method available before CASA 4.4, when robust statistics were implemented.
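        # note: 1.4826 is the usual MAD-to-sigma factor for Gaussian noise:
        # for a normal distribution MAD = 0.6745*sigma (the 75th percentile of
        # |N(0,1)|), so sigma = MAD/0.6745 = 1.4826*MAD.  A quick (hypothetical)
        # numpy sanity check of that factor would be:
        #   x = np.random.normal(0.0, 1.0, 100000)
        #   mad = np.median(np.abs(x - np.median(x)))
        #   print 1.4826 * mad      # should come out close to 1.0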
        robust = self.getkey("robust")
        rargs = casautil.parse_robust(robust)
        nrargs = len(rargs)

        if nrargs == 0:
            sumrargs = "medabsdevmed"      # for the summary, indicate the default robust
        else:
            sumrargs = str(rargs)

        self._summary["rmsmethd"] = SummaryEntry([sumrargs,fin],"CubeStats_AT",self.id(True))
        #@todo think about using this instead of putting 'fin' in all the SummaryEntry
        #self._summary["casaimage"] = SummaryEntry(fin,"CubeStats_AT",self.id(True))

        # extra CASA call to get the freq's in GHz, as these are not in imstat1{}
        # @todo what if the coordinates are not in FREQ ?
        # Note: CAS-7648 bug on 3D cubes
        if False:
            # csys method
            ia.open(self.dir(fin))
            csys = ia.coordsys() 
            spec_axis = csys.findaxisbyname("spectral") 
            # ieck, we need a valid position, or else it will come back and "Exception: All selected pixels are masked"
            #freqs = ia.getprofile(spec_axis, region=rg.box([0,0],[0,0]))['coords']/1e9
            #freqs = ia.getprofile(spec_axis)['coords']/1e9
            freqs = ia.getprofile(spec_axis,unit="GHz")['coords']
            dt.tag("getprofile")
        else:
            # old imval method 
            #imval0 = casa.imval(self.dir(fin),box='0,0,0,0')     # this fails on 3D
            imval0 = casa.imval(self.dir(fin))
            freqs = imval0['coords'].transpose()[2]/1e9
            dt.tag("imval")
        nchan = len(freqs)
        chans = np.arange(nchan)

        # call CASA to get what we want
        # imstat0 is the whole cube, imstat1 the plane based statistics
        # warning: certain robust stats (**rargs) on the whole cube are going to be very slow
        dt.tag("start")
        imstat0 = casa.imstat(self.dir(fin),           logfile=self.dir('imstat0.logfile'),append=False,**rargs)
        dt.tag("imstat0")
        imstat1 = casa.imstat(self.dir(fin),axes=[0,1],logfile=self.dir('imstat1.logfile'),append=False,**rargs)
        dt.tag("imstat1")
        # imm = casa.immoments(self.dir(fin),axis='spec', moments=8, outfile=self.dir('ppp.im'))
        if nrargs > 0:
            # need to get the peaks without robust statistics
            imstat10 = casa.imstat(self.dir(fin),           logfile=self.dir('imstat0.logfile'),append=True)
            dt.tag("imstat10")
            imstat11 = casa.imstat(self.dir(fin),axes=[0,1],logfile=self.dir('imstat1.logfile'),append=True)
            dt.tag("imstat11")

        # grab the relevant plane-based things from imstat1
        if nrargs == 0:
            mean    = imstat1["mean"]
            sigma   = imstat1["medabsdevmed"]*1.4826     # see also: astropy.stats.median_absolute_deviation()
            peakval = imstat1["max"]
            minval  = imstat1["min"]
        else:
            mean    = imstat1["mean"]
            sigma   = imstat1["rms"]
            peakval = imstat11["max"]
            minval  = imstat11["min"]

        if True:
            # work around a bug in imstat(axes=[0,1]) for last channel [CAS-7697]
            for i in range(len(sigma)):
                if sigma[i] == 0.0:
                    minval[i] = peakval[i] = 0.0

        # too many variations in the RMS ?
        sigma_pos = sigma[np.where(sigma>0)]
        smin = sigma_pos.min()
        smax = sigma_pos.max()
        logging.info("sigma varies from %f to %f; %d/%d channels ok" % (smin,smax,len(sigma_pos),len(sigma)))
        if maxvrms > 0:
            if smax/smin > maxvrms:
                cliprms = smin * maxvrms
                logging.warning("sigma varies too much, going to clip to %g (%g > %g)" % (cliprms, smax/smin, maxvrms))
                sigma = np.where(sigma < cliprms, sigma, cliprms)
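                # illustrative (hypothetical) numbers: with maxvrms=2 and a best
                # channel rms of smin=1.0 mJy/beam, any channel sigma above
                # 2.0 mJy/beam is clipped down to 2.0 mJy/beam before further use.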

        # @todo   (and check again) for foobar.fits all sigma's became 0 when robust was selected
        #         was this with mask=True/False?

        # PeakPointPlot (can be expensive, hence the option)
        if use_ppp:
            logging.info("Computing MaxPos for PeakPointPlot")
            xpos    = np.zeros(nchan)
            ypos    = np.zeros(nchan)
            peaksum = np.zeros(nchan)

            ia.open(self.dir(fin))
            for i in range(nchan):
                if sigma[i] > 0.0:
                    plane = ia.getchunk(blc=[0,0,i,-1],trc=[-1,-1,i,-1],dropdeg=True)
                    v = ma.masked_invalid(plane)
                    v_abs = np.absolute(v)
                    maxidx = np.unravel_index(v_abs.argmax(), v_abs.shape)
                    xpos[i] = maxidx[0]
                    ypos[i] = maxidx[1]
                    if numsigma > 0.0:
                        peaksum[i] = ma.masked_less(v,numsigma * sigma[i]).sum()
            peaksum = np.nan_to_num(peaksum)    # put 0's where nan's are found
            ia.close()
            dt.tag("ppp")

        nzeros = len(np.where(sigma<=0.0)[0])
        if nzeros > 0:
            zeroch = np.where(sigma<=0.0)
            logging.warning("There are %d fully masked channels (%s)" % (nzeros,str(zeroch)))

        # construct the admit Table for CubeStats_BDP
        # note data needs to be a tuple, later to be column_stack'd
        if use_ppp:
            labels = ["channel" ,"frequency" ,"mean"    ,"sigma"   ,"max"     ,"maxposx" ,"maxposy" ,"min",     "peaksum"]
            units  = ["number"  ,"GHz"       ,"Jy/beam" ,"Jy/beam" ,"Jy/beam" ,"number"  ,"number"  ,"Jy/beam", "Jy"]
            data   = (chans     ,freqs       ,mean      ,sigma     ,peakval   ,xpos      ,ypos      ,minval,    peaksum)

        else:
            labels = ["channel" ,"frequency" ,"mean"    ,"sigma"   ,"max"     ,"min"]
            units  = ["number"  ,"GHz"       ,"Jy/beam" ,"Jy/beam" ,"Jy/beam" ,"Jy/beam"]
            data   = (chans     ,freqs       ,mean      ,sigma     ,peakval   ,minval)

        table = Table(columns=labels,units=units,data=np.column_stack(data))
        b2.setkey("table",table)

        # get the full cube statistics, it depends if robust was pre-selected
        if nrargs == 0:
            mean0  = imstat0["mean"][0]
            sigma0 = imstat0["medabsdevmed"][0]*1.4826
            peak0  = imstat0["max"][0]
            b2.setkey("mean" , float(mean0))
            b2.setkey("sigma", float(sigma0))
            b2.setkey("minval",float(imstat0["min"][0]))
            b2.setkey("maxval",float(imstat0["max"][0]))
            b2.setkey("minpos",imstat0["minpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            b2.setkey("maxpos",imstat0["maxpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            logging.info("CubeMax: %f @ %s" % (imstat0["max"][0],str(imstat0["maxpos"])))
            logging.info("CubeMin: %f @ %s" % (imstat0["min"][0],str(imstat0["minpos"])))
            logging.info("CubeRMS: %f" % sigma0)
        else:
            mean0  = imstat0["mean"][0]
            sigma0 = imstat0["rms"][0]
            peak0  = imstat10["max"][0]
            b2.setkey("mean" , float(mean0))
            b2.setkey("sigma", float(sigma0))
            b2.setkey("minval",float(imstat10["min"][0]))
            b2.setkey("maxval",float(imstat10["max"][0]))
            b2.setkey("minpos",imstat10["minpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            b2.setkey("maxpos",imstat10["maxpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            logging.info("CubeMax: %f @ %s" % (imstat10["max"][0],str(imstat10["maxpos"])))
            logging.info("CubeMin: %f @ %s" % (imstat10["min"][0],str(imstat10["minpos"])))
            logging.info("CubeRMS: %f" % sigma0)
        b2.setkey("robust",robust)
        rms_ratio = imstat0["rms"][0]/sigma0
        logging.info("RMS Sanity check %f" % rms_ratio)
        if rms_ratio > 1.5:
            logging.warning("RMS sanity check = %f.  Either bad sidelobes, lotsa signal, or both" % rms_ratio)
        logging.regression("CST: %f %f" % (sigma0, rms_ratio))

        # plots: no plots need to be made when nchan=1 for continuum
        # however we could make a histogram, overlaying the "best" gauss so 
        # signal deviations are clear?

        logging.info('mean,rms,S/N=%f %f %f' % (mean0,sigma0,peak0/sigma0))

        if nchan == 1:
            # for a continuum/1-channel we only need to stuff some numbers into the _summary
            self._summary["chanrms"] = SummaryEntry([float(sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["dynrange"] = SummaryEntry([float(peak0)/float(sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["datamean"] = SummaryEntry([float(mean0), fin], "CubeStats_AT", self.id(True))
        else:
            y1 = np.log10(ma.masked_invalid(peakval))
            y2 = np.log10(ma.masked_invalid(sigma))
            y3 = y1-y2
            y4 = np.log10(ma.masked_invalid(-minval))
            y5 = y1-y4
            y = [y1,y2,y3,y4]
            title = 'CubeStats: ' + bdp_name+'_0'
            xlab  = 'Channel'
            ylab  = 'log(Peak,Noise,Peak/Noise)'
            labels = ['log(peak)','log(rms noise)','log(peak/noise)','log(|minval|)']
            myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode,abspath=self.dir())
            segp = [[chans[0],chans[nchan-1],math.log10(sigma0),math.log10(sigma0)]]
            myplot.plotter(chans,y,title,bdp_name+"_0",xlab=xlab,ylab=ylab,segments=segp,labels=labels,thumbnail=True)
            imfile = myplot.getFigure(figno=myplot.figno,relative=True)
            thumbfile = myplot.getThumbnail(figno=myplot.figno,relative=True)

            image0 = Image(images={bt.PNG:imfile},thumbnail=thumbfile,thumbnailtype=bt.PNG,description="CubeStats_0")
            b2.addimage(image0,"im0")

            if use_ppp:
                # new trial for Lee
                title = 'PeakSum: (numsigma=%.1f)' % (numsigma)
                ylab = 'Jy*N_ppb'
                myplot.plotter(chans,[peaksum],title,bdp_name+"_00",xlab=xlab,ylab=ylab,thumbnail=False)

            if True:
                # hack ascii table
                y30 = np.where(sigma > 0, np.log10(peakval/sigma), 0.0)
                table2 = Table(columns=["freq","log(P/N)"],data=np.column_stack((freqs,y30)))
                table2.exportTable(self.dir("testCubeStats.tab"))
                del table2

            # the "box" for the "spectrum" is all pixels.  Don't know how to 
            # get this except via shape.
            ia.open(self.dir(fin))
            s = ia.summary()
            ia.close()
            if 'shape' in s:
                specbox = (0,0,s['shape'][0],s['shape'][1])
            else:
                specbox = ()

            caption = "Emission characteristics as a function of channel, as derived by CubeStats_AT "
            caption += "(cyan: global rms,"
            caption += " green: noise per channel,"
            caption += " blue: peak value per channel,"
            caption += " red: peak/noise per channel)."
            self._summary["spectra"] = SummaryEntry([0, 0, str(specbox), 'Channel', imfile, thumbfile , caption, fin], "CubeStats_AT", self.id(True))
            self._summary["chanrms"] = SummaryEntry([float(sigma0), fin], "CubeStats_AT", self.id(True))

            # @todo Will imstat["max"][0] always be equal to s['datamax']?  If not, why not?
            if 'datamax' in s:
                self._summary["dynrange"] = SummaryEntry([float(s['datamax']/sigma0), fin], "CubeStats_AT", self.id(True))
            else:
                self._summary["dynrange"] = SummaryEntry([float(imstat0["max"][0]/sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["datamean"] = SummaryEntry([imstat0["mean"][0], fin], "CubeStats_AT", self.id(True))

            title = bdp_name + "_1"
            xlab =  'log(Peak,Noise,P/N)'
            myplot.histogram([y1,y2,y3],title,bdp_name+"_1",xlab=xlab,thumbnail=True)

            imfile = myplot.getFigure(figno=myplot.figno,relative=True)
            thumbfile = myplot.getThumbnail(figno=myplot.figno,relative=True)
            image1 = Image(images={bt.PNG:imfile},thumbnail=thumbfile,thumbnailtype=bt.PNG,description="CubeStats_1")
            b2.addimage(image1,"im1")

            # note that the 'y2' can have been clipped, which can throw off stats.robust()
            # @todo  should set a mask for those.

            title = bdp_name + "_2"
            xlab = 'log(Noise)'
            n = len(y2)
            ry2 = stats.robust(y2)
            y2_mean = ry2.mean()
            y2_std  = ry2.std()
            if n>9: logging.debug("NORMALTEST2: %s" % str(scipy.stats.normaltest(ry2)))
            myplot.hisplot(y2,title,bdp_name+"_2",xlab=xlab,gauss=[y2_mean,y2_std],thumbnail=True)

            title = bdp_name + "_3"
            xlab = 'log(diff[Noise])'
            n = len(y2)
            # dy2 = y2[0:-2] - y2[1:-1]
            dy2 = ma.masked_equal(y2[0:-2] - y2[1:-1],0.0).compressed()
            rdy2 = stats.robust(dy2)
            dy2_mean = rdy2.mean()
            dy2_std  = rdy2.std()
            if n>9: logging.debug("NORMALTEST3: %s" % str(scipy.stats.normaltest(rdy2)))
            myplot.hisplot(dy2,title,bdp_name+"_3",xlab=xlab,gauss=[dy2_mean,dy2_std],thumbnail=True)


            title = bdp_name + "_4"
            xlab = 'log(Signal/Noise)'
            n = len(y3)
            ry3 = stats.robust(y3)
            y3_mean = ry3.mean()
            y3_std  = ry3.std()
            if n>9: logging.debug("NORMALTEST4: %s" % str(scipy.stats.normaltest(ry3)))
            myplot.hisplot(y3,title,bdp_name+"_4",xlab=xlab,gauss=[y3_mean,y3_std],thumbnail=True)

            title = bdp_name + "_5"
            xlab = 'log(diff[Signal/Noise])'
            n = len(y3)
            dy3 = y3[0:-2] - y3[1:-1]
            rdy3 = stats.robust(dy3)
            dy3_mean = rdy3.mean()
            dy3_std  = rdy3.std()
            if n>9: logging.debug("NORMALTEST5: %s" % str(scipy.stats.normaltest(rdy3)))
            myplot.hisplot(dy3,title,bdp_name+"_5",xlab=xlab,gauss=[dy3_mean,dy3_std],thumbnail=True)


            title = bdp_name + "_6"
            xlab = 'log(Peak+Min)'
            n = len(y1)
            ry5 = stats.robust(y5)
            y5_mean = ry5.mean()
            y5_std  = ry5.std()
            if n>9: logging.debug("NORMALTEST6: %s" % str(scipy.stats.normaltest(ry5)))
            myplot.hisplot(y5,title,bdp_name+"_6",xlab=xlab,gauss=[y5_mean,y5_std],thumbnail=True)

            logging.debug("LogPeak: m,s= %f %f min/max %f %f" % (y1.mean(),y1.std(),y1.min(),y1.max()))
            logging.debug("LogNoise: m,s= %f %f %f %f min/max %f %f" % (y2.mean(),y2.std(),y2_mean,y2_std,y2.min(),y2.max()))
            logging.debug("LogDeltaNoise: RMS/sqrt(2)= %f %f " % (dy2.std()/math.sqrt(2),dy2_std/math.sqrt(2)))
            logging.debug("LogDeltaP/N:   RMS/sqrt(2)= %f %f" % (dy3.std()/math.sqrt(2),dy3_std/math.sqrt(2)))
            logging.debug("LogPeak+Min: robust m,s= %f %f" % (y5_mean,y5_std))

            # compute two ratios that should both be near 1.0 if noise is 'normal'
            ratio  = y2.std()/(dy2.std()/math.sqrt(2))
            ratio2 = y2_std/(dy2_std/math.sqrt(2))
            logging.info("RMS BAD VARIATION RATIO: %f %f" % (ratio,ratio2))

        # making PPP plot
        if nchan > 1 and use_ppp:
            smax = 10
            gamma = 0.75

            z0 = peakval/peakval.max()
            # point sizes
            s = np.pi * ( smax * (z0**gamma) )**2
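            # marker area thus scales as (peak/peak_max)**(2*gamma); e.g. a channel
            # at half the global peak gets (0.5**0.75)**2 ~ 0.35 of the largest
            # marker area, so weak channels shrink smoothly rather than vanish.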
            cmds = ["grid", "axis equal"]
            title = "Peak Points per channel"
            pppimage = bdp_name + '_ppp'
            myplot.scatter(xpos,ypos,title=title,figname=pppimage,size=s,color=chans,cmds=cmds,thumbnail=True)
            pppimage     = myplot.getFigure(figno=myplot.figno,relative=True)
            pppthumbnail = myplot.getThumbnail(figno=myplot.figno,relative=True)
            caption = "Peak point plot: Locations of per-channel peaks in the image cube " + fin
            self._summary["peakpnt"] = SummaryEntry([pppimage, pppthumbnail, caption, fin], "CubeStats_AT", self.id(True))
        dt.tag("plotting")

        # making PeakStats plot
        if nchan > 1 and psample > 0:
            logging.info("Computing peakstats")
            # grab peak,mean and width values for all peaks
            (pval,mval,wval) = peakstats(self.dir(fin),freqs,sigma0,pnumsigma,minchan,maxgap,psample,peakfit)
            title = "PeakStats: cutoff = %g" % (sigma0*pnumsigma)
            xlab = 'Peak value'
            ylab = 'FWHM (channels)'
            pppimage = bdp_name + '_peakstats'
            cval = mval
            myplot.scatter(pval,wval,title=title,xlab=xlab,ylab=ylab,color=cval,figname=pppimage,thumbnail=False)
            dt.tag("peakstats")
            

        # myplot.final()    # pjt debug 
        # all done!
        dt.tag("done")

        taskargs = "robust=" + sumrargs 
        if use_ppp: 
            taskargs = taskargs + " ppp=True"
        else: 
            taskargs = taskargs + " ppp=False"
        for v in self._summary:
            self._summary[v].setTaskArgs(taskargs)

        dt.tag("summary")
        dt.end()
Exemplo n.º 15
0
    def run(self):
        """Runs the task.

           Parameters
           ----------
           None

           Returns
           -------
           None
        """

        self._summary = {}
        dt = utils.Dtime("CubeSpectrum")

        # our BDP's
        # b1  = input BDP
        # b1s = optional input CubeSpectrum
        # b1m = optional input Moment
        # b1p = optional input SourceList for positions
        # b2  = output BDP

        b1 = self._bdp_in[0]                                            # check input SpwCube (or LineCube)
        fin = b1.getimagefile(bt.CASA)
        if self._bdp_in[0]._type == bt.LINECUBE_BDP:
            use_vel = True
        else:
            use_vel = False

        sources = self.getkey("sources")
        pos = []                     # blank it first, then try and grab it from the optional bdp_in's
        cmean  = 0.0
        csigma = 0.0
        smax  = []                   # accumulate max in each spectrum for regression
        self.spec_description = []   # for summary() 

        if self._bdp_in[1] != None:                                      # check if CubeStats_BDP
            #print "BDP[1] type: ",self._bdp_in[1]._type
            if self._bdp_in[1]._type != bt.CUBESTATS_BDP:
                raise Exception,"bdp_in[1] not a CubeStats_BDP, should never happen"
            # a table (cubestats)
            b1s = self._bdp_in[1]
            pos.append(b1s.maxpos[0])
            pos.append(b1s.maxpos[1])
            logging.info('CubeStats::maxpos,val=%s,%f' % (str(b1s.maxpos),b1s.maxval))
            cmean  = b1s.mean
            csigma = b1s.sigma
            dt.tag("CubeStats-pos")
            
        if self._bdp_in[2] != None:                                      # check if Moment_BDP (probably from CubeSum)
            #print "BDP[2] type: ",self._bdp_in[2]._type
            if self._bdp_in[2]._type != bt.MOMENT_BDP:
                raise Exception,"bdp_in[2] not a Moment_BDP, should never happen"
            b1m = self._bdp_in[2]
            fim = b1m.getimagefile(bt.CASA)
            pos1,maxval = self.maxpos_im(self.dir(fim))     # compute maxpos, since it is not in bdp (yet)
            logging.info('CubeSum::maxpos,val=%s,%f' % (str(pos1),maxval))
            pos.append(pos1[0])
            pos.append(pos1[1])
            dt.tag("Moment-pos")

        if self._bdp_in[3] != None:                                      # check if SourceList
            #print "BDP[3] type: ",self._bdp_in[3]._type
            # a table (SourceList)
            b1p = self._bdp_in[3]
            ra   = b1p.table.getFullColumnByName("RA")
            dec  = b1p.table.getFullColumnByName("DEC")
            peak = b1p.table.getFullColumnByName("Peak")
            if sources == []:
                # use the whole SourceList
                for (r,d,p) in zip(ra,dec,peak):
                  rdc = convert_sexa(r,d)
                  pos.append(rdc[0])
                  pos.append(rdc[1])
                  logging.info('SourceList::maxpos,val=%s,%f' % (str(rdc),p))
            else:                  
                # select specific ones from the source list
                for ipos in sources:
                    if ipos < len(ra):
                        radec =  convert_sexa(ra[ipos],dec[ipos])
                        pos.append(radec[0])
                        pos.append(radec[1])
                        logging.info('SourceList::maxpos,val=%s,%f' % (str(radec),peak[ipos]))
                    else:
                        logging.warning('Skipping illegal source number %d' % ipos)

            dt.tag("SourceList-pos")

        # if pos[] still blank, use the AT keyword.
        if len(pos) == 0:
            pos = self.getkey("pos")

        # if still none, try the map center
        if len(pos) == 0:
            # @todo  this could result in a masked pixel and cause further havoc
            # @todo  could also take the reference pixel, but that could be outside image
            taskinit.ia.open(self.dir(fin))
            s = taskinit.ia.summary()
            pos = [int(s['shape'][0])/2, int(s['shape'][1])/2]
            logging.warning("No input positions supplied, map center choosen: %s" % str(pos))
            dt.tag("map-center")

        # exhausted all sources where pos[] can be set; if still zero, bail out
        if len(pos) == 0:
            raise Exception,"No positions found from input BDP's or pos="

        # convert this regular list to a list of tuples with duplicates removed
        # sadly the order is lost.
        pos = list(set(zip(pos[0::2],pos[1::2])))
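        # illustrative (hypothetical) example: pos = [10, 20, 10, 20, 30, 40]
        # becomes [(10, 20), (30, 40)]; alternating values are paired into (x,y)
        # tuples and exact duplicates collapse, at the cost of the original order.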
        npos = len(pos)
        
        dt.tag("open")

        bdp_name = self.mkext(fin,"csp")
        b2 = CubeSpectrum_BDP(bdp_name)
        self.addoutput(b2)

        imval  = range(npos)                             # spectra, one for each pos (placeholder)
        planes = range(npos)                             # labels for the tables (placeholder)
        images = {}                                      # png's accumulated

        for i in range(npos):                            # loop over pos, they can have mixed types now
            sd = []
            caption = "Spectrum"
            xpos = pos[i][0]
            ypos = pos[i][1]
            if type(xpos) != type(ypos):
                print "POS:",xpos,ypos
                raise Exception,"position pair not of the same type"
            if type(xpos)==int:
                # for integers, boxes are allowed, even multiple
                box = '%d,%d,%d,%d' % (xpos,ypos,xpos,ypos)
                # convention for summary is (box)
                cbox = '(%d,%d,%d,%d)' % (xpos,ypos,xpos,ypos)
                # use extend here, not append, we want individual values in a list
                sd.extend([xpos,ypos,cbox])
                caption = "Average Spectrum at %s" % cbox
                if False:
                    # this will fail on 3D cubes (see CAS-7648)
                    imval[i] = casa.imval(self.dir(fin),box=box)
                else:
                    # work around that CAS-7648 bug 
                    # another approach is the ia.getprofile(), see CubeStats, this will
                    # also integrate over regions, imval will not (!!!)
                    region = 'centerbox[[%dpix,%dpix],[1pix,1pix]]' % (xpos,ypos)
                    caption = "Average Spectrum at %s" % region
                    imval[i] = casa.imval(self.dir(fin),region=region)
            elif type(xpos)==str:
                # this is tricky: stay under 1 pixel, or you get a 2x2 region back.
                region = 'centerbox[[%s,%s],[1pix,1pix]]' % (xpos,ypos)
                caption = "Average Spectrum at %s" % region
                sd.extend([xpos,ypos,region])
                imval[i] = casa.imval(self.dir(fin),region=region)
            else:
                print "Data type: ",type(xpos)
                raise Exception,"Data type for region not handled"
            dt.tag("imval")

            flux  = imval[i]['data']
            if len(flux.shape) > 1:     # rare case if we step on a boundary between cells?
                logging.warning("source %d has spectrum shape %s: averaging the spectra" % (i,repr(flux.shape)))
                flux = np.average(flux,axis=0)
            logging.debug('minmax: %f %f %d' % (flux.min(),flux.max(),len(flux)))
            smax.append(flux.max())
            if i==0:                                              # for first point record few extra things
                if len(imval[i]['coords'].shape) == 2:                   # normal case: 1 pixel
                    freqs = imval[i]['coords'].transpose()[2]/1e9        # convert to GHz  @todo: input units ok?
                elif len(imval[i]['coords'].shape) == 3:                 # rare case if > 1 point in imval()
                    freqs = imval[i]['coords'][0].transpose()[2]/1e9     # convert to GHz  @todo: input units ok?
                else:
                    logging.fatal("bad shape %s in freq return from imval - SHOULD NEVER HAPPEN" % imval[i]['coords'].shape)
                chans = np.arange(len(freqs))                     # channels 0..nchans-1
                unit  = imval[i]['unit']
                restfreq = casa.imhead(self.dir(fin),mode="get",hdkey="restfreq")['value']/1e9    # in GHz
                dt.tag("imhead")
                vel   = (1-freqs/restfreq)*utils.c                #  @todo : use a function (and what about relativistic?)
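                # note: this is the radio velocity convention, v = c*(1 - f/f0);
                # the optical convention would be v = c*(f0/f - 1), and the two
                # increasingly diverge away from the rest frequency.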

            # construct the Table for CubeSpectrum_BDP 
            # @todo note data needs to be a tuple, later to be column_stack'd
            labels = ["channel" ,"frequency" ,"flux" ]
            units  = ["number"  ,"GHz"       ,unit   ]
            data   = (chans     ,freqs       ,flux   )

            if i==0:
                # plane 0 : we are allowing a multiplane table, so the first plane is special
                table = Table(columns=labels,units=units,data=np.column_stack(data),planes=["0"])
            else:
                # planes 1,2,3.... are stacked onto the previous one
                table.addPlane(np.column_stack(data),"%d" % i)

            # example plot , one per position for now
            if use_vel:
                x = vel
                xlab = 'VLSR (km/s)'
            else:
                x = chans
                xlab  = 'Channel'
            y = [flux]
            sd.append(xlab)
            if type(xpos)==int:
                # grab the RA/DEC... kludgy
                h = casa.imstat(self.dir(fin),box=box)
                ra  = h['blcf'].split(',')[0]
                dec = h['blcf'].split(',')[1]
                title = '%s %d @ %d,%d = %s,%s' % (bdp_name,i,xpos,ypos,ra,dec)
            else:
                title = '%s %d @ %s,%s' % (bdp_name,i,xpos,ypos)       # or use box, once we allow non-points

            myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode, abspath=self.dir())
            ylab  = 'Flux (%s)' % unit
            p1 = "%s_%d" % (bdp_name,i)
            myplot.plotter(x,y,title,p1,xlab=xlab,ylab=ylab,thumbnail=True)
            # Why not use p1 as the key?
            ii = images["pos%d" % i] = myplot.getFigure(figno=myplot.figno,relative=True)
            thumbname = myplot.getThumbnail(figno=myplot.figno,relative=True)
            sd.extend([ii, thumbname, caption, fin])
            self.spec_description.append(sd)

        logging.regression("CSP: %s" % str(smax))

        image = Image(images=images, description="CubeSpectrum")
        b2.setkey("image",image)
        b2.setkey("table",table)
        b2.setkey("sigma",csigma)   # TODO: not always available
        b2.setkey("mean",cmean)     # TODO: not always available

        if True:
            #       @todo     only first plane due to limitation in exportTable()
            islash = bdp_name.find('/')
            if islash < 0:
                tabname = self.dir("testCubeSpectrum.tab")
            else:
                tabname = self.dir(bdp_name[:islash] + "/testCubeSpectrum.tab")
            table.exportTable(tabname,cols=["frequency" ,"flux"])
        dt.tag("done")
        # For a single spectrum this is
        # SummaryEntry([[data for spec1]], "CubeSpectrum_AT",taskid)
        # For multiple spectra this is
        # SummaryEntry([[data for spec1],[data for spec2],...], "CubeSpectrum_AT",taskid)
        self._summary["spectra"] = SummaryEntry(self.spec_description,"CubeSpectrum_AT",self.id(True))
        taskargs = "pos="+str(pos)
        taskargs += '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; <span style="background-color:white">&nbsp;' + fin.split('/')[0] + '&nbsp;</span>'
        for v in self._summary:
            self._summary[v].setTaskArgs(taskargs)
        dt.tag("summary")
        dt.end()
Exemplo n.º 16
0
    def run(self):
        dt = utils.Dtime("PVCorr")
        self._summary = {}

        numsigma = self.getkey("numsigma")
        mode = 1  # PV corr mode (1,2,3)
        normalize = True
        # normalize = False

        b1 = self._bdp_in[0]  # PVSlice_BDP
        fin = b1.getimagefile(bt.CASA)  # CASA image
        data = casautil.getdata_raw(
            self.dir(fin))  # grab the data as a numpy array
        self.myplot = APlot(ptype=self._plot_type,
                            pmode=self._plot_mode,
                            abspath=self.dir())
        #print 'DATA[0,0]:',data[0,0]
        #print 'pv shape: ',data.shape
        npos = data.shape[0]
        nvel = data.shape[1]
        dt.tag("getdata")

        b2 = self._bdp_in[1]  # CubeStats_BDP
        sigma = b2.sigma  # global sigma in the cube
        cutoff = numsigma * sigma
        freq = b2.table.getColumnByName("frequency")

        chans = self.getkey("range")  # range of channels, if used
        if len(chans) > 0:
            if len(chans) != 2:
                logging.fatal("range=%s" % chans)
                raise Exception, "range= needs two values, left and right (inclusive) channel"
            ch0 = chans[0]
            ch1 = chans[1]
        else:
            nchan = self.getkey("nchan")
            imstat0 = casa.imstat(self.dir(fin))  # @todo   can use data[] now
            xmaxpos = imstat0['maxpos'][0]
            ymaxpos = imstat0['maxpos'][1]
            logging.info("MAXPOS-VEL %s %g" %
                         (str(imstat0['maxpos']), imstat0['max'][0]))
            if nchan > 0:
                # expand around it, later ch0,ch1 will be checked for running off the edge
                ch0 = ymaxpos - nchan / 2
                ch1 = ymaxpos + nchan / 2
            else:
                # watershed down to find ch0 and ch1 ?
                # this doesn't work well in crowded areas
                ch0 = ymaxpos
                ch1 = ymaxpos
                spmax = data.max(axis=0)
                k = spmax.argmax()
                n = len(spmax)
                logging.debug('spmax %s %d %g' %
                              (str(spmax.shape), k, spmax[k]))
                # find lower cutoff
                for i in range(n):
                    ch0 = ymaxpos - i
                    if ch0 < 0: break
                    if spmax[ch0] < cutoff: break
                ch0 = ch0 + 1
                # find higher cutoff
                for i in range(n):
                    ch1 = ymaxpos + i
                    if ch1 == n: break
                    if spmax[ch1] < cutoff: break
                ch1 = ch1 - 1
            dt.tag("imstat")

        bdp_name = self.mkext(fin, "pvc")  # output PVCorr_BDP
        b3 = PVCorr_BDP(bdp_name)
        self.addoutput(b3)

        if ch0 < 0 or ch1 >= nvel:
            # this probably only happens to small cubes (problematic for PVCorr)
            # or when the strongest line is really close to the edge of the band
            # (which is probably ok)
            if ch0 < 0 and ch1 >= nvel:
                logging.warning("Serious issues with the size of this cube")
            if ch0 < 0:
                logging.warning("Resetting ch0 edge to 0")
                ch0 = 0
            if ch1 >= nvel:
                ch1 = nvel - 1
                logging.warning("Resetting ch1 edge to the maximum")

        if ch0 > ch1:
            logging.warning("Sanity swapping ch0,1 due to likely noisy data")
            ch0, ch1 = ch1, ch0

        if mode == 1:
            out, rms = mode1(data, ch0, ch1, cutoff, normalize)
            corr = out
        elif mode == 2:
            out, rms = mode2(data, ch0, ch1, cutoff)  # slower 2D version
            corr = out[npos / 2, :]  # center cut, but could also try feature detection
        elif mode == 3:
            out, rms = self.mode3(data, ch0, ch1,
                                  cutoff)  # Doug's faster 2D version
            # get the peak of each column
            corr = np.amax(out, axis=0)
        # print "PVCORR SHAPE ",corr.shape," mode", mode
        if len(corr) > 0:
            # print "SHAPE out:",out.shape,corr.shape,npos/2
            ch = range(len(corr))
            if len(corr) != len(freq):
                logging.fatal("ch (%d) and freq (%d) do not have same size" %
                              (len(corr), len(freq)))
                raise Exception, "ch and freq do not have same dimension"
            dt.tag("mode")
            labels = ["channel", "frequency", "pvcorr"]
            units = ["number", "GHz", "N/A"]
            data = (ch, freq, corr)
            table = Table(columns=labels,
                          units=units,
                          data=np.column_stack(data))
        else:
            # still construct a table, but with no rows
            labels = ["channel", "frequency", "pvcorr"]
            units = ["number", "GHz", "N/A"]
            table = Table(columns=labels, units=units)
        b3.setkey("table", table)
        b3.setkey("sigma", float(rms))
        dt.tag("table")
        if len(corr) > 0:
            table.exportTable(self.dir("testPVCorr.tab"),
                              cols=['frequency', 'pvcorr'])
            test_single(ch, freq, corr)

            logging.regression("PVC: %f %f" % (corr.min(), corr.max()))

            title = 'PVCorr mode=%d [%d,%d] %g' % (mode, ch0, ch1, cutoff)
            x = ch
            xlab = 'Channel'
            y = [corr]
            ylab = 'PV Correlation'
            p1 = "%s_%d" % (bdp_name, 0)
            segp = []
            segp.append([0, len(ch), 0.0, 0.0])
            segp.append([0, len(ch), 3.0 * rms, 3.0 * rms])
            # @todo:   in principle we know with given noise and  size of box, what the sigma in pvcorr should be
            self.myplot.plotter(x,
                                y,
                                title,
                                figname=p1,
                                xlab=xlab,
                                ylab=ylab,
                                segments=segp,
                                thumbnail=True)

            #out1 = np.rot90 (data.reshape((nvel,npos)) )
            if mode > 1:
                self.myplot.map1(data=out,
                                 title="testing PVCorr_AT:  mode%d" % mode,
                                 figname='testPVCorr',
                                 thumbnail=True)

            taskargs = "numsigma=%.1f range=[%d,%d]" % (numsigma, ch0, ch1)
            caption = "Position-velocity correlation plot"
            thumbname = self.myplot.getThumbnail(figno=self.myplot.figno,
                                                 relative=True)
            figname = self.myplot.getFigure(figno=self.myplot.figno,
                                            relative=True)
            image = Image(images={bt.PNG: figname},
                          thumbnail=thumbname,
                          thumbnailtype=bt.PNG,
                          description=caption)
            b3.image.addimage(image, "pvcorr")

            self._summary["pvcorr"] = SummaryEntry(
                [figname, thumbname, caption, fin], "PVCorr_AT", self.id(True),
                taskargs)
        else:
            self._summary["pvcorr"] = None
            logging.warning("No summary")
            logging.regression("PVC: -1")

        dt.tag("done")
        dt.end()
Exemplo n.º 17
0
    def run(self):
        """ The run method, calculates the moments and creates the BDP(s)

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        self._summary = {}
        momentsummary = []
        dt = utils.Dtime("Moment")

        # variable to track if we are using a single cutoff for all moment maps
        allsame = False
        moments = self.getkey("moments")
        numsigma = self.getkey("numsigma")
        mom0clip = self.getkey("mom0clip")
        # determine if there is only 1 cutoff or if there is a cutoff for each moment
        if len(moments) != len(numsigma):
            if len(numsigma) != 1:
                raise Exception("Length of numsigma and moment lists do not match. They must be the same length or the length of the cutoff list must be 1.")
            allsame = True
        # default moment file extensions; this information is copied from casa.immoments()
        momentFileExtensions = {-1: ".average",
                                 0: ".integrated",
                                 1: ".weighted_coord",
                                 2: ".weighted_dispersion_coord",
                                 3: ".median",
                                 4: "",
                                 5: ".standard_deviation",
                                 6: ".rms",
                                 7: ".abs_mean_dev",
                                 8: ".maximum",
                                 9: ".maximum_coord",
                                10: ".minimum",
                                11: ".minimum_coord",
                                }
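        # Illustrative note: with e.g. basename "x.mom" and moments=[0, 1],
        # casa.immoments() would write "x.mom.integrated" and "x.mom.weighted_coord";
        # the loop further down renames these to "x.mom_0" and "x.mom_1".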

        logging.debug("MOMENT: %s %s %s" %  (str(moments), str(numsigma), str(allsame)))

        # get the input casa image from bdp[0]
        # also get the channels the line actually covers (if any)
        bdpin = self._bdp_in[0]
        infile = bdpin.getimagefile(bt.CASA)
        chans = self.getkey("chans")
        # the basename of the moments; we will append _0, _1, etc.
        basename = self.mkext(infile, "mom")
        fluxname = self.mkext(infile, "flux")
        # beamarea = nppb(self.dir(infile))
        beamarea = 1.0  # until we have it from the MOM0 map

        sigma0 = self.getkey("sigma")
        sigma  = sigma0

        dt.tag("open")

        # if no CubeStats BDP was given and no sigma was specified, we cannot
        # determine a noise level, so bail out; otherwise take sigma from the CubeStats BDP
        if self._bdp_in[1] is None and sigma <= 0.0:
            raise Exception("A sigma or a CubeStats_BDP must be input to calculate the cutoff")
        elif self._bdp_in[1] is not None:
            sigma = self._bdp_in[1].get("sigma")

        # immoments is a bit peculiar: if you give one moment, it will use
        # exactly the outfile you picked; for multiple moments it will append
        # extensions such as .integrated [0], .weighted_coord [1], etc.
        # We loop over the moments and use a numeric extension instead.
        # This can be a laborious loop for big input cubes.
        #
        # arguments for immoments
        args = {"imagename" : self.dir(infile),
                "moments"   : moments,
                "outfile"   : self.dir(basename)}

        # set the channels if given
        if chans != "":
            args["chans"] = chans
        # error check the mom0clip input
        if mom0clip > 0.0 and 0 not in moments:
            logging.warning("mom0clip given, but no moment0 map was requested. One will be generated anyway.")
            # add moment0 to the list of computed moments, but it has to be first
            moments.insert(0,0)
            if not allsame:
                numsigma.insert(0, 2.0*sigma)

        if allsame:
            # this is only executed now if len(moments) > 1 and len(cutoff)==1
            args["excludepix"] = [-numsigma[0] * sigma, numsigma[0] * sigma]
            casa.immoments(**args)
            dt.tag("immoments-all")
        else:
            # this is executed if len(moments) == len(cutoff), even when len == 1
            for i in range(len(moments)):
                args["excludepix"] = [-numsigma[i] * sigma, numsigma[i] * sigma]
                args["moments"] = moments[i]
                args["outfile"] = self.dir(basename + momentFileExtensions[moments[i]])
                casa.immoments(**args)
                dt.tag("immoments-%d" % moments[i])

        taskargs = "moments=%s numsigma=%s" % (str(moments), str(numsigma)) 
        if sigma0 > 0:
            taskargs = taskargs + " sigma=%.2f" % sigma0
        if mom0clip > 0:
            taskargs = taskargs + " mom0clip=%g" % mom0clip
        if chans == "": 
            taskargs = taskargs + " chans=all"
        else:
            taskargs = taskargs + " chans=%s" % str(chans)
        taskargs += '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; <span style="background-color:white">&nbsp;' + basename.split('/')[0] + '&nbsp;</span>'

        # generate the mask to be applied to all but moment 0
        if mom0clip > 0.0:
            # get the statistics from mom0 map
            # this is usually a very biased map, so it is unclear whether mom0sigma is all that reliable
            args = {"imagename": self.dir(infile)}
            stat = casa.imstat(imagename=self.dir(basename + momentFileExtensions[0]))
            mom0sigma = float(stat["sigma"][0])
            # generate a temporary masked file, mask will be copied to other moments
            args = {"imagename" : self.dir(basename + momentFileExtensions[0]),
                    "expr"      : 'IM0[IM0>%f]' % (mom0clip * mom0sigma),
                    "outfile"   : self.dir("mom0.masked")
                    }
            casa.immath(**args)
            # get the default mask name
            taskinit.ia.open(self.dir("mom0.masked"))
            defmask = taskinit.ia.maskhandler('default')
            taskinit.ia.close()
            dt.tag("mom0clip")

        # loop over moments to rename them to _0, _1, _2 etc.
        # apply a mask as well for proper histogram creation
        map = {}
        myplot = APlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())
        implot = ImPlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())

        for mom in moments:
            figname = imagename = "%s_%i" % (basename, mom)
            tempname = basename + momentFileExtensions[mom]
            # rename and remove the old one if there is one
            utils.rename(self.dir(tempname), self.dir(imagename))
            # copy the moment 0 mask if requested; this relies on moment 0 having been computed first
            if mom0clip > 0.0 and mom != 0:
                #print "PJT: output=%s:%s" % (self.dir(imagename), defmask[0])
                #print "PJT: inpmask=%s:%s" % (self.dir("mom0.masked"),defmask[0])
                makemask(mode="copy", inpimage=self.dir("mom0.masked"),
                         output="%s:%s" % (self.dir(imagename), defmask[0]),
                         overwrite=True, inpmask="%s:%s" % (self.dir("mom0.masked"),
                                                            defmask[0]))
                taskinit.ia.open(self.dir(imagename))
                taskinit.ia.maskhandler('set', defmask)
                taskinit.ia.close()
                dt.tag("makemask")
            if mom == 0:
                beamarea = nppb(self.dir(imagename))
            implot.plotter(rasterfile=imagename,figname=figname,colorwedge=True)
            imagepng  = implot.getFigure(figno=implot.figno,relative=True)
            thumbname = implot.getThumbnail(figno=implot.figno,relative=True)
            images = {bt.CASA : imagename, bt.PNG  : imagepng}
            thumbtype=bt.PNG
            dt.tag("implot")

            # get the data for a histogram (ia access is about 1000-2000x faster than imval())
            map[mom] = casautil.getdata(self.dir(imagename))
            data = map[mom].compressed()
            dt.tag("getdata")

            # make the histogram plot

            # get the label for the x axis
            bunit = casa.imhead(imagename=self.dir(imagename), mode="get", hdkey="bunit")
            # object for the caption
            objectname = casa.imhead(imagename=self.dir(imagename), mode="get", hdkey="object")

            # Make the histogram plot
            # Since we give abspath in the constructor, figname should be relative
            auxname = imagename + '_histo'
            auxtype = bt.PNG
            myplot.histogram(columns = data,
                             figname = auxname,
                             xlab    = bunit,
                             ylab    = "Count",
                             title   = "Histogram of Moment %d: %s" % (mom, imagename), thumbnail=True)

            casaimage = Image(images        = images,
                              auxiliary     = auxname,
                              auxtype       = auxtype,
                              thumbnail     = thumbname,
                              thumbnailtype = thumbtype)
            auxname = myplot.getFigure(figno=myplot.figno,relative=True)
            auxthumb = myplot.getThumbnail(figno=myplot.figno,relative=True)

            if hasattr(self._bdp_in[0], "line"):   # SpwCube doesn't have Line
                line = deepcopy(getattr(self._bdp_in[0], "line"))
                if not isinstance(line, Line):
                    line = Line(name="Unidentified")
            else:
                # fake a Line if there wasn't one
                line = Line(name="Unidentified")
            # add the BDP to the output array
            self.addoutput(Moment_BDP(xmlFile=imagename, moment=mom,
                           image=deepcopy(casaimage), line=line))
            dt.tag("ren+mask_%d" % mom)

            imcaption = "%s Moment %d map of Source %s" % (line.name, mom, objectname)
            auxcaption = "Histogram of %s Moment %d of Source %s" % (line.name, mom, objectname)
            thismomentsummary = [line.name, mom, imagepng, thumbname, imcaption,
                                 auxname, auxthumb, auxcaption, infile]
            momentsummary.append(thismomentsummary)

        if 0 in map and 1 in map and 2 in map:
            logging.debug("MAPs present: %s" % (map.keys()))

            # m0 needs a new mask, inherited from the more restricted m1 (and m2)
            m0 = ma.masked_where(map[1].mask,map[0])
            m1 = map[1]
            m2 = map[2]
            m01 = m0*m1
            m02 = m0*m1*m1
            m22 = m0*m2*m2
            sum0 = m0.sum()
            vmean = m01.sum()/sum0
            # lacking the full 3D cube, compute two dispersion estimates; sig1 is used below
            sig1  = math.sqrt(m02.sum()/sum0 - vmean*vmean)
            sig2  = m2.max()
            #vsig = max(sig1,sig2)
            vsig = sig1
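            # Illustrative note, assuming map[0], map[1], map[2] are the moment 0
            # (integrated intensity), moment 1 (velocity field) and moment 2
            # (dispersion) maps:
            #   vmean = sum(M0*M1) / sum(M0)                      (intensity-weighted mean velocity)
            #   sig1  = sqrt( sum(M0*M1**2)/sum(M0) - vmean**2 )  (weighted velocity spread)
            #   sig2  = peak of the moment 2 map
            # sig1 is adopted as vsig here; m22 is computed but not used.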
            
            # consider clipping in the masked array (mom0clip)
            # @todo   I can't use info from line, so just borrow basename for now for grepping;
            #         this also isn't really the flux, since the points per beam are still in there
            loc = basename.rfind('/')
            sum1 = ma.masked_less(map[0],0.0).sum()   # mom0clip
            # print out:   LINE,FLUX1,FLUX0,BEAMAREA,VMEAN,VSIGMA for regression
            # the linechans parameter in bdpin is not useful to print out here; it's local to the LineCube
            s_vlsr = admit.Project.summaryData.get('vlsr')[0].getValue()[0]
            s_rest = admit.Project.summaryData.get('restfreq')[0].getValue()[0]/1e9
            s_line = line.frequency
            if loc>0:
                if basename[:loc][0:2] == 'U_':
                    # for U_ lines we'll reference the VLSR w.r.t. RESTFREQ in that band
                    if abs(vmean) > vsig:
                        vwarn = '*'
                    else:
                        vwarn = ''
                    vlsr = vmean + (1.0-s_line/s_rest)*utils.c
                    msg = "MOM0FLUX: %s %g %g %g %g %g %g" % (basename[:loc],map[0].sum(),sum0,beamarea,vmean,vlsr,vsig)
                else:
                    # for identified lines we'll assume the ID was correct and not bother with RESTFREQ
                    msg = "MOM0FLUX: %s %g %g %g %g %g %g" % (basename[:loc],map[0].sum(),sum0,beamarea,vmean,vmean,vsig)
            else:
                msg = "MOM0FLUX: %s %g %g %g %g %g %g" % ("SPW_FULL"    ,map[0].sum(),sum0,beamarea,vmean,vmean,vsig)
            logging.regression(msg)
            dt.tag("mom0flux")

            # create a histogram of flux per channel

            # grab the X coordinates for the histogram, we want them in km/s
            # restfreq should also be in summary
            restfreq = casa.imhead(self.dir(infile),mode="get",hdkey="restfreq")['value']/1e9    # in GHz
            # print "PJT  %.10f %.10f" % (restfreq,s_rest)
            imval0 = casa.imval(self.dir(infile))
            freqs = imval0['coords'].transpose()[2]/1e9
            x = (1-freqs/restfreq)*utils.c
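            # Illustrative note: this is the radio Doppler convention,
            #   v = c * (1 - nu/nu_rest)
            # e.g. a channel at nu = 0.999*nu_rest maps to v ~ 0.001*c ~ 300 km/s
            # (assuming utils.c is in km/s, consistent with the km/s axis label below).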
            # 
            h = casa.imstat(self.dir(infile), axes=[0,1])
            if 'flux' in h:
                flux0 = h['flux']
            else:
                flux0 = h['sum']/beamarea
            flux0sum = flux0.sum() * abs(x[1]-x[0])
            # @todo   make a flux1 with fluxes derived from a good mask
            flux1 = flux0 
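            # Illustrative note: flux0 is the per-channel flux (from imstat's
            # 'flux' key when a beam is present, else sum/beamarea), and flux0sum
            # multiplies its total by the channel width |x[1]-x[0]| in km/s,
            # giving an integrated flux in Jy km/s (assuming flux0 is in Jy).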
            # construct histogram
            title = 'Flux Spectrum (%g)' % flux0sum
            xlab = 'VLSR (km/s)'
            ylab = 'Flux (Jy)'
            myplot.plotter(x,[flux0,flux1],title=title,figname=fluxname,xlab=xlab,ylab=ylab,histo=True)
            dt.tag("flux-spectrum")
            
        self._summary["moments"] = SummaryEntry(momentsummary, "Moment_AT", 
                                                self.id(True), taskargs)
        # get rid of the temporary mask
        if mom0clip > 0.0: 
            utils.rmdir(self.dir("mom0.masked"))

        dt.tag("done")
        dt.end()