Example 1
def make_SBimage(image='',cellsize=None,beamsize=None,distance=None,
                 extension='.image'):
    from casa import immath,imhead

    beamarea = 1.13*beamsize**2/cellsize**2
    massCoeff = 2.36e5 * distance**2 / beamarea
    pcPerArcsec = distance *1e6 / 206265.
    pcPerPixel = cellsize * pcPerArcsec
    pixSB = massCoeff / pcPerPixel**2 # scale factor so the output image is in Msun per pc^2

    immath(imagename=image + extension,
       outfile=image + '.sb.image',
       mode='evalexpr',
       expr='IM0*' + str(pixSB))
    imhead(imagename=image + '.sb.image',
           mode='put',
           hdkey='bunit',
           hdvalue='Msun/pc^2',
           hdcomment='surface density')
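
A minimal usage sketch for the function above, assuming a CASA session where immath and imhead can be imported as shown; the image stem, cell size (arcsec), beam FWHM (arcsec) and distance (Mpc) are hypothetical values:

# hypothetical call: 0.5 arcsec pixels, 2 arcsec beam, source at 10 Mpc;
# reads 'ngc0000.image' and writes 'ngc0000.sb.image'
make_SBimage(image='ngc0000', cellsize=0.5, beamsize=2.0, distance=10.0)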
Example 2
def make_SBimage(image='',
                 cellsize=None,
                 beamsize=None,
                 distance=None,
                 extension='.image'):
    from casa import immath, imhead

    beamarea = 1.13 * beamsize**2 / cellsize**2
    massCoeff = 2.36e5 * distance**2 / beamarea
    pcPerArcsec = distance * 1e6 / 206265.
    pcPerPixel = cellsize * pcPerArcsec
    pixSB = massCoeff / pcPerPixel**2  # scale factor so the output image is in Msun per pc^2

    immath(imagename=image + extension,
           outfile=image + '.sb.image',
           mode='evalexpr',
           expr='IM0*' + str(pixSB))
    imhead(imagename=image + '.sb.image',
           mode='put',
           hdkey='bunit',
           hdvalue='Msun/pc^2',
           hdcomment='surface density')
Example 3
def nppb(image):
    """work out the flux correction, number of points per beam """
    if True:
        # more expensive, but works for non-ALMA data
        # needs to be done on the MOM0 map
        s = casa.imstat(image)
        if s.has_key('flux'):
            beamarea = s['sum'][0]/s['flux'][0]
        else:
            beamarea = 1.0
        return beamarea
    else:
        h = casa.imhead(image, mode='list')
        # @todo should we not use the casa units for this?
        try:
            bmin = h['beamminor']['value']    # beam in arcsec (not always true)
            bmaj = h['beammajor']['value'] 
            cdelt1 = h['cdelt1'] * 206265.0   # cdelt in radians
            cdelt2 = h['cdelt2'] * 206265.0
            return abs(1.13309 * bmaj * bmin / (cdelt1 * cdelt2))
        except:
            return 1.0
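
A minimal usage sketch, assuming the same casa module used above is available and that a hypothetical moment-0 map exists on disk:

# hypothetical file name; dividing a summed Jy/beam map by this factor gives Jy
beamarea = nppb('ngc0000.mom0.image')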
Example 4
    def run(self):
        """ The run method, creates the slices, regrids if requested, and 
            creates the BDP(s)

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        dt = utils.Dtime("LineCube")
        self._summary = {}
        # look for an input noise level, either through keyword or input
        # CubeStats BDP or calculate it if needed
        pad = self.getkey("pad")
        fpad = self.getkey("fpad")
        equalize = self.getkey("equalize")
        minchan = 0

        linelist = self._bdp_in[1]
        if linelist == None or len(linelist) == 0:
            logging.info("No lines found in input LineList_BDP, exiting.")
            return

        spw = self._bdp_in[0]
        # get the columns from the table
        cols = linelist.table.getHeader()
        # get the casa image
        imagename = spw.getimagefile(bt.CASA)
        imh = imhead(self.dir(imagename), mode='list')
        # set the overall parameters for imsubimage
        args = {"imagename": self.dir(imagename), "overwrite": True}

        dt.tag("start")

        nchan = imh['shape'][2]          # number of channels; needed below even without padding
        if pad != 0 or fpad > 0:
            dt.tag("pad")

        # if equal size cubes are requested, this will honor the requested pad
        if equalize:
            start = linelist.table.getColumnByName("startchan")
            end = linelist.table.getColumnByName("endchan")
            # look for the widest line
            for i in range(len(start)):
                diff = end[i] - start[i] + 1
                if fpad > 0:
                    minchan = max(minchan, diff * int(1 + 2 * fpad))
                else:
                    minchan = max(minchan, diff + (2 * pad))
            dt.tag("equalize")

        # get all of the rows in the table
        rows = linelist.getall()
        delrow = set()
        procblend = [0]
        # search through looking for blended lines, leave only the strongest from each blend
        # in the list
        for i, row in enumerate(rows):
            if row.blend in procblend:
                continue
            strongest = -100.
            index = -1
            indexes = []
            blend = row.blend
            for j in range(i, len(rows)):
                if rows[j].blend != blend:
                    continue
                indexes.append(j)
                if rows[j].linestrength > strongest:
                    strongest = rows[j].linestrength
                    index = j
            indexes.remove(index)
            delrow = delrow | set(indexes)
            procblend.append(blend)
        dr = list(delrow)
        dr.sort()
        dr.reverse()
        for row in dr:
            del rows[row]

        # check on duplicate UID's, since those are the directory names here
        uid1 = []
        for row in rows:
            uid1.append(row.getkey("uid"))
        uid2 = set(uid1)
        if len(uid1) != len(uid2):
            print "LineList:", uid1
            logging.warning("There are duplicate names in the LineList")
            #raise Exception,"There are duplicate names in the LineList"

        # Create Summary table
        lc_description = admit.util.Table()
        lc_description.columns = [
            "Line Name", "Start Channel", "End Channel", "Output Cube"
        ]
        lc_description.units = ["", "int", "int", ""]
        lc_description.description = "Parameters of Line Cubes"
        # loop over all entries in the line list
        rdata = []
        for row in rows:
            uid = row.getkey("uid")
            cdir = self.mkext(imagename, uid)
            self.mkdir(cdir)
            basefl = uid
            lcd = [basefl]
            outfl = cdir + os.sep + "lc.im"
            args["outfile"] = self.dir(outfl)
            start = row.getkey("startchan")
            end = row.getkey("endchan")
            diff = end - start + 1
            startch = 0
            if diff < minchan:
                add = int(math.ceil(float(minchan - diff) / 2.0))
                start -= add
                end += add
                startch += add
                if start < 0:
                    logging.info(
                        ("%s is too close to the edge to encompass with the "
                         "requested channels, start=%d resetting to 0") %
                        (uid, start))
                    startch += abs(start)
                    start = 0
                if end >= nchan:
                    logging.info(
                        ("%s is too close to the edge to encompass with the "
                         "requested channels, end=%d resetting to %d") %
                        (uid, end, nchan - 1))
                    end = nchan - 1
                #print "\n\nDIFF ",startch,"\n\n"
            if not equalize:
                if fpad > 0:
                    diff = end - start + 1
                    start -= int(fpad * diff)
                    end += int(fpad * diff)
                    if start < 0:
                        logging.warning(
                            "fpad=%d too large, start=%d resetting to 0" %
                            (int(fpad * diff), start))
                        startch += abs(start)
                        start = 0
                    else:
                        startch += int(fpad * diff)
                    if end >= nchan:
                        logging.warning(
                            "fpad=%d too large, end=%d resetting to %d" %
                            (int(fpad * diff), end, nchan - 1))
                        end = nchan - 1
                elif pad > 0:
                    start -= pad
                    end += pad
                    if start < 0:
                        logging.warning(
                            "pad=%d too large, start=%d resetting to 0" %
                            (pad, start))
                        startch += abs(start)
                        start = 0
                    else:
                        startch += pad
                    if end >= nchan:
                        logging.warning(
                            "pad=%d too large, end=%d resetting to %d" %
                            (pad, end, nchan - 1))
                        end = nchan - 1
                elif pad < 0:
                    mid = (start + end) / 2
                    start = mid + pad / 2
                    end = mid - pad / 2 - 1
                    if start < 0:
                        logging.warning(
                            "pad=%d too large, start=%d resetting to 0" %
                            (pad, start))
                        startch += abs(start)
                        start = 0
                    else:
                        startch += abs(start)
                    if end >= nchan:
                        logging.warning(
                            "pad=%d too large, end=%d resetting to %d" %
                            (pad, end, nchan - 1))
                        end = nchan - 1
            endch = startch + diff
            args["chans"] = "%i~%i" % (start, end)
            rdata.append(start)
            rdata.append(end)
            # for the summary, which will be a table of
            # Line name, start channel, end channel, output image
            lc_description.addRow([basefl, start, end, outfl])

            # create the slices
            imsubimage(**args)

            line = row.converttoline()
            # set the restfrequency of the output cube
            imhead(imagename=args["outfile"],
                   mode="put",
                   hdkey="restfreq",
                   hdvalue="%fGHz" % (row.getkey("frequency")))
            # set up the output BDP
            images = {bt.CASA: outfl}
            casaimage = Image(images=images)
            # note that Summary.getLineFluxes() implicitly relies on the BDP out order
            # being the same order as in the line list table.  If this is ever not
            # true, then Summary.getLineFluxes will mismatch BDPs and flux values.
            #self.addoutput(LineCube_BDP(xmlFile=cdir + os.sep + basefl + ".lc",
            self.addoutput(
                LineCube_BDP(xmlFile=outfl,
                             image=casaimage,
                             line=line,
                             linechans="%i~%i" % (startch, endch)))
            dt.tag("trans-%s" % cdir)

        logging.regression("LC: %s" % str(rdata))

        taskargs = "pad=%s fpad=%g equalize=%s" % (pad, fpad, equalize)

        self._summary["linecube"] = SummaryEntry(lc_description.serialize(),
                                                 "LineCube_AT", self.id(True),
                                                 taskargs)
        dt.tag("done")
        dt.end()
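
The blend handling in the loop above (keep only the strongest line of each blend, where blend 0 means "not blended") can be shown in isolation; a simplified, self-contained sketch using hypothetical (blend, linestrength) tuples in place of LineList table rows:

# each row is (blend_id, linestrength); blend 0 marks unblended lines
rows = [(0, 5.0), (1, 2.0), (1, 7.0), (0, 3.0), (2, 1.0), (2, 0.5)]

delrow = set()
procblend = [0]                                    # blend 0 is never pruned
for i, (blend, strength) in enumerate(rows):
    if blend in procblend:
        continue
    members = [j for j in range(i, len(rows)) if rows[j][0] == blend]
    keep = max(members, key=lambda j: rows[j][1])  # strongest member of the blend
    delrow |= set(members) - set([keep])
    procblend.append(blend)

for j in sorted(delrow, reverse=True):
    del rows[j]
# rows is now [(0, 5.0), (1, 7.0), (0, 3.0), (2, 1.0)]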
Example 5
# cl.addcomponent(dir=NewDir2, flux=7e9, fluxunit='Jy', freq=freq, shape='point')
# cl.addcomponent(dir=NewDir2,
#                 flux=1e10, fluxunit='Jy', freq=freq,
#                 shape="Gaussian", majoraxis=qa.toangle(str(2)+'deg'), minoraxis=qa.toangle(str(1)+'deg'), positionangle=qa.toangle(str(45)+'deg'))

cs=ia.coordsys()
cs.setunits(['rad','rad','','Hz'])
cell_rad=dres #qa.convert(qa.quantity("1arcmin"),"rad")['value']
cs.setincrement([-cell_rad,cell_rad],'direction')
cs.setreferencevalue([phaseCenter['m0']['value'], phaseCenter['m1']['value']],
                      type="direction")
cs.setrestfrequency(freq)


# add important header keywords
imhead(imagename=imageName, mode="put", hdkey="object", hdvalue="DRAGN")
imhead(imagename=imageName, mode="put", hdkey="imtype", hdvalue='Intensity')
imhead(imagename=imageName, mode="put", hdkey="observer", hdvalue="simulation")
imhead(imagename=imageName, mode="put", hdkey="date-obs", hdvalue="2023/03/15/00:00:00")
imhead(imagename=imageName, mode="put", hdkey="reffreqtype", hdvalue='TOPO')
imhead(imagename=imageName, mode="put", hdkey="restfreq", hdvalue=str(freq))
imhead(imagename=imageName, mode='list')
cs.setreferencevalue(str(freq)+'Hz', 'spectral')
Telescope='VLA' #or else it breaks and whines
cs.settelescope(Telescope)
ia.setcoordsys(cs.torecord())
ia.setbrightnessunit("Jy/pixel")
Example 6
    def run(self):
        """ The run method creates the BDP

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        dt = utils.Dtime("CubeSum")              # tagging time
        self._summary = {}                       # an ADMIT summary will be created here
 
        numsigma = self.getkey("numsigma")       # get the input keys
        sigma = self.getkey("sigma")
        use_lines = self.getkey("linesum")
        pad = self.getkey("pad") 

        b1  = self._bdp_in[0]                    # spw image cube
        b1a = self._bdp_in[1]                    # cubestats (optional)
        b1b = self._bdp_in[2]                    # linelist  (optional)

        f1 =  b1.getimagefile(bt.CASA)
        taskinit.ia.open(self.dir(f1))
        s = taskinit.ia.summary()
        nchan = s['shape'][2]

        if b1b != None:
            ch0 = b1b.table.getFullColumnByName("startchan")
            ch1 = b1b.table.getFullColumnByName("endchan")
            s = Segments(ch0,ch1,nchan=nchan)
            # @todo something isn't merging here as i would have expected,
            #       e.g. test0.fits [(16, 32), (16, 30), (16, 29)]
            if pad > 0:
                for (c0,c1) in s.getsegmentsastuples():
                    s.append([c0-pad,c0])
                    s.append([c1,c1+pad])
            s.merge()
            s.recalcmask()
            # print "PJT segments:",s.getsegmentsastuples()
            ns = len(s.getsegmentsastuples())
            chans = s.chans(not use_lines)
            if use_lines:
                msum = s.getmask()
            else:
                msum = 1 - s.getmask()
            logging.info("Read %d segments" % ns)
            # print "chans",chans
            # print "msum",msum

        #  from a deprecated keyword, but kept here to pre-smooth the spectrum before clipping
        #  examples are:  ['boxcar',3]    ['gaussian',7]    ['hanning',5] 
        smooth= []
                
        sig_const = False                        # figure out if sigma is taken as constant in the cube
        if b1a == None:                          # if no 2nd BDP was given, sigma needs to be specified 
            if sigma <= 0.0:
                raise Exception,"Neither user-supplied sigma nor CubeStats_BDP input given. One is required."
            else:
                sig_const = True                 # and is constant
        else:
            if sigma > 0:
                sigma = b1a.get("sigma")
                sig_const = True

        if sig_const:
            logging.info("Using constant sigma = %f" % sigma)
        else:
            logging.info("Using varying sigma per plane")

        infile = b1.getimagefile(bt.CASA)          # ADMIT filename of the image (cube)
        bdp_name = self.mkext(infile,'csm')        # morph to the new output name with replaced extension 'csm'
        image_out = self.dir(bdp_name)             # absolute filename
        
        args = {"imagename" : self.dir(infile)}    # assemble arguments for immoments()
        args["moments"] = 0                        # only need moments=0 (or [0] is ok as well)
        args["outfile"] = image_out                # note full pathname

        dt.tag("start")

        if sig_const:
            args["excludepix"] = [-numsigma*sigma, numsigma*sigma]        # single global sigma
            if b1b != None:
                # print "PJT: ",chans
                args["chans"] = chans
        else:
            # @todo    in this section bad channels can cause a fully masked cubesum = bad
            # cubestats input
            sigma_array = b1a.table.getColumnByName("sigma")              # channel dependent sigma
            sigma_pos = sigma_array[np.where(sigma_array>0)]
            smin = sigma_pos.min()
            smax = sigma_pos.max()
            logging.info("sigma varies from %f to %f" % (smin,smax))
            maxval = b1a.get("maxval")                                    # max in cube
            nzeros = len(np.where(sigma_array<=0.0)[0])                   # check bad channels
            if nzeros > 0:
                logging.warning("There are %d NaN channels " % nzeros)
                # raise Exception,"need to recode CubeSum or use constant sigma" 
            dt.tag("grab_sig")

            if len(smooth) > 0:
                # see also LineID and others
                filter = Filter1D.Filter1D(sigma_array,smooth[0],**Filter1D.Filter1D.convertargs(smooth))
                sigma_array = filter.run()
                dt.tag("smooth_sig")
            # create a CASA image copy for making the mirror sigma cube to mask against
            file = self.dir(infile)
            mask = file+"_mask"
            taskinit.ia.fromimage(infile=file, outfile=mask)
            nx = taskinit.ia.shape()[0]
            ny = taskinit.ia.shape()[1]
            nchan = taskinit.ia.shape()[2]
            taskinit.ia.fromshape(shape=[nx,ny,1])
            plane = taskinit.ia.getchunk([0,0,0],[-1,-1,0])     # convenience plane for masking operation
            dt.tag("mask_sig")

            taskinit.ia.open(mask) 
            dt.tag("open_mask")
              
            count = 0
            for i in range(nchan):
                if sigma_array[i] > 0:
                    if b1b != None:
                        if msum[i]:
                            taskinit.ia.putchunk(plane*0+sigma_array[i],blc=[0,0,i,-1])
                            count = count + 1
                        else:
                            taskinit.ia.putchunk(plane*0+maxval,blc=[0,0,i,-1])                            
                    else:
                        taskinit.ia.putchunk(plane*0+sigma_array[i],blc=[0,0,i,-1])
                        count = count + 1
                else:
                    taskinit.ia.putchunk(plane*0+maxval,blc=[0,0,i,-1])
            taskinit.ia.close()
            logging.info("%d/%d channels used for CubeSum" % (count,nchan))
            dt.tag("close_mask")

            names = [file, mask]
            tmp = file + '.tmp'
            if numsigma == 0.0:
                # hopefully this will also make use of the mask
                exp = "IM0[IM1<%f]" % (0.99*maxval)
            else:
                exp = "IM0[abs(IM0/IM1)>%f]" % (numsigma)
            # print "PJT: exp",exp
            casa.immath(mode='evalexpr', imagename=names, expr=exp, outfile=tmp) 
            args["imagename"] = tmp
            dt.tag("immath")

        casa.immoments(**args) 
        dt.tag("immoments")

        if sig_const is False:  
            # get rid of temporary files
            utils.remove(tmp)
            utils.remove(mask)

        # get the flux
        taskinit.ia.open(image_out)
        st = taskinit.ia.statistics()
        taskinit.ia.close()
        dt.tag("statistics")
        # report that flux, but there's no way to get the units from casa it seems
        # ia.summary()['unit'] is usually 'Jy/beam.km/s' for ALMA
        # imstat() does seem to know it.
        if st.has_key('flux'):
            rdata = [st['flux'][0],st['sum'][0]]
            logging.info("Total flux: %f (sum=%f)" % (st['flux'],st['sum']))
        else:
            rdata = [st['sum'][0]]
            logging.info("Sum: %f (beam parameters missing)" % (st['sum']))
        logging.regression("CSM: %s" % str(rdata))
            
        # Create two output images for html and their thumbnails, too
        implot = ImPlot(ptype=self._plot_type,pmode=self._plot_mode,abspath=self.dir())
        implot.plotter(rasterfile=bdp_name,figname=bdp_name,colorwedge=True)
        figname   = implot.getFigure(figno=implot.figno,relative=True)
        thumbname = implot.getThumbnail(figno=implot.figno,relative=True)
       
        dt.tag("implot")

        thumbtype = bt.PNG            # really should be correlated with self._plot_type!!

        # 2. Create a histogram of the map data
        # get the data for a histogram
        data = casautil.getdata(image_out,zeromask=True).compressed()
        dt.tag("getdata")

        # get the label for the x axis
        bunit = casa.imhead(imagename=image_out, mode="get", hdkey="bunit")

        # Make the histogram plot
        # Since we give abspath in the constructor, figname should be relative
        myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode,abspath=self.dir())
        auxname = bdp_name + "_histo"
        auxtype = bt.PNG  # really should be correlated with self._plot_type!!
        myplot.histogram(columns = data,
                         figname = auxname,
                         xlab    = bunit,
                         ylab    = "Count",
                         title   = "Histogram of CubeSum: %s" % (bdp_name),
                         thumbnail=True)
        auxname = myplot.getFigure(figno=myplot.figno,relative=True)
        auxthumb = myplot.getThumbnail(figno=myplot.figno,relative=True)

        images = {bt.CASA : bdp_name, bt.PNG : figname}
        casaimage = Image(images    = images,
                          auxiliary = auxname,
                          auxtype   = auxtype,
                          thumbnail = thumbname,
                          thumbnailtype = thumbtype)

        if hasattr(b1,"line"):                      # SpwCube doesn't have Line
            line = deepcopy(getattr(b1,"line"))
            if not isinstance(line, Line):
                line = Line(name="Undetermined")
        else:
            line = Line(name="Undetermined")    # fake a Line if there wasn't one

        self.addoutput(Moment_BDP(xmlFile=bdp_name,moment=0,image=deepcopy(casaimage),line=line))
        imcaption = "Integral (moment 0) of all emission in image cube"
        auxcaption = "Histogram of cube sum for image cube"
        taskargs = "numsigma=%.1f sigma=%g smooth=%s" % (numsigma, sigma, str(smooth))
        self._summary["cubesum"] = SummaryEntry([figname,thumbname,imcaption,auxname,auxthumb,auxcaption,bdp_name,infile],"CubeSum_AT",self.id(True),taskargs)
        
        dt.tag("done")
        dt.end()
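
In the constant-sigma branch above, excludepix=[-numsigma*sigma, numsigma*sigma] makes immoments ignore pixels within that range before summing; a minimal numpy sketch of the same clipping, with hypothetical cube values and a (nx, ny, nchan) axis order as assumed by the code:

import numpy as np

cube = np.random.normal(0.0, 0.1, size=(4, 4, 50))   # hypothetical cube values
sigma, numsigma = 0.1, 2.0

clipped = np.where(np.abs(cube) > numsigma * sigma, cube, 0.0)  # excludepix analogue
mom0 = clipped.sum(axis=2)   # channel sum; immoments would also scale by the channel width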
Example 7
    def run(self):
        """ The run method creates the BDP

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        self._summary = {}
        dt = utils.Dtime("Regrid")
        dt.tag("start")

        do_spatial_regrid = self.getkey("do_spatial_regrid")
        pix_scale = self.getkey("pix_scale")
        do_freq_regrid = self.getkey("do_freq_regrid")
        chan_width = self.getkey("chan_width")

        pix_size = []
        chan_size = []
        im_size = []
        pix_wc_x = []
        pix_wc_y = []
        pix_wc_nu = []
        src_dec = []

        RADPERARCSEC = 4.848137E-6

        for ibdp in self._bdp_in:
            # Convert input CASA images to numpy arrays.
            istem = ibdp.getimagefile(bt.CASA)
            ifile = ibdp.baseDir() + istem

            h = casa.imhead(ifile, mode='list')
            pix_size.append(np.abs(h['cdelt1']))  # pixel scale in radians  @todo QA ?
            chan_size.append(np.abs(h['cdelt3']))
            # grab the pixels
            pix_x = h['shape'][0]
            pix_y = h['shape'][1]
            pix_nu = h['shape'][2]

            taskinit.ia.open(ifile)
            mycs = taskinit.ia.coordsys(axes=[0, 1, 2])
            #           getting all four corners handles the case of images where
            #           x-y axis not aligned with RA-dec
            for xpix in [0, pix_x]:
                for ypix in [0, pix_y]:
                    x = mycs.toworld([xpix, ypix])['numeric'][0]
                    y = mycs.toworld([xpix, ypix])['numeric'][1]
                    pix_wc_x.append(x)
                    pix_wc_y.append(y)

            nu = mycs.toworld([pix_x, pix_y, 0])['numeric'][2]
            pix_wc_nu.append(nu)
            nu = mycs.toworld([pix_x, pix_y, pix_nu])['numeric'][2]
            pix_wc_nu.append(nu)
            taskinit.ia.close()

        min_ra = np.min(pix_wc_x)
        max_ra = np.max(pix_wc_x)
        min_dec = np.min(pix_wc_y)
        max_dec = np.max(pix_wc_y)
        mean_ra = 0.5 * (min_ra + max_ra)
        mean_dec = 0.5 * (min_dec + max_dec)

        if (pix_scale < 0):
            pix_scale = np.min(pix_size)
        else:
            pix_scale = pix_scale * RADPERARCSEC

        npix_ra = int((max_ra - min_ra) / pix_scale * np.cos(mean_dec))
        npix_dec = (max_dec - min_dec) / pix_scale
        npix_dec = (int(npix_dec)
                    if npix_dec == int(npix_dec) else int(npix_dec) + 1)
        min_nu = np.min(pix_wc_nu)
        max_nu = np.max(pix_wc_nu)
        mean_nu = 0.5 * (min_nu + max_nu)
        if (chan_width < 0):
            chan_width = np.min(chan_size)
        npix_nu = int((max_nu - min_nu) / chan_width) + 1

        # now regrid everything
        innames = []
        outnames = []
        incdelt = []
        outcdelt = []

        #=========================================================
        #@todo - check if bdp_ins refer to same input file.
        # If so, the current code will fail because the output
        # file name is fixed to $INPUT_regrid.  A valid use case
        # is "regrid the same input file different ways" -- which
        # is not supported in the current code but should be
        #=========================================================
        for ibdp in self._bdp_in:
            istem = ibdp.getimagefile(bt.CASA)
            ifile = ibdp.baseDir() + istem
            ostem = "%s_regrid/" % (istem)
            ofile = self.baseDir() + ostem
            # save the input/output file names
            innames.append(istem)
            outnames.append(ostem)

            header = casa.imregrid(imagename=ifile, template='get')
            # save the input cdelt1,2,3 for summary table
            incdelt.append(
                (header['csys']['direction0']['cdelt'][0] / RADPERARCSEC,
                 header['csys']['direction0']['cdelt'][1] / RADPERARCSEC,
                 utils.freqtovel(mean_nu,
                                 header['csys']['spectral2']['wcs']['cdelt'])))

            if (do_spatial_regrid):
                header['csys']['direction0']['cdelt'] = [
                    -1 * pix_scale, pix_scale
                ]
                header['csys']['direction0']['crval'] = [mean_ra, mean_dec]
                header['shap'][0] = npix_ra
                header['shap'][1] = npix_dec
                header['csys']['direction0']['crpix'] = [
                    npix_ra / 2, npix_dec / 2
                ]

            if (do_freq_regrid):
                header['csys']['spectral2']['wcs']['crval'] = min_nu
                header['shap'][2] = npix_nu
                chan_size = np.abs(header['csys']['spectral2']['wcs']['cdelt'])
                header['csys']['spectral2']['wcs']['cdelt'] = chan_width
                flux_correction = chan_width / chan_size

            casa.imregrid(imagename=ifile, output=ofile, template=header)
            # save the output cdelt1,2,3 for summary table
            newhead = casa.imregrid(imagename=ofile, template='get')
            outcdelt.append(
                (newhead['csys']['direction0']['cdelt'][0] / RADPERARCSEC,
                 newhead['csys']['direction0']['cdelt'][1] / RADPERARCSEC,
                 utils.freqtovel(
                     mean_nu, newhead['csys']['spectral2']['wcs']['cdelt'])))

            if (do_freq_regrid):
                taskinit.ia.open(ofile)
                print flux_correction
                taskinit.ia.calc(pixels=ofile.replace(r"/", r"\/") + '*' +
                                 str(flux_correction))
                taskinit.ia.done()
            obdp = admit.SpwCube_BDP(ostem)
            self.addoutput(obdp)

        # make a table for summary
        atable = admit.util.Table()
        atable.columns = [
            "Input Image", "cdelt1", "cdelt2", "cdelt3", "Regridded Image",
            "cdelt1", "cdelt2", "cdelt3"
        ]
        atable.units = [
            "", "arcsec", "arcsec", "km/s", "", "arcsec", "arcsec", "km/s"
        ]
        for i in range(len(innames)):
            atable.addRow([
                innames[i], incdelt[i][0], incdelt[i][1], incdelt[i][2],
                outnames[i], outcdelt[i][0], outcdelt[i][1], outcdelt[i][2]
            ])

        #keys = "pixsize=%.4g naxis1=%d naxis2=%d mean_ra=%0.4f mean_dec=%0.4f reffreq=%g chan_width=%g" % (pix_scale/(4.848137E-6), npix_ra, npix_dec, mean_ra,mean_dec,min_nu,chan_width)
        taskargs = "pix_scale = " + str(
            self.getkey("pix_scale")) + " chan_width = " + str(
                self.getkey("chan_width"))
        self._summary["regrid"] = SummaryEntry(atable.serialize(), "Regrid_AT",
                                               self.id(True), taskargs)
        dt.tag("done")
        dt.end()
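
The common-grid sizing above reduces to a few lines once the corner coordinates are known; a simplified standalone sketch with hypothetical RA/Dec extremes in radians:

import numpy as np

RADPERARCSEC = 4.848137e-6
min_ra, max_ra = 2.300000, 2.300300        # hypothetical RA range [rad]
min_dec, max_dec = -0.500150, -0.500000    # hypothetical Dec range [rad]
pix_scale = 0.5 * RADPERARCSEC             # 0.5 arcsec pixels

mean_dec = 0.5 * (min_dec + max_dec)
npix_ra = int((max_ra - min_ra) / pix_scale * np.cos(mean_dec))
npix_dec = int(np.ceil((max_dec - min_dec) / pix_scale))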
Example 8
    def run(self):
        """ The run method creates the BDP

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        self._summary = {}
        dt = utils.Dtime("Smooth")
        dt.tag("start")
        # get the input keys
        bmaj = self.getkey("bmaj")
        bmin = self.getkey("bmin")
        bpa = self.getkey("bpa")
        velres = self.getkey("velres")

        # take care of potential issues in the unit strings
        # @todo  if not provided?
        bmaj['unit'] = bmaj['unit'].lower()
        bmin['unit'] = bmin['unit'].lower()
        velres['unit'] = velres['unit'].lower()
        taskargs = "bmaj=%s bmin=%s bpa=%s velres=%s" % (bmaj, bmin, bpa,
                                                         velres)

        bdpnames = []
        for ibdp in self._bdp_in:
            istem = ibdp.getimagefile(bt.CASA)
            image_in = ibdp.baseDir() + istem

            bdp_name = self.mkext(istem, 'sim')
            image_out = self.dir(bdp_name)

            taskinit.ia.open(image_in)
            h = casa.imhead(image_in, mode='list')
            pix_scale = np.abs(h['cdelt1'] *
                               206265.0)  # pix scale in asec @todo QA ?
            CC = 299792458.0  # speed of light  @todo somewhere else   [utils.c , but in km/s]

            rest_freq = h['crval3']
            # frequency pixel scale in km/s
            vel_scale = np.abs(CC * h['cdelt3'] / rest_freq / 1000.0)

            # unit conversion to arcsec (spatial) or km/s
            # (velocity) or some flavor of Hz.

            if (bmaj['unit'] == 'pixel'):
                bmaj = bmaj['value'] * pix_scale
            else:
                bmaj = bmaj['value']
            if (bmin['unit'] == 'pixel'):
                bmin = bmin['value'] * pix_scale
            else:
                bmin = bmin['value']

            hertz_input = False
            if velres['unit'] == 'pixel':
                velres['value'] = velres['value'] * vel_scale
                velres['unit'] = 'km/s'
            elif velres['unit'] == 'm/s':
                velres['value'] = velres['value'] / 1000.0
                velres['unit'] = 'km/s'
            elif velres['unit'][-2:] == 'hz':
                hertz_input = True
            elif velres['unit'] == 'km/s':
                pass
            else:
                logging.error("Unknown units in velres=%s" % velres['unit'])

            rdata = bmaj

            # we smooth in velocity first. if smoothing in velocity
            # the cube apparently must be closed afterwards and
            # then reopened if spatial smoothing is to be done.

            if velres['value'] > 0:
                # handle the different units allowed. CASA doesn't
                # like lowercase for hz units...
                if not hertz_input:
                    freq_res = str(
                        velres['value'] * 1000.0 / CC * rest_freq) + 'Hz'
                else:
                    freq_res = str(velres['value'])
                    # try to convert velres to km/s for debug purposes
                    velres['value'] = velres['value'] / rest_freq * CC / 1000.0
                    if (velres['unit'] == 'khz'):
                        velres['value'] = velres['value'] * 1000.0
                        velres['unit'] = 'kHz'
                    elif (velres['unit'] == 'mhz'):
                        velres['value'] = velres['value'] * 1E6
                        velres['unit'] = 'MHz'
                    elif (velres['unit'] == 'ghz'):
                        velres['value'] = velres['value'] * 1E9
                        velres['unit'] = 'GHz'
                    freq_res = freq_res + velres['unit']

                # NB: there is apparently a bug in CASA. only smoothing along the frequency
                # axis does not work. sepconvolve gives a unit error (says axis unit is radian rather
                # than Hz). MUST smooth in 2+ dimensions if you want this to work.

                if (velres['value'] < vel_scale):
                    raise Exception, "Desired velocity resolution %g less than pixel scale %g" % (
                        velres['value'], vel_scale)
                image_tmp = self.dir('tmp.smooth')
                im2=taskinit.ia.sepconvolve(outfile=image_tmp,axes=[0,1,2], types=["boxcar","boxcar","gauss"],\
                                              widths=['1pix','1pix',freq_res], overwrite=True)
                im2.done()
                logging.debug("sepconvolve to %s" % image_out)
                # for some reason, doing this in memory does not seem to work, so outfile must be specified.

                logging.info(
                    "Smoothing cube to a velocity resolution of %s km/s" %
                    str(velres['value']))
                logging.info("Smoothing cube to a frequency resolution of %s" %
                             freq_res)
                taskinit.ia.close()
                taskinit.ia.open(image_tmp)
                dt.tag("sepconvolve")
            else:
                image_tmp = image_out

            # now do the spatial smoothing

            convolve_to_min_beam = True  # default is to convolve to a min enclosing beam

            if bmaj > 0 and bmin > 0:
                # form qa objects out of these so that casa can understand
                bmaj = taskinit.qa.quantity(bmaj, 'arcsec')
                bmin = taskinit.qa.quantity(bmin, 'arcsec')
                bpa = taskinit.qa.quantity(bpa, 'deg')

                target_res = {}
                target_res['major'] = bmaj
                target_res['minor'] = bmin
                target_res['positionangle'] = bpa

                # throw an exception if cannot be convolved

                try:
                    # for whatever reason, if you give convolve2d a beam parameter,
                    # it complains ...
                    im2=taskinit.ia.convolve2d(outfile=image_out,major = bmaj,\
                                             minor = bmin, pa = bpa,\
                                             targetres=True,overwrite=True)
                    im2.done()
                    logging.info(
                        "Smoothing cube to a resolution of %s by %s at a PA of %s"
                        % (str(bmaj['value']), str(
                            bmin['value']), str(bpa['value'])))
                    convolve_to_min_beam = False
                    achieved_res = target_res
                except:
                    # @todo   remind what you need ?
                    logging.error("Warning: Could not convolve to requested resolution of "\
                            +str(bmaj['value']) + " by " + str(bmin['value']) + \
                            " at a PA of "+ str(bpa['value']))
                    raise Exception, "Could not convolve to beam given!"
            dt.tag("convolve2d-1")

            if convolve_to_min_beam:
                restoring_beams = taskinit.ia.restoringbeam()
                commonbeam = taskinit.ia.commonbeam()
                # for whatever reason, setrestoringbeam does not use the same set of hashes...
                commonbeam['positionangle'] = commonbeam['pa']
                del commonbeam['pa']

                # if there's one beam, apparently the beams keyword does not exist
                if 'beams' in restoring_beams:
                    print "Smoothing cube to a resolution of "+  \
                         str(commonbeam['major']['value']) +" by "+ \
                         str(commonbeam['minor']['value'])+" at a PA of "\
                        +str(commonbeam['positionangle']['value'])
                    target_res = commonbeam
                    im2=taskinit.ia.convolve2d(outfile=image_out,major=commonbeam['major'],\
                                               minor=commonbeam['minor'],\
                                               pa=commonbeam['positionangle'],\
                                               targetres=True,overwrite=True)
                    im2.done()
                    achieved_res = commonbeam
                    dt.tag("convolve2d-2")
                else:
                    print "One beam for all planes. Smoothing to common beam redundant."
                    achieved_res = commonbeam
                    if velres['value'] < 0:
                        taskinit.ia.fromimage(outfile=image_out,
                                              infile=image_in)
                    # not really doing anything
                # else, we've already done what we needed to

                taskinit.ia.setrestoringbeam(beam=achieved_res)
                rdata = achieved_res['major']['value']

            # else do no smoothing and just close the image

            taskinit.ia.close()
            dt.tag("close")

            b1 = SpwCube_BDP(bdp_name)
            self.addoutput(b1)
            # need to update for multiple images.

            b1.setkey("image", Image(images={bt.CASA: bdp_name}))

            bdpnames.append(bdp_name)

            # and clean up the temp image before the next image
            if velres['value'] > 0:
                utils.remove(image_tmp)

        # these are task arguments, not summary entries.
        _bmaj = taskinit.qa.convert(achieved_res['major'], 'rad')['value']
        _bmin = taskinit.qa.convert(achieved_res['minor'], 'rad')['value']
        _bpa = taskinit.qa.convert(achieved_res['positionangle'],
                                   'deg')['value']
        vres = "%.2f %s" % (velres['value'], velres['unit'])

        logging.regression("SMOOTH: %f %f" % (rdata, velres['value']))

        self._summary["smooth"] = SummaryEntry(
            [bdp_name, convolve_to_min_beam, _bmaj, _bmin, _bpa, vres],
            "Smooth_AT", self.id(True), taskargs)
        dt.tag("done")
        dt.end()
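
The velocity-to-frequency conversion used above when building the sepconvolve width string, shown in isolation; a minimal sketch with a hypothetical rest frequency and velocity resolution:

CC = 299792458.0                 # speed of light [m/s]
rest_freq = 115.271e9            # hypothetical rest frequency [Hz]
velres_kms = 2.0                 # desired velocity resolution [km/s]

freq_res_hz = velres_kms * 1000.0 / CC * rest_freq
freq_res = str(freq_res_hz) + 'Hz'   # width string of the form passed to ia.sepconvolve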
Example 9
    def run(self):
        """Runs the task.

           Parameters
           ----------
           None

           Returns
           -------
           None
        """

        self._summary = {}
        dt = utils.Dtime("CubeSpectrum")

        # our BDP's
        # b1  = input BDP
        # b1s = optional input CubeSpectrum
        # b1m = optional input Moment
        # b1p = optional input SourceList for positions
        # b2  = output BDP

        b1 = self._bdp_in[0]  # check input SpwCube (or LineCube)
        fin = b1.getimagefile(bt.CASA)
        if self._bdp_in[0]._type == bt.LINECUBE_BDP:
            use_vel = True
        else:
            use_vel = False

        sources = self.getkey("sources")
        pos = [
        ]  # blank it first, then try and grab it from the optional bdp_in's
        cmean = 0.0
        csigma = 0.0
        smax = []  # accumulate max in each spectrum for regression
        self.spec_description = []  # for summary()

        if self._bdp_in[1] != None:  # check if CubeStats_BDP
            #print "BDP[1] type: ",self._bdp_in[1]._type
            if self._bdp_in[1]._type != bt.CUBESTATS_BDP:
                raise Exception, "bdp_in[1] not a CubeStats_BDP, should never happen"
            # a table (cubestats)
            b1s = self._bdp_in[1]
            pos.append(b1s.maxpos[0])
            pos.append(b1s.maxpos[1])
            logging.info('CubeStats::maxpos,val=%s,%f' %
                         (str(b1s.maxpos), b1s.maxval))
            cmean = b1s.mean
            csigma = b1s.sigma
            dt.tag("CubeStats-pos")

        if self._bdp_in[
                2] != None:  # check if Moment_BDP (probably from CubeSum)
            #print "BDP[2] type: ",self._bdp_in[2]._type
            if self._bdp_in[2]._type != bt.MOMENT_BDP:
                raise Exception, "bdp_in[2] not a Moment_BDP, should never happen"
            b1m = self._bdp_in[2]
            fim = b1m.getimagefile(bt.CASA)
            pos1, maxval = self.maxpos_im(
                self.dir(fim))  # compute maxpos, since it is not in bdp (yet)
            logging.info('CubeSum::maxpos,val=%s,%f' % (str(pos1), maxval))
            pos.append(pos1[0])
            pos.append(pos1[1])
            dt.tag("Moment-pos")

        if self._bdp_in[3] != None:  # check if SourceList
            #print "BDP[3] type: ",self._bdp_in[3]._type
            # a table (SourceList)
            b1p = self._bdp_in[3]
            ra = b1p.table.getFullColumnByName("RA")
            dec = b1p.table.getFullColumnByName("DEC")
            peak = b1p.table.getFullColumnByName("Peak")
            if sources == []:
                # use the whole SourceList
                for (r, d, p) in zip(ra, dec, peak):
                    rdc = convert_sexa(r, d)
                    pos.append(rdc[0])
                    pos.append(rdc[1])
                    logging.info('SourceList::maxpos,val=%s,%f' %
                                 (str(rdc), p))
            else:
                # select specific ones from the source list
                for ipos in sources:
                    if ipos < len(ra):
                        radec = convert_sexa(ra[ipos], dec[ipos])
                        pos.append(radec[0])
                        pos.append(radec[1])
                        logging.info('SourceList::maxpos,val=%s,%f' %
                                     (str(radec), peak[ipos]))
                    else:
                        logging.warning('Skipping illegal source number %d' %
                                        ipos)

            dt.tag("SourceList-pos")

        # if pos[] still blank, use the AT keyword.
        if len(pos) == 0:
            pos = self.getkey("pos")

        # if still none, try the map center
        if len(pos) == 0:
            # @todo  this could result in a masked pixel and cause further havoc
            # @todo  could also take the reference pixel, but that could be outside image
            taskinit.ia.open(self.dir(fin))
            s = taskinit.ia.summary()
            pos = [int(s['shape'][0]) / 2, int(s['shape'][1]) / 2]
            logging.warning(
                "No input positions supplied, map center choosen: %s" %
                str(pos))
            dt.tag("map-center")

        # exhausted all sources where pos[] can be set; if still zero, bail out
        if len(pos) == 0:
            raise Exception, "No positions found from input BDP's or pos="

        # convert this regular list to a list of tuples with duplicates removed
        # sadly the order is lost.
        pos = list(set(zip(pos[0::2], pos[1::2])))
        npos = len(pos)

        dt.tag("open")

        bdp_name = self.mkext(fin, "csp")
        b2 = CubeSpectrum_BDP(bdp_name)
        self.addoutput(b2)

        imval = range(npos)  # spectra, one for each pos (placeholder)
        planes = range(npos)  # labels for the tables (placeholder)
        images = {}  # png's accumulated

        for i in range(npos):  # loop over pos, they can have mixed types now
            sd = []
            caption = "Spectrum"
            xpos = pos[i][0]
            ypos = pos[i][1]
            if type(xpos) != type(ypos):
                print "POS:", xpos, ypos
                raise Exception, "position pair not of the same type"
            if type(xpos) == int:
                # for integers, boxes are allowed, even multiple
                box = '%d,%d,%d,%d' % (xpos, ypos, xpos, ypos)
                # convention for summary is (box)
                cbox = '(%d,%d,%d,%d)' % (xpos, ypos, xpos, ypos)
                # use extend here, not append, we want individual values in a list
                sd.extend([xpos, ypos, cbox])
                caption = "Average Spectrum at %s" % cbox
                if False:
                    # this will fail on 3D cubes (see CAS-7648)
                    imval[i] = casa.imval(self.dir(fin), box=box)
                else:
                    # work around that CAS-7648 bug
                    # another approach is the ia.getprofile(), see CubeStats, this will
                    # also integrate over regions, imval will not (!!!)
                    region = 'centerbox[[%dpix,%dpix],[1pix,1pix]]' % (xpos,
                                                                       ypos)
                    caption = "Average Spectrum at %s" % region
                    imval[i] = casa.imval(self.dir(fin), region=region)
            elif type(xpos) == str:
                # this is tricky, to stay under 1 pixel , or you get a 2x2 back.
                region = 'centerbox[[%s,%s],[1pix,1pix]]' % (xpos, ypos)
                caption = "Average Spectrum at %s" % region
                sd.extend([xpos, ypos, region])
                imval[i] = casa.imval(self.dir(fin), region=region)
            else:
                print "Data type: ", type(xpos)
                raise Exception, "Data type for region not handled"
            dt.tag("imval")

            flux = imval[i]['data']
            if len(flux.shape
                   ) > 1:  # rare case if we step on a boundary between cells?
                logging.warning(
                    "source %d has spectrum shape %s: averaging the spectra" %
                    (i, repr(flux.shape)))
                flux = np.average(flux, axis=0)
            logging.debug('minmax: %f %f %d' %
                          (flux.min(), flux.max(), len(flux)))
            smax.append(flux.max())
            if i == 0:  # for first point record few extra things
                if len(imval[i]['coords'].shape) == 2:  # normal case: 1 pixel
                    freqs = imval[i]['coords'].transpose(
                    )[2] / 1e9  # convert to GHz  @todo: input units ok?
                elif len(imval[i]['coords'].shape
                         ) == 3:  # rare case if > 1 point in imval()
                    freqs = imval[i]['coords'][0].transpose(
                    )[2] / 1e9  # convert to GHz  @todo: input units ok?
                else:
                    logging.fatal(
                        "bad shape %s in freq return from imval - SHOULD NEVER HAPPEN"
                        % imval[i]['coords'].shape)
                chans = np.arange(len(freqs))  # channels 0..nchans-1
                unit = imval[i]['unit']
                restfreq = casa.imhead(
                    self.dir(fin), mode="get",
                    hdkey="restfreq")['value'] / 1e9  # in GHz
                dt.tag("imhead")
                vel = (
                    1 - freqs / restfreq
                ) * utils.c  #  @todo : use a function (and what about relativistic?)

            # construct the Table for CubeSpectrum_BDP
            # @todo note data needs to be a tuple, later to be column_stack'd
            labels = ["channel", "frequency", "flux"]
            units = ["number", "GHz", unit]
            data = (chans, freqs, flux)

            if i == 0:
                # plane 0 : we are allowing a multiplane table, so the first plane is special
                table = Table(columns=labels,
                              units=units,
                              data=np.column_stack(data),
                              planes=["0"])
            else:
                # planes 1,2,3.... are stacked onto the previous one
                table.addPlane(np.column_stack(data), "%d" % i)

            # example plot , one per position for now
            if use_vel:
                x = vel
                xlab = 'VLSR (km/s)'
            else:
                x = chans
                xlab = 'Channel'
            y = [flux]
            sd.append(xlab)
            if type(xpos) == int:
                # grab the RA/DEC... kludgy
                h = casa.imstat(self.dir(fin), box=box)
                ra = h['blcf'].split(',')[0]
                dec = h['blcf'].split(',')[1]
                title = '%s %d @ %d,%d = %s,%s' % (bdp_name, i, xpos, ypos, ra,
                                                   dec)
            else:
                title = '%s %d @ %s,%s' % (
                    bdp_name, i, xpos, ypos
                )  # or use box, once we allow non-points

            myplot = APlot(ptype=self._plot_type,
                           pmode=self._plot_mode,
                           abspath=self.dir())
            ylab = 'Flux (%s)' % unit
            p1 = "%s_%d" % (bdp_name, i)
            myplot.plotter(x,
                           y,
                           title,
                           p1,
                           xlab=xlab,
                           ylab=ylab,
                           thumbnail=True)
            # Why not use p1 as the key?
            ii = images["pos%d" % i] = myplot.getFigure(figno=myplot.figno,
                                                        relative=True)
            thumbname = myplot.getThumbnail(figno=myplot.figno, relative=True)
            sd.extend([ii, thumbname, caption, fin])
            self.spec_description.append(sd)

        logging.regression("CSP: %s" % str(smax))

        image = Image(images=images, description="CubeSpectrum")
        b2.setkey("image", image)
        b2.setkey("table", table)
        b2.setkey("sigma", csigma)  # TODO: not always available
        b2.setkey("mean", cmean)  # TODO: not always available

        if True:
            #       @todo     only first plane due to limitation in exportTable()
            islash = bdp_name.find('/')
            if islash < 0:
                tabname = self.dir("testCubeSpectrum.tab")
            else:
                tabname = self.dir(bdp_name[:islash] + "/testCubeSpectrum.tab")
            table.exportTable(tabname, cols=["frequency", "flux"])
        dt.tag("done")
        # For a single spectrum this is
        # SummaryEntry([[data for spec1]], "CubeSpectrum_AT",taskid)
        # For multiple spectra this is
        # SummaryEntry([[data for spec1],[data for spec2],...], "CubeSpectrum_AT",taskid)
        self._summary["spectra"] = SummaryEntry(self.spec_description,
                                                "CubeSpectrum_AT",
                                                self.id(True))
        taskargs = "pos=" + str(pos)
        taskargs += '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; <span style="background-color:white">&nbsp;' + fin.split(
            '/')[0] + '&nbsp;</span>'
        for v in self._summary:
            self._summary[v].setTaskArgs(taskargs)
        dt.tag("summary")
        dt.end()
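
The position bookkeeping above flattens all candidate positions into [x0, y0, x1, y1, ...] and then collapses them into unique (x, y) pairs; a tiny standalone sketch with hypothetical pixel coordinates (as the comment in the code notes, the ordering is lost):

pos = [10, 20, 30, 40, 10, 20]   # hypothetical flat list of pixel coordinates
pos = list(set(zip(pos[0::2], pos[1::2])))
npos = len(pos)                  # pos is now [(10, 20), (30, 40)] in some order; npos == 2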
Example 10
    def run(self):
        """Runs the task.

           Parameters
           ----------
           None

           Returns
           -------
           None
        """

        self._summary = {}
        dt = utils.Dtime("CubeSpectrum")

        # our BDP's
        # b1  = input BDP
        # b1s = optional input CubeSpectrum
        # b1m = optional input Moment
        # b1p = optional input SourceList for positions
        # b2  = output BDP

        b1 = self._bdp_in[0]                                            # check input SpwCube (or LineCube)
        fin = b1.getimagefile(bt.CASA)
        if self._bdp_in[0]._type == bt.LINECUBE_BDP:
            use_vel = True
        else:
            use_vel = False

        sources = self.getkey("sources")
        pos = []                     # blank it first, then try and grab it from the optional bdp_in's
        cmean  = 0.0
        csigma = 0.0
        smax  = []                   # accumulate max in each spectrum for regression
        self.spec_description = []   # for summary() 

        if self._bdp_in[1] != None:                                      # check if CubeStats_BDP
            #print "BDP[1] type: ",self._bdp_in[1]._type
            if self._bdp_in[1]._type != bt.CUBESTATS_BDP:
                raise Exception,"bdp_in[1] not a CubeStats_BDP, should never happen"
            # a table (cubestats)
            b1s = self._bdp_in[1]
            pos.append(b1s.maxpos[0])
            pos.append(b1s.maxpos[1])
            logging.info('CubeStats::maxpos,val=%s,%f' % (str(b1s.maxpos),b1s.maxval))
            cmean  = b1s.mean
            csigma = b1s.sigma
            dt.tag("CubeStats-pos")
            
        if self._bdp_in[2] != None:                                      # check if Moment_BDP (probably from CubeSum)
            #print "BDP[2] type: ",self._bdp_in[2]._type
            if self._bdp_in[2]._type != bt.MOMENT_BDP:
                raise Exception,"bdp_in[2] not a Moment_BDP, should never happen"
            b1m = self._bdp_in[2]
            fim = b1m.getimagefile(bt.CASA)
            pos1,maxval = self.maxpos_im(self.dir(fim))     # compute maxpos, since it is not in bdp (yet)
            logging.info('CubeSum::maxpos,val=%s,%f' % (str(pos1),maxval))
            pos.append(pos1[0])
            pos.append(pos1[1])
            dt.tag("Moment-pos")

        if self._bdp_in[3] != None:                                      # check if SourceList
            #print "BDP[3] type: ",self._bdp_in[3]._type
            # a table (SourceList)
            b1p = self._bdp_in[3]
            ra   = b1p.table.getFullColumnByName("RA")
            dec  = b1p.table.getFullColumnByName("DEC")
            peak = b1p.table.getFullColumnByName("Peak")
            if sources == []:
                # use the whole SourceList
                for (r,d,p) in zip(ra,dec,peak):
                  rdc = convert_sexa(r,d)
                  pos.append(rdc[0])
                  pos.append(rdc[1])
                  logging.info('SourceList::maxpos,val=%s,%f' % (str(rdc),p))
            else:                  
                # select specific ones from the source list
                for ipos in sources:
                    if ipos < len(ra):
                        radec =  convert_sexa(ra[ipos],dec[ipos])
                        pos.append(radec[0])
                        pos.append(radec[1])
                        logging.info('SourceList::maxpos,val=%s,%f' % (str(radec),peak[ipos]))
                    else:
                        logging.warning('Skipping illegal source number %d' % ipos)

            dt.tag("SourceList-pos")

        # if pos[] still blank, use the AT keyword.
        if len(pos) == 0:
            pos = self.getkey("pos")

        # if still none, try the map center
        if len(pos) == 0:
            # @todo  this could result in a masked pixel and cause further havoc
            # @todo  could also take the reference pixel, but that could be outside image
            taskinit.ia.open(self.dir(fin))
            s = taskinit.ia.summary()
            pos = [int(s['shape'][0])/2, int(s['shape'][1])/2]
            logging.warning("No input positions supplied, map center choosen: %s" % str(pos))
            dt.tag("map-center")

        # exhausted all sources where pos[] can be set; if still zero, bail out
        if len(pos) == 0:
            raise Exception,"No positions found from input BDP's or pos="

        # convert this regular list to a list of tuples with duplicates removed
        # sadly the order is lost.
        pos = list(set(zip(pos[0::2],pos[1::2])))
        npos = len(pos)
        
        dt.tag("open")

        bdp_name = self.mkext(fin,"csp")
        b2 = CubeSpectrum_BDP(bdp_name)
        self.addoutput(b2)

        imval  = range(npos)                             # spectra, one for each pos (placeholder)
        planes = range(npos)                             # labels for the tables (placeholder)
        images = {}                                      # png's accumulated

        for i in range(npos):                            # loop over pos, they can have mixed types now
            sd = []
            caption = "Spectrum"
            xpos = pos[i][0]
            ypos = pos[i][1]
            if type(xpos) != type(ypos):
                print "POS:",xpos,ypos
                raise Exception,"position pair not of the same type"
            if type(xpos)==int:
                # for integers, boxes are allowed, even multiple
                box = '%d,%d,%d,%d' % (xpos,ypos,xpos,ypos)
                # convention for summary is (box)
                cbox = '(%d,%d,%d,%d)' % (xpos,ypos,xpos,ypos)
                # use extend here, not append, we want individual values in a list
                sd.extend([xpos,ypos,cbox])
                caption = "Average Spectrum at %s" % cbox
                if False:
                    # this will fail on 3D cubes (see CAS-7648)
                    imval[i] = casa.imval(self.dir(fin),box=box)
                else:
                    # work around that CAS-7648 bug 
                    # another approach is the ia.getprofile(), see CubeStats, this will
                    # also integrate over regions, imval will not (!!!)
                    region = 'centerbox[[%dpix,%dpix],[1pix,1pix]]' % (xpos,ypos)
                    caption = "Average Spectrum at %s" % region
                    imval[i] = casa.imval(self.dir(fin),region=region)
            elif type(xpos)==str:
                # tricky: keep the region under 1 pixel, or you get a 2x2 box back.
                region = 'centerbox[[%s,%s],[1pix,1pix]]' % (xpos,ypos)
                caption = "Average Spectrum at %s" % region
                sd.extend([xpos,ypos,region])
                imval[i] = casa.imval(self.dir(fin),region=region)
            else:
                print "Data type: ",type(xpos)
                raise Exception,"Data type for region not handled"
            dt.tag("imval")

            flux  = imval[i]['data']
            if len(flux.shape) > 1:     # rare case if we step on a boundary between cells?
                logging.warning("source %d has spectrum shape %s: averaging the spectra" % (i,repr(flux.shape)))
                flux = np.average(flux,axis=0)
            logging.debug('minmax: %f %f %d' % (flux.min(),flux.max(),len(flux)))
            smax.append(flux.max())
            if i==0:                                              # for first point record few extra things
                if len(imval[i]['coords'].shape) == 2:                   # normal case: 1 pixel
                    freqs = imval[i]['coords'].transpose()[2]/1e9        # convert to GHz  @todo: input units ok?
                elif len(imval[i]['coords'].shape) == 3:                 # rare case if > 1 point in imval()
                    freqs = imval[i]['coords'][0].transpose()[2]/1e9     # convert to GHz  @todo: input units ok?
                else:
                    logging.fatal("bad shape %s in freq return from imval - SHOULD NEVER HAPPEN" % imval[i]['coords'].shape)
                chans = np.arange(len(freqs))                     # channels 0..nchans-1
                unit  = imval[i]['unit']
                restfreq = casa.imhead(self.dir(fin),mode="get",hdkey="restfreq")['value']/1e9    # in GHz
                dt.tag("imhead")
                vel   = (1-freqs/restfreq)*utils.c                #  @todo : use a function (and what about relativistic?)

            # construct the Table for CubeSpectrum_BDP 
            # @todo note data needs to be a tuple, later to be column_stack'd
            labels = ["channel" ,"frequency" ,"flux" ]
            units  = ["number"  ,"GHz"       ,unit   ]
            data   = (chans     ,freqs       ,flux   )

            if i==0:
                # plane 0 : we are allowing a multiplane table, so the first plane is special
                table = Table(columns=labels,units=units,data=np.column_stack(data),planes=["0"])
            else:
                # planes 1,2,3.... are stacked onto the previous one
                table.addPlane(np.column_stack(data),"%d" % i)

            # example plot , one per position for now
            if use_vel:
                x = vel
                xlab = 'VLSR (km/s)'
            else:
                x = chans
                xlab  = 'Channel'
            y = [flux]
            sd.append(xlab)
            if type(xpos)==int:
                # grab the RA/DEC... kludgy
                h = casa.imstat(self.dir(fin),box=box)
                ra  = h['blcf'].split(',')[0]
                dec = h['blcf'].split(',')[1]
                title = '%s %d @ %d,%d = %s,%s' % (bdp_name,i,xpos,ypos,ra,dec)
            else:
                title = '%s %d @ %s,%s' % (bdp_name,i,xpos,ypos)       # or use box, once we allow non-points

            myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode, abspath=self.dir())
            ylab  = 'Flux (%s)' % unit
            p1 = "%s_%d" % (bdp_name,i)
            myplot.plotter(x,y,title,p1,xlab=xlab,ylab=ylab,thumbnail=True)
            # Why not use p1 as the key?
            ii = images["pos%d" % i] = myplot.getFigure(figno=myplot.figno,relative=True)
            thumbname = myplot.getThumbnail(figno=myplot.figno,relative=True)
            sd.extend([ii, thumbname, caption, fin])
            self.spec_description.append(sd)

        logging.regression("CSP: %s" % str(smax))

        image = Image(images=images, description="CubeSpectrum")
        b2.setkey("image",image)
        b2.setkey("table",table)
        b2.setkey("sigma",csigma)   # TODO: not always available
        b2.setkey("mean",cmean)     # TODO: not always available

        if True:
            #       @todo     only first plane due to limitation in exportTable()
            islash = bdp_name.find('/')
            if islash < 0:
                tabname = self.dir("testCubeSpectrum.tab")
            else:
                tabname = self.dir(bdp_name[:islash] + "/testCubeSpectrum.tab")
            table.exportTable(tabname,cols=["frequency" ,"flux"])
        dt.tag("done")
        # For a single spectrum this is
        # SummaryEntry([[data for spec1]], "CubeSpectrum_AT",taskid)
        # For multiple spectra this is
        # SummaryEntry([[data for spec1],[data for spec2],...], "CubeSpectrum_AT",taskid)
        self._summary["spectra"] = SummaryEntry(self.spec_description,"CubeSpectrum_AT",self.id(True))
        taskargs = "pos="+str(pos)
        taskargs += '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; <span style="background-color:white">&nbsp;' + fin.split('/')[0] + '&nbsp;</span>'
        for v in self._summary:
            self._summary[v].setTaskArgs(taskargs)
        dt.tag("summary")
        dt.end()
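
The spectrum extraction above converts channel frequencies to velocity with the radio convention, vel = (1 - f/f0)*c, and the @todo notes that the relativistic case is not handled. Below is a minimal sketch of the three common conventions, assuming frequencies in GHz and the speed of light in km/s (the value utils.c is presumed to hold); the helper names are illustrative and not part of ADMIT.

C_KMS = 299792.458                      # speed of light in km/s

def freq_to_vradio(f, f0):
    """radio convention, as used above: v = c * (1 - f/f0)"""
    return C_KMS * (1.0 - f / f0)

def freq_to_voptical(f, f0):
    """optical convention: v = c * (f0/f - 1)"""
    return C_KMS * (f0 / f - 1.0)

def freq_to_vrelativistic(f, f0):
    """relativistic convention: v = c * (f0^2 - f^2) / (f0^2 + f^2)"""
    return C_KMS * (f0 * f0 - f * f) / (f0 * f0 + f * f)

# e.g. a channel at 115.000 GHz against a 115.271 GHz rest frequency gives
# freq_to_vradio(115.000, 115.271) ~ 705 km/s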
Example n. 12
0
    def run(self):
        # 
        self._summary = {}                  # prepare to make a summary here
        dt = utils.Dtime("Ingest")          # timer for debugging

        do_cbeam = True                     # enforce a common beam
        #
        pb = self.getkey('pb')
        do_pb = len(pb) > 0
        use_pb = self.getkey("usepb")
        # 
        create_mask = self.getkey('mask')   # create a new mask ?
        box   = self.getkey("box")          # corners in Z, XY or XYZ
        edge  = self.getkey("edge")         # number of edge channels to remove
        restfreq = self.getkey("restfreq")  # < 0 means not activated

        # smooth=  could become deprecated, and/or include a decimation option to make it useful
        #          again; Smooth_AT() does this as well, at the cost of an extra cube to store
        smooth = self.getkey("smooth")      # 
        #
        vlsr = self.getkey("vlsr")          # see also LineID, where this could be given again

        # first place a fits file in the admit project directory (symlink)
        # this is a bit involved, depending on whether an absolute or relative path was
        # given to Ingest_AT(file=)
        fitsfile = self.getkey('file')
        if fitsfile[0] != os.sep:
            fitsfile = os.path.abspath(os.getcwd() + os.sep + fitsfile)
        logging.debug('FILE=%s' % fitsfile)
        if fitsfile[0] != os.sep:
            raise Exception,"Bad file=%s, expected absolute name",fitsfile

        # now determine if it could have been a CASA (or MIRIAD) image already 
        # which we'll assume if it's a directory; this is natively supported by CASA
        # but some tools will fail if you pass them a FITS or MIRIAD file directly.
        # MIRIAD is not recommended for serious work, especially big files, since there
        # is a performance penalty due to tiling.
        file_is_casa = casautil.iscasa(fitsfile)

        loc = fitsfile.rfind(os.sep)               # find the '/'
        ffile0 = fitsfile[loc+1:]                  # basename.fits
        basename = self.getkey('basename')         # (new) basename allowed (allow no dots?)
        if len(basename) == 0:
            basename = ffile0[:ffile0.rfind('.')]  # basename
        logging.info("basename=%s" % basename)
        target = self.dir(ffile0)

        if not os.path.exists(target) :
            cmd = 'ln -s "%s" "%s"' % (fitsfile, target)
            logging.debug("CMD: %s" % cmd)
            os.system(cmd)

        readonly = False
        if file_is_casa:
            logging.debug("Assuming input %s is a CASA (or MIRIAD) image" % ffile0)
            bdpfile = self.mkext(basename,"im")
            if bdpfile == ffile0:
                logging.warning("No selections allowed on CASA image, since no alias was given")
                readonly = True
            b1  = SpwCube_BDP(bdpfile)
            self.addoutput(b1)
            b1.setkey("image", Image(images={bt.CASA:bdpfile}))
            # @todo b2 and PB?
        else:
            # construct the output name and construct the BDP based on the CASA image name
            # this also takes care of the behind the scenes alias= substitution
            bdpfile = self.mkext(basename,"im")
            if bdpfile == basename:
                raise Exception,"basename and bdpfile are the same, Ingest_AT needs a fix for this"
            b1  = SpwCube_BDP(bdpfile)
            self.addoutput(b1)
            if do_pb:
                print "doing the PB"
                bdpfile2 = self.mkext(basename,"pb")
                b2 = Image_BDP(bdpfile2)
                self.addoutput(b2)

        # @todo    we should also set readonly=True if no box, no mask etc. and still an alias
        #          that way it will speed up and not make a copy of the image ?

        # fni and fno are full (abspath) filenames, ready for CASA
        # fni is the same as fitsfile
        fni = self.dir(ffile0)
        fno = self.dir(bdpfile)
        if do_pb: fno2 = self.dir(bdpfile2)
        dt.tag("start")

        if file_is_casa:
            taskinit.ia.open(fni)
        else:
            if do_pb and use_pb:
                # @todo   this needs a fix for the path for pb, only works if abs path is given
                # impbcor(im.fits,pb.fits,out.im,overwrite=True,mode='m')
                if False:
                    # this may seem like a nice shortcut, to have the fits->casa conversion be done
                    # internally in impbcor, but the performance is terrible for big cubes. (tiling?)
                    # we keep this code here, perhaps at some future time (mpi?) this performs better
                    # @todo fno2
                    impbcor(fni,pb,fno,overwrite=True,mode='m')
                    dt.tag("impbcor-1")
                else:
                    # the better way is to convert FITS->CASA and then call impbcor()
                    # the CPU savings are big, but I/O overhead can still be substantial
                    taskinit.ia.fromfits('_pbcor',fni,overwrite=True)
                    taskinit.ia.fromfits('_pb',pb,overwrite=True)
                    dt.tag("impbcor-1f")
                    if False:
                        impbcor('_pbcor','_pb',fno,overwrite=True,mode='m')
                        # @todo fno2
                        utils.remove('_pbcor')
                        utils.remove('_pb')
                        dt.tag("impbcor-2")
                    else:
                        # immath appears to be even faster (2x in CPU)
                        # https://bugs.nrao.edu/browse/CAS-8299
                        # @todo  this needs to be confirmed that impbcor is now good to go (r36078)
                        casa.immath(['_pbcor','_pb'],'evalexpr',fno,'IM0*IM1')
                        dt.tag("immath")
                        if True:
                            # use the mean of all channels... faster may be to use the middle plane
                            # barf; edge channels can be covered by fewer subfields in a mosaic
                            taskinit.ia.open('_pb')
                            taskinit.ia.summary()
                            ia1=taskinit.ia.moments(moments=[-1],drop=True,outfile=fno2)
                            ia1.done()
                            taskinit.ia.close()
                            dt.tag("moments")
                        utils.remove('_pbcor')
                        utils.remove('_pb')
                        dt.tag("impbcor-3")
            elif do_pb and not use_pb:
                # cheat case: PB was given, but not meant to be used
                # not implemented yet
                print "cheat case dummy PB not implemented yet"
            else:
                # no PB given
                if True:
                    # re-running this was more consistently faster in wall clock time
                    # note that zeroblanks=True will still keep the mask
                    logging.debug("casa::ia.fromfits(%s) -> %s" % (fni,bdpfile))
                    taskinit.ia.fromfits(fno,fni,overwrite=True)
                    #taskinit.ia.fromfits(fno,fni,overwrite=True,zeroblanks=True)
                    dt.tag("fromfits")
                else:
                    # not working to extend 3D yet, but this would solve the impv() 3D problem
                    logging.debug("casa::importfits(%s) -> %s" % (fni,bdpfile))
                    #casa.importfits(fni,fno,defaultaxes=True,defaultaxesvalues=[None,None,None,'I'])
                    # possible bug: zeroblanks=True has no effect?
                    casa.importfits(fni,fno,zeroblanks=True)
                    dt.tag("importfits")
            taskinit.ia.open(fno)
            if len(smooth) > 0:
                # smooth here, but Smooth_AT is another option
                # here we only allow pixel smoothing
                # spatial: gauss
                # spectral: boxcar/hanning (check for flux conservation)
                #     is the boxcar wrong, not centered, but edged?
                # @todo CASA BUG:  this will lose the object name (and maybe more?) from header, so VLSR lookup fails
                fnos = fno + '.smooth'
                taskinit.ia.convolve2d(outfile=fnos, overwrite=True, pa='0deg',
                                       major='%gpix' % smooth[0], minor='%gpix' % smooth[1], type='gaussian')
                taskinit.ia.close()
                srcname = casa.imhead(fno,mode="get",hdkey="object")          # work around CASA bug
                #@todo use safer ia.rename() here.
                # https://casa.nrao.edu/docs/CasaRef/image.rename.html
                utils.rename(fnos,fno)
                casa.imhead(fno,mode="put",hdkey="object",hdvalue=srcname)    # work around CASA bug
                dt.tag("convolve2d")
                if len(smooth) > 2 and smooth[2] > 0:
                    if smooth[2] == 1:
                        # @todo only 1 channel option
                        specsmooth(fno,fnos,axis=2,function='hanning',dmethod="")
                    else:
                        # @todo may have the wrong center
                        specsmooth(fno,fnos,axis=2,function='boxcar',dmethod="",width=smooth[2])
                    #@todo use safer ia.rename() here.
                    # https://casa.nrao.edu/docs/CasaRef/image.rename.html
                    utils.rename(fnos,fno)
                    dt.tag("specsmooth")
                taskinit.ia.open(fno)

            s = taskinit.ia.summary()
            if len(s['shape']) != 4:
                logging.warning("Adding dummy STOKES-I axis")
                fnot = fno + '_4'
                taskinit.ia.adddegaxes(stokes='I',outfile=fnot)
                taskinit.ia.close()
                #@todo use safer ia.rename() here.
                # https://casa.nrao.edu/docs/CasaRef/image.rename.html
                utils.rename(fnot,fno)
                taskinit.ia.open(fno)
                dt.tag("adddegaxes")
            else:
                logging.info("SHAPE: %s" % str(s['shape']))
        s = taskinit.ia.summary()
        dt.tag("summary-0")
        if s['hasmask'] and create_mask:
            logging.warning("no extra mask created because input image already had one")
            create_mask = False

        # if a box= or edge= was given, only a subset of the cube needs to be ingested
        # this however complicates PB correction later on
        if len(box) > 0 or len(edge) > 0:
            if readonly:
                raise Exception,"Cannot use box= or edge=, data is read-only, or use an basename/alias"
            if len(edge) == 1:  edge.append(edge[0])

            nx = s['shape'][0]
            ny = s['shape'][1]
            nz = s['shape'][2]
            logging.info("box=%s edge=%s processing with SHAPE: %s" % (str(box),str(edge),str(s['shape'])))
                                                                                                 
            if len(box) == 2:
                # select zrange
                if len(edge)>0:
                    raise Exception,"Cannot use edge= when box=[z1,z2] is used"
                r1 = taskinit.rg.box([0,0,box[0]] , [nx-1,ny-1,box[1]])
            elif len(box) == 4:
                if len(edge) == 0:
                    # select just an XY box
                    r1 = taskinit.rg.box([box[0],box[1]] , [box[2],box[3]])
                elif len(edge) == 2:
                    # select an XY box, but remove some edge channels
                    r1 = taskinit.rg.box([box[0],box[1],edge[0]] , [box[2],box[3],nz-edge[1]-1])
                else:
                    raise Exception,"Bad edge= for len(box)=4"
            elif len(box) == 6:
                # select an XYZ box
                r1 = taskinit.rg.box([box[0],box[1],box[2]] , [box[3],box[4],box[5]])
            elif len(edge) == 2:
                # remove some edge channels, but keep the whole XY box
                r1 = taskinit.rg.box([0,0,edge[0]] , [nx-1,ny-1,nz-edge[1]-1])
            else:
                raise Exception,"box=%s illegal" % box
            logging.debug("BOX/EDGE selection: %s %s" % (str(r1['blc']),str(r1['trc']))) 
            #if taskinit.ia.isopen(): taskinit.ia.close()

            logging.info("SUBIMAGE")
            subimage = taskinit.ia.subimage(region=r1,outfile=fno+'.box',overwrite=True)
            taskinit.ia.close()
            taskinit.ia.done()
            subimage.rename(fno,overwrite=True)
            subimage.close()
            subimage.done()
            taskinit.ia.open(fno)
            dt.tag("subimage-1")
        else:
            # the whole cube is passed onto ADMIT
            if readonly and create_mask:
                raise Exception,"Cannot use mask=True, data read-only, or use an alias"
            if file_is_casa and not readonly:
                # @todo a miriad file - which should be read only - will also create a useless copy here if no alias used
                taskinit.ia.subimage(overwrite=True,outfile=fno)
                taskinit.ia.close()
                taskinit.ia.open(fno)
                dt.tag("subimage-0")

        if create_mask:
            if readonly:
                raise Exception,"Cannot create mask, data read-only, or use an alias"
            # also check out the 'fromfits::zeroblanks = False'
            # calcmask() will overwrite any previous pixelmask
            #taskinit.ia.calcmask('mask("%s") && "%s" != 0.0' % (fno,fno))
            taskinit.ia.calcmask('"%s" != 0.0' % fno)
            dt.tag("mask")

        s = taskinit.ia.summary()
        dt.tag("summary-1")

        # do a fast statistics (no median or robust)
        s0 = taskinit.ia.statistics()
        dt.tag("statistics")
        if len(s0['npts']) == 0:
            raise Exception,"No statistics possible, are there valid data in this cube?"
        # There may be multiple beams per plane so we can't
        # rely on the BEAM's 'major', 'minor', 'positionangle' being present.
        # ia.commonbeam() is guaranteed to return beam parameters
        # if present
        if do_cbeam and s.has_key('perplanebeams'):
            # report on the beam extremities, need to loop over all, 
            # first and last don't need to be extremes....
            n = s['perplanebeams']['nChannels']
            ab0 = '*0'
            bb0 = s['perplanebeams']['beams'][ab0]['*0']
            bmaj0 = bb0['major']['value']
            bmin0 = bb0['minor']['value']
            beamd = 0.0
            for i in range(n):
                ab1 = '*%d' % i
                bb1 = s['perplanebeams']['beams'][ab1]['*0']
                bmaj1 = bb1['major']['value']
                bmin1 = bb1['minor']['value']
                beamd = max(beamd,abs(bmaj0-bmaj1),abs(bmin0-bmin1))
            logging.warning("MAX-BEAMSPREAD %f" % (beamd))
            #
            if True:
                logging.info("Applying a commonbeam from the median beam accross the band")
                # imhead is a bit slow; alternatively use ia.summary() at the half point for setrestoringbeam()
                h = casa.imhead(fno,mode='list')
                b = h['perplanebeams']['median area beam']
                taskinit.ia.setrestoringbeam(remove=True)
                taskinit.ia.setrestoringbeam(beam=b)
                commonbeam = taskinit.ia.commonbeam()

            else:
                # @todo : this will be VERY slow - code not finished, needs renaming etc.
                #         this is however formally the better solution
                logging.warning("commmonbeam code not finished")
                cb = taskinit.ia.commonbeam()
                taskinit.ia.convolve2d(outfile='junk-common.im', major=cb['major'], minor=cb['minor'], pa=cb['pa'], 
                                       targetres=True, overwrite=True)
                dt.tag('convolve2d')
                commonbeam = {}
        else:
            try:
                commonbeam = taskinit.ia.commonbeam()
            except:
                nppb = 4.0
                logging.warning("No synthesized beam found, faking one to prevent downstream problems: nppb=%f" % nppb)
                s = taskinit.ia.summary()
                cdelt2 = abs(s['incr'][0]) * 180.0/math.pi*3600.0
                bmaj = nppb * cdelt2      # use a nominal 4 points per (round) beam 
                bmin = nppb * cdelt2
                bpa  = 0.0
                taskinit.ia.setrestoringbeam(major='%farcsec' % bmaj, minor='%farcsec' % bmin, pa='%fdeg' % bpa)
                commonbeam = {}
        logging.info("COMMONBEAM[%d] %s" % (len(commonbeam),str(commonbeam)))

        first_point = taskinit.ia.getchunk(blc=[0,0,0,0],trc=[0,0,0,0],dropdeg=True)
        logging.debug("DATA0*: %s" % str(first_point))

        taskinit.ia.close()
        logging.info('BASICS: [shape] npts min max: %s %d %f %f' % (s['shape'],s0['npts'][0],s0['min'][0],s0['max'][0]))
        logging.info('S/N (all data): %f' % (s0['max'][0]/s0['rms'][0]))
        npix = 1
        nx = s['shape'][0]
        ny = s['shape'][1]
        nz = s['shape'][2]
        for n in s['shape']:
            npix = npix * n
        ngood = int(s0['npts'][0])
        fgood = (1.0*ngood)/npix
        logging.info('GOOD PIXELS: %d/%d (%f%% good or %f%% bad)' % (ngood,npix,100.0*fgood,100.0*(1 - fgood)))
        if s['hasmask']:
            logging.warning('MASKS: %s' % (str(s['masks'])))

        if not file_is_casa:
            b1.setkey("image", Image(images={bt.CASA:bdpfile}))
            if do_pb:
                b2.setkey("image", Image(images={bt.CASA:bdpfile2}))            

        # cube sanity: needs to be either 4D or 2D. But p-p-v cube
        # alternative: ia.subimage(dropdeg = True)
        # see also: https://bugs.nrao.edu/browse/CAS-5406
        shape = s['shape']
        if len(shape)>3:
            if shape[3]>1:
                # @todo this happens when you ingest a fits or casa image which is ra-dec-pol-freq
                if nz > 1:
                    msg = 'Ingest_AT: cannot deal with real 4D cubes yet'
                    logging.critical(msg)
                    raise Exception,msg
                else:
                    # @todo this is not working yet when the input was a casa image, but ok when fits. go figure.
                    fnot = fno + ".trans"
                    if True:
                        # this works
                        #@todo use safer ia.rename() here.
                        # https://casa.nrao.edu/docs/CasaRef/image.rename.html
                        utils.rename(fno,fnot)
                        imtrans(fnot,fno,"0132")
                        utils.remove(fnot)
                    else:
                        # this does not work, what the heck
                        imtrans(fno,fnot,"0132")
                        #@todo use safer ia.rename() here.
                        # https://casa.nrao.edu/docs/CasaRef/image.rename.html
                        utils.rename(fnot,fno)
                    nz = s['shape'][3]
                    # get a new summary 's'
                    taskinit.ia.open(fno)
                    s = taskinit.ia.summary()
                    taskinit.ia.close()
                    logging.warning("Using imtrans, with nz=%d, to fix axis ordering" % nz)
                    dt.tag("imtrans4")
            # @todo  ensure first two axes are position, followed by frequency
        elif len(shape)==3:
            # the current importfits() can do defaultaxes=True,defaultaxesvalues=['', '', '', 'I']
            # but then appears to return a ra-dec-pol-freq cube
            # this branch probably never happens, since ia.fromfits() will 
            # properly convert a 3D cube to 4D now !!
            # NO: when NAXIS=3 but various AXIS4's are present, that works. But not if it's pure 3D
            # @todo  box=
            logging.warning("patching up a 3D to 4D cube")
            raise Exception,"SHOULD NEVER GET HERE"
            fnot = fno + ".trans"
            casa.importfits(fni,fnot,defaultaxes=True,defaultaxesvalues=['', '', '', 'I'])
            utils.remove(fno)        # ieck
            imtrans(fnot,fno,"0132")
            utils.remove(fnot)
            dt.tag("imtrans3")

        logging.regression('CUBE: %g %g %g  %d %d %d  %f' % (s0['min'],s0['max'],s0['rms'],nx,ny,nz,100.0*(1 - fgood)))

        # if the cube has only 1 plane (e.g. continuum) , create a visual (png or so)
        # for 3D cubes, rely on something like CubeSum
        if nz == 1:
            implot = ImPlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())
            implot.plotter(rasterfile=bdpfile,figname=bdpfile)
            # @todo needs to be registered for the BDP, right now we only have the plot

        # ia.summary() doesn't have this easily available, so run the more expensive imhead()
        h = casa.imhead(fno,mode='list')
        telescope = h['telescope']
        # work around CASA's PIPELINE bug/feature?   if 'OBJECT' is blank, try 'FIELD'
        srcname = h['object']
        if srcname == ' ':
            logging.warning('FIELD used for OBJECT')
            srcname = casa.imhead(fno,mode='get',hdkey='field')
            if srcname == False:
                # if no FIELD either, we're doomed.  yes, this did happen.
                srcname = 'Unknown'
            casa.imhead(fno,mode="put",hdkey="object",hdvalue=srcname)
            h['object'] = srcname
        logging.info('TELESCOPE: %s' % telescope)
        logging.info('OBJECT: %s' % srcname)
        logging.info('REFFREQTYPE: %s' % h['reffreqtype'])
        if h['reffreqtype'].find('TOPO')>=0:
            msg = 'Ingest_AT: cannot deal with cubes with TOPOCENTRIC frequencies yet - winging it'
            logging.warning(msg)
            #raise Exception,msg
        # Ensure beam parameters are available if there are multiple beams
        # If there is just one beam, then we are just overwriting the header
        # variables with their identical values.
        if len(commonbeam) != 0:
            h['beammajor'] = commonbeam['major']
            h['beamminor'] = commonbeam['minor']
            h['beampa']    = commonbeam['pa']
        # cheat: add some things that need to be passed to summary....
        h['badpixel'] = 1.0-fgood
        if vlsr < -999998.0:
            vlsr          = admit.VLSR().vlsr(h['object'].upper()) 
        h['vlsr']     = vlsr
        logging.info("VLSR = %f (from source catalog)" % vlsr)
        
        taskargs = "file=" + fitsfile
        if create_mask == True:
            taskargs = taskargs + " mask=True" 
        if len(box) > 0:
            taskargs = taskargs + " " + str(box)
        if len(edge) > 0:
            taskargs = taskargs + " " + str(edge)
        r2d = 57.29577951308232
        logging.info("RA   Axis 1: %f %f %f" % (h['crval1']*r2d,h['cdelt1']*r2d*3600.0,h['crpix1']))
        logging.info("DEC  Axis 2: %f %f %f" % (h['crval2']*r2d,h['cdelt2']*r2d*3600.0,h['crpix2']))
        if nz > 1:
            # @todo check if this is really a freq axis (for ALMA it is, but...)
            t3 = h['ctype3']
            df = h['cdelt3']
            fc = h['crval3'] + (0.5*(float(shape[2])-1)-h['crpix3'])*df        # center freq; 0 based pixels
            if h.has_key('restfreq'):
                fr = float(h['restfreq'][0])
            else:
                fr = fc
            fw = df*float(shape[2])
            dv = -df/fr*utils.c 
            logging.info("Freq Axis 3: %g %g %g" % (h['crval3']/1e9,h['cdelt3']/1e9,h['crpix3']))
            logging.info("Cube Axis 3: type=%s  velocity increment=%f km/s @ fc=%f fw=%f GHz" % (t3,dv,fc/1e9,fw/1e9))
        # @todo sort out this restfreq/vlsr
        # report 'reffreqtype', 'restfreq' 'telescope'
        # if the fits file has ALTRVAL/ALTRPIX, this is lost in CASA?
        # but if you do fits->casa->fits , it's back in fits (with some obvious single precision loss of digits)
        # @todo ZSOURCE is the proposed VLSR slot in the fits header, but this has frame issues (it's also optical)
        #
        # Another method to get the vlsr is to override the restfreq (f0) with an AT keyword
        # and the 'restfreq' from the header (f) is then used to compute the vlsr:   v = c (1 - f/f0)
        #
        if shape[2] > 1 and h.has_key('restfreq'):
            logging.info("RESTFREQ: %g %g %g" % (fr/1e9,h['restfreq'][0]/1e9,restfreq))
            if shape[2] > 1:
                # v_radio of the center of the window w.r.t. restfreq
                c = utils.c             # 299792.458 km/s
                vlsrc = c*(1-fc/fr)     # @todo rel frame?
                vlsrw = dv*float(shape[2])
                if restfreq > 0:
                    vlsrf = c*(1-fr/restfreq/1e9)
                    h['vlsr'] = vlsrf
                else:
                    vlsrf = 0.0
                logging.info("VLSRc = %f  VLSRw = %f  VLSRf = %f VLSR = %f" % (vlsrc, vlsrw, vlsrf, vlsr))
                if h['vlsr'] == 0.0: # @todo! This fails if vlsr actually is zero. Need another magic number
                    h['vlsr'] = vlsrc
                    logging.warning("Warning: No VLSR found, substituting VLSRc = %f" % vlsrc)
        else:
            msg = 'Ingest_AT: missing RESTFREQ'
            print msg
        # @todo   LINTRN  is the ALMA keyword that designates the expected line transition in a spw

        self._summarize(fitsfile, bdpfile, h, shape, taskargs)

        dt.tag("done")
        dt.end()
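
The box=/edge= handling above maps the user selection onto a blc/trc pair for taskinit.rg.box(). Here is a small pure-Python sketch of the same bookkeeping, useful for sanity-checking a selection before running Ingest_AT; the helper is illustrative and not part of the AT (0-based pixels, shape given as (nx, ny, nz)).

def box_edge_to_blc_trc(shape, box=[], edge=[]):
    """Mirror of the selection logic above: returns (blc, trc) lists."""
    nx, ny, nz = shape
    if len(edge) == 1:
        edge = [edge[0], edge[0]]
    if len(box) == 2:                                  # z-range only
        if len(edge) > 0:
            raise ValueError("cannot use edge= when box=[z1,z2] is used")
        return [0, 0, box[0]], [nx - 1, ny - 1, box[1]]
    if len(box) == 4:
        if len(edge) == 0:                             # XY box, all channels
            return [box[0], box[1]], [box[2], box[3]]
        return ([box[0], box[1], edge[0]],             # XY box minus edge channels
                [box[2], box[3], nz - edge[1] - 1])
    if len(box) == 6:                                  # full XYZ box
        return box[:3], box[3:]
    if len(edge) == 2:                                 # edge channels only
        return [0, 0, edge[0]], [nx - 1, ny - 1, nz - edge[1] - 1]
    raise ValueError("illegal box=%s edge=%s" % (box, edge))

# box_edge_to_blc_trc((100, 100, 60), edge=[5])  ->  ([0, 0, 5], [99, 99, 54])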
Example n. 13
0
    def run(self):
        """ The run method creates the BDP

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        dt = utils.Dtime("CubeSum")  # tagging time
        self._summary = {}  # an ADMIT summary will be created here

        numsigma = self.getkey("numsigma")  # get the input keys
        sigma = self.getkey("sigma")
        use_lines = self.getkey("linesum")
        pad = self.getkey("pad")

        b1 = self._bdp_in[0]  # spw image cube
        b1a = self._bdp_in[1]  # cubestats (optional)
        b1b = self._bdp_in[2]  # linelist  (optional)

        f1 = b1.getimagefile(bt.CASA)
        taskinit.ia.open(self.dir(f1))
        s = taskinit.ia.summary()
        nchan = s['shape'][2]

        if b1b != None:
            ch0 = b1b.table.getFullColumnByName("startchan")
            ch1 = b1b.table.getFullColumnByName("endchan")
            s = Segments(ch0, ch1, nchan=nchan)
            # @todo something isn't merging here as i would have expected,
            #       e.g. test0.fits [(16, 32), (16, 30), (16, 29)]
            if pad > 0:
                for (c0, c1) in s.getsegmentsastuples():
                    s.append([c0 - pad, c0])
                    s.append([c1, c1 + pad])
            s.merge()
            s.recalcmask()
            # print "PJT segments:",s.getsegmentsastuples()
            ns = len(s.getsegmentsastuples())
            chans = s.chans(not use_lines)
            if use_lines:
                msum = s.getmask()
            else:
                msum = 1 - s.getmask()
            logging.info("Read %d segments" % ns)
            # print "chans",chans
            # print "msum",msum

        #  from a deprecated keyword, but kept here to pre-smooth the spectrum before clipping
        #  examples are:  ['boxcar',3]    ['gaussian',7]    ['hanning',5]
        smooth = []

        sig_const = False  # figure out if sigma is taken as constant in the cube
        if b1a == None:  # if no 2nd BDP was given, sigma needs to be specified
            if sigma <= 0.0:
                raise Exception, "Neither user-supplied sigma nor CubeStats_BDP input given. One is required."
            else:
                sig_const = True  # and is constant
        else:
            if sigma > 0:
                sigma = b1a.get("sigma")
                sig_const = True

        if sig_const:
            logging.info("Using constant sigma = %f" % sigma)
        else:
            logging.info("Using varying sigma per plane")

        infile = b1.getimagefile(bt.CASA)  # ADMIT filename of the image (cube)
        bdp_name = self.mkext(
            infile, 'csm'
        )  # morph to the new output name with replaced extension 'csm'
        image_out = self.dir(bdp_name)  # absolute filename

        args = {
            "imagename": self.dir(infile)
        }  # assemble arguments for immoments()
        args["moments"] = 0  # only need moments=0 (or [0] is ok as well)
        args["outfile"] = image_out  # note full pathname

        dt.tag("start")

        if sig_const:
            args["excludepix"] = [-numsigma * sigma,
                                  numsigma * sigma]  # single global sigma
            if b1b != None:
                # print "PJT: ",chans
                args["chans"] = chans
        else:
            # @todo    in this section bad channels can cause a fully masked cubesum = bad
            # cubestats input
            sigma_array = b1a.table.getColumnByName(
                "sigma")  # channel dependent sigma
            sigma_pos = sigma_array[np.where(sigma_array > 0)]
            smin = sigma_pos.min()
            smax = sigma_pos.max()
            logging.info("sigma varies from %f to %f" % (smin, smax))
            maxval = b1a.get("maxval")  # max in cube
            nzeros = len(np.where(sigma_array <= 0.0)[0])  # check bad channels
            if nzeros > 0:
                logging.warning("There are %d NaN channels " % nzeros)
                # raise Exception,"need to recode CubeSum or use constant sigma"
            dt.tag("grab_sig")

            if len(smooth) > 0:
                # see also LineID and others
                filter = Filter1D.Filter1D(
                    sigma_array, smooth[0],
                    **Filter1D.Filter1D.convertargs(smooth))
                sigma_array = filter.run()
                dt.tag("smooth_sig")
            # create a CASA image copy for making the mirror sigma cube to mask against
            file = self.dir(infile)
            mask = file + "_mask"
            taskinit.ia.fromimage(infile=file, outfile=mask)
            nx = taskinit.ia.shape()[0]
            ny = taskinit.ia.shape()[1]
            nchan = taskinit.ia.shape()[2]
            taskinit.ia.fromshape(shape=[nx, ny, 1])
            plane = taskinit.ia.getchunk(
                [0, 0, 0],
                [-1, -1, 0])  # convenience plane for masking operation
            dt.tag("mask_sig")

            taskinit.ia.open(mask)
            dt.tag("open_mask")

            count = 0
            for i in range(nchan):
                if sigma_array[i] > 0:
                    if b1b != None:
                        if msum[i]:
                            taskinit.ia.putchunk(plane * 0 + sigma_array[i],
                                                 blc=[0, 0, i, -1])
                            count = count + 1
                        else:
                            taskinit.ia.putchunk(plane * 0 + maxval,
                                                 blc=[0, 0, i, -1])
                    else:
                        taskinit.ia.putchunk(plane * 0 + sigma_array[i],
                                             blc=[0, 0, i, -1])
                        count = count + 1
                else:
                    taskinit.ia.putchunk(plane * 0 + maxval, blc=[0, 0, i, -1])
            taskinit.ia.close()
            logging.info("%d/%d channels used for CubeSum" % (count, nchan))
            dt.tag("close_mask")

            names = [file, mask]
            tmp = file + '.tmp'
            if numsigma == 0.0:
                # hopefully this will also make use of the mask
                exp = "IM0[IM1<%f]" % (0.99 * maxval)
            else:
                exp = "IM0[abs(IM0/IM1)>%f]" % (numsigma)
            # print "PJT: exp",exp
            casa.immath(mode='evalexpr',
                        imagename=names,
                        expr=exp,
                        outfile=tmp)
            args["imagename"] = tmp
            dt.tag("immath")

        casa.immoments(**args)
        dt.tag("immoments")

        if sig_const is False:
            # get rid of temporary files
            utils.remove(tmp)
            utils.remove(mask)

        # get the flux
        taskinit.ia.open(image_out)
        st = taskinit.ia.statistics()
        taskinit.ia.close()
        dt.tag("statistics")
        # report that flux, but there's no way to get the units from casa it seems
        # ia.summary()['unit'] is usually 'Jy/beam.km/s' for ALMA
        # imstat() does seem to know it.
        if st.has_key('flux'):
            rdata = [st['flux'][0], st['sum'][0]]
            logging.info("Total flux: %f (sum=%f)" % (st['flux'], st['sum']))
        else:
            rdata = [st['sum'][0]]
            logging.info("Sum: %f (beam parameters missing)" % (st['sum']))
        logging.regression("CSM: %s" % str(rdata))

        # Create two output images for html and their thumbnails, too
        implot = ImPlot(ptype=self._plot_type,
                        pmode=self._plot_mode,
                        abspath=self.dir())
        implot.plotter(rasterfile=bdp_name, figname=bdp_name, colorwedge=True)
        figname = implot.getFigure(figno=implot.figno, relative=True)
        thumbname = implot.getThumbnail(figno=implot.figno, relative=True)

        dt.tag("implot")

        thumbtype = bt.PNG  # really should be correlated with self._plot_type!!

        # 2. Create a histogram of the map data
        # get the data for a histogram
        data = casautil.getdata(image_out, zeromask=True).compressed()
        dt.tag("getdata")

        # get the label for the x axis
        bunit = casa.imhead(imagename=image_out, mode="get", hdkey="bunit")

        # Make the histogram plot
        # Since we give abspath in the constructor, figname should be relative
        myplot = APlot(ptype=self._plot_type,
                       pmode=self._plot_mode,
                       abspath=self.dir())
        auxname = bdp_name + "_histo"
        auxtype = bt.PNG  # really should be correlated with self._plot_type!!
        myplot.histogram(columns=data,
                         figname=auxname,
                         xlab=bunit,
                         ylab="Count",
                         title="Histogram of CubeSum: %s" % (bdp_name),
                         thumbnail=True)
        auxname = myplot.getFigure(figno=myplot.figno, relative=True)
        auxthumb = myplot.getThumbnail(figno=myplot.figno, relative=True)

        images = {bt.CASA: bdp_name, bt.PNG: figname}
        casaimage = Image(images=images,
                          auxiliary=auxname,
                          auxtype=auxtype,
                          thumbnail=thumbname,
                          thumbnailtype=thumbtype)

        if hasattr(b1, "line"):  # SpwCube doesn't have Line
            line = deepcopy(getattr(b1, "line"))
            if not isinstance(line, Line):
                line = Line(name="Undetermined")
        else:
            line = Line(name="Undetermined")  # fake a Line if there wasn't one

        self.addoutput(
            Moment_BDP(xmlFile=bdp_name,
                       moment=0,
                       image=deepcopy(casaimage),
                       line=line))
        imcaption = "Integral (moment 0) of all emission in image cube"
        auxcaption = "Histogram of cube sum for image cube"
        taskargs = "numsigma=%.1f sigma=%g smooth=%s" % (numsigma, sigma,
                                                         str(smooth))
        self._summary["cubesum"] = SummaryEntry([
            figname, thumbname, imcaption, auxname, auxthumb, auxcaption,
            bdp_name, infile
        ], "CubeSum_AT", self.id(True), taskargs)

        dt.tag("done")
        dt.end()
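
The varying-sigma branch above builds a mirror cube that holds each channel's sigma (or maxval for channels to be excluded) and clips with the immath expression IM0[abs(IM0/IM1)>numsigma] before running immoments. Below is a small numpy sketch of the same per-channel clipping on an in-memory array, illustrative only; the cube is assumed to have shape (nx, ny, nchan).

import numpy as np

def clipped_moment0(cube, sigma_per_chan, numsigma, dv=1.0):
    """Sum over channels, keeping only pixels with |value| > numsigma*sigma
    of their own channel (the IM0[abs(IM0/IM1)>numsigma] idea above);
    dv is the channel width applied to the sum."""
    sig = np.asarray(sigma_per_chan)             # shape (nchan,)
    keep = np.abs(cube) > numsigma * sig         # broadcasts along the channel axis
    return np.where(keep, cube, 0.0).sum(axis=2) * dv

# e.g.
# cube = np.random.normal(0.0, 1.0, (8, 8, 16))
# mom0 = clipped_moment0(cube, np.ones(16), numsigma=3.0)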
Example n. 14
0
    def run(self):
        """ The run method creates the BDP

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        self._summary = {}
        dt = utils.Dtime("Smooth")
        dt.tag("start")
        # get the input keys
        bmaj   = self.getkey("bmaj")
        bmin   = self.getkey("bmin")
        bpa    = self.getkey("bpa")
        velres = self.getkey("velres")

        # take care of potential issues in the unit strings
        # @todo  if not provided?
        bmaj['unit'] = bmaj['unit'].lower()
        bmin['unit'] = bmin['unit'].lower()
        velres['unit'] = velres['unit'].lower()
        taskargs = "bmaj=%s bmin=%s bpa=%s velres=%s" % (bmaj,bmin,bpa,velres)
        

        bdpnames=[]
        for ibdp in self._bdp_in:
            istem = ibdp.getimagefile(bt.CASA)
            image_in = ibdp.baseDir() + istem

            bdp_name = self.mkext(istem,'sim')
            image_out = self.dir(bdp_name)
          
            taskinit.ia.open(image_in)        
            h = casa.imhead(image_in, mode='list')
            pix_scale = np.abs(h['cdelt1'] * 206265.0) # pix scale in asec @todo QA ?
            CC = 299792458.0 # speed of light  @todo somewhere else   [utils.c , but in km/s]

            rest_freq = h['crval3']
            # frequency pixel scale in km/s 
            vel_scale = np.abs(CC*h['cdelt3']/rest_freq/1000.0)

            # unit conversion to arcsec (spatial) or km/s 
            # (velocity) or some flavor of Hz.

            if(bmaj['unit'] == 'pixel'):
                bmaj = bmaj['value']*pix_scale
            else:
                bmaj = bmaj['value']
            if(bmin['unit'] == 'pixel'):
                bmin = bmin['value']*pix_scale
            else:
                bmin = bmin['value']

            hertz_input = False
            if velres['unit'] == 'pixel':
                velres['value'] = velres['value']*vel_scale
                velres['unit'] = 'km/s'
            elif velres['unit'] == 'm/s':
                velres['value'] = velres['value']/1000.0
                velres['unit'] = 'km/s'
            elif velres['unit'][-2:] == 'hz':
                hertz_input = True
            elif velres['unit'] == 'km/s':
                pass
            else:
                logging.error("Unknown units in velres=%s" % velres['unit'])

            rdata = bmaj

            # we smooth in velocity first. if smoothing in velocity
            # the cube apparently must be closed afterwards and 
            # then reopened if spatial smoothing is to be done.

            if velres['value'] > 0:
                # handle the different units allowed. CASA doesn't
                # like lowercase for hz units...          
                if not hertz_input:
                    freq_res = str(velres['value']*1000.0/CC *rest_freq )+'Hz'
                else:
                    freq_res = str(velres['value'])
                    # try to convert velres to km/s for debug purposes
                    velres['value'] = velres['value']/rest_freq*CC / 1000.0 
                    if(velres['unit'] == 'khz'):
                        velres['value'] = velres['value']*1000.0
                        velres['unit'] = 'kHz'
                    elif(velres['unit']=='mhz'):
                        velres['value'] = velres['value']*1E6
                        velres['unit'] = 'MHz'
                    elif(velres['unit']=='ghz'):
                        velres['value'] = velres['value']*1E9
                        velres['unit'] = 'GHz'
                    freq_res = freq_res + velres['unit']

                # NB: there is apparently a bug in CASA: smoothing along the frequency axis
                # alone does not work. sepconvolve gives a unit error (says the axis unit is radian rather
                # than Hz). MUST smooth in 2+ dimensions if you want this to work.

                if(velres['value'] < vel_scale):
                    raise Exception,"Desired velocity resolution %g less than pixel scale %g" % (velres['value'],vel_scale)
                image_tmp = self.dir('tmp.smooth')
                im2=taskinit.ia.sepconvolve(outfile=image_tmp,axes=[0,1,2], types=["boxcar","boxcar","gauss"],\
                                              widths=['1pix','1pix',freq_res], overwrite=True)
                im2.done()
                logging.debug("sepconvolve to %s" % image_out)
                # for some reason, doing this in memory does not seem to work, so outfile must be specified.

                logging.info("Smoothing cube to a velocity resolution of %s km/s" % str(velres['value']))
                logging.info("Smoothing cube to a frequency resolution of %s" % freq_res)
                taskinit.ia.close()
                taskinit.ia.open(image_tmp)
                dt.tag("sepconvolve")
            else:
                image_tmp = image_out

            # now do the spatial smoothing 

            convolve_to_min_beam = True                     # default is to convolve to a min enclosing beam

            if bmaj > 0 and bmin > 0:
                # form qa objects out of these so that casa can understand
                bmaj = taskinit.qa.quantity(bmaj,'arcsec')
                bmin = taskinit.qa.quantity(bmin,'arcsec')
                bpa  = taskinit.qa.quantity(bpa,'deg')

                target_res={}
                target_res['major'] = bmaj
                target_res['minor'] = bmin
                target_res['positionangle'] = bpa

                # throw an exception if cannot be convolved

                try:
                    # for whatever reason, if you give convolve2d a beam parameter,
                    # it complains ...
                    im2=taskinit.ia.convolve2d(outfile=image_out,major = bmaj,\
                                             minor = bmin, pa = bpa,\
                                             targetres=True,overwrite=True)
                    im2.done()
                    logging.info("Smoothing cube to a resolution of %s by %s at a PA of %s" %
                                      (str(bmaj['value']), str(bmin['value']), str(bpa['value'])))
                    convolve_to_min_beam = False
                    achieved_res = target_res
                except:
                    # @todo   remind what you need ?
                    logging.error("Warning: Could not convolve to requested resolution of "\
                            +str(bmaj['value']) + " by " + str(bmin['value']) + \
                            " at a PA of "+ str(bpa['value']))
                    raise Exception,"Could not convolve to beam given!"
            dt.tag("convolve2d-1")

            if convolve_to_min_beam:
                restoring_beams = taskinit.ia.restoringbeam()
                commonbeam = taskinit.ia.commonbeam()
                # for whatever reason, setrestoringbeam does not use the same set of hashes...
                commonbeam['positionangle']=commonbeam['pa']
                del commonbeam['pa']

                # if there's one beam, apparently the beams keyword does not exist
                if 'beams' in restoring_beams: 
                    print "Smoothing cube to a resolution of "+  \
                         str(commonbeam['major']['value']) +" by "+ \
                         str(commonbeam['minor']['value'])+" at a PA of "\
                        +str(commonbeam['pa']['value'])  
                    target_res = commonbeam
                    im2=taskinit.ia.convolve2d(outfile=image_out,major=commonbeam['major'],\
                                               minor=commonbeam['minor'],\
                                               pa=commonbeam['positionangle'],\
                                               targetres=True,overwrite=True)
                    im2.done()
                    achieved_res = commonbeam
                    dt.tag("convolve2d-2")
                else:
                    print "One beam for all planes. Smoothing to common beam redundant."
                    achieved_res = commonbeam 
                    if velres['value'] < 0:
                        taskinit.ia.fromimage(outfile=image_out, infile=image_in)
                    # not really doing anything
                # else, we've already done what we needed to

                taskinit.ia.setrestoringbeam(beam = achieved_res)
                rdata = achieved_res['major']['value']

            # else do no smoothing and just close the image

            taskinit.ia.close() 
            dt.tag("close")

            b1 = SpwCube_BDP(bdp_name)
            self.addoutput(b1) 
            # need to update for multiple images.

            b1.setkey("image", Image(images={bt.CASA:bdp_name}))

            bdpnames.append(bdp_name)

            # and clean up the temp image before the next image
            if velres['value'] > 0:
                utils.remove(image_tmp)

        # these are task arguments, not summary entries.
        _bmaj = taskinit.qa.convert(achieved_res['major'],'rad')['value']
        _bmin = taskinit.qa.convert(achieved_res['minor'],'rad')['value']
        _bpa = taskinit.qa.convert(achieved_res['positionangle'],'deg')['value']
        vres = "%.2f %s" % (velres['value'],velres['unit'])

        logging.regression("SMOOTH: %f %f" % (rdata,velres['value']))
       
        self._summary["smooth"] = SummaryEntry([bdp_name,convolve_to_min_beam,_bmaj,_bmin,_bpa,vres],"Smooth_AT",self.id(True),taskargs)
        dt.tag("done")
        dt.end()
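
The velres handling above converts the requested velocity resolution into a frequency width for ia.sepconvolve() using df = dv/c * f_rest (and the reverse when a Hz-style value is given, for logging). Here is a minimal sketch of that conversion, with dv in km/s and frequencies in Hz; the function names are illustrative.

C_KMS = 299792.458        # speed of light in km/s

def velres_to_freqres_hz(dv_kms, rest_freq_hz):
    """frequency smoothing width corresponding to a velocity resolution"""
    return dv_kms / C_KMS * rest_freq_hz

def freqres_to_velres_kms(df_hz, rest_freq_hz):
    """inverse: velocity resolution for a given frequency width"""
    return df_hz / rest_freq_hz * C_KMS

# e.g. 10 km/s at a 100 GHz rest frequency is about 3.34 MHz:
# velres_to_freqres_hz(10.0, 100e9)  ->  ~3.34e6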
Example n. 15
0
def CMEform(comp, truthName, CMEDir, fMHz=-1.):
    '''
    Form the truth image. We assume that the CME moves outward in frequency.

    comp is a list of doubles with structure (MHz, delta, offangle, major, minor, PA, flux, resolution (arcmin), imSize).

    If fMHz is given (not the default -1), it takes the place of comp[0].

    truthName (string) is the basename of the .truth image created; CMEDir is the direction of the image center.

    Puts a single Gaussian source on a truth model to be fed through the simulation pipeline.

    '''
    ##
    if fMHz == -1.:
        freq0 = comp[0]
    else:
        freq0 = fMHz
    delta = comp[1]
    delta1 = qa.toangle(str(delta) + 'deg')
    offangle = comp[2]
    # offangle1 = initoffangle #qa.toangle(str(offangle)+'deg') #for random
    offangle1 = qa.toangle(str(offangle) + 'deg')
    NewDir = me.shift(CMEDir, offset=delta1, pa=offangle1)
    major = comp[3]
    SizeMajor = qa.toangle(str(major) + 'deg')
    minor = comp[4]
    SizeMinor = qa.toangle(str(minor) + 'deg')
    PA = comp[5]  #(90 + initoffangle['value'])%360. #comp[5]
    PA = qa.toangle(str(PA) + 'deg')
    Flux = comp[6]
    resolution = comp[7]  #in arcmin
    imSize = int(comp[8])

    #Construct an empty casa image from a shape
    ia.fromshape(truthName, shape=[imSize, imSize, 1, 1], overwrite=True)
    #adding components to  the empty image file
    cl.addcomponent(dir=NewDir, \
               flux=Flux, fluxunit='Jy', freq=str(freq0)+'MHz', \
                   shape="Gaussian", majoraxis=SizeMajor, minoraxis=SizeMinor, positionangle=PA)

    # cl.addcomponent(dir=CMEDir, flux=1e10, fluxunit='Jy', freq=comp[0],
    #                 shape='point')
    #
    # cl.addcomponent(dir=NewDir, flux=1e9, fluxunit='Jy', freq=comp[0],
    #                 shape='point')
    #
    # cl.addcomponent(dir=NewDir2, flux=7e9, fluxunit='Jy', freq=comp[0],
    #                  shape='point')

    cs = ia.coordsys()
    cs.setunits(['rad', 'rad', '', 'Hz'])
    cell_rad = qa.convert(qa.quantity(str(resolution) + "arcmin"),
                          "rad")['value']
    cs.setincrement([-cell_rad, cell_rad], 'direction')
    cs.setreferencevalue([CMEDir['m0']['value'], CMEDir['m1']['value']],
                         type="direction")
    cs.setrestfrequency(comp[0])
    # add important header keywords
    imhead(imagename=truthName,
           mode="put",
           hdkey="object",
           hdvalue="Model CME")
    imhead(imagename=truthName,
           mode="put",
           hdkey="imtype",
           hdvalue='Intensity')
    imhead(imagename=truthName,
           mode="put",
           hdkey="observer",
           hdvalue="simulation")
    imhead(imagename=truthName,
           mode="put",
           hdkey="date-obs",
           hdvalue="2023/03/15/00:00:00")
    imhead(imagename=truthName,
           mode="put",
           hdkey="reffreqtype",
           hdvalue='TOPO')
    imhead(imagename=truthName,
           mode="put",
           hdkey="restfreq",
           hdvalue=str(comp[0]) + 'MHz')
    imhead(imagename=truthName, mode='list')
    cs.setreferencevalue(str(comp[0]) + 'MHz', 'spectral')
    cs.settelescope('VLA')
    ia.setcoordsys(cs.torecord())
    ia.setbrightnessunit("Jy/pixel")
    ia.modify(cl.torecord(), subtract=False)

    # pix = ia.getchunk()
    # pix = pix.reshape((imSize, imSize))

    # p = np.flipud(pix.T)

    ia.close()
    cl.done()
    qa.done()
    me.done()

    return  #pix
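
A hedged usage sketch of CMEform(): the direction, component values and output name below are purely illustrative, and the CASA tools (me, qa, ia, cl) are assumed to be available in the same session as above.

# illustrative only: one offset Gaussian component at 45 MHz
cme_center = me.direction('J2000', '0h0m0s', '-5d0m0s')   # image / CME center
comp = [45.0,    # MHz
        0.5,     # delta: offset of the component from the center, deg
        30.0,    # offangle: position angle of that offset, deg
        0.4,     # major axis, deg
        0.2,     # minor axis, deg
        60.0,    # PA of the Gaussian, deg
        100.0,   # flux, Jy
        1.0,     # resolution (cell size), arcmin
        512]     # image size, pixels
CMEform(comp, 'cme_45MHz.truth', cme_center)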
Example n. 16
0
def imageForm(filename, truthImage, freq, dataArr):

    brightness = np.sum(dataArr)    # total brightness as a scalar

    print 'tot brightness is ' + str(brightness)
    imSize = 400
    zoomimg = spndint.zoom(dataArr, float(imSize) / 400)
    zdims = np.shape(zoomimg)

    for i in range(zdims[0]):
        for j in range(zdims[1]):
            if zoomimg[i][j] < 0.0:
                zoomimg[i][j] = 0.0

    newbrightness = sum(zoomimg)
    zoomimg *= brightness / newbrightness

    print 'tot brightness is ' + str(sum(zoomimg))

    z = zoomimg.copy().T
    z = np.fliplr(z)  #these operations flip to CASA style of storing data
    #which starts at lower left corner, and goes up by columns left to right

    casaArr = z.reshape((imSize, imSize, 1, 1))

    toutfile = os.path.join('.', truthImage)
    #if the truth image already exists remove it

    if os.path.exists(toutfile):
        shutil.rmtree(toutfile)

    ia.fromarray(truthImage, pixels=casaArr, overwrite=True)

    cs = ia.coordsys()
    cs.setunits(['rad', 'rad', '', 'Hz'])
    res1 = 400
    res = 400
    width = res1 * .038 * np.pi / 180.
    dres = width / res
    cell_rad = dres  #qa.convert(qa.quantity("1arcmin"),"rad")['value']
    imSize = res
    cs.setincrement([-cell_rad, cell_rad], 'direction')
    cs.setreferencevalue([CMEDir['m0']['value'], CMEDir['m1']['value']],
                         type="direction")
    cs.setrestfrequency(freq)

    # add important header keywords
    imhead(imagename=truthImage, mode="put", hdkey="object", hdvalue="DRAGN")
    imhead(imagename=truthImage,
           mode="put",
           hdkey="imtype",
           hdvalue='Intensity')
    imhead(imagename=truthImage,
           mode="put",
           hdkey="observer",
           hdvalue="simulation")
    imhead(imagename=truthImage,
           mode="put",
           hdkey="date-obs",
           hdvalue="2023/03/15/00:00:00")
    imhead(imagename=truthImage,
           mode="put",
           hdkey="reffreqtype",
           hdvalue='TOPO')
    imhead(imagename=truthImage,
           mode="put",
           hdkey="restfreq",
           hdvalue=str(freq) + 'Hz')  # freq is in Hz, matching the spectral reference below
    imhead(imagename=truthImage, mode='list')
    cs.setreferencevalue(str(freq) + 'Hz', 'spectral')
    Telescope = 'VLA'  #or else it breaks and whines
    cs.settelescope(Telescope)
    ia.setcoordsys(cs.torecord())
    ia.setbrightnessunit("Jy/pixel")

    pix = ia.getchunk()
    pix = pix.reshape((imSize, imSize))

    zoomimg = pix

    ia.close()

    return zoomimg
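
A minimal usage sketch for imageForm (illustrative only, not from the original source): it assumes numpy is imported as np, that the CASA tools, scipy import and the module-level CMEDir direction the function relies on are already set up, and that the frequency is given in Hz, as the header calls inside the function imply.

import numpy as np

# hypothetical 400x400 model in Jy/pixel: a centrally peaked Gaussian blob
yy, xx = np.mgrid[0:400, 0:400]
model = 5.0 * np.exp(-((xx - 200.)**2 + (yy - 200.)**2) / (2. * 40.**2))

# 'model.dat' is a placeholder for the filename argument; 1.5e8 Hz = 150 MHz
truth_pixels = imageForm('model.dat', 'model.truth', 1.5e8, model)
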
Example 17
    def run(self):
        """ The run method, calculates the moments and creates the BDP(s)

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        self._summary = {}
        momentsummary = []
        dt = utils.Dtime("Moment")

        # variable to track if we are using a single cutoff for all moment maps
        allsame = False
        moments = self.getkey("moments")
        numsigma = self.getkey("numsigma")
        mom0clip = self.getkey("mom0clip")
        # determine if there is only 1 cutoff or if there is a cutoff for each moment
        if len(moments) != len(numsigma):
            if len(numsigma) != 1:
                raise Exception("Length of numsigma and moment lists do not match. They must be the same length or the length of the cutoff list must be 1.")
            allsame = True
        # default moment file extensions, this is information copied from casa.immoments()
        momentFileExtensions = {-1: ".average",
                                 0: ".integrated",
                                 1: ".weighted_coord",
                                 2: ".weighted_dispersion_coord",
                                 3: ".median",
                                 4: "",
                                 5: ".standard_deviation",
                                 6: ".rms",
                                 7: ".abs_mean_dev",
                                 8: ".maximum",
                                 9: ".maximum_coord",
                                10: ".minimum",
                                11: ".minimum_coord",
                                }

        logging.debug("MOMENT: %s %s %s" %  (str(moments), str(numsigma), str(allsame)))

        # get the input casa image from bdp[0]
        # also get the channels the line actually covers (if any)
        bdpin = self._bdp_in[0]
        infile = bdpin.getimagefile(bt.CASA)
        chans = self.getkey("chans")
        # the basename of the moments, we will append _0, _1, etc.
        basename = self.mkext(infile, "mom")
        fluxname = self.mkext(infile, "flux")
        # beamarea = nppb(self.dir(infile))
        beamarea = 1.0  # until we have it from the MOM0 map

        sigma0 = self.getkey("sigma")
        sigma  = sigma0

        ia = taskinit.iatool()

        dt.tag("open")

        # if no CubeStats BDP was given and no sigma was specified, find a
        # noise level via casa.imstat()
        if self._bdp_in[1] is None and sigma <= 0.0:
            raise Exception("A sigma or a CubeStats_BDP must be input to calculate the cutoff")
        elif self._bdp_in[1] is not None:
            sigma = self._bdp_in[1].get("sigma")

        # immoments is a bit peculiar: if you give one moment, it uses exactly
        # the outfile you picked; for multiple moments it appends extensions
        # such as .integrated [0], .weighted_coord [1], etc.  We loop over the
        # moments and use the numeric extension instead, which can be a
        # laborious loop for big input cubes.
        #
        # arguments for immoments
        args = {"imagename" : self.dir(infile),
                "moments"   : moments,
                "outfile"   : self.dir(basename)}

        # set the channels if given
        if chans != "":
            args["chans"] = chans
        # error check the mom0clip input
        if mom0clip > 0.0 and not 0 in moments:
            logging.warning("mom0clip given, but no moment0 map was requested. One will be generated anyway.")
            # add moment0 to the list of computed moments, but it has to be first
            moments.insert(0,0)
            if not allsame:
                numsigma.insert(0, 2.0*sigma)

        if allsame:
            # this is only executed now if len(moments) > 1 and len(cutoff)==1
            args["excludepix"] = [-numsigma[0] * sigma, numsigma[0] * sigma]
            casa.immoments(**args)
            dt.tag("immoments-all")
        else:
            # this is executed if len(moments)==len(cutoff), even when len=1
            for i in range(len(moments)):
                args["excludepix"] = [-numsigma[i] * sigma, numsigma[i] * sigma]
                args["moments"] = moments[i]
                args["outfile"] = self.dir(basename + momentFileExtensions[moments[i]])
                casa.immoments(**args)
                dt.tag("immoments-%d" % moments[i])

        taskargs = "moments=%s numsigma=%s" % (str(moments), str(numsigma)) 
        if sigma0 > 0:
            taskargs = taskargs + " sigma=%.2f" % sigma0
        if mom0clip > 0:
            taskargs = taskargs + " mom0clip=%g" % mom0clip
        if chans == "": 
            taskargs = taskargs + " chans=all"
        else:
            taskargs = taskargs + " chans=%s" % str(chans)
        taskargs += '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; <span style="background-color:white">&nbsp;' + basename.split('/')[0] + '&nbsp;</span>'

        # generate the mask to be applied to all but moment 0
        if mom0clip > 0.0:
            # get the statistics from mom0 map
            # this is usually a very biased map, so unclear if mom0sigma is all that reliable
            args = {"imagename": self.dir(infile)}
            stat = casa.imstat(imagename=self.dir(basename + momentFileExtensions[0]))
            mom0sigma = float(stat["sigma"][0])
            # generate a temporary masked file, mask will be copied to other moments
            args = {"imagename" : self.dir(basename + momentFileExtensions[0]),
                    "expr"      : 'IM0[IM0>%f]' % (mom0clip * mom0sigma),
                    "outfile"   : self.dir("mom0.masked")
                    }
            casa.immath(**args)
            # get the default mask name
            ia.open(self.dir("mom0.masked"))
            defmask = ia.maskhandler('default')
            ia.close()
            dt.tag("mom0clip")

        # loop over moments to rename them to _0, _1, _2 etc.
        # apply a mask as well for proper histogram creation
        map = {}
        myplot = APlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())
        implot = ImPlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())

        for mom in moments:
            figname = imagename = "%s_%i" % (basename, mom)
            tempname = basename + momentFileExtensions[mom]
            # rename and remove the old one if there is one
            utils.rename(self.dir(tempname), self.dir(imagename))
            # copy the moment0 mask if requested; this relies on mom0 having been computed first
            if mom0clip > 0.0 and mom != 0:
                #print "PJT: output=%s:%s" % (self.dir(imagename), defmask[0])
                #print "PJT: inpmask=%s:%s" % (self.dir("mom0.masked"),defmask[0])
                makemask(mode="copy", inpimage=self.dir("mom0.masked"),
                         output="%s:%s" % (self.dir(imagename), defmask[0]),
                         overwrite=True, inpmask="%s:%s" % (self.dir("mom0.masked"),
                                                            defmask[0]))
                ia.open(self.dir(imagename))
                ia.maskhandler('set', defmask)
                ia.close()
                dt.tag("makemask")
            if mom == 0:
                beamarea = nppb(self.dir(imagename))
            implot.plotter(rasterfile=imagename,figname=figname,
                           colorwedge=True,zoom=self.getkey("zoom"))
            imagepng  = implot.getFigure(figno=implot.figno,relative=True)
            thumbname = implot.getThumbnail(figno=implot.figno,relative=True)
            images = {bt.CASA : imagename, bt.PNG  : imagepng}
            thumbtype=bt.PNG
            dt.tag("implot")

            # get the data for a histogram (ia access is about 1000-2000 times faster than imval())
            map[mom] = casautil.getdata(self.dir(imagename))
            data = map[mom].compressed()
            dt.tag("getdata")

            # make the histogram plot

            # get the label for the x axis
            bunit = casa.imhead(imagename=self.dir(imagename), mode="get", hdkey="bunit")
            # object for the caption
            objectname = casa.imhead(imagename=self.dir(imagename), mode="get", hdkey="object")

            # Make the histogram plot
            # Since we give abspath in the constructor, figname should be relative
            auxname = imagename + '_histo'
            auxtype = bt.PNG
            myplot.histogram(columns = data,
                             figname = auxname,
                             xlab    = bunit,
                             ylab    = "Count",
                             title   = "Histogram of Moment %d: %s" % (mom, imagename), thumbnail=True)

            casaimage = Image(images    = images,
                                    auxiliary = auxname,
                                    auxtype   = auxtype,
                                    thumbnail = thumbname,
                                    thumbnailtype = thumbtype)
            auxname = myplot.getFigure(figno=myplot.figno,relative=True)
            auxthumb = myplot.getThumbnail(figno=myplot.figno,relative=True)

            if hasattr(self._bdp_in[0], "line"):   # SpwCube doesn't have Line
                line = deepcopy(getattr(self._bdp_in[0], "line"))
                if not isinstance(line, Line):
                    line = Line(name="Unidentified")
            else:
                # fake a Line if there wasn't one
                line = Line(name="Unidentified")
            # add the BDP to the output array
            self.addoutput(Moment_BDP(xmlFile=imagename, moment=mom,
                           image=deepcopy(casaimage), line=line))
            dt.tag("ren+mask_%d" % mom)

            imcaption = "%s Moment %d map of Source %s" % (line.name, mom, objectname)
            auxcaption = "Histogram of %s Moment %d of Source %s" % (line.name, mom, objectname)
            thismomentsummary = [line.name, mom, imagepng, thumbname, imcaption,
                                 auxname, auxthumb, auxcaption, infile]
            momentsummary.append(thismomentsummary)

        if map.has_key(0) and map.has_key(1) and map.has_key(2):
            logging.debug("MAPs present: %s" % (map.keys()))

            # m0 needs a new mask, inherited from the more restricted m1 (and m2)
            m0 = ma.masked_where(map[1].mask,map[0])
            m1 = map[1]
            m2 = map[2]
            m01 = m0*m1
            m02 = m0*m1*m1
            m22 = m0*m2*m2
            sum0 = m0.sum()
            vmean = m01.sum()/sum0
            # lacking the full 3D cube, get two estimates and take the max
            sig1  = math.sqrt(m02.sum()/sum0 - vmean*vmean)
            sig2  = m2.max()
            #vsig = max(sig1,sig2)
            vsig = sig1
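            # i.e. the intensity-weighted mean velocity vmean = sum(m0*m1)/sum(m0);
            # sig1 is the m0-weighted dispersion of m1 and sig2 = m2.max() is the
            # alternative estimate from the moment-2 map (only sig1 is used here)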
            
            # consider clipping in the masked array (mom0clip)
            # @todo   i can't use info from line, so just borrow basename for now for grepping
            #         this also isn't really the flux, the points per beam is still in there
            loc = basename.rfind('/')
            sum1 = ma.masked_less(map[0],0.0).sum()   # mom0clip
            # print out:   LINE,FLUX1,FLUX0,BEAMAREA,VMEAN,VSIGMA for regression
            # the linechans parameter in bdpin is not useful to print out here, it's local to the LineCube
            s_vlsr = admit.Project.summaryData.get('vlsr')[0].getValue()[0]
            s_rest = admit.Project.summaryData.get('restfreq')[0].getValue()[0]/1e9
            s_line = line.frequency
            if loc>0:
                if basename[:loc][0:2] == 'U_':
                    # for U_ lines we'll reference the VLSR w.r.t. RESTFREQ in that band
                    if abs(vmean) > vsig:
                        vwarn = '*'
                    else:
                        vwarn = ''
                    vlsr = vmean + (1.0-s_line/s_rest)*utils.c
                    msg = "MOM0FLUX: %s %g %g %g %g %g %g" % (basename[:loc],map[0].sum(),sum0,beamarea,vmean,vlsr,vsig)
                else:
                    # for identified lines we'll assume the ID was correct and not bother with RESTFREQ
                    msg = "MOM0FLUX: %s %g %g %g %g %g %g" % (basename[:loc],map[0].sum(),sum0,beamarea,vmean,vmean,vsig)
            else:
                msg = "MOM0FLUX: %s %g %g %g %g %g %g" % ("SPW_FULL"    ,map[0].sum(),sum0,beamarea,vmean,vmean,vsig)
            logging.regression(msg)
            dt.tag("mom0flux")

            # create a histogram of flux per channel

            # grab the X coordinates for the histogram, we want them in km/s
            # restfreq should also be in summary
            restfreq = casa.imhead(self.dir(infile),mode="get",hdkey="restfreq")['value']/1e9    # in GHz
            # print "PJT  %.10f %.10f" % (restfreq,s_rest)
            imval0 = casa.imval(self.dir(infile))
            freqs = imval0['coords'].transpose()[2]/1e9
            x = (1-freqs/restfreq)*utils.c
            # 
            h = casa.imstat(self.dir(infile), axes=[0,1])
            if h.has_key('flux'):
                flux0 = h['flux']
            else:
                flux0 = h['sum']/beamarea
            flux0sum = flux0.sum() * abs(x[1]-x[0])
            # @todo   make a flux1 with fluxes derived from a good mask
            flux1 = flux0 
            # construct histogram
            title = 'Flux Spectrum (%g)' % flux0sum
            xlab = 'VLSR (km/s)'
            ylab = 'Flux (Jy)'
            myplot.plotter(x,[flux0,flux1],title=title,figname=fluxname,xlab=xlab,ylab=ylab,histo=True)
            dt.tag("flux-spectrum")
            
        self._summary["moments"] = SummaryEntry(momentsummary, "Moment_AT", 
                                                self.id(True), taskargs)
        # get rid of the temporary mask
        if mom0clip > 0.0: 
            utils.rmdir(self.dir("mom0.masked"))

        dt.tag("done")
        dt.end()
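
The core of the task above is the excludepix cutoff handed to immoments. A stripped-down sketch of that call, mirroring the args dictionary built in the loop (file names and the sigma value are made up; casa is the same task module used by the code above):

sigma = 0.02      # Jy/beam, hypothetical noise level
numsigma = 3.0    # cutoff in units of sigma
casa.immoments(imagename='cube.im',
               moments=[0],
               excludepix=[-numsigma * sigma, numsigma * sigma],
               chans='10~40',
               outfile='cube.mom.integrated')
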
Example 18
def CMEform(comp):
    '''
    Form the truth image.  We assume that the CME moves outward in frequency.

    '''
    ##
    delta = comp[1]
    delta1 = qa.toangle(str(delta) + 'deg')
    offangle = comp[2]
    offangle1 = initoffangle  #qa.toangle(str(offangle)+'deg')
    NewDir = me.shift(CMEDir, offset=delta1, pa=offangle1)
    major = comp[3]
    SizeMajor = qa.toangle(str(major) + 'deg')
    minor = comp[4]
    SizeMinor = qa.toangle(str(minor) + 'deg')
    PA = (90 + initoffangle['value']) % 360.  #comp[5]
    PA = qa.toangle(str(PA) + 'deg')
    Flux = comp[6]
    resolution = comp[7]
    imSize = int(comp[8])

    delta2 = qa.toangle(str(delta) + 'deg')
    offangle2 = qa.toangle(str(45) + 'deg')
    NewDir2 = me.shift(CMEDir,
                       offset=qa.toangle(str(2) + 'deg'),
                       pa=qa.toangle(str(280) + 'deg'))
    # construct an empty CASA image from a shape
    ia.fromshape(imageName, shape=[imSize, imSize, 1, 1], overwrite=True)
    # add the Gaussian component to the empty image via a component list
    cl.addcomponent(dir=NewDir,
                    flux=Flux / 10,
                    fluxunit='Jy',
                    freq=str(comp[0]) + 'MHz',  # comp[0] is in MHz (see the spectral reference below)
                    shape="Gaussian",
                    majoraxis=SizeMajor,
                    minoraxis=SizeMinor,
                    positionangle=PA)

    # cl.addcomponent(dir=CMEDir, flux=1e10, fluxunit='Jy', freq=comp[0],
    #                 shape='point')
    #
    # cl.addcomponent(dir=NewDir, flux=5e9, fluxunit='Jy', freq=comp[0],
    #                 shape='point')
    #
    # cl.addcomponent(dir=NewDir2, flux=7e9, fluxunit='Jy', freq=comp[0],
    #                 shape='point')

    cs = ia.coordsys()
    cs.setunits(['rad', 'rad', '', 'Hz'])
    cell_rad = qa.convert(qa.quantity("1.0arcmin"), "rad")['value']
    cs.setincrement([-cell_rad, cell_rad], 'direction')
    cs.setreferencevalue([CMEDir['m0']['value'], CMEDir['m1']['value']],
                         type="direction")
    cs.setrestfrequency(qa.quantity(comp[0], 'MHz'))  # comp[0] is in MHz
    # add important header keywords
    imhead(imagename=imageName,
           mode="put",
           hdkey="object",
           hdvalue="Model CME")
    imhead(imagename=imageName,
           mode="put",
           hdkey="imtype",
           hdvalue='Intensity')
    imhead(imagename=imageName,
           mode="put",
           hdkey="observer",
           hdvalue="simulation")
    imhead(imagename=imageName,
           mode="put",
           hdkey="date-obs",
           hdvalue="2023/03/15/00:00:00")
    imhead(imagename=imageName,
           mode="put",
           hdkey="reffreqtype",
           hdvalue='TOPO')
    imhead(imagename=imageName,
           mode="put",
           hdkey="restfreq",
           hdvalue=str(comp[0]) + 'MHz')
    imhead(imagename=imageName, mode='list')
    cs.setreferencevalue(str(comp[0]) + 'MHz', 'spectral')
    cs.settelescope('VLA')
    ia.setcoordsys(cs.torecord())
    ia.setbrightnessunit("Jy/pixel")
    ia.modify(cl.torecord(), subtract=False)

    pix = ia.getchunk()
    pix = pix.reshape((imSize, imSize))

    ia.close()
    return pix
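
The offset geometry in these truth-image builders comes down to me.shift: displace a reference direction by an angular offset at a given position angle. A tiny standalone sketch (the reference direction and angles are placeholders; me and qa are the CASA measures and quanta tools used throughout these examples):

# hedged sketch: offset a direction by 2 deg at position angle 280 deg
refdir = me.direction('J2000', '10h00m00s', '-30d00m00s')
newdir = me.shift(refdir, offset=qa.toangle('2deg'), pa=qa.toangle('280deg'))
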
Example 19
    def run(self):
        """ The run method creates the BDP

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        self._summary = {}
        dt = utils.Dtime("Regrid")
        dt.tag("start")

        do_spatial_regrid = self.getkey("do_spatial_regrid")
        pix_scale   = self.getkey("pix_scale")
        do_freq_regrid=self.getkey("do_freq_regrid")
        chan_width  = self.getkey("chan_width")

        pix_size=[]
        chan_size=[]
        im_size =[]
        pix_wc_x = []
        pix_wc_y = []
        pix_wc_nu=[]
        src_dec = []
        
        RADPERARCSEC = 4.848137E-6 

        for ibdp in self._bdp_in:
          # Convert input CASA images to numpy arrays.
          istem = ibdp.getimagefile(bt.CASA)
          ifile = ibdp.baseDir() + istem
          
          h = casa.imhead(ifile, mode='list')
          pix_size.append(np.abs(h['cdelt1'])) # pixel scale in radians @todo use QA?
          chan_size.append(np.abs(h['cdelt3']))
          # grab the pixels 
          pix_x = h['shape'][0]
          pix_y = h['shape'][1]          
          pix_nu= h['shape'][2]
          
          taskinit.ia.open(ifile)
          mycs = taskinit.ia.coordsys(axes=[0,1,2])
          # getting all four corners handles the case of images whose
          # x-y axes are not aligned with RA-Dec
          for xpix in [0,pix_x]:
            for ypix in [0,pix_y]:
              x = mycs.toworld([xpix,ypix])['numeric'][0] 
              y = mycs.toworld([xpix,ypix])['numeric'][1]
              pix_wc_x.append(x)
              pix_wc_y.append(y)
              
          nu= mycs.toworld([pix_x,pix_y,0])['numeric'][2]
          pix_wc_nu.append(nu)
          nu= mycs.toworld([pix_x,pix_y,pix_nu])['numeric'][2]
          pix_wc_nu.append(nu)
          taskinit.ia.close()
 
        min_ra = np.min(pix_wc_x)
        max_ra = np.max(pix_wc_x)
        min_dec = np.min(pix_wc_y)
        max_dec = np.max(pix_wc_y)        
        mean_ra  = 0.5*(min_ra + max_ra)
        mean_dec  = 0.5*(min_dec + max_dec)

        if(pix_scale < 0):
          pix_scale = np.min(pix_size)
        else:
          pix_scale = pix_scale * RADPERARCSEC
        
          
        npix_ra = int((max_ra - min_ra) / pix_scale * np.cos(mean_dec))
        npix_dec = (max_dec - min_dec) / pix_scale
        npix_dec = (int(npix_dec) if npix_dec == int(npix_dec) else int(npix_dec) + 1) 
        min_nu  = np.min(pix_wc_nu)
        max_nu  = np.max(pix_wc_nu)
        mean_nu   = 0.5*(min_nu + max_nu)
        if(chan_width < 0):
          chan_width = np.min(chan_size)
        npix_nu  =int((max_nu - min_nu)/chan_width)+1

        # now regrid everything
        innames =[]
        outnames = []
        incdelt = []
        outcdelt = []

        #=========================================================
        #@todo - check if bdp_ins refer to same input file.
        # If so, the current code will fail because the output
        # file name is fixed to $INPUT_regrid.  A valid use case
        # is "regrid the same input file different ways" -- which
        # is not supported in the current code but should be
        #=========================================================
        for ibdp in self._bdp_in:      
          istem = ibdp.getimagefile(bt.CASA)
          ifile = ibdp.baseDir() + istem          
          ostem = "%s_regrid/" % (istem)
          ofile = self.baseDir() + ostem
          # save the input/output file names
          innames.append(istem)
          outnames.append(ostem)
          
          header=casa.imregrid(imagename=ifile,template='get')
          # save the input cdelt1,2,3 for summary table
          incdelt.append((header['csys']['direction0']['cdelt'][0]/RADPERARCSEC,header['csys']['direction0']['cdelt'][1]/RADPERARCSEC,utils.freqtovel(mean_nu,header['csys']['spectral2']['wcs']['cdelt'])))
          
          if(do_spatial_regrid):
            header['csys']['direction0']['cdelt'] = [ -1*pix_scale,pix_scale] 
            header['csys']['direction0']['crval'] = [ mean_ra,mean_dec] 
            header['shap'][0] = npix_ra 
            header['shap'][1] = npix_dec 
            header['csys']['direction0']['crpix'] = [npix_ra/2,npix_dec/2] 
            
          if(do_freq_regrid):
              header['csys']['spectral2']['wcs']['crval']=min_nu
              header['shap'][2] = npix_nu
              chan_size = np.abs(header['csys']['spectral2']['wcs']['cdelt'])
              header['csys']['spectral2']['wcs']['cdelt'] = chan_width
              flux_correction = chan_width/chan_size             

          casa.imregrid(imagename=ifile,output=ofile,template=header)
          # save the output cdelt1,2,3 for summary table
          newhead = casa.imregrid(imagename=ofile,template='get')
          outcdelt.append((newhead['csys']['direction0']['cdelt'][0]/RADPERARCSEC,newhead['csys']['direction0']['cdelt'][1]/RADPERARCSEC,utils.freqtovel(mean_nu,newhead['csys']['spectral2']['wcs']['cdelt'])))

          if(do_freq_regrid):
            taskinit.ia.open(ofile)
            print flux_correction
            taskinit.ia.calc(pixels=ofile.replace(r"/",r"\/")+'*'+str(flux_correction))
            taskinit.ia.done()
          obdp = admit.SpwCube_BDP(ostem)
          self.addoutput(obdp)

        # make a table for summary
        atable = admit.util.Table()
        atable.columns = ["Input Image","cdelt1", "cdelt2", "cdelt3", "Regridded Image","cdelt1","cdelt2","cdelt3"]
        atable.units = ["","arcsec", "arcsec", "km/s", "","arcsec","arcsec","km/s"]
        for i in range(len(innames)):
            atable.addRow([ innames[i],incdelt[i][0],incdelt[i][1],incdelt[i][2], outnames[i],outcdelt[i][0],outcdelt[i][1],outcdelt[i][2] ])


        #keys = "pixsize=%.4g naxis1=%d naxis2=%d mean_ra=%0.4f mean_dec=%0.4f reffreq=%g chan_width=%g" % (pix_scale/(4.848137E-6), npix_ra, npix_dec, mean_ra,mean_dec,min_nu,chan_width)
        taskargs = "pix_scale = " + str(self.getkey("pix_scale")) + " chan_width = "+str(self.getkey("chan_width"))
        self._summary["regrid"] = SummaryEntry(atable.serialize(),"Regrid_AT",self.id(True),taskargs)
        dt.tag("done")
        dt.end()
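
The regrid step boils down to the imregrid template round-trip: fetch the template record, edit its shape and increments, and feed it back. A condensed sketch of just that pattern, using the same dictionary keys as the code above (file names and the 2 arcsec target pixel are illustrative; casa is the same task module used above):

RADPERARCSEC = 4.848137e-6

header = casa.imregrid(imagename='input.im', template='get')
pix_scale = 2.0 * RADPERARCSEC                     # 2 arcsec pixels, in radians
header['csys']['direction0']['cdelt'] = [-pix_scale, pix_scale]
header['shap'][0] = 512                            # new spatial dimensions
header['shap'][1] = 512
header['csys']['direction0']['crpix'] = [256, 256]
casa.imregrid(imagename='input.im', output='input_regrid.im', template=header)
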
Example 20
    def run(self):
        """ The run method, creates the slices, regrids if requested, and 
            creates the BDP(s)

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        dt = utils.Dtime("LineCube")
        self._summary = {}
        # look for an input noise level, either through keyword or input 
        # CubeStats BDP or calculate it if needed
        pad = self.getkey("pad")
        equalize = self.getkey("equalize")
        minchan = 0

        linelist = self._bdp_in[1]
        if linelist == None or len(linelist) == 0:
            logging.info("No lines found in input LineList_BDP, exiting.")
            return

        spw = self._bdp_in[0]
        # get the columns from the table
        cols = linelist.table.getHeader()
        # get the casa image
        imagename = spw.getimagefile(bt.CASA)
        imh = imhead(self.dir(imagename), mode='list')
        # set the overall parameters for imsubimage
        args = {"imagename" : self.dir(imagename),
                "overwrite" : True}

        dt.tag("start")

        # total number of channels; needed whenever a line window gets widened
        nchan = imh['shape'][2]
        if pad != 0:
            dt.tag("pad")

        # if equal size cubes are requested, this will honor the requested pad
        if equalize:
            start = linelist.table.getColumnByName("startchan")
            end = linelist.table.getColumnByName("endchan")
            # look for the widest line
            for i in range(len(start)):
                diff = end[i] - start[i] + 1
                minchan = max(minchan , diff + (pad * 2))
            dt.tag("equalize")

        # get all of the rows in the table
        rows = linelist.getall()
        delrow = set()
        procblend = [0]
        # search through looking for blended lines, leave only the strongest from each blend
        # in the list
        for i, row in enumerate(rows):
            if row.blend in procblend:
                continue
            strongest = -100.
            index = -1
            indexes = []
            blend = row.blend
            for j in range(i, len(rows)):
                if rows[j].blend != blend:
                    continue
                indexes.append(j)
                if rows[j].linestrength > strongest:
                    strongest = rows[j].linestrength
                    index = j
            indexes.remove(index)
            delrow = delrow | set(indexes)
            procblend.append(blend)
        dr = list(delrow)
        dr.sort()
        dr.reverse()
        for row in dr:
            del rows[row]

        # check on duplicate UID's, since those are the directory names here
        uid1 = []
        for row in rows:
            uid1.append(row.getkey("uid"))
        uid2 = set(uid1)
        if len(uid1) != len(uid2):
            print "LineList:",uid1
            logging.warning("There are duplicate names in the LineList")
            #raise Exception,"There are duplicate names in the LineList"

        # Create Summary table
        lc_description = admit.util.Table()
        lc_description.columns = ["Line Name","Start Channel","End Channel","Output Cube"]
        lc_description.units   = ["","int","int",""]
        lc_description.description = "Parameters of Line Cubes"
        # loop over all entries in the line list
        rdata = []
        for row in rows:
            uid = row.getkey("uid")
            cdir = self.mkext(imagename,uid)
            self.mkdir(cdir)
            basefl = uid
            lcd = [basefl]
            outfl = cdir + os.sep + "lc.im"
            args["outfile"] = self.dir(outfl)
            start = row.getkey("startchan")
            end = row.getkey("endchan")
            diff = end - start + 1
            startch = 0
            if diff < minchan:
                add = int(math.ceil(float(minchan - diff) / 2.0))
                start -= add
                end += add
                startch += add
                if start < 0:
                    # adjacent string literals concatenate, so the % formatting
                    # applies to the whole message
                    logging.info("%s is too close to the edge to encompass with the "
                                 "requested channels, start=%d resetting to 0" %
                                 (uid, start))
                    startch += abs(start)
                    start = 0
                if end >= nchan:
                    logging.info("%s is too close to the edge to encompass with the "
                                 "requested channels, end=%d resetting to %d" %
                                 (uid, end, nchan - 1))
                    end = nchan - 1
                #print "\n\nDIFF ",startch,"\n\n"
            if pad > 0 and not equalize:
                start -= pad
                end += pad
                if start < 0:
                    logging.warning("pad=%d too large, start=%d resetting to 0"
                          % (pad, start))
                    startch += abs(start)
                    start = 0
                else:
                    startch += pad
                if end >= nchan:
                    logging.warning("pad=%d too large, end=%d resetting to %d"
                          % (pad, end, nchan - 1))
                    end = nchan - 1
            elif pad < 0 and not equalize:
                mid = (start + end) / 2
                start = mid + pad / 2
                end = mid - pad / 2 - 1
                if start < 0:
                    logging.warning("pad=%d too large, start=%d resetting to 0"
                          % (pad, start))
                    startch += abs(start)
                    start = 0
                else:
                    startch += abs(start)
                if end >= nchan:
                    logging.warning("pad=%d too large, end=%d resetting to %d"
                          % (pad, end, nchan - 1))
                    end = nchan - 1
            endch = startch + diff
            args["chans"] = "%i~%i" % (start, end)
            rdata.append(start)
            rdata.append(end)
            # for the summary, which will be a table of
            # Line name, start channel, end channel, output image
            lc_description.addRow([basefl, start, end, outfl])

            # create the slices
            imsubimage(**args)

            line = row.converttoline()
            # set the rest frequency of the output cube
            imhead(imagename=args["outfile"], mode="put", hdkey="restfreq", 
                   hdvalue="%fGHz" % (row.getkey("frequency")))
            # set up the output BDP
            images = {bt.CASA : outfl}
            casaimage = Image(images=images)
            # note that Summary.getLineFluxes() implicitly relies on the BDP out order
            # being the same order as in the line list table.  If this is ever not
            # true, then Summary.getLineFluxes will mismatch BDPs and flux values.
            #self.addoutput(LineCube_BDP(xmlFile=cdir + os.sep + basefl + ".lc",
            self.addoutput(LineCube_BDP(xmlFile=outfl,
                           image=casaimage, line=line, linechans="%i~%i" % (startch, endch)))
            dt.tag("trans-%s" % cdir)

        logging.regression("LC: %s" % str(rdata))

        taskargs = "pad=%s equalize=%s" % (pad, equalize)

        self._summary["linecube"] = SummaryEntry(lc_description.serialize(), "LineCube_AT",
                                                 self.id(True), taskargs)
        dt.tag("done")
        dt.end()
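
Each line cube produced above is essentially an imsubimage channel slice whose rest frequency is then stamped into the header. A minimal sketch of those two steps (file names, channel range and rest frequency are placeholders):

# hedged sketch: cut channels 100-150 out of a cube and set its rest frequency
imsubimage(imagename='spw.im', outfile='line/lc.im',
           chans='100~150', overwrite=True)
imhead(imagename='line/lc.im', mode='put', hdkey='restfreq',
       hdvalue='%fGHz' % 115.271202)
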
Example 21
    def run(self):
        """ The run method, calculates the moments and creates the BDP(s)

            Parameters
            ----------
            None

            Returns
            -------
            None
        """
        self._summary = {}
        momentsummary = []
        dt = utils.Dtime("Moment")

        # variable to track if we are using a single cutoff for all moment maps
        allsame = False
        moments = self.getkey("moments")
        numsigma = self.getkey("numsigma")
        mom0clip = self.getkey("mom0clip")
        # determine if there is only 1 cutoff or if there is a cutoff for each moment
        if len(moments) != len(numsigma):
            if len(numsigma) != 1:
                raise Exception("Length of numsigma and moment lists do not match. They must be the same length or the length of the cutoff list must be 1.")
            allsame = True
        # default moment file extensions, this is information copied from casa.immoments()
        momentFileExtensions = {-1: ".average",
                                 0: ".integrated",
                                 1: ".weighted_coord",
                                 2: ".weighted_dispersion_coord",
                                 3: ".median",
                                 4: "",
                                 5: ".standard_deviation",
                                 6: ".rms",
                                 7: ".abs_mean_dev",
                                 8: ".maximum",
                                 9: ".maximum_coord",
                                10: ".minimum",
                                11: ".minimum_coord",
                                }

        logging.debug("MOMENT: %s %s %s" %  (str(moments), str(numsigma), str(allsame)))

        # get the input casa image from bdp[0]
        # also get the channels the line actually covers (if any)
        bdpin = self._bdp_in[0]
        infile = bdpin.getimagefile(bt.CASA)
        chans = self.getkey("chans")
        # the basename of the moments, we will append _0, _1, etc.
        basename = self.mkext(infile, "mom")
        fluxname = self.mkext(infile, "flux")
        # beamarea = nppb(self.dir(infile))
        beamarea = 1.0  # until we have it from the MOM0 map

        sigma0 = self.getkey("sigma")
        sigma  = sigma0

        dt.tag("open")

        # if no CubeStats BDP was given and no sigma was specified, find a
        # noise level via casa.imstat()
        if self._bdp_in[1] is None and sigma <= 0.0:
            raise Exception("A sigma or a CubeStats_BDP must be input to calculate the cutoff")
        elif self._bdp_in[1] is not None:
            sigma = self._bdp_in[1].get("sigma")

        # immoments is a bit peculiar: if you give one moment, it uses exactly
        # the outfile you picked; for multiple moments it appends extensions
        # such as .integrated [0], .weighted_coord [1], etc.  We loop over the
        # moments and use the numeric extension instead, which can be a
        # laborious loop for big input cubes.
        #
        # arguments for immoments
        args = {"imagename" : self.dir(infile),
                "moments"   : moments,
                "outfile"   : self.dir(basename)}

        # set the channels if given
        if chans != "":
            args["chans"] = chans
        # error check the mom0clip input
        if mom0clip > 0.0 and not 0 in moments:
            logging.warning("mom0clip given, but no moment0 map was requested. One will be generated anyway.")
            # add moment0 to the list of computed moments, but it has to be first
            moments.insert(0,0)
            if not allsame:
                numsigma.insert(0, 2.0*sigma)

        if allsame:
            # this is only executed now if len(moments) > 1 and len(cutoff)==1
            args["excludepix"] = [-numsigma[0] * sigma, numsigma[0] * sigma]
            casa.immoments(**args)
            dt.tag("immoments-all")
        else:
            # this is executed if len(moments)==len(cutoff), even when len=1
            for i in range(len(moments)):
                args["excludepix"] = [-numsigma[i] * sigma, numsigma[i] * sigma]
                args["moments"] = moments[i]
                args["outfile"] = self.dir(basename + momentFileExtensions[moments[i]])
                casa.immoments(**args)
                dt.tag("immoments-%d" % moments[i])

        taskargs = "moments=%s numsigma=%s" % (str(moments), str(numsigma)) 
        if sigma0 > 0:
            taskargs = taskargs + " sigma=%.2f" % sigma0
        if mom0clip > 0:
            taskargs = taskargs + " mom0clip=%g" % mom0clip
        if chans == "": 
            taskargs = taskargs + " chans=all"
        else:
            taskargs = taskargs + " chans=%s" % str(chans)
        taskargs += '&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; &nbsp;&nbsp;&nbsp; <span style="background-color:white">&nbsp;' + basename.split('/')[0] + '&nbsp;</span>'

        # generate the mask to be applied to all but moment 0
        if mom0clip > 0.0:
            # get the statistics from mom0 map
            # this is usually a very biased map, so unclear if mom0sigma is all that reliable
            args = {"imagename": self.dir(infile)}
            stat = casa.imstat(imagename=self.dir(basename + momentFileExtensions[0]))
            mom0sigma = float(stat["sigma"][0])
            # generate a temporary masked file, mask will be copied to other moments
            args = {"imagename" : self.dir(basename + momentFileExtensions[0]),
                    "expr"      : 'IM0[IM0>%f]' % (mom0clip * mom0sigma),
                    "outfile"   : self.dir("mom0.masked")
                    }
            casa.immath(**args)
            # get the default mask name
            taskinit.ia.open(self.dir("mom0.masked"))
            defmask = taskinit.ia.maskhandler('default')
            taskinit.ia.close()
            dt.tag("mom0clip")

        # loop over moments to rename them to _0, _1, _2 etc.
        # apply a mask as well for proper histogram creation
        map = {}
        myplot = APlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())
        implot = ImPlot(pmode=self._plot_mode,ptype=self._plot_type,abspath=self.dir())

        for mom in moments:
            figname = imagename = "%s_%i" % (basename, mom)
            tempname = basename + momentFileExtensions[mom]
            # rename and remove the old one if there is one
            utils.rename(self.dir(tempname), self.dir(imagename))
            # copy the moment0 mask if requested; this relies on mom0 having been computed first
            if mom0clip > 0.0 and mom != 0:
                #print "PJT: output=%s:%s" % (self.dir(imagename), defmask[0])
                #print "PJT: inpmask=%s:%s" % (self.dir("mom0.masked"),defmask[0])
                makemask(mode="copy", inpimage=self.dir("mom0.masked"),
                         output="%s:%s" % (self.dir(imagename), defmask[0]),
                         overwrite=True, inpmask="%s:%s" % (self.dir("mom0.masked"),
                                                            defmask[0]))
                taskinit.ia.open(self.dir(imagename))
                taskinit.ia.maskhandler('set', defmask)
                taskinit.ia.close()
                dt.tag("makemask")
            if mom == 0:
                beamarea = nppb(self.dir(imagename))
            implot.plotter(rasterfile=imagename,figname=figname,colorwedge=True)
            imagepng  = implot.getFigure(figno=implot.figno,relative=True)
            thumbname = implot.getThumbnail(figno=implot.figno,relative=True)
            images = {bt.CASA : imagename, bt.PNG  : imagepng}
            thumbtype=bt.PNG
            dt.tag("implot")

            # get the data for a histogram (ia access is about 1000-2000 times faster than imval())
            map[mom] = casautil.getdata(self.dir(imagename))
            data = map[mom].compressed()
            dt.tag("getdata")

            # make the histogram plot

            # get the label for the x axis
            bunit = casa.imhead(imagename=self.dir(imagename), mode="get", hdkey="bunit")
            # object for the caption
            objectname = casa.imhead(imagename=self.dir(imagename), mode="get", hdkey="object")

            # Make the histogram plot
            # Since we give abspath in the constructor, figname should be relative
            auxname = imagename + '_histo'
            auxtype = bt.PNG
            myplot.histogram(columns = data,
                             figname = auxname,
                             xlab    = bunit,
                             ylab    = "Count",
                             title   = "Histogram of Moment %d: %s" % (mom, imagename), thumbnail=True)

            casaimage = Image(images    = images,
                                    auxiliary = auxname,
                                    auxtype   = auxtype,
                                    thumbnail = thumbname,
                                    thumbnailtype = thumbtype)
            auxname = myplot.getFigure(figno=myplot.figno,relative=True)
            auxthumb = myplot.getThumbnail(figno=myplot.figno,relative=True)

            if hasattr(self._bdp_in[0], "line"):   # SpwCube doesn't have Line
                line = deepcopy(getattr(self._bdp_in[0], "line"))
                if not isinstance(line, Line):
                    line = Line(name="Unidentified")
            else:
                # fake a Line if there wasn't one
                line = Line(name="Unidentified")
            # add the BDP to the output array
            self.addoutput(Moment_BDP(xmlFile=imagename, moment=mom,
                           image=deepcopy(casaimage), line=line))
            dt.tag("ren+mask_%d" % mom)

            imcaption = "%s Moment %d map of Source %s" % (line.name, mom, objectname)
            auxcaption = "Histogram of %s Moment %d of Source %s" % (line.name, mom, objectname)
            thismomentsummary = [line.name, mom, imagepng, thumbname, imcaption,
                                 auxname, auxthumb, auxcaption, infile]
            momentsummary.append(thismomentsummary)

        if map.has_key(0) and map.has_key(1) and map.has_key(2):
            logging.debug("MAPs present: %s" % (map.keys()))

            # m0 needs a new mask, inherited from the more restricted m1 (and m2)
            m0 = ma.masked_where(map[1].mask,map[0])
            m1 = map[1]
            m2 = map[2]
            m01 = m0*m1
            m02 = m0*m1*m1
            m22 = m0*m2*m2
            sum0 = m0.sum()
            vmean = m01.sum()/sum0
            # lacking the full 3D cube, get two estimates and take the max
            sig1  = math.sqrt(m02.sum()/sum0 - vmean*vmean)
            sig2  = m2.max()
            #vsig = max(sig1,sig2)
            vsig = sig1
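            # i.e. the intensity-weighted mean velocity vmean = sum(m0*m1)/sum(m0);
            # sig1 is the m0-weighted dispersion of m1 and sig2 = m2.max() is the
            # alternative estimate from the moment-2 map (only sig1 is used here)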
            
            # consider clipping in the masked array (mom0clip)
            # @todo   i can't use info from line, so just borrow basename for now for grepping
            #         this also isn't really the flux, the points per beam is still in there
            loc = basename.rfind('/')
            sum1 = ma.masked_less(map[0],0.0).sum()   # mom0clip
            # print out:   LINE,FLUX1,FLUX0,BEAMAREA,VMEAN,VSIGMA for regression
            # the linechans parameter in bdpin is not useful to print out here, it's local to the LineCube
            s_vlsr = admit.Project.summaryData.get('vlsr')[0].getValue()[0]
            s_rest = admit.Project.summaryData.get('restfreq')[0].getValue()[0]/1e9
            s_line = line.frequency
            if loc>0:
                if basename[:loc][0:2] == 'U_':
                    # for U_ lines we'll reference the VLSR w.r.t. RESTFREQ in that band
                    if abs(vmean) > vsig:
                        vwarn = '*'
                    else:
                        vwarn = ''
                    vlsr = vmean + (1.0-s_line/s_rest)*utils.c
                    msg = "MOM0FLUX: %s %g %g %g %g %g %g" % (basename[:loc],map[0].sum(),sum0,beamarea,vmean,vlsr,vsig)
                else:
                    # for identified lines we'll assume the ID was correct and not bother with RESTFREQ
                    msg = "MOM0FLUX: %s %g %g %g %g %g %g" % (basename[:loc],map[0].sum(),sum0,beamarea,vmean,vmean,vsig)
            else:
                msg = "MOM0FLUX: %s %g %g %g %g %g %g" % ("SPW_FULL"    ,map[0].sum(),sum0,beamarea,vmean,vmean,vsig)
            logging.regression(msg)
            dt.tag("mom0flux")

            # create a histogram of flux per channel

            # grab the X coordinates for the histogram, we want them in km/s
            # restfreq should also be in summary
            restfreq = casa.imhead(self.dir(infile),mode="get",hdkey="restfreq")['value']/1e9    # in GHz
            # print "PJT  %.10f %.10f" % (restfreq,s_rest)
            imval0 = casa.imval(self.dir(infile))
            freqs = imval0['coords'].transpose()[2]/1e9
            x = (1-freqs/restfreq)*utils.c
            # 
            h = casa.imstat(self.dir(infile), axes=[0,1])
            if h.has_key('flux'):
                flux0 = h['flux']
            else:
                flux0 = h['sum']/beamarea
            flux0sum = flux0.sum() * abs(x[1]-x[0])
            # @todo   make a flux1 with fluxes derived from a good mask
            flux1 = flux0 
            # construct histogram
            title = 'Flux Spectrum (%g)' % flux0sum
            xlab = 'VLSR (km/s)'
            ylab = 'Flux (Jy)'
            myplot.plotter(x,[flux0,flux1],title=title,figname=fluxname,xlab=xlab,ylab=ylab,histo=True)
            dt.tag("flux-spectrum")
            
        self._summary["moments"] = SummaryEntry(momentsummary, "Moment_AT", 
                                                self.id(True), taskargs)
        # get rid of the temporary mask
        if mom0clip > 0.0: 
            utils.rmdir(self.dir("mom0.masked"))

        dt.tag("done")
        dt.end()
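
The x axis of the flux spectrum above uses the radio velocity convention v = c (1 - nu/nu0). A tiny worked example with made-up channel frequencies (c in km/s, matching the km/s axis label used above):

import numpy as np

c = 299792.458                                      # km/s
restfreq = 115.271202                               # GHz, e.g. CO(1-0)
freqs = np.array([115.260, 115.271202, 115.280])    # channel frequencies, GHz
v = (1.0 - freqs / restfreq) * c                    # km/s, radio convention
print(v)                                            # ~ [ 29.1   0.0  -22.9 ]
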