Code example #1
File: __init__.py Project: thatoeugine/stacker
def noise(coords, nrandom=50, imagenames=[], stampsize=32,
          method='mean', weighting='sigma2', maskradius=None,
          psfmode='point'):

    import stacker
    import numpy as np
    from taskinit import ia, qa

    ia.open(imagenames[0])
    beam = qa.convert(ia.restoringbeam()['major'], 'rad')['value']
    ia.done()

#     if coords.coord_type == 'physical':
#         coords = stacker.getPixelCoords(coords, imagenames)

    _allocate_buffers(imagenames, stampsize, len(coords)*len(imagenames))

    dist = []

    for i in range(nrandom):
        random_coords = stacker.randomizeCoords(coords, beam=beam)
        random_coords = stacker.getPixelCoords(random_coords, imagenames)
        _load_stack(random_coords, psfmode)

        if method == 'mean' and weighting == 'sigma2':
            random_coords = _calculate_sigma2_weights(random_coords, maskradius)
        elif method == 'mean' and weighting == 'sigma':
            random_coords = _calculate_sigma_weights(random_coords, maskradius)

        stacked_im  = _stack_stack(method, random_coords)

        dist.append(stacked_im[int(stampsize/2+0.5), int(stampsize/2+0.5),0,0])

    return np.std(dist)
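
Usage note: a minimal sketch of how the noise() routine above might be called, assuming it is importable as stacker.image.noise inside a CASA session; the image name and coordinates are hypothetical, and CoordList/Coord are the helper classes shown in code example #7.

import stacker
import stacker.image

# Hypothetical stacking positions: CoordList(imagenames) plus Coord(x, y, weight),
# with x and y in radians as in code example #7.
coords = stacker.CoordList(['targets.image'])
coords.append(stacker.Coord(1.234, 0.567, 1.0))

sigma = stacker.image.noise(coords, nrandom=50, imagenames=['targets.image'],
                            stampsize=32, method='mean', weighting='sigma2')
print('Monte Carlo noise estimate: {0}'.format(sigma))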
Code example #2
File: __init__.py Project: centowen/stacker
def cl_from_im(image, clname=None, threshold=None):
    import numpy as np
    from taskinit import qa, ia, cl

    ia.open(image)
    data = ia.getregion()
    cs = ia.coordsys()
    ia.done()

    data = np.where(np.isnan(data), 0, data)
    if threshold is None:
        datanz = np.nonzero(data)
    else:
        datanz = np.nonzero(np.abs(data) > threshold)

    modelfluxes = data[datanz]
    modelpixels = np.array(datanz)
    modellist = cs.convertmany(modelpixels, 
                               unitsin=['pix', 'pix', 'pix', 'pix'], 
                               unitsout=['rad', 'rad', '', 'Hz'])

    cl.done()

    for i in range(modellist.shape[1]):
        x = qa.formxxx(str(modellist[0, i])+'rad', format='hms', prec=6)
        y = qa.formxxx(str(modellist[1, i])+'rad', format='dms', prec=6)
        pos = ' '.join(['J2000', x, y])
        freq = str(modellist[3, i])+'Hz'
        flux = modelfluxes[i]
        cl.addcomponent(flux=flux, fluxunit='Jy', dir=pos, freq=freq)

    if clname is not None:
        cl.rename(clname)
        cl.done()
    else:
        return cl
Code example #3
File: __init__.py Project: centowen/stacker
def noise(coords, vis, weighting='sigma2', imagenames=[], beam=None, nrand=50,
          stampsize=32, maskradius=None):
    """ Calculate noise using a Monte Carlo method, can be time consuming. """
    import stacker
    import stacker.image
    from math import pi
    import numpy as np
    if beam is None:
        try:
            from taskinit import ia, qa

            ia.open(imagenames[0])
            beam = qa.convert(ia.restoringbeam()['major'], 'rad')['value']
            ia.done()
        except ImportError:
            beam = 1/3600./180.*pi

    dist = []
    for i in range(nrand):
        random_coords = stacker.randomizeCoords(coords, beam=beam)
        if weighting == 'sigma2':
            random_coords = stacker.image.calculate_sigma2_weights(
                random_coords, imagenames, stampsize, maskradius)
        dist.append(stack(random_coords, vis))

    return np.std(np.real(np.array(dist)))
Code example #4
def make_casa_testimage(infile, outname):

    if not casaOK:
        raise Exception("Attempted to make a CASA test image in a non-CASA "
                        "environment")
    ia.fromfits(infile=infile, outfile=outname, overwrite=True)
    ia.close()

    cube = SpectralCube.read(infile)
    if isinstance(cube, VaryingResolutionSpectralCube):
        ia.open(outname)
        # populate restoring beam emptily
        ia.setrestoringbeam(major={'value':1.0, 'unit':'arcsec'},
                            minor={'value':1.0, 'unit':'arcsec'},
                            pa={'value':90.0, 'unit':'deg'},
                            channel=len(cube.beams)-1,
                            polarization=-1,
                           )
        # populate each beam (hard assumption of 1 poln)
        for channum, beam in enumerate(cube.beams):
            casabdict = {'major': {'value':beam.major.to(u.deg).value, 'unit':'deg'},
                         'minor': {'value':beam.minor.to(u.deg).value, 'unit':'deg'},
                         'positionangle': {'value':beam.pa.to(u.deg).value, 'unit':'deg'}
                        }
            ia.setrestoringbeam(beam=casabdict, channel=channum, polarization=0)

        ia.close()
Code example #5
File: __init__.py Project: thatoeugine/stacker
def noise(coords,
          vis,
          weighting='sigma2',
          imagenames=[],
          beam=None,
          nrand=50,
          stampsize=32,
          maskradius=None):
    """ Calculate noise using a Monte Carlo method, can be time consuming. """
    import stacker
    import stacker.image
    from math import pi
    import numpy as np
    if beam is None:
        try:
            from taskinit import ia, qa

            ia.open(imagenames[0])
            beam = qa.convert(ia.restoringbeam()['major'], 'rad')['value']
            ia.done()
        except ImportError:
            beam = 1 / 3600. / 180. * pi

    dist = []
    for i in range(nrand):
        random_coords = stacker.randomizeCoords(coords, beam=beam)
        if weighting == 'sigma2':
            random_coords = stacker.image.calculate_sigma2_weights(
                random_coords, imagenames, stampsize, maskradius)
        dist.append(stack(random_coords, vis))

    return np.std(np.real(np.array(dist)))
Code example #6
def cl_from_im(image, clname=None, threshold=None):
    import numpy as np
    from taskinit import qa, ia, cl

    ia.open(image)
    data = ia.getregion()
    cs = ia.coordsys()
    ia.done()

    data = np.where(np.isnan(data), 0, data)
    if threshold is None:
        datanz = np.nonzero(data)
    else:
        datanz = np.nonzero(np.abs(data) > threshold)

    modelfluxes = data[datanz]
    modelpixels = np.array(datanz)
    modellist = cs.convertmany(modelpixels,
                               unitsin=['pix', 'pix', 'pix', 'pix'],
                               unitsout=['rad', 'rad', '', 'Hz'])

    cl.done()

    for i in range(modellist.shape[1]):
        x = qa.formxxx(str(modellist[0, i]) + 'rad', format='hms', prec=6)
        y = qa.formxxx(str(modellist[1, i]) + 'rad', format='dms', prec=6)
        pos = ' '.join(['J2000', x, y])
        freq = str(modellist[3, i]) + 'Hz'
        flux = modelfluxes[i]
        cl.addcomponent(flux=flux, fluxunit='Jy', dir=pos, freq=freq)

    if clname is not None:
        cl.rename(clname)
        cl.done()
    else:
        return cl
Code example #7
def randomCoords(imagenames, ncoords=10):
    import random
    from taskinit import ia, qa

    xmin, xmax = [], []
    ymin, ymax = [], []
    for image in imagenames:
        ia.open(image)
        print(image, ia.boundingbox())
        trc = ia.boundingbox()['trcf'].split(', ')
        blc = ia.boundingbox()['blcf'].split(', ')
        xmin.append(qa.convert(qa.quantity(trc[0]), 'rad')['value'])
        xmax.append(qa.convert(qa.quantity(blc[0]), 'rad')['value'])
        ymin.append(qa.convert(qa.quantity(blc[1]), 'rad')['value'])
        ymax.append(qa.convert(qa.quantity(trc[1]), 'rad')['value'])
        ia.done()

    randomcoords = CoordList(imagenames)
    for i in range(ncoords):
        imageid = random.randint(0, len(imagenames) - 1)
        x = random.uniform(xmin[imageid], xmax[imageid])
        y = random.uniform(ymin[imageid], ymax[imageid])
        c = Coord(x, y, 1.0)
        randomcoords.append(c)

    return randomcoords
Code example #8
    def from_casa_image(filename, dropdeg=True, skipdata=False,
                        skipvalid=False, skipcs=False):
        """
        Load a cube (into memory?) from a CASA image. By default it will transpose
        the cube into a 'python' order and drop degenerate axes. These options can
        be suppressed. The object holds the coordsys object from the image in
        memory.
        """

        # use the ia tool to get the file contents
        ia.open(filename)

        # read in the data
        if not skipdata:
            data = ia.getchunk(dropdeg=dropdeg)

        # CASA stores validity of data as a mask
        if not skipvalid:
            valid = ia.getchunk(getmask=True, dropdeg=dropdeg)

        # transpose is dealt with within the cube object
            
        # read in coordinate system object
        casa_cs = ia.coordsys()

        wcs = wcs_casa2astropy(casa_cs)

        # don't need this yet
        # stokes = get_casa_axis(temp_cs, wanttype="Stokes", skipdeg=False,)

        #    if stokes == None:
        #        order = np.arange(self.data.ndim)
        #    else:
        #        order = []
        #        for ax in np.arange(self.data.ndim+1):
        #            if ax == stokes:
        #                continue
        #            order.append(ax)

        #    self.casa_cs = ia.coordsys(order)
            
            # This should work, but coordsys.reorder() has a bug
            # on the error checking. JIRA filed. Until then the
            # axes will be reversed from the original.

            #if transpose == True:
            #    new_order = np.arange(self.data.ndim)
            #    new_order = new_order[-1*np.arange(self.data.ndim)-1]
            #    print new_order
            #    self.casa_cs.reorder(new_order)
        
        # close the ia tool
        ia.close()

        metadata = {'filename':filename}

        mask = SpectralCubeMask(wcs, np.logical_not(valid))
        cube = SpectralCube(data, wcs, mask, metadata=metadata)

        return cube
Code example #9
def _getPixelCoords1ImSimpleProj(coords, imagename):
    from taskinit import ia
    from interval import interval

    ia.open(imagename)
    cs = ia.coordsys()
    imshape = ia.shape()
    ia.done()

    pixcoords = []
    for coord in coords:
        p = cs.convert(coordin=[coord.x, coord.y, 0, 0],
                       absin=[True] * 4,
                       unitsin=[coords.unit, coords.unit, 'pix', 'pix'],
                       absout=[True] * 4,
                       unitsout=['pix'] * 4)
        x = p[0]
        y = p[1]

        if x in interval[0, imshape[0] - 1] and y in interval[0.,
                                                              imshape[1] - 1]:
            c = Coord(x, y)
            try:
                c.index = coord.index
            except AttributeError:
                pass
            pixcoords.append(c)

    return pixcoords
Code example #10
File: mytools.py Project: e-koch/VLA_Lband
def myclean(**kwargs):

    if "mask" in kwargs.keys():
        # Since masks can be given in other forms, try to open it, and if it
        # fails, assume it just isn't an image.
        try:
            if isinstance(kwargs['mask'], six.string_types):
                masks = [kwargs['mask']]
            elif isinstance(kwargs['mask'], list):
                masks = kwargs['mask']
            else:
                raise RuntimeError

            for mask in masks:
                # Check if there's anything in the mask before cleaning
                ia.open(mask)
                stats_dict = ia.statistics()
                ia.close()
                # If there's nothing there, max == min
                max_val = stats_dict["max"]
                if max_val == 0:
                    # Nothing inside the mask to clean; warn and bail out.
                    import warnings
                    warnings.warn("The mask image contains no regions to "
                                  "clean in. Exiting.")
                    return None
        except RuntimeError:
            pass

    return catch_fail(clean, **kwargs)
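
Usage note: a minimal sketch of calling the myclean() wrapper above, assuming a CASA session where the clean task and the ia tool are available; the measurement set, image, and mask names are hypothetical.

myclean(vis='target.ms', imagename='target_clean', mask='target.mask',
        niter=1000, threshold='1mJy')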
Code example #11
File: mytools.py Project: e-koch/VLA_Lband
def myexportfits(common_beam=True, **kwargs):
    '''
    Version of exportfits that raises a Python error when it fails.
    Also attaches a common beam to the FITS header.
    '''

    catch_fail(exportfits, **kwargs)

    # The above throws an error if exportfits fails.
    # Now open the FITS file and attach a beam.

    ia.open(kwargs['imagename'])

    com_beam = ia.commonbeam()

    ia.close()

    bmaj = com_beam['major']['value'] * \
        u.Unit(com_beam['major']['unit']).to(u.deg)
    bmin = com_beam['minor']['value'] * \
        u.Unit(com_beam['minor']['unit']).to(u.deg)
    bpa = com_beam['pa']['value'] * u.Unit(com_beam['pa']['unit']).to(u.deg)

    filename = kwargs['fitsimage']

    output_fits = fits.open(filename, mode='update')

    output_fits[0].header.update({"BMAJ": bmaj,
                                  "BMIN": bmin,
                                  "BPA": bpa})

    output_fits.flush()
    output_fits.close()
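
Usage note: a minimal sketch of calling myexportfits() above, assuming a CASA session with exportfits, the ia tool, astropy.io.fits and astropy.units available as in the snippet; the file names are hypothetical.

myexportfits(imagename='target_clean.image', fitsimage='target_clean.fits',
             overwrite=True)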
Code example #12
File: __init__.py Project: thatoeugine/stacker
def _calculate_sigma_weights(coords, maxmaskradius=None):
    import numpy as np
    from taskinit import ia, qa
    global stampsize

    if coords.physical and coords.imagenames[0]:
        ia.open(coords.imagenames[0])
#         beam = qa.convert(ia.restoringbeam()['major'], 'rad')['value']
        if ia.restoringbeam() == {}:
            masksize = 10
        else:
            masksize = 2*np.abs(qa.convert(ia.restoringbeam()['major'], 'rad')['value']
                                / ia.coordsys().increment()['numeric'][0])
        ia.done()

    X = np.arange(0, stampsize)-stampsize/2
    Y = np.arange(0, stampsize)-stampsize/2
    X,Y = np.meshgrid(X,Y)

    for i,coord in enumerate(coords):
        tmpdata = data[i,:,:,:,:]
        for j in range(tmpdata.shape[2]):
            for k in range(tmpdata.shape[3]):
                tmpdata[:,:,j,k]  = (tmpdata[:,:,j,k]*np.double( np.sqrt(X**2+Y**2)>masksize))
        sigma = np.std(tmpdata)
        if sigma == 0:
            coord.weight = 0.
        else:
            coord.weight = 1/sigma
    if maxmaskradius and maxmaskradius < masksize:
        masksize = maxmaskradius

    return coords
Code example #13
def make_casa_testimage(infile, outname):

    infile = str(infile)
    outname = str(outname)

    if not casaOK:
        raise Exception("Attempted to make a CASA test image in a non-CASA "
                        "environment")

    ia = casatools.image()

    ia.fromfits(infile=infile, outfile=outname, overwrite=True)
    ia.unlock()
    ia.close()
    ia.done()

    cube = SpectralCube.read(infile)
    if isinstance(cube, VaryingResolutionSpectralCube):
        ia.open(outname)
        # populate restoring beam emptily
        ia.setrestoringbeam(
            major={
                'value': 1.0,
                'unit': 'arcsec'
            },
            minor={
                'value': 1.0,
                'unit': 'arcsec'
            },
            pa={
                'value': 90.0,
                'unit': 'deg'
            },
            channel=len(cube.beams) - 1,
            polarization=-1,
        )
        # populate each beam (hard assumption of 1 poln)
        for channum, beam in enumerate(cube.beams):
            casabdict = {
                'major': {
                    'value': beam.major.to(u.deg).value,
                    'unit': 'deg'
                },
                'minor': {
                    'value': beam.minor.to(u.deg).value,
                    'unit': 'deg'
                },
                'positionangle': {
                    'value': beam.pa.to(u.deg).value,
                    'unit': 'deg'
                }
            }
            ia.setrestoringbeam(beam=casabdict,
                                channel=channum,
                                polarization=0)

        ia.unlock()
        ia.close()
        ia.done()
Code example #14
File: __init__.py Project: thatoeugine/stacker
def getFlux(imagename):
    from taskinit import ia, rg
    ia.open(imagename)
    cs = ia.coordsys()
    x = int(cs.referencepixel()['numeric'][0])
    y = int(cs.referencepixel()['numeric'][1])
    # Read the central pixel before closing the tool.
    flux = float(ia.getregion(region=rg.box([x, y], [x, y])))
    ia.done()
    return flux
Code example #15
File: __init__.py Project: thatoeugine/stacker
    def __init__(self, imagename, *args, **kwargs):
        """
        Constructor

        Keyword arguments:
        imagename -- Path (str) to a CASA image of the primary beam
        """
        super(MSPrimaryBeamModel, self).__init__(*args, **kwargs)

        self.imagename = imagename

        try:
            from taskinit import ia
            ia.open(imagename)
            self.cs = ia.coordsys()

            self.nx = ia.shape()[0]
            self.ny = ia.shape()[1]
            self.refpix_x = self.cs.referencepixel()['numeric'][0]
            self.refpix_y = self.cs.referencepixel()['numeric'][1]
            self.increment_x = self.cs.increment()['numeric'][0]
            self.increment_y = self.cs.increment()['numeric'][1]

            try:
                self.frequencyaxis = self.cs.findaxisbyname('frequency')
                self.nu0 = self.cs.referencevalue()['numeric'][
                    self.frequencyaxis]
            except Exception:
                self.nu0 = None
                print('Warning! No frequency information in primary beam model.')

            self.data = ia.getregion()[:, :, 0, 0]
            ia.done()
        except ImportError:
            from pyrap.images import image
            im = image(imagename)
            self.nx = im.shape()[-1]
            self.ny = im.shape()[-2]
            self.cs = im.coordinates()
            self.cs_dir = self.cs.get_coordinate('direction')

            self.refpix_x = self.cs_dir.get_referencepixel()[1]
            self.refpix_y = self.cs_dir.get_referencepixel()[0]
            self.increment_x = self.cs_dir.get_increment()[1]
            self.increment_y = self.cs_dir.get_increment()[0]
            try:
                self.nu0 = self.cs.get_coordinate(
                    'spectral').get_referencevalue()
            except Exception:
                self.nu0 = None
                print(
                    'Warning! No frequency information in primary beam model.')

            self.data = im.getdata()[0, 0]
Code example #16
def make_pbfile(vis, pbfile):
    from taskinit import im, ms, ia, qa, tb
    import numpy as np
    from scipy.constants import c

    ms.open(vis)
    fields = ms.range('field_id')['field_id']
    ms.done()
    im.open(vis)
    im.selectvis(field=fields[0])
    ms.open(vis)
    freq = np.mean(ms.range('chan_freq')['chan_freq'])
    phase_dir = ms.range('phase_dir')['phase_dir']['direction']
    ms.done()

    phase_dir = phase_dir[0][0], phase_dir[1][0]
    phase_dir = [
        qa.formxxx(str(phase_dir[0]) + 'rad', format='hms'),
        qa.formxxx(str(phase_dir[1]) + 'rad', format='dms')
    ]
    phase_dir = 'J2000 ' + ' '.join(phase_dir)

    tb.open(vis + '/ANTENNA/')
    dishdia = np.min(tb.getcol('DISH_DIAMETER'))
    tb.done()

    # A 512-pixel primary-beam image covers the beam down to 0.001 of the peak
    # and keeps the largest pixel-to-pixel variation below 0.01.
    minpb = 0.001
    nx = 512
    cellconv = (nx * np.sqrt(np.log(2) / np.log(1 / minpb)))**-1

    beam = c / freq / dishdia
    cell = {}
    cell['value'] = beam * cellconv
    cell['unit'] = 'rad'

    #     nx = int(3*3e8/freq/dishdia*1.22*180/
    #              math.pi*3600/qa.convert(advise['cell'],
    #              'arcsec')['value'])
    # Chosen as to be 3 times fwhm of primary beam,
    # should include up to approximately .01 of peak flux

    im.defineimage(nx=nx, ny=nx, cellx=cell, celly=cell, phasecenter=phase_dir)
    im.setvp(dovp=True)
    im.makeimage(type='pb', image=pbfile)
    im.done()
    ia.open(pbfile)
    cs = ia.coordsys()
    cs.setreferencevalue(type='direction', value=[0., 0.])
    ia.setcoordsys(cs.torecord())
    ia.maskhandler('delete', 'mask0')
    ia.done()
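
Usage note: a minimal sketch of calling make_pbfile() above from within a CASA session; the measurement set and output primary-beam image names are hypothetical.

from taskinit import ia

make_pbfile('target.ms', 'target.pb')

# Inspect the resulting 512x512 primary-beam image.
ia.open('target.pb')
print(ia.shape())
ia.done()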
Code example #17
def test_casa_mask(data_adv, tmp_path):

    cube = SpectralCube.read(data_adv)

    mask_array = np.array([[True, False], [False, False], [True, True]])
    bool_mask = BooleanArrayMask(mask=mask_array,
                                 wcs=cube._wcs,
                                 shape=cube.shape)
    cube = cube.with_mask(bool_mask)

    make_casa_mask(cube,
                   str(tmp_path / 'casa.mask'),
                   add_stokes=False,
                   append_to_image=False,
                   overwrite=True)

    ia = casatools.image()

    ia.open(str(tmp_path / 'casa.mask'))

    casa_mask = ia.getchunk()

    coords = ia.coordsys()

    ia.unlock()
    ia.close()
    ia.done()

    # Test masks
    # Mask array is broadcasted to the cube shape. Mimic this, switch to ints,
    # and transpose to match CASA image.
    compare_mask = np.tile(mask_array, (4, 1, 1)).astype('int16').T
    assert np.all(compare_mask == casa_mask)

    # Test WCS info

    # Convert back to an astropy wcs object so transforms are dealt with.
    casa_wcs = wcs_casa2astropy(ia, coords)
    header = casa_wcs.to_header()  # Invokes transform

    # Compare some basic properties EXCLUDING the spectral axis
    assert np.allclose(cube.wcs.wcs.crval[:2], casa_wcs.wcs.crval[:2])
    assert np.all(cube.wcs.wcs.cdelt[:2] == casa_wcs.wcs.cdelt[:2])
    assert np.all(list(cube.wcs.wcs.cunit)[:2] == list(casa_wcs.wcs.cunit)[:2])
    assert np.all(list(cube.wcs.wcs.ctype)[:2] == list(casa_wcs.wcs.ctype)[:2])

    # Reference pixels in CASA are 1 pixel off.
    assert_allclose(cube.wcs.wcs.crpix, casa_wcs.wcs.crpix, atol=1.0)
Code example #18
def test_casa_mask():

    cube = SpectralCube.read(path('adv.fits'))

    mask_array = np.array([[True, False], [False, False], [True, True]])
    bool_mask = BooleanArrayMask(mask=mask_array, wcs=cube._wcs,
                                 shape=cube.shape)
    cube = cube.with_mask(bool_mask)

    if os.path.exists('casa.mask'):
        os.system('rm -rf casa.mask')

    make_casa_mask(cube, 'casa.mask', add_stokes=False,
                   append_to_image=False, overwrite=True)

    ia.open('casa.mask')

    casa_mask = ia.getchunk()

    coords = ia.coordsys()

    ia.close()

    # Test masks
    # Mask array is broadcasted to the cube shape. Mimic this, switch to ints,
    # and transpose to match CASA image.
    compare_mask = np.tile(mask_array, (4, 1, 1)).astype('int16').T
    assert np.all(compare_mask == casa_mask)

    # Test WCS info

    # Convert back to an astropy wcs object so transforms are dealt with.
    casa_wcs = wcs_casa2astropy(coords)
    header = casa_wcs.to_header()  # Invokes transform

    # Compare some basic properties EXCLUDING the spectral axis
    assert np.allclose(cube.wcs.wcs.crval[:2], casa_wcs.wcs.crval[:2])
    assert np.all(cube.wcs.wcs.cdelt[:2] == casa_wcs.wcs.cdelt[:2])
    assert np.all(list(cube.wcs.wcs.cunit)[:2] == list(casa_wcs.wcs.cunit)[:2])
    assert np.all(list(cube.wcs.wcs.ctype)[:2] == list(casa_wcs.wcs.ctype)[:2])

    # Reference pixels in CASA are 1 pixel off.
    assert_allclose(cube.wcs.wcs.crpix, casa_wcs.wcs.crpix,
                    atol=1.0)
Code example #19
File: __init__.py Project: thatoeugine/stacker
def _write_stacked_image(imagename, pixels, template_image, stampsize):
    import os
    import shutil
    import numpy as np
    from taskinit import ia
#     global stampsize
    if os.access(imagename, os.F_OK):
        shutil.rmtree(imagename)

    ia.open(template_image)
    beam = ia.restoringbeam()
    cs = ia.coordsys()
    ia.done()

    csnew = cs.copy()
    csnew.setreferencevalue([0.]*2, 'dir')
    csnew.setreferencepixel([int(stampsize/2+0.5)]*2, 'dir')
    ia.fromarray(imagename, pixels=pixels, csys = csnew.torecord())
    ia.open(imagename)
    ia.setrestoringbeam(beam=beam)
    ia.done()
Code example #20
def maxfit_iter(imgfiles, box, imidx):
    from taskinit import ia, rg
    try:
        from astropy.io import fits as pyfits
    except ImportError:
        try:
            import pyfits
        except ImportError:
            raise ImportError(
                'Neither astropy nor pyfits exists in this CASA installation')
    img = imgfiles[imidx]

    try:
        if not ia.open(img):
            raise Exception("Cannot create image analysis tool using " + img)
        print('Processing image: ' + img)
        hdr = pyfits.getheader(img)
        pols = DButil.polsfromfitsheader(hdr)
        freqs = DButil.freqsfromfitsheader(hdr)
        ndx, ndy, nchans, npols = ia.shape()
        blc, trc = [0, 0], [ndx, ndy]
        if 'box' in locals():
            if box != '':
                blc[0], blc[1], trc[0], trc[1] = [
                    int(ll) for ll in box.split(',')
                ]
        results = {}
        for itpp in pols:
            results[itpp] = {'results': {}, 'converged': []}
        for ll in range(nchans):
            for pp, itpp in enumerate(pols):
                comp = 'component{}'.format(ll)
                r = rg.box(blc=[blc[0], blc[1], ll, pp],
                           trc=[trc[0], trc[1], ll, pp])
                iachan = ia.subimage(region=r, dropdeg=True)
                try:
                    result_dict = iachan.maxfit(point=True, negfind=False)
                    result_dict['component0']['converged'] = True
                    result_dict['component0']['flux']['polarisation'] = itpp
                    result_dict['component0']['spectrum']['frequency']['m0'][
                        'value'] = float(freqs[ll])
                    results[itpp]['results'][comp] = result_dict['component0']
                    results[itpp]['converged'].append(True)
                except Exception:
                    results[itpp]['converged'].append(False)
        results[itpp]['results']['nelements'] = results[itpp]['results'].keys()
        # update timestamp
        timstr = hdr['date-obs']
        return [True, timstr, img, results]
    except Exception as instance:
        casalog.post(str('*** Error in imfit ***') + str(instance))
        # raise instance
        return [False, timstr, img, {}]
Code example #21
File: __init__.py Project: thatoeugine/stacker
def calculate_pb_weights(coords, primarybeam, imagenames=[]):
    import stacker
    from scipy.constants import c
    from taskinit import ia, qa
    import numpy as np

    for i, coord in enumerate(coords):
        coord.index = i

    if coords.coord_type == 'physical':
        pixcoords = stacker.getPixelCoords(coords, imagenames)
    else:
        pixcoords = coords

#     _allocate_buffers(pixcoords.imagenames, stampsize, len(pixcoords))
#     _load_stack(pixcoords)

    for coord in pixcoords:
        ia.open(imagenames[coord.image])
        cs = ia.coordsys()
        freqaxis = cs.findaxisbyname('freq')
        restfreq = cs.referencevalue()['numeric'][freqaxis]
        dx = ((coord.x-cs.referencepixel()['numeric'][0])
                *cs.increment()['numeric'][0])
        dy = ((coord.y-cs.referencepixel()['numeric'][1])
                *cs.increment()['numeric'][1])

        coord.weight = primarybeam(dx,dy,restfreq)**2

    if coords.coord_type == 'physical':
        for coord in coords:
            coord.weight = 0.
            for pixcoord in pixcoords:
                if coord.index == pixcoord.index and pixcoord.weight > coord.weight:
                    coord.weight = pixcoord.weight
    else:
        coords = pixcoords

    return coords
Code example #22
    def from_casa_image(cls, imagename):
        '''
        Instantiate beam from a CASA image.

        ** Must be run in a CASA environment! **

        Parameters
        ----------
        imagename : str
            Name of CASA image.
        '''

        try:
            from taskinit import ia
        except ImportError:
            raise ImportError("Could not import CASA (casac) and therefore"
                              " cannot read CASA .image files")

        ia.open(imagename)
        beam_props = ia.restoringbeam()
        ia.close()

        beam_keys = ["major", "minor", "positionangle"]
        if not all(key in beam_props for key in beam_keys):
            raise ValueError("The image does not contain complete beam "
                             "information. Check the output of "
                             "ia.restoringbeam().")

        major = beam_props["major"]["value"] * \
            u.Unit(beam_props["major"]["unit"])
        minor = beam_props["minor"]["value"] * \
            u.Unit(beam_props["minor"]["unit"])
        pa = beam_props["positionangle"]["value"] * \
            u.Unit(beam_props["positionangle"]["unit"])

        return cls(major=major, minor=minor, pa=pa)
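
Usage note: a minimal sketch, assuming this classmethod lives on a radio_beam-style Beam class and is run inside a CASA session; the import path and image name are assumptions.

from radio_beam import Beam   # assumed import path

beam = Beam.from_casa_image('target_clean.image')
print(beam.major, beam.minor, beam.pa)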
Code example #23
    def from_casa_image(cls, imagename):
        '''
        Instantiate beam from a CASA image.

        ** Must be run in a CASA environment! **

        Parameters
        ----------
        imagename : str
            Name of CASA image.
        '''

        try:
            from taskinit import ia
        except ImportError:
            raise ImportError("Could not import CASA (casac) and therefore"
                              " cannot read CASA .image files")

        ia.open(imagename)
        beam_props = ia.restoringbeam()
        ia.close()

        beam_keys = ["major", "minor", "positionangle"]
        if not all(key in beam_props for key in beam_keys):
            raise ValueError("The image does not contain complete beam "
                             "information. Check the output of "
                             "ia.restoringbeam().")

        major = beam_props["major"]["value"] * \
            u.Unit(beam_props["major"]["unit"])
        minor = beam_props["minor"]["value"] * \
            u.Unit(beam_props["minor"]["unit"])
        pa = beam_props["positionangle"]["value"] * \
            u.Unit(beam_props["positionangle"]["unit"])

        return cls(major=major, minor=minor, pa=pa)
Code example #24
File: __init__.py Project: thatoeugine/stacker
def _calculate_flux_weights(coords):
    from taskinit import ia
    import re

    fluxmap = []

    r = re.compile(r'(.*)\.image/*')
    for imagename in coords.imagenames:
        match = r.match(imagename)

        if match:
            fluximage = match.group(1)+'.flux'

            if ia.open(fluximage):
                fluxmap.append(ia.getregion())

        ia.done()

    for i,coord in enumerate(coords):
        coord.weight = (fluxmap[coord.image][int(coord.x+0.5), int(coord.y+0.5), 0, 0])**2
        
    return coords
Code example #25
def _getPixelCoords1Im(coords, imagename):
    from interval import interval
    import math

    try:
        from taskinit import ia
        ia.open(imagename)
        cs = ia.coordsys()
        Nx = ia.shape()[0]
        Ny = ia.shape()[1]
        ia.done()
        x0 = cs.referencevalue()['numeric'][0]
        y0 = cs.referencevalue()['numeric'][1]
        x_pix_ref = cs.referencepixel()['numeric'][0]
        y_pix_ref = cs.referencepixel()['numeric'][1]
        x_pix_inc = cs.increment()['numeric'][0]
        y_pix_inc = cs.increment()['numeric'][1]


# If we fail to load ia, we will use pyrap instead.
# This probably means stacker was loaded from outside casapy.
    except ImportError:
        from pyrap.images import image
        im = image(imagename)
        cs = im.coordinates().get_coordinate('direction')
        dir_axis_index = im.coordinates().get_axes().index(cs.get_axes())
        imshape = im.shape()
        try:
            x_axis_index = cs.get_axes().index('Right Ascension')
        except ValueError:
            raise ValueError('Could not find direction coordinate: '\
                              'RightAscension')
        try:
            y_axis_index = cs.get_axes().index('Declination')
        except ValueError:
            raise ValueError('Could not find direction coordinate: '\
                              'Declination')

        Nx = im.shape()[dir_axis_index + x_axis_index]
        Ny = im.shape()[dir_axis_index + y_axis_index]
        x0 = cs.get_referencevalue()[x_axis_index]
        y0 = cs.get_referencevalue()[y_axis_index]
        x_pix_ref = cs.get_referencepixel()[x_axis_index]
        y_pix_ref = cs.get_referencepixel()[y_axis_index]
        x_pix_inc = cs.get_increment()[x_axis_index]
        y_pix_inc = cs.get_increment()[y_axis_index]

    pixcoords = []
    for coord in coords:
        dx = (coord.x - x0) * math.cos(coord.y)
        dy = math.asin(math.sin(coord.y) / math.cos(dx)) - y0
        x = dx / x_pix_inc + x_pix_ref
        y = dy / y_pix_inc + y_pix_ref

        if x in interval[0, Nx - 1] and y in interval[0., Ny - 1]:
            #             pixcoords.append(Coord(x,y, coord.weight))
            c = Coord(x, y, coord.weight)

            try:
                c.index = coord.index
            except AttributeError:
                pass

            pixcoords.append(c)

    return pixcoords
Code example #26
def load_casa_image(filename, skipdata=False,
                    skipvalid=False, skipcs=False, **kwargs):
    """
    Load a cube (into memory?) from a CASA image. By default it will transpose
    the cube into a 'python' order and drop degenerate axes. These options can
    be suppressed. The object holds the coordsys object from the image in
    memory.
    """

    try:
        from taskinit import ia
    except ImportError:
        raise ImportError("Could not import CASA (casac) and therefore cannot read CASA .image files")

    # use the ia tool to get the file contents
    ia.open(filename)

    # read in the data
    if not skipdata:
        data = ia.getchunk()

    # CASA stores validity of data as a mask
    if not skipvalid:
        valid = ia.getchunk(getmask=True)

    # transpose is dealt with within the cube object

    # read in coordinate system object
    casa_cs = ia.coordsys()

    wcs = wcs_casa2astropy(casa_cs)

    unit = ia.brightnessunit()

    # don't need this yet
    # stokes = get_casa_axis(temp_cs, wanttype="Stokes", skipdeg=False,)

    #    if stokes == None:
    #        order = np.arange(self.data.ndim)
    #    else:
    #        order = []
    #        for ax in np.arange(self.data.ndim+1):
    #            if ax == stokes:
    #                continue
    #            order.append(ax)

    #    self.casa_cs = ia.coordsys(order)

        # This should work, but coordsys.reorder() has a bug
        # on the error checking. JIRA filed. Until then the
        # axes will be reversed from the original.

        # if transpose == True:
        #    new_order = np.arange(self.data.ndim)
        #    new_order = new_order[-1*np.arange(self.data.ndim)-1]
        #    print new_order
        #    self.casa_cs.reorder(new_order)

    # close the ia tool
    ia.close()

    meta = {'filename': filename,
            'BUNIT': unit}


    if wcs.naxis == 3:
        mask = BooleanArrayMask(np.logical_not(valid), wcs)
        cube = SpectralCube(data, wcs, mask, meta=meta)

    elif wcs.naxis == 4:
        data, wcs = cube_utils._split_stokes(data.T, wcs)
        mask = {}
        for component in data:
            data[component], wcs_slice = cube_utils._orient(data[component],
                                                            wcs)
            mask[component] = LazyMask(np.isfinite, data=data[component],
                                       wcs=wcs_slice)

        cube = StokesSpectralCube(data, wcs_slice, mask, meta=meta)

    return cube
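
Usage note: a minimal sketch of calling load_casa_image() above, assuming a CASA session (so the taskinit import succeeds) and that SpectralCube and its mask classes are importable as in the snippet; the image name is hypothetical.

cube = load_casa_image('target_clean.image')
print(cube)                  # shape, WCS and spectral axis summary
print(cube.meta['BUNIT'])    # brightness unit read from the CASA image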
Code example #27
File: CubeStats_AT.py Project: teuben/admit
    def run(self):
        """Runs the task.

           Parameters
           ----------
           None

           Returns
           -------
           None
        """

        self._summary = {}
        dt = utils.Dtime("CubeStats")

        #maxvrms = 2.0      # maximum variation in rms allowed (hardcoded for now)
        #maxvrms = -1.0     # turn maximum variation in rms allowed off
        maxvrms = self.getkey("maxvrms")

        psample = -1
        psample = self.getkey("psample")        

        # BDP's used :
        #   b1 = input BDP
        #   b2 = output BDP

        b1 = self._bdp_in[0]
        fin = b1.getimagefile(bt.CASA)

        bdp_name = self.mkext(fin,'cst')
        b2 = CubeStats_BDP(bdp_name)
        self.addoutput(b2)

        # PeakPointPlot 
        use_ppp = self.getkey("ppp")

        # peakstats: not enabled for mortal users yet
        # peakstats = (psample=1, numsigma=4, minchan=3, maxgap=2, peakfit=False)
        pnumsigma = 4
        minchan   = 3
        maxgap    = 2
        peakfit   = False             # True will enable a true gaussian fit
        
        # numsigma:  adding all signal > numsigma ; not user enabled;   for peaksum.
        numsigma = -1.0
        numsigma = 3.0

        # grab the new robust statistics. If this is used, 'rms' will be the RMS,
        # else we will use RMS = 1.4826*MAD (MAD does a decent job on outliers as well)
        # and was the only method available before CASA 4.4 when robust was implemented
        robust = self.getkey("robust")
        rargs = casautil.parse_robust(robust)
        nrargs = len(rargs)

        if nrargs == 0:
           sumrargs = "medabsdevmed"      # for the summary, indicate the default robust
        else:
           sumrargs = str(rargs)

        self._summary["rmsmethd"] = SummaryEntry([sumrargs,fin],"CubeStats_AT",self.id(True))
        #@todo think about using this instead of putting 'fin' in all the SummaryEntry
        #self._summary["casaimage"] = SummaryEntry(fin,"CubeStats_AT",self.id(True))

        # extra CASA call to get the freq's in GHz, as these are not in imstat1{}
        # @todo what if the coordinates are not in FREQ ?
        # Note: CAS-7648 bug on 3D cubes
        if False:
            # csys method
            ia.open(self.dir(fin))
            csys = ia.coordsys() 
            spec_axis = csys.findaxisbyname("spectral") 
            # ieck, we need a valid position, or else it will come back and "Exception: All selected pixels are masked"
            #freqs = ia.getprofile(spec_axis, region=rg.box([0,0],[0,0]))['coords']/1e9
            #freqs = ia.getprofile(spec_axis)['coords']/1e9
            freqs = ia.getprofile(spec_axis,unit="GHz")['coords']
            dt.tag("getprofile")
        else:
            # old imval method 
            #imval0 = casa.imval(self.dir(fin),box='0,0,0,0')     # this fails on 3D
            imval0 = casa.imval(self.dir(fin))
            freqs = imval0['coords'].transpose()[2]/1e9
            dt.tag("imval")
        nchan = len(freqs)
        chans = np.arange(nchan)

        # call CASA to get what we want
        # imstat0 is the whole cube, imstat1 the plane based statistics
        # warning: certain robust stats (**rargs) on the whole cube are going to be very slow
        dt.tag("start")
        imstat0 = casa.imstat(self.dir(fin),           logfile=self.dir('imstat0.logfile'),append=False,**rargs)
        dt.tag("imstat0")
        imstat1 = casa.imstat(self.dir(fin),axes=[0,1],logfile=self.dir('imstat1.logfile'),append=False,**rargs)
        dt.tag("imstat1")
        # imm = casa.immoments(self.dir(fin),axis='spec', moments=8, outfile=self.dir('ppp.im'))
        if nrargs > 0:
            # need to get the peaks without robust
            imstat10 = casa.imstat(self.dir(fin),           logfile=self.dir('imstat0.logfile'),append=True)
            dt.tag("imstat10")
            imstat11 = casa.imstat(self.dir(fin),axes=[0,1],logfile=self.dir('imstat1.logfile'),append=True)
            dt.tag("imstat11")

        # grab the relevant plane-based things from imstat1
        if nrargs == 0:
            mean    = imstat1["mean"]
            sigma   = imstat1["medabsdevmed"]*1.4826     # see also: astropy.stats.median_absolute_deviation()
            peakval = imstat1["max"]
            minval  = imstat1["min"]
        else:
            mean    = imstat1["mean"]
            sigma   = imstat1["rms"]
            peakval = imstat11["max"]
            minval  = imstat11["min"]

        if True:
            # work around a bug in imstat(axes=[0,1]) for last channel [CAS-7697]
            for i in range(len(sigma)):
                if sigma[i] == 0.0:
                    minval[i] = peakval[i] = 0.0

        # too many variations in the RMS ?
        sigma_pos = sigma[np.where(sigma>0)]
        smin = sigma_pos.min()
        smax = sigma_pos.max()
        logging.info("sigma varies from %f to %f; %d/%d channels ok" % (smin,smax,len(sigma_pos),len(sigma)))
        if maxvrms > 0:
            if smax/smin > maxvrms:
                cliprms = smin * maxvrms
                logging.warning("sigma varies too much, going to clip to %g (%g > %g)" % (cliprms, smax/smin, maxvrms))
                sigma = np.where(sigma < cliprms, sigma, cliprms)

        # @todo   (and check again) for foobar.fits all sigma's became 0 when robust was selected
        #         was this with mask=True/False?

        # PeakPointPlot (can be expensive, hence the option)
        if use_ppp:
            logging.info("Computing MaxPos for PeakPointPlot")
            xpos    = np.zeros(nchan)
            ypos    = np.zeros(nchan)
            peaksum = np.zeros(nchan)

            ia.open(self.dir(fin))
            for i in range(nchan):
                if sigma[i] > 0.0:
                    plane = ia.getchunk(blc=[0,0,i,-1],trc=[-1,-1,i,-1],dropdeg=True)
                    v = ma.masked_invalid(plane)
                    v_abs = np.absolute(v)
                    max = np.unravel_index(v_abs.argmax(), v_abs.shape)
                    xpos[i] = max[0]
                    ypos[i] = max[1]
                    if numsigma > 0.0:
                        peaksum[i] = ma.masked_less(v,numsigma * sigma[i]).sum()
            peaksum = np.nan_to_num(peaksum)    # put 0's where nan's are found
            ia.close()
            dt.tag("ppp")

        nzeros = len(np.where(sigma <= 0.0)[0])
        if nzeros > 0:
            zeroch = np.where(sigma<=0.0)
            logging.warning("There are %d fully masked channels (%s)" % (nzeros,str(zeroch)))

        # construct the admit Table for CubeStats_BDP
        # note data needs to be a tuple, later to be column_stack'd
        if use_ppp:
            labels = ["channel" ,"frequency" ,"mean"    ,"sigma"   ,"max"     ,"maxposx" ,"maxposy" ,"min",     "peaksum"]
            units  = ["number"  ,"GHz"       ,"Jy/beam" ,"Jy/beam" ,"Jy/beam" ,"number"  ,"number"  ,"Jy/beam", "Jy"]
            data   = (chans     ,freqs       ,mean      ,sigma     ,peakval   ,xpos      ,ypos      ,minval,    peaksum)

        else:
            labels = ["channel" ,"frequency" ,"mean"    ,"sigma"   ,"max"     ,"min"]
            units  = ["number"  ,"GHz"       ,"Jy/beam" ,"Jy/beam" ,"Jy/beam" ,"Jy/beam"]
            data   = (chans     ,freqs       ,mean      ,sigma     ,peakval   ,minval)

        table = Table(columns=labels,units=units,data=np.column_stack(data))
        b2.setkey("table",table)

        # get the full cube statistics, it depends if robust was pre-selected
        if nrargs == 0:
            mean0  = imstat0["mean"][0]
            sigma0 = imstat0["medabsdevmed"][0]*1.4826
            peak0  = imstat0["max"][0]
            b2.setkey("mean" , float(mean0))
            b2.setkey("sigma", float(sigma0))
            b2.setkey("minval",float(imstat0["min"][0]))
            b2.setkey("maxval",float(imstat0["max"][0]))
            b2.setkey("minpos",imstat0["minpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            b2.setkey("maxpos",imstat0["maxpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            logging.info("CubeMax: %f @ %s" % (imstat0["max"][0],str(imstat0["maxpos"])))
            logging.info("CubeMin: %f @ %s" % (imstat0["min"][0],str(imstat0["minpos"])))
            logging.info("CubeRMS: %f" % sigma0)
        else:
            mean0  = imstat0["mean"][0]
            sigma0 = imstat0["rms"][0]
            peak0  = imstat10["max"][0]
            b2.setkey("mean" , float(mean0))
            b2.setkey("sigma", float(sigma0))
            b2.setkey("minval",float(imstat10["min"][0]))
            b2.setkey("maxval",float(imstat10["max"][0]))
            b2.setkey("minpos",imstat10["minpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            b2.setkey("maxpos",imstat10["maxpos"][:3].tolist())     #? [] or array(..dtype=int32) ??
            logging.info("CubeMax: %f @ %s" % (imstat10["max"][0],str(imstat10["maxpos"])))
            logging.info("CubeMin: %f @ %s" % (imstat10["min"][0],str(imstat10["minpos"])))
            logging.info("CubeRMS: %f" % sigma0)
        b2.setkey("robust",robust)
        rms_ratio = imstat0["rms"][0]/sigma0
        logging.info("RMS Sanity check %f" % rms_ratio)
        if rms_ratio > 1.5:
            logging.warning("RMS sanity check = %f.  Either bad sidelobes, lotsa signal, or both" % rms_ratio)
        logging.regression("CST: %f %f" % (sigma0, rms_ratio))

        # plots: no plots need to be made when nchan=1 for continuum
        # however we could make a histogram, overlaying the "best" gauss so 
        # signal deviations are clear?

        logging.info('mean,rms,S/N=%f %f %f' % (mean0,sigma0,peak0/sigma0))

        if nchan == 1:
            # for a continuum/1-channel we only need to stuff some numbers into the _summary
            self._summary["chanrms"] = SummaryEntry([float(sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["dynrange"] = SummaryEntry([float(peak0)/float(sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["datamean"] = SummaryEntry([float(mean0), fin], "CubeStats_AT", self.id(True))
        else:
            y1 = np.log10(ma.masked_invalid(peakval))
            y2 = np.log10(ma.masked_invalid(sigma))
            y3 = y1-y2
            y4 = np.log10(ma.masked_invalid(-minval))
            y5 = y1-y4
            y = [y1,y2,y3,y4]
            title = 'CubeStats: ' + bdp_name+'_0'
            xlab  = 'Channel'
            ylab  = 'log(Peak,Noise,Peak/Noise)'
            labels = ['log(peak)','log(rms noise)','log(peak/noise)','log(|minval|)']
            myplot = APlot(ptype=self._plot_type,pmode=self._plot_mode,abspath=self.dir())
            segp = [[chans[0],chans[nchan-1],math.log10(sigma0),math.log10(sigma0)]]
            myplot.plotter(chans,y,title,bdp_name+"_0",xlab=xlab,ylab=ylab,segments=segp,labels=labels,thumbnail=True)
            imfile = myplot.getFigure(figno=myplot.figno,relative=True)
            thumbfile = myplot.getThumbnail(figno=myplot.figno,relative=True)

            image0 = Image(images={bt.PNG:imfile},thumbnail=thumbfile,thumbnailtype=bt.PNG,description="CubeStats_0")
            b2.addimage(image0,"im0")

            if use_ppp:
                # new trial for Lee
                title = 'PeakSum: (numsigma=%.1f)' % (numsigma)
                ylab = 'Jy*N_ppb'
                myplot.plotter(chans,[peaksum],title,bdp_name+"_00",xlab=xlab,ylab=ylab,thumbnail=False)

            if True:
                # hack ascii table
                y30 = np.where(sigma > 0, np.log10(peakval/sigma), 0.0)
                table2 = Table(columns=["freq","log(P/N)"],data=np.column_stack((freqs,y30)))
                table2.exportTable(self.dir("testCubeStats.tab"))
                del table2

            # the "box" for the "spectrum" is all pixels.  Don't know how to 
            # get this except via shape.
            ia.open(self.dir(fin))
            s = ia.summary()
            ia.close()
            if 'shape' in s:
                specbox = (0,0,s['shape'][0],s['shape'][1])
            else:
                specbox = ()

            caption = "Emission characteristics as a function of channel, as derived by CubeStats_AT "
            caption += "(cyan: global rms,"
            caption += " green: noise per channel,"
            caption += " blue: peak value per channel,"
            caption += " red: peak/noise per channel)."
            self._summary["spectra"] = SummaryEntry([0, 0, str(specbox), 'Channel', imfile, thumbfile , caption, fin], "CubeStats_AT", self.id(True))
            self._summary["chanrms"] = SummaryEntry([float(sigma0), fin], "CubeStats_AT", self.id(True))

            # @todo Will imstat["max"][0] always be equal to s['datamax']?  If not, why not?
            if 'datamax' in s:
                self._summary["dynrange"] = SummaryEntry([float(s['datamax']/sigma0), fin], "CubeStats_AT", self.id(True))
            else:
                self._summary["dynrange"] = SummaryEntry([float(imstat0["max"][0]/sigma0), fin], "CubeStats_AT", self.id(True))
            self._summary["datamean"] = SummaryEntry([imstat0["mean"][0], fin], "CubeStats_AT", self.id(True))

            title = bdp_name + "_1"
            xlab =  'log(Peak,Noise,P/N)'
            myplot.histogram([y1,y2,y3],title,bdp_name+"_1",xlab=xlab,thumbnail=True)

            imfile = myplot.getFigure(figno=myplot.figno,relative=True)
            thumbfile = myplot.getThumbnail(figno=myplot.figno,relative=True)
            image1 = Image(images={bt.PNG:imfile},thumbnail=thumbfile,thumbnailtype=bt.PNG,description="CubeStats_1")
            b2.addimage(image1,"im1")

            # note that the 'y2' can have been clipped, which can throw off stats.robust()
            # @todo  should set a mask for those.

            title = bdp_name + "_2"
            xlab = 'log(Noise))'
            n = len(y2)
            ry2 = stats.robust(y2)
            y2_mean = ry2.mean()
            y2_std  = ry2.std()
            if n>9: logging.debug("NORMALTEST2: %s" % str(scipy.stats.normaltest(ry2)))
            myplot.hisplot(y2,title,bdp_name+"_2",xlab=xlab,gauss=[y2_mean,y2_std],thumbnail=True)

            title = bdp_name + "_3"
            xlab = 'log(diff[Noise])'
            n = len(y2)
            # dy2 = y2[0:-2] - y2[1:-1]
            dy2 = ma.masked_equal(y2[0:-2] - y2[1:-1],0.0).compressed()
            rdy2 = stats.robust(dy2)
            dy2_mean = rdy2.mean()
            dy2_std  = rdy2.std()
            if n>9: logging.debug("NORMALTEST3: %s" % str(scipy.stats.normaltest(rdy2)))
            myplot.hisplot(dy2,title,bdp_name+"_3",xlab=xlab,gauss=[dy2_mean,dy2_std],thumbnail=True)


            title = bdp_name + "_4"
            xlab = 'log(Signal/Noise))'
            n = len(y3)
            ry3 = stats.robust(y3)
            y3_mean = ry3.mean()
            y3_std  = ry3.std()
            if n>9: logging.debug("NORMALTEST4: %s" % str(scipy.stats.normaltest(ry3)))
            myplot.hisplot(y3,title,bdp_name+"_4",xlab=xlab,gauss=[y3_mean,y3_std],thumbnail=True)

            title = bdp_name + "_5"
            xlab = 'log(diff[Signal/Noise)])'
            n = len(y3)
            dy3 = y3[0:-2] - y3[1:-1]
            rdy3 = stats.robust(dy3)
            dy3_mean = rdy3.mean()
            dy3_std  = rdy3.std()
            if n>9: logging.debug("NORMALTEST5: %s" % str(scipy.stats.normaltest(rdy3)))
            myplot.hisplot(dy3,title,bdp_name+"_5",xlab=xlab,gauss=[dy3_mean,dy3_std],thumbnail=True)


            title = bdp_name + "_6"
            xlab = 'log(Peak+Min)'
            n = len(y1)
            ry5 = stats.robust(y5)
            y5_mean = ry5.mean()
            y5_std  = ry5.std()
            if n>9: logging.debug("NORMALTEST6: %s" % str(scipy.stats.normaltest(ry5)))
            myplot.hisplot(y5,title,bdp_name+"_6",xlab=xlab,gauss=[y5_mean,y5_std],thumbnail=True)

            logging.debug("LogPeak: m,s= %f %f min/max %f %f" % (y1.mean(),y1.std(),y1.min(),y1.max()))
            logging.debug("LogNoise: m,s= %f %f %f %f min/max %f %f" % (y2.mean(),y2.std(),y2_mean,y2_std,y2.min(),y2.max()))
            logging.debug("LogDeltaNoise: RMS/sqrt(2)= %f %f " % (dy2.std()/math.sqrt(2),dy2_std/math.sqrt(2)))
            logging.debug("LogDeltaP/N:   RMS/sqrt(2)= %f %f" % (dy3.std()/math.sqrt(2),dy3_std/math.sqrt(2)))
            logging.debug("LogPeak+Min: robust m,s= %f %f" % (y5_mean,y5_std))

            # compute two ratios that should both be near 1.0 if noise is 'normal'
            ratio  = y2.std()/(dy2.std()/math.sqrt(2))
            ratio2 = y2_std/(dy2_std/math.sqrt(2))
            logging.info("RMS BAD VARIATION RATIO: %f %f" % (ratio,ratio2))

        # making PPP plot
        if nchan > 1 and use_ppp:
            smax = 10
            gamma = 0.75

            z0 = peakval/peakval.max()
            # point sizes
            s = np.pi * ( smax * (z0**gamma) )**2
            cmds = ["grid", "axis equal"]
            title = "Peak Points per channel"
            pppimage = bdp_name + '_ppp'
            myplot.scatter(xpos,ypos,title=title,figname=pppimage,size=s,color=chans,cmds=cmds,thumbnail=True)
            pppimage     = myplot.getFigure(figno=myplot.figno,relative=True)
            pppthumbnail = myplot.getThumbnail(figno=myplot.figno,relative=True)
            caption = "Peak point plot: Locations of per-channel peaks in the image cube " + fin
            self._summary["peakpnt"] = SummaryEntry([pppimage, pppthumbnail, caption, fin], "CubeStats_AT", self.id(True))
        dt.tag("plotting")

        # making PeakStats plot
        if nchan > 1 and psample > 0:
            logging.info("Computing peakstats")
            # grab peak,mean and width values for all peaks
            (pval,mval,wval) = peakstats(self.dir(fin),freqs,sigma0,pnumsigma,minchan,maxgap,psample,peakfit)
            title = "PeakStats: cutoff = %g" % (sigma0*pnumsigma)
            xlab = 'Peak value'
            ylab = 'FWHM (channels)'
            pppimage = bdp_name + '_peakstats'
            cval = mval
            myplot.scatter(pval,wval,title=title,xlab=xlab,ylab=ylab,color=cval,figname=pppimage,thumbnail=False)
            dt.tag("peakstats")
            

        # myplot.final()    # pjt debug 
        # all done!
        dt.tag("done")

        taskargs = "robust=" + sumrargs 
        if use_ppp: 
            taskargs = taskargs + " ppp=True"
        else: 
            taskargs = taskargs + " ppp=False"
        for v in self._summary:
            self._summary[v].setTaskArgs(taskargs)

        dt.tag("summary")
        dt.end()
Code example #28
File: casa_image.py Project: jpinedaf/spectral-cube
def load_casa_image(filename, skipdata=False, skipvalid=False, skipcs=False, **kwargs):
    """
    Load a cube (into memory?) from a CASA image. By default it will transpose
    the cube into a 'python' order and drop degenerate axes. These options can
    be suppressed. The object holds the coordsys object from the image in
    memory.
    """

    try:
        from taskinit import ia
    except ImportError:
        raise ImportError("Could not import CASA (casac) and therefore cannot read CASA .image files")

    # use the ia tool to get the file contents
    ia.open(filename)

    # read in the data
    if not skipdata:
        data = ia.getchunk()

    # CASA stores validity of data as a mask
    if not skipvalid:
        valid = ia.getchunk(getmask=True)

    # transpose is dealt with within the cube object

    # read in coordinate system object
    casa_cs = ia.coordsys()

    wcs = wcs_casa2astropy(casa_cs)

    unit = ia.brightnessunit()

    # don't need this yet
    # stokes = get_casa_axis(temp_cs, wanttype="Stokes", skipdeg=False,)

    #    if stokes == None:
    #        order = np.arange(self.data.ndim)
    #    else:
    #        order = []
    #        for ax in np.arange(self.data.ndim+1):
    #            if ax == stokes:
    #                continue
    #            order.append(ax)

    #    self.casa_cs = ia.coordsys(order)

    # This should work, but coordsys.reorder() has a bug
    # on the error checking. JIRA filed. Until then the
    # axes will be reversed from the original.

    # if transpose == True:
    #    new_order = np.arange(self.data.ndim)
    #    new_order = new_order[-1*np.arange(self.data.ndim)-1]
    #    print new_order
    #    self.casa_cs.reorder(new_order)

    # close the ia tool
    ia.close()

    meta = {"filename": filename, "BUNIT": unit}

    if wcs.naxis == 3:
        mask = BooleanArrayMask(np.logical_not(valid), wcs)
        cube = SpectralCube(data, wcs, mask, meta=meta)

    elif wcs.naxis == 4:
        data, wcs = cube_utils._split_stokes(data.T, wcs)
        mask = {}
        for component in data:
            data[component], wcs_slice = cube_utils._orient(data[component], wcs)
            mask[component] = LazyMask(np.isfinite, data=data[component], wcs=wcs_slice)

        cube = StokesSpectralCube(data, wcs_slice, mask, meta=meta)

    return cube
Code example #29
File: __init__.py Project: thatoeugine/stacker
def stack(coords, outfile, stampsize=32, imagenames=[], method='mean',
          weighting=None, maxmaskradius=None, psfmode='point', primarybeam=None):
    """
   	 Performs stacking in the image domain.

         coords -- A coordList object of all target coordinates.
	 outfile -- Target name for stacked image.
         stampsize -- size of target image in pixels
         imagenames -- Name of images to extract flux from.
         method -- 'mean' or 'median', will determined how pixels are calculated
         weighting -- only for method 'mean', if set to None will use weights in coords.
         maxmaskradius -- allows blanking of centre pixels in weight calculation
         psfmode -- Allows application of filters to stacking, currently not supported.
         primarybeam -- only applies if weighting='pb'

         returns: Estimate of stacked flux assuming point source.
    """


    from ..interval import interval
    import os
    import shutil
    import numpy as np
    import stacker  # needed for stacker.getPixelCoords below
    from taskinit import ia, casalog

    casalog.origin('stacker')
    casalog.post('#'*42,'INFO')
    casalog.post('#'*5 +  ' {0: <31}'.format("Begin Task: Stacker")+'#'*5, 'INFO')

    global skymap
    global data
    global oldimagenames

    if coords.coord_type == 'physical':
        coords = stacker.getPixelCoords(coords, imagenames)


# Important that len(coords) here is for the pixel coordinates, not physical!
    _allocate_buffers(coords.imagenames, stampsize, len(coords))

    ia.open(coords.imagenames[0])
    cs = ia.coordsys()
    outnchans = ia.boundingbox()['trc'][2]+1
    outnstokes = ia.boundingbox()['trc'][3]+1
    ia.done()



    for imagename in coords.imagenames:
        ia.open(imagename)
        if ia.shape()[2] != outnchans or ia.shape()[3] != outnstokes:
            print('Channels/polarisations do not match in all images! '
                  'You probably want to do stacking on continuum data, '
                  'not on a spectral cube.')
            return
        ia.done()

    _load_stack(coords, psfmode)

    if method == 'mean' and weighting == 'sigma2':
        coords = _calculate_sigma2_weights(coords, maxmaskradius)
    elif method == 'mean' and weighting == 'sigma':
        coords = _calculate_sigma_weights(coords, maxmaskradius)
    elif method == 'mean' and weighting == 'pb':
        coords = calculate_pb_weights(coords, primarybeam, imagenames)

    npos = len([c.weight for c in coords if c.weight > 1e-6])
    casalog.post('Number of stacking positions: {0}'.format(npos),
            priority='INFO')

    stacked_im  = _stack_stack(method, coords)


    _write_stacked_image(outfile, stacked_im,
                         coords.imagenames[0], stampsize)
    casalog.post('#'*5 +  ' {0: <31}'.format("End Task: stacker")+'#'*5)
    casalog.post('#'*42)
    return stacked_im[int(stampsize/2), int(stampsize/2),0,0]
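
A hedged usage sketch for the stacking call above, assuming a CASA session. The coordList is whatever object the docstring refers to; `stacker.readCoords` and the file names below are illustrative assumptions, not part of the excerpt.

import stacker

# Hypothetical coordinate file and image names; adjust to your own data.
coords = stacker.readCoords('targets.list')
flux = stack(coords, 'stacked.image',
             stampsize=32,
             imagenames=['mosaic.image'],
             method='mean',
             weighting='sigma2')
print('Stacked point-source flux estimate:', flux)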
Code example #30
0
File: casa_masks.py Project: jpinedaf/spectral-cube
# Module-level imports assumed by this snippet (not shown in the excerpt):
import tempfile

import numpy as np
from astropy.io import fits
from astropy.wcs.utils import add_stokes_axis_to_wcs


def make_casa_mask(SpecCube,
                   outname,
                   append_to_image=True,
                   img=None,
                   add_stokes=True,
                   stokes_posn=None):
    '''
    Outputs the mask attached to the SpectralCube object as a CASA image, or
    optionally appends the mask to a preexisting CASA image.

    Parameters
    ----------
    SpecCube : SpectralCube
        SpectralCube object containing mask.
    outname : str
        Name of the outputted mask file.
    append_to_image : bool, optional
        Appends the mask to a given image.
    img : str, optional
        Image to be appended to. Must be specified if append_to_image is
        enabled.
    add_stokes: bool, optional
        Adds a Stokes axis onto the wcs from SpecCube.
    stokes_posn : int, optional
        Sets the position of the new Stokes axis. Defaults to the last axis.
    '''

    try:
        from taskinit import ia
    except ImportError:
        raise ImportError("Cannot import casac. Must be run in a CASA "
                          "environment.")

    # Get the header info from the image.
    # There's no wcs_astropy2casa (yet), so create a temporary file for
    # CASA to open.
    temp = tempfile.NamedTemporaryFile()
    # CASA closes this file at some point, so set it to manual deletion.
    temp2 = tempfile.NamedTemporaryFile(delete=False)

    # Grab wcs
    # Optionally re-add on the Stokes axis
    if add_stokes:
        my_wcs = SpecCube.wcs
        if stokes_posn is None:
            stokes_posn = my_wcs.wcs.naxis

        new_wcs = add_stokes_axis_to_wcs(my_wcs, stokes_posn)
        header = new_wcs.to_header()
        # Transpose the shape so we're adding the axis at the place CASA will
        # recognize. Then transpose back.
        shape = SpecCube.shape[::-1]
        shape = shape[:stokes_posn] + (1, ) + shape[stokes_posn:]
        shape = shape[::-1]
    else:
        # Just grab the header from SpecCube
        header = SpecCube.header
        shape = SpecCube.shape

    hdu = fits.PrimaryHDU(header=header, data=np.empty(shape, dtype='int16'))

    hdu.writeto(temp.name)

    ia.fromfits(infile=temp.name, outfile=temp2.name, overwrite=True)

    temp.close()

    cs = ia.coordsys()

    ia.close()

    temp2.close()

    mask_arr = SpecCube.mask.include()

    # Reshape mask with possible Stokes axis
    mask_arr = mask_arr.reshape(shape)

    # Transpose to match CASA axes
    mask_arr = mask_arr.T

    ia.newimagefromarray(outfile=outname, pixels=mask_arr.astype('int16'))

    ia.open(outname)
    ia.setcoordsys(cs.torecord())

    ia.close()

    if append_to_image:
        if img is None:
            raise TypeError(
                "img argument must be specified to append the mask.")

        ia.open(outname)
        ia.calcmask(outname + ">0.5")
        ia.close()

        ia.open(img)
        ia.maskhandler('copy', [outname + ":mask0", outname])
        ia.maskhandler('set', outname)
        ia.close()
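
A hedged usage sketch for make_casa_mask, assuming a CASA session and an already-loaded SpectralCube; the file names are placeholders.

from spectral_cube import SpectralCube

# 'cube.fits' and the output names below are hypothetical placeholders.
cube = SpectralCube.read('cube.fits')
make_casa_mask(cube, 'cube_mask.image', append_to_image=False)

# Or, to attach the mask to an existing CASA image:
# make_casa_mask(cube, 'cube_mask.image', append_to_image=True, img='cube.image')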
Code example #32
0
File: __init__.py Project: thatoeugine/stacker
def _allocate_buffers(imagenames, new_stampsize, nstackpos):
    import numpy as np
    try:
        from taskinit import ia
        dataread = 'casa'
    except ImportError:
        from pyrap.images import image
        dataread = 'pyrap'

    global skymap
    global data
    global oldimagenames
    global stampsize
    global imagesizes

    if dataread == 'casa':
        ia.open(imagenames[0])
        cs = ia.coordsys()
        outnchans = ia.boundingbox()['trc'][2]+1
        outnstokes = ia.boundingbox()['trc'][3]+1
        ia.done()
    elif dataread == 'pyrap':
        im = image(imagenames[0])
        cs = im.coordinates()
        outnchans = im.shape()[cs.get_axes().index(cs.get_coordinate('spectral').get_axes())]
        outnstokes = im.shape()[cs.get_axes().index(cs.get_coordinate('stokes').get_axes())]
    
# To improve performance this module keeps buffers between runs.
# The following code resets these buffers if they have grown obsolete.
    if oldimagenames == []:
        oldimagenames = imagenames

    if oldimagenames != imagenames:
        oldimagenames = imagenames
        skymap = []
        data = []

    if stampsize == 0:
        stampsize = new_stampsize
    elif stampsize != new_stampsize:
        stampsize = new_stampsize
        data = []
        skymap = []

    if not(data == []) and nstackpos != data.shape[0]:
        data = []

# If there is no data buffer, create one.
# The data buffer holds the extracted stamps for all stacking positions;
# during stacking this is where the full stack is actually saved.
    if data == []:
        data = np.zeros((nstackpos, new_stampsize, new_stampsize,
                         outnstokes, outnchans))
    else:
        data = 0.*data
    
# If there is no skymap buffer, create one.
# This is the data that is most important to buffer:
# reading a skymap from disk is time consuming and we want to do it as rarely as possible.
    if skymap == []:
        for imagename in imagenames:
            if dataread == 'casa':
                ia.open(imagename)
                skymap.append(ia.getregion())
                ia.done()
            elif dataread == 'pyrap':
                # Open each image separately; reusing the image opened above
                # would only ever read imagenames[0].
                im = image(imagename)
                buff = im.getdata()
                dir_axis = cs.get_axes().index(cs.get_coordinate('direction').get_axes())
                x_axis = dir_axis+cs.get_coordinate('direction').get_axes().index('Right Ascension')
                y_axis = dir_axis+cs.get_coordinate('direction').get_axes().index('Declination')
                specax = cs.get_axes().index(cs.get_coordinate('spectral').get_axes())
                stokesax = cs.get_axes().index(cs.get_coordinate('stokes').get_axes())
                axis_order = [x_axis, y_axis, stokesax, specax]

                for i in range(len(axis_order)-1):
                    if axis_order[i] != i:
                        target = axis_order.index(i)
                        origin = i
                        buff = buff.swapaxes(axis_order[origin], axis_order[target])
                        axis_order[origin], axis_order[target] = \
                            axis_order[target], axis_order[origin]
                skymap.append(buff)

    imagesizes = []
    for imagename in imagenames:
        if dataread == 'casa':
            ia.open(imagename)
            imagesizes.append((ia.shape()[0], ia.shape()[1]))
            ia.done()
        elif dataread == 'pyrap':
            # Again, open each image rather than reusing the first one.
            im = image(imagename)
            dir_axis = cs.get_axes().index(cs.get_coordinate('direction').get_axes())
            x_axis_index = dir_axis+cs.get_coordinate('direction').get_axes().index('Right Ascension')
            y_axis_index = dir_axis+cs.get_coordinate('direction').get_axes().index('Declination')
            imagesizes.append((im.shape()[x_axis_index], im.shape()[y_axis_index]))
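
# Context (a sketch, not part of the original file excerpt): _allocate_buffers
# manages these module-level buffers, which the public functions declare as
# globals. A plausible set of module-level definitions would be:
#
#     skymap = []          # cached pixel data, one entry per input image
#     data = []            # per-position stamp buffer filled by _load_stack
#     oldimagenames = []   # detects when the image list changes
#     stampsize = 0        # stamp size the buffers were allocated for
#     imagesizes = []      # (nx, ny) of each input image
#
# Typical call order, as in stack() above:
#     _allocate_buffers(coords.imagenames, stampsize, len(coords))
#     _load_stack(coords, psfmode)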