예제 #1
0
def write_ip_NDF(data, bad_pixel_ref):
    """
    Write the array of IP parameter data out to a new NDF.

    Invocation:
        result = write_ip_NDF(data, bad_pixel_ref)

    Arguments:
        data = The array of IP parameter data to store in the NDF.
        bad_pixel_ref = An NDF with bad pixel values to copy over.

    Returned Value:
        An NDG object holding the name of the newly created NDF.
    """

    # Create a new 2-d _DOUBLE NDF with bounds (1,1)..(32,40) and copy
    # the supplied array into its DATA component.
    ndf_name_orig = NDG(1)
    indf = ndf.open(ndf_name_orig[0], 'WRITE', 'NEW')
    indf.new('_DOUBLE', 2, numpy.array([1, 1]), numpy.array([32, 40]))
    ndfmap = indf.map('DATA', '_DOUBLE', 'WRITE')
    ndfmap.numpytondf(data)
    indf.annul()

    # Propagate bad pixels from the reference NDF into a copy of the new
    # NDF, and return the handle of that copy.
    ndf_name = NDG(1)
    invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(
        ndf_name_orig, bad_pixel_ref, ndf_name))
    return ndf_name
예제 #2
0
def write_ip_NDF(data,bad_pixel_ref):
    """
    Write the IP parameter array *data* to a freshly created NDF.

    The array is stored as the DATA component of a new 32x40 _DOUBLE
    NDF, after which bad pixel values are copied in from
    *bad_pixel_ref*. The NDG handle of the resulting NDF is returned.
    """

    # New empty NDF to receive the data array.
    raw_ndg = NDG(1)
    handle = ndf.open(raw_ndg[0], 'WRITE', 'NEW')
    handle.new('_DOUBLE', 2, numpy.array([1, 1]), numpy.array([32, 40]))
    dmap = handle.map('DATA', '_DOUBLE', 'WRITE')
    dmap.numpytondf(data)
    handle.annul()

    # Transfer bad pixels from the reference NDF into the result.
    out_ndg = NDG(1)
    invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(
        raw_ndg, bad_pixel_ref, out_ndg))
    return out_ndg
예제 #3
0
    def test_newwithwrite(self):
        """Create a new NDF, write data, an extension and attributes."""
        # okay we have all the data, time to open us up an ndf
        indf = ndf.open(self.testndf, 'WRITE', 'NEW')
        # Bounds (0,0)..(4,4): a 5x5 2-d _REAL array.
        indf.new('_REAL', 2, numpy.array([0, 0]), numpy.array([4, 4]))

        # create PAMELA extension
        loc = indf.xnew('PAMELA', 'STRUCT')

        # Transfer the locator into the HDS layer and check its name.
        hdsloc = hds._transfer(loc)
        name = hdsloc.name
        self.assertEqual(name, "PAMELA")

        ccd = numpy.zeros([5, 5])

        # map primary data and push the numpy array into it
        ndfmap = indf.map('DATA', '_REAL', 'WRITE')
        self.assertEqual(ndfmap.type, "_REAL")
        ndfmap.numpytondf(ccd)

        # Attribute testing: title and units can be set, read back, and
        # cleared again (assigning None unsets the component).
        indf.title = "A Title"
        self.assertEqual(indf.title, "A Title")
        self.assertIsNone(indf.units)
        indf.units = "K"
        self.assertEqual(indf.units, "K")
        indf.units = None
        self.assertIsNone(indf.units)

        # shut down ndf system
        indf.annul()

        # make sure we got a file
        self.assertTrue(os.path.exists(self.testndf),
                        "Test existence of NDF file")
예제 #4
0
    def test_newwithwrite(self):
        """Create a brand new NDF, write data plus attributes, verify on disk."""
        # Open a fresh NDF for writing and give it a 2-d _REAL array
        # with bounds (0,0)..(4,4).
        handle = ndf.open(self.testndf, 'WRITE', 'NEW')
        handle.new('_REAL', 2, numpy.array([0, 0]), numpy.array([4, 4]))

        # Create a PAMELA extension and confirm its name via HDS.
        pam_loc = handle.xnew('PAMELA', 'STRUCT')
        raw_loc = hds._transfer(pam_loc)
        self.assertEqual(raw_loc.name, "PAMELA")

        frame = numpy.zeros([5, 5])

        # Map the primary data array and copy the numpy array into it.
        dmap = handle.map('DATA', '_REAL', 'WRITE')
        self.assertEqual(dmap.type, "_REAL")
        dmap.numpytondf(frame)

        # Round-trip the title and units attributes, including unsetting
        # units again with None.
        handle.title = "A Title"
        self.assertEqual(handle.title, "A Title")
        self.assertIsNone(handle.units)
        handle.units = "K"
        self.assertEqual(handle.units, "K")
        handle.units = None
        self.assertIsNone(handle.units)

        # Close the NDF, then confirm the file now exists on disk.
        handle.annul()
        self.assertTrue(os.path.exists(self.testndf),
                        "Test existence of NDF file")
예제 #5
0
    def test_simplenew(self):
        """Create a minimal new NDF and map its data array."""
        # okay we have all the data, time to open us up an ndf
        indf = ndf.open(self.testndf, 'WRITE', 'NEW')
        # Bounds (0,0)..(4,4) give a 5x5 (= 25 element) 2-d _REAL array.
        indf.new('_REAL', 2, numpy.array([0, 0]), numpy.array([4, 4]))

        # map primary data to make sure NDF does not complain
        ndfmap = indf.map('DATA', '_REAL', 'WRITE')
        self.assertEqual(ndfmap.nelem, 25)

        # make sure we got a file
        self.assertTrue(os.path.exists(self.testndf),
                        "Test existence of NDF file")
예제 #6
0
    def test_simplenew(self):
        """Create a minimal new NDF and check its mapped data array."""
        # A new 2-d _REAL NDF with bounds (0,0)..(4,4), i.e. 5x5 pixels.
        handle = ndf.open(self.testndf, 'WRITE', 'NEW')
        handle.new('_REAL', 2, numpy.array([0, 0]), numpy.array([4, 4]))

        # Mapping the DATA component must succeed and report 25 elements.
        dmap = handle.map('DATA', '_REAL', 'WRITE')
        self.assertEqual(dmap.nelem, 25)

        # The backing file should now exist on disk.
        self.assertTrue(os.path.exists(self.testndf),
                        "Test existence of NDF file")
예제 #7
0
    def test_simpleread(self):
        """Read the bundled sample NDF and verify metadata, extensions,
        axes, history, data values, WCS and component-state queries."""
        indf = ndf.open(os.path.join(fulldir, 'data', 'ndf_test.sdf'))
        # Character components and overall shape.
        self.assertEqual(indf.label, 'Signal')
        self.assertEqual(indf.units, 'counts')
        self.assertEqual(indf.title, 'Test Data')
        self.assertEqual(indf.xnumb, 1)
        self.assertEqual(indf.dim, [5, 3])

        # Xtension stuff: exactly one extension, named FITS.
        self.assertEqual(indf.xname(0), 'FITS')
        self.assertTrue(indf.xstat('FITS'))
        self.assertFalse(indf.xstat('NONEXISTENT'))
        self.assertIsInstance(indf.xloc('FITS', 'READ'), hds.HDSWrapperClass)

        # axis stuff: which axis components exist on each of the two axes
        self.assertTrue(indf.astat('LABEL', 0))
        self.assertTrue(indf.astat('UNIT', 0))
        self.assertFalse(indf.astat('WIDTH', 0))
        self.assertTrue(indf.astat('CENTRE', 0))
        self.assertFalse(indf.astat('VARIANCE', 0))

        self.assertTrue(indf.astat('LABEL', 1))
        self.assertTrue(indf.astat('UNIT', 1))
        self.assertFalse(indf.astat('WIDTH', 1))
        self.assertTrue(indf.astat('CENTRE', 1))
        self.assertFalse(indf.astat('VARIANCE', 1))

        self.assertEqual(indf.acget('LABEL', 1), 'Right ascension')
        self.assertEqual(indf.acget('LABEL', 0), 'Declination')
        self.assertEqual(indf.acget('UNIT', 1), 'deg')
        self.assertEqual(indf.acget('UNIT', 0), 'deg')

        # Main ndf stuff: history records, data type and pixel bounds.
        history = indf.history()
        self.assertEqual(len(history), 4)
        self.assertEqual(history[0].application.split()[0], 'HISSET')
        self.assertEqual(indf.type('DATA'), '_REAL')
        self.assertSequenceEqual(indf.bound(), [[1, 1], [5, 3]])

        # NOTE(review): -3.4028235e+38 looks like the Starlink bad value
        # for _REAL data — confirm against the NDF library constants.
        data = indf.read('DATA')
        data_shouldbe = (np.ones([5, 3]) * -3.4028235e+38).astype(np.float32)
        self.assertSequenceEqual(data.tolist(), data_shouldbe.tolist())

        from starlink import Ast
        self.assertIsInstance(indf.gtwcs(), Ast.FrameSet)

        # Component state: LABEL is set, QUALITY is not, and querying an
        # unknown component raises.
        self.assertTrue(indf.state('LABEL'))
        self.assertFalse(indf.state('QUALITY'))
        with self.assertRaises(hds.StarlinkError):
            indf.state('NONEXISTENT')

        indf.annul()
예제 #8
0
    def __init__(self, fname):
        """
        Initialise an NDF from a file.

        This slurps the whole thing in, including all extensions, axes etc. This could
        cause memory problems on large files. You can use either standard format
        NDF sections in such case or Pythonic ones. e.g. Given an NDF 'image' listed by
        hdstrace to have a data array DATA(3,4,5), the entire image can be specified
        using any of 'image', 'image(1:3,1:4,1:5)' or 'image[0:5,0:4,0:3]' where as usual
        with the index ranges in Python, the last index is NOT included and indices start
        at 0. While those used to dealing with NDFs may find the first with () more
        familiar, the second may be simpler if you deal with the output from a python script
        since it is identically ordered and more consistent with taking sub-sections using
        Python as in::

            ndf = starlink.ndfpack.Ndf('image')
            subim = image.data[0:5,0:4,0:3]

        Raises an Exception if a Pythonic section string cannot be parsed.
        """
        object.__init__(self)

        def _fortran_index(item):
            # Convert one Pythonic index ('n') or range ('a:b') to the
            # Fortran-like, 1-based inclusive form used by NDF sections.
            parts = item.split(':')
            if len(parts) == 1:
                return str(int(parts[0]) + 1)
            elif len(parts) == 2:
                return str(int(parts[0]) + 1) + ':' + str(int(parts[1]))
            # Report the caller's original string, not a partial result.
            raise Exception('Could not understand ' + fname)

        # Change a pseudo-Pythonic NDF section, e.g. 'image[0:5,0:4,0:3]',
        # into a Fortran-like one: reverse the axis order and add 1 to the
        # first of each pair (or to a sole index).
        reg = re.compile(r'([^\[\]]*)\[([^\[\]]*)\]')
        m = reg.match(fname)
        if m is not None:
            indices = m.group(2).split(',')
            fname = (m.group(1) + '(' +
                     ','.join(_fortran_index(st) for st in reversed(indices)) +
                     ')')

        # OK, get on with NDF stuff.  ndf.end() must run whether or not
        # reading succeeds, so use try/finally instead of duplicating the
        # call in an except clause.
        ndf.init()
        ndf.begin()
        try:
            indf = ndf.open(fname)
            #: the data array, a numpy N-d array
            self.data = indf.read('Dat')
            #: pixel limits of data array.
            #: 2xndim array of lower and upper bounds
            self.bound = indf.bound()
            #: variances, a numpy N-d array
            self.var = indf.read('Var')
            #: label string associated with the data
            self.label = indf.label
            #: title string associated with the data
            self.title = indf.title
            #: data unit string
            self.units = indf.units
            try:
                #: WCS information, a PyAST FrameSet
                self.wcs = indf.gtwcs()
            except NotImplementedError:
                self.wcs = None

            # Read the axes
            #: a list of Axis objects, one for each dimension of data
            self.axes = []
            for nax in range(self.data.ndim):
                self.axes.append(Axis(indf, nax))

            # Read the extensions
            #: header/extensions, a dictionary
            self.head = {}
            for nex in range(indf.xnumb):
                xname = indf.xname(nex)
                loc1 = indf.xloc(xname, 'READ')
                hdsloc = hds._transfer(loc1)
                _read_hds(hdsloc, self.head)
                hdsloc.annul()
        finally:
            ndf.end()
예제 #9
0
 def test_simpleread(self):
     # Open the bundled sample NDF and check its character components.
     indf = ndf.open(os.path.join(fulldir, 'data', 'ndf_test.sdf'))
     self.assertEqual(indf.label, 'Signal')
     self.assertEqual(indf.units, 'counts')
     self.assertEqual(indf.title, 'Test Data')
     # NOTE(review): the NDF identifier is never annulled here — confirm
     # the test harness cleans up, otherwise this leaks an NDF context.
예제 #10
0
def pca( indata, ncomp ):
   """

   Identifies and returns the strongest PCA components in a 3D NDF.

   Requires the MDP package (mdp.nodes.PCANode) and the KAPPA tasks
   ndftrace and fillbad to be available.

   Invocation:
      result = pca( indata, ncomp )

   Arguments:
      indata = NDG
         An NDG object specifying a single 3D NDF. Each plane in the cube
         is a separate image, and the images are compared using PCA.
      ncomp = int
         The number of PCA components to include in the returned NDF.

   Returned Value:
      A new NDG object containing a single 3D NDF containing just the
      strongest "ncomp" PCA components found in the input NDF.

   """

   msg_out( "   finding strongest {0} components using Principal Component Analysis...".format(ncomp) )

#  Get the shape of the input NDF.
   invoke( "$KAPPA_DIR/ndftrace {0} quiet".format(indata) )
   nx = get_task_par( "dims(1)", "ndftrace" )
   ny = get_task_par( "dims(2)", "ndftrace" )
   nz = get_task_par( "dims(3)", "ndftrace" )

#  Fill any bad pixels.
   tmp = NDG(1)
   invoke( "$KAPPA_DIR/fillbad in={0} out={1} variance=no niter=10 size=10".format(indata,tmp) )

#  Read the planes from the supplied NDF. Note, numpy axis ordering is the
#  reverse of starlink axis ordering. We want a numpy array consisting of
#  "nz" elements, each being a vectorised form of a plane from the 3D NDF.
   ndfdata = numpy.reshape( Ndf( tmp[0] ).data, (nz,nx*ny) )

#  Normalize each plane to a mean of zero and standard deviation of 1.0
   means = []
   sigmas = []
   newdata = []
   for iplane in range(0,nz):
      plane = ndfdata[ iplane ]
      mn = plane.mean()
      sg = math.sqrt( plane.var() )
      means.append( mn )
      sigmas.append( sg )

#  Constant-valued planes (zero spread) are excluded from the PCA input;
#  they are restored unchanged in the re-assembly loop below (see jplane).
      if sg > 0.0:
         newdata.append( (plane-mn)/sg )

   newdata= numpy.array( newdata )

#  Transpose as required by MDP.
   pcadata = numpy.transpose( newdata )

#  Find the required number of PCA components (these are the strongest
#  components).
#  NOTE(review): the local name 'pca' shadows this function's own name;
#  harmless here since the function is not called recursively, but worth
#  renaming.
   pca = mdp.nodes.PCANode( output_dim=ncomp )
   comp = pca.execute( pcadata )

#  Re-project the components back into the space of the input 3D NDF.
   ip = numpy.dot( comp, pca.get_recmatrix() )

#  Transpose the array so that each row is an image.
   ipt = numpy.transpose(ip)

#  Normalise them back to the original scales.  jplane only advances over
#  planes that took part in the PCA, keeping it in step with ipt.
   jplane = 0
   newdata = []
   for iplane in range(0,nz):
      if sigmas[ iplane ] > 0.0:
         newplane = sigmas[ iplane ] * ipt[ jplane ] + means[ iplane ]
         jplane += 1
      else:
         newplane = ndfdata[ iplane ]
      newdata.append( newplane )
   newdata= numpy.array( newdata )

#  Dump the re-projected images out to a 3D NDF.
   result = NDG(1)
   indf = ndf.open( result[0], 'WRITE', 'NEW' )
   indf.new('_DOUBLE', 3, numpy.array([1,1,1]),numpy.array([nx,ny,nz]))
   ndfmap = indf.map( 'DATA', '_DOUBLE', 'WRITE' )
   ndfmap.numpytondf( newdata )
   indf.annul()

#  Uncomment to dump the components.
#   msg_out( "Dumping PCA comps to {0}-comps".format(result[0]) )
#   compt = numpy.transpose(comp)
#   indf = ndf.open( "{0}-comps".format(result[0]), 'WRITE', 'NEW' )
#   indf.new('_DOUBLE', 3, numpy.array([1,1,1]),numpy.array([nx,ny,ncomp]))
#   ndfmap = indf.map( 'DATA', '_DOUBLE', 'WRITE' )
#   ndfmap.numpytondf( compt )
#   indf.annul()

   return result
예제 #11
0
File: ndf.py  Project: timj/starlink-pyndf
    def __init__(self, fname):
        """
        Initialise an NDF from a file.

        This slurps the whole thing in, including all extensions, axes etc. This could
        cause memory problems on large files. You can use either standard format
        NDF sections in such case or Pythonic ones. e.g. Given an NDF 'image' listed by
        hdstrace to have a data array DATA(3,4,5), the entire image can be specified
        using any of 'image', 'image(1:3,1:4,1:5)' or 'image[0:5,0:4,0:3]' where as usual
        with the index ranges in Python, the last index is NOT included and indices start
        at 0. While those used to dealing with NDFs may find the first with () more
        familiar, the second may be simpler if you deal with the output from a python script
        since it is identically ordered and more consistent with taking sub-sections using
        Python as in::

            ndf = starlink.ndfpack.Ndf('image')
            subim = image.data[0:5,0:4,0:3]
        """
        object.__init__(self)

        # Next section changes from a pseudo-Pythonic version of an NDF section
        # to a Fortran-like one i.e. reverse the indices, add 1 to the first of a pair
        # or to the sole index
        reg = re.compile(r'([^\[\]]*)\[([^\[\]]*)\]')
        m = reg.match(fname)
        if m != None:
            tup = m.group(2).split(',')
            nname = m.group(1) + '('
            # All axes except the first, in reverse order; each appends a
            # trailing comma.
            for st in tup[-1:0:-1]:
                subt = st.split(':')
                if len(subt) == 1:
                    add = str(int(subt[0])+1)
                elif len(subt) == 2:
                    add = str(int(subt[0])+1) + ':' + str(int(subt[1]))
                else:
                    raise Exception('Could not understand ' + fname)
                nname += add + ','
            # The first axis is handled separately so the closing ')' can
            # be appended instead of a comma.
            subt = tup[0].split(':')
            if len(subt) == 1:
                add = str(int(subt[0])+1)
            elif len(subt) == 2:
                add = str(int(subt[0])+1) + ':' + str(int(subt[1]))
            else:
                # NOTE(review): this branch reports the partially built
                # 'nname' rather than the caller's 'fname' — confirm
                # whether that is intentional.
                raise Exception('Could not understand ' + nname)
            nname += add + ')'
            fname = nname

        # OK, get on with NDF stuff
        ndf.init()
        ndf.begin()
        try:
            indf = ndf.open(fname)
            #: the data array, a numpy N-d array
            self.data  = indf.read('Dat')
            #: pixel limits of data array.
            #: 2xndim array of lower and upper bounds
            self.bound = indf.bound()
            #: variances, a numpy N-d array
            self.var   = indf.read('Var')
            #: label string associated with the data
            self.label = indf.label
            #: title string associated with the data
            self.title = indf.title
            #: data unit string
            self.units  = indf.units
            try:
                #: WCS information, a PyAST FrameSet
                self.wcs = indf.gtwcs()
            except NotImplementedError:
                self.wcs = None

            # Read the axes
            #: a list of Axis objects, one for each dimension of data
            self.axes = []
            for nax in range(self.data.ndim):
                self.axes.append(Axis(indf, nax))

            # Read the extensions
            #: header/extensions, a dictionary
            self.head = {}
            nextn = indf.xnumb
            for nex in range(nextn):
                xname = indf.xname(nex)
                loc1 = indf.xloc(xname, 'READ')
                hdsloc = hds._transfer(loc1)
                _read_hds(hdsloc, self.head)
                hdsloc.annul()

            ndf.end()
        except:
            # Close the NDF context before propagating any error.
            ndf.end()
            raise