Example #1
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. If no value is supplied on
#  the command line, the user is prompted for a value when the parameter
#  value is first accessed within this script. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input time series NDFs",
                                  starutil.get_task_par("DATA_ARRAY",
                                                        "GLOBAL",
                                                    default=starutil.Parameter.UNSET)))

   params.append(starutil.ParNDG("OUT", "The output map", default=None,
                                  exists=False, minsize=0, maxsize=1 ))

   params.append(starutil.Par0I("NITER", "No. of iterations to perform",
                                0, noprompt=True))

   params.append(starutil.Par0F("PIXSIZE", "Pixel size (arcsec)", None,
                                 maxval=1000, minval=0.01))

   params.append(starutil.Par0S("CONFIG", "Map-maker tuning parameters",
                                "^$STARLINK_DIR/share/smurf/dimmconfig.lis"))

   params.append(starutil.ParNDG("ITERMAP", "Output cube holding itermaps",
Example #2
   tiledir = os.getenv( 'JSA_TILE_DIR' )
   if tiledir:
      msg_out( "Tiles will be read from {0}".format(tiledir) )
   else:
      msg_out( "Environment variable JSA_TILE_DIR is not set!" )
      msg_out( "Tiles will be read from the current directory ({0})".format(os.getcwd()) )

#  Create an empty list to hold the NDFs for the tiles holding the
#  required data.
   tilendf = []
   itilelist = []

#  Identify the tiles that overlap the specified region, and loop round
#  them.
   invoke("$SMURF_DIR/tilelist region={0} instrument={1}".format(region,instrument) )
   for itile in starutil.get_task_par( "tiles", "tilelist" ):

#  Get information about the tile, including the 2D spatial pixel index
#  bounds of its overlap with the required Region.
      invoke("$SMURF_DIR/tileinfo itile={0} instrument={1} "
             "target={2}".format(itile,instrument,region) )

#  Skip this tile if it does not exist (i.e. is empty).
      if starutil.get_task_par( "exists", "tileinfo" ):

#  Get the 2D spatial pixel index bounds of the part of the master tile that
#  overlaps the required region.
         tlbnd = starutil.get_task_par( "tlbnd", "tileinfo" )
         tubnd = starutil.get_task_par( "tubnd", "tileinfo" )

#  Get the path to the tile's master NDF.
Example #3
#  Process each NDF holding cleaned data created by sc2concat.
   for path in glob.glob("s*_con_res_cln.sdf"):
      base = path[:-16]

#  Get a copy of the cleaned data but with PAD samples trimmed from start
#  and end.
      tmp1 = NDG(1)
      tmp2 = NDG(1)
      invoke("$KAPPA_DIR/nomagic {0} {1} 0".format(path,tmp1) )
      invoke("$KAPPA_DIR/qualtobad {0} {1} PAD".format(tmp1,tmp2))
      invoke("$KAPPA_DIR/ndfcopy {0} {1} trimbad=yes".format(tmp2,tmp1))

#  Note the bounds of the used (i.e. non-PAD) time slices.
      invoke("$KAPPA_DIR/ndftrace {0} quiet".format(tmp1))
      tlo = starutil.get_task_par( "lbound(3)", "ndftrace" )
      thi = starutil.get_task_par( "ubound(3)", "ndftrace" )
      ntslice = thi - tlo + 1

#  Note the number of bolometers (should always be 1280).
      nx = starutil.get_task_par( "dims(1)", "ndftrace" )
      ny = starutil.get_task_par( "dims(2)", "ndftrace" )
      nbolo = nx*ny

#  Reshape the cleaned data from 3D to 2D.
      val = NDG(1)
      invoke("$KAPPA_DIR/reshape {0} out={1} shape=\[{2},{3}\]".format(tmp1,val,nbolo,ntslice))

#  Extract the quality array into a separate NDF.
      fla = NDG(1)
      invoke("$KAPPA_DIR/ndfcopy {0} comp=qual out={1}".format(val,fla))
Example #4
def remove_corr(ins, masks):
    """

   Masks the supplied set of Q or U images and then looks for and removes
   correlated components in the background regions.

   Invocation:
      result = remove_corr( ins, masks )

   Arguments:
      ins = NDG
         An NDG object specifying a group of Q or U images from which
         correlated background components are to be removed.
      masks = NDG
         An NDG object specifying a corresponding group of Q or U images
         in which source pixels are bad. These are only used to mask the
         images specified by "in". It should have the same size as "in".

   Returned Value:
      A new NDG object containing the group of corrected Q or U images.

   """

    #  How many NDFs are we processing?
    nndf = len(ins)

    #  Blank out sources by copying the bad pixels from "mask" into "in". We
    #  refer to "q" below, but the same applies whether processing Q or U.
    msg_out("   masking...")
    qm = NDG(ins)
    invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(ins, masks, qm))

    #  Find the most correlated pair of images. We use the basic correlation
    #  coefficient calculated by kappa:scatter for this.
    msg_out("   Finding most correlated pair of images...")
    cmax = 0
    for i in range(0, nndf - 1):
        for j in range(i + 1, nndf):
            invoke("$KAPPA_DIR/scatter in1={0} in2={1} device=!".format(
                qm[i], qm[j]))
            c = starutil.get_task_par("corr", "scatter")
            if abs(c) > abs(cmax):
                cmax = c
                cati = i
                catj = j

    if abs(cmax) < 0.3:
        msg_out("   No correlated images found!")
        return ins

    msg_out("   Correlation for best pair of images = {0}".format(cmax))

    #  Find images that are reasonably correlated to the pair found above,
    #  and coadd them to form a model for the correlated background
    #  component. Note, the holes left by the masking are filled in by the
    #  coaddition using background data from other images.
    msg_out("   Forming model...")

    #  Form the average of the two most correlated images, first normalising
    #  them to a common scale so that they both have equal weight.
    norm = "{0}/norm".format(NDG.tempdir)
    if not normer(qm[cati], qm[catj], 0.3, norm):
        norm = qm[cati]

    mslist = NDG([qm[catj], norm])
    ave = "{0}/ave".format(NDG.tempdir)
    invoke(
        "$CCDPACK_DIR/makemos in={0} method=mean genvar=no usevar=no out={1}".
        format(mslist, ave))

    #  Loop round each image finding the correlation factor of the image and
    #  the above average image.
    temp = "{0}/temp".format(NDG.tempdir)
    nlist = []
    ii = 0
    for i in range(0, nndf):
        c = blanker(qm[i], ave, temp)

        #  If the correlation is high enough, normalize the image to the average
        #  image and then include the normalised image in the list of images to be
        #  coadded to form the final model.
        if abs(c) > 0.3:
            tndf = "{0}/t{1}".format(NDG.tempdir, ii)
            ii += 1
            invoke(
                "$KAPPA_DIR/normalize in1={1} in2={2} out={0} device=!".format(
                    tndf, temp, ave))
            nlist.append(tndf)

    if ii == 0:
        msg_out("   No secondary correlated images found!")
        return ins

    msg_out(
        "   Including {0} secondary correlated images in the model.".format(
            ii))

    #  Coadd the images created above to form the model of the correlated
    #  background component. Fill any remaining bad pixels with artificial data.
    model = "{0}/model".format(NDG.tempdir)
    included = NDG(nlist)
    invoke(
        "$CCDPACK_DIR/makemos in={0} method=mean usevar=no genvar=no out={1}".
        format(included, temp))
    invoke("$KAPPA_DIR/fillbad in={1} variance=no out={0} size=10 niter=10".
           format(model, temp))

    #  Now estimate how much of the model is present in each image and remove it.
    msg_out("   Removing model...")
    temp2 = "{0}/temp2".format(NDG.tempdir)
    qnew = NDG(ins)
    nbetter = 0
    for i in range(0, nndf):

        #  Try to normalise the model to the current image. This fails if the
        #  correlation between them is too low.
        if normer(model, qm[i], 0.3, temp):

            #  Remove the scaled model from the image.
            invoke("$KAPPA_DIR/sub in1={0} in2={1} out={2}".format(
                ins[i], temp, temp2))

            #  We now check that removing the correlated background component has in
            #  fact made the image flatter (poor fits etc can mean that images that
            #  are poorly correlated to the model have a large amount of model
            #  removed and so make the image less flat). Find the standard deviation
            #  of the data in the original image and in the corrected image.
            invoke("$KAPPA_DIR/stats {0} quiet".format(ins[i]))
            oldsig = get_task_par("sigma", "stats")

            invoke("$KAPPA_DIR/stats {0} quiet".format(temp2))
            newsig = get_task_par("sigma", "stats")

            #  If the correction has made the image flatter, copy it to the returned NDG.
            if newsig < oldsig:
                nbetter += 1
                invoke("$KAPPA_DIR/ndfcopy in={1} out={0}".format(
                    qnew[i], temp2))
            else:
                invoke("$KAPPA_DIR/ndfcopy in={0} out={1}".format(
                    ins[i], qnew[i]))

#  If the input image is poorly correlated to the model, return the input
#  image unchanged.
        else:
            invoke("$KAPPA_DIR/ndfcopy in={0} out={1}".format(ins[i], qnew[i]))

    msg_out("   {0} out of {1} images have been improved.".format(
        nbetter, nndf))

    #  Return the corrected images.
    return qnew
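A brief usage sketch (not part of the original script; the wildcarded file names are hypothetical, and NDG and msg_out are assumed to be imported as in the surrounding code):

    # Matching groups of Q images and source masks.
    qin = NDG("qdata_*")
    qmasks = NDG("qmask_*")

    # Remove correlated background components and report the result.
    qout = remove_corr(qin, qmasks)
    msg_out("Corrected images: {0}".format(qout))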
Example #5
      elif cval == "DAS":
         instrument = "DAS"

#  If so, set the default for the INSTRUMENT parameter and prevent the
#  user being prompted for a value.
   if instrument is not None:
      parsys["INSTRUMENT"].default = instrument
      parsys["INSTRUMENT"].noprompt = True

#  Get the chosen instrument.
   instrument = parsys["INSTRUMENT"].value
   instrument = starutil.shell_quote( instrument )

#  Get a list of the tiles that overlap the supplied NDF.
   invoke( "$SMURF_DIR/jsatilelist in={0} instrument={1} quiet".format(inndf,instrument) )
   tiles = starutil.get_task_par( "TILES", "jsatilelist" )

#  JSADICER requires the input array to be gridded on the JSA all-sky
#  pixel grid. This is normally an HPX projection, but if the supplied
#  NDF straddles a discontinuity in the HPX projection then we need to
#  use a different flavour of HPX (either an HPX projection centred on
#  RA=12h or an XPH (polar HEALPix) projection centred on the north or
#  south pole). The above call to jsatileinfo will have determined the
#  appropriate projection to use, so get it.
   proj = starutil.get_task_par( "PROJ", "jsatilelist" )

#  Create a file holding the FITS-WCS header for the first tile, using
#  the type of projection determined above.
   head = "{0}/header".format(NDG.tempdir)
   invoke( "$SMURF_DIR/jsatileinfo itile={0} instrument={1} header={2} "
           "proj={3} quiet".format(tiles[0],instrument,head,proj) )
Example #6
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "Template POL2 time series NDFs",
                                 starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                       default=Parameter.UNSET)))
   params.append(starutil.Par0S("OUT", "Output simulated POL2 data"))
   params.append(starutil.Par0L("NEWART", "Create new artificial I, Q and U maps?" ))
   params.append(starutil.ParNDG("ARTI", "Artificial I map", maxsize=1 ))
   params.append(starutil.ParNDG("ARTQ", "Artificial Q map", maxsize=1 ))
   params.append(starutil.ParNDG("ARTU", "Artificial U map", maxsize=1 ))
   params.append(starutil.ParNDG("INCOM", "Non-POL2 data files to define COM",
                                 None, noprompt=True ))
   params.append(starutil.Par0F("COMVAL", "Constant common mode value (pW)",
                                 0.0, noprompt=True ))
   params.append(starutil.Par0S("RESTART", "Restart using old files?", None,
                                 noprompt=True))
   params.append(starutil.Par0L("RETAIN", "Retain temporary files?", False,
                                 noprompt=True))
   params.append(starutil.Par0F("IPEAK", "Peak total instensity in "
                                "artificial I map (pW)", 0.08, True ))
Example #7
def pca( indata, ncomp ):
   """

   Identifies and returns the strongest PCA components in a 3D NDF.

   Invocation:
      result = pca( indata, ncomp )

   Arguments:
      indata = NDG
         An NDG object specifying a single 3D NDF. Each plane in the cube
         is a separate image, and the images are compared using PCA.
      ncomp = int
         The number of PCA components to include in the returned NDF.

   Returned Value:
      A new NDG object containing a single 3D NDF containing just the
      strongest "ncomp" PCA components found in the input NDF.

   """

   msg_out( "   finding strongest {0} components using Principal Component Analysis...".format(ncomp) )

#  Get the shape of the input NDF.
   invoke( "$KAPPA_DIR/ndftrace {0} quiet".format(indata) )
   nx = get_task_par( "dims(1)", "ndftrace" )
   ny = get_task_par( "dims(2)", "ndftrace" )
   nz = get_task_par( "dims(3)", "ndftrace" )

#  Fill any bad pixels.
   tmp = NDG(1)
   invoke( "$KAPPA_DIR/fillbad in={0} out={1} variance=no niter=10 size=10".format(indata,tmp) )

#  Read the planes from the supplied NDF. Note, numpy axis ordering is the
#  reverse of starlink axis ordering. We want a numpy array consisting of
#  "nz" elements, each being a vectorised form of a plane from the 3D NDF.
   ndfdata = numpy.reshape( Ndf( tmp[0] ).data, (nz,nx*ny) )

#  Normalize each plane to a mean of zero and standard deviation of 1.0
   means = []
   sigmas = []
   newdata = []
   for iplane in range(0,nz):
      plane = ndfdata[ iplane ]
      mn = plane.mean()
      sg = math.sqrt( plane.var() )
      means.append( mn )
      sigmas.append( sg )

      if sg > 0.0:
         newdata.append( (plane-mn)/sg )

   newdata= numpy.array( newdata )

#  Transpose as required by MDP.
   pcadata = numpy.transpose( newdata )

#  Find the required number of PCA components (these are the strongest
#  components).
   pca = mdp.nodes.PCANode( output_dim=ncomp )
   comp = pca.execute( pcadata )

#  Re-project the components back into the space of the input 3D NDF.
   ip = numpy.dot( comp, pca.get_recmatrix() )

#  Transpose the array so that each row is an image.
   ipt = numpy.transpose(ip)

#  Normalise them back to the original scales.
   jplane = 0
   newdata = []
   for iplane in range(0,nz):
      if sigmas[ iplane ] > 0.0:
         newplane = sigmas[ iplane ] * ipt[ jplane ] + means[ iplane ]
         jplane += 1
      else:
         newplane = ndfdata[ iplane ]
      newdata.append( newplane )
   newdata= numpy.array( newdata )

#  Dump the re-projected images out to a 3D NDF.
   result = NDG(1)
   indf = ndf.open( result[0], 'WRITE', 'NEW' )
   indf.new('_DOUBLE', 3, numpy.array([1,1,1]),numpy.array([nx,ny,nz]))
   ndfmap = indf.map( 'DATA', '_DOUBLE', 'WRITE' )
   ndfmap.numpytondf( newdata )
   indf.annul()

#  Uncomment to dump the components.
#   msg_out( "Dumping PCA comps to {0}-comps".format(result[0]) )
#   compt = numpy.transpose(comp)
#   indf = ndf.open( "{0}-comps".format(result[0]), 'WRITE', 'NEW' )
#   indf.new('_DOUBLE', 3, numpy.array([1,1,1]),numpy.array([nx,ny,ncomp]))
#   ndfmap = indf.map( 'DATA', '_DOUBLE', 'WRITE' )
#   ndfmap.numpytondf( compt )
#   indf.annul()

   return result
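A minimal usage sketch (illustrative only; "cube" names a hypothetical 3D NDF, and NDG and msg_out are assumed to be available as in the function above):

   # Keep only the five strongest components of a cube of images.
   cube = NDG("cube")
   strongest = pca(cube, 5)
   msg_out("PCA-filtered cube written to {0}".format(strongest[0]))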
Example #8
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input POL2 time series NDFs",
                                 starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                       default=Parameter.UNSET)))

   params.append(starutil.ParNDG("Q", "The output Q intensity map",
                                 default=None, exists=False, minsize=1,
                                 maxsize=1 ))

   params.append(starutil.ParNDG("U", "The output Q intensity map",
                                 default=None, exists=False, minsize=1,
                                 maxsize=1 ))

   params.append(starutil.ParNDG("IREF", "The reference I map", default=None,
                                 noprompt=True, minsize=0, maxsize=1 ))

   params.append(starutil.Par0S("CONFIG", "Map-maker tuning parameters",
                                "def", noprompt=True))
Example #9
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input POL2 data",
                                 get_task_par("DATA_ARRAY","GLOBAL",
                                              default=Parameter.UNSET)))

   params.append(starutil.ParNDG("OUT", "The output total intensity map",
                                 default=None, exists=False, minsize=1,
                                 maxsize=1 ))

   params.append(starutil.Par0S("CONFIG", "Map-maker tuning parameters",
                                "def", noprompt=True))

   params.append(starutil.Par0F("PIXSIZE", "Pixel size (arcsec)", None,
                                 maxval=1000, minval=0.01, noprompt=True))

   params.append(starutil.Par0S("QUDIR", "Directory in which to save new "
                                "Q, U and I time series", None, noprompt=True))

   params.append(starutil.Par0S("MAPDIR", "Directory in which to save new "
Example #10
def match( ref, imasked, fwhm1=4, fwhm2=100 ):

#  To avoid creating hundreds of temp NDFs, re-use the same ones for each
#  FWHM.
   lof = NDG(1)
   hif = NDG(1)
   iscaled = NDG(1)
   residuals = NDG(1)

#  Create a logarithmically spaced list of 5 FWHM values, in pixels,
#  between the supplied upper and lower FWHM limits. Try each smoothing FWHM
#  in turn, finding the one that gives the best match (i.e. lowest RMS
#  residuals) between high-pass filtered ref image and new I map. On each pass,
#  low frequencies are removed from the ref image using the current FWHM,
#  and the filtered ref image is compared to the new I map (allowing for
#  a degradation in FCF).
   minrms = 1.0E30
   result = (0.0,0.0)
   previous_fwhm = -1
   fwhm1_next = -1
   fwhm2_next = 0
   for fwhm in np.logspace( math.log10(fwhm1), math.log10(fwhm2), 5 ):

#  If required, record the current FWHM value as the upper limit for this
#  function on the next level of recursion.
      if fwhm2_next == -1:
         fwhm2_next = fwhm

#  If an error occurs estimating the RMS for a specific FWHM, ignore the
#  FWHM and pass on to the next.
      try:

#  High-pass filter the ref image by smoothing it with a Gaussian of the
#  current FWHM and then subtracting off the smoothed version.
         invoke("$KAPPA_DIR/gausmooth in={0} out={1} fwhm={2}".
                format( ref, lof, fwhm ))
         invoke("$KAPPA_DIR/sub in1={0} in2={1} out={2}".
                format( ref, lof, hif ))

#  We will now use kappa:normalize to do a least squares fit between the
#  pixel values in the filtered ref image and the corresponding pixel values
#  in the new I map. This gives us the FCF degradation factor for the I
#  map (the gradient of the fit), and scales the I map so that it has the same
#  normalisation as the ref map. The scaling information is in the high
#  data values (the source regions), and the fitting process will be
#  confused if we include lots of background noise regions, so we use the
#  masked I map instead of the full I map. We also tell kappa:normalize
#  to use only pixels that have a ref value above 2 times the noise value
#  in ref map (to exclude any noise pixels that have been included in the
#  masked I map). So first find the maximum value in the filtered ref map
#  (the upper data limit for kappa:normalize).
         invoke( "$KAPPA_DIR/stats ndf={0}".format(hif) )
         highlimit = float( get_task_par( "MAXIMUM", "stats" ) )

#  Get the noise level in the filtered ref map. This gives us the lower
#  data limit for kappa:normalize. The filtered ref map has no low
#  frequencies and so will be basically flat, so we can just use the standard
#  deviation of the pixel values as the noise. But we do 3 iterations of
#  sigma clipping to exclude the bright source regions.
         invoke( "$KAPPA_DIR/stats ndf={0} clip=\[3,3,3\]".format(hif) )
         noise = float( get_task_par( "SIGMA", "stats" ) )

#  Now use kappa:normalise to do the fit, using only ref values between
#  lowlimit and highlimit. The slope gives the FCF degradation factor,
#  and the offset indicates the difference in bowling between the filtered
#  ref map and the I map (we do not use the offset).
         invoke( "$KAPPA_DIR/normalize in1={0} in2={1} out={2} device=! "
                 "datarange=\[{3},{4}\]".format(imasked,hif,iscaled,2*noise,
                                                highlimit))
         degfac = float( get_task_par( "SLOPE", "normalize" ) )

#  Now we have a version of the I map that is scaled so that it looks
#  like the filtered ref map. Get the residuals between the filtered ref
#  map and the scaled I map. Turn these residuals into SNR values by dividing
#  them by the noise level in the filtered ref map, and then get the RMS
#  of the residuals. We convert the residuals to SNR values because, if the
#  ref map and I map were identical, heavier filtering would reduce the
#  noise, and thus the RMS of the residuals. We want to minimise the RMS
#  of the residuals, and so without conversion to SNR, the minimum would
#  always be found at the heaviest possible filtering.
         invoke( "$KAPPA_DIR/maths exp=\"'(ia-ib)/pa'\" ia={0} ib={1} pa={2} out={3}".
                 format(hif,iscaled,noise,residuals))

#  Get the RMS of the residuals.
         invoke( "$KAPPA_DIR/stats ndf={0}".format(residuals) )
         mean = float( get_task_par( "MEAN", "stats" ) )
         sigma = float( get_task_par( "SIGMA", "stats" ) )
         rms = math.sqrt( mean*mean + sigma*sigma )

#  If this is the lowest RMS found so far, remember it - together with
#  the FWHM and degradation factor.
         if rms < minrms:
            minrms = rms
            result = (degfac,fwhm)
            fwhm1_next = previous_fwhm
            fwhm2_next = -1

#  If an error occurs estimating the RMS for a specific FWHM, ignore the
#  FWHM and pass on to the next.
      except starutil.AtaskError:
         pass

#  Otherwise report progress for this FWHM.
      else:
         msg_out("   Smoothing with FWHM = {0} pixels gives RMS = {1}".format(fwhm,rms))

#  Record the current FWHM value for use on the next pass.
      previous_fwhm = fwhm

#  If the range of FWHM values used by this invocation is greater than 1,
#  invoke this function recursively to find the best FWHM within a smaller
#  range centred on the best FWHM.
   if minrms < 1.0E30 and (fwhm2 - fwhm1) > 1:
      if fwhm1_next <= 0:
         fwhm1_next = fwhm1
      if fwhm2_next <= 0:
         fwhm2_next = fwhm2
      result = match( ref, imasked, fwhm1_next, fwhm2_next )


   return result
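A hedged usage sketch (not from the source; "refmap" and "imasked" are assumed to be the names of existing NDFs):

   # Search FWHMs between 4 and 100 pixels for the best match, then
   # unpack the returned (degradation factor, FWHM) tuple.
   degfac, best_fwhm = match("refmap", "imasked", fwhm1=4, fwhm2=100)
   msg_out("Degradation factor {0} at FWHM {1} pixels".format(degfac, best_fwhm))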
Example #11
    print "where band is: 450 or 850"
    print "and observation-date has the form: YYYYMMDD"
    sys.exit(0)

# print "band={0}".format(band)

# Get WNFACT value and nFrames from data file
wnfact = float(starutil.get_fits_header(indata, "WNFACT"))
# print "wnfact={0}".format(wnfact)
nFrames = int(starutil.get_fits_header(indata, "MIRSTOP")) + 1
# print "nFrames={0}".format(nFrames)

# Gather statistics on the central region of the input spectrum
# We are interested in the z position of the maximum pixel value (peak)
instats = invoke("$KAPPA_DIR/stats ndf={0} quiet".format(indata))
maxpos = starutil.get_task_par("MAXPOS", "stats")
maxposz = maxpos[2]
# print "maxposz={0}".format(maxposz)

# Calculate the band pass frames centered on the peak
if band == "SCUBA2_850":
    wnlbound = 11.2
    wnubound = 12.2
else:
    if band == "SCUBA2_450":
        wnlbound = 22.1
        wnubound = 23.3
# print "wnlbound={0}".format(wnlbound)
# print "wnubound={0}".format(wnubound)
bandwidth = wnubound - wnlbound
# print "bandwidth={0}".format(bandwidth)
Example #12
    #  Get the name of any report file to create.
    report = parsys["REPORT"].value

    #  Create an empty list to hold the lines of the report.
    report_lines = []

    #  Use kappa:ndfcompare to compare the main NDFs holding the map data
    #  array. Include a check that the root ancestors of the two maps are the
    #  same. Always create a report file so we can echo it to the screen.
    report0 = os.path.join(NDG.tempdir, "report0")
    invoke("$KAPPA_DIR/ndfcompare in1={0} in2={1} report={2} skiptests=! "
           "accdat=0.3v accvar=1E-3 quiet".format(in1, in2, report0))

    #  See if any differences were found. If so, append the lines of the
    #  report to the report_lines list.
    similar = starutil.get_task_par("similar", "ndfcompare")
    if not similar:
        with open(report0) as f:
            report_lines.extend(f.readlines())

#  Now compare the WEIGHTS extension NDF (no need for the root ancestor
#  check since it's already been done).
    report1 = os.path.join(NDG.tempdir, "report1")
    invoke("$KAPPA_DIR/ndfcompare in1={0}.more.smurf.weights accdat=1E-3 "
           "in2={1}.more.smurf.weights report={2} quiet".format(
               in1, in2, report1))

    #  See if any differences were found. If so, append the report to any
    #  existing report.
    if not starutil.get_task_par("similar", "ndfcompare"):
        similar = False
Example #13
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. If no value is supplied on
#  the command line, the user is prompted for a value when the parameter
#  value is first accessed within this script. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input time series NDFs",
                                  starutil.get_task_par("DATA_ARRAY",
                                                        "GLOBAL",
                                                    default=starutil.Parameter.UNSET)))

   params.append(starutil.ParNDG("OUT", "The output map", default=None,
                                  exists=False, minsize=0, maxsize=1 ))

   params.append(starutil.Par0I("NITER", "No. of iterations to perform",
                                0, noprompt=True))

   params.append(starutil.Par0F("PIXSIZE", "Pixel size (arcsec)", None,
                                 maxval=1000, minval=0.01))

   params.append(starutil.Par0S("CONFIG", "Map-maker tuning parameters",
                                "^$STARLINK_DIR/share/smurf/dimmconfig.lis"))

   params.append(starutil.ParNDG("ITERMAP", "Output cube holding itermaps",
Example #14
    #  Declare the script parameters. Their positions in this list define
    #  their expected position on the script command line. They can also be
    #  specified by keyword on the command line. No validation of default
    #  values or values supplied on the command line is performed until the
    #  parameter value is first accessed within the script, at which time the
    #  user is prompted for a value if necessary. The parameters "MSG_FILTER",
    #  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
    #  constructor.
    params = []

    params.append(
        starutil.ParNDG(
            "IN", "The input NDFs",
            starutil.get_task_par("DATA_ARRAY",
                                  "GLOBAL",
                                  default=Parameter.UNSET)))

    params.append(
        starutil.ParChoice("INSTRUMENT",
                           ["SCUBA-2(450)", "SCUBA-2(850)", "ACSIS", "DAS"],
                           "The JCMT instrument", "SCUBA-2(850)"))

    params.append(
        starutil.Par0L("JSA", "Are the input NDFs on the JSA "
                       "all-sky pixel grid?",
                       True,
                       noprompt=True))

    params.append(
        starutil.Par0L("RETAIN",
Example #15
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input POL2 time series NDFs",
                                 starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                       default=Parameter.UNSET)))

   params.append(starutil.Par0S("CAT", "The output FITS vector catalogue",
                                 "out.FIT"))

   params.append(starutil.ParNDG("IREF", "The reference total flux map", default=None,
                                 help="Enter a null (!) to use an artifical total flux map",
                                 minsize=0, maxsize=1 ))

   params.append(starutil.ParNDG("PI", "The output polarised intensity map",
                                 default=None, exists=False, minsize=0, maxsize=1 ))

   params.append(starutil.ParChoice("PLOT", ["P","PI"], "Quantity to define "
                                 "lengths of plotted vectors", None,
                                 noprompt=True))
Example #16

try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input POL2 time series NDFs",
                                 starutil.get_task_par("DATA_ARRAY","GLOBAL")))

   params.append(starutil.Par0S("CAT", "The output FITS vector catalogue",
                                 "out.FIT"))

   params.append(starutil.ParNDG("IREF", "The reference total flux map", default=None,
                                 help="Enter a null (!) to use an artifical total flux map",
                                 minsize=0, maxsize=1 ))

   params.append(starutil.ParNDG("PI", "The output polarised intensity map",
                                 default=None, exists=False, minsize=0, maxsize=1 ))

   params.append(starutil.ParChoice("PLOT", ["P","PI"], "Quantity to define "
                                 "lengths of plotted vectors", None,
                                 noprompt=True))
Example #17
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input POL2 time series NDFs",
                                 starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                       default=Parameter.UNSET)))

   params.append(starutil.ParNDG("Q", "The output Q intensity map",
                                 default=None, exists=False, minsize=1,
                                 maxsize=1 ))

   params.append(starutil.ParNDG("U", "The output Q intensity map",
                                 default=None, exists=False, minsize=1,
                                 maxsize=1 ))

   params.append(starutil.Par0S("CAT", "The output FITS vector catalogue",
                                 default=None, noprompt=True))

   params.append(starutil.ParNDG("IPREF", "Reference map defining IP correction",
                                 default=None, noprompt=True, minsize=0, maxsize=1 ))
Example #18
#  Delete all temporary files created by the script, unless the script's
#  RETAIN parameter indicates that they are to be retained. Also delete
#  the script's temporary ADAM directory.
def cleanup():
   global retain
   ParSys.cleanup()
   if retain:
      msg_out( "Retaining temporary files in {0}".format(NDG.tempdir))
   else:
      NDG.cleanup()

#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
for zz in range(1):
    params = []
    params.append(starutil.ParNDG("IN", "The input POL2 time series NDFs (with the dome open and calibrator out)",
                                  starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                        default=Parameter.UNSET)))
    params.append(starutil.Par0S("OUT", "The output .sdf file containing the IPT parameters",
                                 "ipdata"))
    params.append(starutil.ParNDG("DomeClosedCalIn", "The input POL2 time series NDFs (with the dome closed and calibrator in)",
                                  starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                        default=Parameter.UNSET)))
    params.append(starutil.ParNDG("DomeClosedCalOut", "The input POL2 time series NDFs (with the dome closed and calibrator out)",
                                  starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                        default=Parameter.UNSET)))
    params.append(starutil.ParNDG("DomeOpenCalIn", "The input POL2 time series NDFs (with the dome open and the calibrator in)",
                                  starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                        default=Parameter.UNSET)))
    params.append(starutil.Par0F("NSIGMA", "No. of standard deviations at "
                                 "which to clip spikes", 3.0, noprompt=True))
    params.append(starutil.Par0S("CONFIG", "The cleaning config",
                                 "^$STARLINK_DIR/share/smurf/dimmconfig.lis",
Example #19
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input NDFs",
                                 starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                       default=Parameter.UNSET)))

   params.append(starutil.ParChoice("INSTRUMENT",
                                    ["SCUBA-2(450)", "SCUBA-2(850)", "ACSIS",
                                    "DAS"],
                                    "The JCMT instrument", "SCUBA-2(850)"))

   params.append(starutil.Par0L("JSA", "Are the input NDFs on the JSA "
                                "all-sky pixel grid?", True, noprompt=True ) )

   params.append(starutil.Par0L("RETAIN", "Retain temporary files?", False,
                                 noprompt=True))

#  Initialise the parameters to hold any values supplied on the command
#  line.
   parsys = ParSys( params )
Example #20
#  Make maps from the Q and U time streams. These Q and U values are with
#  respect to the focal plane Y axis, and use (az,el) as the WCS axes. Set
#  CROTA to zero to ensure that the Y axis corresponds to elevation.
         mapfile = "{0}/qmap.sdf".format(obsdir)
         if not os.path.exists( mapfile ) or newpixsize:
            qts = NDG( "{0}/*_QT".format( obsdir ) )
            qmap = NDG( mapfile, False )
            invoke("$SMURF_DIR/makemap in={0} config=^{1} out={2} {3} "
                   "system=azel crota=0".format(qts,conf,qmap,pixsizepar))
         else:
            qmap = NDG( mapfile, True )
            msg_out("Re-using pre-calculated Q map for {0}.".format(obs))

         invoke("$KAPPA_DIR/ndftrace ndf={0} quiet".format(qmap) )
         actpixsize = float( get_task_par( "fpixscale(1)", "ndftrace" ) )
         if actpixsize0 is None:
            actpixsize0 = actpixsize
         elif actpixsize != actpixsize0:
            raise UsageError( "{0} had pixel size {1} - was expecting {2}".
                              format(qmap,actpixsize,actpixsize0))


         mapfile = "{0}/umap.sdf".format(obsdir)
         if not os.path.exists( mapfile ) or newpixsize:
            uts = NDG( "{0}/*_UT".format( obsdir ) )
            umap = NDG( mapfile, False )
            invoke("$SMURF_DIR/makemap in={0} config=^{1} out={2} {3} "
                   "system=azel crota=0".format(uts,conf,umap,pixsizepar))
         else:
            umap = NDG( mapfile, True )
Example #21
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input POL2 time series NDFs",
                                 starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                       default=Parameter.UNSET)))

   params.append(starutil.ParNDG("Q", "The output Q intensity map",
                                 default=None, exists=False, minsize=1,
                                 maxsize=1 ))

   params.append(starutil.ParNDG("U", "The output Q intensity map",
                                 default=None, exists=False, minsize=1,
                                 maxsize=1 ))

   params.append(starutil.Par0S("CAT", "The output FITS vector catalogue",
                                 default=None, noprompt=True))

   params.append(starutil.ParNDG("IPREF", "Reference map defining IP correction",
                                 default=None, noprompt=True, minsize=0, maxsize=1 ))
Example #22
        msg_out("Tiles will be read from {0}".format(tiledir))
    else:
        msg_out("Environment variable JSA_TILE_DIR is not set!")
        msg_out("Tiles will be read from the current directory ({0})".format(
            os.getcwd()))

#  Create an empty list to hold the NDFs for the tiles holding the
#  required data.
    tilendf = []
    itilelist = []

    #  Identify the tiles that overlap the specified region, and loop round
    #  them.
    invoke("$SMURF_DIR/tilelist region={0} instrument={1}".format(
        region, instrument))
    for itile in starutil.get_task_par("tiles", "tilelist"):

        #  Get information about the tile, including the 2D spatial pixel index
        #  bounds of its overlap with the required Region.
        invoke("$SMURF_DIR/tileinfo itile={0} instrument={1} "
               "target={2}".format(itile, instrument, region))

        #  Skip this tile if it does not exist (i.e. is empty).
        if starutil.get_task_par("exists", "tileinfo"):

            #  Get the 2D spatial pixel index bounds of the part of the master tile that
            #  overlaps the required region.
            tlbnd = starutil.get_task_par("tlbnd", "tileinfo")
            tubnd = starutil.get_task_par("tubnd", "tileinfo")

            #  Get the path to the tile's master NDF.
Example #23
                instrument = "DAS"

#  If so, set the default for the INSTRUMENT parameter and prevent the
#  user being prompted for a value.
        if instrument is not None:
            parsys["INSTRUMENT"].default = instrument
            parsys["INSTRUMENT"].noprompt = True

#  Get the chosen instrument.
        instrument = parsys["INSTRUMENT"].value
        instrument = starutil.shell_quote(instrument)

        #  Get a list of the tiles that overlap the Region.
        invoke("$SMURF_DIR/jsatilelist in={0} instrument={1} quiet".format(
            region, instrument))
        tiles = starutil.get_task_par("TILES", "jsatilelist")

        #  List them.
        for tile in tiles:
            msg_out("Tile {0} touches {1}".format(tile, indata))

#  Remove temporary files.
    cleanup()

#  If a StarUtilError of any kind occurred, display the message but hide the
#  Python traceback. To see the traceback, uncomment "raise" instead.
except starutil.StarUtilError as err:
    #  raise
    print(err)
    cleanup()
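Several of these examples share the same outer skeleton: declare the parameters, do the work inside a try block, and always clean up on the way out, hiding the Python traceback for expected errors. A condensed sketch of that pattern (assuming the usual starutil imports used by these scripts):

    import starutil
    from starutil import ParSys, NDG, msg_out

    retain = False

    #  Delete temporary files, unless RETAIN says to keep them.
    def cleanup():
        global retain
        ParSys.cleanup()
        if retain:
            msg_out("Retaining temporary files in {0}".format(NDG.tempdir))
        else:
            NDG.cleanup()

    try:
        params = []
        #  ... append parameter declarations here ...
        parsys = ParSys(params)
        retain = parsys["RETAIN"].value

        #  ... main processing ...

        cleanup()

    #  Display the message but hide the Python traceback.
    except starutil.StarUtilError as err:
        print(err)
        cleanup()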
Example #24
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. If no value is supplied on
#  the command line, the user is prompted for a value when the parameter
#  value is first accessed within this script. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "The input time series NDFs",
                                  starutil.get_task_par("DATA_ARRAY",
                                                        "GLOBAL",
                                                    default=starutil.Parameter.UNSET)))

   params.append(starutil.ParNDG("OUT", "The output map", default=None,
                                  exists=False, minsize=0, maxsize=1 ))

   params.append(starutil.Par0I("NITER", "No. of iterations to perform",
                                0, noprompt=True))

   params.append(starutil.Par0F("PIXSIZE", "Pixel size (arcsec)", None,
                                 maxval=1000, minval=0.01))

   params.append(starutil.Par0S("CONFIG", "Map-maker tuning parameters",
                                "^$STARLINK_DIR/share/smurf/dimmconfig.lis"))

   params.append(starutil.ParNDG("ITERMAP", "Output cube holding itermaps",
Example #25
            instrument = "DAS"

#  If so, set the default for the INSTRUMENT parameter and prevent the
#  user being prompted for a value.
    if instrument is not None:
        parsys["INSTRUMENT"].default = instrument
        parsys["INSTRUMENT"].noprompt = True

#  Get the chosen instrument.
    instrument = parsys["INSTRUMENT"].value
    instrument = starutil.shell_quote(instrument)

    #  Get a list of the tiles that overlap the supplied NDF.
    invoke("$SMURF_DIR/jsatilelist in={0} instrument={1} quiet".format(
        inndf, instrument))
    tiles = starutil.get_task_par("TILES", "jsatilelist")

    #  JSADICER requires the input array to be gridded on the JSA all-sky
    #  pixel grid. This is normally an HPX projection, but if the supplied
    #  NDF straddles a discontinuity in the HPX projection then we need to
    #  use a different flavour of HPX (either an HPX projection centred on
    #  RA=12h or an XPH (polar HEALPix) projection centred on the north or
    #  south pole). The above call to jsatileinfo will have determined the
    #  appropriate projection to use, so get it.
    proj = starutil.get_task_par("PROJ", "jsatilelist")

    #  Create a file holding the FITS-WCS header for the first tile, using
    #  the type of projection determined above.
    head = "{0}/header".format(NDG.tempdir)
    invoke("$SMURF_DIR/jsatileinfo itile={0} instrument={1} header={2} "
           "proj={3} quiet".format(tiles[0], instrument, head, proj))
Example #26
def blanker(test, model, newtest):
    """

   Blank out pixels in "test" that are not well correlated with "model",
   returning result in newtest.

   Invocation:
      result =  blanker( test, model, newtest )

   Arguments:
      test = string
         The name of an existing NDF.
      model = string
         The name of an existing NDF.
      newtest = string
         The name of an NDF to be created.

   Returned Value:
      A value between +1 and -1 indicating the degree of correlation
      between the model and test.

   """

    #  We want statistics of pixels that are present in both test and model,
    #  so first form a mask by adding them together, and then copy bad pixels
    #  from this mask into test and model.
    mask = "{0}/mask".format(NDG.tempdir)
    tmask = "{0}/tmask".format(NDG.tempdir)
    mmask = "{0}/mmask".format(NDG.tempdir)
    invoke("$KAPPA_DIR/add in1={0} in2={1} out={2}".format(test, model, mask))
    invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(
        test, mask, tmask))
    invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(
        model, mask, mmask))

    #  Get the mean and standard deviation of the remaining pixels in the
    #  test NDF.
    invoke("$KAPPA_DIR/stats {0} clip=\[3,3,3\] quiet".format(tmask))
    tmean = get_task_par("mean", "stats")
    tsigma = get_task_par("sigma", "stats")

    #  Also get the number of good pixels in the mask.
    numgood1 = float(get_task_par("numgood", "stats"))

    #  Get the mean and standard deviation of the remaining pixels in the
    #  model NDF.
    invoke("$KAPPA_DIR/stats {0} clip=\[3,3,3\] quiet".format(mmask))
    mmean = get_task_par("mean", "stats")
    msigma = get_task_par("sigma", "stats")

    #  Normalize them both to have a mean of zero and a standard deviation of
    #  unity.
    tnorm = "{0}/tnorm".format(NDG.tempdir)
    invoke("$KAPPA_DIR/maths exp=\"'(ia-pa)/pb'\" ia={2} pa={0} pb={1} "
           "out={3}".format(tmean, tsigma, tmask, tnorm))

    mnorm = "{0}/mnorm".format(NDG.tempdir)
    invoke("$KAPPA_DIR/maths exp=\"'(ia-pa)/pb'\" ia={2} pa={0} pb={1} "
           "out={3}".format(mmean, msigma, mmask, mnorm))

    #  Find the difference between them.
    diff = "{0}/diff".format(NDG.tempdir)
    invoke("$KAPPA_DIR/sub in1={0} in2={1} out={2}".format(mnorm, tnorm, diff))

    #  Remove pixels that differ by more than 0.5 standard deviations.
    mtmask = "{0}/mtmask".format(NDG.tempdir)
    invoke("$KAPPA_DIR/thresh in={0} thrlo=-0.5 newlo=bad thrhi=0.5 "
           "newhi=bad out={1}".format(diff, mtmask))

    #  See how many pixels remain (i.e. pixels that are very similar in the
    #  test and model NDFs).
    invoke("$KAPPA_DIR/stats {0} quiet".format(mtmask))
    numgood2 = float(get_task_par("numgood", "stats"))

    #  It may be that the two NDFs are anti-correlated. To test for this we
    #  negate the model and do the above test again.
    mnormn = "{0}/mnormn".format(NDG.tempdir)
    invoke("$KAPPA_DIR/cmult in={0} scalar=-1 out={1}".format(mnorm, mnormn))

    diffn = "{0}/diffn".format(NDG.tempdir)
    invoke("$KAPPA_DIR/sub in1={0} in2={1} out={2}".format(
        mnormn, tnorm, diffn))

    mtmaskn = "{0}/mtmaskn".format(NDG.tempdir)
    invoke("$KAPPA_DIR/thresh in={0} thrlo=-0.5 newlo=bad thrhi=0.5 "
           "newhi=bad out={1}".format(diffn, mtmaskn))

    invoke("$KAPPA_DIR/stats {0} quiet".format(mtmaskn))
    numgood2n = float(get_task_par("numgood", "stats"))

    #  If we get more similar pixels by negating the model, the NDFs are
    #  anti-correlated.
    if numgood2n > numgood2:

        #  Take a copy of the supplied test NDF, masking out pixels that are not
        #  anti-similar to the corresponding model pixels.
        invoke("$KAPPA_DIR/copybad in={0} ref={2} out={1}".format(
            test, newtest, mtmaskn))

        #  The returned correlation factor is the ratio of the number of
        #  anti-similar pixels to the total number of pixels which the two NDFs
        #  have in common. But if there is not much difference between the number
        #  of similar and anti-similar pixels, we assume there is no correlation.
        if numgood2n > 1.4 * numgood2:
            res = -(numgood2n / numgood1)
        else:
            res = 0.0

#  If we get more similar pixels without negating the model, the NDFs are
#  correlated. Do the equivalent to the above.
    else:
        invoke("$KAPPA_DIR/copybad in={0} ref={2} out={1}".format(
            test, newtest, mtmask))
        if numgood2 > 1.4 * numgood2n:
            res = numgood2 / numgood1
        else:
            res = 0.0

#  If there are very few good pixels in common return zero correlation.
    if numgood1 < 150:
        res = 0.0

#  Return the correlation factor.
    return res
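A short usage sketch (illustrative; the NDF names are hypothetical, and NDG.tempdir is assumed to exist as in the surrounding code):

    # Blank out test pixels that are uncorrelated with the model, and
    # get the correlation factor.
    newtest = "{0}/newtest".format(NDG.tempdir)
    c = blanker("testmap", "modelmap", newtest)
    if c == 0.0:
        msg_out("No significant correlation found.")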
Example #27
            parsys[
                "CENTRE1"].prompt = "Galactic longitude at centre of required circle"
            parsys[
                "CENTRE2"].prompt = "Galactic latitude at centre of required circle"

        centre1 = parsys["CENTRE1"].value
        if centre1 is not None:
            centre2 = parsys["CENTRE2"].value
            radius = parsys["RADIUS"].value

            frame = NDG.tempfile()
            invoke("$ATOOLS_DIR/astskyframe \"'system={0}'\" {1}".format(
                system, frame))

            invoke("$ATOOLS_DIR/astunformat {0} 1 {1}".format(frame, centre1))
            cen1 = starutil.get_task_par("DVAL", "astunformat")
            invoke("$ATOOLS_DIR/astunformat {0} 2 {1}".format(frame, centre2))
            cen2 = starutil.get_task_par("DVAL", "astunformat")

            region = NDG.tempfile()
            invoke(
                "$ATOOLS_DIR/astcircle {0} 1 \[{1},{2}\] {3} ! ! {4}".format(
                    frame, cen1, cen2, math.radians(radius / 60.0), region))

#  If a Region was supplied, note that we do not yet have the coordinates
#  of the centre of the required region, and note if the Region is defined
#  by an NDF.
    else:
        try:
            invoke("$KAPPA_DIR/ndftrace {0} quiet".format(region))
            region_is_ndf = True
Example #28
         elif cval == "DAS":
            instrument = "DAS"

#  If so, set the default for the INSTRUMENT parameter and prevent the
#  user being prompted for a value.
      if instrument is not None:
         parsys["INSTRUMENT"].default = instrument
         parsys["INSTRUMENT"].noprompt = True

#  Get the chosen instrument.
      instrument = parsys["INSTRUMENT"].value
      instrument = starutil.shell_quote( instrument )

#  Get a list of the tiles that overlap the Region.
      invoke( "$SMURF_DIR/jsatilelist in={0} instrument={1} quiet".format(region,instrument) )
      tiles = starutil.get_task_par( "TILES", "jsatilelist" )

#  List them.
      for tile in tiles:
         msg_out( "Tile {0} touches {1}".format(tile, indata))

#  Remove temporary files.
   cleanup()

#  If a StarUtilError of any kind occurred, display the message but hide the
#  Python traceback. To see the traceback, uncomment "raise" instead.
except starutil.StarUtilError as err:
#  raise
   print( err )
   cleanup()
Example #29
def remove_corr( ins, masks ):
   """

   Masks the supplied set of Q or U images and then looks for and removes
   correlated components in the background regions.

   Invocation:
      result = remove_corr( ins, masks )

   Arguments:
      ins = NDG
         An NDG object specifying a group of Q or U images from which
         correlated background components are to be removed.
      masks = NDG
         An NDG object specifying a corresponding group of Q or U images
         in which source pixels are bad. These are only used to mask the
         images specified by "in". It should have the same size as "in".

   Returned Value:
      A new NDG object containing the group of corrected Q or U images.

   """

#  How many NDFs are we processing?
   nndf = len( ins )

#  Blank out sources by copying the bad pixels from "mask" into "in". We
#  refer to "q" below, but the same applies whether processing Q or U.
   msg_out( "   masking...")
   qm = NDG( ins )
   invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(ins,masks,qm) )

#  Find the most correlated pair of images. We use the basic correlation
#  coefficient calculated by kappa:scatter for this.
   msg_out( "   Finding most correlated pair of images...")
   cmax = 0
   for i in range(0,nndf-1):
      for j in range(i + 1,nndf):
         invoke( "$KAPPA_DIR/scatter in1={0} in2={1} device=!".format(qm[i],qm[j]) )
         c = starutil.get_task_par( "corr", "scatter" )
         if abs(c) > abs(cmax):
            cmax = c
            cati = i
            catj = j

   if abs(cmax) < 0.3:
      msg_out("   No correlated images found!")
      return ins

   msg_out( "   Correlation for best pair of images = {0}".format( cmax ) )

#  Find images that are reasonably correlated to the pair found above,
#  and coadd them to form a model for the correlated background
#  component. Note, the holes left by the masking are filled in by the
#  coaddition using background data from other images.
   msg_out( "   Forming model...")

#  Form the average of the two most correlated images, first normalising
#  them to a common scale so that they both have equal weight.
   norm = "{0}/norm".format(NDG.tempdir)
   if not normer( qm[cati], qm[catj], 0.3, norm ):
      norm = qm[cati]

   mslist = NDG( [ qm[catj], norm ] )
   ave = "{0}/ave".format(NDG.tempdir)
   invoke( "$CCDPACK_DIR/makemos in={0} method=mean genvar=no usevar=no out={1}".format(mslist,ave) )

#  Loop round each image finding the correlation factor of the image and
#  the above average image.
   temp = "{0}/temp".format(NDG.tempdir)
   nlist = []
   ii = 0
   for i in range(0,nndf):
      c = blanker( qm[i], ave, temp )

#  If the correlation is high enough, normalize the image to the average
#  image and then include the normalised image in the list of images to be
#  coadded to form the final model.
      if abs(c) > 0.3:
         tndf = "{0}/t{1}".format(NDG.tempdir,ii)
         ii += 1
         invoke( "$KAPPA_DIR/normalize in1={1} in2={2} out={0} device=!".format(tndf,temp,ave))
         nlist.append( tndf )

   if ii == 0:
      msg_out("   No secondary correlated images found!")
      return ins

   msg_out("   Including {0} secondary correlated images in the model.".format(ii) )

#  Coadd the images created above to form the model of the correlated
#  background component. Fill any remaining bad pixels with artificial data.
   model = "{0}/model".format(NDG.tempdir)
   included = NDG( nlist )
   invoke( "$CCDPACK_DIR/makemos in={0} method=mean usevar=no genvar=no out={1}".format( included, temp ) )
   invoke( "$KAPPA_DIR/fillbad in={1} variance=no out={0} size=10 niter=10".format(model,temp) )

#  Now estimate how much of the model is present in each image and remove it.
   msg_out("   Removing model...")
   temp2 = "{0}/temp2".format(NDG.tempdir)
   qnew = NDG(ins)
   nbetter = 0
   for i in range(0,nndf):

#  Try to normalise the model to the current image. This fails if the
#  correlation between them is too low.
      if normer( model, qm[i], 0.3, temp ):

#  Remove the scaled model from the image.
         invoke( "$KAPPA_DIR/sub in1={0} in2={1} out={2}".format(ins[i],temp,temp2) )

#  We now check that removing the correlated background component has in
#  fact made the image flatter (poor fits etc. can mean that images that
#  are poorly correlated to the model have a large amount of model
#  removed, making them less flat). Find the standard deviation of the
#  data in the original image and in the corrected image.
         invoke( "$KAPPA_DIR/stats {0} quiet".format(ins[i]) )
         oldsig = get_task_par( "sigma", "stats" )

         invoke( "$KAPPA_DIR/stats {0} quiet".format(temp2) )
         newsig = get_task_par( "sigma", "stats" )

#  If the correction has made the image flatter, copy it to the returned NDG.
         if newsig < oldsig:
            nbetter += 1
            invoke( "$KAPPA_DIR/ndfcopy in={1} out={0}".format(qnew[i],temp2) )
         else:
            invoke( "$KAPPA_DIR/ndfcopy in={0} out={1}".format(ins[i],qnew[i]) )

#  If the input image is poorly correlated to the model, return the input
#  image unchanged.
      else:
         invoke( "$KAPPA_DIR/ndfcopy in={0} out={1}".format(ins[i],qnew[i]) )

   msg_out( "   {0} out of {1} images have been improved.".format(nbetter,nndf) )

#  Return the corrected images.
   return qnew
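
#  For reference, the O(N*N) pairwise search above can be prototyped in
#  pure numpy. This is a hedged sketch, not the script's method: it
#  assumes the masked images have been read into a list of 2-D arrays
#  with NaN marking bad pixels, whereas the script gets the correlation
#  coefficient from kappa:scatter.
import itertools
import numpy as np

def most_correlated_pair( images ):
   """Return (i, j, c) for the pair of images with the largest |corr|."""
   best = ( None, None, 0.0 )
   for i, j in itertools.combinations( range( len(images) ), 2 ):
      a = images[i].ravel()
      b = images[j].ravel()
      good = np.isfinite( a ) & np.isfinite( b )   # shared good pixels only
      if good.sum() < 2:
         continue
      c = np.corrcoef( a[good], b[good] )[0, 1]
      if abs( c ) > abs( best[2] ):
         best = ( i, j, c )
   return best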
Exemplo n.º 30
0
#  Catch any exception so that we can always clean up, even if control-C
#  is pressed.
try:

#  Declare the script parameters. Their positions in this list define
#  their expected position on the script command line. They can also be
#  specified by keyword on the command line. No validation of default
#  values or values supplied on the command line is performed until the
#  parameter value is first accessed within the script, at which time the
#  user is prompted for a value if necessary. The parameters "MSG_FILTER",
#  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
#  constructor.
   params = []

   params.append(starutil.ParNDG("IN", "Template POL2 time series NDFs",
                                 starutil.get_task_par("DATA_ARRAY","GLOBAL",
                                                       default=Parameter.UNSET)))
   params.append(starutil.Par0S("OUT", "Output simulated POL2 data"))
   params.append(starutil.Par0L("NEWART", "Create new artificial I, Q and U maps?" ))
   params.append(starutil.ParNDG("ARTI", "Artificial I map", maxsize=1 ))
   params.append(starutil.ParNDG("ARTQ", "Artificial Q map", maxsize=1 ))
   params.append(starutil.ParNDG("ARTU", "Artificial U map", maxsize=1 ))
   params.append(starutil.ParNDG("INCOM", "Non-POL2 data files to define COM",
                                 None ))
   params.append(starutil.Par0S("RESTART", "Restart using old files?", None,
                                 noprompt=True))
   params.append(starutil.Par0L("RETAIN", "Retain temporary files?", False,
                                 noprompt=True))
   params.append(starutil.Par0F("IPEAK", "Peak total instensity in "
                                "artificial I map (pW)", 0.08, True ))
   params.append(starutil.Par0F("IFWHM", "Width of source in artificial I "
                                "map (pixels)", 8, True ))
Exemplo n.º 31
0
def blanker( test, model, newtest ):
   """

   Blank out pixels in "test" that are not well correlated with "model",
   returning result in newtest.

   Invocation:
      result =  blanker( test, model, newtest )

   Arguments:
      test = string
         The name of an existing NDF.
      model = string
         The name of an existing NDF.
      newtest = string
         The name of an NDF to be created.

   Returned Value:
      A value between +1 and -1 indicating the degree of correlation
      between the model and test.

   """

#  We want statistics of pixels that are present in both test and model,
#  so first form a mask by adding them together, and then copy bad pixels
#  from this mask into both test and model.
   mask = "{0}/mask".format(NDG.tempdir)
   tmask = "{0}/tmask".format(NDG.tempdir)
   mmask = "{0}/mmask".format(NDG.tempdir)
   invoke( "$KAPPA_DIR/add in1={0} in2={1} out={2}".format(test,model,mask) )
   invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(test,mask,tmask) )
   invoke( "$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(model,mask,mmask) )

#  Get the mean and standard deviation of the remaining pixels in the
#  test NDF.
   invoke( "$KAPPA_DIR/stats {0} clip=\[3,3,3\] quiet".format(tmask) )
   tmean = get_task_par( "mean", "stats" )
   tsigma = get_task_par( "sigma", "stats" )

#  Also get the number of good pixels in the mask.
   numgood1 = float( get_task_par( "numgood", "stats" ) )

#  Get the mean and standard deviation of the remaining pixels in the
#  model NDF.
   invoke( "$KAPPA_DIR/stats {0} clip=\[3,3,3\] quiet".format(mmask) )
   mmean = get_task_par( "mean", "stats" )
   msigma = get_task_par( "sigma", "stats" )

#  Normalize them both to have a mean of zero and a standard deviation of
#  unity.
   tnorm = "{0}/tnorm".format(NDG.tempdir)
   invoke( "$KAPPA_DIR/maths exp='(ia-pa)/pb' ia={2} pa={0} pb={1} "
           "out={3}".format(tmean,tsigma,tmask,tnorm))

   mnorm = "{0}/mnorm".format(NDG.tempdir)
   invoke( "$KAPPA_DIR/maths exp='(ia-pa)/pb' ia={2} pa={0} pb={1} "
           "out={3}".format(mmean,msigma,mmask,mnorm))

#  Find the difference between them.
   diff = "{0}/diff".format(NDG.tempdir)
   invoke( "$KAPPA_DIR/sub in1={0} in2={1} out={2}".format(mnorm,tnorm,diff) )

#  Remove pixels that differ by more than 0.5 standard deviations.
   mtmask = "{0}/mtmask".format(NDG.tempdir)
   invoke( "$KAPPA_DIR/thresh in={0} thrlo=-0.5 newlo=bad thrhi=0.5 "
           "newhi=bad out={1}".format(diff,mtmask) )

#  See how many pixels remain (i.e. pixels that are very similar in the
#  test and model NDFs).
   invoke( "$KAPPA_DIR/stats {0} quiet".format(mtmask) )
   numgood2 = float( get_task_par( "numgood", "stats" ) )

#  It may be that the two NDFs are anti-correlated. To test for this we
#  negate the model and do the above test again.
   mnormn = "{0}/mnormn".format(NDG.tempdir)
   invoke( "$KAPPA_DIR/cmult in={0} scalar=-1 out={1}".format(mnorm,mnormn) )

   diffn = "{0}/diffn".format(NDG.tempdir)
   invoke( "$KAPPA_DIR/sub in1={0} in2={1} out={2}".format(mnormn,tnorm,diffn ))

   mtmaskn = "{0}/mtmaskn".format(NDG.tempdir)
   invoke( "$KAPPA_DIR/thresh in={0} thrlo=-0.5 newlo=bad thrhi=0.5 "
           "newhi=bad out={1}".format(diffn,mtmaskn) )

   invoke( "$KAPPA_DIR/stats {0} quiet".format(mtmaskn) )
   numgood2n = float( get_task_par( "numgood", "stats" ) )

#  If we get more similar pixels by negating the model, the NDFs are
#  anti-correlated.
   if numgood2n > numgood2:

#  Take a copy of the supplied test NDF, masking out pixels that are not
#  anti-similar to the corresponding model pixels.
      invoke( "$KAPPA_DIR/copybad in={0} ref={2} out={1}".format(test,newtest,mtmaskn) )

#  The returned correlation factor is the ratio of the number of
#  anti-similar pixels to the total number of pixels which the two NDFs
#  have in common. But if there is not much difference between the number
#  of similar and anti-similar pixels, we assume there is no correlation.
      if numgood2n > 1.4*numgood2:
         res = -(numgood2n/numgood1)
      else:
         res = 0.0

#  If we get more similar pixels without negating the model, the NDFs are
#  correlated. Do the equivalent to the above.
   else:
      invoke( "$KAPPA_DIR/copybad in={0} ref={2} out={1}".format(test,newtest,mtmask) )
      if numgood2 > 1.4*numgood2n:
         res = numgood2/numgood1
      else:
         res = 0.0

#  If there are very few good pixels in common, return zero correlation.
   if numgood1 < 150:
      res = 0.0

#  Return the correlation factor.
   return res
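
#  The counting logic above can be prototyped in pure numpy. This is a
#  hedged sketch, not the script's method (which delegates the masking
#  and statistics to KAPPA tasks): it assumes both arrays have already
#  been standardised to zero mean and unit standard deviation, with NaN
#  marking bad pixels.
import numpy as np

def correlation_fraction( test, model, thresh=0.5, minpix=150 ):
   good = np.isfinite( test ) & np.isfinite( model )
   ngood = good.sum()
   if ngood < minpix:
      return 0.0                          # too few pixels in common
   nsim = np.sum( np.abs( model[good] - test[good] ) < thresh )
   nanti = np.sum( np.abs( -model[good] - test[good] ) < thresh )
   if nanti > 1.4*nsim:
      return -float( nanti )/ngood        # anti-correlated
   elif nsim > 1.4*nanti:
      return float( nsim )/ngood          # correlated
   else:
      return 0.0                          # no clear correlation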
Exemplo n.º 32
0
      qin = inqui.filter("'\.Q$'" )
      uin = inqui.filter("'\.U$'" )
      iin = inqui.filter("'\.I$'" )

#  If not supplied, try again using INQ, INU and INI (i.e. scan & spin
#  data).
   else:
      qin = parsys["INQ"].value
      uin = parsys["INU"].value
      iin = parsys["INI"].value

#  Check they are all in units of pW.
      for quilist in (qin,uin,iin):
         for sdf in quilist:
            invoke("$KAPPA_DIR/ndftrace ndf={0} quiet".format(sdf) )
            units = starutil.get_task_par( "UNITS", "ndftrace" ).replace(" ", "")
            if units != "pW":
               raise starutil.InvalidParameterError("All supplied I, Q and U "
                    "maps must be in units of 'pW', but '{0}' has units '{1}'.".
                    format(sdf,units))

#  Now get the PI value to use.
   pimap = parsys["PI"].value

#  Now get the QUI value to use.
   qui = parsys["QUI"].value

#  Get the output catalogue now to avoid a long wait before the user gets
#  prompted for it.
   outcat = parsys["CAT"].value
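
#  The pW units check used above recurs in these scripts; a minimal
#  reusable sketch built from the same starutil helpers (the function
#  name is illustrative):
   def assert_units_pw( ndfs ):
      """Raise InvalidParameterError unless every NDF is in units of pW."""
      for sdf in ndfs:
         invoke( "$KAPPA_DIR/ndftrace ndf={0} quiet".format(sdf) )
         units = starutil.get_task_par( "UNITS", "ndftrace" ).replace(" ", "")
         if units != "pW":
            raise starutil.InvalidParameterError(
                  "'{0}' has units '{1}', but 'pW' is required.".format(sdf,units) )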
Exemplo n.º 33
0
      invoke( "$CUPID_DIR/makeclumps angle=\[0,0\] beamfwhm=0 deconv=no "
              "fwhm1=\[{0},0\] fwhm2=\[{0},0\] lbnd=\[1,1\] ubnd=\[{1},{1}\] "
              "model={2} nclump={3} out={4} outcat={5} pardist=normal "
              "peak = \[{6},0\] rms={7} trunc=0.1".
               format(clump_fwhm,npix,model,nclump_target,out,outcat,
                      peak_value,noise) )

#  Run fellwalker on the data.
      mask = NDG(1)
      outcat_fw = NDG.tempfile(".fit")
      invoke( "$CUPID_DIR/findclumps config=def deconv=no in={0} "
              "method=fellwalker out={1} outcat={2} rms={3}".
               format(out,mask,outcat_fw,noise) )

#  Get the number of clumps found by FellWalker.
      nfw = starutil.get_task_par( "nclumps", "findclumps" )
      if nfw > 0:

#  See how many of the clump peaks found by FellWalker match real clumps to
#  within 0.2 pixels.
         text = invoke( "stilts tmatch2 matcher=2d params={2} "
                        "in1={0} ifmt1=fits values1='Peak1 Peak2' "
                        "in2={1} ifmt2=fits values2='Peak1 Peak2' "
                        "scorecol=sep ocmd='keepcols sep' "
                        "omode=stats".format( outcat, outcat_fw,
                                              0.5*clump_fwhm ),aslist=True )

         ok = False
         for line in text:
            match = re.match( r'columns:\s*(\d+)\s*rows:\s*(\d+)', line )
            if match:
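#  (Excerpt truncated.) A plausible continuation, shown only as an
#  illustrative sketch: the second regex group holds the stilts row
#  count, i.e. the number of FellWalker peaks matched to real clumps
#  within the matching radius.
               nmatch = int( match.group(2) )
               ok = ( nmatch > 0 )
               break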
Exemplo n.º 34
0
#  Get the name of any report file to create.
   report = parsys["REPORT"].value

#  Create an empty list to hold the lines of the report.
   report_lines = []

#  Use kappa:ndfcompare to compare the main NDFs holding the map data
#  array. Include a check that the root ancestors of the two maps are the
#  same. Always create a report file so we can echo it to the screen.
   report0 = os.path.join(NDG.tempdir,"report0")
   invoke( "$KAPPA_DIR/ndfcompare in1={0} in2={1} report={2} skiptests=! "
           "accdat=0.1v accvar=1E-4 quiet".format(in1,in2,report0) )

#  See if any differences were found. If so, append the lines of the
#  report to the report_lines list.
   similar = starutil.get_task_par( "similar", "ndfcompare" )
   if not similar:
      with open(report0) as f:
         report_lines.extend( f.readlines() )

#  Now compare the WEIGHTS extension NDF (no need for the root-ancestor
#  check since it has already been done).
   report1 = os.path.join(NDG.tempdir,"report1")
   invoke( "$KAPPA_DIR/ndfcompare in1={0}.more.smurf.weights accdat=1E-4 "
           "in2={1}.more.smurf.weights report={2} quiet".format(in1,in2,report1) )

#  See if any differences were found. If so, append the report to any
#  existing report.
   if not starutil.get_task_par( "similar", "ndfcompare" ):
      similar = False
      report_lines.append("\n\n{0}\n   Comparing WEIGHTS arrays....\n".format("-"*80))
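#  (Excerpt truncated.) A hedged sketch of the presumable continuation,
#  mirroring the main-report handling above: append the WEIGHTS report
#  lines, then echo any accumulated differences to the screen.
      with open(report1) as f:
         report_lines.extend( f.readlines() )

   for line in report_lines:
      msg_out( line.rstrip() )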
Exemplo n.º 35
0
    if retain:
        msg_out("Retaining temporary files in {0}".format(NDG.tempdir))
    else:
        NDG.cleanup()


#  Declare the script parameters inside a single-pass loop, used here as
#  a breakable block in place of the usual try/except wrapper that lets
#  the script always clean up, even if control-C is pressed.
for zz in range(1):
    params = []
    params.append(
        starutil.ParNDG(
            "IN",
            "The input POL2 time series NDFs (with the dome open and calibrator out)",
            starutil.get_task_par("DATA_ARRAY",
                                  "GLOBAL",
                                  default=Parameter.UNSET)))
    params.append(
        starutil.Par0S("OUT",
                       "The output .sdf file containing the IPT parameters",
                       "ipdata"))
    params.append(
        starutil.ParNDG(
            "DomeClosedCalIn",
            "The input POL2 time series NDFs (with the dome closed and calibrator in)",
            starutil.get_task_par("DATA_ARRAY",
                                  "GLOBAL",
                                  default=Parameter.UNSET)))
    params.append(
        starutil.ParNDG(
            "DomeClosedCalOut",
Exemplo n.º 36
0
    #  Declare the script parameters. Their positions in this list define
    #  their expected position on the script command line. They can also be
    #  specified by keyword on the command line. No validation of default
    #  values or values supplied on the command line is performed until the
    #  parameter value is first accessed within the script, at which time the
    #  user is prompted for a value if necessary. The parameters "MSG_FILTER",
    #  "ILEVEL", "GLEVEL" and "LOGFILE" are added automatically by the ParSys
    #  constructor.
    params = []

    params.append(
        starutil.ParNDG(
            "IN", "Template POL2 time series NDFs",
            starutil.get_task_par("DATA_ARRAY",
                                  "GLOBAL",
                                  default=Parameter.UNSET)))
    params.append(starutil.Par0S("OUT", "Output simulated POL2 data"))
    params.append(
        starutil.Par0L("NEWART", "Create new artificial I, Q and U maps?"))
    params.append(
        starutil.ParChoice("ARTFORM", ("0", "1"),
                           "Form of artificial I, Q and U to create",
                           "0",
                           noprompt=True))
    params.append(starutil.ParNDG("ARTI", "Artificial I map", maxsize=1))
    params.append(starutil.ParNDG("ARTQ", "Artificial Q map", maxsize=1))
    params.append(starutil.ParNDG("ARTU", "Artificial U map", maxsize=1))
    params.append(
        starutil.ParNDG("INCOM",
                        "Non-POL2 data files to define COM",
Exemplo n.º 37
0
def get_filtered_skydip_data(qarray, uarray, clip, a):
    """

    This function takes Q and U array data (output from calcqu), applies
    ffclean to remove spikes, and returns the results as numpy arrays.
    It borrows heavily from pol2cat.py (2015A).

    Invocation:
        ( qdata_total,qvar_total,udata_total,uvar_total,elevation,opacity_term,bad_pixel_ref ) = ...
            get_filtered_skydip_data(qarray,uarray,clip,a)

    Arguments:
        qarray = An NDF of Q array data (output from calcqu).
        uarray = An NDF of U array data (output from calcqu).
        clip = The sigma cut for ffclean.
           a = A string indicating the array (e.g. 'S8A').

    Returned Value:
        qdata_total = A numpy array with the cleaned qarray data.
        qvar_total = A numpy array with the qarray variance data.
        udata_total = A numpy array with the cleaned uarray data.
        uvar_total = A numpy array with the uarray variance data.
        elevation = A numpy array with the elevation data.
        opacity_term = A numpy array with the opacity brightness term
            (1-exp(-tau*air_mass)), where tau is calculated using the WVM
            data as input.
        bad_pixel_ref = An NDG identifying pixels that are bad in either
            the Q or U data.

    """

    #  Remove spikes from the Q images for the current subarray. The cleaned
    #  NDFs are written to temporary NDFs specified by the new NDG object
    #  "qff", which inherits its size from the existing group "qarray".
    msg_out("Removing spikes from {0} bolometer Q values...".format(a))
    qff = NDG(qarray)
    qff.comment = "qff"
    invoke("$KAPPA_DIR/ffclean in={0} out={1} genvar=yes box=3 clip=\[{2}\]".
           format(qarray, qff, clip))

    #  Remove spikes from the U images for the current subarray. The cleaned
    #  NDFs are written to temporary NDFs specified by the new NDG object
    #  "uff", which inherits its size from the existing group "uarray".
    msg_out("Removing spikes from {0} bolometer U values...".format(a))
    uff = NDG(uarray)
    uff.comment = "uff"
    invoke("$KAPPA_DIR/ffclean in={0} out={1} genvar=yes box=3 clip=\[{2}\]".
           format(uarray, uff, clip))

    elevation = []
    opacity_term = []
    for stare in range(len(qff[:])):
        # Stack Q data in numpy array
        # Get elevation information
        elevation.append(
            numpy.array(
                float(
                    invoke(
                        "$KAPPA_DIR/fitsmod ndf={0} edit=print keyword=ELSTART"
                        .format(qff[stare])))))
        # Get Tau (Opacity) information
        tau_temp = numpy.array(
            float(
                invoke(
                    "$KAPPA_DIR/fitsmod ndf={0} edit=print keyword=WVMTAUST".
                    format(qff[stare]))))
        # Convert to obs band.
        if '4' in a:
            tau_temp = 19.04 * (tau_temp - 0.018)  # Eq from Dempsey et al
        elif '8' in a:
            tau_temp = 5.36 * (tau_temp - 0.006)  # Eq from Dempsey et al.
        opacity_term.append(1 -
                            numpy.exp(-1 * tau_temp /
                                      numpy.sin(numpy.radians(elevation[-1]))))
        invoke("$KAPPA_DIR/ndftrace {0} quiet".format(qff[stare]))
        nx = get_task_par("dims(1)", "ndftrace")
        ny = get_task_par("dims(2)", "ndftrace")
        qdata_temp = numpy.reshape(Ndf(qff[stare]).data, (ny, nx))
        qdata_temp[numpy.abs(qdata_temp) > 1e300] = numpy.nan
        if stare == 0:
            qdata_total = qdata_temp
        else:
            qdata_total = numpy.dstack((qdata_total, qdata_temp))
        qvar_temp = numpy.reshape(Ndf(qff[stare]).var, (ny, nx))
        #  Mask Starlink bad values in the variance as NaN.
        qvar_temp[numpy.abs(qvar_temp) > 1e300] = numpy.nan
        if stare == 0:
            qvar_total = qvar_temp
        else:
            qvar_total = numpy.dstack((qvar_total, qvar_temp))
        # Stack U data in numpy array
        invoke("$KAPPA_DIR/ndftrace {0} quiet".format(uff[stare]))
        nx = get_task_par("dims(1)", "ndftrace")
        ny = get_task_par("dims(2)", "ndftrace")
        udata_temp = numpy.reshape(Ndf(uff[stare]).data, (ny, nx))
        udata_temp[numpy.abs(udata_temp) > 1e300] = numpy.nan
        if stare == 0:
            udata_total = udata_temp
        else:
            udata_total = numpy.dstack((udata_total, udata_temp))
        uvar_temp = numpy.reshape(Ndf(uff[stare]).var, (ny, nx))
        #  Mask Starlink bad values in the variance as NaN.
        uvar_temp[numpy.abs(uvar_temp) > 1e300] = numpy.nan
        if stare == 0:
            uvar_total = uvar_temp
        else:
            uvar_total = numpy.dstack((uvar_total, uvar_temp))

    # Create bad pixel reference.
    bad_pixel_ref = NDG(1)
    invoke("$KAPPA_DIR/copybad in={0} ref={1} out={2}".format(
        qff, uff, bad_pixel_ref))
    return (qdata_total, qvar_total, udata_total, uvar_total, elevation,
            opacity_term, bad_pixel_ref)
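
#  The WVM-tau scaling embedded in the loop above is worth isolating for
#  clarity. A minimal sketch using the same Dempsey et al. coefficients;
#  the function name and interface are illustrative, not part of the
#  original script (numpy is assumed to be imported, as elsewhere here).
def wvm_tau_to_opacity_term(tau_wvm, elevation_deg, band):
    """Return 1 - exp(-tau_band/sin(el)) for band '450' or '850'."""
    if band == '450':
        tau_band = 19.04 * (tau_wvm - 0.018)   # Dempsey et al. (450 um)
    elif band == '850':
        tau_band = 5.36 * (tau_wvm - 0.006)    # Dempsey et al. (850 um)
    else:
        raise ValueError("Unknown band: {0}".format(band))
    airmass_term = 1.0 / numpy.sin(numpy.radians(elevation_deg))
    return 1.0 - numpy.exp(-tau_band * airmass_term)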
Exemplo n.º 39
0
    print "and observation-date has the form: YYYYMMDD"
    sys.exit(0)


# print "band={0}".format(band)

# Get WNFACT value and nFrames from data file
wnfact = float(starutil.get_fits_header(indata, "WNFACT"))
# print "wnfact={0}".format(wnfact)
nFrames = int(starutil.get_fits_header(indata, "MIRSTOP")) + 1
# print "nFrames={0}".format(nFrames)

# Gather statistics on the central region of the input spectrum.
# We are interested in the z position of the maximum pixel value (peak).
instats = invoke("$KAPPA_DIR/stats ndf={0} quiet".format(indata))
maxpos = starutil.get_task_par("MAXPOS", "stats")
maxposz = maxpos[2]
# print "maxposz={0}".format(maxposz)

# Calculate the band pass frames centered on the peak
if band == "SCUBA2_850":
    wnlbound = 11.2
    wnubound = 12.2
elif band == "SCUBA2_450":
    wnlbound = 22.1
    wnubound = 23.3
# print "wnlbound={0}".format(wnlbound)
# print "wnubound={0}".format(wnubound)
bandwidth = wnubound - wnlbound
# print "bandwidth={0}".format(bandwidth)
Exemplo n.º 40
0
         parsys["CENTRE1"].prompt = "RA at centre of required circle"
         parsys["CENTRE2"].prompt = "Dec at centre of required circle"
      else:
         parsys["CENTRE1"].prompt = "Galactic longitude at centre of required circle"
         parsys["CENTRE2"].prompt = "Galactic latitude at centre of required circle"

      centre1 = parsys["CENTRE1"].value
      if centre1 is not None:
         centre2 = parsys["CENTRE2"].value
         radius = parsys["RADIUS"].value

         frame = NDG.tempfile()
         invoke( "$ATOOLS_DIR/astskyframe \"'system={0}'\" {1}".format(system,frame) )

         invoke( "$ATOOLS_DIR/astunformat {0} 1 {1}".format(frame,centre1) )
         cen1 = starutil.get_task_par( "DVAL", "astunformat" )
         invoke( "$ATOOLS_DIR/astunformat {0} 2 {1}".format(frame,centre2) )
         cen2 = starutil.get_task_par( "DVAL", "astunformat" )

         region = NDG.tempfile()
         invoke( "$ATOOLS_DIR/astcircle {0} 1 \[{1},{2}\] {3} ! ! {4}".
                 format(frame,cen1,cen2,math.radians(radius/60.0),region) )

#  If a Region was supplied, we do not yet have the coordinates of the
#  centre of the required region; note whether the Region is defined by
#  an NDF.
   else:
      try:
         invoke( "$KAPPA_DIR/ndftrace {0} quiet".format(region) )
         region_is_ndf = True
         ndim = int( starutil.get_task_par( "NDIM", "ndftrace" ) )
Exemplo n.º 41
0
        jout += 1
        outdata = "{0}_{1}.fit".format(outbase, iout)
        msg_out("Creating output FITS file {0}/{1}: {2}".format(
            jout, nout, outdata))

        #  Get a copy of the cleaned data but with PAD samples trimmed from start
        #  and end.
        tmp1 = NDG(1)
        tmp2 = NDG(1)
        invoke("$KAPPA_DIR/nomagic {0} {1} 0".format(path, tmp1))
        invoke("$KAPPA_DIR/qualtobad {0} {1} PAD".format(tmp1, tmp2))
        invoke("$KAPPA_DIR/ndfcopy {0} {1} trimbad=yes".format(tmp2, tmp1))

        #  Note the bounds of the used (i.e. non-PAD) time slices.
        invoke("$KAPPA_DIR/ndftrace {0} quiet".format(tmp1))
        tlo = starutil.get_task_par("lbound(3)", "ndftrace")
        thi = starutil.get_task_par("ubound(3)", "ndftrace")
        ntslice = thi - tlo + 1

        #  Note the number of bolometers (should always be 1280).
        nx = starutil.get_task_par("dims(1)", "ndftrace")
        ny = starutil.get_task_par("dims(2)", "ndftrace")
        nbolo = nx * ny

        #  Reshape the cleaned data from 3D to 2D.
        val = NDG(1)
        invoke("$KAPPA_DIR/reshape {0} out={1} shape=\[{2},{3}\]".format(
            tmp1, val, nbolo, ntslice))

        #  Extract the quality array into a separate NDF.
        fla = NDG(1)