# project-local helper modules assumed to be importable alongside this example
import search_file
import make_header


def request_Parser(request_str):
    ''' process the request and return the service response if possible '''
    # split the request line once instead of re-splitting it for every field
    req_service = request_str.split(' ')[1]
    req_method = request_str.split(' ')[0]
    http_version = request_str.split(' ')[2][0:8]
    print 'the service requested :{}'.format(req_service)
    print 'the request method    :{}'.format(req_method)
    print 'the HTTP version      :{}'.format(http_version)

    # start the serving
    if req_method == 'GET':
        print 'OK, GET method caught'
        if req_service == '/':
            req_service = 'index.html'
            print 'req_service is:', req_service
            res_data = search_file.read_file('.', req_service)
            if res_data == 0:
                res_data = b'<html><body><p> Error 404 File not found</p></body></html>'
                res_header = make_header.make_header(404, '.html')
                return res_header + res_data
            res_header = make_header.make_header(200, req_service)
            return res_header + res_data

        else:
            res_data = search_file.read_file('.', req_service.split('/')[-1])
            if res_data == 0:
                res_data = b'<html><body><p> Error 404 File not found</p></body></html>'
                res_header = make_header.make_header(404, '.html')
                return res_header + res_data
            res_header = make_header.make_header(200, req_service)
            return res_header + res_data

    else:
        print 'only GET is supported; POST and other methods are not accepted'
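
A quick usage sketch for the parser above (illustrative only: the request line below is a sample, and search_file/make_header are the project-local modules this example already relies on):

sample_request = 'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n'
response = request_Parser(sample_request)
if response is not None:
    print 'response starts with:', response[:20]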
Example #2
  def __init__(self, fname):
    hedr = make_header.make_header(fname, write=False, warn=False)
    # fall back to zeros if any required header field is missing
    if "UNKNOWN" in (hedr["TIME_OFFSET"], hedr["N_SCANS"], hedr["INT_TIME"],
                     hedr["LST"], hedr["FREQCENT"]):
      lst = freq = t_offset = n_scans = int_time = 0
    else:
      t_offset = int(hedr["TIME_OFFSET"])
      n_scans = int(hedr["N_SCANS"])
      int_time = int(hedr["INT_TIME"])
      lst = float(hedr["LST"])
      freq = float(hedr["FREQCENT"])

    self.name = fname
    self.start_time = t_offset
    self.end_time = t_offset + n_scans * int_time
    self.scans = n_scans
    self.utc_date = hedr["DATE"]
    self.utc_time = hedr["TIME"]
    self.lst = lst
    self.freq = freq
    self.source = hedr["SOURCE"]
    self.mode = hedr["MODE"]
    self.size = os.path.getsize(fname)

    """
    obs1 = extract_obs_offset_from_name(fname)
    obs2 = extract_obs_offset_in_file(fname)
    if obs1 != obs2 and obs1 != "UNKNOWN" and obs2 !="UNKNOWN":
      print "Consistency Error", fname, ": OBS_OFFSET in file doesn't match the offset in the name"
    """

    if hedr["SOURCE"] == "LEDA_TEST":
      if not is_integer(n_scans): print "CONSISTENCY ERROR, ", fname, "scan:",n_scans, "is not integer"
      if not is_integer((self.end_time-self.start_time)/9.0): print "CONSISTENCY ERROR", fname, ": not 9 sec dump in file"
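
A minimal construction sketch for the __init__ above, assuming it belongs to an observation-file wrapper class (the class name ObsFile and the file name are hypothetical; os, make_header, and is_integer come from the surrounding module):

obs = ObsFile("ledaovro_2014-01-01-00:00_0001.dada")
print obs.name, obs.source, obs.mode
print "start:", obs.start_time, "end:", obs.end_time, "scans:", obs.scans
print "size on disk (bytes):", obs.size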
Example #3
def gbtgridder(args):
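    # imports assumed by this example (a sketch, not confirmed by the snippet):
    # stdlib: os, sys, math, time, warnings; third-party: numpy,
    # scipy.constants as constants, astropy.wcs as wcs, pyfits; plus the
    # module's own helpers (get_data, parse_channels, parse_scans, grid_otf,
    # make_header, set_output_files, get_cube_info, format_scans)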
    if not args.SDFITSfiles:
        return

    verbose = args.verbose
    chanStart, chanStop = parse_channels(args.channels, verbose=verbose)
    if (chanStart is not None and chanStart < 0) or (chanStop is not None
                                                     and chanStop < 0):
        return

    if chanStart is None:
        chanStart = 0

    average = args.average

    minTsys = args.mintsys
    maxTsys = args.maxtsys

    scanlist = args.scans
    if args.scans is not None:
        scanlist = parse_scans(scanlist)

    sdfitsFiles = args.SDFITSfiles
    for sdf in sdfitsFiles:
        if not os.path.exists(sdf):
            if verbose > 1:
                print sdf + ' does not exist'
            return

    # extract everything from the SDFITS files
    # in the long run this needs to change so that only one SDFITS file is opened at a time
    # and a reasonable amount of data is read and then gridded - repeat until done
    # right now, all of the data must be read first, then passed in one call
    # to the gridder.  In that case, there will be 2 passes through the SDFITS files
    # since the full extent of the data on the sky must be known before gridding can start.

    xsky = None
    ysky = None
    wt = None
    data = None
    nchan = None
    frest = None
    faxis = None
    source = None
    dataUnits = None
    calibType = None
    veldef = None
    specsys = None
    coordType = (None, None)
    radesys = None
    equinox = None
    observer = None
    telescop = None
    frontend = None
    dateObs = None
    uniqueScans = None
    ntsysFlagCount = 0
    outputFiles = {}

    if verbose > 3:
        print "Loading data ... "
    for thisFile in sdfitsFiles:
        try:
            if verbose > 3:
                print "   ", thisFile
            dataRecord = get_data(thisFile,
                                  nchan,
                                  chanStart,
                                  chanStop,
                                  average,
                                  scanlist,
                                  minTsys,
                                  maxTsys,
                                  verbose=verbose)
            if dataRecord is None:
                # there was a problem that should not be recovered from
                # reported by get_data, no additional reporting necessary here
                sys.exit(1)

            if len(dataRecord) == 0:
                # empty file, skipping
                continue

            if xsky is None:
                xsky = dataRecord["xsky"]
                ysky = dataRecord["ysky"]
                wt = dataRecord["wt"]
                data = dataRecord["data"]
                nchan = dataRecord["nchan"]
                chanStart = dataRecord["chanStart"]
                chanStop = dataRecord["chanStop"]
                frest = dataRecord["restfreq"]
                faxis = dataRecord["freq"]
                source = dataRecord["source"]
                dataUnits = dataRecord["units"]
                calibType = dataRecord["calibtype"]
                veldef = dataRecord["veldef"]
                specsys = dataRecord["specsys"]
                coordType = (dataRecord["xctype"], dataRecord["yctype"])
                radesys = dataRecord["radesys"]
                equinox = dataRecord["equinox"]
                telescop = dataRecord["telescop"]
                frontend = dataRecord["frontend"]
                observer = dataRecord["observer"]
                dateObs = dataRecord["date-obs"]
                uniqueScans = numpy.unique(dataRecord["scans"])

                # this also checks that the output files are OK to write
                # given the value of the clobber argument
                outputFiles = set_output_files(
                    source,
                    frest,
                    args, ["cube", "weight", "line", "cont"],
                    verbose=verbose)
                if len(outputFiles) == 0:
                    if verbose > 1:
                        print "Unable to write to output files"
                    return

            else:
                xsky = numpy.append(xsky, dataRecord["xsky"])
                ysky = numpy.append(ysky, dataRecord["ysky"])
                wt = numpy.append(wt, dataRecord["wt"])
                data = numpy.append(data, dataRecord["data"], axis=0)
                uniqueScans = numpy.unique(
                    numpy.append(uniqueScans, dataRecord["scans"]))

            ntsysFlagCount += dataRecord["ntsysflag"]

        except AssertionError:
            if verbose > 1:
                print "There was an unexpected problem processing %s" % thisFile
            raise

    if xsky is None:
        if verbose > 1:
            print "No data was found in the input SDFITS files given the data selection options used."
            print "Can not continue."
        return

    if args.restfreq is not None:
        # use the user-supplied rest frequency (MHz), convert to Hz
        frest = args.restfreq * 1.0e6

    # grid_otf.py already sets the weights to 1 if wt=None
    # Added a flag here called --eqweight
    # print args.eqweight
    if args.eqweight is True:
        #if verbose > 1:
        #    print "Setting all weights to 1."
        wt = None

    # characterize the center of the image

    # the beam_fwhm is needed in various places
    # currently we use the same equation used in idlToSdfits
    # there's about a 2% difference between the two

    # this equation comes from Adam's IDL code, where do the 747.6 and 763.8 values come from?
    # beam_fwhm = (747.6+763.8)/2.0/numpy.median(faxis/1.e9)/3600.
    # This is what idlToSdfits does (next 2 lines of code)
    # telescope diameter, in meters
    diam = 100.0
    beam_fwhm = 1.2 * constants.c * (180.0 / constants.pi) / (
        diam * numpy.median(faxis))
    # the 747.6 and 763.8 values above are equivalent to diam of 99.3 and 97.2 m in this equation, respectively
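    # worked example (illustrative numbers): at 1.4 GHz,
    #   beam_fwhm = 1.2 * 2.998e8 * (180/pi) / (100.0 * 1.4e9) ~ 0.147 deg ~ 8.8 arcmin,
    # the expected GBT beam size at L-band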

    refXsky = None
    refYsky = None
    centerYsky = None
    pix_scale = None
    xsize = None
    ysize = None
    refXpix = None
    refYpix = None

    if args.clonecube is not None:
        # use the cloned values
        cubeInfo = get_cube_info(args.clonecube, verbose=verbose)
        if cubeInfo is not None:
            if (cubeInfo["xtype"] != coordType[0]) or \
                    (cubeInfo["ytype"] != coordType[1]) or \
                    (cubeInfo['proj'] != args.proj) or \
                    (radesys is not None and (cubeInfo['radesys'] != radesys)) or \
                    (equinox is not None and (cubeInfo['equinox'] != equinox)):
                if verbose > 2:
                    print "Sky coordinates of data are not the same type found in %s" % args.clonecube
                    print "Will not clone the coordinate information from that cube"
                    if verbose > 4:
                        print "xtype : ", cubeInfo["xtype"], coordType[0]
                        print "ytype : ", cubeInfo["ytype"], coordType[1]
                        print "proj : ", cubeInfo['proj'], args.proj
                        print "radesys : ", cubeInfo['radesys'], radesys
                        print "equinox : ", cubeInfo['equinox'], equinox
            else:
                refXsky = cubeInfo["xref"]
                refYsky = cubeInfo["yref"]
                pix_scale = cubeInfo["pix_scale"]
                xsize = cubeInfo["xsize"]
                ysize = cubeInfo["ysize"]
                refXpix = cubeInfo["xrefPix"]
                refYpix = cubeInfo["yrefPix"]

    # this is needed ONLY when the cube center and size are not given
    # on the command line in one way or another
    centerUnknown = ((refXsky is None or refYsky is None)
                     and args.mapcenter is None)
    sizeUnknown = ((xsize is None or ysize is None) and args.size is None)
    nonZeroXY = None
    if centerUnknown or sizeUnknown:
        # this masks out antenna positions exactly equal to 0.0 - unlikely to happen
        # except when there is no valid antenna pointing for that scan.
        nonZeroXY = (xsky != 0.0) & (ysky != 0.0)

        # watch for the pathological case where there is no good antenna data,
        # which cannot be gridded at all
        if not numpy.any(nonZeroXY):
            # always print this out, independent of verbosity level
            print "All antenna pointings are exactly equal to 0.0, cannot grid this data"
            return

        if verbose > 3 and not numpy.all(nonZeroXY):
            print "%d spectra will be excluded because the antenna pointing is exactly equal to 0.0 on both axes - unlikely to be a valid position" % (
                ~nonZeroXY).sum()

    # need to watch for coordinates near 0/360  OR near +- 180
    # this technique will miss the difficult case of a mixture of +- 180 and 0:360 X coordinates
    # assumes that Y doesn't have this problem, likely is +- 90
    xskyMin = xsky[nonZeroXY].min()
    xskyMax = xsky[nonZeroXY].max()
    newXsky = None

    if (xskyMin > 0.0):
        # all coordinates > 0, watch for 0/360 coordinates
        if (xskyMax - xskyMin) > 180.0:
            # probably a problem, subtract 360 for coordinates > 180.0 so that they run from -180 to +180 continuously through 0.0
            rangeBefore = xskyMax - xskyMin
            xskyMask = xsky > 180.0
            newXsky = xsky.copy()
            newXsky[xskyMask] -= 360.0
    else:
        # some coordinates are < 0, watch for +- 180.0
        # same criteria
        if (xskyMax - xskyMin) > 180.0:
            # probably a problem, add 360 to all negative coordinates so they run from 0 through 360
            rangeBefore = xskyMax - xskyMin
            xskyMask = xsky < 0.0
            newXsky = xsky.copy()
            newXsky[xskyMask] += 360.0
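    # worked example (illustrative): x coordinates of {0.5, 359.5} have a raw
    # range of 359.0 > 180; mapping 359.5 -> -0.5 gives {-0.5, 0.5}, range 1.0,
    # which the improvement test below accepts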

    if newXsky is not None:
        # see if that's an improvement
        newXskyMin = newXsky[nonZeroXY].min()
        newXskyMax = newXsky[nonZeroXY].max()
        if (newXskyMax - newXskyMin) < rangeBefore:
            # this is an improvement, use it
            xsky = newXsky.copy()
            xskyMin = newXskyMin
            xskyMax = newXskyMax

    if refXsky is None:
        if args.mapcenter is not None:
            # use user-supplied value
            refXsky = args.mapcenter[0]
        else:
            # set the reference sky position using the mean x and y positions
            # still need to worry about points clearly off the grid
            #   e.g. a reference position incorrectly included in the data to be gridded.
            #   not sure what an appropriate heuristic for that is

            # idlToSdfits rounds the center from the mean to the nearest second/arcsecond
            # for RA or HA, divide by 15
            if coordType[0] in ['RA', 'HA']:
                refXsky = round(numpy.mean(xsky[nonZeroXY]) * 3600.0 /
                                15) / (3600.0 / 15.0)
            else:
                refXsky = round(numpy.mean(xsky[nonZeroXY]) * 3600.0) / 3600.0

    if refYsky is None:
        if args.mapcenter is not None:
            # use user-supplied value
            refYsky = args.mapcenter[1]
        else:
            # nonZeroXY MUST have already been set above to get here
            # do not check that it's set or set it here
            # assume that the Y coordinate is +- 90 and there's no problem
            # with 360/0 or +- 180 confusion as there may be with the X coordinate
            refYsky = round(numpy.mean(ysky[nonZeroXY]) * 3600.0) / 3600.0

    if pix_scale is None:
        if args.pixelwidth is not None:
            # use user-supplied value, convert to degrees
            pix_scale = args.pixelwidth / 3600.0
        else:
            # find the cell size, first from the beam_fwhm
            # need to decide on the number of cells per beam; Adam's code uses 4, idlToSdfits uses 6
            # idlToSdfits also rounds up to the nearest arcsecond
            pixPerBeam = 6.0
            if args.kernel == "nearest":
                # assume it's nyquist sampled, use 2 pixels per beam
                pixPerBeam = 2.0

            pix_scale = math.ceil(3600.0 * beam_fwhm / pixPerBeam) / 3600.0
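            # worked example (illustrative): a 528 arcsec beam with 6 pixels
            # per beam gives ceil(528/6) = 88 arcsec, i.e. pix_scale ~ 0.0244 deg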

    if xsize is None or ysize is None:
        # set both together
        if args.size is not None:
            # use user-supplied value
            xsize = args.size[0]
            ysize = args.size[1]
        else:
            xRange = xskyMax - xskyMin
            yRange = ysky[nonZeroXY].max() - ysky[nonZeroXY].min()

            # image size, idlToSdfits method
            # padding around border
            # imPadding = math.ceil(45./(pix_scale*3600.0))
            # add in padding and truncate to an integer
            # xsize = int((xRange*1.1/pix_scale)+2*imPadding)
            # ysize = int((yRange*1.1/pix_scale)+2*imPadding)
            # image.py then does this ...
            # xsize = int((2*round(xsize/1.95)) + 20)
            # ysize = int((2*round(ysize/1.95)) + 20)
            # But idlToSdfits only sees one SDFITS file at a time, so the extra padding makes sense there.
            # With all the data, I think just padding by 10% + 20 pixels is sufficient
            xsize = int(math.ceil(xRange * 1.1 / pix_scale)) + 20
            ysize = int(math.ceil(yRange * 1.2 / pix_scale)) + 20

    # used only for informational purposes
    centerYsky = refYsky
    if refXpix is None or refYpix is None:
        # both should be set together or unset together
        if args.proj == "TAN":
            # this is how Adam does things in his IDL code
            # the reference pixel is in the center
            refXpix = xsize / 2.0
            refYpix = ysize / 2.0
        else:
            # must be SFL
            # this is how idlToSdfits+AIPS does things for GLS==SFL
            refXpix = xsize / 2.0
            # for the Y axis, this is where we want refYsky to end up
            centerYpix = ysize / 2.0 + 1.0
            # but by definition refYsky must be 0.0 for SFL, so set refYpix
            # so that the current refYsky ends up at centerYpix
            refYpix = centerYpix - refYsky / pix_scale
            # then reset refYsky
            refYsky = 0.0
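            # worked example (illustrative): ysize = 100, refYsky = 30.0 deg,
            # pix_scale = 0.025 deg gives centerYpix = 51 and
            # refYpix = 51 - 30.0/0.025 = -1149, so the 30 deg row lands on
            # pixel 51 while CRVAL2 is written as 0.0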

    # gaussian size to use in gridding.
    # this is what Adam used:  gauss_fwhm = beam_fwhm/3.0
    # this duplicates the aparm(2)=1.5*cellsize used by AIPS in the default pipeline settings
    # the following is about 0.41*beam_fwhm vs 0.33*beam_fwhm from Adam - so wider
    gauss_fwhm = (1.5 * pix_scale) * 2.354 / math.sqrt(2.0)
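    # worked check: 1.5 * 2.354 / sqrt(2) ~ 2.497, so gauss_fwhm ~ 2.5 * pix_scale;
    # with 6 pixels per beam (pix_scale ~ beam_fwhm/6) that is ~0.42 * beam_fwhm,
    # consistent with the "about 0.41*beam_fwhm" note above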

    if verbose > 4:
        print "Data summary ..."
        print "   scans : ", format_scans(uniqueScans)
        print "   channels : %d:%d" % (chanStart, chanStop)
        if args.mintsys is None and args.maxtsys is None:
            print "   no tsys selection"
        else:
            tsysRange = ""
            if args.mintsys is not None:
                tsysRange += "%f" % args.mintsys
            tsysRange += ":"
            if args.maxtsys is not None:
                tsysRange += "%f" % args.maxtsys
            print "   tsys range : ", tsysRange
            print "   flagged outside of tsys range : ", ntsysFlagCount
        # number of spectra actually gridded if wt is being used
        if wt is not None:
            print "   spectra to grid : ", (wt != 0.0).sum()
        else:
            print "   spectra to grid : ", len(xsky)
            print "   using equal weights"

        print ""
        print "Map info ..."
        print "   beam_fwhm : ", beam_fwhm, "(", beam_fwhm * 60.0 * 60.0, " arcsec)"
        print "   pix_scale : ", pix_scale, "(", pix_scale * 60.0 * 60.0, " arcsec)"
        print "  gauss fwhm : ", gauss_fwhm, "(", gauss_fwhm * 60.0 * 60.0, " arcsec)"
        print "    ref Xsky : ", refXsky
        print "    ref Ysky : ", refYsky
        print " center Ysky : ", centerYsky
        print "       xsize : ", xsize
        print "       ysize : ", ysize
        print "    ref Xpix : ", refXpix
        print "    ref Ypix : ", refYpix
        print "          f0 : ", faxis[0]
        print "    delta(f) : ", faxis[1] - faxis[0]
        print "      nchan  : ", len(faxis)
        print "      source : ", source
        print " frest (MHz) : ", frest / 1.e6

    # build the initial header object
    # only enough to build the WCS object from it + BEAM size info
    # I had trouble with embedded HISTORY cards and the WCS constructor
    # so those are omitted for now
    hdr = make_header(refXsky,
                      refYsky,
                      xsize,
                      ysize,
                      pix_scale,
                      refXpix,
                      refYpix,
                      coordType,
                      radesys,
                      equinox,
                      frest,
                      faxis,
                      beam_fwhm,
                      veldef,
                      specsys,
                      proj=args.proj,
                      verbose=verbose)

    # relax is turned on here for compatibility with previous images produced by AIPS from the gbtpipeline
    # there may be a better solution
    # even so, it does not like the "-LSR" tag to the CTYPE3 value for the frequency axis
    wcsObj = wcs.WCS(hdr, relax=True)

    if verbose > 3:
        print "Gridding"

    try:
        (cube, weight, beam_fwhm) = grid_otf(data,
                                             xsky,
                                             ysky,
                                             wcsObj,
                                             len(faxis),
                                             xsize,
                                             ysize,
                                             pix_scale,
                                             weight=wt,
                                             beam_fwhm=beam_fwhm,
                                             kern=args.kernel,
                                             gauss_fwhm=gauss_fwhm,
                                             verbose=verbose)
    except MemoryError:
        if verbose > 1:
            print "Not enough memory to create the image cubes necessary to grid this data"
            print "   Requested image size : %d x %d x %d " % (xsize, ysize,
                                                               len(faxis))
            print "   find a beefier machine, consider restricting the data to fewer channels or using channel averaging"
            print "   or use AIPS (with idlToSdfits) to grid all of this data"
        return

    if cube is None or weight is None:
        if verbose > 1:
            print "Problem gridding data"
        return

    if verbose > 3:
        print "Writing cube"

    # Add in the degenerate STOKES axis
    cube.shape = (1, ) + cube.shape
    weight.shape = cube.shape
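    # e.g. a (nchan, ny, nx) cube becomes (1, nchan, ny, nx), the conventional
    # degenerate STOKES axis expected by downstream FITS tools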

    # start writing stuff to disk
    # add additional information to the header
    hdr['object'] = source
    hdr['telescop'] = telescop
    hdr['instrume'] = frontend
    hdr['observer'] = observer
    hdr['date-obs'] = (dateObs, 'Observed time of first spectrum gridded')
    hdr['date-map'] = (time.strftime("%Y-%m-%dT%H:%M:%S",
                                     time.gmtime()), "Created by gbtgridder")
    hdr['date'] = time.strftime("%Y-%m-%d", time.gmtime())
    hdr['obsra'] = refXsky
    hdr['obsdec'] = centerYsky

    if args.kernel == 'gauss':
        hdr.add_comment('Convolved with Gaussian convolution function.')
        hdr['BMAJ'] = beam_fwhm
        hdr['BMIN'] = beam_fwhm
    elif args.kernel == 'gaussbessel':
        hdr.add_comment(
            'Convolved with optimized Gaussian-Bessel convolution function.')
        hdr['BMAJ'] = (beam_fwhm, '*But* not Gaussian.')
        hdr['BMIN'] = (beam_fwhm, '*But* not Gaussian.')
    else:
        hdr.add_comment('Gridded to nearest cell')
        hdr['BMAJ'] = beam_fwhm
        hdr['BMIN'] = beam_fwhm
    hdr['BPA'] = 0.0
    # need to change this to get the actual units from the data
    # could add additional notes to the comment field
    # if Jy, make this Jy/Beam
    if dataUnits == 'Jy':
        dataUnits = 'Jy/Beam'
    hdr['BUNIT'] = (dataUnits, calibType)

    # This suppresses runtime NaN warnings if the cube is empty
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        hdr['DATAMAX'] = numpy.nanmax(cube)

    nanCube = False
    if numpy.isnan(hdr['DATAMAX']):
        nanCube = True
        # this could possibly be done inside the above with block
        # if the warnings catch was more sophisticated
        if verbose > 2:
            print "Entire data cube is not-a-number, this may be because a few channels are consistently bad"
            print "consider restricting the channel range"
        # remove it
        hdr.remove('DATAMAX')
    else:
        hdr['DATAMIN'] = numpy.nanmin(cube)

    # note the parameter values - this must be updated as new parameters are added
    hdr.add_history("gbtgridder version: %s" % gbtgridderVersion)
    if args.channels is not None:
        hdr.add_history("gbtgridder channels: " + args.channels)
    else:
        hdr.add_history("gbtgridder all channels used")
    hdr.add_history("gbtgridder clobber: " + str(args.clobber))
    if average is not None and average > 1:
        hdr.add_history("gbtgridder average: %s channels" % average)
    hdr.add_history("gbtgridder kernel: " + args.kernel)
    if args.output is not None:
        hdr.add_history("gbtgridder output: " + args.output)
    if args.scans is not None:
        hdr.add_history("gbtgridder scans: " + args.scans)
    if args.mintsys is None and args.maxtsys is None:
        hdr.add_history("gbtgridder no tsys selection")
    else:
        if args.mintsys is not None:
            hdr.add_history("gbtgridder mintsys: %f" % args.mintsys)
        if args.maxtsys is not None:
            hdr.add_history("gbtgridder maxtsys: %f" % args.maxtsys)
        hdr.add_history("gbtgridder N spectra outside tsys range: %d" %
                        ntsysFlagCount)

    hdr.add_history("gbtgridder sdfits files ...")
    for thisFile in args.SDFITSfiles:
        # protect against long file names - don't use more than one HISTORY row to
        # document this.  80 chars total, 8 for "HISTORY ", 12 for "gbtgridder: "
        # leaving 60 for the file name
        if len(thisFile) > 60:
            thisFile = "*" + thisFile[-59:]
        hdr.add_history("gbtgridder: " + thisFile)

    hdr.add_comment("IEEE not-a-number used for blanked pixels.")
    hdr.add_comment(
        "  FITS (Flexible Image Transport System) format is defined in 'Astronomy"
    )
    hdr.add_comment(
        "  and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H"
    )

    phdu = pyfits.PrimaryHDU(cube, hdr)
    phdu.writeto(outputFiles["cube"])

    if not args.noweight:
        if verbose > 3:
            print "Writing weight cube"
        wtHdr = hdr.copy()
        wtHdr['BUNIT'] = ('weight', 'Weight cube')  # change from K -> weight
        wtHdr['DATAMAX'] = numpy.nanmax(weight)
        wtHdr['DATAMIN'] = numpy.nanmin(weight)

        phdu = pyfits.PrimaryHDU(weight, wtHdr)
        phdu.writeto(outputFiles["weight"])

    if not args.nocont:
        if verbose > 3:
            print "Writing 'cont' image"
        # "cont" map, sum along the spectral axis
        # SQUASH does a weighted average
        # As implemented here, this is equivalent if there are equal weights along the spectral axis
        # doing a weighted average using numpy.average and ignoring NaNs would be tricky here
        # some slices may be all NaNs (but an entire cube of NaNs was tested for earlier)
        # this suppresses that warning
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            cont_map = numpy.nanmean(cube, axis=1)

        contHdr = hdr.copy()
        # AIPS just changes the channel count on the frequency axis, leaving everything else the same
        contHdr['NAXIS3'] = 1
        # restore the now-degenerate frequency axis to the shape
        cont_map.shape = (1, ) + cont_map.shape
        contHdr.add_history('gbtgridder: average of cube along spectral axis')
        contHdr['DATAMAX'] = numpy.nanmax(cont_map)
        contHdr['DATAMIN'] = numpy.nanmin(cont_map)
        phdu = pyfits.PrimaryHDU(cont_map, contHdr)
        phdu.writeto(outputFiles["cont"])

    if not args.noline:
        if verbose > 3:
            print "Writing line image"
        # "line" map, subtract the along the spectral axis from every plane in the data_cube
        # replace the 0 channel with the avg
        # first, find the average over the baseline region
        n = len(faxis)
        baseRegion = [
            int(round(0.04 * n)),
            int(round(0.12 * n)),
            int(round(0.81 * n)),
            int(round(0.89 * n))
        ]
        # construct an index from these regions
        baseIndx = numpy.arange(baseRegion[1] - baseRegion[0] +
                                1) + baseRegion[0]
        baseIndx = numpy.append(
            baseIndx,
            numpy.arange(baseRegion[3] - baseRegion[2] + 1) + baseRegion[2])
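        # worked example (illustrative): for n = 100 channels the baseline
        # windows are channels 4:12 and 81:89 (inclusive), i.e. line-free
        # regions near the band edges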
        # this should probably be a weighted average
        avg_map = numpy.average(cube[:, baseIndx, :, :], axis=1)
        cube -= avg_map
        cube[:, 0, :, :] = avg_map
        hdr['DATAMAX'] = numpy.nanmax(cube)
        hdr['DATAMIN'] = numpy.nanmin(cube)
        hdr.add_history(
            'gbtgridder: subtracted an average over baseline region on freq axis'
        )
        hdr.add_history('gbtgridder: average over channels: %d:%d and %d:%d' %
                        tuple(baseRegion))
        hdr.add_history('gbtgridder: channel 0 replaced with averages')
        phdu = pyfits.PrimaryHDU(cube, hdr)
        phdu.writeto(outputFiles["line"])

    return
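
A minimal driving sketch for gbtgridder (a sketch only: the Namespace below simply mirrors every attribute the function reads, and 'example.sdfits' is a hypothetical input; the real tool builds args with its own argparse parser):

from argparse import Namespace

args = Namespace(
    SDFITSfiles=['example.sdfits'],  # hypothetical input file
    verbose=4, channels=None, average=None,
    mintsys=None, maxtsys=None, scans=None,
    restfreq=None, eqweight=False, clonecube=None,
    mapcenter=None, size=None, pixelwidth=None,
    kernel='gauss', proj='SFL', clobber=True, output=None,
    noweight=False, nocont=False, noline=False)

gbtgridder(args)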