Example #1
import numpy as np
import pylab as pl
import mpl_plot_templates


def diagplot(data, tsys, noise, dataset):

    pl.figure(1)
    pl.clf()
    mpl_plot_templates.imdiagnostics(data)
    pl.savefig(dataset+"_diagnostics.pdf",bbox_inches='tight')
    pl.figure(2)
    pl.clf()
    pl.subplot(2,1,1)
    pl.plot(tsys,np.arange(tsys.size),alpha=0.5)
    pl.xlabel("TSYS")
    pl.ylabel("Integration")
    pl.subplot(2,1,2)
    pl.plot(tsys, noise, '.',alpha=0.5)
    pl.xlabel("TSYS")
    pl.ylabel("Noise")
    pl.savefig(dataset+"_tsys.pdf",bbox_inches='tight')
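
A minimal usage sketch for diagplot with synthetic inputs (the arrays and the
"test" dataset name are invented for illustration); it writes
test_diagnostics.pdf and test_tsys.pdf to the working directory:

# Hypothetical call to the diagplot function above, using fake data.
nint, nchan = 200, 1024                  # integrations x channels
data = np.random.randn(nint, nchan)      # fake spectra
tsys = 20 + 5 * np.random.rand(nint)     # fake system temperatures
noise = data.std(axis=1)                 # per-integration noise estimate

diagplot(data, tsys, noise, dataset="test")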
Example #2
import numpy as np
from astropy.io import fits
from astropy import wcs


def quickmap(filename, headerfile, diagnostics=True):
    d = fits.getdata(filename)
    if diagnostics:
        import mpl_plot_templates
        p = mpl_plot_templates.imdiagnostics(d['DATA'])

    h = fits.getheader(headerfile)
    w = wcs.WCS(h)
    m = np.zeros([h['NAXIS2'],h['NAXIS1']])
    glon,glat = radec_to_gal(d['CRVAL2'],d['CRVAL3'])
    x,y = w.wcs_world2pix(glon,glat,0)
    md = np.median(d['DATA'],axis=1)
    m[y.astype('int'),x.astype('int')] = md

    return m
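
quickmap depends on a radec_to_gal helper that is not shown in this snippet. A
plausible stand-in, assuming it converts FK5 RA/Dec in degrees to Galactic
longitude and latitude in degrees, could look like this:

# Hypothetical replacement for the radec_to_gal helper used by quickmap;
# the original implementation is not included in this example.
from astropy.coordinates import SkyCoord
import astropy.units as u

def radec_to_gal(ra, dec):
    # ra, dec in degrees (FK5 assumed); returns Galactic l, b in degrees
    c = SkyCoord(ra=ra * u.deg, dec=dec * u.deg, frame='fk5')
    return c.galactic.l.deg, c.galactic.b.deg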
Example #3
import numpy as np
from astropy.io import fits
from astropy import wcs


def quickmap(filename, headerfile, diagnostics=True):
    d = fits.getdata(filename)
    if diagnostics:
        import mpl_plot_templates
        p = mpl_plot_templates.imdiagnostics(d['DATA'])

    h = fits.getheader(headerfile)
    w = wcs.WCS(h)
    m = np.zeros([h['NAXIS2'], h['NAXIS1']])
    glon, glat = radec_to_gal(d['CRVAL2'], d['CRVAL3'])
    x, y = w.wcs_world2pix(glon, glat, 0)
    md = np.median(d['DATA'], axis=1)
    m[y.astype('int'), x.astype('int')] = md

    return m
Example #4
def add_data_to_cube(cubefilename, data=None, filename=None, fileheader=None,
                     flatheader='header.txt',
                     cubeheader='cubeheader.txt', nhits=None,
                     smoothto=1, baselineorder=5, velocityrange=None,
                     excludefitrange=None, noisecut=np.inf, do_runscript=False,
                     linefreq=None, allow_smooth=True,
                     data_iterator=data_iterator,
                     coord_iterator=coord_iterator,
                     velo_iterator=velo_iterator,
                     progressbar=False, coordsys='galactic',
                     datalength=None,
                     velocity_offset=0.0, negative_mean_cut=None,
                     add_with_kernel=False, kernel_fwhm=None, fsw=False,
                     kernel_function=Gaussian2DKernel,
                     diagnostic_plot_name=None, chmod=False,
                     continuum_prefix=None,
                     debug_breakpoint=False,
                     default_unit=u.km/u.s,
                     make_continuum=True,
                     weightspec=None,
                     varweight=False):
    """
    Given a .fits file that contains a binary table of spectra (e.g., as
    you would get from the GBT mapping "pipeline" or the reduce_map.pro aoidl
    file provided by Adam Ginsburg), adds each spectrum into the cubefile.

    velocity_offset : 0.0
        Amount to add to the velocity vector before adding it to the cube
        (useful for FSW observations)
    weightspec : np.ndarray
        A spectrum with the same size as the input arrays but containing the relative
        weights of the data
    """

    #if not default_unit.is_equivalent(u.km/u.s):
    #    raise TypeError("Default unit is not a velocity equivalent.")

    if isinstance(nhits, str):
        log.debug("Loading nhits from %s" % nhits)
        nhits = pyfits.getdata(nhits)
    elif not isinstance(nhits, np.ndarray):
        raise TypeError("nhits must be a .fits file or an ndarray, "
                        "but it is {0}".format(type(nhits)))
    naxis2,naxis1 = nhits.shape

    if velocity_offset and not fsw:
        raise ValueError("Using a velocity offset, but obs type is not "
                         "frequency switched; this is almost certainly wrong, "
                         "but if there's a case for it I'll remove this.")
    if not hasattr(velocity_offset,'unit'):
        velocity_offset = velocity_offset*default_unit


    contimage = np.zeros_like(nhits)
    nhits_once = np.zeros_like(nhits)

    log.debug("Loading data cube {0}".format(cubefilename))
    t0 = time.time()
    # rescale image to weight by number of observations
    image = pyfits.getdata(cubefilename)*nhits
    log.debug(" ".join(("nhits statistics: mean, std, nzeros, size",str(nhits.mean()),str(nhits.std()),str(np.sum(nhits==0)), str(nhits.size))))
    log.debug(" ".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size), str(np.sum(np.isnan(image))))))
    log.debug(" ".join(("nhits shape: ",str(nhits.shape))))
    # default is to set empty pixels to NAN; have to set them
    # back to zero
    image[image!=image] = 0.0
    header = pyfits.getheader(cubefilename)
    # debug print "Cube shape: ",image.shape," naxis3: ",header.get('NAXIS3')," nhits shape: ",nhits.shape

    log.debug("".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size))))

    flathead = get_header(flatheader)
    naxis3 = image.shape[0]
    wcs = pywcs.WCS(flathead)
    cwcs = pywcs.WCS(header)
    vwcs = cwcs.sub([pywcs.WCSSUB_SPECTRAL])
    vunit = u.Unit(vwcs.wcs.cunit[vwcs.wcs.spec])
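    # cubevelo holds the velocity coordinate of every spectral channel, built
    # from the cube's spectral WCS; cd3 is the spectral channel width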
    cubevelo = vwcs.wcs_pix2world(np.arange(naxis3),0)[0] * vunit
    cd3 = vwcs.wcs.cdelt[vwcs.wcs.spec] * vunit

    if not vunit.is_equivalent(default_unit):
        raise ValueError("The units of the cube and the velocity axis are "
                         "possibly not equivalent.  Change default_unit to "
                         "the appropriate unit (probably {0})".format(vunit))

    if add_with_kernel:
        if wcs.wcs.has_cd():
            cd = np.abs(wcs.wcs.cd[1,1])
        else:
            cd = np.abs(wcs.wcs.cdelt[1])
        # Alternative implementation; may not work for .cd?
        #cd = np.abs(np.prod((wcs.wcs.get_cdelt() * wcs.wcs.get_pc().diagonal())))**0.5

    if velocityrange is not None:
        if hasattr(velocityrange, 'unit'):
            v1,v4 = velocityrange
        else:
            v1,v4 = velocityrange * default_unit
        ind1 = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2 = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # stupid hack.  REALLY stupid hack.  Don't crop.
        if np.abs(ind2-image.shape[0]) < 5:
            ind2 = image.shape[0]
        if np.abs(ind1) < 5:
            ind1 = 0

        #print "Velo match for v1,v4 = %f,%f: %f,%f" % (v1,v4,cubevelo[ind1],cubevelo[ind2])
        # print "Updating CRPIX3 from %i to %i. Cropping to indices %i,%i" % (header.get('CRPIX3'),header.get('CRPIX3')-ind1,ind1,ind2)
        # I think this could be disastrous: cubevelo is already set, but now we're changing how it's set in the header!
        # I don't think there's any reason to have this in the first place
        # header.set('CRPIX3',header.get('CRPIX3')-ind1)

        # reset v1,v4 to the points we just selected
        v1 = cubevelo[ind1]
        v4 = cubevelo[ind2-1]
    else:
        ind1=0
        ind2 = image.shape[0]
        v1,v4 = min(cubevelo),max(cubevelo)

    # debug print "Cube has %i v-axis pixels from %f to %f.  Crop range is %f to %f" % (naxis3,cubevelo.min(),cubevelo.max(),v1,v4)

    #if abs(cdelt) < abs(cd3):
    #    print "Spectra have CD=%0.2f, cube has CD=%0.2f.  Will smooth & interpolate." % (cdelt,cd3)

    # Disable progressbar if debug-logging is enabled (they clash)
    if progressbar and 'ProgressBar' in globals() and log.level > 10:
        if datalength is None:
            pb = ProgressBar(len(data))
        else:
            pb = ProgressBar(datalength)
    else:
        progressbar = False

    skipped = []

    for spectrum,pos,velo in zip(data_iterator(data,fsw=fsw),
                                 coord_iterator(data,coordsys_out=coordsys),
                                 velo_iterator(data,linefreq=linefreq)):

        if log.level <= 10:
            t1 = time.time()

        if not hasattr(velo,'unit'):
            velo = velo * default_unit

        glon,glat = pos
        cdelt = velo[1]-velo[0]
        if cdelt < 0:
            # for interpolation, require increasing X axis
            spectrum = spectrum[::-1]
            velo = velo[::-1]
            if log.level < 5:
                log.debug("Reversed spectral axis... ")

        if (velo.max() < cubevelo.min() or velo.min() > cubevelo.max()):
            raise ValueError("Data out of range.")

        if progressbar and log.level > 10:
            pb.update()

        velo += velocity_offset

        if glon != 0 and glat != 0:
            x,y = wcs.wcs_world2pix(glon,glat,0)
            if np.isnan(x) or np.isnan(y):
                log.warning("Skipping NaN point {0}, {1} ...".format(glon, glat))
                continue
            if log.level < 10:
                log.debug("At point {0},{1} ...".format(glon, glat))
            if abs(cdelt) < abs(cd3) and allow_smooth:
                # need to smooth before interpolating to preserve signal
                kernwidth = abs(cd3/cdelt/2.35).decompose().value
                if kernwidth > 2 and kernwidth < 10:
                    xr = kernwidth*5
                    npx = int(np.ceil(xr*2 + 1))  # x_size must be an integer
                elif kernwidth > 10:
                    raise ValueError('Too much smoothing')
                else:
                    xr = 5
                    npx = 11
                #kernel = np.exp(-(np.linspace(-xr,xr,npx)**2)/(2.0*kernwidth**2))
                #kernel /= kernel.sum()
                kernel = Gaussian1DKernel(stddev=kernwidth, x_size=npx)
                # convolve with the kernel's underlying ndarray
                smspec = np.convolve(spectrum, kernel.array, mode='same')
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     smspec)
            else:
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     spectrum)
            # finite-data mask: NaN != NaN, so this flags the non-NaN channels
            OK = (datavect[ind1:ind2] == datavect[ind1:ind2])

            if excludefitrange is None:
                include = OK
            else:
                # Exclude certain regions (e.g., the spectral lines) when computing the noise
                include = OK.copy()

                if not hasattr(excludefitrange,'unit'):
                    excludefitrange = excludefitrange * default_unit

                # Convert velocities to indices
                exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

                # Loop through exclude_inds pairwise
                for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
                    # Do not include the excluded regions
                    include[i1:i2] = False

                if include.sum() == 0:
                    raise ValueError("All data excluded.")

            noiseestimate = datavect[ind1:ind2][include].std()
            contestimate = datavect[ind1:ind2][include].mean()

            if noiseestimate > noisecut:
                log.info("Skipped a data point at %f,%f in file %s because it had excessive noise %f" % (x,y,filename,noiseestimate))
                skipped.append(True)
                continue
            elif negative_mean_cut is not None and contestimate < negative_mean_cut:
                log.info("Skipped a data point at %f,%f in file %s because it had negative continuum %f" % (x,y,filename,contestimate))
                skipped.append(True)
                continue
            elif OK.sum() == 0:
                log.info("Skipped a data point at %f,%f in file %s because it had NANs" % (x,y,filename))
                skipped.append(True)
                continue
            elif OK.sum()/float(abs(ind2-ind1)) < 0.5:
                log.info("Skipped a data point at %f,%f in file %s because it had %i NANs" % (x,y,filename,np.isnan(datavect[ind1:ind2]).sum()))
                skipped.append(True)
                continue
            if log.level < 10:
                log.debug("did not skip...")

            if varweight:
                weight = 1./noiseestimate**2
            else:
                weight = 1.

            if weightspec is None:
                wspec = weight
            else:
                wspec = weight * weightspec


            if 0 < int(np.round(x)) < naxis1 and 0 < int(np.round(y)) < naxis2:
                if add_with_kernel:
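                    # np.sqrt(8*np.log(2)) ~= 2.355 converts FWHM to sigma, so
                    # kernel_fwhm/fwhm/cd below is the kernel sigma in map pixels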
                    fwhm = np.sqrt(8*np.log(2))
                    kernel_size = kd = int(np.ceil(kernel_fwhm/fwhm/cd * 5))
                    if kernel_size < 5:
                        kernel_size = kd = 5
                    if kernel_size % 2 == 0:
                        kernel_size = kd = kernel_size+1
                    if kernel_size > 100:
                        raise ValueError("Huge kernel - are you sure?")
                    kernel_middle = mid = (kd-1)/2.
                    xinds,yinds = (np.mgrid[:kd,:kd]-mid+np.array([np.round(x),np.round(y)])[:,None,None]).astype('int')
                    # This kernel is NOT centered, and that's the bloody point.
                    # (I made a very stupid error and used Gaussian2DKernel,
                    # which is strictly centered, in a previous version)
                    kernel2d = np.exp(-((xinds-x)**2+(yinds-y)**2)/(2*(kernel_fwhm/fwhm/cd)**2))

                    dim1 = ind2-ind1
                    vect_to_add = np.outer(datavect[ind1:ind2],kernel2d).reshape([dim1,kd,kd])
                    vect_to_add[~OK] = 0  # zero out the NaN channels

                    # need to slice out edges
                    if yinds.max() >= naxis2 or yinds.min() < 0:
                        yok = (yinds[0,:] < naxis2) & (yinds[0,:] >= 0)
                        xinds,yinds = xinds[:,yok],yinds[:,yok]
                        vect_to_add = vect_to_add[:,:,yok]
                        kernel2d = kernel2d[:,yok]
                    if xinds.max() >= naxis1 or xinds.min() < 0:
                        xok = (xinds[:,0] < naxis1) & (xinds[:,0] >= 0)
                        xinds,yinds = xinds[xok,:],yinds[xok,:]
                        vect_to_add = vect_to_add[:,xok,:]
                        kernel2d = kernel2d[xok,:]

                    image[ind1:ind2,yinds,xinds] += vect_to_add*wspec
                    # NaN spectral bins are not appropriately downweighted... but they shouldn't exist anyway...
                    nhits[yinds,xinds] += kernel2d*weight
                    contimage[yinds,xinds] += kernel2d * contestimate*weight
                    nhits_once[yinds,xinds] += kernel2d*weight

                else:
                    image[ind1:ind2,int(np.round(y)),int(np.round(x))][OK] += datavect[ind1:ind2][OK]*weight
                    nhits[int(np.round(y)),int(np.round(x))] += weight
                    contimage[int(np.round(y)),int(np.round(x))] += contestimate*weight
                    nhits_once[int(np.round(y)),int(np.round(x))] += weight

                if log.level < 10:
                    log.debug("Z-axis indices are %i,%i..." % (ind1,ind2,))
                    log.debug("Added a data point at %i,%i" % (int(np.round(x)),int(np.round(y))))
                skipped.append(False)
            else:
                skipped.append(True)
                log.info("Skipped a data point at x,y=%f,%f "
                         "lon,lat=%f,%f in file %s because "
                         "it's out of the grid" % (x,y,glon,glat,filename))

            if debug_breakpoint:
                import ipdb
                ipdb.set_trace()

        if log.level <= 10:
            dt = time.time() - t1
            log.debug("Completed x,y={x:4.0f},{y:4.0f}"
                      " ({x:6.2f},{y:6.2f}) in {dt:6.2g}s".format(x=float(x),
                                                                  y=float(y),
                                                                  dt=dt))

    log.info("Completed 'add_data' loop for"
             " {0} in {1}s".format(cubefilename, time.time()-t0))

    if excludefitrange is not None:
        # this block redefining "include" is used for diagnostics (optional)
        ind1a = np.argmin(np.abs(np.floor(v1-velo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-velo)))+1
        dname = 'DATA' if 'DATA' in data.dtype.names else 'SPECTRA'
        OK = (data[dname][0,:]==data[dname][0,:])
        OK[:ind1a] = False
        OK[ind2a:] = False

        include = OK

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-velo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")
    else:
        dname = 'DATA' if 'DATA' in data.dtype.names else 'SPECTRA'
        include = slice(None)


    if diagnostic_plot_name:
        from mpl_plot_templates import imdiagnostics

        pylab.clf()

        dd = data[dname][:,include]
        imdiagnostics(dd,axis=pylab.gca())
        pylab.savefig(diagnostic_plot_name, bbox_inches='tight')

        # Save a copy with the bad stuff flagged out; this should tell whether flagging worked
        skipped = np.array(skipped,dtype='bool')
        dd[skipped,:] = -999
        maskdata = np.ma.masked_equal(dd,-999)
        pylab.clf()
        imdiagnostics(maskdata, axis=pylab.gca())
        dpn_pre,dpn_suf = os.path.splitext(diagnostic_plot_name)
        dpn_flagged = dpn_pre+"_flagged"+dpn_suf
        pylab.savefig(dpn_flagged, bbox_inches='tight')

        log.info("Saved diagnostic plot %s and %s" % (diagnostic_plot_name,dpn_flagged))

    log.debug("nhits statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(nhits.mean(),nhits.std(),np.sum(nhits==0), nhits.size))
    log.debug("Image statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(image.mean(),image.std(),np.sum(image==0), image.size))
    
    imav = image/nhits

    if log.level <= 10:
        nnan = np.count_nonzero(np.isnan(imav))
        log.debug("imav statistics: mean, std, nzeros, size, nnan, ngood: {0} {1} {2} {3} {4} {5}".format(imav.mean(),imav.std(),np.sum(imav==0), imav.size, nnan, imav.size-nnan))
        log.debug("imav shape: {0}".format(imav.shape))

    subcube = imav[ind1:ind2,:,:]

    if log.level <= 10:
        nnan = np.sum(np.isnan(subcube))
        print("subcube statistics: mean, std, nzeros, size, nnan, ngood:",np.nansum(subcube)/subcube.size,np.std(subcube[subcube==subcube]),np.sum(subcube==0), subcube.size, nnan, subcube.size-nnan)
        print("subcube shape: ",subcube.shape)

    H = header.copy()
    if fileheader is not None:
        for k,v in fileheader.items():
            if 'RESTFRQ' in k or 'RESTFREQ' in k:
                header.set(k,v)
            #if k[0] == 'C' and '1' in k and k[-1] != '1':
            #    header.set(k.replace('1','3'), v)
    moreH = get_header(cubeheader)
    for k,v in H.items():
        header.set(k,v)
    for k,v in moreH.items():
        header.set(k,v)
    HDU = pyfits.PrimaryHDU(data=subcube,header=header)
    HDU.writeto(cubefilename,clobber=True,output_verify='fix')

    outpre = cubefilename.replace(".fits","")

    include = np.ones(imav.shape[0],dtype='bool')

    if excludefitrange is not None:
        # this block redefining "include" is used for continuum
        ind1a = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")

    HDU2 = pyfits.PrimaryHDU(data=nhits,header=flathead)
    HDU2.writeto(outpre+"_nhits.fits",clobber=True,output_verify='fix')

    #OKCube = (imav==imav)
    #contmap = np.nansum(imav[naxis3*0.1:naxis3*0.9,:,:],axis=0) / OKCube.sum(axis=0)
    if make_continuum:
        contmap = np.nansum(imav[include,:,:],axis=0) / include.sum()
        HDU2 = pyfits.PrimaryHDU(data=contmap,header=flathead)
        HDU2.writeto(outpre+"_continuum.fits",clobber=True,output_verify='fix')

        if continuum_prefix is not None:
            # Solo continuum image (just this obs set)
            HDU2.data = contimage / nhits_once
            HDU2.writeto(continuum_prefix+"_continuum.fits",clobber=True,output_verify='fix')
            HDU2.data = nhits_once
            HDU2.writeto(continuum_prefix+"_nhits.fits",clobber=True,output_verify='fix')

    log.info("Writing script file {0}".format(outpre+"_starlink.sh"))
    scriptfile = open(outpre+"_starlink.sh",'w')
    outpath,outfn = os.path.split(cubefilename)
    outpath,pre = os.path.split(outpre)
    print(("#!/bin/bash"), file=scriptfile)
    if outpath != '':
        print(('cd %s' % outpath), file=scriptfile)
    print(('. /star/etc/profile'), file=scriptfile)
    print(('kappa > /dev/null'), file=scriptfile)
    print(('convert > /dev/null'), file=scriptfile)
    print(('fits2ndf %s %s' % (outfn,outfn.replace(".fits",".sdf"))), file=scriptfile)
    if excludefitrange is not None:
        v2v3 = ""
        for v2,v3 in zip(excludefitrange[::2],excludefitrange[1::2]):
            v2v3 += "%0.2f %0.2f " % (v2.to(default_unit).value,v3.to(default_unit).value)
        print(('mfittrend %s  ranges=\\\"%0.2f %s %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v2v3,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    else:
        print(('mfittrend %s  ranges=\\\"%0.2f %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    print(('sub %s %s %s' % (outfn.replace(".fits",".sdf"),outfn.replace(".fits","_baseline.sdf"),outfn.replace(".fits","_sub.sdf"))), file=scriptfile)
    print(('sqorst %s_sub mode=pixelscale  axis=3 pixscale=%i out=%s_vrebin' % (pre,smoothto,pre)), file=scriptfile)
    print(('gausmooth %s_vrebin fwhm=1.0 axes=[1,2] out=%s_smooth' % (pre,pre)), file=scriptfile)
    print(('#collapse %s estimator=mean axis="VRAD" low=-400 high=500 out=%s_continuum' % (pre,pre)), file=scriptfile)
    print(('rm %s_sub.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_sub %s_sub.fits' % (pre,pre)), file=scriptfile)
    print(('rm %s_smooth.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_smooth %s_smooth.fits' % (pre,pre)), file=scriptfile)
    print(("# Fix STARLINK's failure to respect header keywords."), file=scriptfile)
    print(('sethead %s_smooth.fits RESTFRQ=`gethead RESTFRQ %s.fits`' % (pre,pre)), file=scriptfile)
    print(('rm %s_baseline.sdf' % (pre)), file=scriptfile)
    print(('rm %s_smooth.sdf' % (pre)), file=scriptfile)
    print(('rm %s_sub.sdf' % (pre)), file=scriptfile)
    print(('rm %s_vrebin.sdf' % (pre)), file=scriptfile)
    print(('rm %s.sdf' % (pre)), file=scriptfile)
    scriptfile.close()

    if chmod:
        scriptfilename = (outpre+"_starlink.sh").replace(" ","")
        #subprocess.call("chmod +x {0}".format(scriptfilename), shell=True)
        st = os.stat(scriptfilename)
        os.chmod(scriptfilename, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH | stat.S_IXUSR)

    if do_runscript:
        runscript(outpre)

    _fix_ms_kms_file(outpre+"_sub.fits")
    _fix_ms_kms_file(outpre+"_smooth.fits")

    if log.level <= 20:
        log.info("Completed {0} in {1}s".format(pre, time.time()-t0))
Example #5
import numpy as np
from mpl_plot_templates import imdiagnostics
import pylab as pl

arr = np.random.rand(1024 // 8, 1024)

pl.figure(1)
pl.clf()
ax1 = imdiagnostics(arr)
pl.figure(2)
pl.clf()
ax2 = imdiagnostics(arr, square_aspect=True)

arr = np.random.rand(1024 // 256, 1024)

pl.figure(3)
pl.clf()
ax3 = imdiagnostics(arr)
pl.figure(4)
pl.clf()
ax4 = imdiagnostics(arr, square_aspect=True)

pl.show()
Example #6
import numpy as np
from mpl_plot_templates import imdiagnostics
import pylab as pl

arr = np.random.rand(1024//8,1024)

pl.figure(1)
pl.clf()
ax1 = imdiagnostics(arr)
pl.figure(2)
pl.clf()
ax2 = imdiagnostics(arr,square_aspect=True)

arr = np.random.rand(1024//256,1024)

pl.figure(3)
pl.clf()
ax3 = imdiagnostics(arr)
pl.figure(4)
pl.clf()
ax4 = imdiagnostics(arr,square_aspect=True)

pl.show()
Example #7
def add_data_to_cube(cubefilename, data=None, filename=None, fileheader=None,
                     flatheader='header.txt',
                     cubeheader='cubeheader.txt', nhits=None,
                     smoothto=1, baselineorder=5, velocityrange=None,
                     excludefitrange=None, noisecut=np.inf, do_runscript=False,
                     linefreq=None, allow_smooth=True,
                     data_iterator=data_iterator,
                     coord_iterator=coord_iterator,
                     velo_iterator=velo_iterator,
                     progressbar=False, coordsys='galactic',
                     datalength=None,
                     velocity_offset=0.0, negative_mean_cut=None,
                     add_with_kernel=False, kernel_fwhm=None, fsw=False,
                     kernel_function=Gaussian2DKernel,
                     diagnostic_plot_name=None, chmod=False,
                     continuum_prefix=None,
                     debug_breakpoint=False,
                     default_unit=u.km/u.s,
                     make_continuum=True,
                     weightspec=None,
                     varweight=False):
    """
    Given a .fits file that contains a binary table of spectra (e.g., as
    you would get from the GBT mapping "pipeline" or the reduce_map.pro aoidl
    file provided by Adam Ginsburg), adds each spectrum into the cubefile.

    velocity_offset : 0.0
        Amount to add to the velocity vector before adding it to the cube
        (useful for FSW observations)
    weightspec : np.ndarray
        A spectrum with the same size as the input arrays but containing the relative
        weights of the data
    """

    #if not default_unit.is_equivalent(u.km/u.s):
    #    raise TypeError("Default unit is not a velocity equivalent.")

    if isinstance(nhits, str):
        log.debug("Loading nhits from %s" % nhits)
        nhits = pyfits.getdata(nhits)
    elif not isinstance(nhits, np.ndarray):
        raise TypeError("nhits must be a .fits file or an ndarray, "
                        "but it is {0}".format(type(nhits)))
    naxis2,naxis1 = nhits.shape

    if velocity_offset and not fsw:
        raise ValueError("Using a velocity offset, but obs type is not "
                         "frequency switched; this is almost certainly wrong, "
                         "but if there's a case for it I'll remove this.")
    if not hasattr(velocity_offset,'unit'):
        velocity_offset = velocity_offset*default_unit


    contimage = np.zeros_like(nhits)
    nhits_once = np.zeros_like(nhits)

    log.debug("Loading data cube {0}".format(cubefilename))
    t0 = time.time()
    # rescale image to weight by number of observations
    image = pyfits.getdata(cubefilename)*nhits
    log.debug(" ".join(("nhits statistics: mean, std, nzeros, size",str(nhits.mean()),str(nhits.std()),str(np.sum(nhits==0)), str(nhits.size))))
    log.debug(" ".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size), str(np.sum(np.isnan(image))))))
    log.debug(" ".join(("nhits shape: ",str(nhits.shape))))
    # default is to set empty pixels to NAN; have to set them
    # back to zero
    image[image!=image] = 0.0
    header = pyfits.getheader(cubefilename)
    # debug print "Cube shape: ",image.shape," naxis3: ",header.get('NAXIS3')," nhits shape: ",nhits.shape

    log.debug("".join(("Image statistics: mean, std, nzeros, size",str(image.mean()),str(image.std()),str(np.sum(image==0)), str(image.size))))

    flathead = get_header(flatheader)
    naxis3 = image.shape[0]
    wcs = pywcs.WCS(flathead)
    cwcs = pywcs.WCS(header)
    vwcs = cwcs.sub([pywcs.WCSSUB_SPECTRAL])
    vunit = u.Unit(vwcs.wcs.cunit[vwcs.wcs.spec])
    cubevelo = vwcs.wcs_pix2world(np.arange(naxis3),0)[0] * vunit
    cd3 = vwcs.wcs.cdelt[vwcs.wcs.spec] * vunit

    if not vunit.is_equivalent(default_unit):
        raise ValueError("The units of the cube and the velocity axis are "
                         "possibly not equivalent.  Change default_unit to "
                         "the appropriate unit (probably {0})".format(vunit))

    if add_with_kernel:
        if wcs.wcs.has_cd():
            cd = np.abs(wcs.wcs.cd[1,1])
        else:
            cd = np.abs(wcs.wcs.cdelt[1])
        # Alternative implementation; may not work for .cd?
        #cd = np.abs(np.prod((wcs.wcs.get_cdelt() * wcs.wcs.get_pc().diagonal())))**0.5

    if velocityrange is not None:
        if hasattr(velocityrange, 'unit'):
            v1,v4 = velocityrange
        else:
            v1,v4 = velocityrange * default_unit
        ind1 = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2 = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # stupid hack.  REALLY stupid hack.  Don't crop.
        if np.abs(ind2-image.shape[0]) < 5:
            ind2 = image.shape[0]
        if np.abs(ind1) < 5:
            ind1 = 0

        #print "Velo match for v1,v4 = %f,%f: %f,%f" % (v1,v4,cubevelo[ind1],cubevelo[ind2])
        # print "Updating CRPIX3 from %i to %i. Cropping to indices %i,%i" % (header.get('CRPIX3'),header.get('CRPIX3')-ind1,ind1,ind2)
        # I think this could be disastrous: cubevelo is already set, but now we're changing how it's set in the header!
        # I don't think there's any reason to have this in the first place
        # header.set('CRPIX3',header.get('CRPIX3')-ind1)

        # reset v1,v4 to the points we just selected
        v1 = cubevelo[ind1]
        v4 = cubevelo[ind2-1]
    else:
        ind1=0
        ind2 = image.shape[0]
        v1,v4 = min(cubevelo),max(cubevelo)

    # debug print "Cube has %i v-axis pixels from %f to %f.  Crop range is %f to %f" % (naxis3,cubevelo.min(),cubevelo.max(),v1,v4)

    #if abs(cdelt) < abs(cd3):
    #    print "Spectra have CD=%0.2f, cube has CD=%0.2f.  Will smooth & interpolate." % (cdelt,cd3)

    # Disable progressbar if debug-logging is enabled (they clash)
    if progressbar and 'ProgressBar' in globals() and log.level > 10:
        if datalength is None:
            pb = ProgressBar(len(data))
        else:
            pb = ProgressBar(datalength)
    else:
        progressbar = False

    skipped = []

    for spectrum,pos,velo in zip(data_iterator(data,fsw=fsw),
                                 coord_iterator(data,coordsys_out=coordsys),
                                 velo_iterator(data,linefreq=linefreq)):

        if log.level <= 10:
            t1 = time.time()

        if not hasattr(velo,'unit'):
            velo = velo * default_unit

        glon,glat = pos
        cdelt = velo[1]-velo[0]
        if cdelt < 0:
            # for interpolation, require increasing X axis
            spectrum = spectrum[::-1]
            velo = velo[::-1]
            if log.level < 5:
                log.debug("Reversed spectral axis... ")

        if (velo.max() < cubevelo.min() or velo.min() > cubevelo.max()):
            raise ValueError("Data out of range.")

        if progressbar and log.level > 10:
            pb.update()

        velo += velocity_offset

        if glon != 0 and glat != 0:
            x,y = wcs.wcs_world2pix(glon,glat,0)
            if np.isnan(x) or np.isnan(y):
                log.warning("Skipping NaN point {0}, {1} ...".format(glon, glat))
                continue
            if log.level < 10:
                log.debug("At point {0},{1} ...".format(glon, glat))
            if abs(cdelt) < abs(cd3) and allow_smooth:
                # need to smooth before interpolating to preserve signal
                kernwidth = abs(cd3/cdelt/2.35).decompose().value
                if kernwidth > 2 and kernwidth < 10:
                    xr = kernwidth*5
                    npx = int(np.ceil(xr*2 + 1))  # x_size must be an integer
                elif kernwidth > 10:
                    raise ValueError('Too much smoothing')
                else:
                    xr = 5
                    npx = 11
                #kernel = np.exp(-(np.linspace(-xr,xr,npx)**2)/(2.0*kernwidth**2))
                #kernel /= kernel.sum()
                kernel = Gaussian1DKernel(stddev=kernwidth, x_size=npx)
                smspec = np.convolve(spectrum, kernel.array, mode='same')
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     smspec)
            else:
                datavect = np.interp(cubevelo.to(default_unit).value,
                                     velo.to(default_unit).value,
                                     spectrum)
            OK = (datavect[ind1:ind2] == datavect[ind1:ind2])

            if excludefitrange is None:
                include = OK
            else:
                # Exclude certain regions (e.g., the spectral lines) when computing the noise
                include = OK.copy()

                if not hasattr(excludefitrange,'unit'):
                    excludefitrange = excludefitrange * default_unit

                # Convert velocities to indices
                exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

                # Loop through exclude_inds pairwise
                for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
                    # Do not include the excluded regions
                    include[i1:i2] = False

                if include.sum() == 0:
                    raise ValueError("All data excluded.")

            noiseestimate = datavect[ind1:ind2][include].std()
            contestimate = datavect[ind1:ind2][include].mean()

            if noiseestimate > noisecut:
                log.info("Skipped a data point at %f,%f in file %s because it had excessive noise %f" % (x,y,filename,noiseestimate))
                skipped.append(True)
                continue
            elif negative_mean_cut is not None and contestimate < negative_mean_cut:
                log.info("Skipped a data point at %f,%f in file %s because it had negative continuum %f" % (x,y,filename,contestimate))
                skipped.append(True)
                continue
            elif OK.sum() == 0:
                log.info("Skipped a data point at %f,%f in file %s because it had NANs" % (x,y,filename))
                skipped.append(True)
                continue
            elif OK.sum()/float(abs(ind2-ind1)) < 0.5:
                log.info("Skipped a data point at %f,%f in file %s because it had %i NANs" % (x,y,filename,np.isnan(datavect[ind1:ind2]).sum()))
                skipped.append(True)
                continue
            if log.level < 10:
                log.debug("did not skip...")

            if varweight:
                weight = 1./noiseestimate**2
            else:
                weight = 1.

            if weightspec is None:
                wspec = weight
            else:
                wspec = weight * weightspec


            if 0 < int(np.round(x)) < naxis1 and 0 < int(np.round(y)) < naxis2:
                if add_with_kernel:
                    fwhm = np.sqrt(8*np.log(2))
                    kernel_size = kd = int(np.ceil(kernel_fwhm/fwhm/cd * 5))
                    if kernel_size < 5:
                        kernel_size = kd = 5
                    if kernel_size % 2 == 0:
                        kernel_size = kd = kernel_size+1
                    if kernel_size > 100:
                        raise ValueError("Huge kernel - are you sure?")
                    kernel_middle = mid = (kd-1)/2.
                    xinds,yinds = (np.mgrid[:kd,:kd]-mid+np.array([np.round(x),np.round(y)])[:,None,None]).astype('int')
                    # This kernel is NOT centered, and that's the bloody point.
                    # (I made a very stupid error and used Gaussian2DKernel,
                    # which is strictly centered, in a previous version)
                    kernel2d = np.exp(-((xinds-x)**2+(yinds-y)**2)/(2*(kernel_fwhm/fwhm/cd)**2))

                    dim1 = ind2-ind1
                    vect_to_add = np.outer(datavect[ind1:ind2],kernel2d).reshape([dim1,kd,kd])
                    vect_to_add[~OK] = 0

                    # need to slice out edges
                    if yinds.max() >= naxis2 or yinds.min() < 0:
                        yok = (yinds[0,:] < naxis2) & (yinds[0,:] >= 0)
                        xinds,yinds = xinds[:,yok],yinds[:,yok]
                        vect_to_add = vect_to_add[:,:,yok]
                        kernel2d = kernel2d[:,yok]
                    if xinds.max() >= naxis1 or xinds.min() < 0:
                        xok = (xinds[:,0] < naxis1) & (xinds[:,0] >= 0)
                        xinds,yinds = xinds[xok,:],yinds[xok,:]
                        vect_to_add = vect_to_add[:,xok,:]
                        kernel2d = kernel2d[xok,:]

                    image[ind1:ind2,yinds,xinds] += vect_to_add*wspec
                    # NaN spectral bins are not appropriately downweighted... but they shouldn't exist anyway...
                    nhits[yinds,xinds] += kernel2d*weight
                    contimage[yinds,xinds] += kernel2d * contestimate*weight
                    nhits_once[yinds,xinds] += kernel2d*weight

                else:
                    image[ind1:ind2,int(np.round(y)),int(np.round(x))][OK] += datavect[ind1:ind2][OK]*weight
                    nhits[int(np.round(y)),int(np.round(x))] += weight
                    contimage[int(np.round(y)),int(np.round(x))] += contestimate*weight
                    nhits_once[int(np.round(y)),int(np.round(x))] += weight

                if log.level < 10:
                    log.debug("Z-axis indices are %i,%i..." % (ind1,ind2,))
                    log.debug("Added a data point at %i,%i" % (int(np.round(x)),int(np.round(y))))
                skipped.append(False)
            else:
                skipped.append(True)
                log.info("Skipped a data point at x,y=%f,%f "
                         "lon,lat=%f,%f in file %s because "
                         "it's out of the grid" % (x,y,glon,glat,filename))

            if debug_breakpoint:
                import ipdb
                ipdb.set_trace()

        if log.level <= 10:
            dt = time.time() - t1
            log.debug("Completed x,y={x:4.0f},{y:4.0f}"
                      " ({x:6.2f},{y:6.2f}) in {dt:6.2g}s".format(x=float(x),
                                                                  y=float(y),
                                                                  dt=dt))

    log.info("Completed 'add_data' loop for"
             " {0} in {1}s".format(cubefilename, time.time()-t0))

    if data.dtype.names is not None:
        dname = 'DATA' if 'DATA' in data.dtype.names else 'SPECTRA'
    else:
        dname = slice(None)

    if excludefitrange is not None:
        # this block redefining "include" is used for diagnostics (optional)
        ind1a = np.argmin(np.abs(np.floor(v1-velo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-velo)))+1
        OK = (data[dname][0,:]==data[dname][0,:])
        OK[:ind1a] = False
        OK[ind2a:] = False

        include = OK

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-velo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")
    else:
        include = slice(None)


    if diagnostic_plot_name:
        from mpl_plot_templates import imdiagnostics

        pylab.clf()

        dd = data[dname][:,include]
        imdiagnostics(dd,axis=pylab.gca())
        pylab.savefig(diagnostic_plot_name, bbox_inches='tight')

        # Save a copy with the bad stuff flagged out; this should tell whether flagging worked
        skipped = np.array(skipped,dtype='bool')
        dd[skipped,:] = -999
        maskdata = np.ma.masked_equal(dd,-999)
        pylab.clf()
        imdiagnostics(maskdata, axis=pylab.gca())
        dpn_pre,dpn_suf = os.path.splitext(diagnostic_plot_name)
        dpn_flagged = dpn_pre+"_flagged"+dpn_suf
        pylab.savefig(dpn_flagged, bbox_inches='tight')

        log.info("Saved diagnostic plot %s and %s" % (diagnostic_plot_name,dpn_flagged))

    log.debug("nhits statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(nhits.mean(),nhits.std(),np.sum(nhits==0), nhits.size))
    log.debug("Image statistics: mean, std, nzeros, size {0} {1} {2} {3}".format(image.mean(),image.std(),np.sum(image==0), image.size))

    imav = image/nhits

    if log.level <= 10:
        nnan = np.count_nonzero(np.isnan(imav))
        log.debug("imav statistics: mean, std, nzeros, size, nnan, ngood: {0} {1} {2} {3} {4} {5}".format(imav.mean(),imav.std(),np.sum(imav==0), imav.size, nnan, imav.size-nnan))
        log.debug("imav shape: {0}".format(imav.shape))

    subcube = imav[ind1:ind2,:,:]

    if log.level <= 10:
        nnan = np.sum(np.isnan(subcube))
        print("subcube statistics: mean, std, nzeros, size, nnan, ngood:",np.nansum(subcube)/subcube.size,np.std(subcube[subcube==subcube]),np.sum(subcube==0), subcube.size, nnan, subcube.size-nnan)
        print("subcube shape: ",subcube.shape)

    H = header.copy()
    if fileheader is not None:
        for k,v in fileheader.items():
            if 'RESTFRQ' in k or 'RESTFREQ' in k:
                header.set(k,v)
            #if k[0] == 'C' and '1' in k and k[-1] != '1':
            #    header.set(k.replace('1','3'), v)
    moreH = get_header(cubeheader)
    for k,v in H.items():
        header.set(k,v)
    for k,v in moreH.items():
        header.set(k,v)
    HDU = pyfits.PrimaryHDU(data=subcube,header=header)
    HDU.writeto(cubefilename,clobber=True,output_verify='fix')

    outpre = cubefilename.replace(".fits","")

    include = np.ones(imav.shape[0],dtype='bool')

    if excludefitrange is not None:
        # this block redefining "include" is used for continuum
        ind1a = np.argmin(np.abs(np.floor(v1-cubevelo)))
        ind2a = np.argmin(np.abs(np.ceil(v4-cubevelo)))+1

        # Convert velocities to indices
        exclude_inds = [np.argmin(np.abs(np.floor(v-cubevelo))) for v in excludefitrange]

        # Loop through exclude_inds pairwise
        for (i1,i2) in zip(exclude_inds[:-1:2],exclude_inds[1::2]):
            # Do not include the excluded regions
            include[i1:i2] = False

        if include.sum() == 0:
            raise ValueError("All data excluded.")

    HDU2 = pyfits.PrimaryHDU(data=nhits,header=flathead)
    HDU2.writeto(outpre+"_nhits.fits",clobber=True,output_verify='fix')

    #OKCube = (imav==imav)
    #contmap = np.nansum(imav[naxis3*0.1:naxis3*0.9,:,:],axis=0) / OKCube.sum(axis=0)
    if make_continuum:
        contmap = np.nansum(imav[include,:,:],axis=0) / include.sum()
        HDU2 = pyfits.PrimaryHDU(data=contmap,header=flathead)
        HDU2.writeto(outpre+"_continuum.fits",clobber=True,output_verify='fix')

        if continuum_prefix is not None:
            # Solo continuum image (just this obs set)
            HDU2.data = contimage / nhits_once
            HDU2.writeto(continuum_prefix+"_continuum.fits",clobber=True,output_verify='fix')
            HDU2.data = nhits_once
            HDU2.writeto(continuum_prefix+"_nhits.fits",clobber=True,output_verify='fix')

    log.info("Writing script file {0}".format(outpre+"_starlink.sh"))
    scriptfile = open(outpre+"_starlink.sh",'w')
    outpath,outfn = os.path.split(cubefilename)
    outpath,pre = os.path.split(outpre)
    print(("#!/bin/bash"), file=scriptfile)
    if outpath != '':
        print(('cd %s' % outpath), file=scriptfile)
    print(('. /star/etc/profile'), file=scriptfile)
    print(('kappa > /dev/null'), file=scriptfile)
    print(('convert > /dev/null'), file=scriptfile)
    print(('fits2ndf %s %s' % (outfn,outfn.replace(".fits",".sdf"))), file=scriptfile)
    if excludefitrange is not None:
        v2v3 = ""
        for v2,v3 in zip(excludefitrange[::2],excludefitrange[1::2]):
            v2v3 += "%0.2f %0.2f " % (v2.to(default_unit).value,v3.to(default_unit).value)
        print(('mfittrend %s  ranges=\\\"%0.2f %s %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v2v3,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    else:
        print(('mfittrend %s  ranges=\\\"%0.2f %0.2f\\\" order=%i axis=3 out=%s' % (outfn.replace(".fits",".sdf"),v1.to(default_unit).value,v4.to(default_unit).value,baselineorder,outfn.replace(".fits","_baseline.sdf"))), file=scriptfile)
    print(('sub %s %s %s' % (outfn.replace(".fits",".sdf"),outfn.replace(".fits","_baseline.sdf"),outfn.replace(".fits","_sub.sdf"))), file=scriptfile)
    print(('sqorst %s_sub mode=pixelscale  axis=3 pixscale=%i out=%s_vrebin' % (pre,smoothto,pre)), file=scriptfile)
    print(('gausmooth %s_vrebin fwhm=1.0 axes=[1,2] out=%s_smooth' % (pre,pre)), file=scriptfile)
    print(('#collapse %s estimator=mean axis="VRAD" low=-400 high=500 out=%s_continuum' % (pre,pre)), file=scriptfile)
    print(('rm %s_sub.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_sub %s_sub.fits' % (pre,pre)), file=scriptfile)
    print(('rm %s_smooth.fits' % (pre)), file=scriptfile)
    print(('ndf2fits %s_smooth %s_smooth.fits' % (pre,pre)), file=scriptfile)
    print(("# Fix STARLINK's failure to respect header keywords."), file=scriptfile)
    print(('sethead %s_smooth.fits RESTFRQ=`gethead RESTFRQ %s.fits`' % (pre,pre)), file=scriptfile)
    print(('rm %s_baseline.sdf' % (pre)), file=scriptfile)
    print(('rm %s_smooth.sdf' % (pre)), file=scriptfile)
    print(('rm %s_sub.sdf' % (pre)), file=scriptfile)
    print(('rm %s_vrebin.sdf' % (pre)), file=scriptfile)
    print(('rm %s.sdf' % (pre)), file=scriptfile)
    scriptfile.close()

    if chmod:
        scriptfilename = (outpre+"_starlink.sh").replace(" ","")
        #subprocess.call("chmod +x {0}".format(scriptfilename), shell=True)
        st = os.stat(scriptfilename)
        os.chmod(scriptfilename, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH | stat.S_IXUSR)

    if do_runscript:
        runscript(outpre)

    _fix_ms_kms_file(outpre+"_sub.fits")
    _fix_ms_kms_file(outpre+"_smooth.fits")

    if log.level <= 20:
        log.info("Completed {0} in {1}s".format(pre, time.time()-t0))