Example #1
    def make_grid(self):
        jobs = []
        for fname in self.filelist:
            otffile = LMTOTFNetCDFFile(fname)
            otffile._reduce_data(self.biased)
            if (self.sigmaweight):
                otffile.baseline()

            print fname
            self.convolve(otffile)
            #p = multiprocessing.Process(target=self.convolve, args=(otffile,))
            #jobs.append(p)
            #p.start()
        self.normalize_grid()
        self.T = self.T.reshape((self.naxes2, self.naxes1, self.naxes0))
        self.create_netcdf()
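The commented-out Process lines above sketch an unfinished attempt at parallelizing the per-file convolution. Below is a minimal, self-contained sketch of that idea, with hypothetical cube dimensions and a placeholder worker body: each worker grids one file into its own partial arrays and the parent sums them, which avoids sharing self.T across processes.

from multiprocessing import Pool, cpu_count

import numpy

NAXES0, NAXES1, NAXES2 = 64, 32, 32  # hypothetical cube dimensions

def grid_one_file(fname):
    # placeholder worker: the real version would open fname with
    # LMTOTFNetCDFFile and accumulate into T and WT as convolve() does
    T = numpy.zeros(NAXES0 * NAXES1 * NAXES2)
    WT = numpy.zeros(NAXES1 * NAXES2)
    return T, WT

def make_grid_parallel(filelist):
    pool = Pool(cpu_count())
    T = numpy.zeros(NAXES0 * NAXES1 * NAXES2)
    WT = numpy.zeros(NAXES1 * NAXES2)
    for partial_T, partial_WT in pool.map(grid_one_file, filelist):
        T += partial_T   # summing per-file partials reproduces a single shared grid
        WT += partial_WT
    pool.close()
    pool.join()
    return T.reshape((NAXES2, NAXES1, NAXES0)), WT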
Example #2
def convolve_mp(filename, biased, sigmaweight, tsysweight, RMAX, crval2, crval3, weights, naxes0, naxes1, naxes2, theta_n):
    filename = LMTOTFNetCDFFile(filename)  # rebind: the path string becomes the opened file object

    # these arrays need to be shared in memory between
    # all of the processes writing to the output grid
    #T = numpy.zeros(naxes0*naxes1*naxes2)    
    #WT = numpy.zeros(naxes1*naxes2)
    #TSYS = numpy.zeros(naxes1*naxes2)
    global T
    global WT
    global TSYS

    MAX_WT = numpy.ones(naxes1*naxes2) * (-1.0e30)  # running max kernel weight per pixel
    INT_TIME = numpy.zeros(naxes1*naxes2)  # accumulated integration time per pixel
    nchan = filename.hdu.header.nchan
    filename._reduce_data(biased)

    if (sigmaweight):
        filename.baseline()
    if (tsysweight):
        # radiometer weighting: each horn gets weight 1/Tsys^2
        wt1 = numpy.zeros(filename.hdu.header.nhorns)
        for ih in range(filename.hdu.header.nhorns):
            if (filename.hdu.header.tsys[ih] != 0.0):
                wt1[ih] = 1.0 / (filename.hdu.header.tsys[ih] * filename.hdu.header.tsys[ih])
            else:
                wt1[ih] = 1.0
    else:
        wt1 = 1.0
    clight = 2.99792458e10 #cm/s

    #in the future, we can just check what telescope it was from
    #and then assign a diameter value based on that
    D = 1370.0 #FCRAO diameter in cm
    ic = int(filename.hdu.header.nchan / 2.0)  # nominal center channel (overridden below)
    if (filename.hdu.header.nchan > 128):
        ic = 505
    dx = RMAX / 256.0  # radial step of the 256-entry kernel weights lookup table

    # for all uses of XPOS and YPOS below, the idmp index is bumped up by 1:
    # those arrays also store the reference positions as their first and last
    # samples, with the actual data in between

    XMAX = 60.0 * crval2  # arcminutes -> arcseconds (the crvals appear to be in arcminutes)
    YMIN = 60.0 * crval3
    lambda_D = 206264.81 * clight / (filename.hdu.header.fsky * 1.0e9) / D  # lambda/D in arcseconds
    step = theta_n / lambda_D  # grid cell size in lambda/D units

    for idmp in range(filename.hdu.header.nsample - 2):
        print idmp, "of", filename.hdu.header.nsample - 2
        for ih in range(filename.hdu.header.nhorns):
            if (sigmaweight):
                weight1 = 1.0 / (filename.hdu.data.sigma[idmp,ih] * filename.hdu.data.sigma[idmp,ih])
            elif (tsysweight):
                # without the elif, the else branch would clobber the tsys weight
                weight1 = wt1[ih]
            else:
                weight1 = wt1
            # dump offset from the map origin in lambda/D units; the index ranges
            # below clip the RMAX convolution footprint to the grid
            YBEG = 60.0 * (filename.hdu.data.YPOS[ih, idmp + 1] - YMIN) / lambda_D
            iybeg = int((YBEG - RMAX) / step  + 1)
            if (iybeg < 0):
                iybeg = 0
            iyend = int((YBEG + RMAX) / step + 1)
            if (iyend > naxes2):
                iyend = naxes2

            XBEG = 60.0 * (XMAX - filename.hdu.data.XPOS[ih, idmp + 1]) / lambda_D
            ixbeg = int((XBEG - RMAX) / step + 1)
            if (ixbeg < 0):
                ixbeg = 0
            ixend = int((XBEG + RMAX) / step + 1)
            if (ixend > naxes1):
                ixend = naxes1

            for ix_l in range(ixend - ixbeg):
                ix = ix_l + ixbeg
                deltax = XBEG - (ix * step)
                dx2 = deltax * deltax
                for iy_l in range(iyend - iybeg):
                    iy = iy_l + iybeg
                    deltay = (iy * step) - YBEG
                    rad2 = dx2 + deltay * deltay
                    if (rad2 < (RMAX * RMAX)):
                        # look up the convolution kernel weight for this radius
                        IS = int(numpy.sqrt(rad2) / dx)
                        weight0 = weights[IS]
                        wt = weight0 * weight1
                        ii = int(ix + iy * naxes1)  # flat index of this map pixel
                        WT[ii] += wt
                        TSYS[ii] += filename.hdu.header.tsys[ih] * wt
                        INT_TIME[ii] += wt * filename.hdu.header.tdump
                        if (weight0 > MAX_WT[ii]):
                            MAX_WT[ii] = weight0
                        jj = int(ii * naxes0)  # offset of this pixel's spectrum in T
                        for k in range(naxes0):
                            # skip blanked channels flagged with a huge sentinel value
                            if (filename.hdu.data.reduced[idmp, ih, k] < 10.0**30):
                                T[jj + k] += wt * filename.hdu.data.reduced[idmp, ih, k]
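convolve_mp relies on the module-level T, WT and TSYS globals, so every worker process needs access to those arrays. One way to provide that (an assumption about the missing setup code, not something shown in this example) is shared ctypes buffers installed by a Pool initializer. Note the numpy views bypass the Array locks, so concurrent += updates to the same pixel can still race.

from multiprocessing import Pool, Array, cpu_count

import numpy

def _init_worker(shared_T, shared_WT, shared_TSYS):
    # runs once inside each worker process: publish the shared buffers
    # as the module-level globals that convolve_mp expects
    global T, WT, TSYS
    T = numpy.frombuffer(shared_T.get_obj())
    WT = numpy.frombuffer(shared_WT.get_obj())
    TSYS = numpy.frombuffer(shared_TSYS.get_obj())

def make_shared_pool(naxes0, naxes1, naxes2):
    # zero-initialized double-precision buffers visible to all workers
    shared_T = Array('d', naxes0 * naxes1 * naxes2)
    shared_WT = Array('d', naxes1 * naxes2)
    shared_TSYS = Array('d', naxes1 * naxes2)
    pool = Pool(cpu_count(), initializer=_init_worker,
                initargs=(shared_T, shared_WT, shared_TSYS))
    return pool, shared_T, shared_WT, shared_TSYS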
Example #3
def gridmaker_dumps(xmin, xmax, ymin, ymax, filelist, dataloc="null", writeloc="null", cython=True, normalize=True):
    ### the 'make_grid' logic lives outside of the class so that it can be
    ### dispatched through multiprocessing
    #g = LMTOTFRegrid_mp(xmin, xmax, ymin, ymax, filelist)
    backupfilelist = list(filelist)  # copy, since the loop below mutates filelist in place
    if (dataloc == "null"):  # the prefix added below comes from dataloc
        print "Assuming that all needed files are in the local directory!"
    else:
        for i in range(len(filelist)):
            filelist[i] = dataloc + filelist[i]  # prepend the data directory to each filename
    g = initialize_regrid(xmin, xmax, ymin, ymax, filelist)
    print "Starting Grid Making Process..."
    
    p = Pool(cpu_count())
    if (cython):
        print "number of files = ", len(g.filelist)
        for i in range(len(g.filelist)):
            filename = LMTOTFNetCDFFile(g.filelist[i])
            filename._reduce_data(g.biased)
            if (g.sigmaweight):
                filename.baseline()
            if (g.tsysweight):
                wt1 = numpy.zeros(g.nhorns)
                filename.hdu.data.sigma = 0.0
                for ih in range(g.nhorns):
                    if (filename.hdu.header.tsys[ih] != 0.0):
                        wt1[ih] = 1.0 / (filename.hdu.header.tsys[ih] * filename.hdu.header.tsys[ih])
                    else:
                        wt1[ih] = 1.0
            else:
                wt1 = 1.0
            
            print "number of dumps: ", filename.hdu.header.nsample - 2, " in file ", g.filelist[i]
            for idmp in range(filename.hdu.header.nsample - 2):
                #print idmp
                p.apply_async(convolve_wrapper_dump, args=(g.biased, g.sigmaweight, g.tsysweight, g.RMAX, g.crval2, g.crval3, g.weights, g.naxes0, g.naxes1, g.naxes2, g.theta_n, filename.hdu.header.nhorns, g.nchan, filename.hdu.header.fsky, idmp, filename.hdu.data.XPOS, filename.hdu.data.YPOS, filename.hdu.data.reduced, filename.hdu.data.sigma, wt1, filename.hdu.header.tsys), callback = callback_update)
                #print "process added to Pool"
    else:
        for i in range(len(g.filelist)):
            p.apply_async(convolve_wrapper, args=(g.filelist[i], g.biased, g.sigmaweight, g.tsysweight, g.RMAX, g.crval2, g.crval3, g.weights, g.naxes0, g.naxes1, g.naxes2, g.theta_n,), callback = callback_update)
    p.close()
    p.join()
    if (normalize):
        g.normalize_grid()
        g.T = g.T.reshape((g.naxes2, g.naxes1, g.naxes0))
        print "Grid normalized and reshaped!"
        print "The stdev value of the grid is:"
        print g.T.std()
        g.create_netcdf()
    else:
        print "Assuming the filelist was delegated to multiple computers:"
        if ((writeloc == "null")or(dataloc == "null")):
            print "Need to know where the file is to be written!"
        else:
            file0 = filelist[0]
            print file0
            file0 = file0[len(dataloc):]  # strip the data directory prefix added above
            if file0.endswith(".nc"):
                filenew = file0[:-len(".nc")]  # str.strip(".nc") would trim characters, not the suffix
            else:
                filenew = file0
            print "The T, WT, TSYS, MAX_WT and naxes arrays are stored in that order in the file:"
            print writeloc + filenew + ".npz"  # numpy.savez appends the .npz extension
            naxes = numpy.array([g.naxes2, g.naxes1, g.naxes0])
            numpy.savez(writeloc+filenew, g.T, g.WT, g.TSYS, g.MAX_WT, naxes)
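The normalize=False branch implies a later combine step: each machine saves its partial grids with numpy.savez and one machine merges them. A sketch of that merge, assuming the positional arr_0..arr_4 keys that savez produces for unnamed arrays; after merging, the usual per-pixel normalization of T by WT can proceed as on a single machine.

import numpy

def combine_partial_grids(npz_paths):
    T = WT = TSYS = MAX_WT = naxes = None
    for path in npz_paths:
        with numpy.load(path) as parts:
            # arrays were saved positionally: T, WT, TSYS, MAX_WT, naxes
            t, wt, tsys, max_wt, n = [parts["arr_%d" % i] for i in range(5)]
        if T is None:
            T, WT, TSYS, MAX_WT, naxes = t, wt, tsys, max_wt, n
        else:
            T += t  # weighted sums add straight across machines
            WT += wt
            TSYS += tsys
            MAX_WT = numpy.maximum(MAX_WT, max_wt)  # keep the largest kernel weight
    return T, WT, TSYS, MAX_WT, naxes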