Example #1
 def __init__(self,
              work_queue,
              result_queue,
              IdSharedMem=None,
              StopWhenQueueEmpty=False,
              NImGauss=31,
              DeltaChi2=4.,
              ListGauss=None,
              GSig=None,
              SigMin=None,
              Var=None):
     multiprocessing.Process.__init__(self)
     self.work_queue = work_queue
     self.result_queue = result_queue
     self.kill_received = False
     self.exit = multiprocessing.Event()
     self.IdSharedMem = IdSharedMem
     self.StopWhenQueueEmpty = StopWhenQueueEmpty
     self.CubeMeanVariablePSF = NpShared.GiveArray("%sCubeMeanVariablePSF" %
                                                   self.IdSharedMem)
     self.MeanModelImage = NpShared.GiveArray("%sMeanModelImage" %
                                              self.IdSharedMem)
     self.MeanResidual = NpShared.GiveArray("%sMeanResidual" %
                                            self.IdSharedMem)
     self.NImGauss = NImGauss
     self.DeltaChi2 = DeltaChi2
     self.ListGauss = ListGauss
     self.NGauss = len(ListGauss)
     self.GSig = GSig
     self.SigMin = SigMin
     self.Var = Var
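Example #1 attaches to arrays that another process has already published under the IdSharedMem prefix. Below is a minimal sketch of that publish/attach round trip; the prefix and array name are illustrative, and only the ToShared/GiveArray behaviour visible in the examples on this page is assumed.

import numpy as np
import NpShared

IdSharedMem = "ddf.12345."  # illustrative prefix; real code derives it from e.g. the PID

# publisher side: copy an array into shared memory under a well-known name
A = np.arange(10, dtype=np.float32)
NpShared.ToShared("%sMeanResidual" % IdSharedMem, A)

# consumer side (typically another process): attach to the same array by name
B = NpShared.GiveArray("%sMeanResidual" % IdSharedMem)
assert (B == A).all()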
Example #2
    def run(self):
        while not self.kill_received and self.CondContinue():
            #gc.enable()
            try:
                iQueue = self.work_queue.get_nowait()  #(True,2)
            except Exception as e:
                # print("Exception worker: %s" % str(e))
                break

            #print "Start %i"%iQueue

            Queue = NpShared.GiveArray("%sQueue_%3.3i" %
                                       (self.IdSharedMem, iQueue))
            self.CurrentInvCov = NpShared.GiveArray("%sInvCov_AllFacet" %
                                                    (self.IdSharedMem))

            for iJob in range(Queue.shape[0]):
                x0, y0, FacetID = Queue[iJob]

                iFacet = FacetID
                self.CurrentFacetID = FacetID
                #self.CurrentCF=NpShared.GiveArray("%sConvMatrix_Facet_%4.4i"%(self.IdSharedMem,iFacet))
                self.CurrentCM = NpShared.GiveArray("%sCM_Facet%4.4i" %
                                                    (self.IdSharedMem, iFacet))
                #self.CurrentInvCov=NpShared.GiveArray("%sInvCov_Facet%4.4i"%(self.IdSharedMem,iFacet))
                # if self.CurrentInvCov is None:
                #     invCM=ModLinAlg.invSVD(np.float64(self.CurrentCM[0,0]))/self.Var
                #     self.CurrentInvCov=NpShared.ToShared("%sInvCov_Facet%4.4i"%(self.IdSharedMem,iFacet),invCM)

                iGauss = self.SmearThisComp(x0, y0)
                Queue[iJob, 2] = iGauss

            self.result_queue.put({"Success": True, "iQueue": iQueue})
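The loop above drains the work queue with get_nowait() and treats any exception as "queue empty". A minimal Python 3 sketch of the same drain pattern, catching the specific queue.Empty exception instead of a bare Exception:

import multiprocessing
import queue
import time

work_queue = multiprocessing.Queue()
for i in range(4):
    work_queue.put(i)
time.sleep(0.1)  # let the queue's feeder thread flush the items

while True:
    try:
        item = work_queue.get_nowait()
    except queue.Empty:
        # note: with multiprocessing.Queue, Empty can also fire while items
        # are still in flight, so real workers need an extra stop condition
        break
    print("processing", item)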
Example #3
    def GiveDicoJonesMatrices(self):
        print("  Getting Jones matrices from Shared Memory", file=log)
        DicoJonesMatrices = {}

        GD = self.GD

        SolsFile = GD["DDESolutions"]["DDSols"]

        if SolsFile != "":
            DicoJones_killMS = NpShared.SharedToDico("%sJonesFile_killMS" %
                                                     self.IdSharedMem)
            DicoJonesMatrices["DicoJones_killMS"] = DicoJones_killMS
            DicoJonesMatrices["DicoJones_killMS"][
                "MapJones"] = NpShared.GiveArray("%sMapJones_killMS" %
                                                 self.IdSharedMem)
            DicoClusterDirs_killMS = NpShared.SharedToDico(
                "%sDicoClusterDirs_killMS" % self.IdSharedMem)
            DicoJonesMatrices["DicoJones_killMS"][
                "DicoClusterDirs"] = DicoClusterDirs_killMS

        ApplyBeam = (GD["Beam"]["Model"] is not None)
        if ApplyBeam:
            DicoJones_Beam = NpShared.SharedToDico("%sJonesFile_Beam" %
                                                   self.IdSharedMem)
            DicoJonesMatrices["DicoJones_Beam"] = DicoJones_Beam
            DicoJonesMatrices["DicoJones_Beam"][
                "MapJones"] = NpShared.GiveArray("%sMapJones_Beam" %
                                                 self.IdSharedMem)
            DicoClusterDirs_Beam = NpShared.SharedToDico(
                "%sDicoClusterDirs_Beam" % self.IdSharedMem)
            DicoJonesMatrices["DicoJones_Beam"][
                "DicoClusterDirs"] = DicoClusterDirs_Beam

        return DicoJonesMatrices
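Example #3 rebuilds dictionaries that a parent process published earlier under names like "%sJonesFile_killMS". A minimal sketch of the dict round trip; it assumes NpShared.DicoToShared is the publishing counterpart of SharedToDico, which this page does not itself show:

import numpy as np
import NpShared

IdSharedMem = "ddf.12345."  # illustrative prefix

# publisher side: push each array-valued entry into shared memory (assumed API)
Dico = {"Jones": np.zeros((8, 2, 2), np.complex64), "t0": np.arange(8.)}
NpShared.DicoToShared("%sJonesFile_killMS" % IdSharedMem, Dico)

# consumer side: reassemble the dict by name, as GiveDicoJonesMatrices does
DicoBack = NpShared.SharedToDico("%sJonesFile_killMS" % IdSharedMem)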
Example #4
    def UnPackMapping(self):
        Map = NpShared.GiveArray("%sMappingSmearing" % (self.IdSharedMem))
        Nb = Map[0]
        NRowInBlocks = Map[1:Nb + 1]
        StartRow = Map[Nb + 1:2 * Nb + 1]
        # print
        # print NRowInBlocks.tolist()
        # print StartRow.tolist()
        MaxRow = 0

        for i in range(Nb):
            ii = StartRow[i]
            MaxRow = np.max([MaxRow, np.max(Map[ii:ii + NRowInBlocks[i]])])
            print("(iblock= %i , istart= %i), Nrow=%i" %
                  (i, StartRow[i], NRowInBlocks[i]), Map[ii:ii + NRowInBlocks[i]])
            # if MaxRow >= 1080: stop

        print(MaxRow)
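UnPackMapping implies a flat integer layout: Map[0] holds the block count Nb, the next Nb entries hold each block's row count, the following Nb entries hold each block's absolute start offset into Map, and the row lists follow back to back. A sketch of the matching packer, reconstructed from the slicing above (illustrative, not the library's own packer):

import numpy as np

def pack_mapping(blocks):
    # blocks: list of 1-D integer arrays, one row list per block
    Nb = len(blocks)
    NRowInBlocks = np.array([b.size for b in blocks])
    # start offsets are absolute indices into the packed array:
    # the header is 1 + 2*Nb words, then the row lists follow
    StartRow = 1 + 2 * Nb + np.concatenate(([0], np.cumsum(NRowInBlocks)[:-1]))
    return np.concatenate([[Nb], NRowInBlocks, StartRow] + blocks).astype(np.int64)

Map = pack_mapping([np.array([5, 6]), np.array([7])])
Nb = Map[0]
assert (Map[Map[Nb + 1]:Map[Nb + 1] + Map[1]] == [5, 6]).all()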
Example #5
    def Smear(self, Parallel=True):
        if Parallel:
            NCPU = self.NCPU
        else:
            NCPU = 1
        StopWhenQueueEmpty = True
        print("Building queue", file=log)
        self.ModelOut = np.zeros_like(self.MeanModelImage)
        indx, indy = np.where(self.MeanModelImage[0, 0] != 0)
        #indx,indy=np.where(self.MeanModelImage==np.max(self.MeanModelImage))
        work_queue = multiprocessing.Queue()
        result_queue = multiprocessing.Queue()

        # aim for roughly 100 work chunks per worker, with at least one pixel per chunk
        SizeMax = int(indx.size / float(NCPU) / 100.)
        SizeMax = max(SizeMax, 1)
        iPix = 0
        iQueue = 0
        Queue = []
        while iPix < indx.size:
            xc, yc = indx[iPix], indy[iPix]
            FacetID = self.PSFServer.giveFacetID2(xc, yc)
            #DicoOrder={"xy":(xc,yc),
            #           "FacetID":FacetID}
            Queue.append([xc, yc, FacetID])

            iPix += 1
            if (len(Queue) == SizeMax) or (iPix == indx.size):
                NpShared.ToShared("%sQueue_%3.3i" % (self.IdSharedMem, iQueue),
                                  np.array(Queue))
                work_queue.put(iQueue)
                Queue = []
                iQueue += 1

        NJobs = indx.size

        workerlist = []

        pBAR = ProgressBar(Title=" Find gaussian")
        #pBAR.disable()
        pBAR.render(0, '%4i/%i' % (0, NJobs))
        for ii in range(NCPU):
            W = WorkerSmear(work_queue,
                            result_queue,
                            IdSharedMem=self.IdSharedMem,
                            StopWhenQueueEmpty=StopWhenQueueEmpty,
                            NImGauss=self.NImGauss,
                            DeltaChi2=self.DeltaChi2,
                            ListGauss=self.ListGauss,
                            GSig=self.GSig,
                            Var=self.Var,
                            SigMin=self.SigMin)
            workerlist.append(W)
            if Parallel:
                workerlist[ii].start()
            else:
                workerlist[ii].run()

        N = self.NImGauss
        iResult = 0
        #print "!!!!!!!!!!!!!!!!!!!!!!!!",iResult,NJobs
        while iResult < NJobs:
            DicoResult = None
            # for result_queue in List_Result_queue:
            #     if result_queue.qsize()!=0:
            #         try:
            #             DicoResult=result_queue.get_nowait()

            #             break
            #         except:

            #             pass
            #         #DicoResult=result_queue.get()
            #print "!!!!!!!!!!!!!!!!!!!!!!!!! Qsize",result_queue.qsize()
            #print work_queue.qsize(),result_queue.qsize()
            if result_queue.qsize() != 0:
                try:
                    DicoResult = result_queue.get_nowait()
                except Exception:
                    pass
                    #DicoResult=result_queue.get()

            if DicoResult is None:
                time.sleep(0.001)
                continue

            if DicoResult["Success"]:
                iQueue = DicoResult["iQueue"]
                Queue = NpShared.GiveArray("%sQueue_%3.3i" %
                                           (self.IdSharedMem, iQueue))
                for iJob in range(Queue.shape[0]):
                    x0, y0, iGauss = Queue[iJob]
                    SMax = self.MeanModelImage[0, 0, x0, y0]
                    SubModelOut = self.ModelOut[0, 0][
                        x0 - N // 2:x0 + N // 2 + 1,
                        y0 - N // 2:y0 + N // 2 + 1]
                    SubModelOut += self.ListRestoredGauss[iGauss] * SMax
                    #iGauss=0
                    #print
                    #print SMax
                    #print np.sum(self.ListGauss[iGauss])
                    #print
                    SubModelOut += self.ListGauss[iGauss] * SMax

                    iResult += 1
                    NDone = iResult
                    intPercent = int(100 * NDone / float(NJobs))
                    pBAR.render(intPercent, '%4i/%i' % (NDone, NJobs))

        for ii in range(NCPU):
            try:
                workerlist[ii].shutdown()
                workerlist[ii].terminate()
                workerlist[ii].join()
            except Exception:
                pass

        return self.ModelOut
Example #6
    def BuildSmearMappingParallel(self, DATA, GridChanMapping):
        print("Build decorrelation mapping ...", file=log)

        na = self.MS.na

        l = self.radiusRad
        dPhi = np.sqrt(6. * (1. - self.Decorr))

        NChan = self.MS.ChanFreq.size
        self.BlocksRowsList = []

        InfoSmearMapping = {}
        InfoSmearMapping["freqs"] = self.MS.ChanFreq
        InfoSmearMapping["dfreqs"] = self.MS.dFreq
        InfoSmearMapping["dPhi"] = dPhi
        InfoSmearMapping["l"] = l
        BlocksRowsList = []

        joblist = [(a0, a1) for a0 in range(na) for a1 in range(na)
                   if a0 != a1]

        WorkerMapName = Multiprocessing.getShmURL("SmearWorker.%d")

        results = Multiprocessing.runjobs(
            joblist,
            title="Smear mapping",
            target=_smearmapping_worker,
            kwargs=dict(DATA=DATA,
                        InfoSmearMapping=InfoSmearMapping,
                        WorkerMapName=WorkerMapName,
                        GridChanMapping=GridChanMapping))

        # process worker results
        # for each map (each array returned from worker), BlockSizes[MapName] will
        # contain a list of BlocksSizesBL entries returned from that worker
        RowsBlockSizes = {}
        NTotBlocks = 0
        NTotRows = 0
        worker_maps = {}

        for DicoResult in results:
            if not DicoResult["Empty"]:
                MapName = DicoResult["MapName"]
                shmap = worker_maps.get(MapName)
                if shmap is None:
                    shmap = worker_maps[MapName] = NpShared.GiveArray(MapName)
                bl = DicoResult["bl"]
                rowslice = DicoResult["Slice"]
                bsz = np.array(DicoResult["BlocksSizesBL"])
                RowsBlockSizes[bl] = shmap[rowslice], bsz
                NTotBlocks += DicoResult["NBlocksTotBL"]
                NTotRows += bsz.sum()

        # output mapping has 2 words for the total size, plus 2*NTotBlocks header, plus NTotRows blocklists
        OutputMapping = np.zeros((2 + 2 * NTotBlocks + NTotRows, ), np.int32)

        # the block count is stored as two 32-bit words, in case NTotBlocks exceeds 2^31
        # (we don't want to use np.int64 for the whole mapping, as that just wastes space; we may assume
        # that we have substantially fewer rows, so int32 is perfectly good as a row index etc.)
        OutputMapping[:2].view(np.uint32)[:] = (NTotBlocks & 0xFFFFFFFF, NTotBlocks >> 32)

        BlockListSizes = OutputMapping[2:2 + NTotBlocks]

        BlockLists = OutputMapping[2 + NTotBlocks:]
        iii = 0
        jjj = 0

        # now go through each per-baseline mapping, sorted by baseline
        for _, (BlocksRowsListBL,
                BlocksSizesBL) in sorted(RowsBlockSizes.items()):
            #print>>log, "  Worker: %i"%(IdWorker)

            BlockLists[iii:iii + BlocksRowsListBL.size] = BlocksRowsListBL[:]
            iii += BlocksRowsListBL.size

            # print "IdWorker,AppendId",IdWorker,AppendId,BlocksSizesBL
            # MM=np.concatenate((MM,BlocksSizesBL))
            BlockListSizes[jjj:jjj + BlocksSizesBL.size] = BlocksSizesBL[:]
            jjj += BlocksSizesBL.size

        for MapName in worker_maps.keys():
            NpShared.DelArray(MapName)

        #print>>log, "  Put in shared mem"

        NVis = np.where(DATA["A0"] != DATA["A1"])[0].size * NChan
        #print>>log, "  Number of blocks:         %i"%NTotBlocks
        #print>>log, "  Number of 4-Visibilities: %i"%NVis
        # percentage reduction from NVis visibilities to NTotBlocks smearing blocks
        fact = (100. * (NVis - NTotBlocks) / float(NVis))

        # self.UnPackMapping()
        # print FinalMapping

        return OutputMapping, fact
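The consumer of this mapping must reassemble the 64-bit block count from the first two 32-bit words. A minimal decoding sketch matching the packing above:

import numpy as np

def decode_ntotblocks(OutputMapping):
    # the first two int32 words hold the low and high halves of NTotBlocks
    lo, hi = OutputMapping[:2].view(np.uint32)
    return int(lo) | (int(hi) << 32)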
Example #7
 def load_impl(self):
     return NpShared.GiveArray(_to_shm(self.path))
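GiveArray returns None when nothing is published under a name (the commented InvCov fallback in Example #2 relies on this), so load_impl can double as a cache probe. A hedged sketch of that load-or-compute pattern; the helper name is ours, and ToShared is assumed to return the shared copy, as its use in Example #2 suggests:

import NpShared

def load_or_compute(shm_name, compute):
    arr = NpShared.GiveArray(shm_name)  # None if nothing published under this name yet
    if arr is None:
        arr = NpShared.ToShared(shm_name, compute())  # publish and keep the shared copy
    return arr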
Example #8
    def CalcWeights(self, uvw_weights_flags_freqs, Robust=0, Weighting="Briggs", Super=1,
                          nbands=1, band_mapping=None, weightnorm=1, force_unity_weight=False):
        """
        Computes imaging weights in "MFS mode", when all uv-points are binned onto a single grid.
        Args:
            uvw_weights_flags_freqs: list of (uv, weights, flags, freqs) tuples, one per MS;
                if weights is a string, it is treated as the name of a shared array
            Robust:                  robustness
            Weighting:               natural, uniform, briggs
            Super:                   !=1 for superuniform or superrobust: uv bin size is 1/(super*FoV)
            nbands:                  number of frequency bands to compute weights on (if band_mapping is not None)
            band_mapping:            band_mapping[iMS][ichan] gives the band number of channel #ichan of MS #iMS
                                     if None, the "MFS weighting" is used, with all frequency points weighted
                                     on a single grid
            weightnorm:              multiply weights by this factor
            force_unity_weight:      force all weights to 1

        Returns:
            list of imaging weights arrays, one per MS, same shape as original data weights
        """

        Weighting = Weighting.lower()
        if Weighting == "natural":
            print("Weighting in natural mode", file=log)
            if force_unity_weight:
                for uv, weights, flags, freqs in uvw_weights_flags_freqs:
                    if flags is not None:
                        if type(weights) is str:
                            NpShared.GiveArray(weights).fill(1)
                        else:
                            weights.fill(1)
            return [x[1] for x in uvw_weights_flags_freqs]

        nch, npol, npixIm, _ = self.ImShape
        FOV = self.CellSizeRad * npixIm
        cell = 1. / (Super * FOV)

        if band_mapping is None:
            nbands = 1
            print("initializing weighting grid for single band (or MFS weighting)", file=log)
        else:
            print("initializing weighting grids for %d bands" % nbands, file=log)

        # find max grid extent by considering _unflagged_ UVs
        xymax = 0
        for uv, weights, flags, freqs in uvw_weights_flags_freqs:
            if flags is None:  # entire dataset flagged
                continue
            # max |u|,|v| in lambda, over unflagged points only
            uvsel = abs(uv)[~flags, :]
            if uvsel.size == 0:
                print(ModColor.Str("  A dataset is fully flagged"), file=log)
                continue
            uvmax = uvsel.max() * freqs.max() / _cc
            xymax = max(xymax, int(math.floor(uvmax / cell)))
        if xymax == 0:
            raise Exception('All datasets are fully flagged')

        xymax += 1
        # grid will be from [-xymax,xymax] in U and [0,xymax] in V
        npixx = xymax * 2 + 1
        npixy = xymax + 1
        npix = npixx * npixy


        print("Calculating imaging weights on an [%i,%i]x%i grid with cellsize %g" % (npixx, npixy, nbands, cell), file=log)
        grid0 = np.zeros((nbands, npix), np.float64)
        grid = grid0.reshape((nbands*npix,))

        # this will be a per-MS list of weights and an index array, or None if an MS is all flagged
        weights_index = [(None, None)] * len(uvw_weights_flags_freqs)

        for iMS, (uv, weights_or_path, flags, freqs) in enumerate(uvw_weights_flags_freqs):
            if flags is None:  # entire chunk flagged
                continue
            weights = NpShared.GiveArray(weights_or_path) if type(weights_or_path) is str else weights_or_path
            if force_unity_weight:
                weights.fill(1)
                weights[flags,...]=0

            elif weightnorm != 1:
                weights *= weightnorm
            # flip sign of negative v values -- we'll only grid the top half of the plane
            uv[uv[:, 1] < 0] *= -1
            # convert u/v to lambda, and then to pixel offset
            uv = uv[..., np.newaxis] * freqs[np.newaxis, np.newaxis, :] / _cc
            uv = np.floor(uv / cell).astype(int)
            # u is offset, v isn't since it's the top half

            x = uv[:, 0, :]
            y = uv[:, 1, :]
            x += xymax  # offset, since X grid starts at -xymax
            # convert to index array -- this gives the number of the uv-bin on the grid
            index = y * npixx + x
            # if we're in per-band weighting mode, then adjust the index to refer to each band's grid
            if band_mapping is not None:
                bandmap = band_mapping[iMS]
                # uv has shape nvis,nfreq; bandmap has shape nfreq
                index += bandmap[np.newaxis,:]*npix
            # zero weight refers to zero cell (otherwise it may end up outside the grid, since grid is
            # only big enough to accommodate the *unflagged* uv-points)
            index[weights==0] = 0

            weights_index[iMS] = weights_or_path, index
            del uv
            print("Accumulating weights (%d/%d)" % (iMS + 1, len(uvw_weights_flags_freqs)), file=log)
            # accumulate onto grid
            # print>>log,weights,index
            _pyGridderSmearPols.pyAccumulateWeightsOntoGrid(grid, weights.ravel(), index.ravel())

        if Weighting == "uniform":
            #            print>>log,"adjusting grid to uniform weight"
            #           grid[grid!=0] = 1/grid[grid!=0]
            print("applying uniform weighting (super=%.2f)" % Super, file=log)
            for weights_or_path, index in weights_index:
                if index is not None:
                    weights = NpShared.GiveArray(weights_or_path) if type(weights_or_path) is str else weights_or_path
                    weights /= grid[index]

        elif Weighting == "briggs" or Weighting == "robust":
            numeratorSqrt = 5.0 * 10 ** (-Robust)
            for band in range(nbands):
                print("applying Briggs weighting (robust=%.2f, super=%.2f, band %d)" % (Robust, Super, band), file=log)
                grid1 = grid0[band,:]
                avgW = (grid1 ** 2).sum() / grid1.sum()
                sSq = numeratorSqrt ** 2 / avgW
                grid1[...] = 1 / (1 + grid1 * sSq)
            for weights_or_path, index in weights_index:
                if index is not None:
                    weights = NpShared.GiveArray(weights_or_path) if type(weights_or_path) is str else weights_or_path
                    weights *= grid[index]

        else:
            raise ValueError("unknown weighting \"%s\"" % Weighting)

        print("weights computed", file=log)
        # return the (in-place updated) weights, matching the natural-weighting branch above
        return [x[1] for x in uvw_weights_flags_freqs]
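The Briggs branch implements standard robust weighting: with gridded natural weight W_k per uv cell, each cell is scaled by 1 / (1 + W_k * S^2), where S^2 = (5 * 10^-R)^2 / (sum(W_k^2) / sum(W_k)). A toy NumPy sketch of the per-band transform (values illustrative):

import numpy as np

Robust = 0.0
grid1 = np.array([0., 4., 16., 64.])     # gridded natural weight per uv cell (toy values)
numeratorSqrt = 5.0 * 10 ** (-Robust)
avgW = (grid1 ** 2).sum() / grid1.sum()  # weight-weighted mean of the gridded weights
sSq = numeratorSqrt ** 2 / avgW
grid1 = 1. / (1. + grid1 * sSq)          # Robust -> +inf tends to natural, -> -inf to uniform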
Example #9
    def calcDistanceMatrixMinParallel(self, ListIslands, Parallel=True):
        NIslands = len(ListIslands)
        self.D = np.zeros((NIslands, NIslands), np.float32)
        self.dx = np.zeros((NIslands, NIslands), np.int32)
        self.dy = np.zeros((NIslands, NIslands), np.int32)

        work_queue = multiprocessing.JoinableQueue()
        for iIsland in range(NIslands):
            work_queue.put({"iIsland": (iIsland)})

        result_queue = multiprocessing.JoinableQueue()
        NJobs = work_queue.qsize()
        workerlist = []
        NCPU = self.NCPU

        ListEdgeIslands = self.giveEdgesIslands(ListIslands)

        for ii in range(NCPU):
            W = WorkerDistance(work_queue, result_queue, ListEdgeIslands,
                               self.IdSharedMem)
            workerlist.append(W)
            if Parallel:
                workerlist[ii].start()

        pBAR = ProgressBar(Title="  Calc. Dist. ")
        pBAR.render(0, NJobs)
        iResult = 0
        if not Parallel:
            for ii in range(NCPU):
                workerlist[ii].run()  # just run until all work is completed

        while iResult < NJobs:
            DicoResult = None
            if result_queue.qsize() != 0:
                try:
                    DicoResult = result_queue.get()
                except Exception:
                    pass

            if DicoResult is None:
                time.sleep(0.5)
                continue

            if DicoResult["Success"]:
                iResult += 1
                NDone = iResult
                pBAR.render(NDone, NJobs)

                iIsland = DicoResult["iIsland"]
                Result = NpShared.GiveArray("%sDistances_%6.6i" %
                                            (self.IdSharedMem, iIsland))

                self.dx[iIsland] = Result[0]
                self.dy[iIsland] = Result[1]
                self.D[iIsland] = Result[2]
                NpShared.DelAll("%sDistances_%6.6i" %
                                (self.IdSharedMem, iIsland))

        if Parallel:
            for ii in range(NCPU):
                workerlist[ii].shutdown()
                workerlist[ii].terminate()
                workerlist[ii].join()
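The result loop above implies that each worker publishes a (3, NIslands) array named "%sDistances_%6.6i", with rows holding dx, dy and the distance D between its island and every other. A minimal worker-side sketch that would satisfy this reader; the packing is reconstructed from the consumer code, not taken from WorkerDistance itself:

import numpy as np
import NpShared

def publish_island_distances(IdSharedMem, iIsland, dx, dy, D):
    # rows: pixel offsets dx, dy and distance D to every other island;
    # the parent casts rows 0 and 1 back to int32 on read
    Result = np.array([dx, dy, D], np.float32)  # shape (3, NIslands)
    NpShared.ToShared("%sDistances_%6.6i" % (IdSharedMem, iIsland), Result)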