Example #1
def ConvolveGaussianParallel(shareddict,
                             field_in,
                             field_out,
                             CellSizeRad=None,
                             GaussPars=[(0., 0., 0.)],
                             Normalise=False):
    """Convolves images held in a dict, using APP.
    """
    Ain0 = shareddict[field_in]
    nch, npol, _, _ = Ain0.shape
    Aout = shareddict[field_out]
    # single channel? Handle serially
    if nch == 1:
        return ConvolveGaussian(shareddict, field_in, field_out, 0,
                                CellSizeRad, GaussPars[0], None, Normalise)

    jobid = "convolve:%s:%s:" % (field_in, field_out)
    for ch in range(nch):
        sd_rw = shareddict.readwrite()
        APP.runJob(jobid + str(ch),
                   _convolveSingleGaussianFFTW_noret,
                   args=(sd_rw, field_in, field_out, ch, CellSizeRad,
                         GaussPars[ch], None, Normalise))
    APP.awaitJobResults(jobid + "*")  #, progress="Convolving")

    return Aout
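For reference, the worker dispatched by APP.runJob above must accept exactly the positional arguments packed into args. A plausible sketch, assuming the worker simply delegates per channel to the same ConvolveGaussian used on the single-channel path (the real _convolveSingleGaussianFFTW_noret may differ):

def _convolveSingleGaussianFFTW_noret(sd, field_in, field_out, ch,
                                      CellSizeRad, gausspars, gaussfun,
                                      Normalise):
    # Hedged sketch: signature inferred from the args tuple above; the
    # body (delegation to the serial-path ConvolveGaussian) is an assumption.
    # Convolves one channel plane in place through the shared dict and
    # returns nothing, so no per-job result is shipped back to the parent.
    ConvolveGaussian(sd, field_in, field_out, ch,
                     CellSizeRad, gausspars, gaussfun, Normalise)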
Example #2
    def Interpol(self):
        APP.startWorkers()
        if "TEC" in self.DicoFile0["SmoothMode"]:
            TECArray = NpShared.ToShared("%sTECArray" % IdSharedMem,
                                         self.DicoFile0["SolsTEC"])
            CPhaseArray = NpShared.ToShared("%sCPhaseArray" % IdSharedMem,
                                            self.DicoFile0["SolsCPhase"])
            nt, nd, na = TECArray.shape
            iJob = 0

            for it in range(nt):
                APP.runJob("InterpolTECTime_%d" % iJob,
                           self.InterpolTECTime,
                           args=(it, ))  #,serial=True)
                iJob += 1
            workers_res = APP.awaitJobResults("InterpolTECTime*",
                                              progress="Interpol TEC")

        # NB: nt is only set in the TEC branch above, so this loop assumes
        # a TEC-type SmoothMode populated it.
        iJob = 0
        for it in range(nt):
            APP.runJob("InterpolAmpTime_%d" % iJob,
                       self.InterpolAmpTime,
                       args=(it, ))  #,serial=True)
            iJob += 1
        workers_res = APP.awaitJobResults("InterpolAmpTime*",
                                          progress="Interpol Amp")

        APP.shutdown()
        Multiprocessing.cleanupShm()
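Across these examples the lifecycle is identical: start the worker pool, enqueue jobs under a common id prefix, then block on a wildcard. A condensed sketch of the pattern (MyBatch, work_func and n_items are placeholders, not names from the source):

APP.startWorkers()                    # spin up the worker pool
for i in range(n_items):
    APP.runJob("MyBatch_%d" % i,      # unique id per job
               work_func,             # callable executed in a worker
               args=(i, ))            # payload for the worker
APP.awaitJobResults("MyBatch_*",      # wildcard collects the whole batch
                    progress="My batch")
APP.shutdown()                        # tear down the pool...
Multiprocessing.cleanupShm()          # ...and release shared memory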
Example #3
    def giveWeigthParallel(self):
        self.DATA = shared_dict.attach(self.DictName)
        self.na = self.DATA["na"]
        for A0 in range(0, self.na):
            for A1 in range(A0 + 1, self.na):
                APP.runJob("giveWeigthChunk:%d:%d" % (A0, A1),
                           self.giveWeigthChunk,
                           args=(A0, A1,
                                 self.DATA.readwrite()))  #,serial=True)
        APP.awaitJobResults("giveWeigthChunk:*", progress="CalcWeight")
Example #4
    def giveWeigthParallel(self):
        self.DATA = shared_dict.attach(self.DictName)
        nrow, nch, npol = self.DATA["data"].shape
        self.na = self.DATA["na"]
        ntu = self.DATA["times_unique"].size
        self.DATA["Wa"] = np.zeros((self.na, ntu), np.float32)
        for A0 in range(self.na):
            APP.runJob("giveWeigthAnt:%d" % (A0),
                       self.giveWeigthAnt,
                       args=(A0, ))  #,serial=True)
        APP.awaitJobResults("giveWeigthAnt:*", progress="CalcWeight")
Example #5
    def StackAll(self):
        while self.iCurrentMS < self.nMS:
            if self.LoadNextMS() == "NotRead": continue
            print("Making dynamic spectra...", file=log)

            FF = self.DicoGrids["DomainEdges_Freq"]
            TT = self.DicoGrids["DomainEdges_Time"]
            for iTime in range(TT.size - 1):
                for iFreq in range(FF.size - 1):
                    APP.runJob("Stack_SingleTime:%d:%d"%(iTime,iFreq), 
                               self.Stack_SingleTime,
                               args=(iTime,iFreq))#,serial=True)

            APP.awaitJobResults("Stack_SingleTime:*", progress="Append MS %i"%self.DicoDATA["iMS"])

            
            FOut="%s.Weights.npy"%self.DicoDATA["MSName"]
            
            self.DicoDATA.reload()
            
            # print(self.DicoDATA["WOUT"])
            #self.DicoDATA["WOUT"]/=np.median(self.DicoDATA["WOUT"])
            
            w=self.DicoDATA["WOUT"]

            w/=np.median(w)
            w[w<=0]=0
            w[w>2.]=2
            
            log.print("    saving weights as %s" % FOut)
            np.save(FOut, w)  # w aliases DicoDATA["WOUT"], rescaled in place above

        self.Finalise()
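The three-line normalisation above is equivalent to a median rescale followed by np.clip; a standalone NumPy restatement for clarity:

import numpy as np

def normalise_weights(w):
    # Same effect as the in-place block above: rescale to unit median,
    # then clamp to [0, 2] so negative or outlier weights cannot dominate.
    w = w / np.median(w)
    return np.clip(w, 0.0, 2.0)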
Example #6
    def StackAll(self):
        while self.iCurrentMS < self.nMS:
            if self.LoadNextMS() == "NotRead": continue
            print("Making dynamic spectra...", file=log)
            for iTime in range(self.NTimes):
                APP.runJob("Stack_SingleTime:%d" % (iTime),
                           self.Stack_SingleTime,
                           args=(iTime, ))  #,serial=True)
            APP.awaitJobResults("Stack_SingleTime:*",
                                progress="Append MS %i" % self.DicoDATA["iMS"])

        self.Finalise()
Example #7
    def StackAll(self):
        while self.iCurrentMS < self.nMS:
            if self.LoadNextMS() == "NotRead": continue
            print("Making dynamic spectra...", file=log)
            for iTime in range(self.NTimes):
                APP.runJob("Stack_SingleTime:%d" % (iTime),
                           self.Stack_SingleTime,
                           args=(iTime, ))  #,serial=True)
            APP.awaitJobResults("Stack_SingleTime:*",
                                progress="Append MS %i" % self.DicoDATA["iMS"])
            FOut = "%s.Weights.npy" % self.DicoDATA["MSName"]

            log.print("    saving weights as %s" % FOut)
            self.DicoDATA.reload()
            # print(self.DicoDATA["WOUT"])
            self.DicoDATA["WOUT"] /= np.median(self.DicoDATA["WOUT"])
            np.save(FOut, self.DicoDATA["WOUT"])

        self.Finalise()
Example #8
    def giveDicoInitIndiv(self,
                          ListIslands,
                          ModelImage,
                          DicoDirty,
                          ListDoIsland=None):
        DicoInitIndiv = shared_dict.create("DicoInitIsland")
        ParmDict = shared_dict.create("InitSSDModelHMP")
        ParmDict["ModelImage"] = ModelImage
        ParmDict["GridFreqs"] = self.GridFreqs
        ParmDict["DegridFreqs"] = self.DegridFreqs

        print >> log, "Initialise islands (parallelised over islands)"
        if not self.GD["GAClean"]["ParallelInitHMP"]:
            pBAR = ProgressBar(Title="  Init islands")
            for iIsland, Island in enumerate(ListIslands):
                if not ListDoIsland or ListDoIsland[iIsland]:
                    subdict = DicoInitIndiv.addSubdict(iIsland)
                    self._initIsland_worker(
                        subdict, iIsland, Island, self.DicoVariablePSF,
                        DicoDirty, ParmDict,
                        self.InitMachine.DeconvMachine.facetcache, 1)
                pBAR.render(iIsland, len(ListIslands))
        else:
            for iIsland, Island in enumerate(ListIslands):
                if not ListDoIsland or ListDoIsland[iIsland]:
                    subdict = DicoInitIndiv.addSubdict(iIsland)
                    APP.runJob("InitIsland:%d" % iIsland,
                               self._initIsland_worker,
                               args=(subdict.writeonly(), iIsland, Island,
                                     self.DicoVariablePSF.readonly(),
                                     DicoDirty.readonly(), ParmDict.readonly(),
                                     self.InitMachine.DeconvMachine.facetcache.
                                     readonly(), 1))
            APP.awaitJobResults("InitIsland:*", progress="Init islands")
            DicoInitIndiv.reload()

        ParmDict.delete()

        return DicoInitIndiv
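One detail worth flagging in this example: the per-island subdict is passed writeonly() while the PSF, dirty image and parameter dicts are readonly(), and after awaitJobResults the parent calls DicoInitIndiv.reload(). The subdicts were written by worker processes, so the parent's view is stale until it re-reads shared memory. A sketch of the consumer side (dict-style iteration is an assumption, not shown in the source):

APP.awaitJobResults("InitIsland:*", progress="Init islands")
DicoInitIndiv.reload()            # pick up subdicts written by workers
for iIsland in DicoInitIndiv:     # assumed: shared_dict iterates like a dict
    subdict = DicoInitIndiv[iIsland]
    # ... consume the per-island initialisation results here ...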
Example #9
    def InitMSMF(self, approx=False, cache=True, facetcache=None):
        """Initializes MSMF basis functions. If approx is True, then uses the central facet's PSF for
        all facets.
        Populates the self.facetcache dict, unless facetcache is supplied
        """
        self.DicoMSMachine = {}
        valid = True
        if facetcache is not None:
            print>> log, "HMP basis functions pre-initialized"
            self.facetcache = facetcache
        else:
            cachehash = dict(
                (section, self.GD[section]) for section in (
                    "Data", "Beam", "Selection", "Freq",
                    "Image", "Facets", "Weight", "RIME", "DDESolutions",
                    "Comp", "CF", "HMP"))
            cachepath, valid = self.maincache.checkCache(
                self.CacheFileName, cachehash,
                reset=not cache or self.PSFHasChanged)
            # do not use cache in approx mode
            if approx or not cache:
                valid = False
            if valid:
                print>>log,"Initialising HMP basis functions from cache %s"%cachepath
                self.facetcache = shared_dict.create(self.CacheFileName)
                self.facetcache.restore(cachepath)
            else:
                self.facetcache = None

        init_cache = self.facetcache is None
        if init_cache:
            self.facetcache = shared_dict.create(self.CacheFileName)

        # In any mode, start by initialising an MS machine for the central
        # facet. This will precompute the scale functions.
        centralFacet = self.PSFServer.DicoVariablePSF["CentralFacet"]

        self.DicoMSMachine[centralFacet] = MSM0 = self._initMSM_facet(
            centralFacet,
            self.facetcache.addSubdict(centralFacet)
            if init_cache else self.facetcache[centralFacet],
            None, self.SideLobeLevel, self.OffsetSideLobe, verbose=True)
        if approx:
            print("HMP approximation mode: using PSF of central facet (%d)"
                  % centralFacet, file=log)
            for iFacet in range(self.PSFServer.NFacets):
                self.DicoMSMachine[iFacet] = MSM0
        elif (self.GD["Facets"]["NFacets"] == 1
              and not self.GD["DDESolutions"]["DDSols"]):
            self.DicoMSMachine[0] = MSM0
        else:
            # if no facet cache, init in parallel
            if init_cache:
                for iFacet in range(self.PSFServer.NFacets):
                    if iFacet != centralFacet:
                        fcdict = self.facetcache.addSubdict(iFacet)
                        if self.ParallelMode:
                            args = (fcdict.writeonly(),
                                    MSM0.ScaleFuncs.readonly(),
                                    self.DicoVariablePSF.readonly(),
                                    iFacet, self.SideLobeLevel,
                                    self.OffsetSideLobe, False)
                            APP.runJob("InitHMP:%d" % iFacet,
                                       self._initMSM_handler, args=args)
                        else:
                            self.DicoMSMachine[iFacet] = \
                                self._initMSM_facet(iFacet, fcdict, None,
                                                    self.SideLobeLevel, self.OffsetSideLobe, MSM0=MSM0, verbose=False)

                if self.ParallelMode:
                    APP.awaitJobResults("InitHMP:*", progress="Init HMP")
                    self.facetcache.reload()

            # now reinit from cache (since cache was computed by subprocesses)
            for iFacet in range(self.PSFServer.NFacets):
                if iFacet not in self.DicoMSMachine:
                    self.DicoMSMachine[iFacet] = \
                        self._initMSM_facet(iFacet, self.facetcache[iFacet], None,
                                            self.SideLobeLevel, self.OffsetSideLobe, MSM0=MSM0, verbose=False)

            # write cache to disk, unless in a mode where we explicitly don't want it
            if facetcache is None and not valid and cache and not approx:
                try:
                    print("  saving HMP cache to %s" % cachepath, file=log)
                    self.facetcache.save(cachepath)
                    self.maincache.saveCache(self.CacheFileName)
                    self.PSFHasChanged = False
                    print("  HMP init done", file=log)
                except Exception:
                    print(traceback.format_exc(), file=log)
                    print(ModColor.Str(
                        "WARNING: HMP cache could not be written, "
                        "see error report above. Proceeding anyway."), file=log)
Example #10
    def InitMSMF(self, approx=False, cache=True, facetcache=None):
        """Initializes MSMF basis functions. If approx is True, then uses the central facet's PSF for
        all facets.
        Populates the self.facetcache dict, unless facetcache is supplied
        """
        self.DicoMSMachine = {}
        valid = True
        if facetcache is not None:
            print >> log, "HMP basis functions pre-initialized"
            self.facetcache = facetcache
        else:
            cachehash = dict([
                (section, self.GD[section])
                for section in ("Data", "Beam", "Selection", "Freq", "Image",
                                "Facets", "Weight", "RIME", "Comp", "CF",
                                "HMP")
            ])
            cachepath, valid = self.maincache.checkCache(
                self.CacheFileName, cachehash,
                reset=not cache or self.PSFHasChanged)
            # do not use cache in approx mode
            if approx or not cache:
                valid = False
            if valid:
                print >> log, "Initialising HMP basis functions from cache %s" % cachepath
                self.facetcache = shared_dict.create(self.CacheFileName)
                self.facetcache.restore(cachepath)
            else:
                self.facetcache = None

        centralFacet = self.PSFServer.DicoVariablePSF["CentralFacet"]
        if approx:
            print >> log, "HMP approximation mode: using PSF of central facet (%d)" % centralFacet
            self.PSFServer.setFacet(centralFacet)
            MSMachine = ClassMultiScaleMachine.ClassMultiScaleMachine(
                self.GD,
                self.facetcache.addSubdict(0),
                self.GainMachine,
                NFreqBands=self.NFreqBands)
            MSMachine.setModelMachine(self.ModelMachine)
            MSMachine.setSideLobeLevel(self.SideLobeLevel, self.OffsetSideLobe)
            MSMachine.SetFacet(centralFacet)
            MSMachine.SetPSF(self.PSFServer)  # ThisPSF,ThisMeanPSF)
            MSMachine.FindPSFExtent(verbose=True)
            MSMachine.MakeMultiScaleCube(verbose=True)
            MSMachine.MakeBasisMatrix()
            for iFacet in range(self.PSFServer.NFacets):
                self.DicoMSMachine[iFacet] = MSMachine
        else:
            # if no facet cache, init in parallel
            if self.facetcache is None:
                self.facetcache = shared_dict.create(self.CacheFileName)
                for iFacet in range(self.PSFServer.NFacets):
                    fcdict = self.facetcache.addSubdict(iFacet)
                    if self.ParallelMode:
                        args = (fcdict.writeonly(),
                                self.DicoVariablePSF.readonly(), iFacet,
                                self.SideLobeLevel, self.OffsetSideLobe,
                                centralFacet)
                        APP.runJob("InitHMP:%d" % iFacet,
                                   self._initMSM_handler,
                                   args=args)
                    else:
                        args = (fcdict, self.DicoVariablePSF, iFacet,
                                self.SideLobeLevel, self.OffsetSideLobe,
                                centralFacet)
                        self._initMSM_handler(*args)

                if self.ParallelMode:
                    APP.awaitJobResults("InitHMP:*", progress="Init HMP")

                self.facetcache.reload()
            for iFacet in range(self.PSFServer.NFacets):
                self.PSFServer.setFacet(iFacet)
                MSMachine = ClassMultiScaleMachine.ClassMultiScaleMachine(
                    self.GD,
                    self.facetcache[iFacet],
                    self.GainMachine,
                    NFreqBands=self.NFreqBands)
                MSMachine.setModelMachine(self.ModelMachine)
                MSMachine.setSideLobeLevel(self.SideLobeLevel,
                                           self.OffsetSideLobe)
                MSMachine.SetFacet(iFacet)
                MSMachine.SetPSF(self.PSFServer)  # ThisPSF,ThisMeanPSF)
                # only log PSF extent details for the central facet
                MSMachine.FindPSFExtent(verbose=(iFacet == centralFacet))
                MSMachine.MakeMultiScaleCube(verbose=(iFacet == centralFacet))
                MSMachine.MakeBasisMatrix()
                self.DicoMSMachine[iFacet] = MSMachine

            # write cache to disk, unless in a mode where we explicitly don't want it
            if facetcache is None and not valid and cache and not approx:
                try:
                    print("  saving HMP cache to %s" % cachepath, file=log)
                    self.facetcache.save(cachepath)
                    self.maincache.saveCache(self.CacheFileName)
                    self.PSFHasChanged = False
                    print("  HMP init done", file=log)
                except Exception:
                    print(traceback.format_exc(), file=log)
                    print(ModColor.Str(
                        "WARNING: HMP cache could not be written, "
                        "see error report above. Proceeding anyway."), file=log)
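Both InitMSMF variants use the same cache handshake: key the cache on a hash of the config sections that affect the result, restore on a hit, recompute and save on a miss. A minimal sketch of that flow (the section tuple here is an illustrative subset of the real one):

cachehash = dict((s, self.GD[s]) for s in ("Data", "Facets", "HMP"))
cachepath, valid = self.maincache.checkCache(
    self.CacheFileName, cachehash,
    reset=not cache or self.PSFHasChanged)
if valid:
    self.facetcache = shared_dict.create(self.CacheFileName)
    self.facetcache.restore(cachepath)   # reuse a previous run's basis
else:
    self.facetcache = None               # triggers recomputation below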
Example #11
    def InterpolParallel(self):
        Sols0 = self.Sols
        nt, nch, na, nd, _, _ = Sols0.G.shape
        log.print(" #Times:      %i" % nt)
        log.print(" #Channels:   %i" % nch)
        log.print(" #Antennas:   %i" % na)
        log.print(" #Directions: %i" % nd)

        APP.startWorkers()
        iJob = 0

        if "TEC" in self.InterpMode:
            self.TECArray = NpShared.ToShared(
                "%sTECArray" % IdSharedMem, np.zeros((nt, nd, na), np.float32))
            self.CPhaseArray = NpShared.ToShared(
                "%sCPhaseArray" % IdSharedMem,
                np.zeros((nt, nd, na), np.float32))
            for it in range(nt):
                #            for iDir in range(nd):
                APP.runJob("FitThisTEC_%d" % iJob,
                           self.FitThisTEC,
                           args=(it, ))  #,serial=True)
                iJob += 1
            workers_res = APP.awaitJobResults("FitThisTEC*",
                                              progress="Fit TEC")

        if "Amp" in self.InterpMode:
            for iAnt in range(na):
                for iDir in range(nd):
                    APP.runJob("FitThisAmp_%d" % iJob,
                               self.FitThisAmp,
                               args=(iAnt, iDir))  #,serial=True)
                    iJob += 1
            workers_res = APP.awaitJobResults("FitThisAmp*",
                                              progress="Smooth Amp")

        if "PolyAmp" in self.InterpMode:
            for iDir in range(nd):
                APP.runJob("FitThisPolyAmp_%d" % iJob,
                           self.FitThisPolyAmp,
                           args=(iDir, ))
                iJob += 1
            workers_res = APP.awaitJobResults("FitThisPolyAmp*",
                                              progress="Smooth Amp")

        if "Clip" in self.InterpMode:
            for iDir in range(nd):
                APP.runJob("ClipThisDir_%d" % iJob,
                           self.ClipThisDir,
                           args=(iDir, ),
                           serial=True)
                iJob += 1
            workers_res = APP.awaitJobResults("ClipThisDir*",
                                              progress="Clip Amp")

        APP.shutdown()
        Multiprocessing.cleanupShm()
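Finally, note that the ClipThisDir jobs are the one place where serial=True is live rather than commented out; the flag appears to run a job synchronously in the submitting process instead of in a worker. Toggling it is a handy debugging pattern (a sketch; DEBUG_SERIAL is a placeholder, not a name from the source):

DEBUG_SERIAL = False   # flip to True to step through a job with pdb
APP.runJob("ClipThisDir_%d" % iJob,
           self.ClipThisDir,
           args=(iDir, ),
           serial=DEBUG_SERIAL)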