Example No. 1
    def InitFromCatalog(self):

        FileCoords = self.FileCoords
        dtype = [('Name', 'S200'), ("ra", np.float64), ("dec", np.float64),
                 ('Type', 'S200')]
        #FileCoords="Transient_LOTTS.csv"

        self.PosArray = np.load(FileCoords)
        self.PosArray = self.PosArray.view(np.recarray)

        self.NDirSelected = self.PosArray.shape[0]

        self.NDir = self.PosArray.shape[0]

        self.DicoDATA = shared_dict.create("DATA")
        self.DicoGrids = shared_dict.create("Grids")
        self.DicoGrids["GridSTD"] = np.zeros((self.na, self.NTimes),
                                             np.float64)

        self.DoJonesCorr_kMS = False
        self.DicoJones = None
        if self.SolsName:
            self.DoJonesCorr_kMS = True
            self.DicoJones_kMS = shared_dict.create("DicoJones_kMS")

        self.DoJonesCorr_Beam = False
        if self.BeamModel:
            self.DoJonesCorr_Beam = True
            self.DicoJones_Beam = shared_dict.create("DicoJones_Beam")

        APP.registerJobHandlers(self)
        AsyncProcessPool.init(ncpu=self.NCPU, affinity=0)
        APP.startWorkers()
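
Most of these snippets follow the same AsyncProcessPool lifecycle: register the handler object, initialise the pool, start the workers, dispatch named jobs, wait on a wildcard job pattern, then shut down and clean up shared memory. The minimal sketch below condenses that pattern; it assumes the DDFacet imports used throughout these examples, and the class name DemoStacker, the shared-dict name "Grids" and the job names are illustrative only.

# Minimal sketch of the APP lifecycle seen in these examples (hypothetical names)
import numpy as np
from DDFacet.Other import AsyncProcessPool, Multiprocessing, shared_dict
from DDFacet.Other.AsyncProcessPool import APP

class DemoStacker(object):
    def __init__(self, ncpu=4):
        self.DicoGrids = shared_dict.create("Grids")        # shared result buffer
        self.DicoGrids["Grid"] = np.zeros((8, 16), np.float64)
        APP.registerJobHandlers(self)                        # expose methods as job handlers
        AsyncProcessPool.init(ncpu=ncpu, affinity=0)         # configure the pool
        APP.startWorkers()                                   # spawn the worker processes

    def _stack_one(self, iTime):
        grids = shared_dict.attach("Grids")                  # workers re-attach by name
        grids["Grid"][:, iTime] += 1.

    def run(self):
        for iTime in range(16):
            APP.runJob("Stack:%d" % iTime, self._stack_one, args=(iTime,))
        APP.awaitJobResults("Stack:*", progress="Stacking")
        APP.shutdown()
        Multiprocessing.cleanupShm()                         # release shared memory segments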
Example No. 2
    def Interpol(self):
        APP.startWorkers()
        if "TEC" in self.DicoFile0["SmoothMode"]:
            TECArray = NpShared.ToShared("%sTECArray" % IdSharedMem,
                                         self.DicoFile0["SolsTEC"])
            CPhaseArray = NpShared.ToShared("%sCPhaseArray" % IdSharedMem,
                                            self.DicoFile0["SolsCPhase"])
            nt, nd, na = TECArray.shape
            iJob = 0

            for it in range(nt):
                APP.runJob("InterpolTECTime_%d" % iJob,
                           self.InterpolTECTime,
                           args=(it, ))  #,serial=True)
                iJob += 1
            workers_res = APP.awaitJobResults("InterpolTECTime*",
                                              progress="Interpol TEC")

        iJob = 0
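        # note: nt is taken from TECArray.shape in the TEC branch above; the amplitude
        # interpolation below therefore reuses the same number of time slots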
        for it in range(nt):
            APP.runJob("InterpolAmpTime_%d" % iJob,
                       self.InterpolAmpTime,
                       args=(it, ))  #,serial=True)
            iJob += 1
        workers_res = APP.awaitJobResults("InterpolAmpTime*",
                                          progress="Interpol Amp")

        # APP.terminate()
        APP.shutdown()
        Multiprocessing.cleanupShm()
Example No. 3
def ConvolveGaussianParallel(shareddict,
                             field_in,
                             field_out,
                             CellSizeRad=None,
                             GaussPars=[(0., 0., 0.)],
                             Normalise=False):
    """Convolves images held in a dict, using APP.
    """
    Ain0 = shareddict[field_in]
    nch, npol, _, _ = Ain0.shape
    Aout = shareddict[field_out]
    # single channel? Handle serially
    if nch == 1:
        return ConvolveGaussian(shareddict, field_in, field_out, 0,
                                CellSizeRad, GaussPars[0], None, Normalise)

    jobid = "convolve:%s:%s:" % (field_in, field_out)
    for ch in range(nch):
        sd_rw = shareddict.readwrite()
        APP.runJob(jobid + str(ch),
                   _convolveSingleGaussianFFTW_noret,
                   args=(sd_rw, field_in, field_out, ch, CellSizeRad,
                         GaussPars[ch], None, Normalise))
    APP.awaitJobResults(jobid + "*")  #, progress="Convolving")

    return Aout
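
For context, a possible call site for ConvolveGaussianParallel is sketched below: the input and output cubes live in a shared dict, with one (major, minor, PA) Gaussian per channel. The field names, cube size and Gaussian parameters are illustrative, and APP workers are assumed to have been started already, as in the other examples here.

# Hypothetical call site for ConvolveGaussianParallel (names and sizes illustrative)
import numpy as np
from DDFacet.Other import shared_dict

images = shared_dict.create("Images")
images["dirty"] = np.zeros((4, 1, 256, 256), np.float32)     # nch, npol, Ny, Nx cube
images["restored"] = np.zeros((4, 1, 256, 256), np.float32)  # same shape as the input

GaussPars = [(2e-5, 1e-5, 0.)] * 4                           # one (maj, min, PA) per channel
ConvolveGaussianParallel(images, "dirty", "restored",
                         CellSizeRad=1.5e-6, GaussPars=GaussPars, Normalise=True)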
Example No. 4
    def InitFromCatalog(self):

        FileCoords = self.FileCoords
        dtype = [('Name', 'S200'), ("ra", np.float64), ("dec", np.float64),
                 ('Type', 'S200')]
        #FileCoords="Transient_LOTTS.csv"

        self.PosArray = np.load(FileCoords)
        self.PosArray = self.PosArray.view(np.recarray)

        self.NDirSelected = self.PosArray.shape[0]

        self.NDir = self.PosArray.shape[0]

        self.DicoDATA = shared_dict.create("DATA")
        self.DicoGrids = shared_dict.create("Grids")

        dChan = np.min([self.StepFreq, self.NChan])
        self.DicoGrids["DomainEdges_Freq"] = np.int64(
            np.linspace(0, self.NChan,
                        int(self.NChan / dChan) + 1))
        #self.DicoGrids["DomainEdges_Time"] = np.int64(np.linspace(0,self.NTimes-1,int(self.NTimes/self.StepTime)+1))
        DT = self.times.max() - self.times.min()
        DTSol = np.min([self.StepTime * self.dt, DT])
        self.DicoGrids["DomainEdges_Time"] = np.linspace(
            self.times.min() - 1e-6,
            self.times.max() + 1e-6,
            int(DT / DTSol) + 1)

        self.DicoGrids["GridC2"] = np.zeros(
            (self.DicoGrids["DomainEdges_Time"].size - 1,
             self.DicoGrids["DomainEdges_Freq"].size - 1, self.na, self.na),
            np.complex128)
        self.DicoGrids["GridC"] = np.zeros(
            (self.DicoGrids["DomainEdges_Time"].size - 1,
             self.DicoGrids["DomainEdges_Freq"].size - 1, self.na, self.na),
            np.complex128)

        log.print("  DomainEdges_Freq: %s" %
                  (str(self.DicoGrids["DomainEdges_Freq"])))
        log.print("  DomainEdges_Time: %s" %
                  (str(self.DicoGrids["DomainEdges_Time"])))

        self.DoJonesCorr_kMS = False
        self.DicoJones = None
        if self.SolsName:
            self.DoJonesCorr_kMS = True
            self.DicoJones_kMS = shared_dict.create("DicoJones_kMS")

        self.DoJonesCorr_Beam = False
        if self.BeamModel:
            self.DoJonesCorr_Beam = True
            self.DicoJones_Beam = shared_dict.create("DicoJones_Beam")

        APP.registerJobHandlers(self)
        AsyncProcessPool.init(ncpu=self.NCPU, affinity=0)
        APP.startWorkers()
Example No. 5
    def collectSmearMapping(self, DATA, field):
        APP.awaitJobCounter(self._job_counter,
                            progress="Mapping %s" % self.name,
                            total=self._nbl,
                            timeout=1)
        self._outdict.reload()
        #self._outdict.save("bda.dict")
        blockdict = self._outdict["blocks"]
        sizedict = self._outdict["sizes"]
        # process worker results
        # for each map (each array returned from worker), BlockSizes[MapName] will
        # contain a list of BlocksSizesBL entries returned from that worker
        NTotBlocks = 0
        NTotRows = 0

        for key in sizedict.iterkeys():
            bsz = sizedict[key]
            NTotBlocks += len(bsz)
            NTotRows += bsz.sum()

        mapping = DATA.addSharedArray(field, (2 + NTotBlocks + NTotRows, ),
                                      np.int32)

        mapping[0] = NTotBlocks
        mapping[1] = NTotBlocks >> 32
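        # the two int32 slots above pack the 64-bit block count:
        # low word in mapping[0], high word in mapping[1]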

        FinalMappingSizes = mapping[2:2 + NTotBlocks]
        FinalMapping = mapping[2 + NTotBlocks:]

        iii = 0
        jjj = 0

        # now go through each per-baseline mapping, sorted by baseline
        for key in sorted(sizedict.iterkeys()):
            BlocksSizesBL = sizedict[key]
            BlocksRowsListBL = blockdict[key]

            FinalMapping[iii:iii + BlocksRowsListBL.size] = BlocksRowsListBL[:]
            iii += BlocksRowsListBL.size

            # print "IdWorker,AppendId",IdWorker,AppendId,BlocksSizesBL
            # MM=np.concatenate((MM,BlocksSizesBL))
            FinalMappingSizes[jjj:jjj + BlocksSizesBL.size] = BlocksSizesBL[:]
            jjj += BlocksSizesBL.size

        NVis = np.where(DATA["A0"] != DATA["A1"])[0].size * DATA["freqs"].size
        #print>>log, "  Number of blocks:         %i"%NTotBlocks
        #print>>log, "  Number of 4-Visibilities: %i"%NVis
        fact = (100. * (NVis - NTotBlocks) / float(NVis))

        # clear temp shared arrays/dicts
        del sizedict
        del blockdict
        self._outdict.delete()

        return mapping, fact
Example No. 6
 def giveWeigthParallel(self):
     self.DATA = shared_dict.attach(self.DictName)
     self.na = self.DATA["na"]
     for A0 in range(0, self.na):
         for A1 in range(A0 + 1, self.na):
             APP.runJob("giveWeigthChunk:%d:%d" % (A0, A1),
                        self.giveWeigthChunk,
                        args=(A0, A1,
                              self.DATA.readwrite()))  #,serial=True)
     APP.awaitJobResults("giveWeigthChunk:*", progress="CalcWeight")
Example No. 7
 def __init__(self,
              GD,
              NFreqBands,
              RefFreq,
              MainCache=None,
              IdSharedMem=""):
     self.GD = GD
     self.InitMachine = ClassInitSSDModel(GD, NFreqBands, RefFreq,
                                          MainCache, IdSharedMem)
     self.NCPU = (self.GD["Parallel"]["NCPU"] or psutil.cpu_count())
     APP.registerJobHandlers(self)
Example No. 8
 def giveWeigthParallel(self):
     self.DATA = shared_dict.attach(self.DictName)
     nrow, nch, npol = self.DATA["data"].shape
     self.na = self.DATA["na"]
     ntu = self.DATA["times_unique"].size
     self.DATA["Wa"] = np.zeros((self.na, ntu), np.float32)
     for A0 in range(self.na):
         APP.runJob("giveWeigthAnt:%d" % (A0),
                    self.giveWeigthAnt,
                    args=(A0, ))  #,serial=True)
     APP.awaitJobResults("giveWeigthAnt:*", progress="CalcWeight")
Example No. 9
    def StackAll(self):
        while self.iCurrentMS<self.nMS:
            if self.LoadNextMS()=="NotRead": continue
            print("Making dynamic spectra...", file=log)
            # for iTime in range(self.NTimes):
            #     APP.runJob("Stack_SingleTime:%d"%(iTime), 
            #                self.Stack_SingleTime,
            #                args=(iTime,))#,serial=True)
            # APP.awaitJobResults("Stack_SingleTime:*", progress="Append MS %i"%self.DicoDATA["iMS"])

            FF=self.DicoGrids["DomainEdges_Freq"]
            TT=self.DicoGrids["DomainEdges_Time"]
            for iTime in range(TT.size-1):
                for iFreq in range(FF.size-1):
                    #print(iTime,iFreq)
                    APP.runJob("Stack_SingleTime:%d:%d"%(iTime,iFreq), 
                               self.Stack_SingleTime,
                               args=(iTime,iFreq))#,serial=True)

            APP.awaitJobResults("Stack_SingleTime:*", progress="Append MS %i"%self.DicoDATA["iMS"])

            
            FOut="%s.Weights.npy"%self.DicoDATA["MSName"]
            
            self.DicoDATA.reload()
            
            # print(self.DicoDATA["WOUT"])
            #self.DicoDATA["WOUT"]/=np.median(self.DicoDATA["WOUT"])
            
            w=self.DicoDATA["WOUT"]

            w/=np.median(w)
            w[w<=0]=0
            w[w>2.]=2
            
            log.print("    saving weights as %s"%FOut)
            np.save(FOut,self.DicoDATA["WOUT"])
            
            # import pylab
            # pylab.clf()
            # pylab.hist(w[:,0].ravel())
            # pylab.draw()
            # pylab.show()
            
            #np.save(FOut,self.DicoGrids["GridSTD"])
            
            # for iTime in range(self.NTimes):
            #     self.Stack_SingleTime(iTime)
       
        self.Finalise()
Example No. 10
    def StackAll(self):
        while self.iCurrentMS < self.nMS:
            if self.LoadNextMS() == "NotRead": continue
            print("Making dynamic spectra...", file=log)
            for iTime in range(self.NTimes):
                APP.runJob("Stack_SingleTime:%d" % (iTime),
                           self.Stack_SingleTime,
                           args=(iTime, ))  #,serial=True)
            APP.awaitJobResults("Stack_SingleTime:*",
                                progress="Append MS %i" % self.DicoDATA["iMS"])

            # for iTime in range(self.NTimes):
            #     self.Stack_SingleTime(iTime)

        self.Finalise()
Example No. 11
 def computeSmearMappingInBackground (self, base_job_id, MS, DATA, radiusDeg, Decorr, channel_mapping, mode):
     l = radiusDeg * np.pi / 180
     dPhi = np.sqrt(6. * (1. - Decorr))
     # create new empty shared dicts for results
     self._outdict = shared_dict.create("%s:%s:tmp" %(DATA.path, self.name))
     blockdict = self._outdict.addSubdict("blocks")
     sizedict  = self._outdict.addSubdict("sizes")
     self._nbl = 0
     for a0 in xrange(MS.na):
         for a1 in xrange(MS.na):
             if a0 != a1:
                 self._nbl += 1
                 APP.runJob("%s:%s:%d:%d" % (base_job_id, self.name, a0, a1), self._smearmapping_worker,
                            counter=self._job_counter, collect_result=False,
                            args=(DATA.readonly(), blockdict.writeonly(), sizedict.writeonly(), a0, a1, dPhi, l,
                                  channel_mapping, mode))
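
Together with Examples 5 and 25, this snippet shows the counter-based variant of the pattern: a job counter is created when the handler object is constructed, jobs are dispatched with collect_result=False so return values are not pushed through the result queue, and completion is tracked with awaitJobCounter while the workers write their output into a shared dict. A condensed, hedged sketch of that flow (class, dict and job names are illustrative):

# Hedged sketch of counter-based dispatch and collection (hypothetical names)
from DDFacet.Other import shared_dict
from DDFacet.Other.AsyncProcessPool import APP

class DemoMapper(object):
    def __init__(self, name="demo"):
        self.name = name
        APP.registerJobHandlers(self)
        self._job_counter = APP.createJobCounter(self.name)   # tracks outstanding jobs

    def _worker(self, outdict, i):
        outdict["chunk_%d" % i] = i * i                       # result written to shared memory

    def computeInBackground(self, njobs):
        self._njobs = njobs
        self._outdict = shared_dict.create("%s:tmp" % self.name)
        for i in range(njobs):
            APP.runJob("%s:map:%d" % (self.name, i), self._worker,
                       counter=self._job_counter, collect_result=False,
                       args=(self._outdict.writeonly(), i))

    def collect(self):
        APP.awaitJobCounter(self._job_counter, progress="Mapping %s" % self.name,
                            total=self._njobs, timeout=1)
        self._outdict.reload()                                # pick up the workers' writes
        return self._outdict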
Example No. 12
    def StackAll(self):
        while self.iCurrentMS < self.nMS:
            if self.LoadNextMS() == "NotRead": continue
            print("Making dynamic spectra...", file=log)
            for iTime in range(self.NTimes):
                APP.runJob("Stack_SingleTime:%d" % (iTime),
                           self.Stack_SingleTime,
                           args=(iTime, ))  #,serial=True)
            APP.awaitJobResults("Stack_SingleTime:*",
                                progress="Append MS %i" % self.DicoDATA["iMS"])
            FOut = "%s.Weights.npy" % self.DicoDATA["MSName"]

            log.print("    saving weights as %s" % FOut)
            self.DicoDATA.reload()
            # print(self.DicoDATA["WOUT"])
            self.DicoDATA["WOUT"] /= np.median(self.DicoDATA["WOUT"])
            np.save(FOut, self.DicoDATA["WOUT"])

            # for iTime in range(self.NTimes):
            #     self.Stack_SingleTime(iTime)

        self.Finalise()
Example No. 13
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

        LMS = [l.strip() for l in open(self.MSOutFreq).readlines()]
        self.OutFreqDomains = np.zeros((len(LMS) * self.NFreqPerMS, 2),
                                       np.float64)
        iFTot = 0
        for iMS, MS in enumerate(LMS):
            t = table("%s::SPECTRAL_WINDOW" % MS, ack=False)
            df = t.getcol("CHAN_WIDTH").flat[0]
            fs = t.getcol("CHAN_FREQ").ravel()
            f0, f1 = fs[0] - df / 2., fs[-1] + df / 2.
            ff = np.linspace(f0, f1, self.NFreqPerMS + 1)
            for iF in range(self.NFreqPerMS):
                self.OutFreqDomains[iFTot, 0] = ff[iF]
                self.OutFreqDomains[iFTot, 1] = ff[iF + 1]
                iFTot += 1

        NFreqsOut = self.OutFreqDomains.shape[0]
        self.CentralFreqs = np.mean(self.OutFreqDomains, axis=1)

        log.print("Loading %s" % self.SolsFileIn)
        self.DicoFile0 = dict(np.load(self.SolsFileIn))
        Dico0 = self.DicoFile0
        self.Sols0 = self.DicoFile0["Sols"].view(np.recarray)

        DicoOut = {}
        DicoOut['ModelName'] = Dico0['ModelName']
        DicoOut['StationNames'] = Dico0['StationNames']
        DicoOut['BeamTimes'] = Dico0['BeamTimes']
        DicoOut['SourceCatSub'] = Dico0['SourceCatSub']
        DicoOut['ClusterCat'] = Dico0['ClusterCat']
        DicoOut['SkyModel'] = Dico0['SkyModel']
        DicoOut['FreqDomains'] = self.OutFreqDomains
        self.DicoOut = DicoOut
        self.CentralFreqsIn = np.mean(Dico0['FreqDomains'], axis=1)

        self.DicoFreqWeights = {}
        for iChan in range(self.CentralFreqs.size):
            f = self.CentralFreqs[iChan]
            i0 = np.where(self.CentralFreqsIn <= f)[0]
            i1 = np.where(self.CentralFreqsIn > f)[0]
            if i0.size > 0 and i1.size > 0:
                i0 = i0[-1]
                i1 = i1[0]
                f0 = self.CentralFreqsIn[i0]
                f1 = self.CentralFreqsIn[i1]
                df = np.abs(f0 - f1)
                alpha = 1. - (f - f0) / df
                c0 = alpha
                c1 = 1. - alpha
                self.DicoFreqWeights[iChan] = {
                    "Type": "Dual",
                    "Coefs": (c0, c1),
                    "Index": (i0, i1)
                }
            else:
                i0 = np.argmin(np.abs(self.CentralFreqsIn - f))
                self.DicoFreqWeights[iChan] = {"Type": "Single", "Index": i0}

        # for iChan in range(self.CentralFreqs.size):
        #     print
        #     print self.CentralFreqs[iChan]/1e6
        #     if self.DicoFreqWeights[iChan]["Type"]=="Dual":
        #         i0,i1=self.DicoFreqWeights[iChan]["Index"]
        #         print self.CentralFreqsIn[i0]/1e6,self.CentralFreqsIn[i1]/1e6,self.DicoFreqWeights[iChan]["Coefs"]
        #     else:
        #         i0=self.DicoFreqWeights[iChan]["Index"]
        #         print self.CentralFreqsIn[i0]/1e6

        nt, _, na, nd, _, _ = self.Sols0.G.shape
        SolsOut = np.zeros(
            (nt, ),
            dtype=[("t0", np.float64), ("t1", np.float64),
                   ("G", np.complex64, (NFreqsOut, na, nd, 2, 2)),
                   ("Stats", np.float32, (NFreqsOut, na, 4))])

        SolsOut = SolsOut.view(np.recarray)
        SolsOut.t0 = self.Sols0.t0
        SolsOut.t1 = self.Sols0.t1
        SolsOut.G[..., 0, 0] = 1.
        SolsOut.G[..., 1, 1] = 1.
        self.SolsOut = SolsOut
        self.GOut = NpShared.ToShared("%sGOut" % IdSharedMem,
                                      self.SolsOut.G.copy())

        APP.registerJobHandlers(self)
        AsyncProcessPool.init(ncpu=self.NCPU, affinity=0)
Example No. 14
    return Aout


# wrappers that discard return value for use with APP -- avoids wasteful stuffing of images into result queues
def _convolveSingleGaussianFFTW_noret(*args, **kw):
    _convolveSingleGaussianFFTW(*args, **kw)
    return None


def _convolveSingleGaussianNP_noret(*args, **kw):
    _convolveSingleGaussianNP(*args, **kw)
    return None


APP.registerJobHandlers(_convolveSingleGaussianFFTW_noret,
                        _convolveSingleGaussianNP_noret)

## FFTW version
#def ConvolveGaussianFFTW(Ain0,
#                        CellSizeRad=None,
#                        GaussPars=[(0.,0.,0.)],
#                        Normalise=False,
#                        out=None,
#                        nthreads=1,
#                        min_size_fft=2048):
#   warnings.warn("deprecated", DeprecationWarning)

#   assert Ain0.shape == 4, "Expected stack of images: nch, npol, Ny, Nx"
#   nch,npol,Ny,Nx=Ain0.shape
#   pady = max(Ny, min_size_fft)
#   padx = max(Nx, min_size_fft)
Example No. 15
    def InitMSMF(self, approx=False, cache=True, facetcache=None):
        """Initializes MSMF basis functions. If approx is True, then uses the central facet's PSF for
        all facets.
        Populates the self.facetcache dict, unless facetcache is supplied
        """
        self.DicoMSMachine = {}
        valid = True
        if facetcache is not None:
            print >> log, "HMP basis functions pre-initialized"
            self.facetcache = facetcache
        else:
            cachehash = dict([
                (section, self.GD[section])
                for section in ("Data", "Beam", "Selection", "Freq", "Image",
                                "Facets", "Weight", "RIME", "Comp", "CF",
                                "HMP")
            ])
            cachepath, valid = self.maincache.checkCache(self.CacheFileName,
                                                         cachehash,
                                                         reset=not cache
                                                         or self.PSFHasChanged)
            # do not use cache in approx mode
            if approx or not cache:
                valid = False
            if valid:
                print >> log, "Initialising HMP basis functions from cache %s" % cachepath
                self.facetcache = shared_dict.create(self.CacheFileName)
                self.facetcache.restore(cachepath)
            else:
                self.facetcache = None

        centralFacet = self.PSFServer.DicoVariablePSF["CentralFacet"]
        if approx:
            print >> log, "HMP approximation mode: using PSF of central facet (%d)" % centralFacet
            self.PSFServer.setFacet(centralFacet)
            MSMachine = ClassMultiScaleMachine.ClassMultiScaleMachine(
                self.GD,
                self.facetcache.addSubdict(0),
                self.GainMachine,
                NFreqBands=self.NFreqBands)
            MSMachine.setModelMachine(self.ModelMachine)
            MSMachine.setSideLobeLevel(self.SideLobeLevel, self.OffsetSideLobe)
            MSMachine.SetFacet(centralFacet)
            MSMachine.SetPSF(self.PSFServer)  # ThisPSF,ThisMeanPSF)
            MSMachine.FindPSFExtent(verbose=True)
            MSMachine.MakeMultiScaleCube(verbose=True)
            MSMachine.MakeBasisMatrix()
            for iFacet in xrange(self.PSFServer.NFacets):
                self.DicoMSMachine[iFacet] = MSMachine
        else:
            # if no facet cache, init in parallel
            if self.facetcache is None:
                self.facetcache = shared_dict.create(self.CacheFileName)
                # breakout = False
                for iFacet in xrange(self.PSFServer.NFacets):
                    fcdict = self.facetcache.addSubdict(iFacet)
                    if self.ParallelMode:
                        args = (fcdict.writeonly(),
                                self.DicoVariablePSF.readonly(), iFacet,
                                self.SideLobeLevel, self.OffsetSideLobe,
                                centralFacet)
                        APP.runJob("InitHMP:%d" % iFacet,
                                   self._initMSM_handler,
                                   args=args)
                    else:
                        args = (fcdict, self.DicoVariablePSF, iFacet,
                                self.SideLobeLevel, self.OffsetSideLobe,
                                centralFacet)
                        self._initMSM_handler(*args)
                        # import pdb;
                        # pdb.set_trace()
                        # if breakout:
                        #     raise RuntimeError("exiting")

                if self.ParallelMode:
                    APP.awaitJobResults("InitHMP:*", progress="Init HMP")

                self.facetcache.reload()
            #        t = ClassTimeIt.ClassTimeIt()
            for iFacet in xrange(self.PSFServer.NFacets):
                self.PSFServer.setFacet(iFacet)
                MSMachine = ClassMultiScaleMachine.ClassMultiScaleMachine(
                    self.GD,
                    self.facetcache[iFacet],
                    self.GainMachine,
                    NFreqBands=self.NFreqBands)
                MSMachine.setModelMachine(self.ModelMachine)
                MSMachine.setSideLobeLevel(self.SideLobeLevel,
                                           self.OffsetSideLobe)
                MSMachine.SetFacet(iFacet)
                MSMachine.SetPSF(self.PSFServer)  # ThisPSF,ThisMeanPSF)
                MSMachine.FindPSFExtent(
                    verbose=(iFacet == centralFacet
                             ))  # only print to log for central facet
                MSMachine.MakeMultiScaleCube(verbose=(iFacet == centralFacet))
                MSMachine.MakeBasisMatrix()
                self.DicoMSMachine[iFacet] = MSMachine

            # write cache to disk, unless in a mode where we explicitly don't want it
            if facetcache is None and not valid and cache and not approx:
                try:
                    #MyPickle.DicoNPToFile(facetcache,cachepath)
                    #cPickle.dump(facetcache, file(cachepath, 'w'), 2)
                    print >> log, "  saving HMP cache to %s" % cachepath
                    self.facetcache.save(cachepath)
                    #self.maincache.saveCache("HMPMachine")
                    self.maincache.saveCache(self.CacheFileName)
                    self.PSFHasChanged = False
                    print >> log, "  HMP init done"
                except:
                    print >> log, traceback.format_exc()
                    print >> log, ModColor.Str(
                        "WARNING: HMP cache could not be written, see error report above. Proceeding anyway."
                    )
Example No. 16
    elif len(args):
        print(args)
        OP.ExitWithError("Incorrect number of arguments. Use -h for help.")
        sys.exit(1)

    retcode = report_error = 0

    try:
        main(OP, messages)
        print(ModColor.Str(
            "DDFacet ended successfully after %s" %
            T.timehms(), col="green"), file=log)
    except KeyboardInterrupt:
        print(traceback.format_exc(), file=log)
        print(ModColor.Str("DDFacet interrupted by Ctrl+C", col="red"), file=log)
        APP.terminate()
        retcode = 1 #Should at least give the command line an indication of failure
    except Exceptions.UserInputError:
        print(ModColor.Str(sys.exc_info()[1], col="red"), file=log)
        print(ModColor.Str("There was a problem with some user input. See messages above for an indication."), file=log)
        APP.terminate()
        retcode = 1  # Should at least give the command line an indication of failure
    except WorkerProcessError:
        print(ModColor.Str("A worker process has died on us unexpectedly. This probably indicates a bug:"), file=log)
        print(ModColor.Str("  the original underlying error may be reported in the log [possibly far] above."), file=log)
        report_error = True
    except:
        if sys.exc_info()[0] is not WorkerProcessError and Exceptions.is_pdb_enabled():
            APP.terminate()
            raise
        else:
Example No. 17
    def giveDicoInitIndiv(self,
                          ListIslands,
                          ModelImage,
                          DicoDirty,
                          ListDoIsland=None):
        DicoInitIndiv = shared_dict.create("DicoInitIsland")
        ParmDict = shared_dict.create("InitSSDModelHMP")
        ParmDict["ModelImage"] = ModelImage
        ParmDict["GridFreqs"] = self.GridFreqs
        ParmDict["DegridFreqs"] = self.DegridFreqs

        #         ListBigIslands=[]
        #         ListSmallIslands=[]
        #         ListDoBigIsland=[]
        #         ListDoSmallIsland=[]
        #         NParallel=0
        #         for iIsland,Island in enumerate(ListIslands):
        #             if len(Island)>self.GD["SSDClean"]["ConvFFTSwitch"]:
        #                 ListBigIslands.append(Island)
        #                 ListDoBigIsland.append(ListDoIsland[iIsland])
        #                 if ListDoIsland or ListDoIsland[iIsland]:
        #                     NParallel+=1
        #             else:
        #                 ListSmallIslands.append(Island)
        #                 ListDoSmallIsland.append(ListDoIsland[iIsland])
        #         print>>log,"Initialise big islands (parallelised per island)"
        #         pBAR= ProgressBar(Title="Init islands")
        #         pBAR.render(0, NParallel)
        #         nDone=0
        #         for iIsland,Island in enumerate(ListBigIslands):
        #             if not ListDoIsland or ListDoBigIsland[iIsland]:
        #                 subdict = DicoInitIndiv.addSubdict(iIsland)
        #                 # APP.runJob("InitIsland:%d" % iIsland, self._initIsland_worker,
        #                 #            args=(subdict.writeonly(), iIsland, Island,
        #                 #                  self.DicoVariablePSF.readonly(), DicoDirty.readonly(),
        #                 #                  ParmDict.readonly(), self.InitMachine.DeconvMachine.facetcache.readonly(),self.NCPU),serial=True)
        #                 self._initIsland_worker(subdict, iIsland, Island,
        #                                         self.DicoVariablePSF, DicoDirty,
        #                                         ParmDict, self.InitMachine.DeconvMachine.facetcache,
        #                                         self.NCPU)
        #                 pBAR.render(nDone+1, NParallel)
        #                 nDone+=1
        # #        APP.awaitJobResults("InitIsland:*", progress="Init islands")
        #         print>>log,"Initialise small islands (parallelised over islands)"
        #         for iIsland,Island in enumerate(ListSmallIslands):
        #             if not ListDoIsland or ListDoSmallIsland[iIsland]:
        #                 subdict = DicoInitIndiv.addSubdict(iIsland)
        #                 APP.runJob("InitIsland:%d" % iIsland, self._initIsland_worker,
        #                            args=(subdict.writeonly(), iIsland, Island,
        #                                  self.DicoVariablePSF.readonly(), DicoDirty.readonly(),
        #                                  ParmDict.readonly(), self.InitMachine.DeconvMachine.facetcache.readonly(),1))
        #         APP.awaitJobResults("InitIsland:*", progress="Init islands")
        #         DicoInitIndiv.reload()

        print >> log, "Initialise islands (parallelised over islands)"
        if not self.GD["GAClean"]["ParallelInitHMP"]:
            pBAR = ProgressBar(Title="  Init islands")
            for iIsland, Island in enumerate(ListIslands):
                if not ListDoIsland or ListDoIsland[iIsland]:
                    subdict = DicoInitIndiv.addSubdict(iIsland)
                    self._initIsland_worker(
                        subdict, iIsland, Island, self.DicoVariablePSF,
                        DicoDirty, ParmDict,
                        self.InitMachine.DeconvMachine.facetcache, 1)
                pBAR.render(iIsland, len(ListIslands))
        else:
            for iIsland, Island in enumerate(ListIslands):
                if not ListDoIsland or ListDoIsland[iIsland]:
                    subdict = DicoInitIndiv.addSubdict(iIsland)
                    APP.runJob("InitIsland:%d" % iIsland,
                               self._initIsland_worker,
                               args=(subdict.writeonly(), iIsland, Island,
                                     self.DicoVariablePSF.readonly(),
                                     DicoDirty.readonly(), ParmDict.readonly(),
                                     self.InitMachine.DeconvMachine.facetcache.
                                     readonly(), 1))
            APP.awaitJobResults("InitIsland:*", progress="Init islands")
            DicoInitIndiv.reload()

        ParmDict.delete()

        return DicoInitIndiv
Example No. 18
    def LoadModel(self):

        # ClassModelMachine,DicoModel=GiveModelMachine(self.FileDicoModel)
        # try:
        #     self.GD["GAClean"]["GASolvePars"]=DicoModel["SolveParam"]
        # except:
        #     self.GD["GAClean"]["GASolvePars"]=["S","Alpha"]
        #     DicoModel["SolveParam"]=self.GD["GAClean"]["GASolvePars"]
        # self.MM=ClassModelMachine(self.GD)
        # self.MM.FromDico(DicoModel)

        # From DicoModel
        ModConstructor = ClassModModelMachine(self.GD)
        self.MM = ModConstructor.GiveInitialisedMMFromFile(self.FileDicoModel)
        #ModelImage0=self.MM.GiveModelImage(np.mean(self.VS.MS.ChanFreq))

        if self.GD["GDkMS"]["ImageSkyModel"]["FilterNegComp"]:
            self.MM.FilterNegComponants(box=15, sig=1)

        if self.GD["GDkMS"]["ImageSkyModel"]["MaskImage"] != None:
            self.MM.CleanMaskedComponants(
                self.GD["GDkMS"]["ImageSkyModel"]["MaskImage"])
        #self.ModelImage=self.MM.GiveModelImage(np.mean(self.VS.MS.ChanFreq))
        model_freqs = self.VS.FreqBandChannelsDegrid[0]
        # original_freqs=self.VS.FreqBandChannels[0]
        # self.MM.setFreqMachine(original_freqs, model_freqs)
        ModelImage = self.MM.GiveModelImage(model_freqs)

        log.print("model image @%s MHz (min,max) = (%f, %f)" %
                  (str(model_freqs / 1e6), ModelImage.min(), ModelImage.max()))

        # # From ModelImage
        # print "im!!!!!!!!!!!!!!!!!!!!!!!"
        # im=image("Model.fits")
        # data=im.getdata()
        # nch,npol,nx,_=data.shape
        # for ch in range(nch):
        #     for pol in range(npol):
        #         data[ch,pol]=data[ch,pol].T[::-1]
        # self.ModelImage=ModelImage=data
        # # ###############################
        self.FacetMachine.ToCasaImage(ModelImage,
                                      ImageName="%s.Model_kMS" %
                                      self.BaseImageName,
                                      Fits=True)
        # #stop

        #del(data)

        self.DicoImager = self.FacetMachine.DicoImager

        NFacets = len(self.FacetMachine.DicoImager)
        self.NFacets = NFacets

        #self.NDirs=NFacets
        #self.Dirs=range(self.NDirs)

        # SolsFile=self.GD["DDESolutions"]["DDSols"]
        # if not(".npz" in SolsFile):
        #     ThisMSName=reformat.reformat(os.path.abspath(self.VS.MSName),LastSlash=False)
        #     SolsFile="%s/killMS.%s.sols.npz"%(self.VS.MSName,SolsFile)
        #     #SolsFile="BOOTES24_SB100-109.2ch8s.ms/killMS.KAFCA.Scalar.50Dir.0.1P.BriggsSq.PreCuster4.sols.npz"

        # DicoSolsFile=np.load(SolsFile)
        # ClusterCat=DicoSolsFile["ClusterCat"]
        # ClusterCat=ClusterCat.view(np.recarray)

        #DicoFacetName="%s.DicoFacet"%self.BaseImageName
        #DicoFacet=DDFacet.Other.MyPickle.Load(DicoFacetName)

        NodeFile = "%s.NodesCat.npy" % self.GD["Output"][
            "Name"]  #BaseImageName
        NodesCat = np.load(NodeFile)
        NodesCat = NodesCat.view(np.recarray)

        self.NDir = NodesCat.shape[0]

        ClusterCat = np.zeros((self.NDir, ),
                              dtype=[('Name', '|S200'), ('ra', np.float),
                                     ('dec', np.float), ('l', np.float),
                                     ('m', np.float), ('SumI', np.float),
                                     ("Cluster", int)])
        ClusterCat = ClusterCat.view(np.recarray)
        ClusterCat.l = NodesCat.l
        ClusterCat.m = NodesCat.m
        ClusterCat.ra = NodesCat.ra
        ClusterCat.dec = NodesCat.dec

        NN = ClusterCat.shape[0]
        Cat=np.zeros((NN,),dtype=[('Name','|S200'),('ra',np.float),('dec',np.float),('Sref',np.float),('I',np.float),('Q',np.float),\
                                  ('U',np.float),('V',np.float),('RefFreq',np.float),('alpha',np.float),('ESref',np.float),\
                                  ('Ealpha',np.float),('kill',np.int),('Cluster',np.int),('Type',np.int),('Gmin',np.float),\
                                  ('Gmaj',np.float),('Gangle',np.float),("Select",np.int),('l',np.float),('m',np.float),
                                  ("Exclude",bool)])
        Cat = Cat.view(np.recarray)
        Cat.RefFreq = 1.
        Cat.ra[:] = ClusterCat.ra
        Cat.dec[:] = ClusterCat.dec
        Cat.I[:] = ClusterCat.SumI[:]
        Cat.Cluster = np.arange(NN)
        Cat.Sref[:] = ClusterCat.SumI[:]

        self.SourceCat = Cat

        self.DicoImager = self.FacetMachine.DicoImager
        self.ClusterCat = ClusterCat
        self.ClusterCat.SumI = 0.

        # ind=np.where(self.ClusterCat.SumI!=0)[0]
        # self.ClusterCat=self.ClusterCat[ind].copy()
        # NFacets=self.ClusterCat.shape[0]
        # log.print( "  There are %i non-zero facets"%NFacets)

        NFacets = len(self.FacetMachine.DicoImager)
        lFacet = np.zeros((NFacets, ), np.float32)
        mFacet = np.zeros_like(lFacet)
        for iFacet in range(NFacets):
            l, m = self.FacetMachine.DicoImager[iFacet]["lmShift"]
            lFacet[iFacet] = l
            mFacet[iFacet] = m

        NDir = ClusterCat.l.size
        d = np.sqrt((ClusterCat.l.reshape((NDir, 1)) -
                     lFacet.reshape((1, NFacets)))**2 +
                    (ClusterCat.m.reshape((NDir, 1)) -
                     mFacet.reshape((1, NFacets)))**2)
        idDir = np.argmin(d, axis=0)

        for iFacet in range(NFacets):
            self.FacetMachine.DicoImager[iFacet]["iDirJones"] = idDir[iFacet]
            # print(iFacet,idDir[iFacet])

        self.SM.ClusterCat = self.ClusterCat
        self.SM.SourceCat = self.SourceCat

        from DDFacet.Other.AsyncProcessPool import APP
        APP.startWorkers()
        #self.VS.CalcWeightsBackground()
        self.FacetMachine.initCFInBackground()
        self.FacetMachine.awaitInitCompletion()

        # for iFacet in range(NFacets):

        #     #self.FacetMachine.SpacialWeigth[iFacet]=NpShared.ToShared("%sSpacialWeight_%3.3i"%(self.IdSharedMem,iFacet),self.FacetMachine.SpacialWeigth[iFacet])
        #     self.FacetMachine.SpacialWeigth[iFacet]=self.FacetMachine._CF[iFacet]["SW"]
        # log.print( "  Splitting model image")
        # self.BuildGridsParallel()
        self.FacetMachine.BuildFacetNormImage()
        self.FacetMachine.setModelImage(ModelImage)
        self.FacetMachine.set_model_grid()
        self.PrepareGridMachinesMapping()
        #self.BuildGridsSerial()
        #self.BuildGridsParallel()

        NFacets = self.ClusterCat.shape[0]
        self.SM.NDir = self.NDirs
        self.SM.Dirs = self.Dirs
        log.print("  There are %i non-zero directions" % self.SM.NDir)
        self.SM.ClusterCat = self.ClusterCat
        self.SM.SourceCat = self.SourceCat

        # self.SM.SourceCat.I[:]=self.ClusterCat.SumI[:]

        self.SM.DicoJonesDirToFacet = self.DicoJonesDirToFacet
        self.SM.GD = self.FacetMachine.GD
        self.SM.DicoImager = self.FacetMachine.DicoImager
        #self.SM.GD["Comp"]["CompDeGridMode"]=0
        CurrentMS = self.VS.ListMS[self.VS.iCurrentMS]
        self.SM.rac = CurrentMS.rac
        self.SM.decc = CurrentMS.decc
        self.SM.AvailableCorrelationProductsIds = self.VS.StokesConverter.AvailableCorrelationProductsIds(
        )
        self.SM.RequiredStokesProductsIds = self.VS.StokesConverter.RequiredStokesProductsIds(
        )
        self.SM.NFreqBands = self.VS.NFreqBands
        self.SM.Path = {"cf_dict_path": self.FacetMachine._CF.path}
        #self.SM.ChanMappingDegrid=self.VS.FreqBandChannelsDegrid[0]
        self.SM.ChanMappingDegrid = self.VS.DicoMSChanMappingDegridding[0]
        self.SM._model_dict = self.FacetMachine._model_dict
        self.SM.MapClusterCatOrigToCut = self.MapClusterCatOrigToCut
Example No. 19
    def __init__(self, Gain=0.3,
                 MaxMinorIter=100, 
                 NCPU=1, #psutil.cpu_count()
                 CycleFactor=2.5, 
                 FluxThreshold=None, 
                 RMSFactor=3, 
                 PeakFactor=0,
                 PrevPeakFactor=0,
                 GD=None, 
                 SearchMaxAbs=1, 
                 ModelMachine=None,
                 NFreqBands=1,
                 RefFreq=None,
                 MainCache=None,
                 IdSharedMem="",
                 ParallelMode=True,
                 CacheFileName="HMPBasis",
                 **kw    # absorb any unknown keywords arguments into this
                 ):
        """
        ImageDeconvMachine constructor. Note that this should be called pretty much when setting up the imager,
        before APP workers are started, because the object registers APP handlers.
        """
        self.IdSharedMem=IdSharedMem
        self.SearchMaxAbs=SearchMaxAbs
        self._ModelImage = None
        self.MaxMinorIter = MaxMinorIter
        self.NCPU = NCPU
        self.Chi2Thr = 10000
        self._MaskArray = None
        self.GD = GD
        self.SubPSF = None
        self.MultiFreqMode = NFreqBands > 1
        self.NFreqBands = NFreqBands
        self.RefFreq = RefFreq
        self.FluxThreshold = FluxThreshold
        self.CycleFactor = CycleFactor
        self.RMSFactor = RMSFactor
        self.PeakFactor = PeakFactor
        self.PrevPeakFactor = PrevPeakFactor
        self.CacheFileName=CacheFileName
        self.GainMachine=ClassGainMachine.get_instance()
        self.ModelMachine = None
        self.PSFServer = None
        if ModelMachine is not None:
            self.updateModelMachine(ModelMachine)
        self.PSFHasChanged=False
        self._previous_initial_peak = None
        self.maincache = MainCache
        # reset overall iteration counter
        self._niter = 0
        self.facetcache=None
        self._MaskArray=None
        self.MaskMachine=None
        self.ParallelMode=ParallelMode
        if self.ParallelMode:
            APP.registerJobHandlers(self)

        # we are in a worker
        if not self.ParallelMode:
            numexpr.set_num_threads(NCPU)

        # peak finding mode.
        # "normal" searches for peak in mean dirty image
        # "sigma" searches for peak in mean_dirty/noise_map (setNoiseMap will have been called)
        # "weighted" searched for peak in mean_dirty*weight
        self._peakMode = "normal"

        self.CurrentNegMask=None
        self._NoiseMap=None
        self._PNRStop=None      # in _peakMode "sigma", provides additional stopping criterion

        if self.GD["HMP"]["PeakWeightImage"]:
            print>> log, "  Reading peak weighting image %s" % self.GD["HMP"]["PeakWeightImage"]
            img = image(self.GD["HMP"]["PeakWeightImage"]).getdata()
            _, _, nx, ny = img.shape
            # collapse freq and pol axes
            img = img.sum(axis=1).sum(axis=0).T[::-1].copy()
            self._peakWeightImage = img.reshape((1,1,ny,nx))
            self._peakMode = "weighted"

        self._prevPeak = None
Example No. 20
    def InitMSMF(self, approx=False, cache=True, facetcache=None):
        """Initializes MSMF basis functions. If approx is True, then uses the central facet's PSF for
        all facets.
        Populates the self.facetcache dict, unless facetcache is supplied
        """
        self.DicoMSMachine = {}
        valid = True
        if facetcache is not None:
            print>> log, "HMP basis functions pre-initialized"
            self.facetcache = facetcache
        else:
            cachehash = dict(
                [(section, self.GD[section]) for section in (
                    "Data", "Beam", "Selection", "Freq",
                    "Image", "Facets", "Weight", "RIME","DDESolutions",
                    "Comp", "CF",
                    "HMP")])
            cachepath, valid = self.maincache.checkCache(self.CacheFileName, cachehash, reset=not cache or self.PSFHasChanged)
            # do not use cache in approx mode
            if approx or not cache:
                valid = False
            if valid:
                print>>log,"Initialising HMP basis functions from cache %s"%cachepath
                self.facetcache = shared_dict.create(self.CacheFileName)
                self.facetcache.restore(cachepath)
            else:
                self.facetcache = None


        init_cache = self.facetcache is None
        if init_cache:
            self.facetcache = shared_dict.create(self.CacheFileName)

        # in any mode, start by initializing a MS machine for the central facet. This will precompute the scale
        # functions
        centralFacet = self.PSFServer.DicoVariablePSF["CentralFacet"]

        self.DicoMSMachine[centralFacet] = MSM0 = \
            self._initMSM_facet(centralFacet,
                                self.facetcache.addSubdict(centralFacet) if init_cache else self.facetcache[centralFacet],
                                None, self.SideLobeLevel, self.OffsetSideLobe, verbose=True)
        if approx:
            print>>log, "HMP approximation mode: using PSF of central facet (%d)" % centralFacet
            for iFacet in xrange(self.PSFServer.NFacets):
                self.DicoMSMachine[iFacet] = MSM0
        elif (self.GD["Facets"]["NFacets"]==1)&(not self.GD["DDESolutions"]["DDSols"]):
            self.DicoMSMachine[0] = MSM0
            
        else:
            # if no facet cache, init in parallel
            if init_cache:
                for iFacet in xrange(self.PSFServer.NFacets):
                    if iFacet != centralFacet:
                        fcdict = self.facetcache.addSubdict(iFacet)
                        if self.ParallelMode:
                            args=(fcdict.writeonly(), MSM0.ScaleFuncs.readonly(), self.DicoVariablePSF.readonly(),
                                  iFacet, self.SideLobeLevel, self.OffsetSideLobe, False)
                            APP.runJob("InitHMP:%d"%iFacet, self._initMSM_handler,
                                       args=args)
                        else:
                            self.DicoMSMachine[iFacet] = \
                                self._initMSM_facet(iFacet, fcdict, None,
                                                    self.SideLobeLevel, self.OffsetSideLobe, MSM0=MSM0, verbose=False)

                if self.ParallelMode:
                    APP.awaitJobResults("InitHMP:*", progress="Init HMP")
                    self.facetcache.reload()

            #        t = ClassTimeIt.ClassTimeIt()
            # now reinit from cache (since cache was computed by subprocesses)
            for iFacet in xrange(self.PSFServer.NFacets):
                if iFacet not in self.DicoMSMachine:
                    self.DicoMSMachine[iFacet] = \
                        self._initMSM_facet(iFacet, self.facetcache[iFacet], None,
                                            self.SideLobeLevel, self.OffsetSideLobe, MSM0=MSM0, verbose=False)

            # write cache to disk, unless in a mode where we explicitly don't want it
            if facetcache is None and not valid and cache and not approx:
                try:
                    #MyPickle.DicoNPToFile(facetcache,cachepath)
                    #cPickle.dump(facetcache, file(cachepath, 'w'), 2)
                    print>>log,"  saving HMP cache to %s"%cachepath
                    self.facetcache.save(cachepath)
                    #self.maincache.saveCache("HMPMachine")
                    self.maincache.saveCache(self.CacheFileName)
                    self.PSFHasChanged=False
                    print>>log,"  HMP init done"
                except:
                    print>>log, traceback.format_exc()
                    print >>log, ModColor.Str(
                        "WARNING: HMP cache could not be written, see error report above. Proceeding anyway.")
Example No. 21
    def __init__(self,
                 InSolsName,
                 OutSolsName,
                 InterpMode="TEC",
                 PolMode="Scalar",
                 Amp_PolyOrder=3,
                 NCPU=0,
                 Amp_GaussKernel=(0, 5),
                 Amp_SmoothType="Poly",
                 CrossMode=1,
                 RemoveAmpBias=0,
                 RemoveMedianAmp=True):

        if type(InterpMode) == str:
            InterpMode = InterpMode.split(",")  #[InterpMode]
        self.InSolsName = InSolsName
        self.OutSolsName = OutSolsName
        self.RemoveMedianAmp = RemoveMedianAmp

        log.print("Loading %s" % self.InSolsName)
        self.DicoFile = dict(np.load(self.InSolsName, allow_pickle=True))
        self.Sols = self.DicoFile["Sols"].view(np.recarray)
        if "MaskedSols" in self.DicoFile.keys():
            MaskFreq = np.logical_not(
                np.all(np.all(np.all(self.DicoFile["MaskedSols"][..., 0, 0],
                                     axis=0),
                              axis=1),
                       axis=1))
            nt, _, na, nd, _, _ = self.Sols.G.shape

            self.DicoFile["FreqDomains"] = self.DicoFile["FreqDomains"][
                MaskFreq]
            NFreqsOut = np.count_nonzero(MaskFreq)
            log.print("There are %i non-zero freq channels" % NFreqsOut)
            SolsOut = np.zeros(
                (nt, ),
                dtype=[("t0", np.float64), ("t1", np.float64),
                       ("G", np.complex64, (NFreqsOut, na, nd, 2, 2)),
                       ("Stats", np.float32, (NFreqsOut, na, 4))])
            SolsOut = SolsOut.view(np.recarray)
            SolsOut.G = self.Sols.G[:, MaskFreq, ...]
            SolsOut.t0 = self.Sols.t0
            SolsOut.t1 = self.Sols.t1
            self.Sols = self.DicoFile["Sols"] = SolsOut
            del (self.DicoFile["MaskedSols"])

        #self.Sols=self.Sols[0:10].copy()
        self.CrossMode = CrossMode
        self.CentralFreqs = np.mean(self.DicoFile["FreqDomains"], axis=1)
        self.incrCross = 11
        iii = 0
        NTEC = 101
        NConstPhase = 51
        TECGridAmp = 0.1
        TECGrid, CPhase = np.mgrid[-TECGridAmp:TECGridAmp:NTEC * 1j,
                                   -np.pi:np.pi:NConstPhase * 1j]
        Z = TECToZ(TECGrid.reshape((-1, 1)), CPhase.reshape((-1, 1)),
                   self.CentralFreqs.reshape((1, -1)))
        self.Z = Z
        self.TECGrid, self.CPhase = TECGrid, CPhase

        self.InterpMode = InterpMode
        self.Amp_PolyOrder = Amp_PolyOrder

        self.RemoveAmpBias = RemoveAmpBias
        if self.RemoveAmpBias:
            self.CalcFreqAmpSystematics()
            self.Sols.G /= self.G0

        self.GOut = NpShared.ToShared("%sGOut" % IdSharedMem,
                                      self.Sols.G.copy())
        self.PolMode = PolMode
        self.Amp_GaussKernel = Amp_GaussKernel
        if len(self.Amp_GaussKernel) != 2:
            raise ValueError("GaussKernel should be of size 2")
        self.Amp_SmoothType = Amp_SmoothType

        if "TEC" in self.InterpMode:
            log.print("  Smooth phases using a TEC model")
            if self.CrossMode:
                log.print(ModColor.Str("Using CrossMode"))

        if "Amp" in self.InterpMode:
            if Amp_SmoothType == "Poly":
                log.print(
                    "  Smooth amplitudes using polynomial model of order %i" %
                    self.Amp_PolyOrder)
            if Amp_SmoothType == "Gauss":
                log.print(
                    "  Smooth amplitudes using Gaussian kernel of %s (Time/Freq) bins"
                    % str(Amp_GaussKernel))

        if self.RemoveAmpBias:
            self.GOut *= self.G0

        APP.registerJobHandlers(self)
        AsyncProcessPool.init(ncpu=NCPU, affinity=0)
Example No. 22
 def killWorkers(self):
     print >> log, "Killing workers"
     APP.terminate()
     APP.shutdown()
     Multiprocessing.cleanupShm()
Example No. 23
    def __init__(self,
                 ListMSName,
                 ColName="DATA",
                 ModelName="PREDICT_KMS",
                 UVRange=[1., 1000.],
                 SolsName=None,
                 FileCoords=None,
                 Radius=3.,
                 NOff=-1,
                 Image=None,
                 SolsDir=None,
                 NCPU=1):
        self.ListMSName = sorted(ListMSName)  #[0:2]
        self.nMS = len(self.ListMSName)
        self.ColName = ColName
        self.ModelName = ModelName
        self.OutName = self.ListMSName[0].split("/")[-1].split("_")[0]
        self.UVRange = UVRange
        self.ReadMSInfos()
        self.Radius = Radius
        self.Image = Image
        self.SolsDir = SolsDir
        #self.PosArray=np.genfromtxt(FileCoords,dtype=[('Name','S200'),("ra",np.float64),("dec",np.float64),('Type','S200')],delimiter="\t")

        # identify version in logs
        print >> log, "DynSpecMS version %s starting up" % version()

        # should we use the surveys DB?
        if 'DDF_PIPELINE_DATABASE' in os.environ:
            print >> log, "Using the surveys database"
            from surveys_db import SurveysDB
            with SurveysDB() as sdb:
                sdb.cur.execute('select * from transients')
                result = sdb.cur.fetchall()
            # convert to a list, then to ndarray, then to recarray
            l = []
            for r in result:
                l.append((r['id'], r['ra'], r['decl'], r['type']))
            self.PosArray = np.asarray(l,
                                       dtype=[('Name', 'S200'),
                                              ("ra", np.float64),
                                              ("dec", np.float64),
                                              ('Type', 'S200')])
            print >> log, "Created an array with %i records" % len(result)
        else:
            if FileCoords is None:
                FileCoords = "Transient_LOTTS.csv"
                if not os.path.isfile(FileCoords):
                    ssExec = "wget -q --user=anonymous ftp://ftp.strw.leidenuniv.nl/pub/tasse/%s -O %s" % (
                        FileCoords, FileCoords)
                    print >> log, "Downloading %s" % FileCoords
                    print >> log, "   Executing: %s" % ssExec
                    os.system(ssExec)
            self.PosArray = np.genfromtxt(FileCoords,
                                          dtype=[('Name', 'S200'),
                                                 ("ra", np.float64),
                                                 ("dec", np.float64),
                                                 ('Type', 'S200')],
                                          delimiter=",")[()]
        self.PosArray = self.PosArray.view(np.recarray)
        self.PosArray.ra *= np.pi / 180.
        self.PosArray.dec *= np.pi / 180.

        NOrig = self.PosArray.shape[0]
        Dist = AngDist(self.ra0, self.PosArray.ra, self.dec0,
                       self.PosArray.dec)
        ind = np.where(Dist < Radius * np.pi / 180)[0]
        self.PosArray = self.PosArray[ind]
        self.NDirSelected = self.PosArray.shape[0]

        print >> log, "Selected %i target [out of the %i in the original list]" % (
            self.NDirSelected, NOrig)
        if self.NDirSelected == 0:
            print >> log, ModColor.Str("   Have found no sources - returning")
            self.killWorkers()
            return

        if NOff == -1:
            NOff = self.PosArray.shape[0] * 2
        if NOff is not None:
            print >> log, "Including %i off targets" % (NOff)
            self.PosArray = np.concatenate(
                [self.PosArray, self.GiveOffPosArray(NOff)])
            self.PosArray = self.PosArray.view(np.recarray)
        self.NDir = self.PosArray.shape[0]
        print >> log, "For a total of %i targets" % (self.NDir)

        self.DicoDATA = shared_dict.create("DATA")
        self.DicoGrids = shared_dict.create("Grids")
        self.DicoGrids["GridLinPol"] = np.zeros(
            (self.NDir, self.NChan, self.NTimes, 4), np.complex128)
        self.DicoGrids["GridWeight"] = np.zeros(
            (self.NDir, self.NChan, self.NTimes, 4), np.complex128)

        self.SolsName = SolsName
        self.DoJonesCorr = False
        if self.SolsName:
            self.DoJonesCorr = True
            self.DicoJones = shared_dict.create("DicoJones")

        APP.registerJobHandlers(self)
        AsyncProcessPool.init(ncpu=NCPU, affinity=0)
        APP.startWorkers()
Example No. 24
    def InterpolParallel(self):
        Sols0 = self.Sols
        nt, nch, na, nd, _, _ = Sols0.G.shape
        log.print(" #Times:      %i" % nt)
        log.print(" #Channels:   %i" % nch)
        log.print(" #Antennas:   %i" % na)
        log.print(" #Directions: %i" % nd)

        # APP.terminate()
        # APP.shutdown()
        # Multiprocessing.cleanupShm()
        APP.startWorkers()
        iJob = 0
        #        for iAnt in [49]:#range(na):
        #            for iDir in [0]:#range(nd):

        if "TEC" in self.InterpMode:
            #APP.runJob("FitThisTEC_%d"%iJob, self.FitThisTEC, args=(208,)); iJob+=1
            self.TECArray = NpShared.ToShared(
                "%sTECArray" % IdSharedMem, np.zeros((nt, nd, na), np.float32))
            self.CPhaseArray = NpShared.ToShared(
                "%sCPhaseArray" % IdSharedMem,
                np.zeros((nt, nd, na), np.float32))
            for it in range(nt):
                #            for iDir in range(nd):
                APP.runJob("FitThisTEC_%d" % iJob,
                           self.FitThisTEC,
                           args=(it, ))  #,serial=True)
                iJob += 1
            workers_res = APP.awaitJobResults("FitThisTEC*",
                                              progress="Fit TEC")

        if "Amp" in self.InterpMode:
            for iAnt in range(na):
                for iDir in range(nd):
                    APP.runJob("FitThisAmp_%d" % iJob,
                               self.FitThisAmp,
                               args=(iAnt, iDir))  #,serial=True)
                    iJob += 1
            workers_res = APP.awaitJobResults("FitThisAmp*",
                                              progress="Smooth Amp")

        if "PolyAmp" in self.InterpMode:
            for iDir in range(nd):
                APP.runJob("FitThisPolyAmp_%d" % iJob,
                           self.FitThisPolyAmp,
                           args=(iDir, ))
                iJob += 1
            workers_res = APP.awaitJobResults("FitThisPolyAmp*",
                                              progress="Smooth Amp")

        if "Clip" in self.InterpMode:
            for iDir in range(nd):
                APP.runJob("ClipThisDir_%d" % iJob,
                           self.ClipThisDir,
                           args=(iDir, ),
                           serial=True)
                iJob += 1
            workers_res = APP.awaitJobResults("ClipThisDir*",
                                              progress="Clip Amp")

        #APP.terminate()
        APP.shutdown()
        Multiprocessing.cleanupShm()
Example No. 25
 def __init__(self, name=None, mode=2):
     self.name = name or "SMM.%x" % id(self)
     APP.registerJobHandlers(self)
     self._job_counter = APP.createJobCounter(self.name)
     self._data = self._blockdict = self._sizedict = None
Example No. 26
 def killWorkers(self):
     print("Killing workers", file=log)
     APP.terminate()
     APP.shutdown()
     Multiprocessing.cleanupShm()
Example No. 27
    def InitFromCatalog(self):

        FileCoords = self.FileCoords
        dtype = [('Name', 'S200'), ("ra", np.float64), ("dec", np.float64),
                 ('Type', 'S200')]
        # should we use the surveys DB?
        if 'DDF_PIPELINE_DATABASE' in os.environ:
            print("Using the surveys database", file=log)
            from surveys_db import SurveysDB
            with SurveysDB() as sdb:
                sdb.cur.execute('select * from transients')
                result = sdb.cur.fetchall()
            # convert to a list, then to ndarray, then to recarray
            l = []
            for r in result:
                l.append((r['id'], r['ra'], r['decl'], r['type']))
            if FileCoords is not None:
                print('Adding data from file ' + FileCoords, file=log)
                additional = np.genfromtxt(FileCoords,
                                           dtype=dtype,
                                           delimiter=",")[()]
                if not additional.shape:
                    # deal with a one-line input file
                    additional = np.array([additional], dtype=dtype)
                for r in additional:
                    l.append(tuple(r))
            self.PosArray = np.asarray(l, dtype=dtype)
            print("Created an array with %i records" % len(result), file=log)

        else:

            #FileCoords="Transient_LOTTS.csv"
            if FileCoords is None:
                FileCoords = "Transient_LOTTS.csv"
                if not os.path.isfile(FileCoords):
                    ssExec = "wget -q --user=anonymous ftp://ftp.strw.leidenuniv.nl/pub/tasse/%s -O %s" % (
                        FileCoords, FileCoords)
                    print("Downloading %s" % FileCoords, file=log)
                    print("   Executing: %s" % ssExec, file=log)
                    os.system(ssExec)
            log.print("Reading cvs file: %s" % FileCoords)
            #self.PosArray=np.genfromtxt(FileCoords,dtype=dtype,delimiter=",")[()]
            self.PosArray = np.genfromtxt(FileCoords,
                                          dtype=dtype,
                                          delimiter=",")

        self.PosArray = self.PosArray.view(np.recarray)
        self.PosArray.ra *= np.pi / 180.
        self.PosArray.dec *= np.pi / 180.
        Radius = self.Radius
        NOrig = self.PosArray.Name.shape[0]
        Dist = AngDist(self.ra0, self.PosArray.ra, self.dec0,
                       self.PosArray.dec)
        ind = np.where(Dist < (Radius * np.pi / 180))[0]
        self.PosArray = self.PosArray[ind]
        self.NDirSelected = self.PosArray.shape[0]

        print("Selected %i target [out of the %i in the original list]" %
              (self.NDirSelected, NOrig),
              file=log)
        if self.NDirSelected == 0:
            print(ModColor.Str("   Have found no sources - returning"),
                  file=log)
            self.killWorkers()
            return

        NOff = self.NOff

        if NOff == -1:
            NOff = self.PosArray.shape[0] * 2
        if NOff is not None:
            print("Including %i off targets" % (NOff), file=log)
            self.PosArray = np.concatenate(
                [self.PosArray, self.GiveOffPosArray(NOff)])
            self.PosArray = self.PosArray.view(np.recarray)
        self.NDir = self.PosArray.shape[0]
        print("For a total of %i targets" % (self.NDir), file=log)

        self.DicoDATA = shared_dict.create("DATA")
        self.DicoGrids = shared_dict.create("Grids")
        self.DicoGrids["GridLinPol"] = np.zeros(
            (self.NDir, self.NChan, self.NTimes, 4), np.complex128)
        self.DicoGrids["GridWeight"] = np.zeros(
            (self.NDir, self.NChan, self.NTimes, 4), np.complex128)

        self.DoJonesCorr_kMS = False
        self.DicoJones = None
        if self.SolsName:
            self.DoJonesCorr_kMS = True
            self.DicoJones_kMS = shared_dict.create("DicoJones_kMS")

        self.DoJonesCorr_Beam = False
        if self.BeamModel:
            self.DoJonesCorr_Beam = True
            self.DicoJones_Beam = shared_dict.create("DicoJones_Beam")

        APP.registerJobHandlers(self)
        AsyncProcessPool.init(ncpu=self.NCPU, affinity=0)
        APP.startWorkers()