Example #1
 def __init__(self,
              work_queue,
              result_queue,
              IdSharedMem=None,
              StopWhenQueueEmpty=False,
              NImGauss=31,
              DeltaChi2=4.,
              ListGauss=None,
              GSig=None,
              SigMin=None,
              Var=None):
     multiprocessing.Process.__init__(self)
     self.work_queue = work_queue
     self.result_queue = result_queue
     self.kill_received = False
     self.exit = multiprocessing.Event()
     self.IdSharedMem = IdSharedMem
     self.StopWhenQueueEmpty = StopWhenQueueEmpty
     self.CubeMeanVariablePSF = NpShared.GiveArray("%sCubeMeanVariablePSF" %
                                                   self.IdSharedMem)
     self.MeanModelImage = NpShared.GiveArray("%sMeanModelImage" %
                                              self.IdSharedMem)
     self.MeanResidual = NpShared.GiveArray("%sMeanResidual" %
                                            self.IdSharedMem)
     self.NImGauss = NImGauss
     self.DeltaChi2 = DeltaChi2
     self.ListGauss = ListGauss
     self.NGauss = len(ListGauss)
     self.GSig = GSig
     self.SigMin = SigMin
     self.Var = Var
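
The worker only attaches to arrays that the parent process has already published under the same IdSharedMem prefix (Example #12 below does this for the real PSF cube and images). A minimal sketch of that producer side, using only NpShared calls that appear in these examples; the prefix, import path and array shapes are placeholders:

import numpy as np
from DDFacet.Array import NpShared   # assumed import path for the NpShared module used above

IdSharedMem = "DDF.12345.SmearSM."   # hypothetical prefix, passed on to the worker

# publish the arrays the worker fetches with NpShared.GiveArray(...) in __init__
NpShared.ToShared("%sCubeMeanVariablePSF" % IdSharedMem, np.zeros((1, 1, 1, 63, 63), np.float32))
NpShared.ToShared("%sMeanModelImage" % IdSharedMem, np.zeros((1, 1, 512, 512), np.float32))
NpShared.ToShared("%sMeanResidual" % IdSharedMem, np.zeros((1, 1, 512, 512), np.float32))
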
Example #2
    def run(self):
        while not self.kill_received and self.CondContinue():
            #gc.enable()
            try:
                iQueue = self.work_queue.get_nowait()  #(True,2)
            except Exception as e:
                #print "Exception worker: %s"%str(e)
                break

            #print "Start %i"%iQueue

            Queue = NpShared.GiveArray("%sQueue_%3.3i" %
                                       (self.IdSharedMem, iQueue))
            self.CurrentInvCov = NpShared.GiveArray("%sInvCov_AllFacet" %
                                                    (self.IdSharedMem))

            for iJob in range(Queue.shape[0]):
                x0, y0, FacetID = Queue[iJob]

                iFacet = FacetID
                self.CurrentFacetID = FacetID
                #self.CurrentCF=NpShared.GiveArray("%sConvMatrix_Facet_%4.4i"%(self.IdSharedMem,iFacet))
                self.CurrentCM = NpShared.GiveArray("%sCM_Facet%4.4i" %
                                                    (self.IdSharedMem, iFacet))
                #self.CurrentInvCov=NpShared.GiveArray("%sInvCov_Facet%4.4i"%(self.IdSharedMem,iFacet))
                # if self.CurrentInvCov is None:
                #     invCM=ModLinAlg.invSVD(np.float64(self.CurrentCM[0,0]))/self.Var
                #     self.CurrentInvCov=NpShared.ToShared("%sInvCov_Facet%4.4i"%(self.IdSharedMem,iFacet),invCM)

                iGauss = self.SmearThisComp(x0, y0)
                Queue[iJob, 2] = iGauss

            self.result_queue.put({"Success": True, "iQueue": iQueue})
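
Each work-queue entry is just the index of a shared array named "%sQueue_%3.3i", whose rows hold [x0, y0, FacetID]; the worker overwrites the third column with the chosen Gaussian index and posts {"Success": True, "iQueue": iQueue}. A hedged sketch of driving one batch through a worker inline, assuming the shared arrays from Example #12 (CubeMeanVariablePSF, MeanModelImage, MeanResidual, CM_Facet%4.4i, InvCov_AllFacet) are already published and that ListGauss, GSig and Var are in scope:

import multiprocessing
import numpy as np

work_queue = multiprocessing.Queue()
result_queue = multiprocessing.Queue()

# one batch of three pixels in facet 0; column 2 is overwritten with iGauss by the worker
NpShared.ToShared("%sQueue_%3.3i" % (IdSharedMem, 0),
                  np.array([[10, 12, 0], [11, 12, 0], [12, 12, 0]]))
work_queue.put(0)

W = WorkerSmear(work_queue, result_queue,
                IdSharedMem=IdSharedMem, StopWhenQueueEmpty=True,
                ListGauss=ListGauss, GSig=GSig, Var=Var)
W.run()                       # run inline instead of W.start(); stops when the queue is empty
print(result_queue.get())     # {'Success': True, 'iQueue': 0}
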
Example #3
    def __init__(self, Cell=10, Sup=15, Nw=11, wmax=30000, Npix=101, Freqs=np.array([100.e6]), OverS=11, lmShift=None,
                 mode="compute",
                 cf_dict=None, compute_cf=True,
                 IDFacet=None):
        """
        Class for computing/loading/saving w-kernels and spheroidals.

        Args:
            Cell:
            Sup:
            Nw:
            wmax:
            Npix:
            Freqs:
            OverS:
            lmShift:
            mode:       "compute" to compute CFs, save them to store_dict, and write to store_file
                        "load" to load CFs from store_file, and save them to store_dict
                        "dict" to load CFs from store_dict
            IDFacet:
        """

        self.Nw = int(Nw)
        self.Cell = Cell
        self.Sup = Sup
        self.Npix = Npix
        self.Freqs = Freqs
        self.OverS = OverS
        self.lmShift = lmShift
        self.IDFacet = IDFacet
        Freqs = self.Freqs
        C = 299792458.
        waveMin = C/Freqs[-1]
        self.RefWave = waveMin

        # recompute?
        if compute_cf:
            cf_dict["wmax"] = self.wmax = wmax
            self.InitSphe()
            self.InitW()
            dS = np.float32
            cf_dict["Sphe"] = dS(self.ifzfCF.real)
            cf_dict["InvSphe"] = dS(1./np.float64(self.ifzfCF.real))
            cf_dict["CuCv"] = np.array([self.Cu, self.Cv])
            NpShared.PackListSquareMatrix(cf_dict, "W", self.Wplanes + self.WplanesConj)
        else:
            self.wmax = cf_dict["wmax"]
            self.ifzfCF = cf_dict["Sphe"]
            self.Cu, self.Cv = cf_dict["CuCv"]
            ww = NpShared.UnPackListSquareMatrix(cf_dict["W"])
            if len(ww) != self.Nw*2:
                raise RuntimeError("mismatch in number of cached w-planes")
            self.Wplanes = ww[:self.Nw]
            self.WplanesConj = ww[self.Nw:]
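
Constructed with compute_cf=True, the object fills cf_dict with the spheroidal ("Sphe", "InvSphe"), "CuCv" and the 2*Nw packed w-planes; a second instance can then be rebuilt from that dict alone with compute_cf=False. A minimal usage sketch, assuming the class is importable under the hypothetical name CFClass and that a plain dict is acceptable as cf_dict:

import numpy as np

cf_dict = {}
cf1 = CFClass(Cell=10, Sup=15, Nw=11, wmax=30000, Npix=101,
              Freqs=np.array([100.e6]), OverS=11,
              cf_dict=cf_dict, compute_cf=True)    # computes and stores Sphe, InvSphe, CuCv, W

cf2 = CFClass(Cell=10, Sup=15, Nw=11, Npix=101,
              Freqs=np.array([100.e6]), OverS=11,
              cf_dict=cf_dict, compute_cf=False)   # reloads from the dict; checks len(W) == 2*Nw
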
Example #4
def cleanupShm ():
    """
    Deletes all shared arrays for this process
    """
    NpShared.DelAll(getShmPrefix())
    # the statement above doesn't work for directories and subdirectories
    os.system("rm -rf /dev/shm/%s"%getShmPrefix())
Example #5
 def addSharedArray (self, item, shape, dtype):
     """adds a SharedArray entry of the specified shape and dtype"""
     if not self._readwrite:
         raise RuntimeError("SharedDict %s attached as read-only" % self.path)
     name = self._key_to_name(item) + 'a'
     filepath = os.path.join(self.path, name)
     array = NpShared.CreateShared(_to_shm(filepath), shape, dtype)
     collections.OrderedDict.__setitem__(self, item, array)
     return array
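
The returned array lives in shared memory under the dict's path with an 'a' suffix, so writes made here are immediately visible to any other process attached to the same SharedDict. A minimal usage sketch, assuming sd is a SharedDict instance attached read-write:

import numpy as np

psf = sd.addSharedArray("psf", (4, 256, 256), np.float32)   # array backed by /dev/shm
psf[:] = 0.0   # writes go straight to shared memory; no copy back into the dict is needed
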
Example #6
    def initIsland(self, DicoJob):
        if self.InitMachine is None:
            self.Init()
        iIsland = DicoJob["iIsland"]
        Island = self.ListIsland[iIsland]
        SModel, AModel = self.InitMachine.giveModel(Island)

        DicoInitIndiv = {"S": SModel, "Alpha": AModel}
        NameDico = "%sDicoInitIsland_%5.5i" % (self.IdSharedMem, iIsland)
        NpShared.DicoToShared(NameDico, DicoInitIndiv)
        self.result_queue.put({"Success": True, "iIsland": iIsland})
    def GiveDicoJonesMatrices(self):
        print >> log, "  Getting Jones matrices from Shared Memory"
        DicoJonesMatrices = {}

        GD = self.GD

        SolsFile = GD["DDESolutions"]["DDSols"]

        if SolsFile != "":
            DicoJones_killMS = NpShared.SharedToDico("%sJonesFile_killMS" %
                                                     self.IdSharedMem)
            DicoJonesMatrices["DicoJones_killMS"] = DicoJones_killMS
            DicoJonesMatrices["DicoJones_killMS"][
                "MapJones"] = NpShared.GiveArray("%sMapJones_killMS" %
                                                 self.IdSharedMem)
            DicoClusterDirs_killMS = NpShared.SharedToDico(
                "%sDicoClusterDirs_killMS" % self.IdSharedMem)
            DicoJonesMatrices["DicoJones_killMS"][
                "DicoClusterDirs"] = DicoClusterDirs_killMS

        ApplyBeam = (GD["Beam"]["Model"] is not None)
        if ApplyBeam:
            DicoJones_Beam = NpShared.SharedToDico("%sJonesFile_Beam" %
                                                   self.IdSharedMem)
            DicoJonesMatrices["DicoJones_Beam"] = DicoJones_Beam
            DicoJonesMatrices["DicoJones_Beam"][
                "MapJones"] = NpShared.GiveArray("%sMapJones_Beam" %
                                                 self.IdSharedMem)
            DicoClusterDirs_Beam = NpShared.SharedToDico(
                "%sDicoClusterDirs_Beam" % self.IdSharedMem)
            DicoJonesMatrices["DicoJones_Beam"][
                "DicoClusterDirs"] = DicoClusterDirs_Beam

        return DicoJonesMatrices
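
The reader above expects the killMS and Beam solutions to sit under fixed names: a dict at "%sJonesFile_killMS" / "%sJonesFile_Beam", a "%sMapJones_*" array, and a "%sDicoClusterDirs_*" dict. A hedged sketch of the corresponding writer side, using NpShared.DicoToShared and NpShared.ToShared as in the other examples; the prefix and dict contents are placeholders:

import numpy as np

IdSharedMem = "DDF.12345."   # hypothetical prefix
DicoJones = {"Jones": np.zeros((1, 1, 1, 1, 2, 2), np.complex64)}   # placeholder solutions

NpShared.DicoToShared("%sJonesFile_killMS" % IdSharedMem, DicoJones)
NpShared.ToShared("%sMapJones_killMS" % IdSharedMem, np.zeros(1, np.int32))
NpShared.DicoToShared("%sDicoClusterDirs_killMS" % IdSharedMem,
                      {"l": np.zeros(1), "m": np.zeros(1)})
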
Example #8
    def FindAlpha(self):
        self.DicoJonesMatrices=self.GiveDicoJonesMatrices()
        DicoJonesMatrices=self.DicoJonesMatrices
        print("  Find Alpha for smoothing", file=log)
        l_List=DicoJonesMatrices["DicoJones_killMS"]["DicoClusterDirs"]["l"].tolist()
        m_List=DicoJonesMatrices["DicoJones_killMS"]["DicoClusterDirs"]["m"].tolist()

        NDir=len(l_List)
        Jm=DicoJonesMatrices["DicoJones_killMS"]["Jones"]
        nt,nd,na,nf,_,_=Jm.shape
        self.AlphaReg=np.zeros((NDir,na),np.float32)

        for (iDir,l,m) in zip(range(NDir),l_List,m_List):
            self.AlphaReg[iDir,:]=self.FindAlphaSingleDir(DicoJonesMatrices,l,m)
        NpShared.ToShared("%sAlphaReg" % self.IdSharedMem, self.AlphaReg)
    def UnPackMapping(self):
        Map = NpShared.GiveArray("%sMappingSmearing" % (self.IdSharedMem))
        Nb = Map[0]
        NRowInBlocks = Map[1:Nb + 1]
        StartRow = Map[Nb + 1:2 * Nb + 1]
        # print
        # print NRowInBlocks.tolist()
        # print StartRow.tolist()
        MaxRow = 0

        for i in [3507]:  # range(Nb):
            ii = StartRow[i]
            MaxRow = np.max([MaxRow, np.max(Map[ii:ii + NRowInBlocks[i]])])
            print "(iblock= %i , istart= %i), Nrow=%i" % \
                  (i, StartRow[i], NRowInBlocks[i]), Map[ii:ii+NRowInBlocks[i]]
            #if MaxRow>=1080: stop

        print(MaxRow)
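
The mapping array is self-describing: Map[0] is the number of blocks Nb, the next Nb entries give each block's row count, the Nb after that give each block's start offset into Map, and the row lists follow. The loop above only inspects block 3507 for debugging; a sketch of the same walk over every block, restating the layout used above:

def iter_blocks(Map):
    """Yield (iblock, rows) for a packed smearing mapping laid out as in UnPackMapping."""
    Nb = Map[0]
    NRowInBlocks = Map[1:Nb + 1]
    StartRow = Map[Nb + 1:2 * Nb + 1]
    for i in range(Nb):
        ii = StartRow[i]
        yield i, Map[ii:ii + NRowInBlocks[i]]
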
Example #10
    def __setitem__(self, item, value):
        if not self._readwrite:
            raise RuntimeError("SharedDict %s attached as read-only" %
                               self.path)
        if type(item).__name__ not in _allowed_key_types:
            raise KeyError("unsupported key of type " + type(item).__name__)
        name = self._key_to_name(item)
        path = os.path.join(self.path, name)
        # remove previous item from SHM, if it's in the local dict
        if collections.OrderedDict.__contains__(self, item):
            for suffix in "ap":
                if os.path.exists(path + suffix):
                    os.unlink(path + suffix)
            if os.path.exists(path + "d"):
                os.system("rm -fr " + path + "d")
        # if item is not in local dict but is on disk, this is a multiprocessing logic error
        else:
            for suffix in "apd":
                if os.path.exists(path + suffix):
                    raise RuntimeError(
                        "SharedDict entry %s exists, possibly added by another process. This is most likely a bug!"
                        % (path + suffix))

        # for arrays, copy to a shared array
        if isinstance(value, np.ndarray):
            value = NpShared.ToShared(_to_shm(path + 'a'), value)
        # for regular dicts, copy across
        elif isinstance(value, (dict, SharedDict, collections.OrderedDict)):
            dict1 = self.addSubdict(item)
            for key1, value1 in getattr(value, "iteritems", value.items)():
                dict1[key1] = value1
            value = dict1
        # # for lists, use dict
        # elif isinstance(value, (list, tuple)):
        #     dict1 = self.addList(item)
        #     for key1, value1 in enumerate(value):
        #         dict1[key1] = value1
        #     value = dict1
        # all other types, just use pickle
        else:
            cPickle.dump(value, open(path + 'p', "wb"), 2)
        collections.OrderedDict.__setitem__(self, item, value)
    def giveMinDist(self, DicoJob):
        iIsland=DicoJob["iIsland"]
        NIslands=len(self.ListIslands)
        Result=np.zeros((3,NIslands),np.int32)

        x0,y0=np.array(self.ListIslands[iIsland]).T
        for jIsland in range(NIslands):
            x1,y1=np.array(self.ListIslands[jIsland]).T
            dx=x0.reshape((-1,1))-x1.reshape((1,-1))
            dy=y0.reshape((-1,1))-y1.reshape((1,-1))
            d=np.sqrt(dx**2+dy**2)
            dmin=np.min(d)
            indx,indy=np.where(d==dmin)
            Res=dmin
            Result[0,jIsland]=dx[indx[0],indy[0]]
            Result[1,jIsland]=dy[indx[0],indy[0]]
            Result[2,jIsland]=dmin

        NpShared.ToShared("%sDistances_%6.6i"%(self.IdSharedMem,iIsland),Result)

        self.result_queue.put({"iIsland": iIsland, "Success":True})
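
The suffix convention in __setitem__ above is worth spelling out: numpy arrays are copied to shared memory under path + 'a', dict-like values become sub-SharedDicts under path + 'd', and everything else is pickled to path + 'p'. A short hedged usage sketch, again assuming a read-write SharedDict instance sd:

import numpy as np

sd["residual"] = np.zeros((256, 256), np.float32)   # stored via NpShared.ToShared under the 'a' name
sd["meta"] = {"niter": 10, "gain": 0.1}             # copied key by key into a subdict ('d' directory)
sd["comment"] = "first major cycle"                 # anything else is pickled to the 'p' file
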
Example #12
    def __init__(self,
                 MeanResidual,
                 MeanModelImage,
                 PSFServer,
                 DeltaChi2=4.,
                 IdSharedMem="",
                 NCPU=6):
        IdSharedMem += "SmearSM."
        NpShared.DelAll(IdSharedMem)
        self.IdSharedMem = IdSharedMem
        self.NCPU = NCPU
        self.MeanModelImage = NpShared.ToShared(
            "%sMeanModelImage" % self.IdSharedMem, MeanModelImage)
        self.MeanResidual = NpShared.ToShared(
            "%sMeanResidual" % self.IdSharedMem, MeanResidual)
        NPixStats = 10000
        RandomInd = np.int64(np.random.rand(NPixStats) * (MeanResidual.size))
        self.RMS = np.std(np.real(self.MeanResidual.ravel()[RandomInd]))
        self.FWHMMin = 3.

        self.PSFServer = PSFServer
        self.DeltaChi2 = DeltaChi2
        self.Var = self.RMS**2
        self.NImGauss = 31
        self.CubeMeanVariablePSF = NpShared.ToShared(
            "%sCubeMeanVariablePSF" % self.IdSharedMem,
            self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'])
        self.DicoConvMachine = {}

        N = self.NImGauss
        dx, dy = np.mgrid[-(N // 2):N // 2:1j * N, -(N // 2):N // 2:1j * N]

        ListPixParms = [(int(dx.ravel()[i]), int(dy.ravel()[i]))
                        for i in range(dx.size)]
        ListPixData = ListPixParms
        ConvMode = "Matrix"
        N = self.NImGauss

        #stop
        #for
        #ClassConvMachine():
        #def __init__(self,PSF,ListPixParms,ListPixData,ConvMode):

        d = np.sqrt(dx**2 + dy**2)
        self.dist = d
        self.NGauss = 10

        GSig = np.linspace(0., 2, self.NGauss)
        self.GSig = GSig
        ListGauss = []
        One = np.zeros_like(d)
        One[N // 2, N // 2] = 1.
        ListGauss.append(One)
        for sig in GSig[1::]:
            v = np.exp(-d**2 / (2. * sig**2))
            Sv = np.sum(v)
            v /= Sv
            ListGauss.append(v)

        self.ListGauss = ListGauss

        print("Declare convolution machines", file=log)
        NJobs = self.PSFServer.NFacets
        pBAR = ProgressBar(Title=" Declare      ")
        #pBAR.disable()
        pBAR.render(0, '%4i/%i' % (0, NJobs))
        for iFacet in range(self.PSFServer.NFacets):
            #print iFacet,"/",self.PSFServer.NFacets
            PSF = self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'][
                iFacet]  #[0,0]
            _, _, NPixPSF, _ = PSF.shape
            PSF = PSF[:, :, NPixPSF // 2 - N:NPixPSF // 2 + N + 1,
                      NPixPSF // 2 - N:NPixPSF // 2 + N + 1]
            #print PSF.shape
            #sig=1
            #PSF=(np.exp(-self.dist**2/(2.*sig**2))).reshape(1,1,N,N)

            self.DicoConvMachine[iFacet] = ClassConvMachine.ClassConvMachine(
                PSF, ListPixParms, ListPixData, ConvMode)
            CM = self.DicoConvMachine[iFacet].CM
            NpShared.ToShared("%sCM_Facet%4.4i" % (self.IdSharedMem, iFacet),
                              CM)
            #invCM=ModLinAlg.invSVD(np.float64(CM[0,0]))/self.Var
            #NpShared.ToShared("%sInvCov_Facet%4.4i"%(self.IdSharedMem,iFacet),invCM)

            NDone = iFacet + 1
            intPercent = int(100 * NDone / float(NJobs))
            pBAR.render(intPercent, '%4i/%i' % (NDone, NJobs))

        PSFMean = np.mean(
            self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'], axis=0)
        self.ConvMachineMeanPSF = ClassConvMachine.ClassConvMachine(
            PSFMean, ListPixParms, ListPixData, ConvMode)
        CM = self.ConvMachineMeanPSF.CM
        invCM = ModLinAlg.invSVD(np.float64(CM[0, 0]), Cut=1e-8) / self.Var
        NpShared.ToShared("%sInvCov_AllFacet" % (self.IdSharedMem), invCM)
        self.FindSupport()
Example #13
    def Smear(self, Parallel=True):
        if Parallel:
            NCPU = self.NCPU
        else:
            NCPU = 1
        StopWhenQueueEmpty = True
        print("Building queue", file=log)
        self.ModelOut = np.zeros_like(self.MeanModelImage)
        indx, indy = np.where(self.MeanModelImage[0, 0] != 0)
        #indx,indy=np.where(self.MeanModelImage==np.max(self.MeanModelImage))
        work_queue = multiprocessing.Queue()
        result_queue = multiprocessing.Queue()

        SizeMax = int(indx.size / float(NCPU) / 100.)
        SizeMax = np.max([SizeMax, 1])
        iPix = 0
        iQueue = 0
        Queue = []
        while iPix < indx.size:
            xc, yc = indx[iPix], indy[iPix]
            FacetID = self.PSFServer.giveFacetID2(xc, yc)
            #DicoOrder={"xy":(xc,yc),
            #           "FacetID":FacetID}
            Queue.append([xc, yc, FacetID])

            iPix += 1
            if (len(Queue) == SizeMax) | (iPix == indx.size):
                NpShared.ToShared("%sQueue_%3.3i" % (self.IdSharedMem, iQueue),
                                  np.array(Queue))
                work_queue.put(iQueue)
                Queue = []
                iQueue += 1

        NJobs = indx.size

        workerlist = []

        pBAR = ProgressBar(Title=" Find gaussian")
        #pBAR.disable()
        pBAR.render(0, '%4i/%i' % (0, NJobs))
        for ii in range(NCPU):
            W = WorkerSmear(work_queue,
                            result_queue,
                            IdSharedMem=self.IdSharedMem,
                            StopWhenQueueEmpty=StopWhenQueueEmpty,
                            NImGauss=self.NImGauss,
                            DeltaChi2=self.DeltaChi2,
                            ListGauss=self.ListGauss,
                            GSig=self.GSig,
                            Var=self.Var,
                            SigMin=self.SigMin)
            workerlist.append(W)
            if Parallel:
                workerlist[ii].start()
            else:
                workerlist[ii].run()

        N = self.NImGauss
        iResult = 0
        #print "!!!!!!!!!!!!!!!!!!!!!!!!",iResult,NJobs
        while iResult < NJobs:
            DicoResult = None
            # for result_queue in List_Result_queue:
            #     if result_queue.qsize()!=0:
            #         try:
            #             DicoResult=result_queue.get_nowait()

            #             break
            #         except:

            #             pass
            #         #DicoResult=result_queue.get()
            #print "!!!!!!!!!!!!!!!!!!!!!!!!! Qsize",result_queue.qsize()
            #print work_queue.qsize(),result_queue.qsize()
            if result_queue.qsize() != 0:
                try:
                    DicoResult = result_queue.get_nowait()
                except:
                    pass
                    #DicoResult=result_queue.get()

            if DicoResult is None:
                time.sleep(0.001)
                continue

            if DicoResult["Success"]:
                iQueue = DicoResult["iQueue"]
                Queue = NpShared.GiveArray("%sQueue_%3.3i" %
                                           (self.IdSharedMem, iQueue))
                for iJob in range(Queue.shape[0]):
                    x0, y0, iGauss = Queue[iJob]
                    SMax = self.MeanModelImage[0, 0, x0, y0]
                    SubModelOut = self.ModelOut[0,
                                                0][x0 - N // 2:x0 + N // 2 + 1,
                                                   y0 - N // 2:y0 + N // 2 + 1]
                    SubModelOut += self.ListRestoredGauss[iGauss] * SMax
                    #iGauss=0
                    #print
                    #print SMax
                    #print np.sum(self.ListGauss[iGauss])
                    #print
                    SubModelOut += self.ListGauss[iGauss] * SMax

                    iResult += 1
                    NDone = iResult
                    intPercent = int(100 * NDone / float(NJobs))
                    pBAR.render(intPercent, '%4i/%i' % (NDone, NJobs))

        for ii in range(NCPU):
            try:
                workerlist[ii].shutdown()
                workerlist[ii].terminate()
                workerlist[ii].join()
            except:
                pass

        return self.ModelOut
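
Because the workers are driven with run() when Parallel is False, the whole pipeline can be exercised in a single process, which is convenient when debugging the shared-memory bookkeeping. A one-line hedged usage sketch, assuming sm is an instance of the class whose constructor is shown in Example #12:

ModelOut = sm.Smear(Parallel=False)   # single-process run: each worker executes inline via run()
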
Example #14
 def CleanUpSHM(self):
     NpShared.DelAll(self.IdSharedMem)
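
Since every array is published under self.IdSharedMem, one DelAll call reclaims all the segments. A hedged sketch of the intended pairing with Smear, so shared memory is released even if smearing fails (sm as in Example #12):

try:
    ModelOut = sm.Smear(Parallel=True)
finally:
    sm.CleanUpSHM()   # removes every shared array created under this prefix
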
Example #15
    group = optparse.OptionGroup(opt, "* SHM")
    group.add_option(
        '--ID',
        help='ID of shared memory to be deleted, default is %default',
        default=None)
    opt.add_option_group(group)
    options, arguments = opt.parse_args()

    return options


if __name__ == "__main__":
    options = read_options()
    print >> log, "Clear shared memory"
    if options.ID is not None:
        NpShared.DelAll(options.ID)
    else:
        NpShared.DelAll()

    Multiprocessing.cleanupStaleShm()
    Multiprocessing.cleanupShm()
    ll = glob.glob("/dev/shm/sem.*")

    print >> log, "Clear Semaphores"
    # remove semaphores we don't have access to
    ll = filter(lambda x: os.access(x, os.W_OK), ll)

    ListSemaphores = [".".join(l.split(".")[1::]) for l in ll]

    _pyGridderSmear.pySetSemaphores(ListSemaphores)
    _pyGridderSmear.pyDeleteSemaphore(ListSemaphores)
Example #16
    def giveDicoInitIndiv(self,
                          ListIslands,
                          ModelImage,
                          DicoDirty,
                          ListDoIsland=None,
                          Parallel=True):
        NCPU = self.NCPU
        work_queue = multiprocessing.JoinableQueue()
        ListIslands = ListIslands  #[300:308]
        DoIsland = True

        for iIsland in range(len(ListIslands)):
            if ListDoIsland is not None:
                DoIsland = ListDoIsland[iIsland]
            if DoIsland: work_queue.put({"iIsland": iIsland})

        result_queue = multiprocessing.JoinableQueue()
        NJobs = work_queue.qsize()
        workerlist = []

        logger.setSilent(SilentModules)
        #MyLogger.setLoud(SilentModules)

        #MyLogger.setLoud("ClassImageDeconvMachineMSMF")

        print >> log, "Launch MORESANE workers"
        for ii in range(NCPU):
            W = WorkerInitMSMF(work_queue, result_queue, self.GD,
                               self.DicoVariablePSF, DicoDirty, self.RefFreq,
                               self.GridFreqs, self.DegridFreqs,
                               self.MainCache, ModelImage, ListIslands,
                               self.IdSharedMem)
            workerlist.append(W)
            if Parallel:
                workerlist[ii].start()

        timer = ClassTimeIt.ClassTimeIt()
        pBAR = ProgressBar(Title="  MORESANing islands ")
        #pBAR.disable()
        pBAR.render(0, NJobs)
        iResult = 0
        if not Parallel:
            for ii in range(NCPU):
                workerlist[ii].run()  # just run until all work is completed

        self.DicoInitIndiv = {}
        while iResult < NJobs:
            DicoResult = None
            if result_queue.qsize() != 0:
                try:
                    DicoResult = result_queue.get()
                except:
                    pass

            if DicoResult is None:
                time.sleep(0.5)
                continue

            if DicoResult["Success"]:
                iResult += 1
                NDone = iResult

                pBAR.render(NDone, NJobs)

                iIsland = DicoResult["iIsland"]
                NameDico = "%sDicoInitIsland_%5.5i" % (self.IdSharedMem,
                                                       iIsland)
                Dico = NpShared.SharedToDico(NameDico)
                self.DicoInitIndiv[iIsland] = copy.deepcopy(Dico)
                NpShared.DelAll(NameDico)

        if Parallel:
            for ii in range(NCPU):
                workerlist[ii].shutdown()
                workerlist[ii].terminate()
                workerlist[ii].join()

        #MyLogger.setLoud(["pymoresane.main"])
        #MyLogger.setLoud(["ClassImageDeconvMachineMSMF","ClassPSFServer","ClassMultiScaleMachine","GiveModelMachine","ClassModelMachineMSMF"])
        return self.DicoInitIndiv
    def BuildSmearMappingParallel(self, DATA, GridChanMapping):
        print >> log, "Build decorrelation mapping ..."

        na = self.MS.na

        l = self.radiusRad
        dPhi = np.sqrt(6. * (1. - self.Decorr))

        NChan = self.MS.ChanFreq.size
        self.BlocksRowsList = []

        InfoSmearMapping = {}
        InfoSmearMapping["freqs"] = self.MS.ChanFreq
        InfoSmearMapping["dfreqs"] = self.MS.dFreq
        InfoSmearMapping["dPhi"] = dPhi
        InfoSmearMapping["l"] = l
        BlocksRowsList = []

        joblist = [(a0, a1) for a0 in range(na) for a1 in range(na) if a0 != a1]

        WorkerMapName = Multiprocessing.getShmURL("SmearWorker.%d")

        results = Multiprocessing.runjobs(
            joblist,
            title="Smear mapping",
            target=_smearmapping_worker,
            kwargs=dict(DATA=DATA,
                        InfoSmearMapping=InfoSmearMapping,
                        WorkerMapName=WorkerMapName,
                        GridChanMapping=GridChanMapping))

        # process worker results
        # for each map (each array returned from worker), BlockSizes[MapName] will
        # contain a list of BlocksSizesBL entries returned from that worker
        RowsBlockSizes = {}
        NTotBlocks = 0
        NTotRows = 0
        worker_maps = {}

        for DicoResult in results:
            if not DicoResult["Empty"]:
                MapName = DicoResult["MapName"]
                map = worker_maps.get(MapName)
                if map is None:
                    map = worker_maps[MapName] = NpShared.GiveArray(MapName)
                bl = DicoResult["bl"]
                rowslice = DicoResult["Slice"]
                bsz = np.array(DicoResult["BlocksSizesBL"])
                RowsBlockSizes[bl] = map[rowslice], bsz
                NTotBlocks += DicoResult["NBlocksTotBL"]
                NTotRows += bsz.sum()

        # output mapping has 2 words for the total size, plus 2*NTotBlocks header, plus NTotRows blocklists
        OutputMapping = np.zeros((2 + 2 * NTotBlocks + NTotRows, ), np.int32)

        # just in case NTotBlocks is over 2^31...
        # (don't want to use np.int64 for the whole mapping as that just wastes space; we may assume
        # that we have substantially fewer rows, so int32 is perfectly good as a row index etc.)
        OutputMapping[0] = NTotBlocks
        OutputMapping[1] = NTotBlocks >> 32

        BlockListSizes = OutputMapping[2:2 + NTotBlocks]

        BlockLists = OutputMapping[2 + NTotBlocks:]
        iii = 0
        jjj = 0

        # now go through each per-baseline mapping, sorted by baseline
        for _, (BlocksRowsListBL,
                BlocksSizesBL) in sorted(RowsBlockSizes.items()):
            #print>>log, "  Worker: %i"%(IdWorker)

            BlockLists[iii:iii + BlocksRowsListBL.size] = BlocksRowsListBL[:]
            iii += BlocksRowsListBL.size

            # print "IdWorker,AppendId",IdWorker,AppendId,BlocksSizesBL
            # MM=np.concatenate((MM,BlocksSizesBL))
            BlockListSizes[jjj:jjj + BlocksSizesBL.size] = BlocksSizesBL[:]
            jjj += BlocksSizesBL.size

        for MapName in worker_maps:
            NpShared.DelArray(MapName)

        #print>>log, "  Put in shared mem"

        NVis = np.where(DATA["A0"] != DATA["A1"])[0].size * NChan
        #print>>log, "  Number of blocks:         %i"%NTotBlocks
        #print>>log, "  Number of 4-Visibilities: %i"%NVis
        fact = (100. * (NVis - NTotBlocks) / float(NVis))

        # self.UnPackMapping()
        # print FinalMapping

        return OutputMapping, fact
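
The first two words of OutputMapping split NTotBlocks into low and high 32-bit halves so that the rest of the mapping can stay int32. A short sketch of decoding that header on the consumer side, grounded only in the packing shown above:

def decode_ntotblocks(OutputMapping):
    """Recover NTotBlocks from the two leading int32 words of the packed mapping."""
    lo = int(OutputMapping[0]) & 0xFFFFFFFF   # stored modulo 2**32, may appear negative as int32
    hi = int(OutputMapping[1])                # NTotBlocks >> 32
    return (hi << 32) | lo
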
Example #18
 def load_impl(self):
     return NpShared.GiveArray(_to_shm(self.path))
    def calcDistanceMatrixMinParallel(self, ListIslands, Parallel=True):
        NIslands = len(ListIslands)
        self.D = np.zeros((NIslands, NIslands), np.float32)
        self.dx = np.zeros((NIslands, NIslands), np.int32)
        self.dy = np.zeros((NIslands, NIslands), np.int32)

        work_queue = multiprocessing.JoinableQueue()
        for iIsland in range(NIslands):
            work_queue.put({"iIsland": (iIsland)})

        result_queue = multiprocessing.JoinableQueue()
        NJobs = work_queue.qsize()
        workerlist = []
        NCPU = self.NCPU

        ListEdgeIslands = self.giveEdgesIslands(ListIslands)

        for ii in range(NCPU):
            W = WorkerDistance(work_queue, result_queue, ListEdgeIslands,
                               self.IdSharedMem)
            workerlist.append(W)
            if Parallel:
                workerlist[ii].start()

        pBAR = ProgressBar(Title="  Calc. Dist. ")
        pBAR.render(0, NJobs)
        iResult = 0
        if not Parallel:
            for ii in range(NCPU):
                workerlist[ii].run()  # just run until all work is completed

        while iResult < NJobs:
            DicoResult = None
            if result_queue.qsize() != 0:
                try:
                    DicoResult = result_queue.get()
                except:
                    pass

            if DicoResult is None:
                time.sleep(0.5)
                continue

            if DicoResult["Success"]:
                iResult += 1
                NDone = iResult
                pBAR.render(NDone, NJobs)

                iIsland = DicoResult["iIsland"]
                Result = NpShared.GiveArray("%sDistances_%6.6i" %
                                            (self.IdSharedMem, iIsland))

                self.dx[iIsland] = Result[0]
                self.dy[iIsland] = Result[1]
                self.D[iIsland] = Result[2]
                NpShared.DelAll("%sDistances_%6.6i" %
                                (self.IdSharedMem, iIsland))

        if Parallel:
            for ii in range(NCPU):
                workerlist[ii].shutdown()
                workerlist[ii].terminate()
                workerlist[ii].join()
Example #20
def monitorMem():
    LMem = []
    LSMem = []
    LSMemAvail = []
    LMemAvail = []
    LMemTotal = []
    LShared = []
    LCPU = []
    t0 = time.time()
    LT = []
    Swap0 = None

    while True:
        # process = psutil.Process(os.getpid())
        # mem = process.get_memory_info()[0] / float(2 ** 20)
        vmem = psutil.virtual_memory()

        mem = vmem.used / float(2**20)
        LMem.append(mem)
        memAvail = vmem.available / float(2**20)
        LMemAvail.append(memAvail)

        memTotal = vmem.total / float(2**20)
        LMemTotal.append(memTotal)

        smem = psutil.swap_memory()
        Smem = smem.used / float(2**20)
        if Swap0 is None:
            Swap0 = Smem
        LSMem.append(Smem - Swap0)

        SmemAvail = smem.total / float(2**20)
        LSMemAvail.append(SmemAvail)

        TotSeen = np.array(LMemAvail) + np.array(LMem)
        Cache = TotSeen - np.array(LMemTotal)

        PureRAM = np.array(LMem) - Cache

        Shared = NpShared.SizeShm()
        if not Shared:
            Shared = LShared[-1] if LShared else 0
        LShared.append(Shared)

        cpu = psutil.cpu_percent()
        LCPU.append(cpu)
        LT.append((time.time() - t0) / 60)

        ax = pylab.subplot(111)
        ax2 = ax.twinx()

        if len(LMem) > 2:
            #pylab.clf()
            # pylab.subplot(1,2,1)
            # pylab.plot(LMem)
            # pylab.plot(LMemAvail)
            # pylab.plot(np.array(LMemAvail)+np.array(LMem))
            # pylab.subplot(1,2,2)
            # pylab.plot(LCPU)

            # pylab.draw()
            # pylab.show(False)
            # pylab.pause(0.01)

            ax.cla()

            # Total Available
            #ax.plot(LT,LMemTotal,lw=2,color="black",ls=":")
            x, y = GivePolygon(LT, LMemTotal)
            ax.fill(x, y, 'black', alpha=0.1, edgecolor='black')

            # Cache
            # ax.plot(LT,LMemTotal-np.array(LMem),lw=2,color="green")
            x, y = GivePolygon(LT, np.array(LMem))
            ax.fill(x, y, 'black', alpha=0.1, edgecolor='blue', lw=2)

            # Total used excluding cache
            x, y = GivePolygon(LT, np.array(LShared) + np.array(PureRAM))
            ax.fill(x, y, 'black', alpha=0.3, edgecolor='blue', lw=2)

            # memory
            # ax.plot(LT,PureRAM,lw=2,color="blue")
            x, y = GivePolygon(LT, PureRAM)
            ax.fill(x,
                    y,
                    'green',
                    alpha=0.3,
                    edgecolor='green',
                    lw=2,
                    hatch="//")

            # Shared
            #ax.plot(LT,LShared,lw=2,color="black")
            x, y = GivePolygon(LT, LShared)
            ax.fill(x, y, 'red', alpha=0.3, edgecolor='red', lw=2, hatch="\\")
            #ax.plot(LT,TotSeen,lw=2,color="red")

            # # Total Used
            # ax.plot(LT,np.array(LMem),lw=2,color="blue",ls="--")

            # swap
            x, y = GivePolygon(LT, LSMem)
            #ax.fill(x,y,'', alpha=1, edgecolor='red',lw=2,hatch="/")
            ax.fill(x, y, 'gray', alpha=0.3, edgecolor='red', lw=1, hatch="*")
            # ax.plot(LT,np.array(LSMem),lw=2,ls=":",color="blue")
            # ax.plot(LT,np.array(LSMemAvail),lw=2,ls=":",color="red")

            # CPU
            ax2.plot(LT, LCPU, color="black", ls="--")

            #ax.legend(loc=0)
            ax.grid()

            ax.set_ylabel("Mem [MB]")
            ax2.set_ylabel("CPU [%]")
            ax.set_xlabel("Time [min.]")
            #ax2.set_ylabel(r"Temperature ($^\circ$C)")
            #ax2.set_ylim(0, 35)
            ax.set_xlim(np.min(LT), np.max(LT))
            ax.set_ylim(0, 1.1 * np.max(LMemTotal))

            #ax2.legend(loc=0)

            pylab.draw()
            #pylab.show()
            pylab.show(block=False)
            pylab.pause(0.5)

        time.sleep(0.5)
Example #21
    def CalcWeights(self, uvw_weights_flags_freqs, Robust=0, Weighting="Briggs", Super=1,
                          nbands=1, band_mapping=None, weightnorm=1, force_unity_weight=False):
        """
        Computes imaging weights in "MFS mode", when all uv-points are binned onto a single grid.
        Args:
            uvw_weights_flags_freqs: list of (uv, weights, flags, freqs) tuples, one per each MS
                if weights is a string, it is treated as the filename for a shared array
            Robust:                  robustness
            Weighting:               natural, uniform, briggs
            Super:                   !=1 for superuniform or superrobust: uv bin size is 1/(super*FoV)
            nbands:                  number of frequency bands to compute weights on (if band_mapping is not None)
            band_mapping:            band_mapping[iMS][ichan] gives the band number of channel #ichan of MS #iMS
                                     if None, the "MFS weighting" is used, with all frequency points weighted
                                     on a single grid
            weightnorm:              multiply weights by this factor
            force_unity_weight:      force all weights to 1

        Returns:
            list of imaging weights arrays, one per MS, same shape as original data weights
        """

        Weighting = Weighting.lower()
        if Weighting == "natural":
            print>> log, "Weighting in natural mode"
            if force_unity_weight:
                for uv, weights, flags, freqs in uvw_weights_flags_freqs:
                    if flags is not None:
                        if type(weights) is str:
                            NpShared.GiveArray(weights).fill(1)
                        else:
                            weights.fill(1)
            return [x[1] for x in uvw_weights_flags_freqs]

        nch, npol, npixIm, _ = self.ImShape
        FOV = self.CellSizeRad * npixIm
        cell = 1. / (Super * FOV)

        if band_mapping is None:
            nbands = 1
            print>> log, "initializing weighting grid for single band (or MFS weighting)"
        else:
            print>> log, "initializing weighting grids for %d bands"%nbands

        # find max grid extent by considering _unflagged_ UVs
        xymax = 0
        for uv, weights, flags, freqs in uvw_weights_flags_freqs:
            if flags is None:  # entire chunk flagged
                continue
            # max |u|,|v| in lambda
            uvsel = abs(uv)[~flags, :]
            if uvsel.size == 0:
                print(ModColor.Str("  A dataset is fully flagged"), file=log)
                continue
            uvmax = uvsel.max() * freqs.max() / _cc
            xymax = max(xymax, int(math.floor(uvmax / cell)))
        if xymax == 0:
            raise Exception('All datasets are fully flagged')

        xymax += 1
        # grid will be from [-xymax,xymax] in U and [0,xymax] in V
        npixx = xymax * 2 + 1
        npixy = xymax + 1
        npix = npixx * npixy


        print>> log, "Calculating imaging weights on an [%i,%i]x%i grid with cellsize %g" % (npixx, npixy, nbands, cell)
        grid0 = np.zeros((nbands, npix), np.float64)
        grid = grid0.reshape((nbands*npix,))

        # this will be a per-MS list of weights and an index array, or None if an MS is all flagged
        weights_index = [(None, None)] * len(uvw_weights_flags_freqs)

        for iMS, (uv, weights_or_path, flags, freqs) in enumerate(uvw_weights_flags_freqs):
            if flags is None:  # entire chunk flagged
                continue
            weights = NpShared.GiveArray(weights_or_path) if type(weights_or_path) is str else weights_or_path
            if force_unity_weight:
                weights.fill(1)
                weights[flags,...]=0

            elif weightnorm != 1:
                weights *= weightnorm
            # flip sign of negative v values -- we'll only grid the top half of the plane
            uv[uv[:, 1] < 0] *= -1
            # convert u/v to lambda, and then to pixel offset
            uv = uv[..., np.newaxis] * freqs[np.newaxis, np.newaxis, :] / _cc
            uv = np.floor(uv / cell).astype(int)
            # u is offset, v isn't since it's the top half

            x = uv[:, 0, :]
            y = uv[:, 1, :]
            x += xymax  # offset, since X grid starts at -xymax
            # convert to index array -- this gives the number of the uv-bin on the grid
            index = y * npixx + x
            # if we're in per-band weighting mode, then adjust the index to refer to each band's grid
            if band_mapping is not None:
                bandmap = band_mapping[iMS]
                # uv has shape nvis,nfreq; bandmap has shape nfreq
                index += bandmap[np.newaxis,:]*npix
            # zero weight refers to zero cell (otherwise it may end up outside the grid, since grid is
            # only big enough to accommodate the *unflagged* uv-points)
            index[weights==0] = 0

            weights_index[iMS] = weights_or_path, index
            del uv
            print>> log, "Accumulating weights (%d/%d)" % (iMS + 1, len(uvw_weights_flags_freqs))
            # accumulate onto grid
            # print>>log,weights,index
            _pyGridderSmearPols.pyAccumulateWeightsOntoGrid(grid, weights.ravel(), index.ravel())

        if Weighting == "uniform":
            #            print>>log,"adjusting grid to uniform weight"
            #           grid[grid!=0] = 1/grid[grid!=0]
            print>> log, ("applying uniform weighting (super=%.2f)" % Super)
            for weights_or_path, index in weights_index:
                if index is not None:
                    weights = NpShared.GiveArray(weights_or_path) if type(weights_or_path) is str else weights_or_path
                    weights /= grid[index]

        elif Weighting == "briggs" or Weighting == "robust":
            numeratorSqrt = 5.0 * 10 ** (-Robust)
            for band in range(nbands):
                print>> log, ("applying Briggs weighting (robust=%.2f, super=%.2f, band %d)" % (Robust, Super, band))
                grid1 = grid0[band,:]
                avgW = (grid1 ** 2).sum() / grid1.sum()
                sSq = numeratorSqrt ** 2 / avgW
                grid1[...] = 1 / (1 + grid1 * sSq)
            for weights_or_path, index in weights_index:
                if index is not None:
                    weights = NpShared.GiveArray(weights_or_path) if type(weights_or_path) is str else weights_or_path
                    weights *= grid[index]

        else:
            raise ValueError("unknown weighting \"%s\"" % Weighting)

        print>> log, "weights computed"