Example #1
    def doLMIter(self):
        T = ClassTimeIt.ClassTimeIt()
        T.disable()
        #J,H=
        self.giveJacobianHessian()
        T.timeit("J, H")
        z = self.GiveGPredict(self.CurrentX)
        Y = np.array([(z[iFreq].reshape((-1, 1)) * z[iFreq].conj().reshape(
            (1, -1))).ravel() for iFreq in range(self.NFreq)]).ravel()

        r = self.Y - Y[self.Mask][::self.Incr]

        v = self.JHy(r)
        H = self.DiagJHJ()
        T.timeit("diff")

        Hinv = killMS.Array.ModLinAlg.invSVD(H)
        T.timeit("inv")

        X = self.CurrentX + np.real(np.dot(Hinv, v.reshape((-1, 1))).ravel())

        xx = self.CurrentX.copy()
        xx[xx == 0] = 1e-6
        self.Diff = np.max(np.abs((X - xx) / xx))

        z0 = self.GiveGPredict(self.CurrentX)
        Norm(z0)
        self.CurrentX = X
        z = self.GiveGPredict(self.CurrentX)
        Norm(z)
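        # Convergence metric: largest absolute phase change of the predicted gains between the previous and updated solutions.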
        self.Diff = np.max(np.abs(np.angle(z * z0.conj())))
        #print self.Diff

        return
Example #2
def giveFitness(Indiv,
                x=None,
                y=None,
                S=None,
                Polygons=None,
                PolyCut=None,
                BigPolygon=None):
    T = ClassTimeIt.ClassTimeIt("Fitness")
    T.disable()
    CMD = ClassMetricDEAP.ClassMetricDEAP(Indiv,
                                          x=x,
                                          y=y,
                                          S=S,
                                          Polygons=Polygons,
                                          PolyCut=PolyCut,
                                          BigPolygon=BigPolygon)
    fluxPerFacet = CMD.fluxPerFacet()
    NPerFacet = CMD.NPerFacet()
    meanDistancePerFacet = CMD.meanDistancePerFacet()
    overlapPerFacet = CMD.overlapPerFacet()

    Fitness = 0
    Fitness += -np.std(fluxPerFacet)
    Fitness += -np.std(NPerFacet)
    Fitness += -1e5 * np.count_nonzero(NPerFacet == 0)

    # aspectRatioPerFacet=CMD.aspectRatioPerFacet()
    # A=aspectRatioPerFacet
    # Fitness+= -np.mean(A[A>0])

    Fitness += -np.mean(meanDistancePerFacet) * 10
    Fitness += -np.sum(overlapPerFacet) * 1e5

    return Fitness,
Example #3
def testWhereMax():
    
    Npx=1030
    Npy=500
    nch=1
    np.random.seed(8)
    A=np.zeros((nch,Npx,Npy),dtype=np.float32)
    A=np.float32(np.random.randn(*(nch,Npx,Npy)))
    
    Mask=np.zeros((Npx,Npy),np.bool8)
    #A[0,100,200]=30.

    T= ClassTimeIt.ClassTimeIt()
    #Ans=A_whereMax(A,NCPU=1,Mask=Mask)
    #print Ans
    T.timeit("1")
    f=np.abs
    #f=lambda x: x
    ind= np.where(f(A)==np.max(f(A)))
    T.timeit("2")

    print(ind,A[ind])
    print("===========================")
    print(A_whereMax(A,NCPU=1,DoAbs=0,Mask=None))
    print(A_whereMax(A,NCPU=1,DoAbs=0,Mask=np.zeros(A.shape,np.bool8)))
    print("===========================")
    print(A_whereMax(A,NCPU=6,DoAbs=0,Mask=None))
    print(A_whereMax(A,NCPU=6,DoAbs=0,Mask=np.zeros(A.shape,np.bool8)))
    print("===========================")
    print(A_whereMax(A,NCPU=1,DoAbs=1,Mask=None))
    print(A_whereMax(A,NCPU=1,DoAbs=1,Mask=np.zeros(A.shape,np.bool8)))
    print("===========================")
    print(A_whereMax(A,NCPU=6,DoAbs=1,Mask=None))
    print(A_whereMax(A,NCPU=6,DoAbs=1,Mask=np.zeros(A.shape,np.bool8)))
Example #4
    def fft(self, Ain):
        axes = (-1, -2)

        T = ClassTimeIt.ClassTimeIt("ModFFTW")
        T.disable()

        sin = Ain.shape
        if len(Ain.shape) == 2:
            s = (1, 1, Ain.shape[0], Ain.shape[1])
            A = Ain.reshape(s)
        else:
            A = Ain

        nch, npol, _, _ = A.shape
        for ich in range(nch):
            for ipol in range(npol):
                A_2D = iFs(A[ich, ipol].astype(self.ThisType), axes=axes)
                T.timeit("shift and copy")
                A_2D[...] = pyfftw.interfaces.numpy_fft.fft2(
                    A_2D,
                    axes=(-1, -2),
                    overwrite_input=True,
                    planner_effort='FFTW_MEASURE',
                    threads=self.ncores)
                T.timeit("fft")
                A[ich, ipol] = Fs(A_2D, axes=axes)
                T.timeit("shift")
        if self.norm:
            A /= (A.shape[-1] * A.shape[-2])

        return A.reshape(sin)
Example #5
def testAdd():

    Np = 16000

    x0 = 1000
    x1 = 11000
    y0 = 5000
    y1 = 15000
    nch = 1

    _a0 = np.ones((nch, Np, Np), dtype=np.float32)
    _a1 = np.ones((nch, Np, Np), dtype=np.float32)
    # b=np.float32(np.arange(Np**2).reshape((Np,Np)))
    b = np.ones((Np, Np), dtype=np.float32)

    Aedge = np.array([x0, x1, y0, y1], np.int32)
    Bedge = np.array([x0 + 1, x1 + 1, y0, y1], np.int32)

    T = ClassTimeIt.ClassTimeIt()

    N = 10
    NBlocks = 6
    factor = -1.

    for i in xrange(N):
        # b=np.float32(np.random.randn(Np,Np))
        A_add_B_prod_factor(_a0,
                            b,
                            Aedge,
                            Bedge,
                            factor=float(factor),
                            NCPU=NBlocks)
        # A_add_B_prod_factor(_a0,b,Aedge,Bedge,factor=float(factor),NCPU=NBlocks)
        # for ch in range(nch):
        #     a0=_a0[ch]
        #     a1=_a1[ch]
        #     a_x0,a_x1,a_y0,a_y1=Aedge
        #     b_x0,b_x1,b_y0,b_y1=Bedge
        #     a1[a_x0:a_x1,a_y0:a_y1]+=b[b_x0:b_x1,b_y0:b_y1]*factor
        # # print a1[a_x0:a_x1,a_y0:a_y1].shape
        # # print b[b_x0:b_x1,b_y0:b_y1].shape
        # # print a_x0,a_x1,a_y0,a_y1
        # # print b_x0,b_x1,b_y0,b_y1
        # print "done"
        # print np.max(_a0-_a1)
        # # print

    # print _a0
    # print _a1

    T.timeit("1")
    for i in xrange(N):
        for ch in xrange(nch):
            a0 = _a0[ch]
            a1 = _a1[ch]
            a_x0, a_x1, a_y0, a_y1 = Aedge
            b_x0, b_x1, b_y0, b_y1 = Bedge
            a1[a_x0:a_x1, a_y0:a_y1] += b[b_x0:b_x1, b_y0:b_y1] * factor
            # a1=a0+b*factor
    T.timeit("2")
Example #6
    def Convolve(self, A, Norm=True, OutMode="Data", ConvMode=None):

        if ConvMode is None:
            ConvMode = self.ConvMode

        if ConvMode == "Matrix":
            return self.ConvolveMatrix(A, OutMode=OutMode)
        elif ConvMode == "Vector":
            T = ClassTimeIt.ClassTimeIt("ConvVec")
            T.disable()
            C = self.ConvolveVector(A, OutMode=OutMode)
            T.timeit()
            return C
        elif ConvMode == "FFT":
            T = ClassTimeIt.ClassTimeIt("ConvFFT")
            T.disable()
            C = self.ConvolveFFT(A, OutMode=OutMode)
            T.timeit()
            return C
Example #7
    def _dispatch_job(self, jobitem, reraise=False):
        """Handles job described by jobitem dict.

        If reraise is True, any exceptions are re-raised. This is useful for debugging."""
        timer = ClassTimeIt.ClassTimeIt()
        event = counter = None
        try:
            job_id, event_id, counter_id, args, kwargs = [jobitem.get(attr) for attr in
                                                        "job_id", "event", "counter", "args", "kwargs"]
            handler_id, method, handler_desc = jobitem["handler"]
            handler = self._job_handlers.get(handler_id)
            if handler is None:
                raise RuntimeError("Job %s: unknown handler %s. This is a bug." % (job_id, handler_desc))
            event, eventname = self._events[event_id] if event_id is not None else (None, None)
            # find counter object, if specified
            if counter_id:
                counter = self._job_counters.get(counter_id)
                if counter is None:
                    raise RuntimeError("Job %s: unknown counter %s. This is a bug." % (job_id, counter_id))
            # instantiate SharedDict arguments
#            timer.timeit('init '+job_id)
            args = [ arg.instantiate() if type(arg) is shared_dict.SharedDictRepresentation else arg for arg in args ]
            for key in kwargs.keys():
                if type(kwargs[key]) is shared_dict.SharedDictRepresentation:
                    kwargs[key] = kwargs[key].instantiate()
#            timer.timeit('instantiated '+job_id)
            # call the job
            if self.verbose > 1:
                print>> log, "job %s: calling %s" % (job_id, handler_desc)
            if method is None:
                # call object directly
                result = handler(*args, **kwargs)
            else:
                call = getattr(handler, method, None)
                if not callable(call):
                    raise KeyError("Job %s: unknown method '%s' for handler %s" % (job_id, method, handler_desc))
                result = call(*args, **kwargs)
            if self.verbose > 3:
                print>> log, "job %s: %s returns %s" % (job_id, handler_desc, result)
            # Send result back
            if jobitem['collect_result']:
                self._result_queue.put(
                    dict(job_id=job_id, proc_id=self.proc_id, success=True, result=result, time=timer.seconds()))
        except KeyboardInterrupt:
            raise
        except Exception, exc:
            
            if reraise:
                raise


            print>> log, ModColor.Str("process %s: exception raised processing job %s: %s" % (
                AsyncProcessPool.proc_id, job_id, traceback.format_exc()))
            if jobitem['collect_result']:
                self._result_queue.put(
                    dict(job_id=job_id, proc_id=self.proc_id, success=False, error=exc, time=timer.seconds()))
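For reference, a minimal sketch of the jobitem dict that _dispatch_job consumes, reconstructed only from the lookups in the snippet above; the field values and the handler tuple layout are illustrative assumptions, not the producer's actual format:

# Illustrative only: keys inferred from the reads in _dispatch_job above.
jobitem = dict(
    job_id="job.0",                                  # used in log and result messages
    handler=("handler-id", "method_name", "descr"),  # (handler_id, method or None, handler_desc)
    event=None,                                      # optional key into self._events
    counter=None,                                    # optional key into self._job_counters
    args=[],                                         # SharedDictRepresentation entries get instantiate()d
    kwargs={},                                       # ditto for SharedDictRepresentation values
    collect_result=True,                             # if True, a result dict is pushed onto _result_queue
)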
Example #8
 def addSubModelToSubDirty(self):
     T = ClassTimeIt.ClassTimeIt("InitSSD.addSubModelToSubDirty")
     T.disable()
     ConvModel = self.giveConvModel(self.SubSSDModelImage)
     _, _, N0x, N0y = ConvModel.shape
     MeanConvModel = np.mean(ConvModel, axis=0).reshape((1, 1, N0x, N0y))
     self.DicoSubDirty["ImageCube"] += ConvModel
     self.DicoSubDirty['MeanImage'] += MeanConvModel
     #print "MAX=",np.max(self.DicoSubDirty['MeanImage'])
     T.timeit("2")
Example #9
    def DiagJHJ(self):
        T = ClassTimeIt.ClassTimeIt("JHy")
        T.disable()
        H = np.zeros((self.LMode, self.na, self.LMode, self.na), np.complex64)
        for iAnt in range(self.na):
            #Jt[self.indA0[iAnt],iAnt]=J_TEC[self.indA0[iAnt]]
            #Jt[self.indA1[iAnt],iAnt]=-J_TEC[self.indA1[iAnt]]
            H[0, iAnt, 0, iAnt] += np.sum(self.J_TEC[self.indA0[iAnt]].conj() *
                                          self.J_TEC[self.indA0[iAnt]])
            H[0, iAnt, 0, iAnt] += np.sum(self.J_TEC[self.indA1[iAnt]].conj() *
                                          self.J_TEC[self.indA1[iAnt]])

            if "CPhase" in self.Mode:
                H[0, iAnt, 1, iAnt] += np.sum(self.J_TEC[self.indA0[iAnt]].conj() *
                                              self.J_Phase[self.indA0[iAnt]])
                H[0, iAnt, 1, iAnt] += np.sum(self.J_TEC[self.indA1[iAnt]].conj() *
                                              self.J_Phase[self.indA1[iAnt]])

                H[1, iAnt, 0, iAnt] += np.sum(self.J_Phase[self.indA0[iAnt]].conj() *
                                              self.J_TEC[self.indA0[iAnt]])
                H[1, iAnt, 0, iAnt] += np.sum(self.J_Phase[self.indA1[iAnt]].conj() *
                                              self.J_TEC[self.indA1[iAnt]])

                H[1, iAnt, 1, iAnt] += np.sum(self.J_Phase[self.indA0[iAnt]].conj() *
                                              self.J_Phase[self.indA0[iAnt]])
                H[1, iAnt, 1, iAnt] += np.sum(self.J_Phase[self.indA1[iAnt]].conj() *
                                              self.J_Phase[self.indA1[iAnt]])

        T.timeit("H")

        H = H.reshape((self.LMode * self.na, self.LMode * self.na))
        return H
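        # NOTE: everything below this return is unreachable; it is leftover debugging that plots log10|H| against the reference self.H.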
        A = np.log10(np.abs(self.H))

        B = np.log10(np.abs(H))
        vmin, vmax = A.min(), A.max()
        import pylab
        pylab.clf()
        pylab.subplot(1, 2, 1)
        pylab.imshow(A, interpolation="nearest", vmin=vmin, vmax=vmax)
        pylab.colorbar()
        pylab.subplot(1, 2, 2)
        pylab.imshow(B, interpolation="nearest", vmin=vmin, vmax=vmax)
        pylab.colorbar()
        pylab.draw()
        pylab.show(False)

        stop
        return H
Example #10
 def __init__(self,
              work_queue,
              result_queue,
              island_dict=None,
              iIsland=None,
              ListPixParms=None,
              ListPixData=None,
              GD=None,
              PSF=None,
              PauseOnStart=False,
              PM=None,
              PixVariance=1e-2,
              EstimatedStdFromResid=0,
              MaxFunc=None,
              WeightMaxFunc=None,
              DirtyArray=None,
              ConvMode=None,
              StopWhenQueueEmpty=False,
              BestChi2=1.,
              DicoData=None):
     self.T = ClassTimeIt.ClassTimeIt("WorkerFitness")
     self.T.disable()
     multiprocessing.Process.__init__(self)
     self.work_queue = work_queue
     self.result_queue = result_queue
     self.kill_received = False
     self.exit = multiprocessing.Event()
     self._pause_on_start = PauseOnStart
     self._island_dict = island_dict
     self._chain_dict = island_dict["Chains"]
     self.GD = GD
     self.PM = PM
     self.EstimatedStdFromResid = EstimatedStdFromResid
     self.ListPixParms = ListPixParms
     self.ListPixData = ListPixData
     self.iIsland = iIsland
     self.PSF = PSF
     self.PixVariance = PixVariance
     self.ConvMachine = ClassConvMachine.ClassConvMachine(
         self.PSF, self.ListPixParms, self.ListPixData, ConvMode)
     self.ConvMachine.setParamMachine(self.PM)
     self.DicoData = DicoData
     self.MutMachine = ClassMutate.ClassMutate(self.PM)
     self.MutMachine.setData(DicoData)
     self.MaxFunc = MaxFunc
     self.WeightMaxFunc = WeightMaxFunc
     self.DirtyArray = DirtyArray
     self.T.timeit("init")
     self.StopWhenQueueEmpty = StopWhenQueueEmpty
     self.BestChi2 = BestChi2
Example #11
    def giveJacobianHessian(self):
        T = ClassTimeIt.ClassTimeIt("J")
        J = np.zeros((self.Y.size, self.na * 2), np.complex64)
        Jt = J[:, 0:self.na]
        Jc = J[:, self.na:]

        TEC = self.CurrentX[0:self.na]
        dTEC = TEC[self.A0] - TEC[self.A1]
        if "CPhase" in self.Mode:
            ConstPhase = self.CurrentX[self.na:]
            dConstPhase = ConstPhase[self.A0] - ConstPhase[self.A1]
        else:
            dConstPhase = 0

        Phase = K / self.nu_Y * dTEC + dConstPhase
        Z = np.exp(1j * Phase)

        self.J_TEC = J_TEC = 1j * K / self.nu_Y * Z
        self.J_Phase = J_Phase = 1j * Z
        return
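        # NOTE: everything below this return is unreachable; it retains the original explicit (sparse) Jacobian and Hessian construction for reference.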
        T.timeit("first")
        for iAnt in range(self.na):
            Jt[self.indA0[iAnt], iAnt] = J_TEC[self.indA0[iAnt]]
            Jt[self.indA1[iAnt], iAnt] = -J_TEC[self.indA1[iAnt]]

            Jc[self.indA0[iAnt], iAnt] = J_Phase[self.indA0[iAnt]]
            Jc[self.indA1[iAnt], iAnt] = -J_Phase[self.indA1[iAnt]]

        T.timeit("build")
        self.J = J
        self.Jsp = Jsp = scipy.sparse.coo_matrix(J)
        T.timeit("sp")
        # import pylab
        # pylab.clf()
        # pylab.subplot(1,2,1)
        # pylab.imshow(Jt.real,interpolation="nearest",aspect="auto")
        # pylab.subplot(1,2,2)
        # pylab.imshow(Jc.real,interpolation="nearest",aspect="auto")
        # pylab.draw()
        # pylab.show(False)
        # stop

        #print np.count_nonzero(J)/float(J.size)

        T.timeit("prod")
        H = np.array(np.dot(Jsp.T.conj(), Jsp).todense())
        self.H = H
        T.timeit("Hsp")

        return J, H
Example #12
    def GiveInstrumentBeam(self,*args,**kwargs):
        
        T=ClassTimeIt.ClassTimeIt("GiveInstrumentBeam")
        T.disable()
        Beam=self.GiveRawBeam(*args,**kwargs)
        nd,na,nch,_,_=Beam.shape
        T.timeit("0")
        MeanBeam=np.zeros((nd,na,self.NChanJones,2,2),dtype=Beam.dtype)
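        # Average the raw beam over all visibility channels that map onto each Jones frequency bin.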
        for ich in range(self.NChanJones):
            indCh=np.where(self.VisToJonesChanMapping==ich)[0]
            MeanBeam[:,:,ich,:,:]=np.mean(Beam[:,:,indCh,:,:],axis=2)
        T.timeit("1")

        return MeanBeam
Example #13
def MakeSphe(Support, NpixIm):
    # x,y,CF=Gaussian.Gaussian(3,Support,1)

    import ClassTimeIt
    T = ClassTimeIt.ClassTimeIt()
    SupportSphe = 111
    xc = SupportSphe / 2
    CF = ModTaper.Sphe2D(SupportSphe)
    T.timeit("0")
    CF = np.complex128(CF)  # np.array(np.complex128(CF),order="F")

    fCF = fft2(CF)
    fCF = fCF[xc - Support / 2:xc + Support / 2 + 1,
              xc - Support / 2:xc + Support / 2 + 1].copy()
    zfCF = ZeroPad(fCF, NpixIm)
    T.timeit("1")

    ifzfCF = ifft2(zfCF)

    # ############"
    # import pylab
    # pylab.clf()
    # pylab.subplot(3,2,1)
    # lpar=list(pylab.imshow.__defaults__)
    # lpar[3]="nearest"
    # pylab.imshow.__defaults__=tuple(lpar)
    # pylab.imshow(CF.real)
    # pylab.colorbar()
    # pylab.subplot(3,2,2)
    # pylab.imshow(CF.imag)
    # pylab.colorbar()
    # pylab.subplot(3,2,3)
    # pylab.imshow(fCF.real)
    # pylab.colorbar()
    # pylab.subplot(3,2,4)
    # pylab.imshow(fCF.imag)
    # pylab.colorbar()
    # pylab.subplot(3,2,5)
    # pylab.imshow(ifzfCF.real)
    # pylab.colorbar()
    # pylab.subplot(3,2,6)
    # pylab.imshow(ifzfCF.imag)
    # pylab.colorbar()
    # pylab.draw()
    # pylab.show(False)
    # pylab.pause(0.1)
    # stop

    return CF, fCF, ifzfCF
Example #14
 def InitCF(self, cf_dict, compute_cf, wmax):
     T = ClassTimeIt.ClassTimeIt("InitCF_ClassDDEGridMachine")
     T.disable()
     self.WTerm = ModCF.ClassWTermModified(Cell=self.Cell,
                                           Sup=self.Sup,
                                           Npix=self.Npix,
                                           Freqs=self.ChanFreq,
                                           wmax=wmax,
                                           Nw=self.Nw,
                                           OverS=self.OverS,
                                           lmShift=self.lmShift,
                                           cf_dict=cf_dict,
                                           compute_cf=compute_cf,
                                           IDFacet=self.IDFacet)
     T.timeit("2")
     self.ifzfCF = self.WTerm.ifzfCF
Example #15
    def _smearmapping_worker(self, DATA, blockdict, sizedict, a0, a1, dPhi, l, channel_mapping, mode):
        t = ClassTimeIt.ClassTimeIt()
        t.disable()
        if mode == 1:
            BlocksRowsListBL, BlocksSizesBL, _ = GiveBlocksRowsListBL_old(a0, a1, DATA, dPhi, l, channel_mapping)
        elif mode == 2:
            BlocksRowsListBL, BlocksSizesBL, _ = GiveBlocksRowsListBL(a0, a1, DATA, dPhi, l, channel_mapping)
        else:
            raise ValueError("unknown BDAMode setting %d"%mode)

        t.timeit('compute')
        if BlocksRowsListBL is not None:
            key = "%d:%d" % (a0,a1)
            sizedict[key]  = np.array(BlocksSizesBL)
            blockdict[key] = np.array(BlocksRowsListBL)
            t.timeit('store')
Example #16
    def addSubModelToSubDirty(self):
        T = ClassTimeIt.ClassTimeIt("InitSSD.addSubModelToSubDirty")
        T.disable()
        ConvModel = self.giveConvModel(self.SubSSDModelImage)
        _, _, N0x, N0y = ConvModel.shape
        MeanConvModel = np.mean(ConvModel, axis=0).reshape((1, 1, N0x, N0y))
        self.DicoSubDirty["ImageCube"] += ConvModel
        #self.DicoSubDirty['MeanImage']+=MeanConvModel

        W = np.float32(self.DicoSubDirty["WeightChansImages"])
        W = W / np.sum(W)
        MeanImage = np.sum(self.DicoSubDirty["ImageCube"] * W.reshape((-1, 1, 1, 1)),
                           axis=0).reshape((1, 1, N0x, N0y))
        self.DicoSubDirty['MeanImage'] = MeanImage
        #print "MAX=",np.max(self.DicoSubDirty['MeanImage'])
        T.timeit("2")
Example #17
    def GiveConvVector(self, iPix, TypeOut="Data"):
        T = ClassTimeIt.ClassTimeIt()
        T.disable()
        PSF = self.PSF
        NPixPSF = PSF.shape[-1]
        xc = yc = NPixPSF / 2
        T.timeit("0")
        x1, y1 = self.ArrayListPixParms[iPix:iPix + 1].T

        if TypeOut == "Data":
            M = np.zeros((self.NFreqBands, 1, self.NPixListData, 1),
                         np.float32)
            x0, y0 = self.ArrayListPixData.T
        else:
            M = np.zeros((self.NFreqBands, 1, self.NPixListParms, 1),
                         np.float32)
            x0, y0 = self.ArrayListPixParms.T

        N0 = x0.size
        N1 = x1.size
        T.timeit("1")
        dx = (x1.reshape((N1, 1)) - x0.reshape((1, N0)) + xc).T
        dy = (y1.reshape((N1, 1)) - y0.reshape((1, N0)) + xc).T
        T.timeit("2")
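        # Keep only pixel pairs whose (dx, dy) offset lands inside the PSF footprint.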
        Cx = ((dx >= 0) & (dx < NPixPSF))
        Cy = ((dy >= 0) & (dy < NPixPSF))
        C = (Cx & Cy)
        T.timeit("3")
        indPSF = np.arange(M.shape[-1] * M.shape[-2])
        indPSF_sel = indPSF[C.ravel()]
        indPixPSF = dx.ravel()[C.ravel()] * NPixPSF + dy.ravel()[C.ravel()]
        T.timeit("4")
        if indPSF_sel.size != indPSF.size:
            for iBand in range(self.NFreqBands):
                PSF_Chan = PSF[iBand, 0]
                M[iBand, 0].flat[indPSF_sel] = PSF_Chan.flat[indPixPSF.ravel()]
            return M[:, :, :, 0]
        else:
            ListVec = []
            for iBand in range(self.NFreqBands):
                PSF_Chan = PSF[iBand, 0]
                ListVec.append(PSF_Chan.flat[indPixPSF.ravel()])
            return ListVec
Example #18
def doOverlap(npP0, npP1):
    T = ClassTimeIt.ClassTimeIt("Overlap")
    T.disable()
    if npP0.size == 0: return False
    if npP1.size == 0: return False
    P0 = Polygon.Polygon(npP0)
    P1 = Polygon.Polygon(npP1)
    T.timeit("declare")
    P1Cut = (P0 & P1)
    T.timeit("Cut")
    aP1 = P1.area()
    aP1Cut = P1Cut.area()
    T.timeit("Area")
    if np.abs(aP1Cut - aP1) < 1e-10:
        return "Contains"
    elif aP1Cut == 0:
        return "Outside"
    else:
        return "Cut"
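A minimal usage sketch of doOverlap, assuming the Polygon package imported by the surrounding module and Nx2 vertex arrays; the example polygons below are made up:

import numpy as np

square = np.array([[0., 0.], [4., 0.], [4., 4.], [0., 4.]])
inner = np.array([[1., 1.], [2., 1.], [2., 2.], [1., 2.]])       # fully inside square -> "Contains"
far = np.array([[10., 10.], [11., 10.], [11., 11.]])             # no intersection     -> "Outside"
straddling = np.array([[3., 3.], [5., 3.], [5., 5.], [3., 5.]])  # partial overlap     -> "Cut"

print(doOverlap(square, inner), doOverlap(square, far), doOverlap(square, straddling))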
Example #19
    def __init__(self, shape, dtype, norm=True, ncores=1, FromSharedId=None):
        # if FromSharedId is None:
        #     self.A = pyfftw.n_byte_align_empty( shape[-2::], 16, dtype=dtype)
        # else:
        #     self.A = NpShared.GiveArray(FromSharedId)

        #pyfftw.interfaces.cache.enable()
        #pyfftw.interfaces.cache.set_keepalive_time(3000)
        self.ncores = ncores or NCPU_global
        #print "plan"
        T = ClassTimeIt.ClassTimeIt("ModFFTW")
        T.disable()

        #self.A = pyfftw.interfaces.numpy_fft.fft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE',  threads=self.ncores)
        T.timeit("planF")
        #self.A = pyfftw.interfaces.numpy_fft.ifft2(self.A, axes=(-1,-2),overwrite_input=True, planner_effort='FFTW_MEASURE',  threads=self.ncores)
        T.timeit("planB")
        #print "done"
        self.ThisType = dtype
        self.norm = norm
Example #20
    def JHy(self, y):
        T = ClassTimeIt.ClassTimeIt("JHy")
        T.disable()
        v = np.zeros((self.LMode, self.na), np.complex64)
        for iAnt in range(self.na):
            v[0, iAnt] += np.sum(self.J_TEC[self.indA0[iAnt]].conj() *
                                 y[self.indA0[iAnt]])
            v[0, iAnt] += np.sum(-self.J_TEC[self.indA1[iAnt]].conj() *
                                 y[self.indA1[iAnt]])

            if "CPhase" in self.Mode:
                v[1, iAnt] += np.sum(self.J_Phase[self.indA0[iAnt]].conj() *
                                     y[self.indA0[iAnt]])
                v[1, iAnt] += np.sum(-self.J_Phase[self.indA1[iAnt]].conj() *
                                     y[self.indA1[iAnt]])

        v = v.ravel()
        T.timeit("Prod")
        # PSparse=np.array(np.dot(self.J.T.conj(),scipy.sparse.coo_matrix(y.reshape((-1,1)))).todense()).ravel()
        # T.timeit("PSparse")
        return v
Example #21
    def ConvolveVector(self, A, Norm=True, OutMode="Data"):
        sh = A.shape
        if OutMode == "Data":
            OutSize = self.NPixListData
        elif OutMode == "Parms":
            OutSize = self.NPixListParms
        ConvA = np.zeros((self.NFreqBands, 1, OutSize), np.float32)
        T = ClassTimeIt.ClassTimeIt("Vec")
        T.disable()

        for iPix in range(self.NPixListParms):
            Fch = A[:, iPix]
            if np.abs(Fch).max() == 0: continue

            Vec_iPix = self.GiveConvVector(iPix, TypeOut=OutMode)
            T.timeit("GetVec")
            for iBand in range(self.NFreqBands):
                F = Fch[iBand]
                ConvA[iBand] += F * Vec_iPix[iBand]
            T.timeit("Sum")

        return ConvA
Example #22
    def fft(self, A, ChanList=None):
        axes = (-1, -2)

        T = ClassTimeIt.ClassTimeIt("ModFFTW")
        T.disable()

        nch, npol, n, n = A.shape

        if ChanList is not None:
            CSel = ChanList
        else:
            CSel = range(nch)

        for ich in CSel:
            for ipol in range(npol):
                B = iFs(A[ich, ipol].astype(A.dtype), axes=axes)
                T.timeit("shift and copy")
                B = np.fft.fft2(B, axes=axes)
                T.timeit("fft")
                A[ich, ipol] = Fs(B, axes=axes) / (A.shape[-1] * A.shape[-2])
                T.timeit("shift")

        return A
Example #23
    def Deconvolve(self, ch=0, **kwargs):
        """
        Runs the minor cycle over image channel 'ch'.
        initMinor is the number of minor iterations (keeps a continuous count through major iterations)
        Nminor is the maximum number of minor iterations

        Returns a tuple of: return_code, continue, updated,
        where return_code is a status string;
        continue is True if another cycle should be executed (one or more polarizations still need cleaning);
        updated is True if one or more polarization models have been updated.
        """
        #No need to set the channel when doing joint deconvolution
        self.setChannel(ch)

        exit_msg = ""
        continue_deconvolution = False
        update_model = False

        _, npix, _ = self.Dirty.shape
        xc = (npix) / 2

        npol, _, _ = self.Dirty.shape

        # Get the PeakMap (first index will always be 0 because we only support I cleaning)
        PeakMap = self.Dirty[0, :, :]

        m0, m1 = PeakMap.min(), PeakMap.max()

        #These options should probably be moved into MinorCycleConfig in parset
        DoAbs = int(self.GD["Deconv"]["AllowNegative"])
        print >> log, "  Running minor cycle [MinorIter = %i/%i, SearchMaxAbs = %i]" % (
            self._niter, self.MaxMinorIter, DoAbs)

        ## Determine which stopping criterion to use for flux limit
        #Get RMS stopping criterion
        NPixStats = self.GD["Deconv"]["NumRMSSamples"]
        if NPixStats:
            RandomInd = np.int64(np.random.rand(NPixStats) * npix**2)
            RMS = np.std(np.real(PeakMap.ravel()[RandomInd]))
        else:
            RMS = np.std(PeakMap)

        self.RMS = RMS

        self.GainMachine.SetRMS(RMS)

        Fluxlimit_RMS = self.RMSFactor * RMS

        #Find position and intensity of first peak
        x, y, MaxDirty = NpParallel.A_whereMax(PeakMap,
                                               NCPU=self.NCPU,
                                               DoAbs=DoAbs,
                                               Mask=self.MaskArray)

        #Get peak factor stopping criterion
        Fluxlimit_Peak = MaxDirty * self.PeakFactor

        #Get side lobe stopping criterion
        Fluxlimit_Sidelobe = (
            (self.CycleFactor - 1.) / 4. * (1. - self.SideLobeLevel) +
            self.SideLobeLevel) * MaxDirty if self.CycleFactor else 0
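        # Example (assumed values): CycleFactor=2.5, SideLobeLevel=0.2, MaxDirty=1.0 Jy
        # gives ((2.5 - 1)/4 * (1 - 0.2) + 0.2) * 1.0 = 0.5 Jy.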

        mm0, mm1 = PeakMap.min(), PeakMap.max()

        # Choose whichever threshold is highest
        StopFlux = max(Fluxlimit_Peak, Fluxlimit_RMS, Fluxlimit_Sidelobe,
                       self.FluxThreshold)

        print >> log, "    Dirty image peak flux      = %10.6f Jy [(min, max) = (%.3g, %.3g) Jy]" % (
            MaxDirty, mm0, mm1)
        print >> log, "      RMS-based threshold      = %10.6f Jy [rms = %.3g Jy; RMS factor %.1f]" % (
            Fluxlimit_RMS, RMS, self.RMSFactor)
        print >> log, "      Sidelobe-based threshold = %10.6f Jy [sidelobe  = %.3f of peak; cycle factor %.1f]" % (
            Fluxlimit_Sidelobe, self.SideLobeLevel, self.CycleFactor)
        print >> log, "      Peak-based threshold     = %10.6f Jy [%.3f of peak]" % (
            Fluxlimit_Peak, self.PeakFactor)
        print >> log, "      Absolute threshold       = %10.6f Jy" % (
            self.FluxThreshold)
        print >> log, "    Stopping flux              = %10.6f Jy [%.3f of peak ]" % (
            StopFlux, StopFlux / MaxDirty)

        T = ClassTimeIt.ClassTimeIt()
        T.disable()

        ThisFlux = MaxDirty
        #print x,y

        if ThisFlux < StopFlux:
            print >> log, ModColor.Str(
                "    Initial maximum peak %g Jy below threshold, we're done CLEANing"
                % (ThisFlux),
                col="green")
            exit_msg = exit_msg + " " + "FluxThreshold"
            continue_deconvolution = False or continue_deconvolution
            update_model = False or update_model
            # No need to do anything further if we are already at the stopping flux
            return exit_msg, continue_deconvolution, update_model

        # set peak in GainMachine (deprecated?)
        self.GainMachine.SetFluxMax(ThisFlux)

        # def GivePercentDone(ThisMaxFlux):
        #     fracDone=1.-(ThisMaxFlux-StopFlux)/(MaxDirty-StopFlux)
        #     return max(int(round(100*fracDone)),100)

        #Do minor cycle deconvolution loop
        try:
            for i in range(self._niter + 1, self.MaxMinorIter + 1):
                self._niter = i
                #grab a new peakmap
                PeakMap = self.Dirty[0, :, :]

                x, y, ThisFlux = NpParallel.A_whereMax(PeakMap,
                                                       NCPU=self.NCPU,
                                                       DoAbs=DoAbs,
                                                       Mask=self.MaskArray)

                # deprecated?
                self.GainMachine.SetFluxMax(ThisFlux)

                T.timeit("max0")

                if ThisFlux <= StopFlux:
                    print >> log, ModColor.Str(
                        "    CLEANing [iter=%i] peak of %.3g Jy lower than stopping flux"
                        % (i, ThisFlux),
                        col="green")
                    cont = ThisFlux > self.FluxThreshold
                    if not cont:
                        print >> log, ModColor.Str(
                            "    CLEANing [iter=%i] absolute flux threshold of %.3g Jy has been reached"
                            % (i, self.FluxThreshold),
                            col="green",
                            Bold=True)
                    exit_msg = exit_msg + " " + "MinFluxRms"
                    continue_deconvolution = cont or continue_deconvolution
                    update_model = True or update_model

                    break  # stop cleaning if threshold reached

                # This is used to track Cleaning progress
                rounded_iter_step = 1 if i < 10 else (10 if i < 200 else (
                    100 if i < 2000 else 1000))
                # min(int(10**math.floor(math.log10(i))), 10000)
                if i >= 10 and i % rounded_iter_step == 0:
                    # if self.GD["Debug"]["PrintMinorCycleRMS"]:
                    #rms = np.std(np.real(self._CubeDirty.ravel()[self.IndStats]))
                    print >> log, "    [iter=%i] peak residual %.3g" % (
                        i, ThisFlux)

                nch, npol, _, _ = self._Dirty.shape
                #Fpol contains the intensities at (x,y) per freq and polarisation
                Fpol = np.zeros([nch, npol, 1, 1], dtype=np.float32)
                if self.MultiFreqMode:
                    if self.GD["Hogbom"]["FreqMode"] == "Poly":
                        Ncoeffs = self.GD["Hogbom"]["PolyFitOrder"]
                    elif self.GD["Hogbom"]["FreqMode"] == "GPR":
                        Ncoeffs = self.GD["Hogbom"]["NumBasisFuncs"]
                    else:
                        raise NotImplementedError(
                            "FreqMode %s not supported" %
                            self.GD["Hogbom"]["FreqMode"])
                    Coeffs = np.zeros([npol, Ncoeffs])
                else:
                    Coeffs = np.zeros([npol, nch])  # to support per-channel cleaning

                # Get the JonesNorm
                JonesNorm = (self.DicoDirty["JonesNorm"][:, :, x, y]).reshape(
                    (nch, npol, 1, 1))

                # Get the solution
                Fpol[:, 0, 0, 0] = self._Dirty[:, 0, x, y] / np.sqrt(
                    JonesNorm[:, 0, 0, 0])
                # Fit a polynomial to get coeffs
                # tmp = self.ModelMachine.FreqMachine.Fit(Fpol[:, 0, 0, 0])
                # print tmp.shape
                Coeffs[0, :] = self.ModelMachine.FreqMachine.Fit(Fpol[:, 0, 0, 0])
                # Overwrite with polynomial fit
                Fpol[:, 0, 0, 0] = self.ModelMachine.FreqMachine.Eval(Coeffs[0, :])

                T.timeit("stuff")

                #Find PSF corresponding to location (x,y)
                self.PSFServer.setLocation(
                    x, y)  #Selects the facet closest to (x,y)
                PSF, meanPSF = self.PSFServer.GivePSF()  #Gives associated PSF
                _, _, PSFnx, PSFny = PSF.shape
                # Normalise PSF in each channel
                PSF /= np.amax(PSF.reshape(nch, npol, PSFnx * PSFny),
                               axis=2,
                               keepdims=True).reshape(nch, npol, 1, 1)

                T.timeit("FindScale")

                CurrentGain = self.GainMachine.GiveGain()

                #Update model
                self.ModelMachine.AppendComponentToDictStacked((x, y), 1.0,
                                                               Coeffs[0, :], 0)

                # Subtract LocalSM*CurrentGain from dirty image
                self.SubStep((x, y),
                             PSF * Fpol * CurrentGain * np.sqrt(JonesNorm))
                T.timeit("SubStep")

                T.timeit("End")

        except KeyboardInterrupt:
            print >> log, ModColor.Str(
                "    CLEANing [iter=%i] minor cycle interrupted with Ctrl+C, peak flux %.3g"
                % (self._niter, ThisFlux))
            exit_msg = exit_msg + " " + "MaxIter"
            continue_deconvolution = False or continue_deconvolution
            update_model = True or update_model
            return exit_msg, continue_deconvolution, update_model

        if self._niter >= self.MaxMinorIter:  #Reached maximum number of iterations:
            print >> log, ModColor.Str(
                "    CLEANing [iter=%i] Reached maximum number of iterations, peak flux %.3g"
                % (self._niter, ThisFlux))
            exit_msg = exit_msg + " " + "MaxIter"
            continue_deconvolution = False or continue_deconvolution
            update_model = True or update_model

        return exit_msg, continue_deconvolution, update_model
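A hedged sketch of how the (exit_msg, continue_deconvolution, update_model) tuple returned above might be consumed by a major-cycle driver; deconv_machine and max_major_cycles are assumed names, not part of the snippet:

for major_cycle in range(max_major_cycles):
    exit_msg, keep_cleaning, model_updated = deconv_machine.Deconvolve(ch=0)
    if model_updated:
        pass  # e.g. degrid the updated model and re-image to obtain a fresh residual
    if not keep_cleaning:
        break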
Example #24
    def get(self,
            times,
            uvw,
            visIn,
            flag,
            A0A1,
            ModelImage,
            PointingID=0,
            Row0Row1=(0, -1),
            DicoJonesMatrices=None,
            freqs=None,
            ImToGrid=True,
            TranformModelInput="",
            ChanMapping=None,
            sparsification=None):
        T = ClassTimeIt.ClassTimeIt("get")
        T.disable()
        vis = visIn.view()
        A0, A1 = A0A1

        T.timeit("0")

        if ImToGrid:
            if np.max(np.abs(ModelImage)) == 0:
                return vis
            Grid = self.dtype(self.setModelIm(ModelImage))
        else:
            Grid = ModelImage

        if ChanMapping is None:
            ChanMapping = np.zeros((visIn.shape[1], ), np.int32)

        if TranformModelInput == "FT":
            if np.max(np.abs(ModelImage)) == 0:
                return vis
            if self.GD["RIME"]["Precision"] == "S":
                Cast = np.complex64
            elif self.GD["RIME"]["Precision"] == "D":
                Cast = np.complex128
            Grid = np.complex64(self.getFFTWMachine().fft(Cast(ModelImage)))

        if freqs.size > 1:
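            # Treat the channels as equidistant when every spacing deviates from the mean spacing by less than 1.0 (in the units of freqs, Hz in practice).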
            df = freqs[1::] - freqs[0:-1]
            ddf = np.abs(df - np.mean(df))
            ChanEquidistant = int(np.max(ddf) < 1.)
        else:
            ChanEquidistant = 0

        # np.save("Grid",Grid)
        NVisChan = visIn.shape[1]
        self.ChanMappingDegrid = np.int32(ChanMapping)
        self.SumJonesChan = np.zeros((2, NVisChan), np.float64)

        T.timeit("1")

        npol = self.npol
        NChan = self.NChan
        SumWeigths = self.SumWeigths
        if vis.shape != flag.shape:
            raise Exception('vis[%s] and flag[%s] should have the same shape' %
                            (str(vis.shape), str(flag.shape)))

        l0, m0 = self.lmShift
        FacetInfos = np.float64(
            np.array([self.WTerm.Cu, self.WTerm.Cv, l0, m0, self.IDFacet]))
        Row0, Row1 = Row0Row1
        if Row1 == -1:
            Row1 = uvw.shape[0]
        RowInfos = np.array([Row0, Row1]).astype(np.int32)

        T.timeit("2")

        self.CheckTypes(Grid=Grid,
                        vis=vis,
                        uvw=uvw,
                        flag=flag,
                        ListWTerm=self.WTerm.Wplanes)

        ParamJonesList = []

        if DicoJonesMatrices is not None:
            ApplyAmp = 0
            ApplyPhase = 0
            ScaleAmplitude = 0
            CalibError = 0.

            if "A" in self.GD["DDESolutions"]["DDModeDeGrid"]:
                ApplyAmp = 1
            if "P" in self.GD["DDESolutions"]["DDModeDeGrid"]:
                ApplyPhase = 1
            if self.GD["DDESolutions"]["ScaleAmpDeGrid"]:
                ScaleAmplitude = 1
                CalibError = (self.GD["DDESolutions"]["CalibErr"] /
                              3600.) * np.pi / 180

            LApplySol = [ApplyAmp, ApplyPhase, ScaleAmplitude, CalibError]
            LSumJones = [self.SumJones]
            LSumJonesChan = [self.SumJonesChan]
            ParamJonesList = self.GiveParamJonesList(DicoJonesMatrices,
                                                     times,
                                                     A0,
                                                     A1,
                                                     uvw,
                                                     degridder=True)
            ParamJonesList = ParamJonesList+LApplySol+LSumJones+LSumJonesChan + \
                [np.float32(self.GD["DDESolutions"]["ReWeightSNR"])]

        T.timeit("3")
        #print vis
        #print "DEGRID:",Grid.shape,ChanMapping
        if self.GD["RIME"]["ForwardMode"] == "Classic":
            _ = _pyGridder.pyDeGridderWPol(
                Grid, vis, uvw, flag, SumWeigths, 0, self.WTerm.WplanesConj,
                self.WTerm.Wplanes,
                np.array([
                    self.WTerm.RefWave, self.WTerm.wmax,
                    len(self.WTerm.Wplanes), self.WTerm.OverS
                ],
                         dtype=np.float64), self.incr.astype(np.float64),
                freqs, [self.PolMap, FacetInfos, RowInfos, ChanMapping],
                ParamJonesList, self.LSmear)
        elif self.GD["RIME"]["ForwardMode"] == "BDA-degrid":
            # OptimisationInfos=[self.FullScalarMode,self.ChanEquidistant]
            OptimisationInfos = [
                self.JonesType, ChanEquidistant, self.SkyType, self.PolModeID
            ]
            #            MapSmear = NpShared.GiveArray(
            #                "%sBDA.DeGrid" %
            #               (self.ChunkDataCache))
            vis = _pyGridderSmear.pyDeGridderWPol(
                Grid, vis, uvw, flag, SumWeigths, 0, self.WTerm.WplanesConj,
                self.WTerm.Wplanes,
                np.array([
                    self.WTerm.RefWave, self.WTerm.wmax,
                    len(self.WTerm.Wplanes), self.WTerm.OverS
                ],
                         dtype=np.float64), self.incr.astype(np.float64),
                freqs, [self.PolMap, FacetInfos, RowInfos], ParamJonesList,
                self._bda_degrid, sparsification
                if sparsification is not None else np.array([], dtype=np.bool),
                OptimisationInfos, self.LSmear, np.int32(ChanMapping),
                np.array(self.DataCorrelationFormat).astype(np.uint16),
                np.array(self.ExpectedOutputStokes).astype(np.uint16))
        elif self.GD["RIME"]["ForwardMode"] == "BDA-degrid-classic":
            OptimisationInfos = [
                self.JonesType, ChanEquidistant, self.SkyType, self.PolModeID
            ]
            #            MapSmear = NpShared.GiveArray(
            #                "%sBDA.DeGrid" %
            #               (self.ChunkDataCache))
            vis = _pyGridderSmearClassic.pyDeGridderWPol(
                Grid, vis, uvw, flag, SumWeigths, 0, self.WTerm.WplanesConj,
                self.WTerm.Wplanes,
                np.array([
                    self.WTerm.RefWave, self.WTerm.wmax,
                    len(self.WTerm.Wplanes), self.WTerm.OverS
                ],
                         dtype=np.float64), self.incr.astype(np.float64),
                freqs, [self.PolMap, FacetInfos, RowInfos], ParamJonesList,
                self._bda_degrid, sparsification
                if sparsification is not None else np.array([], dtype=np.bool),
                OptimisationInfos, self.LSmear, np.int32(ChanMapping),
                np.array(self.DataCorrelationFormat).astype(np.uint16),
                np.array(self.ExpectedOutputStokes).astype(np.uint16))
        else:
            raise ValueError("unknown --RIME-ForwardMode %s" %
                             self.GD["RIME"]["ForwardMode"])

        T.timeit("4 (degrid)")
        # print vis

        # uvw,vis=self.ShiftVis(uvwOrig,vis,reverse=False)

        # T.timeit("5")
        return vis
Example #25
    def put(self,
            times,
            uvw,
            visIn,
            flag,
            A0A1,
            W=None,
            PointingID=0,
            DoNormWeights=True,
            DicoJonesMatrices=None,
            freqs=None,
            DoPSF=0,
            ChanMapping=None,
            ResidueGrid=None,
            sparsification=None):
        """
        Gridding routine; wraps the external C gridder python extension
        Args:
            times:
            uvw:
            visIn:
            flag:
            A0A1:
            W:
            PointingID:
            DoNormWeights:
            DicoJonesMatrices:
            freqs:
            DoPSF:
            ChanMapping:
            ResidueGrid:
        Returns:

        """
        vis = visIn

        T = ClassTimeIt.ClassTimeIt("put")
        T.disable()
        self.DoNormWeights = DoNormWeights
        if not (self.DoNormWeights):
            self.reinitGrid()

        if freqs.size > 1:
            df = freqs[1::] - freqs[0:-1]
            ddf = np.abs(df - np.mean(df))
            ChanEquidistant = int(np.max(ddf) < 1.)
        else:
            ChanEquidistant = 0

        if ChanMapping is None:
            ChanMapping = np.zeros((visIn.shape[1], ), np.int64)
        self.ChanMappingGrid = ChanMapping

        Grid = ResidueGrid

        if Grid.dtype != self.dtype:
            raise TypeError("Grid must be of type " + str(self.dtype))
        A0, A1 = A0A1

        npol = self.npol
        NChan = self.NChan

        NVisChan = vis.shape[1]
        self.SumJonesChan = np.zeros((2, NVisChan), np.float64)

        if isinstance(W, type(None)):
            W = np.ones((uvw.shape[0], NVisChan), dtype=np.float64)

        SumWeigths = self.SumWeigths
        if vis.shape != flag.shape:
            raise Exception('vis[%s] and flag[%s] should have the same shape' %
                            (str(vis.shape), str(flag.shape)))

        u, v, w = uvw.T

        l0, m0 = self.lmShift
        FacetInfos = np.float64(
            np.array([self.WTerm.Cu, self.WTerm.Cv, l0, m0, self.IDFacet]))

        self.CheckTypes(Grid=Grid,
                        vis=vis,
                        uvw=uvw,
                        flag=flag,
                        ListWTerm=self.WTerm.Wplanes,
                        W=W)
        ParamJonesList = []
        if DicoJonesMatrices is not None:
            ApplyAmp = 0
            ApplyPhase = 0
            ScaleAmplitude = 0
            CalibError = 0.

            if "A" in self.GD["DDESolutions"]["DDModeGrid"]:
                ApplyAmp = 1
            if "P" in self.GD["DDESolutions"]["DDModeGrid"]:
                ApplyPhase = 1
            if self.GD["DDESolutions"]["ScaleAmpGrid"]:
                ScaleAmplitude = 1
                CalibError = (self.GD["DDESolutions"]["CalibErr"] /
                              3600.) * np.pi / 180
            LApplySol = [ApplyAmp, ApplyPhase, ScaleAmplitude, CalibError]
            LSumJones = [self.SumJones]
            LSumJonesChan = [self.SumJonesChan]
            ParamJonesList = self.GiveParamJonesList(DicoJonesMatrices,
                                                     times,
                                                     A0,
                                                     A1,
                                                     uvw,
                                                     gridder=True)
            ParamJonesList = ParamJonesList + LApplySol + LSumJones + LSumJonesChan + [
                np.float32(self.GD["DDESolutions"]["ReWeightSNR"])
            ]

        #T2= ClassTimeIt.ClassTimeIt("Gridder")
        #T2.disable()
        T.timeit("stuff")
        if False:  # # self.GD["Comp"]["GridMode"] == 0:  # really deprecated for now
            raise RuntimeError("Deprecated flag. Please use BDA gridder")
        elif self.GD["RIME"]["BackwardMode"] == "BDA-grid":
            OptimisationInfos = [
                self.JonesType, ChanEquidistant, self.SkyType, self.PolModeID
            ]
            _pyGridderSmear.pyGridderWPol(
                Grid, vis, uvw, flag, W, SumWeigths, DoPSF, self.WTerm.Wplanes,
                self.WTerm.WplanesConj,
                np.array([
                    self.WTerm.RefWave, self.WTerm.wmax,
                    len(self.WTerm.Wplanes), self.WTerm.OverS
                ],
                         dtype=np.float64), self.incr.astype(np.float64),
                freqs, [self.PolMap, FacetInfos], ParamJonesList,
                self._bda_grid, sparsification
                if sparsification is not None else np.array([], dtype=np.bool),
                OptimisationInfos, self.LSmear, np.int32(ChanMapping),
                np.array(self.DataCorrelationFormat).astype(np.uint16),
                np.array(self.ExpectedOutputStokes).astype(np.uint16))

            T.timeit("gridder")
            T.timeit("grid %d" % self.IDFacet)
        elif self.GD["RIME"]["BackwardMode"] == "BDA-grid-classic":
            OptimisationInfos = [
                self.JonesType, ChanEquidistant, self.SkyType, self.PolModeID
            ]
            _pyGridderSmearClassic.pyGridderWPol(
                Grid, vis, uvw, flag, W, SumWeigths, DoPSF, self.WTerm.Wplanes,
                self.WTerm.WplanesConj,
                np.array([
                    self.WTerm.RefWave, self.WTerm.wmax,
                    len(self.WTerm.Wplanes), self.WTerm.OverS
                ],
                         dtype=np.float64), self.incr.astype(np.float64),
                freqs, [self.PolMap, FacetInfos], ParamJonesList,
                self._bda_grid, sparsification
                if sparsification is not None else np.array([], dtype=np.bool),
                OptimisationInfos, self.LSmear, np.int32(ChanMapping),
                np.array(self.DataCorrelationFormat).astype(np.uint16),
                np.array(self.ExpectedOutputStokes).astype(np.uint16))
        else:
            raise ValueError("unknown --RIME-BackwardMode %s" %
                             self.GD["RIME"]["BackwardMode"])

        T.timeit("gridder")
        T.timeit("grid %d" % self.IDFacet)
Example #26
def testGrid():
    import pylab
    # Parset=ReadCFG.Parset("%s/Parset/DefaultParset.cfg"%os.environ["DDFACET_DIR"])
    Parset = ReadCFG.Parset("%s/DDFacet/Parset/DefaultParset.cfg" %
                            os.environ["DDFACET_DIR"])
    DC = Parset.DicoPars
    # 19 (-0.01442078294460315, 0.014406238534169863) 2025 3465 -10.0

    #(array([0]), array([0]), array([1015]), array([1201]))
    #(array([0]), array([0]), array([1050]), array([1398]))
    # 17 (-0.014391694123736577, 0.01437714971330329) 2025 3465 -10.0
    #(array([0]), array([0]), array([1030]), array([1303]))

    npix = 2025
    Cell = 1.5
    # Cell=.5
    offy, offx = 3465 / 2 - 1030, 3465 / 2 - 1303
    offx = offx
    offy = -offy
    CellRad = (Cell / 3600.) * np.pi / 180
    L = offy * (Cell / 3600.) * np.pi / 180
    M = -offx * (Cell / 3600.) * np.pi / 180

    l0, m0 = -0.014391694123736577, 0.01437714971330329
    #l0,m0=-0.009454, 0.
    L += l0
    M += m0

    DC["Image"]["Cell"] = Cell
    DC["Image"]["NPix"] = npix
    # DC["Image"]["Padding"]=1
    DC["Data"]["MS"] = "Simul.MS.W0.tsel"
    #/media/6B5E-87D0/DDFacet/Test/TestDegridOleg/TestOlegVLA.MS_p0

    DC["CF"]["OverS"] = 81
    DC["CF"]["Support"] = 9
    DC["CF"]["Nw"] = 2
    DC["CF"]["wmax"] = 100000.
    DC["Stores"]["DeleteDDFProducts"] = False  # True
    IdSharedMem = "123."
    # DC["Selection"]["UVRangeKm"]=[0.2,2000.e6]
    DC["Comp"]["CompDeGridDecorr"] = 0.0
    DC["Image"]["Robust"] = -1
    DC["Image"]["Weighting"] = "Briggs"
    #DC["Comp"]["CompDeGridMode"] = False
    #DC["Comp"]["CompGridMode"] = False
    #DC["Comp"]["DegridMode"] = True

    VS = ClassVisServer.ClassVisServer(
        DC["Data"]["MS"],
        ColName=DC["Data"]["ColName"],
        TVisSizeMin=DC["Data"]["ChunkHours"] * 60 * 1.1,
        # DicoSelectOptions=DicoSelectOptions,
        TChunkSize=DC["Data"]["ChunkHours"],
        Robust=DC["Image"]["Robust"],
        Weighting=DC["Image"]["Weighting"],
        Super=DC["Image"]["SuperUniform"],
        DicoSelectOptions=dict(DC["Selection"]),
        NCPU=DC["Parallel"]["NCPU"],
        GD=DC)

    Padding = DC["Image"]["Padding"]
    #_,npix=EstimateNpix(npix,Padding)
    sh = [1, 1, npix, npix]
    VS.setFOV(sh, sh, sh, CellRad)

    VS.CalcWeights()
    Load = VS.LoadNextVisChunk()
    DATA = VS.VisChunkToShared()

    # DicoConfigGM={"NPix":NpixFacet,
    #               "Cell":Cell,
    #               "ChanFreq":ChanFreq,
    #               "DoPSF":False,
    #               "Support":Support,
    #               "OverS":OverS,
    #               "wmax":wmax,
    #               "Nw":Nw,
    #               "WProj":True,
    #               "DoDDE":self.DoDDE,
    #               "Padding":Padding}
    # GM=ClassDDEGridMachine(Parset.DicoPars,DoDDE=False,WProj=True,lmShift=(0.,0.),JonesDir=3,SpheNorm=True,IdSharedMem="caca")
    # GM=ClassDDEGridMachine(Parset.DicoPars,
    #                        IdSharedMem="caca",
    #                        **DicoConfigGMself.DicoImager[iFacet]["DicoConfigGM"])

    ChanFreq = VS.CurrentMS.ChanFreq.flatten()
    GM = ClassDDEGridMachine(
        DC,
        ChanFreq,
        npix,
        lmShift=(l0, m0),  # self.DicoImager[iFacet]["lmShift"],
        IdSharedMem=IdSharedMem)

    row0 = 0
    row1 = DATA["uvw"].shape[0]  # -1
    uvw = np.float64(DATA["uvw"])  # [row0:row1]
    # uvw[:,2]=0
    times = np.float64(DATA["times"])  # [row0:row1]
    data = np.complex64(DATA["data"])  # [row0:row1]
    # data.fill(1.)
    # data[:,:,0]=1
    # data[:,:,3]=1
    A0 = np.int32(DATA["A0"])  # [row0:row1]
    A1 = np.int32(DATA["A1"])  # [row0:row1]

    DOrig = data.copy()

    # uvw.fill(0)

    flag = np.bool8(DATA["flags"])  # [row0:row1,:,:].copy()
    # ind=np.where(np.logical_not((A0==12)&(A1==14)))[0]
    # flag[ind,:,:]=1
    # flag.fill(0)

    # ind=np.where(np.logical_not((A0==0)&(A1==27)))[0]
    # uvw=uvw[ind].copy()
    # data=data[ind].copy()
    # flag[ind,:,:]=1
    # A0=A0[ind].copy()
    # A1=A1[ind].copy()
    # times=times[ind].copy()

    # MapSmear=NpShared.GiveArray("%sMappingSmearing"%("caca"))
    # stop
    # row=19550
    # print A0[row],A1[row],flag[row]
    # stop

    # DicoJonesMatrices={}
    # DicoClusterDirs=NpShared.SharedToDico("%sDicoClusterDirs"%IdSharedMem)
    # DicoJonesMatrices["DicoClusterDirs"]=DicoClusterDirs

    # DicoJones_Beam=NpShared.SharedToDico("%sJonesFile_Beam"%IdSharedMem)
    # DicoJonesMatrices["DicoJones_Beam"]=DicoJones_Beam
    # DicoJonesMatrices["DicoJones_Beam"]["MapJones"]=NpShared.GiveArray("%sMapJones_Beam"%IdSharedMem)

    DicoJonesMatrices = None

    T = ClassTimeIt.ClassTimeIt("main")

    # print "Start"
    # Grid=GM.put(times,uvw,data,flag,(A0,A1),W=DATA["Weights"],PointingID=0,DoNormWeights=True, DicoJonesMatrices=DicoJonesMatrices)
    # print "OK"
    # pylab.clf()
    # ax=pylab.subplot(1,3,1)
    # pylab.imshow(np.real(Grid[0,0]),cmap="gray",interpolation="nearest")#,vmin=-600,vmax=600)
    # G0=(Grid/np.max(Grid)).copy()

    # pylab.imshow(np.random.rand(50,50))

    # ####

    # GM=ClassDDEGridMachine(DC,
    #                        ChanFreq,
    #                        npix,
    #                        lmShift=(0.,0.),#self.DicoImager[iFacet]["lmShift"],
    #                        IdSharedMem=IdSharedMem)
    # data.fill(1.)
    # Grid=GM.put(times,uvw,data,flag,(A0,A1),W=DATA["Weights"],PointingID=0,DoNormWeights=True, DicoJonesMatrices=DicoJonesMatrices)
    # pylab.subplot(1,3,2,sharex=ax,sharey=ax)
    # pylab.imshow(np.real(Grid[0,0]),cmap="gray",interpolation="nearest")#,vmin=-600,vmax=600)
    # pylab.subplot(1,3,3,sharex=ax,sharey=ax)
    # pylab.imshow(np.real(Grid[0,0])-np.real(G0[0,0]),cmap="gray",interpolation="nearest")#,vmin=-600,vmax=600)
    # pylab.colorbar()
    # pylab.draw()
    # pylab.show(False)

    # return

    Grid = np.zeros(sh, np.complex64)
    T.timeit("grid")
    # Grid[np.isnan(Grid)]=-1

    # Grid[0,0,100,100]=10.

    # Grid.fill(0)
    _, _, n, n = Grid.shape
    Grid[:, :, n / 2 + offx, n / 2 + offy] = 10.

    data.fill(0)

    #GM.GD["Comp"]["CompDeGridMode"] = True
    data = GM.get(times, uvw, data, flag, (A0, A1), Grid,
                  freqs=ChanFreq)  # , DicoJonesMatrices=DicoJonesMatrices)
    data0 = -data.copy()

    # data.fill(0)
    # GM.GD["Comp"]["CompDeGridMode"] = False
    # data1=-GM.get(times,uvw,data,flag,(A0,A1),Grid,freqs=ChanFreq)#,
    # DicoJonesMatrices=DicoJonesMatrices)

    # ind=np.where(((A0==12)&(A1==14)))[0]
    # data0=data0[ind]
    # data1=data1[ind]
    # print data0-data1

    op0 = np.abs
    op1 = np.imag

    # op0=np.abs
    # op1=np.angle

    nbl = VS.CurrentMS.nbl

    U, V, W = uvw.T
    C = 299792458.
    N = np.sqrt(1. - L**2 - M**2)
    U = U.reshape(U.size, 1)
    V = V.reshape(U.size, 1)
    W = W.reshape(U.size, 1)
    #L,M=-0.0194966364621, 0.0112573688
    ChanFreq = ChanFreq.reshape(1, ChanFreq.size)
    K = 10. * np.exp(2. * np.pi * 1j * (ChanFreq[0] / C) * (U * L + V * M + W *
                                                            (N - 1)))
    # ind=np.where((d0-d1)[:]!=0)

    #print -0.0194966364621, 0.0112573688
    #-0.0194967821858 0.0112573736754
    # print L,M

    ind = range(U.size)  # np.where((A0==49)&(A1==55))[0]
    d0 = data0[ind, -1, 0].ravel()
    # d1=data1[ind,-1,0].ravel()
    k = K[ind, -1]
    # k=DOrig[ind,-1,0].ravel()

    # d0=data0[:,:,0].ravel()
    # d1=data1[:,:,0].ravel()
    # k=K[:,:]

    X0 = d0.ravel()
    # X1=d1.ravel()
    Y = k.ravel()

    pylab.clf()
    pylab.subplot(2, 1, 1)
    # pylab.plot(op0(d0))
    pylab.plot(op0(X0))
    # pylab.plot(op0(X1))
    pylab.plot(op0(Y))
    pylab.plot(op0(X0) - op0(Y))
    pylab.subplot(2, 1, 2)
    pylab.plot(op1(X0))
    # pylab.plot(op1(X1))
    pylab.plot(op1(Y))
    pylab.plot(op1(X0) - op1(Y))
    pylab.draw()
    pylab.show()
Example #27
    def __init__(
        self,
        GD,
        ChanFreq,
        Npix,
        lmShift=(0., 0.),
        IDFacet=0,
        SpheNorm=True,
        NFreqBands=1,
        DataCorrelationFormat=[5, 6, 7, 8],
        ExpectedOutputStokes=[1],
        ListSemaphores=None,
        cf_dict=None,
        compute_cf=False,
        wmax=None,  # must be supplied if compute_cf=True
        bda_grid=None,
        bda_degrid=None,
    ):
        """

        Args:
            GD:
            ChanFreq:
            Npix:
            lmShift:
            IdSharedMem:
            IdSharedMemData:
            FacetDataCache:
            ChunkDataCache:
            IDFacet:
            SpheNorm:
            NFreqBands:
            DataCorrelationFormat:
            ExpectedOutputStokes:
            ListSemaphores:
            cf_dict: SharedDict from/to which WTerms and Sphes are saved
            compute_cf: if True, wterm/sphe is recomputed and saved to store_dict
        """
        T = ClassTimeIt.ClassTimeIt("Init_ClassDDEGridMachine")
        T.disable()
        self.GD = GD
        self.IDFacet = IDFacet
        self.SpheNorm = SpheNorm
        self.ListSemaphores = ListSemaphores
        self._bda_grid = bda_grid
        self._bda_degrid = bda_degrid

        # self.DoPSF=DoPSF
        self.DoPSF = False
        # if DoPSF:
        #     self.DoPSF=True
        #     Npix=Npix*2

        Precision = GD["RIME"]["Precision"]
        PolMode = ExpectedOutputStokes

        if Precision == "S":
            self.dtype = np.complex64
        elif Precision == "D":
            self.dtype = np.complex128
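        # NB: the dtype chosen above is overridden below and forced to single precision.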

        self.dtype = np.complex64
        T.timeit("0")
        Padding = GD["Facets"]["Padding"]
        self.NonPaddedNpix, Npix = EstimateNpix(Npix, Padding)
        self.Padding = Npix / float(self.NonPaddedNpix)
        # self.Padding=Padding

        self.LSmear = []
        self.PolMode = PolMode
        # SkyType & JonesType
        # 0: scalar
        # 1: diag
        # 2: full
        #if PolMode == "I":
        #    self.npol = 1
        #    self.PolMap = np.array([0, 5, 5, 0], np.int32)
        #    self.SkyType = 1
        #    self.PolModeID = 0
        #elif PolMode == "IQUV":
        #    self.SkyType = 2
        #    self.npol = 4
        #    self.PolMap = np.array([0, 1, 2, 3], np.int32)
        #    self.PolModeID = 1
        #DEPRECATION:
        #These are only to be used in the degridder; they are deprecated for the gridder
        self.npol = len(ExpectedOutputStokes)
        self.SkyType = 1
        self.PolMap = np.array([0, 5, 5, 0], np.int32)
        self.PolModeID = 0
        self.Npix = Npix

        self.NFreqBands = NFreqBands
        self.NonPaddedShape = (self.NFreqBands, self.npol, self.NonPaddedNpix,
                               self.NonPaddedNpix)

        self.GridShape = (self.NFreqBands, self.npol, self.Npix, self.Npix)

        x0 = (self.Npix - self.NonPaddedNpix) // 2  # +1
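        # pixel range occupied by the unpadded image within the padded grid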
        self.PaddingInnerCoord = (x0, x0 + self.NonPaddedNpix)

        T.timeit("1")

        OverS = GD["CF"]["OverS"]
        Support = GD["CF"]["Support"]
        Nw = GD["CF"]["Nw"]
        Cell = GD["Image"]["Cell"]

        # T=ClassTimeIt.ClassTimeIt("ClassImager")
        # T.disable()

        self.Cell = Cell
        self.incr = (np.array([-Cell, Cell], dtype=np.float64) /
                     3600.) * (np.pi / 180)
        # CF.fill(1.)
        # print self.ChanEquidistant
        # self.FullScalarMode=int(GD["DDESolutions"]["FullScalarMode"])
        # self.FullScalarMode=0

        JonesMode = GD["DDESolutions"]["JonesMode"]
        if JonesMode == "Scalar":
            self.JonesType = 0
        elif JonesMode == "Diag":
            self.JonesType = 1
        elif JonesMode == "Full":
            self.JonesType = 2

        T.timeit("3")

        self.ChanFreq = ChanFreq
        self.Sup = Support
        self.WProj = True
        self.Nw = Nw
        self.OverS = OverS
        self.lmShift = lmShift

        T.timeit("4")
        # if neither is set, then machine is being constructed for ffts only
        if cf_dict or compute_cf:
            self.InitCF(cf_dict, compute_cf, wmax)
        T.timeit("5")

        self.reinitGrid()
        self.CasaImage = None
        self.DicoATerm = None
        T.timeit("6")
        self.DataCorrelationFormat = DataCorrelationFormat
        self.ExpectedOutputStokes = ExpectedOutputStokes
        self._fftw_machine = None
Example #28
def _convolveSingleGaussianNP(shareddict,
                              field_in,
                              field_out,
                              ch,
                              CellSizeRad,
                              GaussPars_ch,
                              Normalise=False,
                              return_gaussian=False):
    """Convolves a single channel in a cube of nchan, npol, Ny, Nx
       @param shareddict: a dictionary containing an input and output array of size
       [nchans, npols, Ny, Nx]
       @param field_in: index of input field in shareddict
       @param field_out: index of the output field in shareddict (can be the
       same as field_in)
       @param ch: index of channel to convolve
       @param CellSizeRad: pixel size in radians of the gaussian in image space
       @param Normalise: normalise the Gaussian amplitude
       @param return_gaussian: return the convolving Gaussian as well
    """
    # The FFT needs to be big enough to avoid spectral leakage in the
    # transforms, so we pad both sides of the stack of images with the same
    # number of pixels. This preserves the baseband component correctly:
    # Even - 4 pixels becomes 6 for instance:
    #   |   |   |  x  |    | => |    |    |    |  x  |    |    |
    # Odd - 3 pixels becomes 5 for instance:
    #   |   |  x  |   | => |   |   |  x  |   |   |
    # IFFTShift will shift the central location down to 0 (middle + 1 and
    # middle for even and odd respectively). After FFT the baseband is at
    # 0 as expected. FFTShift can then recentre the FFT. Going back, IFFTShift
    # is applied again so the baseband is at 0, the ifft is taken, and FFTShift
    # brings the central location back to middle + 1 and middle for even and
    # odd respectively. The signal can then safely be unpadded.
    T = ClassTimeIt.ClassTimeIt()
    T.disable()
    Ain = shareddict[field_in][ch]
    Aout = shareddict[field_out][ch]
    T.timeit("init %d" % ch)
    npol, npix_y, npix_x = Ain.shape
    assert npix_y == npix_x, "Only supports square grids at the moment"
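    # Total (even) number of padding pixels needed to reach the next FFT-friendly
    # size reported by EstimateNpix, split equally between the two edges.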
    pad_edge = max(
        int(np.ceil((ModToolBox.EstimateNpix(npix_x)[1] - npix_x) / 2.0) * 2),
        0)
    PSF = np.pad(GiveGauss(npix_x, CellSizeRad, GaussPars_ch, parallel=True),
                 ((pad_edge // 2, pad_edge // 2),
                  (pad_edge // 2, pad_edge // 2)),
                 mode="constant")

    # PSF=np.ones((Ain.shape[-1],Ain.shape[-1]),dtype=np.float32)
    if Normalise:
        PSF /= np.sum(PSF)
    T.timeit("givegauss %d" % ch)
    fPSF = np.fft.rfft2(iFs(PSF))
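    # Keep only the magnitude so the convolution kernel is zero-phase and
    # introduces no shift in the image plane.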
    fPSF = np.abs(fPSF)
    for pol in range(npol):
        A = iFs(
            np.pad(Ain[pol], ((pad_edge // 2, pad_edge // 2),
                              (pad_edge // 2, pad_edge // 2)),
                   mode="constant"))
        fA = np.fft.rfft2(A)
        nfA = fA * fPSF
        Aout[pol, :, :] = Fs(np.fft.irfft2(
            nfA, s=A.shape))[pad_edge // 2:npix_y + pad_edge // 2,
                             pad_edge // 2:npix_x + pad_edge // 2]

    T.timeit("convolve %d" % ch)
    if return_gaussian:
        return Aout, PSF
    else:
        return Aout
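
A minimal usage sketch of the routine above (not part of the original listing): a plain dict of [nchan, npol, Ny, Nx] arrays stands in for the SharedDict, GaussPars_ch is assumed to follow the module's usual (major axis, minor axis, position angle) convention, and the helpers the routine calls (GiveGauss, iFs/Fs, ModToolBox.EstimateNpix) are assumed to be importable as in the listing. The numerical values are purely illustrative.

import numpy as np

nchan, npol, npix = 2, 1, 256
cube = {"in": np.random.randn(nchan, npol, npix, npix).astype(np.float32),
        "out": np.zeros((nchan, npol, npix, npix), np.float32)}
for ch in range(nchan):
    # convolve each channel of "in" with the Gaussian and write it to "out"
    _convolveSingleGaussianNP(cube, "in", "out", ch,
                              CellSizeRad=4.85e-6,             # ~1 arcsec pixels (hypothetical)
                              GaussPars_ch=(2e-5, 1e-5, 0.0),  # hypothetical beam parameters
                              Normalise=True)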
Example #29
def _convolveSingleGaussianFFTW(shareddict,
                                field_in,
                                field_out,
                                ch,
                                CellSizeRad,
                                GaussPars_ch,
                                Gauss=None,
                                Normalise=False,
                                nthreads=1,
                                return_gaussian=False):
    """Convolves a single channel in a cube of nchan, npol, Ny, Nx
       @param shareddict: a dictionary containing an input and output array of size
       [nchans, npols, Ny, Nx]
       @param field_in: index of input field in shareddict
       @param field_out: index of the output field in shareddict (can be the
       same as field_in)
       @param ch: index of channel to convolve
       @param CellSizeRad: pixel size in radians of the gaussian in image space
       @param nthreads: number of threads to use in FFTW
       @param Gauss: if set, Gaussian to use (has been precomputed)
       @param Normalise: normalise the gaussian amplitude
       @param return_gaussian: return the convolving Gaussian as well
    """
    # The FFT needs to be big enough to avoid spectral leakage in the
    # transforms, so we pad both sides of the stack of images with the same
    # number of pixels. This preserves the baseband component correctly:
    # Even - 4 pixels becomes 6 for instance:
    #   |   |   |  x  |    | => |    |    |    |  x  |    |    |
    # Odd - 3 pixels becomes 5 for instance:
    #   |   |  x  |   | => |   |   |  x  |   |   |
    # IFFTShift will shift the central location down to 0 (middle + 1 and
    # middle for even and odd respectively). After FFT the baseband is at
    # 0 as expected. FFTShift can then recentre the FFT. Going back, IFFTShift
    # is applied again so the baseband is at 0, the ifft is taken, and FFTShift
    # brings the central location back to middle + 1 and middle for even and
    # odd respectively. The signal can then safely be unpadded.
    T = ClassTimeIt.ClassTimeIt()
    T.disable()
    Ain = shareddict[field_in][ch]
    Aout = shareddict[field_out][ch]
    T.timeit("init %d" % ch)
    if Gauss is not None:
        PSF = Gauss
    else:
        PSF = GiveConvolvingGaussian(Ain.shape,
                                     CellSizeRad,
                                     GaussPars_ch,
                                     Normalise=Normalise)
    npol, npix_y, npix_x = Ain.shape
    pad_edge = max(
        int(np.ceil((ModToolBox.EstimateNpix(npix_x)[1] - npix_x) / 2.0) * 2),
        0)

    T.timeit("givegauss %d" % ch)
    fPSF = pyfftw.interfaces.numpy_fft.rfft2(iFs(PSF),
                                             overwrite_input=True,
                                             threads=nthreads)
    fPSF = np.abs(fPSF)
    for pol in range(npol):
        A = iFs(
            np.pad(Ain[pol], ((pad_edge // 2, pad_edge // 2),
                              (pad_edge // 2, pad_edge // 2)),
                   mode="constant"))
        fA = pyfftw.interfaces.numpy_fft.rfft2(A,
                                               overwrite_input=True,
                                               threads=nthreads)
        nfA = fA * fPSF
        Aout[pol, :, :] = Fs(
            pyfftw.interfaces.numpy_fft.irfft2(
                nfA, s=A.shape, overwrite_input=True,
                threads=nthreads))[pad_edge // 2:pad_edge // 2 + npix_y,
                                   pad_edge // 2:pad_edge // 2 + npix_x]
    T.timeit("convolve %d" % ch)

    if return_gaussian:
        return Aout, PSF
    else:
        return Aout
Example #30
    def giveDicoInitIndiv(self,
                          ListIslands,
                          ModelImage,
                          DicoDirty,
                          ListDoIsland=None,
                          Parallel=True):
        NCPU = self.NCPU
        work_queue = multiprocessing.JoinableQueue()
        ListIslands = ListIslands  #[300:308]
        DoIsland = True

        for iIsland in range(len(ListIslands)):
            if ListDoIsland is not None:
                DoIsland = ListDoIsland[iIsland]
            if DoIsland: work_queue.put({"iIsland": iIsland})

        result_queue = multiprocessing.JoinableQueue()
        NJobs = work_queue.qsize()
        workerlist = []

        logger.setSilent(SilentModules)
        #MyLogger.setLoud(SilentModules)

        #MyLogger.setLoud("ClassImageDeconvMachineMSMF")

        print("Launch MORESANE workers", file=log)
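        # Spawn one WorkerInitMSMF per CPU; in the serial case they are not
        # started as processes but run() inline below.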
        for ii in range(NCPU):
            W = WorkerInitMSMF(work_queue, result_queue, self.GD,
                               self.DicoVariablePSF, DicoDirty, self.RefFreq,
                               self.GridFreqs, self.DegridFreqs,
                               self.MainCache, ModelImage, ListIslands,
                               self.IdSharedMem)
            workerlist.append(W)
            if Parallel:
                workerlist[ii].start()

        timer = ClassTimeIt.ClassTimeIt()
        pBAR = ProgressBar(Title="  MORESANing islands ")
        #pBAR.disable()
        pBAR.render(0, NJobs)
        iResult = 0
        if not Parallel:
            for ii in range(NCPU):
                workerlist[ii].run()  # just run until all work is completed

        self.DicoInitIndiv = {}
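        # Poll the result queue; each finished island publishes a shared dict,
        # which is copied into DicoInitIndiv and then removed from shared memory.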
        while iResult < NJobs:
            DicoResult = None
            if result_queue.qsize() != 0:
                try:
                    DicoResult = result_queue.get()
                except Exception:
                    pass

            if DicoResult is None:
                time.sleep(0.5)
                continue

            if DicoResult["Success"]:
                iResult += 1
                NDone = iResult

                pBAR.render(NDone, NJobs)

                iIsland = DicoResult["iIsland"]
                NameDico = "%sDicoInitIsland_%5.5i" % (self.IdSharedMem,
                                                       iIsland)
                Dico = NpShared.SharedToDico(NameDico)
                self.DicoInitIndiv[iIsland] = copy.deepcopy(Dico)
                NpShared.DelAll(NameDico)

        if Parallel:
            for ii in range(NCPU):
                workerlist[ii].shutdown()
                workerlist[ii].terminate()
                workerlist[ii].join()

        #MyLogger.setLoud(["pymoresane.main"])
        #MyLogger.setLoud(["ClassImageDeconvMachineMSMF","ClassPSFServer","ClassMultiScaleMachine","GiveModelMachine","ClassModelMachineMSMF"])
        return self.DicoInitIndiv