def FindAlpha(self):
    """Compute per-direction, per-antenna regularisation alphas for smoothing.

    Fetches the killMS Jones matrices, evaluates FindAlphaSingleDir for every
    cluster direction (l, m), stores the result in self.AlphaReg with shape
    (NDir, na), and publishes it to shared memory as "<IdSharedMem>AlphaReg".
    """
    self.DicoJonesMatrices = self.GiveDicoJonesMatrices()
    jones_dico = self.DicoJonesMatrices
    print(" Find Alpha for smoothing", file=log)
    cluster_dirs = jones_dico["DicoJones_killMS"]["DicoClusterDirs"]
    dir_l = cluster_dirs["l"].tolist()
    dir_m = cluster_dirs["m"].tolist()
    n_dir = len(dir_l)
    jones = jones_dico["DicoJones_killMS"]["Jones"]
    # Jones cube axes: (time, direction, antenna, freq, 2, 2) — only the
    # antenna count is needed here to size the output.
    nt, nd, na, nf, _, _ = jones.shape
    self.AlphaReg = np.zeros((n_dir, na), np.float32)
    for i_dir, (l, m) in enumerate(zip(dir_l, dir_m)):
        self.AlphaReg[i_dir, :] = self.FindAlphaSingleDir(jones_dico, l, m)
    NpShared.ToShared("%sAlphaReg" % self.IdSharedMem, self.AlphaReg)
def __setitem__(self, item, value):
    """Store *value* under key *item*, mirroring it to shared memory / disk.

    numpy arrays are copied into a SHM segment, dict-likes are copied into a
    nested SharedDict, and anything else is pickled to a file next to the
    dict's backing path.

    Raises:
        RuntimeError: if the dict was attached read-only, or if the on-disk
            entry already exists without being in the local dict (a
            multiprocessing logic error).
        KeyError: for key types not listed in _allowed_key_types.
    """
    import shutil  # local import: only needed on the subdict-removal path
    if not self._readwrite:
        raise RuntimeError("SharedDict %s attached as read-only" % self.path)
    if type(item).__name__ not in _allowed_key_types:
        raise KeyError("unsupported key of type " + type(item).__name__)
    name = self._key_to_name(item)
    path = os.path.join(self.path, name)
    # remove previous item from SHM, if it's in the local dict
    if collections.OrderedDict.__contains__(self, item):
        # 'a' = array file, 'p' = pickle file
        for suffix in "ap":
            if os.path.exists(path + suffix):
                os.unlink(path + suffix)
        # 'd' = subdict directory. Use shutil.rmtree rather than
        # os.system("rm -fr ..."): no shell is spawned, so paths containing
        # spaces or shell metacharacters are handled safely.
        if os.path.exists(path + "d"):
            shutil.rmtree(path + "d", ignore_errors=True)
    # if item is not in local dict but is on disk, this is a multiprocessing logic error
    else:
        for suffix in "apd":
            if os.path.exists(path + suffix):
                raise RuntimeError(
                    "SharedDict entry %s exists, possibly added by another process. This is most likely a bug!" % (path + suffix))
    # for arrays, copy to a shared array
    if isinstance(value, np.ndarray):
        value = NpShared.ToShared(_to_shm(path + 'a'), value)
    # for regular dicts, copy across
    elif isinstance(value, (dict, SharedDict, collections.OrderedDict)):
        dict1 = self.addSubdict(item)
        # "iteritems" for legacy py2 dicts, .items() otherwise
        for key1, value1 in getattr(value, "iteritems", value.items)():
            dict1[key1] = value1
        value = dict1
    # all other types, just use pickle. The 'with' block closes the file
    # promptly (the previous code leaked the handle until garbage collection).
    else:
        with open(path + 'p', "wb") as fobj:
            cPickle.dump(value, fobj, 2)
    collections.OrderedDict.__setitem__(self, item, value)
def giveMinDist(self, DicoJob):
    """Worker job: minimum pixel separation from one island to every island.

    For island DicoJob["iIsland"], computes against each island j the closest
    pixel pair and records (dx, dy, distance) in a (3, NIslands) int32 array
    (the distance is truncated to int on store). The array is published to
    shared memory as "<IdSharedMem>Distances_<iIsland>" and a success record
    is put on self.result_queue.
    """
    iIsland = DicoJob["iIsland"]
    NIslands = len(self.ListIslands)
    Result = np.zeros((3, NIslands), np.int32)
    # ListIslands[i] is a sequence of (x, y) pixel coordinates
    x0, y0 = np.array(self.ListIslands[iIsland]).T
    for jIsland in range(NIslands):
        x1, y1 = np.array(self.ListIslands[jIsland]).T
        # all-pairs coordinate differences via broadcasting
        dx = x0.reshape((-1, 1)) - x1.reshape((1, -1))
        dy = y0.reshape((-1, 1)) - y1.reshape((1, -1))
        d = np.sqrt(dx**2 + dy**2)
        # Locate the closest pixel pair in a single pass; argmin returns the
        # first minimum in row-major order, matching the first index that
        # np.where(d == d.min()) used to yield. (Also drops an unused local.)
        indx, indy = np.unravel_index(np.argmin(d), d.shape)
        Result[0, jIsland] = dx[indx, indy]
        Result[1, jIsland] = dy[indx, indy]
        Result[2, jIsland] = d[indx, indy]
    NpShared.ToShared("%sDistances_%6.6i" % (self.IdSharedMem, iIsland), Result)
    self.result_queue.put({"iIsland": iIsland, "Success": True})
def __init__(self, MeanResidual, MeanModelImage, PSFServer, DeltaChi2=4., IdSharedMem="", NCPU=6):
    """Set up the model-smearing machine.

    Publishes the model/residual images to shared memory, estimates the
    residual RMS from a random pixel sample, builds a bank of normalised
    Gaussian kernels, and declares one convolution machine per PSF facet
    (plus one for the mean PSF, whose inverse covariance is precomputed).

    Args:
        MeanResidual: residual image cube (indexed [0,0,...] elsewhere —
            presumably shape (1, 1, ny, nx); TODO confirm).
        MeanModelImage: model image cube, same layout as MeanResidual.
        PSFServer: provider of per-facet PSFs (DicoVariablePSF, NFacets).
        DeltaChi2: chi-squared threshold used by the fitting workers.
        IdSharedMem: prefix for shared-memory segment names.
        NCPU: number of worker processes to use later in Smear().
    """
    # Namespace all SHM segments under "<IdSharedMem>SmearSM." and clear leftovers
    IdSharedMem += "SmearSM."
    NpShared.DelAll(IdSharedMem)
    self.IdSharedMem = IdSharedMem
    self.NCPU = NCPU
    self.MeanModelImage = NpShared.ToShared(
        "%sMeanModelImage" % self.IdSharedMem, MeanModelImage)
    self.MeanResidual = NpShared.ToShared(
        "%sMeanResidual" % self.IdSharedMem, MeanResidual)
    # RMS estimated from a random sample of residual pixels (sampling with
    # replacement; duplicates possible but harmless for a std estimate)
    NPixStats = 10000
    RandomInd = np.int64(np.random.rand(NPixStats) * (MeanResidual.size))
    self.RMS = np.std(np.real(self.MeanResidual.ravel()[RandomInd]))
    self.FWHMMin = 3.
    self.PSFServer = PSFServer
    self.DeltaChi2 = DeltaChi2
    self.Var = self.RMS**2
    # Side of the square stamp used for the Gaussian fits (odd so it has a centre)
    self.NImGauss = 31
    self.CubeMeanVariablePSF = NpShared.ToShared(
        "%sCubeMeanVariablePSF" % self.IdSharedMem,
        self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'])
    self.DicoConvMachine = {}
    N = self.NImGauss
    # Pixel-offset grids covering the N x N stamp, centred on 0
    dx, dy = np.mgrid[-(N // 2):N // 2:1j * N, -(N // 2):N // 2:1j * N]
    ListPixParms = [(int(dx.ravel()[i]), int(dy.ravel()[i]))
                    for i in range(dx.size)]
    ListPixData = ListPixParms
    ConvMode = "Matrix"
    N = self.NImGauss
    #stop
    #for
    #ClassConvMachine():
    #def __init__(self,PSF,ListPixParms,ListPixData,ConvMode):
    # Radial distance of every stamp pixel from the centre
    d = np.sqrt(dx**2 + dy**2)
    self.dist = d
    # Bank of NGauss kernels: a delta function plus Gaussians of increasing
    # width (sigma in (0, 2]), each normalised to unit sum
    self.NGauss = 10
    GSig = np.linspace(0., 2, self.NGauss)
    self.GSig = GSig
    ListGauss = []
    One = np.zeros_like(d)
    One[N // 2, N // 2] = 1.
    ListGauss.append(One)
    for sig in GSig[1::]:
        v = np.exp(-d**2 / (2. * sig**2))
        Sv = np.sum(v)
        v /= Sv
        ListGauss.append(v)
    self.ListGauss = ListGauss
    print("Declare convolution machines", file=log)
    NJobs = self.PSFServer.NFacets
    pBAR = ProgressBar(Title=" Declare ")
    #pBAR.disable()
    pBAR.render(0, '%4i/%i' % (0, NJobs))
    for iFacet in range(self.PSFServer.NFacets):
        #print iFacet,"/",self.PSFServer.NFacets
        PSF = self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'][
            iFacet]  #[0,0]
        _, _, NPixPSF, _ = PSF.shape
        # Crop the central (2N+1)-pixel window of the facet PSF
        PSF = PSF[:, :, NPixPSF // 2 - N:NPixPSF // 2 + N + 1,
                  NPixPSF // 2 - N:NPixPSF // 2 + N + 1]
        #print PSF.shape
        #sig=1
        #PSF=(np.exp(-self.dist**2/(2.*sig**2))).reshape(1,1,N,N)
        self.DicoConvMachine[iFacet] = ClassConvMachine.ClassConvMachine(
            PSF, ListPixParms, ListPixData, ConvMode)
        # Publish each facet's convolution matrix for the worker processes
        CM = self.DicoConvMachine[iFacet].CM
        NpShared.ToShared("%sCM_Facet%4.4i" % (self.IdSharedMem, iFacet),
                          CM)
        #invCM=ModLinAlg.invSVD(np.float64(CM[0,0]))/self.Var
        #NpShared.ToShared("%sInvCov_Facet%4.4i"%(self.IdSharedMem,iFacet),invCM)
        NDone = iFacet + 1
        intPercent = int(100 * NDone / float(NJobs))
        pBAR.render(intPercent, '%4i/%i' % (NDone, NJobs))
    # Mean-PSF convolution machine, and its inverse covariance (SVD
    # pseudo-inverse, cut at 1e-8, scaled by the noise variance)
    PSFMean = np.mean(
        self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'], axis=0)
    self.ConvMachineMeanPSF = ClassConvMachine.ClassConvMachine(
        PSFMean, ListPixParms, ListPixData, ConvMode)
    CM = self.ConvMachineMeanPSF.CM
    invCM = ModLinAlg.invSVD(np.float64(CM[0, 0]), Cut=1e-8) / self.Var
    NpShared.ToShared("%sInvCov_AllFacet" % (self.IdSharedMem), invCM)
    self.FindSupport()
def Smear(self, Parallel=True):
    """Replace model components by fitted Gaussians, in parallel.

    Chunks the non-zero model pixels into shared-memory queues, spawns
    WorkerSmear processes to fit a Gaussian size per pixel, then accumulates
    the corresponding restored Gaussians into self.ModelOut.

    Args:
        Parallel: if False, run a single worker inline (synchronously).

    Returns:
        self.ModelOut: image cube of the same shape as MeanModelImage.
    """
    if Parallel:
        NCPU = self.NCPU
    else:
        NCPU = 1
    StopWhenQueueEmpty = True
    print("Building queue", file=log)
    self.ModelOut = np.zeros_like(self.MeanModelImage)
    # Pixels to process: every non-zero pixel of the (first-band) model plane
    indx, indy = np.where(self.MeanModelImage[0, 0] != 0)
    #indx,indy=np.where(self.MeanModelImage==np.max(self.MeanModelImage))
    work_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    # Chunk size: ~100 chunks per CPU, at least one pixel per chunk
    SizeMax = int(indx.size / float(NCPU) / 100.)
    SizeMax = np.max([SizeMax, 1])
    iPix = 0
    iQueue = 0
    Queue = []
    while iPix < indx.size:
        xc, yc = indx[iPix], indy[iPix]
        FacetID = self.PSFServer.giveFacetID2(xc, yc)
        #DicoOrder={"xy":(xc,yc),
        #           "FacetID":FacetID}
        Queue.append([xc, yc, FacetID])
        iPix += 1
        # Flush a full chunk (or the final partial one) to shared memory and
        # hand the workers just its index
        if (len(Queue) == SizeMax) | (iPix == indx.size):
            NpShared.ToShared("%sQueue_%3.3i" % (self.IdSharedMem, iQueue),
                              np.array(Queue))
            work_queue.put(iQueue)
            Queue = []
            iQueue += 1
    # NOTE(review): NJobs counts pixels, but the result loop below appears to
    # receive one result per *chunk* and advance iResult by 1 — confirm the
    # worker's reply granularity matches this count.
    NJobs = indx.size
    workerlist = []
    pBAR = ProgressBar(Title=" Find gaussian")
    #pBAR.disable()
    pBAR.render(0, '%4i/%i' % (0, NJobs))
    for ii in range(NCPU):
        W = WorkerSmear(work_queue,
                        result_queue,
                        IdSharedMem=self.IdSharedMem,
                        StopWhenQueueEmpty=StopWhenQueueEmpty,
                        NImGauss=self.NImGauss,
                        DeltaChi2=self.DeltaChi2,
                        ListGauss=self.ListGauss,
                        GSig=self.GSig,
                        Var=self.Var,
                        SigMin=self.SigMin)
        workerlist.append(W)
        if Parallel:
            workerlist[ii].start()
        else:
            # Serial mode: run() executes the worker loop in this process
            workerlist[ii].run()
    N = self.NImGauss
    iResult = 0
    #print "!!!!!!!!!!!!!!!!!!!!!!!!",iResult,NJobs
    while iResult < NJobs:
        DicoResult = None
        # for result_queue in List_Result_queue:
        #     if result_queue.qsize()!=0:
        #         try:
        #             DicoResult=result_queue.get_nowait()
        #             break
        #         except:
        #             pass
        #         #DicoResult=result_queue.get()
        #print "!!!!!!!!!!!!!!!!!!!!!!!!! Qsize",result_queue.qsize()
        #print work_queue.qsize(),result_queue.qsize()
        # Non-blocking poll; qsize() can race with the workers, hence the
        # swallow-and-retry around get_nowait()
        if result_queue.qsize() != 0:
            try:
                DicoResult = result_queue.get_nowait()
            except:
                pass
            #DicoResult=result_queue.get()
        if DicoResult is None:
            time.sleep(0.001)
            continue
        if DicoResult["Success"]:
            iQueue = DicoResult["iQueue"]
            # Re-read this chunk's pixel list; workers have stored the fitted
            # Gaussian index in its third column
            Queue = NpShared.GiveArray("%sQueue_%3.3i" %
                                       (self.IdSharedMem, iQueue))
            for iJob in range(Queue.shape[0]):
                x0, y0, iGauss = Queue[iJob]
                SMax = self.MeanModelImage[0, 0, x0, y0]
                # View into the output stamp centred on (x0, y0); the +=
                # below accumulate in place
                SubModelOut = self.ModelOut[0, 0][x0 - N // 2:x0 + N // 2 + 1,
                                                  y0 - N // 2:y0 + N // 2 + 1]
                SubModelOut += self.ListRestoredGauss[iGauss] * SMax
                #iGauss=0
                #print
                #print SMax
                #print np.sum(self.ListGauss[iGauss])
                #print
                # NOTE(review): both the restored and the raw Gaussian are
                # added for the same component — confirm this double
                # accumulation is intended.
                SubModelOut += self.ListGauss[iGauss] * SMax
        iResult += 1
        NDone = iResult
        intPercent = int(100 * NDone / float(NJobs))
        pBAR.render(intPercent, '%4i/%i' % (NDone, NJobs))
    # Best-effort worker teardown; broad except keeps shutdown from raising
    # if a worker already exited
    for ii in range(NCPU):
        try:
            workerlist[ii].shutdown()
            workerlist[ii].terminate()
            workerlist[ii].join()
        except:
            pass
    return self.ModelOut