def fillSharedMemDictForLocalStep(self, ShMem=None):
    """ Get dict of shared mem arrays needed for parallel local step.

    Returns
    -------
    ShMem : dict of RawArray objects
    """
    if not isinstance(ShMem, dict):
        ShMem = dict()
    K = self.K
    if 'startTheta' not in ShMem:
        # First call: allocate fresh shared-memory copies of both arrays.
        ShMem['startTheta'] = numpyToSharedMemArray(self.startTheta)
        ShMem['transTheta'] = numpyToSharedMemArray(self.transTheta)
    else:
        # Buffers already exist: overwrite the leading K(+1) slots in place.
        # The shared buffers may be over-allocated (>= current K), so only
        # the active region is written.
        startView = sharedMemToNumpyArray(ShMem['startTheta'])
        assert startView.size >= K + 1
        startView[:K + 1] = self.startTheta
        transView = sharedMemToNumpyArray(ShMem['transTheta'])
        assert transView.shape[0] >= K
        assert transView.shape[1] >= K + 1
        transView[:K, :K + 1] = self.transTheta
    return ShMem
def fillSharedMemDictForLocalStep(self, ShMem=None):
    """ Get dict of shared mem arrays needed for parallel local step.

    Returns
    -------
    ShMem : dict of RawArray objects
    """
    if ShMem is None:
        ShMem = dict()
    if 'nu' not in ShMem:
        # First call: allocate shared-memory copies of posterior parameters.
        ShMem['nu'] = numpyToSharedMemArray(self.Post.nu)
        ShMem['kappa'] = numpyToSharedMemArray(self.Post.kappa)
        # Post.m is strided, so we need to copy it to do shared mem.
        ShMem['m'] = numpyToSharedMemArray(self.Post.m.copy())
        ShMem['beta'] = numpyToSharedMemArray(self.Post.beta.copy())
        ShMem['E_logL'] = numpyToSharedMemArray(self._E_logL('all'))
    else:
        # Buffers already exist: refresh their contents in place.
        fillSharedMemArray(ShMem['nu'], self.Post.nu)
        fillSharedMemArray(ShMem['kappa'], self.Post.kappa)
        fillSharedMemArray(ShMem['m'], self.Post.m)
        fillSharedMemArray(ShMem['beta'], self.Post.beta)
        fillSharedMemArray(ShMem['E_logL'], self._E_logL('all'))
    return ShMem
def getRawDataAsSharedMemDict(self):
    ''' Create dict with copies of raw data as shared memory arrays
    '''
    # vocab_size is a plain int, so it is stored directly rather than
    # converted to a shared array.
    dataShMemDict = {
        'doc_range': numpyToSharedMemArray(self.doc_range),
        'word_id': numpyToSharedMemArray(self.word_id),
        'word_count': numpyToSharedMemArray(self.word_count),
        'vocab_size': self.vocab_size,
    }
    return dataShMemDict
def getRawDataAsSharedMemDict(self):
    ''' Create dict with copies of raw data as shared memory arrays
    '''
    # nDocTotal is a plain scalar, stored directly.
    dataShMemDict = {
        'X': numpyToSharedMemArray(self.X),
        'doc_range': numpyToSharedMemArray(self.doc_range),
        'nDocTotal': self.nDocTotal,
    }
    # Xprev only exists for some datasets; share it when present.
    if hasattr(self, 'Xprev'):
        dataShMemDict['Xprev'] = numpyToSharedMemArray(self.Xprev)
    return dataShMemDict
def fillSharedMemDictForLocalStep(self, ShMem=None):
    """ Get dict of shared mem arrays needed for parallel local step.

    Returns
    -------
    ShMem : dict of RawArray objects
    """
    if ShMem is None:
        ShMem = dict()
    if 'nu' not in ShMem:
        # First call: allocate shared-memory copies of posterior quantities.
        ShMem['nu'] = numpyToSharedMemArray(self.Post.nu)
        ShMem['cholB'] = numpyToSharedMemArray(self._cholB('all'))
        ShMem['E_logdetL'] = numpyToSharedMemArray(self._E_logdetL('all'))
    else:
        # Buffers already exist: refresh their contents in place.
        fillSharedMemArray(ShMem['nu'], self.Post.nu)
        fillSharedMemArray(ShMem['cholB'], self._cholB('all'))
        fillSharedMemArray(ShMem['E_logdetL'], self._E_logdetL('all'))
    return ShMem
def fillSharedMemDictForLocalStep(self, ShMem=None):
    """ Get dict of shared mem arrays needed for parallel local step.

    Returns
    -------
    ShMem : dict of RawArray objects
    """
    ElogphiT, Elog1mphiT = self.GetCached('E_logphiT_log1mphiT', 'all')
    K = self.K
    if ShMem is None:
        ShMem = dict()
    if 'ElogphiT' not in ShMem:
        # First call: allocate shared-memory copies of both expectations.
        ShMem['ElogphiT'] = numpyToSharedMemArray(ElogphiT)
        ShMem['Elog1mphiT'] = numpyToSharedMemArray(Elog1mphiT)
    else:
        # Buffers already exist: overwrite the leading K columns in place.
        ElogphiT_shView = sharedMemToNumpyArray(ShMem['ElogphiT'])
        # BUGFIX: compare the column count to K, not the shape *tuple*
        # (`tuple >= int` raises TypeError on Python 3). The write below
        # targets the first K columns, so axis 1 is the one to check.
        assert ElogphiT_shView.shape[1] >= K
        ElogphiT_shView[:, :K] = ElogphiT
        Elog1mphiT_shView = sharedMemToNumpyArray(ShMem['Elog1mphiT'])
        assert Elog1mphiT_shView.shape[1] >= K
        Elog1mphiT_shView[:, :K] = Elog1mphiT
    return ShMem
def fillSharedMemDictForLocalStep(self, ShMem=None):
    """ Get dict of shared mem arrays needed for parallel local step.

    Returns
    -------
    ShMem : dict of RawArray objects
    """
    if not isinstance(ShMem, dict):
        ShMem = dict()
    alphaEbeta = self.alpha_E_beta()
    if 'alphaEbeta' not in ShMem:
        # First call: allocate a shared-memory copy.
        ShMem['alphaEbeta'] = numpyToSharedMemArray(alphaEbeta.copy())
    else:
        # Buffer already exists: overwrite the active entries in place.
        sharedView = sharedMemToNumpyArray(ShMem['alphaEbeta'])
        assert sharedView.size >= self.K
        sharedView[:alphaEbeta.size] = alphaEbeta
    return ShMem