def _render_chunks_async(self, chunks, processes):
    """Starts up a process pool and renders all the chunks asynchronously.

    chunks is a list of (col, row, chunkfile)

    Returns a dictionary mapping (col, row) to the file where that chunk
    is rendered as an image
    """
    # The set of chunks to render, or None for all of them. The logic is
    # slightly more complicated than it should seem, since we still need to
    # build the results dict out of all chunks, even if they're not being
    # rendered.
    inclusion_set = self._get_chunk_renderset()
    results = {}
    if processes == 1:
        # Skip the multiprocessing stuff
        logging.debug("Rendering chunks synchronously since you requested 1 process")
        for i, (col, row, chunkfile) in enumerate(chunks):
            if inclusion_set and (col, row) not in inclusion_set:
                # Skip rendering, just find where the existing image is
                _, imgpath = chunk.ChunkRenderer(chunkfile, self.cachedir).find_oldimage(False)
                if imgpath:
                    results[(col, row)] = imgpath
                continue
            result = chunk.render_and_save(chunkfile, self.cachedir, cave=self.caves)
            results[(col, row)] = result
            # BUG FIX: the old test `1000 % i == 0 or i % 1000 == 0` also
            # fired for every divisor of 1000 (i=1,2,4,5,8,...,500),
            # spamming the log. Report once every 1000 chunks instead.
            if i > 0 and i % 1000 == 0:
                logging.info("{0}/{1} chunks rendered".format(i, len(chunks)))
    else:
        logging.debug("Rendering chunks in {0} processes".format(processes))
        pool = multiprocessing.Pool(processes=processes)
        asyncresults = []
        for col, row, chunkfile in chunks:
            if inclusion_set and (col, row) not in inclusion_set:
                # Skip rendering, just find where the existing image is
                _, imgpath = chunk.ChunkRenderer(chunkfile, self.cachedir).find_oldimage(False)
                if imgpath:
                    results[(col, row)] = imgpath
                continue
            result = pool.apply_async(
                chunk.render_and_save,
                args=(chunkfile, self.cachedir),
                kwds=dict(cave=self.caves))
            asyncresults.append((col, row, result))
        pool.close()
        for i, (col, row, result) in enumerate(asyncresults):
            # .get() blocks until that worker finishes, so iterating in
            # submission order is a natural progress meter.
            results[(col, row)] = result.get()
            # Same divisor-of-1000 log-spam fix as the synchronous branch.
            if i > 0 and i % 1000 == 0:
                logging.info("{0}/{1} chunks rendered".format(i, len(asyncresults)))
        pool.join()
    logging.info("Done!")
    return results
def _render_chunks_async(self, chunks, processes): """Starts up a process pool and renders all the chunks asynchronously. chunks is a list of (col, row, chunkfile) Returns a dictionary mapping (col, row) to the file where that chunk is rendered as an image """ results = {} if processes == 1: # Skip the multiprocessing stuff print "Rendering chunks synchronously since you requested 1 process" for i, (col, row, chunkfile) in enumerate(chunks): result = chunk.render_and_save(chunkfile, cave=self.caves) results[(col, row)] = result if i > 0: if 1000 % i == 0 or i % 1000 == 0: print "{0}/{1} chunks rendered".format(i, len(chunks)) else: print "Rendering chunks in {0} processes".format(processes) pool = multiprocessing.Pool(processes=processes) asyncresults = [] for col, row, chunkfile in chunks: result = pool.apply_async(chunk.render_and_save, args=(chunkfile,), kwds=dict(cave=self.caves)) asyncresults.append((col, row, result)) pool.close() for i, (col, row, result) in enumerate(asyncresults): results[(col, row)] = result.get() if i > 0: if 1000 % i == 0 or i % 1000 == 0: print "{0}/{1} chunks rendered".format(i, len(chunks)) pool.join() print "Done!" return results
def go():
    """Render and save every chunk listed in the module-level ``chunklist``.

    Each entry is expected to carry the chunk file path at index 2
    (presumably a (col, row, chunkfile) tuple -- only index 2 is used
    here). Relies on the module-level world object ``w`` for the cache
    directory.
    """
    for entry in chunklist:
        chunk.render_and_save(entry[2], w.cachedir, w, (None, None), None)
def _render_chunks_async(self, chunks, processes):
    """Starts up a process pool and renders all the chunks asynchronously.

    chunks is a list of (col, row, chunkfile)

    Returns a dictionary mapping (col, row) to the file where that chunk
    is rendered as an image
    """
    # The set of chunks to render, or None for all of them. The logic is
    # slightly more complicated than it should seem, since we still need to
    # build the results dict out of all chunks, even if they're not being
    # rendered.
    inclusion_set = self._get_chunk_renderset()
    results = {}
    # Manager-backed queue: worker processes push POI add/remove events
    # back to this process through it.
    manager = multiprocessing.Manager()
    q = manager.Queue()
    if processes == 1:
        # Skip the multiprocessing stuff
        logging.debug("Rendering chunks synchronously since you requested 1 process")
        for i, (col, row, chunkfile) in enumerate(chunks):
            if inclusion_set and (col, row) not in inclusion_set:
                # Skip rendering, just find where the existing image is.
                # NOTE(review): `cached` is not defined in this method --
                # presumably a module/global cache structure; confirm.
                _, imgpath = chunk.find_oldimage(chunkfile, cached, self.caves)
                if imgpath:
                    results[(col, row)] = imgpath
                continue
            oldimg = chunk.find_oldimage(chunkfile, cached, self.caves)
            if chunk.check_cache(chunkfile, oldimg):
                # Cache hit: reuse the existing rendered image path.
                result = oldimg[1]
            else:
                result = chunk.render_and_save(chunkfile, self.cachedir, self, oldimg, queue=q)
            results[(col, row)] = result
            if i > 0:
                # Drain at most one pending POI event per chunk rendered.
                try:
                    item = q.get(block=False)
                    if item[0] == "newpoi":
                        self.POI.append(item[1])
                    elif item[0] == "removePOI":
                        self.persistentData['POI'] = filter(
                            lambda x: x['chunk'] != item[1],
                            self.persistentData['POI'])
                except Queue.Empty:
                    pass
                # BUG FIX: the old test `1000 % i == 0 or i % 1000 == 0`
                # also fired for every divisor of 1000 (i=1,2,4,5,...),
                # spamming the log. Report every 1000 chunks instead.
                if i % 1000 == 0:
                    logging.info("{0}/{1} chunks rendered".format(i, len(chunks)))
    else:
        logging.debug("Rendering chunks in {0} processes".format(processes))
        pool = multiprocessing.Pool(processes=processes)
        asyncresults = []
        for col, row, chunkfile in chunks:
            if inclusion_set and (col, row) not in inclusion_set:
                # Skip rendering, just find where the existing image is.
                # NOTE(review): `cached` undefined here as well -- confirm.
                _, imgpath = chunk.find_oldimage(chunkfile, cached, self.caves)
                if imgpath:
                    results[(col, row)] = imgpath
                continue
            oldimg = chunk.find_oldimage(chunkfile, cached, self.caves)
            if chunk.check_cache(chunkfile, oldimg):
                # Cache hit: wrap the path so the collection loop below can
                # call .get() uniformly on real and fake async results.
                result = FakeAsyncResult(oldimg[1])
            else:
                result = pool.apply_async(
                    chunk.render_and_save,
                    args=(chunkfile, self.cachedir, self, oldimg),
                    kwds=dict(cave=self.caves, queue=q))
            asyncresults.append((col, row, result))
        pool.close()
        for i, (col, row, result) in enumerate(asyncresults):
            results[(col, row)] = result.get()
            # Drain at most one pending POI event per completed chunk.
            try:
                item = q.get(block=False)
                if item[0] == "newpoi":
                    self.POI.append(item[1])
                elif item[0] == "removePOI":
                    self.persistentData['POI'] = filter(
                        lambda x: x['chunk'] != item[1],
                        self.persistentData['POI'])
            except Queue.Empty:
                pass
            # Same divisor-of-1000 log-spam fix as the synchronous branch.
            if i > 0 and i % 1000 == 0:
                logging.info("{0}/{1} chunks rendered".format(i, len(asyncresults)))
        pool.join()
    logging.info("Done!")
    return results
def _render_chunks_async(self, chunks, processes):
    """Starts up a process pool and renders all the chunks asynchronously.

    chunks is a list of (col, row, (chunkX, chunkY)). Use chunkX,chunkY to
    find the chunk data in a region file

    Returns a dictionary mapping (col, row) to the file where that chunk
    is rendered as an image
    """
    # The set of chunks to render, or None for all of them. The logic is
    # slightly more complicated than it should seem, since we still need to
    # build the results dict out of all chunks, even if they're not being
    # rendered.
    inclusion_set = self._get_chunk_renderset()
    results = {}
    # Manager-backed queue: worker processes push POI add/remove events
    # back to this process through it.
    manager = multiprocessing.Manager()
    q = manager.Queue()
    if processes == 1:
        # Skip the multiprocessing stuff
        logging.debug(
            "Rendering chunks synchronously since you requested 1 process")
        for i, (col, row, chunkXY) in enumerate(chunks):
            # TODO: re-enable the inclusion_set short-circuit once
            # find_oldimage works with chunkXY coordinates:
            #if inclusion_set and (col, row) not in inclusion_set:
            #    # Skip rendering, just find where the existing image is
            #    _, imgpath = chunk.find_oldimage(chunkfile, cached, self.caves)
            #    if imgpath:
            #        results[(col, row)] = imgpath
            #    continue
            # NOTE(review): `cached` is not defined in this method --
            # presumably a module/global cache structure; confirm.
            oldimg = chunk.find_oldimage(chunkXY, cached, self.caves)
            # TODO remove this shortcircuit
            if chunk.check_cache(self, chunkXY, oldimg):
                # Cache hit: reuse the existing rendered image path.
                result = oldimg[1]
            else:
                #logging.debug("check cache failed, need to render (could be ghost chunk)")
                result = chunk.render_and_save(
                    chunkXY, self.cachedir, self, oldimg, queue=q)
            # render_and_save may return a falsy result (e.g. ghost chunk);
            # only record real images.
            if result:
                results[(col, row)] = result
            if i > 0:
                # Drain at most one pending POI event per chunk rendered.
                try:
                    item = q.get(block=False)
                    if item[0] == "newpoi":
                        self.POI.append(item[1])
                    elif item[0] == "removePOI":
                        self.persistentData['POI'] = filter(
                            lambda x: x['chunk'] != item[1],
                            self.persistentData['POI'])
                except Queue.Empty:
                    pass
                # BUG FIX: the old test `1000 % i == 0 or i % 1000 == 0`
                # also fired for every divisor of 1000 (i=1,2,4,5,...),
                # spamming the log. Report every 1000 chunks instead.
                if i % 1000 == 0:
                    logging.info("{0}/{1} chunks rendered".format(
                        i, len(chunks)))
    else:
        logging.debug(
            "Rendering chunks in {0} processes".format(processes))
        pool = multiprocessing.Pool(processes=processes)
        asyncresults = []
        for col, row, chunkXY in chunks:
            # TODO: re-enable the inclusion_set short-circuit once
            # find_oldimage works with chunkXY coordinates:
            #if inclusion_set and (col, row) not in inclusion_set:
            #    # Skip rendering, just find where the existing image is
            #    _, imgpath = chunk.find_oldimage(chunkfile, cached, self.caves)
            #    if imgpath:
            #        results[(col, row)] = imgpath
            #    continue
            # NOTE(review): `cached` undefined here as well -- confirm.
            oldimg = chunk.find_oldimage(chunkXY, cached, self.caves)
            if chunk.check_cache(self, chunkXY, oldimg):
                # Cache hit: wrap the path so the collection loop below can
                # call .get() uniformly on real and fake async results.
                result = FakeAsyncResult(oldimg[1])
            else:
                result = pool.apply_async(
                    chunk.render_and_save,
                    args=(chunkXY, self.cachedir, self, oldimg),
                    kwds=dict(cave=self.caves, queue=q))
            asyncresults.append((col, row, result))
        pool.close()
        for i, (col, row, result) in enumerate(asyncresults):
            results[(col, row)] = result.get()
            # Drain at most one pending POI event per completed chunk.
            try:
                item = q.get(block=False)
                if item[0] == "newpoi":
                    self.POI.append(item[1])
                elif item[0] == "removePOI":
                    self.persistentData['POI'] = filter(
                        lambda x: x['chunk'] != item[1],
                        self.persistentData['POI'])
            except Queue.Empty:
                pass
            # Same divisor-of-1000 log-spam fix as the synchronous branch.
            if i > 0 and i % 1000 == 0:
                logging.info("{0}/{1} chunks rendered".format(
                    i, len(asyncresults)))
        pool.join()
    logging.info("Done!")
    return results