def _init_runtime(self):
    """Build the offline predictor for this worker process.

    The master worker (idx == 0) keeps layer logging enabled and prints
    a model summary after construction; all other workers silence the
    duplicate layer logs first.
    """
    is_master = (self.idx == 0)
    if not is_master:
        # avoid every worker re-printing identical layer-construction logs
        from tensorpack.models._common import disable_layer_logging
        disable_layer_logging()
    self.func = OfflinePredictor(self.config)
    if is_master:
        describe_model()
def _init_runtime(self):
    """Create this worker's predictor.

    Call _init_runtime under different CUDA_VISIBLE_DEVICES so the
    workers end up running on multiple GPUs.  Only the master worker
    (idx == 0) logs layer construction and describes the model.
    """
    is_master = (self.idx == 0)
    if not is_master:
        # suppress repeated layer-construction logging in child workers
        from tensorpack.models._common import disable_layer_logging
        disable_layer_logging()
    self.func = OfflinePredictor(self.config)
    if is_master:
        describe_model()
def _init_runtime(self):
    """Pin this worker to its device and build its predict function.

    Sets CUDA_VISIBLE_DEVICES from ``self.gpuid`` (empty string for
    CPU-only), then constructs the predictor inside a private graph so
    nothing is shared between worker processes.
    """
    on_gpu = self.gpuid >= 0
    if on_gpu:
        logger.info("Worker {} uses GPU {}".format(self.idx, self.gpuid))
    else:
        logger.info("Worker {} uses CPU".format(self.idx))
    os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpuid) if on_gpu else ''
    # build a graph for each process, because they don't need to share anything
    graph = tf.Graph()
    with graph.as_default():
        if self.idx != 0:
            # only the master worker keeps layer-construction logging on
            from tensorpack.models._common import disable_layer_logging
            disable_layer_logging()
        self.func = get_predict_func(self.config)
        if self.idx == 0:
            describe_model()