def minibatch_loader_thread(self):
    """Load mini-batches and put them onto the mini-batch queue.

    Alternates between labeled and unlabeled batches: when an unlabeled
    roidb is present and its queue has fallen behind the labeled queue,
    an unlabeled mini-batch is produced instead, keeping the two queues
    roughly balanced.
    """
    with self.coordinator.stop_on_exception():
        while not self.coordinator.should_stop():
            # Prefer the unlabeled source only when it exists and its
            # queue is shorter than the labeled one.
            use_labeled = not (
                self._roidb_tmp is not None and
                self._unlabel_minibatch_queue.qsize() <
                self._minibatch_queue.qsize()
            )
            if use_labeled:
                blobs = self.get_next_minibatch()
                target_queue = self._minibatch_queue
            else:
                blobs = self.get_next_unlabel_minibatch()
                target_queue = self._unlabel_minibatch_queue
            # Blobs must be queued in the order specified by
            # self.get_output_names
            ordered_blobs = OrderedDict()
            for name in self.get_output_names():
                blob = blobs[name]
                assert blob.dtype in (np.int32, np.float32), \
                    'Blob {} of dtype {} must have dtype of ' \
                    'np.int32 or np.float32'.format(name, blob.dtype)
                ordered_blobs[name] = blob
            coordinated_put(self.coordinator, target_queue, ordered_blobs)
        logger.info('Stopping mini-batch loading thread')
def put_blobs_into_queue(blobs):
    """Validate ``blobs`` dtypes and enqueue them on the mini-batch queue.

    NOTE(review): this function references ``self`` but does not take it
    as a parameter -- presumably it is nested inside a method and closes
    over ``self``; confirm against the enclosing scope before moving it.
    """
    # Blobs must be queued in the order specified by
    # self.get_output_names
    ordered_blobs = OrderedDict()
    for key in self.get_output_names():
        assert blobs[key].dtype in (np.int32, np.float32), \
            'Blob {} of dtype {} must have dtype of ' \
            'np.int32 or np.float32'.format(key, blobs[key].dtype)
        ordered_blobs[key] = blobs[key]
    coordinated_put(self.coordinator, self._minibatch_queue, ordered_blobs)
def put_minibatch_in_queue(minibatch_queue_mp):
    """Multiprocessing worker: produce mini-batches onto a shared queue.

    Loads a pickled roidb and config from fixed paths, seeds this
    worker's RNG (logging the seed for reproducibility), then loops
    forever: sample the next mini-batch indices, build the blobs, and
    put them -- ordered per get_minibatch_blob_names() -- onto
    ``minibatch_queue_mp`` via the coordinator. Runs until the process
    is terminated.
    """
    from utils.coordinator import coordinated_put
    import cPickle, os, json
    from utils.coordinator import Coordinator
    from collections import OrderedDict
    # Pickle files are binary; open with 'rb' so loading also works on
    # platforms where text mode translates bytes.
    with open('/detectron/lib/datasets/data/roidb.pkl', 'rb') as f:
        roidb = cPickle.load(f)
    with open('/detectron/lib/datasets/data/cfg.pkl', 'rb') as f:
        other_cfg = cPickle.load(f)
    merge_cfg_from_cfg(other_cfg)
    try:
        os.makedirs(RAND_LOG_DIR)
    except OSError:
        # Was a bare ``except``; only filesystem errors (typically
        # "directory already exists") should be ignored here.
        pass
    RAND_SEED = get_worker_seed()
    # Record the seed per worker so runs can be reproduced.
    with open(os.path.join(RAND_LOG_DIR, str(get_worker_id())), 'a') as f:
        json.dump({'RANDOM_SEED': str(RAND_SEED)}, f)
        f.write(os.linesep)
    np.random.seed(RAND_SEED)
    coordinator = Coordinator()
    # Filter out really big images (too many segmentations).
    roidb = [r for r in roidb if len(r['segms']) <= 370]
    perm, cur = _shuffle_roidb_inds(roidb)
    while True:
        db_inds, perm, cur = _get_next_minibatch_inds(roidb, perm, cur)
        minibatch_db = [roidb[i] for i in db_inds]
        blobs, valid = get_minibatch(minibatch_db)
        if not valid:
            # Sampled batch unusable; draw again.
            continue
        # Blobs must be queued in the order specified by
        # get_minibatch_blob_names
        ordered_blobs = OrderedDict()
        for key in get_minibatch_blob_names():
            assert blobs[key].dtype in (np.int32, np.float32), \
                'Blob {} of dtype {} must have dtype of ' \
                'np.int32 or np.float32'.format(key, blobs[key].dtype)
            ordered_blobs[key] = blobs[key]
        coordinated_put(
            coordinator, minibatch_queue_mp, ordered_blobs
        )
def minibatch_loader_thread(self):
    """Load mini-batches and put them onto the mini-batch queue.

    Runs until the coordinator signals stop; any exception raised while
    loading is routed through the coordinator's handler.
    """
    with self.coordinator.stop_on_exception():
        while not self.coordinator.should_stop():
            blobs = self.get_next_minibatch()
            # Blobs must be queued in the order specified by
            # self.get_output_names
            ordered_blobs = OrderedDict()
            for name in self.get_output_names():
                blob = blobs[name]
                assert blob.dtype in (np.int32, np.float32), \
                    'Blob {} of dtype {} must have dtype of ' \
                    'np.int32 or np.float32'.format(name, blob.dtype)
                ordered_blobs[name] = blob
            coordinated_put(
                self.coordinator, self._minibatch_queue, ordered_blobs
            )
        logger.info('Stopping mini-batch loading thread')
def minibatch_loader_thread(self):
    """Load mini-batches and put them onto the mini-batch queue.

    Uses a pool of augmentation worker processes to build batches in
    parallel. The pool is torn down and recreated every 10 iterations
    (presumably to bound worker memory growth -- TODO confirm), and is
    cleaned up once the coordinator signals stop.
    """
    augmentation_process_pool = None
    # Counts loader iterations; drives the periodic pool recycle below.
    mini_thread_batch_iter = 0
    with self.coordinator.stop_on_exception():
        while not self.coordinator.should_stop():
            t = time.time()
            # Recycle the augmentation pool every 10 iterations
            # (including iteration 0, which creates the first pool).
            if mini_thread_batch_iter % 10 == 0:
                if augmentation_process_pool:
                    augmentation_process_pool.close()
                    augmentation_process_pool.join()
                    logger.info('get_next_parallel_minibatch DELETE POOL Thread: {} took time: {} MINI_ITER: {}'.format(threading.currentThread(), time.time() - t, mini_thread_batch_iter))
                # NOTE(review): 'crate_...' looks like a typo for
                # 'create_...', but it is the project method's actual
                # name -- do not rename here without renaming the method.
                augmentation_process_pool = self.crate_augmentation_process_pool(self.num_augmentation_processes)
                logger.info('get_next_parallel_minibatch CREATE POOL Thread: {} took time: {} MINI_ITER: {}'.format(threading.currentThread(), time.time() - t, mini_thread_batch_iter))
                t = time.time()
            logger.info('get_next_parallel_minibatch: Going to prepare for thread: {} MINI_ITER: {}'.format(threading.currentThread(), mini_thread_batch_iter))
            # One call yields several mini-batches (one per worker).
            blobs_list = self.get_next_parallel_minibatch(augmentation_process_pool, self.num_augmentation_processes)
            logger.info('get_next_parallel_minibatch Thread: {} {}: len of blobs_list: {} MINI_ITER: {}'.format(threading.currentThread(), time.time() - t, len(blobs_list), mini_thread_batch_iter))
            t = time.time()
            for blobs in blobs_list:
                # Blobs must be queued in the order specified by
                # self.get_output_names
                ordered_blobs = OrderedDict()
                for key in self.get_output_names():
                    assert blobs[key].dtype in (np.int32, np.float32), \
                        'Blob {} of dtype {} must have dtype of ' \
                        'np.int32 or np.float32'.format(key, blobs[key].dtype)
                    ordered_blobs[key] = blobs[key]
                coordinated_put(
                    self.coordinator, self._minibatch_queue, ordered_blobs
                )
            logger.debug('coordinated_put {}: len of blobs_list: {} MINI_ITER: {}'.format(time.time() - t, len(blobs_list), mini_thread_batch_iter))
            t = time.time()
            # del blobs_list
            mini_thread_batch_iter += 1
        # Coordinator asked us to stop: release the worker pool.
        if augmentation_process_pool:
            augmentation_process_pool.close()
            augmentation_process_pool.join()
            logger.info('get_next_parallel_minibatch DELETE POOL Thread: {}'.format(threading.currentThread()))
        logger.info('Stopping mini-batch loading thread')
def minibatch_loader(self):
    """Load mini-batches and put them onto the mini-batch queue.

    DEPRECATED: this function will not work with multiprocessing.
    """
    with self.coordinator.stop_on_exception():
        while not self.coordinator.should_stop():
            blobs = self._get_next_minibatch()
            # Blobs must be queued in the order specified by
            # self.get_output_names
            ordered_blobs = OrderedDict()
            for blob_name in self.get_output_names():
                dtype = blobs[blob_name].dtype
                assert dtype in (np.int32, np.float32), \
                    'Blob {} of dtype {} must have dtype of ' \
                    'np.int32 or np.float32'.format(blob_name, dtype)
                ordered_blobs[blob_name] = blobs[blob_name]
            coordinated_put(
                self.coordinator, self._minibatch_queue, ordered_blobs)
        logger.info('Stopping mini-batch loading thread')
def minibatch_loader2(shared_readonly_dict, minibatch_queue, lock, mp_cur,
                      mp_perm, coordinator):
    """Load mini-batches and put them onto the mini-batch queue.

    Process-safe loader: shared state (roidb cursor/permutation) lives
    in ``mp_cur``/``mp_perm`` guarded by ``lock``; loops until the
    coordinator signals stop.
    """
    output_names = shared_readonly_dict['output_names']
    with coordinator.stop_on_exception():
        while not coordinator.should_stop():
            blobs = RoIDataLoader._get_next_minibatch2(
                shared_readonly_dict, lock, mp_cur, mp_perm)
            # Blobs must be queued in the order specified by
            # self.get_output_names
            ordered_blobs = OrderedDict()
            for name in output_names:
                blob = blobs[name]
                assert blob.dtype in (np.int32, np.float32), \
                    'Blob {} of dtype {} must have dtype of ' \
                    'np.int32 or np.float32'.format(name, blob.dtype)
                ordered_blobs[name] = blob
            coordinated_put(coordinator, minibatch_queue, ordered_blobs)
        logger.info('Stopping mini-batch loading thread')
def minibatch_loader2(shared_readonly_dict, minibatch_queue, lock, mp_cur,
                      mp_perm, coordinator):
    """Load mini-batches and put them onto the mini-batch queue.

    Fetches successive mini-batches via the class-level helper and
    enqueues each one, preserving the blob order recorded in
    ``shared_readonly_dict['output_names']``.
    """
    output_names = shared_readonly_dict['output_names']
    allowed_dtypes = (np.int32, np.float32)
    with coordinator.stop_on_exception():
        while not coordinator.should_stop():
            blobs = RoIDataLoader._get_next_minibatch2(
                shared_readonly_dict, lock, mp_cur, mp_perm)
            # Blobs must be queued in the order specified by
            # self.get_output_names
            ordered_blobs = OrderedDict()
            for key in output_names:
                assert blobs[key].dtype in allowed_dtypes, \
                    'Blob {} of dtype {} must have dtype of ' \
                    'np.int32 or np.float32'.format(key, blobs[key].dtype)
                ordered_blobs[key] = blobs[key]
            coordinated_put(
                coordinator, minibatch_queue, ordered_blobs)
        logger.info('Stopping mini-batch loading thread')