def iteritems(self, every_k_frames=1, verbose=False): for key, frames in progressbar(self.data.iteritems(), size=len(self.data), verbose=verbose): print 'Processing: %s' % key for frame in frames.iteritems(every_k_frames=every_k_frames): yield frame
def iterchunks(self, key, batch_size=10, verbose=False):
    """Yield the values stored under `key`, grouped into batches of `batch_size`.

    Raises:
        RuntimeError: if `key` is not one of the dataset's known keys.
    """
    if key not in self.keys:
        raise RuntimeError('Key %s not found in dataset. keys: %s' % (key, self.keys))
    batched = grouper(self.itervalues_for_key(key), batch_size)
    # Python-2 integer division: progress size is the floor of items/batch.
    n_batches = self.length(key) / batch_size
    for batch in progressbar(batched, size=n_batches, verbose=verbose):
        yield batch
def iterscenes(self, targets=None, blacklist=None, verbose=False, with_ground_truth=False):
    """Yield (key, scene) pairs, optionally restricted to `targets` and
    excluding any key present in `blacklist`."""
    for name in progressbar(self.dataset_.iterkeys(),
                            size=len(self.dataset_), verbose=verbose):
        # Skip keys outside the requested target set, and blacklisted keys.
        not_a_target = targets is not None and name not in targets
        blacklisted = blacklist is not None and name in blacklist
        if not_a_target or blacklisted:
            continue
        yield name, self.scene(name, with_ground_truth=with_ground_truth)
def iteritems(self, every_k_frames=1, verbose=False, with_ground_truth=False): print 'Scenes: %i %s, With GT: {}' % (len( self.scenes()), self.scenes(), with_ground_truth) for key, scene in progressbar(self.iterscenes( verbose=verbose, with_ground_truth=with_ground_truth), size=len(self.scenes()), verbose=verbose): for frame in scene.iteritems(every_k_frames=every_k_frames): yield frame
def iter_keys_values(self, keys, inds=None, verbose=False):
    """Yield tuples of values for several `keys` at once, chunk by chunk.

    Each yielded item is a tuple aligned across `keys` (via izip), loaded
    one on-disk chunk at a time.

    Raises:
        RuntimeError: if any requested key is unknown to the dataset.
    """
    for key in keys:
        if key not in self.keys_:
            raise RuntimeError('Key %s not found in dataset. keys: %s' % (key, self.keys_))
    # NOTE(review): `inds` is sorted here but never applied below — the
    # index-filtering branch was left commented out in the original, so
    # every item is yielded regardless of `inds`. Confirm whether index
    # filtering should actually be implemented (cf. `itervalues`).
    inds = np.sort(inds) if inds is not None else None
    total_chunks = len(self.meta_file_.chunks)
    for chunk_idx, chunk in enumerate(
            progressbar(self.meta_file_.chunks, size=total_chunks,
                        verbose=verbose)):
        data = AttrDict.load(self.get_chunk_filename(chunk_idx))
        # One lazy value-stream per key; izip aligns them item-wise.
        items = (data[key] for key in keys)
        for item in izip(*items):
            yield item
def iterchunks(self, key, batch_size=10, verbose=False):
    """Yield the values stored under `key`, concatenated across groups of
    `batch_size` on-disk chunks (one list per group).

    Raises:
        RuntimeError: if `key` is not one of the dataset's known keys.
    """
    if key not in self.keys_:
        raise RuntimeError(
            'Key %s not found in dataset. keys: %s' % (key, self.keys_))
    # (Removed dead locals `idx, ii` from the original — never used.)
    total_chunks = len(self.meta_file_.chunks)
    batch_chunks = chunks(range(total_chunks), batch_size)
    # Python-2 integer division: progress size is the floor of chunks/batch.
    for chunk_group in progressbar(batch_chunks,
                                   size=total_chunks / batch_size,
                                   verbose=verbose):
        items = []
        for chunk_idx in chunk_group:
            # The grouper pads short groups with None values; skip padding.
            if chunk_idx is None:
                continue
            # Load this chunk and append all of its values for `key`.
            data = AttrDict.load(self.get_chunk_filename(chunk_idx))
            items.extend(data[key])
        yield items
def itervalues(self, key, inds=None, verbose=False):
    """Yield values stored under `key`, one on-disk chunk at a time.

    If `inds` is given, only items at those (sorted) global indices are
    yielded; otherwise every item is yielded in storage order.
    """
    if key not in self.keys_:
        raise RuntimeError('Key %s not found in dataset. keys: %s' % (key, self.keys_))
    # idx: global item counter across all chunks;
    # ii:  cursor into the sorted `inds` array;
    # done: set once every requested index has been consumed.
    idx, ii, done = 0, 0, False
    total_chunks = len(self.meta_file_.chunks)
    if inds is not None:
        import warnings
        warnings.warn(
            '{}::itervalues indices provided must be sorted'.format(
                self.__class__.__name__))
        # Sort defensively so the single-pass cursor below stays valid.
        inds = np.sort(inds)
    for chunk_idx, chunk in enumerate(
            progressbar(self.meta_file_.chunks, size=total_chunks,
                        verbose=verbose)):
        data = AttrDict.load(self.get_chunk_filename(chunk_idx))
        if inds is None:
            # No filtering: stream every item in this chunk.
            for item in data[key]:
                yield item
                idx += 1
        else:
            for i, item in enumerate(data[key]):
                # All requested indices consumed: stop scanning entirely.
                if ii >= len(inds):
                    done = True
                    break
                if idx == inds[ii]:
                    # print i, ii, idx
                    yield item
                    ii += 1
                idx += 1
            if done:
                break