def iterate_seqs(self, chunk_size=None, chunk_step=None, used_data_keys=None):
    """
    Takes chunking into consideration.

    :param int chunk_size: chunk size. 0 disables chunking. defaults to self.chunk_size
    :param int chunk_step: distance between chunk starts. defaults to self.chunk_step
    :param set(str)|None used_data_keys: if given, restrict chunking to these data-keys
    :return: generator which yields tuples (seq index, seq start, seq end)
    :rtype: list[(int,NumbersDict,NumbersDict)]
    """
    if chunk_size is None:
        chunk_size = self.chunk_size
    if chunk_step is None:
        chunk_step = self.chunk_step
    s = 0
    while self.is_less_than_num_seqs(s):
        length = self.get_seq_length(s)
        if chunk_size == 0:
            # No chunking: the whole sequence is one chunk.
            yield (s, length.constant_like(0), length)
        else:
            default_key = "data"
            if used_data_keys is not None:
                length = NumbersDict({k: length[k] for k in used_data_keys})
                if default_key not in used_data_keys:
                    # Fix: "data" might not be among used_data_keys; fall back to a
                    # deterministic key so length[default_key] below cannot raise KeyError.
                    default_key = sorted(used_data_keys)[0]
            t = length.constant_like(0)
            # There are usually the 'data' (input) and 'classes' (targets) data-keys in `length`
            # but there can be others. We expect them all of the same length so that we can do chunking.
            # In case that some length is 0 or 1,
            # we treat it special and always return the full seq repeated for every chunk.
            keys_with_full_seqs = []
            for key in length.keys():
                if length[key] == length[default_key]:
                    continue  # ok
                if length[key] <= 1:  # special case as explained above
                    keys_with_full_seqs.append(key)
                    continue
                raise Exception(
                    "Chunking with multiple data-keys of different length: %r" % length)
            while length[default_key] > t[default_key]:
                chunk_start = NumbersDict(t)
                chunk_end = NumbersDict.min([t + chunk_size, length])
                for key in keys_with_full_seqs:
                    # Repeat the full (short) seq for every chunk.
                    chunk_start[key] = 0
                    chunk_end[key] = length[key]
                if length.value is None:
                    chunk_start.value = None
                    chunk_end.value = None
                yield (s, chunk_start, chunk_end)
                t += chunk_step
                # Stop early rather than emitting a final chunk shorter than min_chunk_size.
                if length[default_key] - t[default_key] <= self.min_chunk_size:
                    break
        s += 1
def __init__(self,
             name=None, window=1, context_window=None, chunking=None,
             seq_ordering='default', partition_epoch=None,
             shuffle_frames_of_nseqs=0, min_chunk_size=0,
             estimated_num_seqs=None,
             ):
    """
    :param str name: e.g. "train" or "eval"
    :param int window: features will be of dimension window * feature_dim,
      as we add a context-window around. not all datasets support this option.
    :param None|int|dict|NumbersDict context_window: will add this context for each chunk
    :param None|str|int|(int,int)|dict|(dict,dict) chunking: "chunk_size:chunk_step"
    :param str seq_ordering: "batching"-option in config. e.g. "default", "sorted" or "random".
      See self.get_seq_order_for_epoch() for more details.
    :param int|None partition_epoch:
    :param int shuffle_frames_of_nseqs: shuffles the frames. not always supported
    :param None|int estimated_num_seqs: for progress reporting in case the real num_seqs is unknown
    """
    self.name = name or ("dataset_id%s" % id(self))
    self.lock = RLock()  # Used when manipulating our data potentially from multiple threads.
    self.num_inputs = 0  # usually not used, but num_outputs instead, which is more generic
    self.num_outputs = None
    " :type: dict[str,(int,int)] "  # tuple is num-classes, len(shape).
    self.window = window
    self.seq_ordering = seq_ordering  # "default", "sorted" or "random". See self.get_seq_order_for_epoch().
    self.partition_epoch = partition_epoch or 1
    self.timestamps = None
    self.labels = {}
    """ :type: dict[str,list[str]] """
    self.nbytes = 0
    self.num_running_chars = 0  # CTC running chars.
    self._num_timesteps = 0
    self._num_codesteps = None
    " :type: int "  # Num output frames, could be different from input, seq2seq, ctc.
    self._num_seqs = 0
    self._estimated_num_seqs = estimated_num_seqs
    self.min_chunk_size = min_chunk_size
    # Normalize the `chunking` option into (chunk_size, chunk_step), both NumbersDict.
    # A str is either "size:step" or just "size".
    if isinstance(chunking, str):
        if ":" in chunking:
            chunking = tuple(map(int, chunking.split(":")))
        else:
            chunking = int(chunking)
    if not isinstance(chunking, (tuple, list)):
        chunking = (chunking, None)  # step defaults to the size below (non-overlapping chunks)
    chunk_size, chunk_step = chunking
    if chunk_size is None:
        chunk_size = 0  # 0 means chunking is disabled
    assert isinstance(chunk_size, (int, dict, NumbersDict))
    chunk_size = NumbersDict(chunk_size)
    assert chunk_size == 0 or chunk_size.min_value() > 0, "chunk size must not be negative"
    self.chunk_size = chunk_size
    if chunk_step in (None, 0):
        chunk_step = self.chunk_size  # default: chunks do not overlap
    assert isinstance(chunk_step, (int, dict, NumbersDict))
    chunk_step = NumbersDict(chunk_step)
    if self.chunk_size != 0:
        # Only validated when chunking is enabled.
        assert sorted(chunk_step.keys()) == sorted(chunk_size.keys())
        assert chunk_step.max_value() > 0, "chunking step must be positive (for some key)"
    self.chunk_step = chunk_step
    # Normalize `context_window` into a NumbersDict with broadcast default 0.
    if context_window is None:
        context_window = NumbersDict(0)
    elif isinstance(context_window, int):
        # A plain int applies to the "data" key only.
        context_window = NumbersDict(broadcast_value=0, numbers_dict={"data": context_window})
    elif isinstance(context_window, dict):
        context_window = NumbersDict(broadcast_value=0, numbers_dict=context_window)
    assert isinstance(context_window, NumbersDict)
    self.context_window = context_window
    self.shuffle_frames_of_nseqs = shuffle_frames_of_nseqs
    self.epoch = None
def iterate_seqs(self, chunk_size=None, chunk_step=None, used_data_keys=None):
    """
    Takes chunking into consideration.
    :param int|NumbersDict chunk_size:
    :param int|NumbersDict chunk_step:
    :param set(str)|None used_data_keys:
    :return: generator which yields tuples (seq index, seq start, seq end)
    :rtype: list[(int,NumbersDict,NumbersDict)]
    """
    if chunk_size is None:
        chunk_size = self.chunk_size
    if chunk_step is None:
        chunk_step = self.chunk_step
    # Wrap into NumbersDict so that per-key indexing below works uniformly.
    chunk_size = NumbersDict(chunk_size)
    chunk_step = NumbersDict(chunk_step)
    s = 0
    while self.is_less_than_num_seqs(s):
        length = self.get_seq_length(s)
        if chunk_size == 0:
            # Chunking disabled: the whole seq is a single chunk.
            yield (s, NumbersDict.constant_like(0, numbers_dict=length), length)
        else:
            # `default_key` drives the chunk positions; other keys follow it.
            default_key = "data"
            if used_data_keys is not None:
                length = NumbersDict({k: length[k] for k in used_data_keys})
                if default_key not in used_data_keys:
                    # "data" not used; pick a deterministic replacement key.
                    default_key = sorted(used_data_keys)[0]
                    if chunk_step[default_key] == 0:  # allow some keys with zero chunk-step
                        assert chunk_step.max_value() > 0
                        default_key = [key for key in sorted(used_data_keys) if chunk_step[key] > 0][0]
            assert chunk_step[default_key] > 0
            t = NumbersDict.constant_like(0, numbers_dict=length)
            # There are usually the 'data' (input) and 'classes' (targets) data-keys in `length` but there can be others.
            # We expect them all of the same length so that we can do chunking.
            # In case that some length is 0 or 1,
            # we treat it special and always return the full seq repeated for every chunk.
            keys_with_full_seqs = []
            for key in length.keys():
                if chunk_step[key] == chunk_step[default_key]:
                    if length[key] == length[default_key]:
                        continue  # ok
                if length[key] <= 1:  # special case as explained above
                    keys_with_full_seqs.append(key)
                    continue
                if chunk_step[key] == chunk_step[default_key]:
                    raise Exception("Chunking with multiple data-keys of different length: %r" % length)
                else:
                    # Different per-key chunk-steps are allowed as long as every key
                    # yields the same number of full chunks.
                    nr_of_full_chunks_key = (length[key] - chunk_size[key]) // chunk_step[key] + 1
                    nr_of_full_chunks_default_key = (
                        (length[default_key] - chunk_size[default_key]) // chunk_step[default_key] + 1)
                    assert nr_of_full_chunks_key == nr_of_full_chunks_default_key
            while length[default_key] > t[default_key]:
                chunk_start = NumbersDict(t)
                chunk_end = NumbersDict.min([t + chunk_size, length])
                for key in keys_with_full_seqs:
                    # Repeat the full (short) seq for every chunk.
                    chunk_start[key] = 0
                    chunk_end[key] = length[key]
                if length.value is None:
                    # Keep the broadcast/default value unset, like `length` has it.
                    chunk_start.value = None
                    chunk_end.value = None
                yield (s, chunk_start, chunk_end)
                t += chunk_step
                # Do not emit a trailing chunk shorter than min_chunk_size.
                if length[default_key] - t[default_key] <= self.min_chunk_size:
                    break
        s += 1
def iterate_seqs(self, chunk_size=None, chunk_step=None, used_data_keys=None):
    """
    Generator over the sequences, taking chunking into account.
    :param int|NumbersDict chunk_size:
    :param int|NumbersDict chunk_step:
    :param set(str)|None used_data_keys:
    :return: generator which yields tuples (seq index, seq start, seq end)
    :rtype: list[(int,NumbersDict,NumbersDict)]
    """
    chunk_size = NumbersDict(self.chunk_size if chunk_size is None else chunk_size)
    chunk_step = NumbersDict(self.chunk_step if chunk_step is None else chunk_step)
    seq_idx = 0
    while self.is_less_than_num_seqs(seq_idx):
        length = self.get_seq_length(seq_idx)
        if chunk_size == 0:
            # Chunking disabled: a single chunk spanning the whole seq.
            yield (seq_idx, NumbersDict.constant_like(0, numbers_dict=length), length)
            seq_idx += 1
            continue
        default_key = "data"
        if used_data_keys is not None:
            length = NumbersDict({key: length[key] for key in used_data_keys})
            if default_key not in used_data_keys:
                default_key = sorted(used_data_keys)[0]
                if chunk_step[default_key] == 0:  # allow some keys with zero chunk-step
                    assert chunk_step.max_value() > 0
                    default_key = [key for key in sorted(used_data_keys) if chunk_step[key] > 0][0]
        assert chunk_step[default_key] > 0
        pos = NumbersDict.constant_like(0, numbers_dict=length)
        # Usually `length` has the 'data' (input) and 'classes' (targets) keys, but there can be others.
        # All keys with the same chunk-step must agree in length so chunking lines up.
        # Keys of length 0 or 1 are special: their full seq is repeated for every chunk.
        full_seq_keys = []
        for key in length.keys():
            same_step = chunk_step[key] == chunk_step[default_key]
            if same_step and length[key] == length[default_key]:
                continue  # ok, consistent with the default key
            if length[key] <= 1:  # special case as explained above
                full_seq_keys.append(key)
                continue
            if same_step:
                raise Exception("Chunking with multiple data-keys of different length: %r" % length)
            # Different chunk-steps are fine as long as every key produces the same number of full chunks.
            n_chunks_key = (length[key] - chunk_size[key]) // chunk_step[key] + 1
            n_chunks_default = (length[default_key] - chunk_size[default_key]) // chunk_step[default_key] + 1
            assert n_chunks_key == n_chunks_default
        while length[default_key] > pos[default_key]:
            chunk_start = NumbersDict(pos)
            chunk_end = NumbersDict.min([pos + chunk_size, length])
            for key in full_seq_keys:
                chunk_start[key] = 0
                chunk_end[key] = length[key]
            if length.value is None:
                chunk_start.value = None
                chunk_end.value = None
            yield (seq_idx, chunk_start, chunk_end)
            pos += chunk_step
            # Skip a trailing chunk that would be shorter than min_chunk_size.
            if length[default_key] - pos[default_key] <= self.min_chunk_size:
                break
        seq_idx += 1
def __init__(self,
             name=None, window=1, context_window=None, chunking=None,
             seq_ordering='default', partition_epoch=None, repeat_epoch=None,
             shuffle_frames_of_nseqs=0, min_chunk_size=0,
             estimated_num_seqs=None,):
    """
    :param str name: e.g. "train" or "eval"
    :param int window: features will be of dimension window * feature_dim,
      as we add a context-window around. not all datasets support this option.
    :param None|int|dict|NumbersDict context_window: will add this context for each chunk
    :param None|str|int|(int,int)|dict|(dict,dict) chunking: "chunk_size:chunk_step"
    :param str seq_ordering: "batching"-option in config. e.g. "default", "sorted" or "random".
      See self.get_seq_order_for_epoch() for more details.
    :param int|None partition_epoch:
    :param int|None repeat_epoch: Repeat the sequences in an epoch this many times. Useful to scale
      the dataset relative to other datasets, e.g. when used in CombinedDataset.
      Not allowed to be used in combination with partition_epoch.
    :param int shuffle_frames_of_nseqs: shuffles the frames. not always supported
    :param None|int estimated_num_seqs: for progress reporting in case the real num_seqs is unknown
    """
    self.name = name or ("dataset_id%s" % id(self))
    self.lock = RLock()  # guards our data when potentially touched from several threads
    self.rnd_seq_drop = None  # type: typing.Optional[Random]
    self.num_inputs = 0  # mostly unused; num_outputs is the generic variant
    self.num_outputs = None  # type: typing.Optional[typing.Dict[str,typing.Tuple[int,int]]]  # tuple is num-classes, len(shape). # nopep8
    self.window = window
    self.seq_ordering = seq_ordering  # "default", "sorted" or "random". See self.get_seq_order_for_epoch().
    self.partition_epoch = partition_epoch or 1
    self.repeat_epoch = repeat_epoch or 1
    # There is probably no use case for combining the two, so avoid potential misconfiguration.
    assert self.partition_epoch == 1 or self.repeat_epoch == 1, (
        "Combining partition_epoch and repeat_epoch is prohibited.")
    self.timestamps = None
    self.labels = {}  # type: typing.Dict[str,typing.List[str]]
    self.weights = {}
    self.nbytes = 0
    self.num_running_chars = 0  # CTC running chars.
    self._num_timesteps = 0
    self._num_codesteps = None  # type: typing.Optional[int]  # num output frames; can differ from input (seq2seq, CTC)
    self._num_seqs = 0
    self._estimated_num_seqs = estimated_num_seqs
    self.min_chunk_size = min_chunk_size

    # Turn the `chunking` option into (size, step), both NumbersDict.
    # A string is either "size:step" or plain "size".
    if isinstance(chunking, str):
        chunking = tuple(map(int, chunking.split(":"))) if ":" in chunking else int(chunking)
    if not isinstance(chunking, (tuple, list)):
        chunking = (chunking, None)
    size, step = chunking
    if size is None:
        size = 0  # 0 disables chunking
    assert isinstance(size, (int, dict, NumbersDict))
    size = NumbersDict(size)
    assert size == 0 or size.min_value() > 0, "chunk size must not be negative"
    self.chunk_size = size
    if step in (None, 0):
        step = self.chunk_size  # default: non-overlapping chunks
    assert isinstance(step, (int, dict, NumbersDict))
    step = NumbersDict(step)
    if self.chunk_size != 0:
        assert sorted(step.keys()) == sorted(size.keys())
        assert step.max_value() > 0, "chunking step must be positive (for some key)"
    self.chunk_step = step

    # Normalize `context_window` into a NumbersDict with broadcast default 0.
    if context_window is None:
        context_window = NumbersDict(0)
    elif isinstance(context_window, int):
        # A plain int is interpreted as the context for the "data" key only.
        context_window = NumbersDict(broadcast_value=0, numbers_dict={"data": context_window})
    elif isinstance(context_window, dict):
        context_window = NumbersDict(broadcast_value=0, numbers_dict=context_window)
    assert isinstance(context_window, NumbersDict)
    self.context_window = context_window
    self.shuffle_frames_of_nseqs = shuffle_frames_of_nseqs
    self.epoch = None