class DeviceBatchRun(threading.Thread):
  def __init__(self, parent, devices):
    """
    :type parent: TaskThread
    """
    threading.Thread.__init__(self, name="DeviceThread %s" % " ".join([dev.name for dev in devices]))
    self.alloc_devices = devices
    self.parent = parent
    self.devices_batches_idx = None
    self.run_start_batch_idx = None
    self.eval_info = None
    " :type: dict[str] | None "
    self.allocated = False
    self.processing = False
    self.finished = True
    self.crashed = False
    self.num_frames = NumbersDict(0)
    self.run_frames = NumbersDict(0)
    self.daemon = True
    self.active = True
    self.result = {'batchess': [], 'results': [], 'result_format': None, 'num_frames': 0}
    if self.alloc_devices:
      self.start()

  def allocate(self):
    self.devices_batches_idx = self.parent.batches.get_current_batch_idx()
    self.allocated_devices_batches = self.parent.allocate_devices(self.alloc_devices)
    self.run_frames = NumbersDict(0)
    for batches, device in zip(self.allocated_devices_batches, self.alloc_devices):
      assert batches
      assert batches[0].seqs
      #assert batches[0].seqs[0].frame_length[1] > 0
      device.num_updates += 1 if not device.update_specs['block_size'] else int(ceil(
        sum([len(batch.seqs) for batch in batches]) / float(device.update_specs['block_size'])))
      self.run_frames += sum([batch.get_total_num_frames() for batch in batches])
    if self.parent.share_batches:
      self.run_frames /= len(self.alloc_devices)
    assert self.run_frames.max_value() > 0
    self.allocated = True

  def finish(self):
    """
    :returns whether everything is fine.
    """
    device_results, outputs_format = self.device_collect_results()
    if device_results is None:
      if not getattr(sys, "exited", False):
        print("device crashed on batch", self.run_start_batch_idx, file=log.v3)
        self.parent.device_crash_batch = self.run_start_batch_idx
      self.crashed = True
      return False
    assert len(device_results) == len(self.alloc_devices) == len(self.running_devices_batches)

    if outputs_format and any([k.startswith("gparam:") for k in outputs_format]):
      # WARNING: this code is untested and likely broken!
      for i in range(len(self.alloc_devices)):
        res = Device.make_result_dict(device_results[i], outputs_format)
        self.alloc_devices[i].sync_net_train_params()
        devnet = self.alloc_devices[i].get_net_train_params(self.parent.network)
        vars = self.parent.network.get_all_params_vars()
        for p, q in zip(vars, devnet):
          p.set_value(q)
        gparams = {}
        for p in vars:
          gparams[p] = numpy.zeros(
            p.get_value(borrow=True, return_internal_type=True).shape, dtype=theano.config.floatX)
        for p in vars:
          q = res["gparam:%s" % p.name]
          if q.shape == p.get_value().shape:
            gparams[p] = q
          elif q.shape:
            print("warning: shape for gradient does not match:", p.get_value().shape, q.shape, file=log.v2)
        self.parent.updater.setNetParamDeltas(gparams)
        self.parent.updater.update()
        self.alloc_devices[i].set_net_params(self.parent.network)

    self.result = {
      'batchess': self.running_devices_batches,
      'results': device_results,
      'result_format': outputs_format,
      'num_frames': self.num_frames}
    self.eval_info = self.parent.evaluate(**self.result)
    self.parent.lock.acquire()
    self.print_process()
    self.parent.lock.release()
    return True

  def run(self):
    try:
      while self.active and not getattr(sys, "exited", False):
        if self.allocated and not self.finished:
          self.device_run()
          self.num_frames = self.run_frames
          self.processing = True
          self.allocated = False
          self.finish()
          self.finished = True
          self.processing = False
        else:
          time.sleep(0.01)
    except BaseException:
      self.crashed = True
      sys.excepthook(*sys.exc_info())
    finally:
      self.finished = True

  def stop(self):
    self.active = False

  def device_run(self):
    batch_idx = self.run_start_batch_idx = self.devices_batches_idx
    assert len(self.alloc_devices) == len(self.allocated_devices_batches)
    self.running_devices_batches = self.allocated_devices_batches
    for device, batches in zip(self.alloc_devices, self.running_devices_batches):
      if self.parent.network.recurrent:
        print("running", device.targets["data"].shape[1],
              "sequence slices (%i nts)" % (device.targets["data"].shape[0] * device.targets["data"].shape[1]),
              end=' ', file=log.v5)
      else:
        print("running", device.targets["data"].shape[0] * device.targets["data"].shape[1],
              "frames", end=' ', file=log.v5)
      if device.num_batches == 1:
        print("of batch %i" % batch_idx, end=' ', file=log.v5)
      else:
        print("of batches %i-%i" % (batch_idx, batch_idx + device.num_batches - 1), end=' ', file=log.v5)
      print("on device", device.name, file=log.v5)
      device.run(self.parent.task)
      #if not self.share
      batch_idx += device.num_batches

  def device_collect_results(self):
    device_results = []
    outputs_format = None
    for i, device in enumerate(self.alloc_devices):
      try:
        result, outputs_format_new = device.result()
      except RuntimeError:
        return None, None
      if result is None:
        return None, None
      assert isinstance(result, list)
      assert len(result) > 0  # we always expect to get some result
      if i >= 1:
        assert outputs_format == outputs_format_new, "We expect to always get the same output format."
      outputs_format = outputs_format_new
      device_results.append(result)
    return device_results, outputs_format

  def device_mem_usage_str(self, devices):
    """
    :type devices: list[Device.Device]
    :rtype: str | None
    """
    if not devices:
      return None
    mem_info = [device.get_memory_info() for device in devices]
    if len(mem_info) == 1 and mem_info[0] is None:
      return None
    mem_usage = [info.used if info else None for info in mem_info]
    s = ["%s MB" % (mem / (1024 * 1024)) if mem is not None else "unknown" for mem in mem_usage]
    return "/".join(s)

  def print_process(self):
    if not self.parent.interactive and not log.v[5]:
      return
    start_elapsed = time.time() - self.parent.start_time
    complete = self.parent.batches.completed_frac()
    assert complete > 0
    total_time_estimated = start_elapsed / complete
    remaining_estimated = total_time_estimated - start_elapsed
    if log.verbose[5]:
      mem_usage = self.device_mem_usage_str(self.alloc_devices)
      info = [
        self.parent.report_prefix,
        "batch %i" % self.run_start_batch_idx]
      if self.eval_info:  # Such as score.
        info += ["%s %s" % item for item in sorted(self.eval_info.items())]
      info += [
        "elapsed %s" % hms(start_elapsed),
        "exp. remaining %s" % hms(remaining_estimated),
        "complete %.02f%%" % (complete * 100)]
      if mem_usage:
        info += ["memory %s" % mem_usage]
      print(", ".join(filter(None, info)), file=log.v5)
    if self.parent.interactive:
      progress_bar(complete, hms(remaining_estimated))
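# A minimal standalone sketch (not part of the class above) of how allocate() counts updates:
# with update_specs['block_size'] == 0 every device run counts as one update, otherwise the
# number of sequences in the assigned batches is split into blocks and rounded up.
# `seqs_per_batch` and `block_size` are hypothetical example values, not real config keys.
from math import ceil

def _example_num_updates(seqs_per_batch, block_size):
  """Mirror of the num_updates bookkeeping in DeviceBatchRun.allocate() (same arithmetic assumed)."""
  if not block_size:
    return 1
  return int(ceil(sum(seqs_per_batch) / float(block_size)))

# e.g. three batches with 4, 4 and 2 sequences and block_size 3 -> ceil(10 / 3) = 4 updates
assert _example_num_updates([4, 4, 2], block_size=3) == 4
assert _example_num_updates([4, 4, 2], block_size=0) == 1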
class Batch:
  """
  A batch can consist of several sequences (= segments).
  This is basically just a list of BatchSeqCopyPart.
  """

  def __init__(self):
    self.max_num_frames_per_slice = NumbersDict(0)
    self.num_slices = 0
    # original data_shape = [0, 0], format (time,batch/slice)
    # data_shape = [max_num_frames_per_slice, num_slices]
    self.seqs = []  # type: typing.List[BatchSeqCopyPart]

  def __repr__(self):
    return "<Batch start_seq:%r, len(seqs):%i>" % (self.start_seq, len(self.seqs))

  def try_sequence_as_slice(self, length):
    """
    :param NumbersDict length: number of (time) frames
    :return: new shape which covers the old shape and one more data-batch, format (time,batch)
    :rtype: (NumbersDict,int)
    """
    return [NumbersDict.max([self.max_num_frames_per_slice, length]), self.num_slices + 1]

  def add_sequence_as_slice(self, seq_idx, seq_start_frame, length):
    """
    Adds one data-batch in an additional slice.
    :param int seq_idx:
    :param NumbersDict|int seq_start_frame:
    :param NumbersDict length: number of (time) frames
    """
    self.max_num_frames_per_slice, self.num_slices = self.try_sequence_as_slice(length)
    self.seqs += [BatchSeqCopyPart(
      seq_idx=seq_idx,
      seq_start_frame=seq_start_frame,
      seq_end_frame=seq_start_frame + length,
      batch_slice=self.num_slices - 1,
      batch_frame_offset=0)]

  def add_frames(self, seq_idx, seq_start_frame, length, frame_dim_corresponds=True):
    """
    Adds frames to all data-batches.
    Will add one data-batch if we don't have one yet.
    :param int seq_idx:
    :param NumbersDict|int seq_start_frame:
    :param NumbersDict length: number of (time) frames
    :param bool frame_dim_corresponds: if the batch frame offset should always be the same (max value) for all keys
    """
    batch_frame_offset = self.max_num_frames_per_slice
    if frame_dim_corresponds:
      batch_frame_offset = NumbersDict(batch_frame_offset.max_value())
      self.max_num_frames_per_slice = NumbersDict(self.max_num_frames_per_slice.max_value())
    self.max_num_frames_per_slice += length
    self.num_slices = max(self.num_slices, 1)
    self.seqs += [BatchSeqCopyPart(
      seq_idx=seq_idx,
      seq_start_frame=seq_start_frame,
      seq_end_frame=seq_start_frame + length,
      batch_slice=0,
      batch_frame_offset=batch_frame_offset)]

  def init_with_one_full_sequence(self, seq_idx, dataset):
    """
    :param int seq_idx:
    :param Dataset.Dataset dataset:
    """
    assert not self.seqs
    start, end = dataset.get_start_end_frames_full_seq(seq_idx)
    self.add_frames(seq_idx=seq_idx, seq_start_frame=start, length=end - start)

  def get_all_slices_num_frames(self):
    """
    Note that this is only an upper limit in case of data_shape[1] > 1
    because data_shape[0] is the max frame len of all seqs.
    :return: related to the data-key with max length
    :rtype: NumbersDict
    """
    return self.max_num_frames_per_slice * self.num_slices

  def get_total_num_frames(self):
    """
    :rtype: NumbersDict
    """
    return sum([s.frame_length for s in self.seqs])

  @property
  def start_seq(self):
    """
    :rtype: int|None
    """
    if not self.seqs:
      return None
    return min([s.seq_idx for s in self.seqs])

  @property
  def end_seq(self):
    """
    :rtype: int|None
    """
    if not self.seqs:
      return None
    return max([s.seq_idx for s in self.seqs]) + 1

  def get_num_seqs(self):
    """
    :rtype: int
    """
    if not self.seqs:
      return 0
    return self.end_seq - self.start_seq
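# A minimal usage sketch for the Batch class above (assumes NumbersDict and BatchSeqCopyPart are
# importable as used in the code; the scenario and numbers are illustrative only).
# add_sequence_as_slice() opens a new slice (batch dimension grows, time dimension becomes the max),
# while add_frames() appends along the time axis of slice 0.

def _batch_shape_demo():
  padded = Batch()
  padded.add_sequence_as_slice(seq_idx=0, seq_start_frame=0, length=NumbersDict(7))
  padded.add_sequence_as_slice(seq_idx=1, seq_start_frame=0, length=NumbersDict(5))
  # two slices, padded to the longest sequence -> time dim 7, batch dim 2
  assert padded.num_slices == 2
  assert padded.max_num_frames_per_slice.max_value() == 7

  concatenated = Batch()
  concatenated.add_frames(seq_idx=2, seq_start_frame=0, length=NumbersDict(7))
  concatenated.add_frames(seq_idx=3, seq_start_frame=0, length=NumbersDict(5))
  # still a single slice, frames concatenated along time -> 12 frames in slice 0
  assert concatenated.num_slices == 1
  assert concatenated.max_num_frames_per_slice.max_value() == 12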
def iterate_seqs(self, chunk_size=None, chunk_step=None, used_data_keys=None):
  """
  Takes chunking into consideration.
  :param int|NumbersDict chunk_size:
  :param int|NumbersDict chunk_step:
  :param set(str)|None used_data_keys:
  :return: generator which yields tuples (seq index, seq start, seq end)
  :rtype: list[(int,NumbersDict,NumbersDict)]
  """
  if chunk_size is None:
    chunk_size = self.chunk_size
  if chunk_step is None:
    chunk_step = self.chunk_step
  chunk_size = NumbersDict(chunk_size)
  chunk_step = NumbersDict(chunk_step)
  s = 0
  while self.is_less_than_num_seqs(s):
    length = self.get_seq_length(s)
    if chunk_size == 0:
      yield (s, length.constant_like(0), length)
    else:
      default_key = "data"
      if used_data_keys is not None:
        length = NumbersDict({k: length[k] for k in used_data_keys})
        if default_key not in used_data_keys:
          default_key = sorted(used_data_keys)[0]
        if chunk_step[default_key] == 0:  # allow some keys with zero chunk-step
          assert chunk_step.max_value() > 0
          default_key = [key for key in sorted(used_data_keys) if chunk_step[key] > 0][0]
      assert chunk_step[default_key] > 0
      t = length.constant_like(0)
      # There are usually the 'data' (input) and 'classes' (targets) data-keys in `length` but there can be others.
      # We expect them all to be of the same length so that we can do chunking.
      # In case that some length is 0 or 1,
      # we treat it special and always return the full seq repeated for every chunk.
      keys_with_full_seqs = []
      for key in length.keys():
        if chunk_step[key] == chunk_step[default_key]:
          if length[key] == length[default_key]:
            continue  # ok
        if length[key] <= 1:  # special case as explained above
          keys_with_full_seqs.append(key)
          continue
        if chunk_step[key] == chunk_step[default_key]:
          raise Exception("Chunking with multiple data-keys of different length: %r" % length)
        else:
          nr_of_full_chunks_key = (length[key] - chunk_size[key]) // chunk_step[key] + 1
          nr_of_full_chunks_default_key = (
            (length[default_key] - chunk_size[default_key]) // chunk_step[default_key] + 1)
          assert nr_of_full_chunks_key == nr_of_full_chunks_default_key
      while length[default_key] > t[default_key]:
        chunk_start = NumbersDict(t)
        chunk_end = NumbersDict.min([t + chunk_size, length])
        for key in keys_with_full_seqs:
          chunk_start[key] = 0
          chunk_end[key] = length[key]
        if length.value is None:
          chunk_start.value = None
          chunk_end.value = None
        yield (s, chunk_start, chunk_end)
        t += chunk_step
        if length[default_key] - t[default_key] <= self.min_chunk_size:
          break
    s += 1
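# A minimal sketch of the per-sequence chunking loop above, for a single scalar data-key
# (hypothetical helper, not part of the Dataset API). It mirrors the while-loop over `t`:
# chunks start every chunk_step frames, are clipped at the sequence end, and iteration stops
# once the remaining tail is <= min_chunk_size.

def _scalar_chunks(length, chunk_size, chunk_step, min_chunk_size=0):
  t = 0
  while length > t:
    yield (t, min(t + chunk_size, length))
    t += chunk_step
    if length - t <= min_chunk_size:
      break

# e.g. a sequence of 100 frames with chunk_size=50 and chunk_step=25:
assert list(_scalar_chunks(100, 50, 25)) == [(0, 50), (25, 75), (50, 100), (75, 100)]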
def __init__(self,
             name=None, window=1, context_window=None, chunking=None,
             seq_ordering='default', partition_epoch=None,
             shuffle_frames_of_nseqs=0, min_chunk_size=0,
             estimated_num_seqs=None):
  """
  :param str name: e.g. "train" or "eval"
  :param int window: features will be of dimension window * feature_dim,
    as we add a context-window around. not all datasets support this option.
  :param None|int|dict|NumbersDict context_window: will add this context for each chunk
  :param None|str|int|(int,int)|dict|(dict,dict) chunking: "chunk_size:chunk_step"
  :param str seq_ordering: "batching"-option in config. e.g. "default", "sorted" or "random".
    See self.get_seq_order_for_epoch() for more details.
  :param int|None partition_epoch:
  :param int shuffle_frames_of_nseqs: shuffles the frames. not always supported
  :param None|int estimated_num_seqs: for progress reporting in case the real num_seqs is unknown
  """
  self.name = name or ("dataset_id%s" % id(self))
  self.lock = RLock()  # Used when manipulating our data potentially from multiple threads.
  self.num_inputs = 0  # usually not used, but num_outputs instead, which is more generic
  self.num_outputs = None
  " :type: dict[str,(int,int)] "  # tuple is num-classes, len(shape).
  self.window = window
  self.seq_ordering = seq_ordering  # "default", "sorted" or "random". See self.get_seq_order_for_epoch().
  self.partition_epoch = partition_epoch or 1
  self.timestamps = None
  self.labels = {}
  """ :type: dict[str,list[str]] """
  self.nbytes = 0
  self.num_running_chars = 0  # CTC running chars.
  self._num_timesteps = 0
  self._num_codesteps = None
  " :type: int "  # Num output frames, could be different from input, seq2seq, ctc.
  self._num_seqs = 0
  self._estimated_num_seqs = estimated_num_seqs
  self.min_chunk_size = min_chunk_size

  if isinstance(chunking, str):
    if ":" in chunking:
      chunking = tuple(map(int, chunking.split(":")))
    else:
      chunking = int(chunking)
  if not isinstance(chunking, (tuple, list)):
    chunking = (chunking, None)
  chunk_size, chunk_step = chunking
  if chunk_size is None:
    chunk_size = 0
  assert isinstance(chunk_size, (int, dict, NumbersDict))
  chunk_size = NumbersDict(chunk_size)
  assert chunk_size == 0 or chunk_size.min_value() > 0, "chunk size must not be negative"
  self.chunk_size = chunk_size
  if chunk_step in (None, 0):
    chunk_step = self.chunk_size
  assert isinstance(chunk_step, (int, dict, NumbersDict))
  chunk_step = NumbersDict(chunk_step)
  if self.chunk_size != 0:
    assert sorted(chunk_step.keys()) == sorted(chunk_size.keys())
    assert chunk_step.max_value() > 0, "chunking step must be positive (for some key)"
  self.chunk_step = chunk_step
  if context_window is None:
    context_window = NumbersDict(0)
  elif isinstance(context_window, int):
    context_window = NumbersDict(broadcast_value=0, numbers_dict={"data": context_window})
  elif isinstance(context_window, dict):
    context_window = NumbersDict(broadcast_value=0, numbers_dict=context_window)
  assert isinstance(context_window, NumbersDict)
  self.context_window = context_window
  self.shuffle_frames_of_nseqs = shuffle_frames_of_nseqs
  self.epoch = None
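# A small scalar sketch of how the chunking option above is normalized (standalone reimplementation
# for illustration, not the Dataset method itself; the real code wraps the results in NumbersDict):
# a string "50:25" becomes (50, 25), a plain int becomes (size, None), and a missing step defaults
# to the chunk size, so chunks do not overlap unless a smaller step is given explicitly.

def _parse_chunking(chunking):
  if isinstance(chunking, str):
    chunking = tuple(map(int, chunking.split(":"))) if ":" in chunking else int(chunking)
  if not isinstance(chunking, (tuple, list)):
    chunking = (chunking, None)
  chunk_size, chunk_step = chunking
  chunk_size = chunk_size or 0
  if chunk_step in (None, 0):
    chunk_step = chunk_size
  return chunk_size, chunk_step

assert _parse_chunking("50:25") == (50, 25)
assert _parse_chunking(50) == (50, 50)
assert _parse_chunking(None) == (0, 0)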
class Batch:
  """
  A batch can consist of several sequences (= segments).
  This is basically just a list of BatchSeqCopyPart.
  """

  def __init__(self):
    self.max_num_frames_per_slice = NumbersDict(0)
    self.num_slices = 0
    # original data_shape = [0, 0], format (time,batch/slice)
    # data_shape = [max_num_frames_per_slice, num_slices]
    self.seqs = []; " :type: list[BatchSeqCopyPart] "

  def __repr__(self):
    return "<Batch start_seq:%r, #seqs:%i>" % (self.start_seq, len(self.seqs))

  def try_sequence_as_slice(self, length):
    """
    :param NumbersDict length: number of (time) frames
    :return: new shape which covers the old shape and one more data-batch, format (time,batch)
    :rtype: (NumbersDict,int)
    """
    return [NumbersDict.max([self.max_num_frames_per_slice, length]), self.num_slices + 1]

  def add_sequence_as_slice(self, seq_idx, seq_start_frame, length):
    """
    Adds one data-batch in an additional slice.
    :param NumbersDict length: number of (time) frames
    """
    self.max_num_frames_per_slice, self.num_slices = self.try_sequence_as_slice(length)
    self.seqs += [BatchSeqCopyPart(
      seq_idx=seq_idx,
      seq_start_frame=seq_start_frame,
      seq_end_frame=seq_start_frame + length,
      batch_slice=self.num_slices - 1,
      batch_frame_offset=0)]

  def add_frames(self, seq_idx, seq_start_frame, length):
    """
    Adds frames to all data-batches.
    Will add one data-batch if we don't have one yet.
    :type seq_start_frame: NumbersDict | int
    :param NumbersDict length: number of (time) frames
    """
    self.max_num_frames_per_slice += length
    self.num_slices = max(self.num_slices, 1)
    self.seqs += [BatchSeqCopyPart(
      seq_idx=seq_idx,
      seq_start_frame=seq_start_frame,
      seq_end_frame=seq_start_frame + length,
      batch_slice=0,
      batch_frame_offset=self.max_num_frames_per_slice - length)]

  def get_all_slices_num_frames(self):
    """
    Note that this is only an upper limit in case of data_shape[1] > 1
    because data_shape[0] is the max frame len of all seqs.
    """
    return self.max_num_frames_per_slice.max_value() * self.num_slices

  def get_total_num_frames(self):
    return sum([s.frame_length for s in self.seqs])

  @property
  def start_seq(self):
    if not self.seqs:
      return None
    return min([s.seq_idx for s in self.seqs])

  @property
  def end_seq(self):
    if not self.seqs:
      return None
    return max([s.seq_idx for s in self.seqs]) + 1

  def get_num_seqs(self):
    if not self.seqs:
      return 0
    return self.end_seq - self.start_seq
def run_inner(self):
  self.start_time = time.time()
  for device in self.devices:
    device.prepare(epoch=self.epoch, **self.get_device_prepare_args())
  self.initialize()
  terminal_width, _ = terminal_size()
  self.interactive = (log.v[3] and terminal_width >= 0)
  print("starting task", self.task, file=log.v5)

  for device in self.devices:
    device.eval_batch_idx = -1
    device.start_epoch_stats()
    device.num_frames = 0
    device.num_updates = 0
    device.tot = 0

  num_device_runs = 1 if self.share_batches else len(self.devices)
  deviceRuns = [
    self.DeviceBatchRun(self, [self.devices[i]] if not self.share_batches else self.devices)
    for i in range(num_device_runs)]

  results = {'batchess': [], 'results': [], 'num_frames': NumbersDict(0)}
  run_frames = NumbersDict(0)
  crashed = False
  assert num_device_runs > 0

  while True:
    if getattr(sys, "exited", False):
      # This happens when we exit Python.
      # Without this check, this thread would keep running until all exit handlers of Python are done.
      print("%s stopped" % self, file=log.v5)
      crashed = True
      break

    for i in range(num_device_runs):
      if deviceRuns[i].crashed or not deviceRuns[i].is_alive():
        crashed = True
        break
      if deviceRuns[i].finished:
        results['batchess'] += deviceRuns[i].result['batchess'][:]
        results['results'] += deviceRuns[i].result['results'][:]
        results['result_format'] = deviceRuns[i].result['result_format']
        deviceRuns[i].finished = False
    if crashed:
      break

    if run_frames.max_value() >= self.eval_batch_size or not self.batches.has_more():
      if all(not (dev.finished or dev.allocated or dev.processing) for dev in deviceRuns):
        results['num_frames'] = run_frames
        self.num_frames += run_frames
        if self.share_batches:
          run_frames *= len(self.devices)
        self.reduce(run_frames)
        self.eval_batch_idx += 1
        run_frames = NumbersDict(0)
        results['batchess'] = []
        results['results'] = []
        for device in self.devices:
          device.num_frames = 0
          device.num_updates = 0
        if not self.batches.has_more():
          break
      else:
        time.sleep(0.01)

    match = True
    while self.batches.has_more() and run_frames.max_value() < self.eval_batch_size and match:
      self.batch_idx = self.batches.get_current_batch_idx()
      if self.batch_idx < self.start_batch:
        self.batches.advance(1)
        break
      match = False
      for i in range(num_device_runs):
        if not deviceRuns[i].allocated:
          deviceRuns[i].allocate()
          run_frames += deviceRuns[i].run_frames
          match = True
          break
    if not match:
      time.sleep(0.01)

  for run in deviceRuns:
    run.stop()
  if crashed:
    return
  for device in self.devices:
    device.finish_epoch_stats()
  self.finalize()
  if self.interactive:
    progress_bar()
  self.elapsed = (time.time() - self.start_time)
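# A simplified, single-threaded sketch of the scheduling loop in run_inner() above (hypothetical
# helper; threads, crashes and share_batches are ignored): batches are handed to idle runners
# until roughly eval_batch_size frames are in flight, then the accumulated frame count is
# reduced and the accumulator is reset.

def _schedule_frames(batch_frame_counts, eval_batch_size):
  reduced = []                      # one entry per reduce() call
  run_frames = 0
  for frames in batch_frame_counts:
    run_frames += frames            # corresponds to deviceRuns[i].allocate() accounting
    if run_frames >= eval_batch_size:
      reduced.append(run_frames)    # corresponds to self.reduce(run_frames)
      run_frames = 0
  if run_frames:
    reduced.append(run_frames)      # final flush once no batches remain
  return reduced

# e.g. batches of 300, 400, 500 and 200 frames with eval_batch_size=600 -> two reduce() calls
assert _schedule_frames([300, 400, 500, 200], eval_batch_size=600) == [700, 700]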
def run_inner(self):
  self.start_time = time.time()
  for device in self.devices:
    device.prepare(epoch=self.epoch, **self.get_device_prepare_args())
  self.initialize()
  terminal_width, _ = terminal_size()
  self.interactive = (log.v[3] and terminal_width >= 0)
  print >> log.v5, "starting task", self.task

  for device in self.devices:
    device.eval_batch_idx = -1
    device.start_epoch_stats()
    device.num_frames = 0
    device.num_updates = 0
    device.tot = 0

  num_device_runs = 1 if self.share_batches else len(self.devices)
  deviceRuns = [
    self.DeviceBatchRun(self, [self.devices[i]] if not self.share_batches else self.devices)
    for i in range(num_device_runs)]

  results = {'batchess': [], 'results': [], 'num_frames': NumbersDict(0)}
  run_frames = NumbersDict(0)
  crashed = False

  while True:
    if getattr(sys, "exited", False):
      # This happens when we exit Python.
      # Without this check, this thread would keep running until all exit handlers of Python are done.
      print >> log.v5, "%s stopped" % self
      crashed = True
      break

    for i in range(num_device_runs):
      if deviceRuns[i].crashed:
        crashed = True
        break
      if deviceRuns[i].finished:
        results['batchess'] += deviceRuns[i].result['batchess'][:]
        results['results'] += deviceRuns[i].result['results'][:]
        results['result_format'] = deviceRuns[i].result['result_format']
        deviceRuns[i].finished = False
    if crashed:
      break

    if run_frames.max_value() >= self.eval_batch_size or not self.batches.has_more():
      if all(not (dev.finished or dev.allocated or dev.processing) for dev in deviceRuns):
        results['num_frames'] = run_frames
        self.num_frames += run_frames
        if self.share_batches:
          run_frames *= len(self.devices)
        self.reduce(run_frames)
        self.eval_batch_idx += 1
        run_frames = NumbersDict(0)
        results['batchess'] = []
        results['results'] = []
        for device in self.devices:
          device.num_frames = 0
          device.num_updates = 0
        if not self.batches.has_more():
          break
      else:
        time.sleep(0.01)

    match = True
    while self.batches.has_more() and run_frames.max_value() < self.eval_batch_size and match:
      self.batch_idx = self.batches.get_current_batch_idx()
      if self.batch_idx < self.start_batch:
        self.batches.advance(1)
        break
      match = False
      for i in range(num_device_runs):
        if not deviceRuns[i].allocated:
          deviceRuns[i].allocate()
          run_frames += deviceRuns[i].run_frames
          match = True
          break
    if not match:
      time.sleep(0.01)

  for run in deviceRuns:
    run.stop()
  if crashed:
    return
  for device in self.devices:
    device.finish_epoch_stats()
  self.finalize()
  if self.interactive:
    progress_bar()
  self.elapsed = (time.time() - self.start_time)
class DeviceBatchRun(threading.Thread):
  def __init__(self, parent, devices):
    """
    :type parent: TaskThread
    """
    threading.Thread.__init__(self, name="DeviceThread %s" % " ".join([dev.name for dev in devices]))
    self.alloc_devices = devices
    self.parent = parent
    self.devices_batches_idx = None
    self.run_start_batch_idx = None
    self.eval_info = None; " :type: dict[str] | None "
    self.allocated = False
    self.processing = False
    self.finished = True
    self.crashed = False
    self.num_frames = NumbersDict(0)
    self.run_frames = NumbersDict(0)
    self.daemon = True
    self.active = True
    self.result = {'batchess': [], 'results': [], 'result_format': None, 'num_frames': 0}
    if self.alloc_devices:
      self.start()

  def allocate(self):
    self.devices_batches_idx = self.parent.batches.get_current_batch_idx()
    self.devices_batches = self.parent.allocate_devices(self.alloc_devices)
    self.run_frames = NumbersDict(0)
    for batches, device in zip(self.devices_batches, self.alloc_devices):
      assert batches
      assert batches[0].seqs
      #assert batches[0].seqs[0].frame_length[1] > 0
      device.num_updates += 1 if not device.update_specs['block_size'] else int(ceil(
        sum([len(batch.seqs) for batch in batches]) / float(device.update_specs['block_size'])))
      self.run_frames += sum([batch.get_total_num_frames() for batch in batches])
    if self.parent.share_batches:
      self.run_frames /= len(self.alloc_devices)
    assert self.run_frames.max_value() > 0
    self.allocated = True

  def finish(self):
    """
    :returns whether everything is fine.
    """
    device_results, outputs_format = self.device_collect_results()
    if device_results is None:
      if not getattr(sys, "exited", False):
        print >> log.v3, "device crashed on batch", self.run_start_batch_idx
        self.parent.device_crash_batch = self.run_start_batch_idx
      self.crashed = True
      return False
    assert len(device_results) == len(self.alloc_devices) == len(self.devices_batches)

    if outputs_format and any([k.startswith("gparam:") for k in outputs_format]):
      # WARNING: this code is untested and likely broken!
      for i in range(len(self.alloc_devices)):
        res = Device.make_result_dict(device_results[i], outputs_format)
        self.alloc_devices[i].sync_net_train_params()
        devnet = self.alloc_devices[i].get_net_train_params(self.parent.network)
        vars = self.parent.network.get_all_params_vars()
        for p, q in zip(vars, devnet):
          p.set_value(q)
        gparams = {}
        for p in vars:
          gparams[p] = numpy.zeros(
            p.get_value(borrow=True, return_internal_type=True).shape, dtype=theano.config.floatX)
        for p in vars:
          q = res["gparam:%s" % p.name]
          if q.shape == p.get_value().shape:
            gparams[p] = q
          elif q.shape:
            print >> log.v2, "warning: shape for gradient does not match:", p.get_value().shape, q.shape
        self.parent.updater.setNetParamDeltas(gparams)
        self.parent.updater.update()
        self.alloc_devices[i].set_net_params(self.parent.network)

    self.result = {
      'batchess': self.devices_batches,
      'results': device_results,
      'result_format': outputs_format,
      'num_frames': self.num_frames}
    self.eval_info = self.parent.evaluate(**self.result)
    self.parent.lock.acquire()
    self.print_process()
    self.parent.lock.release()
    return True

  def run(self):
    try:
      while self.active and not getattr(sys, "exited", False):
        if self.allocated and not self.finished:
          self.device_run()
          self.num_frames = self.run_frames
          self.processing = True
          self.allocated = False
          self.finish()
          self.finished = True
          self.processing = False
        else:
          time.sleep(0.01)
    except BaseException:
      self.crashed = True
      sys.excepthook(*sys.exc_info())
    finally:
      self.finished = True

  def stop(self):
    self.active = False

  def device_run(self):
    batch_idx = self.run_start_batch_idx = self.devices_batches_idx
    assert len(self.alloc_devices) == len(self.devices_batches)
    for device, batches in zip(self.alloc_devices, self.devices_batches):
      if self.parent.network.recurrent:
        print >> log.v5, "running", device.targets["data"].shape[1], \
          "sequence slices (%i nts)" % (device.targets["data"].shape[0] * device.targets["data"].shape[1]),
      else:
        print >> log.v5, "running", device.targets["data"].shape[0] * device.targets["data"].shape[1], "frames",
      if device.num_batches == 1:
        print >> log.v5, "of batch %i" % batch_idx,
      else:
        print >> log.v5, "of batches %i-%i" % (batch_idx, batch_idx + device.num_batches - 1),
      print >> log.v5, "on device", device.name
      device.run(self.parent.task)
      #if not self.share
      batch_idx += device.num_batches

  def device_collect_results(self):
    device_results = []
    outputs_format = None
    for i, device in enumerate(self.alloc_devices):
      try:
        result, outputs_format_new = device.result()
      except RuntimeError:
        return None, None
      if result is None:
        return None, None
      assert isinstance(result, list)
      assert len(result) > 0  # we always expect to get some result
      if i >= 1:
        assert outputs_format == outputs_format_new, "We expect to always get the same output format."
      outputs_format = outputs_format_new
      device_results.append(result)
    return device_results, outputs_format

  def device_mem_usage_str(self, devices):
    """
    :type devices: list[Device.Device]
    :rtype: str | None
    """
    if not devices:
      return None
    mem_info = [device.get_memory_info() for device in devices]
    if len(mem_info) == 1 and mem_info[0] is None:
      return None
    mem_usage = [info.used if info else None for info in mem_info]
    s = ["%s MB" % (mem / (1024 * 1024)) if mem is not None else "unknown" for mem in mem_usage]
    return "/".join(s)

  def print_process(self):
    if not self.parent.interactive and not log.v[5]:
      return
    start_elapsed = time.time() - self.parent.start_time
    complete = self.parent.batches.completed_frac()
    assert complete > 0
    total_time_estimated = start_elapsed / complete
    remaining_estimated = total_time_estimated - start_elapsed
    if log.verbose[5]:
      mem_usage = self.device_mem_usage_str(self.alloc_devices)
      info = [
        self.parent.report_prefix,
        "batch %i" % self.run_start_batch_idx]
      if self.eval_info:  # Such as score.
        info += ["%s %s" % item for item in sorted(self.eval_info.items())]
      info += [
        "elapsed %s" % hms(start_elapsed),
        "exp. remaining %s" % hms(remaining_estimated),
        "complete %.02f%%" % (complete * 100)]
      if mem_usage:
        info += ["memory %s" % mem_usage]
      print >> log.v5, ", ".join(filter(None, info))
    if self.parent.interactive:
      progress_bar(complete, hms(remaining_estimated))
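# The remaining-time estimate in print_process() above is a simple proportional extrapolation
# from the fraction of batches completed. A standalone sketch (hypothetical helper) of the
# same arithmetic:

def _estimate_remaining(start_elapsed, completed_frac):
  assert completed_frac > 0
  total_time_estimated = start_elapsed / completed_frac
  return total_time_estimated - start_elapsed

# e.g. 120 s elapsed at 25% completion -> 480 s estimated total -> 360 s still expected
assert _estimate_remaining(120.0, 0.25) == 360.0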
def iterate_seqs(self, chunk_size=None, chunk_step=None, used_data_keys=None):
  """
  Takes chunking into consideration.
  :param int|NumbersDict chunk_size:
  :param int|NumbersDict chunk_step:
  :param set(str)|None used_data_keys:
  :return: generator which yields tuples (seq index, seq start, seq end)
  :rtype: list[(int,NumbersDict,NumbersDict)]
  """
  if chunk_size is None:
    chunk_size = self.chunk_size
  if chunk_step is None:
    chunk_step = self.chunk_step
  chunk_size = NumbersDict(chunk_size)
  chunk_step = NumbersDict(chunk_step)
  s = 0
  while self.is_less_than_num_seqs(s):
    length = self.get_seq_length(s)
    if chunk_size == 0:
      yield (s, NumbersDict.constant_like(0, numbers_dict=length), length)
    else:
      default_key = "data"
      if used_data_keys is not None:
        length = NumbersDict({k: length[k] for k in used_data_keys})
        if default_key not in used_data_keys:
          default_key = sorted(used_data_keys)[0]
        if chunk_step[default_key] == 0:  # allow some keys with zero chunk-step
          assert chunk_step.max_value() > 0
          default_key = [key for key in sorted(used_data_keys) if chunk_step[key] > 0][0]
      assert chunk_step[default_key] > 0
      t = NumbersDict.constant_like(0, numbers_dict=length)
      # There are usually the 'data' (input) and 'classes' (targets) data-keys in `length` but there can be others.
      # We expect them all of the same length so that we can do chunking.
      # In case that some length is 0 or 1,
      # we treat it special and always return the full seq repeated for every chunk.
      keys_with_full_seqs = []
      for key in length.keys():
        if chunk_step[key] == chunk_step[default_key]:
          if length[key] == length[default_key]:
            continue  # ok
        if length[key] <= 1:  # special case as explained above
          keys_with_full_seqs.append(key)
          continue
        if chunk_step[key] == chunk_step[default_key]:
          raise Exception("Chunking with multiple data-keys of different length: %r" % length)
        else:
          nr_of_full_chunks_key = (length[key] - chunk_size[key]) // chunk_step[key] + 1
          nr_of_full_chunks_default_key = (
            (length[default_key] - chunk_size[default_key]) // chunk_step[default_key] + 1)
          assert nr_of_full_chunks_key == nr_of_full_chunks_default_key
      while length[default_key] > t[default_key]:
        chunk_start = NumbersDict(t)
        chunk_end = NumbersDict.min([t + chunk_size, length])
        for key in keys_with_full_seqs:
          chunk_start[key] = 0
          chunk_end[key] = length[key]
        if length.value is None:
          chunk_start.value = None
          chunk_end.value = None
        yield (s, chunk_start, chunk_end)
        t += chunk_step
        if length[default_key] - t[default_key] <= self.min_chunk_size:
          break
    s += 1
def __init__(self,
             name=None, window=1, context_window=None, chunking=None,
             seq_ordering='default', partition_epoch=None, repeat_epoch=None,
             shuffle_frames_of_nseqs=0, min_chunk_size=0,
             estimated_num_seqs=None):
  """
  :param str name: e.g. "train" or "eval"
  :param int window: features will be of dimension window * feature_dim,
    as we add a context-window around. not all datasets support this option.
  :param None|int|dict|NumbersDict context_window: will add this context for each chunk
  :param None|str|int|(int,int)|dict|(dict,dict) chunking: "chunk_size:chunk_step"
  :param str seq_ordering: "batching"-option in config. e.g. "default", "sorted" or "random".
    See self.get_seq_order_for_epoch() for more details.
  :param int|None partition_epoch:
  :param int|None repeat_epoch: Repeat the sequences in an epoch this many times. Useful to scale the dataset
    relative to other datasets, e.g. when used in CombinedDataset. Not allowed to be used in combination with
    partition_epoch.
  :param int shuffle_frames_of_nseqs: shuffles the frames. not always supported
  :param None|int estimated_num_seqs: for progress reporting in case the real num_seqs is unknown
  """
  self.name = name or ("dataset_id%s" % id(self))
  self.lock = RLock()  # Used when manipulating our data potentially from multiple threads.
  self.rnd_seq_drop = None  # type: typing.Optional[Random]
  self.num_inputs = 0  # usually not used, but num_outputs instead, which is more generic
  self.num_outputs = None  # type: typing.Optional[typing.Dict[str,typing.Tuple[int,int]]]  # tuple is num-classes, len(shape).  # nopep8
  self.window = window
  self.seq_ordering = seq_ordering  # "default", "sorted" or "random". See self.get_seq_order_for_epoch().
  self.partition_epoch = partition_epoch or 1
  self.repeat_epoch = repeat_epoch or 1
  # There is probably no use case for combining the two, so avoid potential misconfiguration.
  assert self.partition_epoch == 1 or self.repeat_epoch == 1, (
    "Combining partition_epoch and repeat_epoch is prohibited.")
  self.timestamps = None
  self.labels = {}  # type: typing.Dict[str,typing.List[str]]
  self.weights = {}
  self.nbytes = 0
  self.num_running_chars = 0  # CTC running chars.
  self._num_timesteps = 0
  self._num_codesteps = None  # type: typing.Optional[int]  # Num output frames, could be different from input, seq2seq, ctc.  # nopep8
  self._num_seqs = 0
  self._estimated_num_seqs = estimated_num_seqs
  self.min_chunk_size = min_chunk_size

  if isinstance(chunking, str):
    if ":" in chunking:
      chunking = tuple(map(int, chunking.split(":")))
    else:
      chunking = int(chunking)
  if not isinstance(chunking, (tuple, list)):
    chunking = (chunking, None)
  chunk_size, chunk_step = chunking
  if chunk_size is None:
    chunk_size = 0
  assert isinstance(chunk_size, (int, dict, NumbersDict))
  chunk_size = NumbersDict(chunk_size)
  assert chunk_size == 0 or chunk_size.min_value() > 0, "chunk size must not be negative"
  self.chunk_size = chunk_size
  if chunk_step in (None, 0):
    chunk_step = self.chunk_size
  assert isinstance(chunk_step, (int, dict, NumbersDict))
  chunk_step = NumbersDict(chunk_step)
  if self.chunk_size != 0:
    assert sorted(chunk_step.keys()) == sorted(chunk_size.keys())
    assert chunk_step.max_value() > 0, "chunking step must be positive (for some key)"
  self.chunk_step = chunk_step
  if context_window is None:
    context_window = NumbersDict(0)
  elif isinstance(context_window, int):
    context_window = NumbersDict(broadcast_value=0, numbers_dict={"data": context_window})
  elif isinstance(context_window, dict):
    context_window = NumbersDict(broadcast_value=0, numbers_dict=context_window)
  assert isinstance(context_window, NumbersDict)
  self.context_window = context_window
  self.shuffle_frames_of_nseqs = shuffle_frames_of_nseqs
  self.epoch = None
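# A small sketch of the context_window normalization above (illustrative only; NumbersDict is
# assumed importable and to behave as used in the code, i.e. a broadcast default of 0 plus
# per-key values). An int applies the context only to the "data" key.

def _normalize_context_window(context_window):
  if context_window is None:
    return NumbersDict(0)
  if isinstance(context_window, int):
    return NumbersDict(broadcast_value=0, numbers_dict={"data": context_window})
  if isinstance(context_window, dict):
    return NumbersDict(broadcast_value=0, numbers_dict=context_window)
  assert isinstance(context_window, NumbersDict)
  return context_window

cw = _normalize_context_window(2)
assert cw["data"] == 2  # other keys fall back to the broadcast value 0 (assumed NumbersDict behavior)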