Example #1
class BatchSeqCopyPart:
  """
  A batch used for training in CRNN can consist of several parts from sequences,
   ordered in various ways. The dataset, depending on the configuration, can
   generate these. For the non-recurrent case, we usually concatenate
   them together into one slice. For the recurrent case, we have a single
   slice per sequence, or even multiple slices for a sequence in case of chunking.
  This class represents one single such part and where it is going to
   be stored in the batch.
  """

  def __init__(self, seq_idx, seq_start_frame, seq_end_frame,
               batch_slice, batch_frame_offset):
    """
    :type seq_idx: int
    :type seq_start_frame: NumbersDict | int
    :type seq_end_frame: NumbersDict | int
      Frame idx are input seq, output seq.
    :type batch_slice: int
    :type batch_frame_offset: int | NumbersDict
    """
    self.seq_idx = seq_idx
    self.seq_start_frame = NumbersDict(seq_start_frame)
    self.seq_end_frame = NumbersDict(seq_end_frame)
    self.batch_slice = batch_slice
    self.batch_frame_offset = NumbersDict(batch_frame_offset)
    assert self.seq_start_frame.has_values()
    assert self.seq_end_frame.has_values()
    assert self.batch_frame_offset.has_values()

  @property
  def frame_length(self):
    return self.seq_end_frame - self.seq_start_frame
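
# A minimal usage sketch of the class above, assuming RETURNN's Util.NumbersDict
# is importable; only operations that already appear in the examples on this
# page are used. The numbers are hypothetical.
from Util import NumbersDict

part = BatchSeqCopyPart(
    seq_idx=0,
    seq_start_frame=NumbersDict({"data": 0, "classes": 0}),
    seq_end_frame=NumbersDict({"data": 50, "classes": 7}),
    batch_slice=0,
    batch_frame_offset=0)
length = part.frame_length  # elementwise subtraction: {"data": 50, "classes": 7}
assert length.max_value() == 50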
Example #2
class BatchSeqCopyPart:
    """
  A batch used for training in CRNN can consist of several parts from sequences,
   ordered in various ways. The dataset, depending on the configuration, can
   generate these. For the non-recurrent case, we usually concatenate
   them together into one slice. For the recurrent case, we have a single
   slice per sequence, or even multiple slices for a sequence in case of chunking.
  This class represents one single such part and where it is going to
   be stored in the batch.
  """
    def __init__(self, seq_idx, seq_start_frame, seq_end_frame, batch_slice,
                 batch_frame_offset):
        """
    :type seq_idx: int
    :type seq_start_frame: NumbersDict | int
    :type seq_end_frame: NumbersDict | int
      Frame idx are input seq, output seq.
    :type batch_slice: int
    :type batch_frame_offset: int | NumbersDict
    """
        self.seq_idx = seq_idx
        self.seq_start_frame = NumbersDict(seq_start_frame)
        self.seq_end_frame = NumbersDict(seq_end_frame)
        self.batch_slice = batch_slice
        self.batch_frame_offset = NumbersDict(batch_frame_offset)
        assert self.seq_start_frame.has_values()
        assert self.seq_end_frame.has_values()
        assert self.batch_frame_offset.has_values()

    @property
    def frame_length(self):
        return self.seq_end_frame - self.seq_start_frame
Example #3
    def shapes_for_batches(self, batches, data_keys, batch_dim_first=False):
        """
    :type batches: list[EngineBatch.Batch]
    :rtype: dict[str,list[int]] | None
    """
        all_data_keys = set(data_keys) | {"data"}

        # The final device.data.shape is in format (time,batch,feature).
        shape = [NumbersDict(0), 0]  # time,batch
        for batch in batches:
            shape = [
                NumbersDict.max([shape[0], batch.max_num_frames_per_slice]),
                shape[1] + batch.num_slices
            ]
        if shape[1] == 0:
            return None
        assert shape[0].max_value() > 0
        # Theano has some buggy behaviour with tensors with some shape of zero.
        # We will just use one dummy frame in that case.
        # The index will stay zero in that case. (see EngineUtil.assign_dev_data())
        # However, also see the OutputLayer.output_index() behavior for forwarding.
        for k in all_data_keys:
            shape[0][k] = max(shape[0][k], 1)

        d = {k: [shape[0][k], shape[1]] for k in all_data_keys}
        for k in d:
            d[k] += self.get_data_shape(k)

        if batch_dim_first:
            # Just flip the first two dimensions.
            d = {k: [shp[1], shp[0]] + shp[2:] for (k, shp) in d.items()}
        return d
Example #4
 def __init__(
     self,
     name="dataset",
     window=1,
     context_window=None,
     chunking="0",
     seq_ordering='default',
     shuffle_frames_of_nseqs=0,
     min_chunk_size=0,
     estimated_num_seqs=None,
 ):
     """
 :param str name: e.g. "train" or "eval"
 :param int window: features will be of dimension window * feature_dim, as we add a context-window around.
   not all datasets support this option.
 :param None|int|dict|NumbersDict context_window: will add this context for each chunk
 :param str chunking: "chunk_size:chunk_step"
 :param str seq_ordering: "batching"-option in config. e.g. "default", "sorted" or "random".
   See self.get_seq_order_for_epoch() for more details.
 :param int shuffle_frames_of_nseqs: shuffles the frames. not always supported
 :param None|int estimated_num_seqs: for progress reporting in case the real num_seqs is unknown
 """
     self.name = name
      self.lock = RLock()  # Used when manipulating our data potentially from multiple threads.
     self.num_inputs = 0
     self.num_outputs = None
     " :type: dict[str,(int,int)] "  # tuple is num-classes, len(shape).
     self.window = window
     self.seq_ordering = seq_ordering  # "default", "sorted" or "random". See self.get_seq_order_for_epoch().
     self.timestamps = None
     self.labels = {}
     """ :type: dict[str,list[str]] """
     self.nbytes = 0
     self.num_running_chars = 0  # CTC running chars.
     self._num_timesteps = 0
     self._num_codesteps = None
     " :type: int "  # Num output frames, could be different from input, seq2seq, ctc.
     self._num_seqs = 0
     self._estimated_num_seqs = estimated_num_seqs
     self.chunk_size = int(chunking.split(':')[0])
     self.min_chunk_size = min_chunk_size
     if ':' in chunking:
         self.chunk_step = int(chunking.split(':')[1])
         assert self.chunk_step > 0, "chunking step must be positive"
     else:
         self.chunk_step = self.chunk_size
     assert self.chunk_size >= 0, "chunk size must not be negative"
     if context_window is None:
         context_window = NumbersDict(0)
     elif isinstance(context_window, int):
         context_window = NumbersDict(broadcast_value=0,
                                      numbers_dict={"data": context_window})
     elif isinstance(context_window, dict):
         context_window = NumbersDict(broadcast_value=0,
                                      numbers_dict=context_window)
     assert isinstance(context_window, NumbersDict)
     self.context_window = context_window
     self.shuffle_frames_of_nseqs = shuffle_frames_of_nseqs
     self.epoch = None
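
# Worked example of the `chunking` string parsing in the constructor above
# (hypothetical value):
chunking = "50:25"  # "chunk_size:chunk_step"
chunk_size = int(chunking.split(':')[0])                                      # 50
chunk_step = int(chunking.split(':')[1]) if ':' in chunking else chunk_size  # 25
assert chunk_size >= 0 and chunk_step > 0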
Example #5
 def __init__(self, parent, devices):
     """
 :type parent: TaskThread
 """
     threading.Thread.__init__(self,
                               name="DeviceThread %s" %
                               " ".join([dev.name for dev in devices]))
     self.alloc_devices = devices
     self.parent = parent
     self.devices_batches_idx = None
     self.run_start_batch_idx = None
     self.eval_info = None
     " :type: dict[str] | None "
     self.allocated = False
     self.processing = False
     self.finished = True
     self.crashed = False
     self.num_frames = NumbersDict(0)
     self.run_frames = NumbersDict(0)
     self.daemon = True
     self.active = True
     self.result = {
         'batchess': [],
         'results': [],
         'result_format': None,
         'num_frames': 0
     }
     if self.alloc_devices:
         self.start()
Example #6
    def add_frames(self,
                   seq_idx,
                   seq_start_frame,
                   length,
                   frame_dim_corresponds=True):
        """
    Adds frames to all data-batches.
    Will add one data-batch if we don't have one yet.

    :param int seq_idx:
    :param NumbersDict|int seq_start_frame:
    :param NumbersDict length: number of (time) frames
    :param bool frame_dim_corresponds: if the batch frame offset should always be the same (max value) for all keys
    """
        batch_frame_offset = self.max_num_frames_per_slice
        if frame_dim_corresponds:
            batch_frame_offset = NumbersDict(batch_frame_offset.max_value())
            self.max_num_frames_per_slice = NumbersDict(
                self.max_num_frames_per_slice.max_value())
        self.max_num_frames_per_slice += length
        self.num_slices = max(self.num_slices, 1)
        self.seqs += [
            BatchSeqCopyPart(seq_idx=seq_idx,
                             seq_start_frame=seq_start_frame,
                             seq_end_frame=seq_start_frame + length,
                             batch_slice=0,
                             batch_frame_offset=batch_frame_offset)
        ]
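
# Sketch of the offset handling above: with frame_dim_corresponds=True, the
# per-key offsets collapse to one broadcast value (assuming RETURNN's
# Util.NumbersDict; hypothetical numbers).
from Util import NumbersDict

max_num_frames_per_slice = NumbersDict({"data": 11, "classes": 3})
batch_frame_offset = NumbersDict(max_num_frames_per_slice.max_value())
# batch_frame_offset now broadcasts 11 for every key, so all data streams
# of the next part start at the same frame offset.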
Example #7
 def __init__(self):
     self.max_num_frames_per_slice = NumbersDict(0)
     self.num_slices = 0
     # original data_shape = [0, 0], format (time,batch/slice)
     #          data_shape = [max_num_frames_per_slice, num_slices]
     self.seqs = []
     " :type: list[BatchSeqCopyPart] "
Example #8
 def allocate(self):
   self.devices_batches_idx = self.parent.batches.get_current_batch_idx()
   self.allocated_devices_batches = self.parent.allocate_devices(self.alloc_devices)
   self.run_frames = NumbersDict(0)
   for batches, device in zip(self.allocated_devices_batches, self.alloc_devices):
     assert batches
     assert batches[0].seqs
     #assert batches[0].seqs[0].frame_length[1] > 0
      device.num_updates += 1 if not device.update_specs['block_size'] else int(
        ceil(sum([len(batch.seqs) for batch in batches]) / float(device.update_specs['block_size'])))
     self.run_frames += sum([batch.get_total_num_frames() for batch in batches])
   if self.parent.share_batches:
     self.run_frames /= len(self.alloc_devices)
   assert self.run_frames.max_value() > 0
   self.allocated = True
Example #9
def shapes_for_batches(batches,
                       data_keys,
                       dataset=None,
                       extern_data=None,
                       enforce_min_len1=False):
    """
  :param list[EngineBatch.Batch] batches:
  :param list[str] data_keys:
  :param Dataset dataset:
  :param TFNetwork.ExternData extern_data: detailed data description. only used for TensorFlow
  :param bool enforce_min_len1:
  :rtype: dict[str,list[int]] | None
  """
    assert dataset or extern_data
    all_data_keys = set(data_keys)

    # The final device.data.shape is in format (time,batch,feature) in case of Theano.
    shape = [NumbersDict(0), 0]  # time,batch
    for batch in batches:
        shape = [
            NumbersDict.max([shape[0], batch.max_num_frames_per_slice]),
            shape[1] + batch.num_slices
        ]
    if shape[1] == 0:
        return None
    assert shape[0].max_value() > 0
    # Theano has some buggy behaviour with tensors with some shape of zero.
    # We will just use one dummy frame in that case.
    # The index will stay zero in that case. (see EngineUtil.assign_dev_data())
    # However, also see the OutputLayer.output_index() behavior for forwarding.
    if not extern_data or enforce_min_len1:  # not needed if TensorFlow is used
        for k in all_data_keys:
            shape[0][k] = max(shape[0][k], 1)

    if extern_data:
        d = {}
        for k in all_data_keys:
            data_shape = list(extern_data.data[k].batch_shape)
            data_shape[extern_data.data[k].batch_dim_axis] = shape[1]
            if extern_data.data[k].have_time_axis():
                data_shape[extern_data.data[k].time_dim_axis] = shape[0][k]
            assert all([n is not None
                        for n in data_shape]), "data %r" % extern_data.data[k]
            d[k] = data_shape
    else:  # shape via dataset
        d = {k: [shape[0][k], shape[1]] for k in all_data_keys}
        for k in all_data_keys:
            d[k] += dataset.get_data_shape(k)
    return d
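
# Minimal trace of the shape accumulation above with two hypothetical batches
# (assuming RETURNN's Util.NumbersDict):
from Util import NumbersDict

shape = [NumbersDict(0), 0]  # time,batch
for frames, slices in [(NumbersDict({"data": 7, "classes": 3}), 2),
                       (NumbersDict({"data": 5, "classes": 9}), 1)]:
    shape = [NumbersDict.max([shape[0], frames]), shape[1] + slices]
# shape[0] is the per-key max time: {"data": 7, "classes": 9}; shape[1] == 3.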
Example #10
 def num_frames(self):
     """
 :rtype: NumbersDict
 """
     d = {"data": self.features.shape[0]}
     d.update({k: self.targets[k].shape[0] for k in self.targets.keys()})
     return NumbersDict(d)
Example #11
 def __init__(self, tf_session, dataset, batches, extern_data, data_keys=None, capacity=10, have_fixed_batch_size=False):
   """
   :param tf.Session tf_session:
   :param Dataset.Dataset dataset:
   :param BatchSetGenerator batches:
   :param ExternData extern_data:
   :param set(str)|None data_keys:
   :param int capacity:
   """
   self.tf_session = tf_session
   self.coord = tf.train.Coordinator()
   self.dataset = dataset
   self.batches = batches
   self.extern_data = extern_data
   if data_keys is None:
     data_keys = extern_data.data.keys()
   self.data_keys = sorted(data_keys)
   self.state_change_cond = Condition()
   self.queue = None  # type: Queue
   self.tf_queue = None  # type: tf.FIFOQueue
   self._have_fixed_batch_size = have_fixed_batch_size
   if have_fixed_batch_size:
     # TODO... also cache this ....
     self.tf_queue = tf.FIFOQueue(capacity=capacity, **extern_data.get_queue_args(with_batch_dim=True))
   else:
     self.queue = Queue(maxsize=capacity)
   self.thread = None  # type: Thread
   self.num_frames = NumbersDict(0)
   self.thread_finished = False
   self.reached_end = False
Example #12
 def num_frames(self):
   """
   :rtype: NumbersDict
   """
   d = {k: (v.shape[0] if v.ndim >= 1 else 1)
        for (k, v) in self.features.items()}
   return NumbersDict(d)
Example #13
 def try_sequence_as_slice(self, length):
   """
   :param NumbersDict length: number of (time) frames
   :return: new shape which covers the old shape and one more data-batch, format (time,batch)
   :rtype: (NumbersDict,int)
   """
   return [NumbersDict.max([self.max_num_frames_per_slice, length]), self.num_slices + 1]
Example #14
 def iterate_seqs(self,
                  chunk_size=None,
                  chunk_step=None,
                  used_data_keys=None):
     """
 Takes chunking into consideration.
 :param int chunk_size:
 :param int chunk_step:
 :param set(str)|None used_data_keys:
 :return: generator which yields tuples (seq index, seq start, seq end)
 :rtype: list[(int,NumbersDict,NumbersDict)]
 """
     if chunk_size is None:
         chunk_size = self.chunk_size
     if chunk_step is None:
         chunk_step = self.chunk_step
     s = 0
     while self.is_less_than_num_seqs(s):
         length = self.get_seq_length(s)
         if chunk_size == 0:
             yield (s, length.constant_like(0), length)
         else:
             if used_data_keys is not None:
                  length = NumbersDict({k: length[k] for k in used_data_keys})
             t = length.constant_like(0)
             default_key = "data"
             # There are usually the 'data' (input) and 'classes' (targets) data-keys in `length` but there can be others.
             # We expect them all of the same length so that we can do chunking.
             # In case that some length is 0 or 1,
             # we treat it special and always return the full seq repeated for every chunk.
             keys_with_full_seqs = []
             for key in length.keys():
                 if length[key] == length[default_key]:
                     continue  # ok
                 if length[key] <= 1:
                     keys_with_full_seqs.append(key)
                     continue
                 raise Exception(
                     "Chunking with multiple data-keys of different length: %r"
                     % length)
             while length[default_key] > t[default_key]:
                 chunk_start = NumbersDict(t)
                 chunk_end = NumbersDict.min([t + chunk_size, length])
                 for key in keys_with_full_seqs:
                     chunk_start[key] = 0
                     chunk_end[key] = length[key]
                 if length.value is None:
                     chunk_start.value = None
                     chunk_end.value = None
                 yield (s, chunk_start, chunk_end)
                 t += chunk_step
                  if length[default_key] - t[default_key] <= self.min_chunk_size:
                      break
         s += 1
Example #15
 def get_seq_length(self, seq_idx):
     """
 :rtype: NumbersDict
 """
     input_len, output_len = self.get_seq_length_2d(seq_idx)
     d = {"data": input_len}
     d.update({k: output_len for k in self.get_target_list()})
     return NumbersDict(d)
Example #16
 def _iterate_seqs(self, chunk_size, chunk_step, used_data_keys):
     """
 Takes chunking into consideration.
 :type chunk_size: int
 :type chunk_step: int
 :param set(str)|None used_data_keys:
 :return: index, and seq start, seq end
 :rtype: list[(int,NumbersDict,NumbersDict)]
 """
     s = 0
     while self.is_less_than_num_seqs(s):
         length = self.get_seq_length(s)
         if chunk_size == 0:
             yield (s, NumbersDict(0), length)
         else:
             if used_data_keys is not None:
                 length = length.copy()
                 for key in list(length.keys()):
                     if key not in used_data_keys:
                         del length[key]
             t = 0
             default_key = "data"
             # There are usually the 'data' (input) and 'classes' (targets) data-keys in `length` but there can be others.
             # We expect them all of the same length so that we can do chunking.
             # In case that some length is 0 or 1,
             # we treat it special and always return the full seq repeated for every chunk.
             keys_with_full_seqs = []
             for key in length.keys():
                 if length[key] == length[default_key]:
                     continue  # ok
                 if length[key] <= 1:
                     keys_with_full_seqs.append(key)
                     continue
                 raise Exception(
                     "Chunking with multiple data-keys of different length: %r"
                     % length)
             while t < length[default_key]:
                  end = min(t + chunk_size, length[default_key])
                  chunk_start = NumbersDict(t)
                  chunk_end = NumbersDict(end)
                 for key in keys_with_full_seqs:
                     chunk_start[key] = 0
                     chunk_end[key] = length[key]
                 yield (s, chunk_start, chunk_end)
                 t += chunk_step
         s += 1
Example #17
 def get_seq_length(self, seq_idx):
   """
   :rtype: NumbersDict
   """
   lengths = self.get_seq_length_nd(seq_idx)
   d = {"data": lengths[0]}
   for k, l in zip(self.target_keys, lengths[1:]):
     d[k] = l
   return NumbersDict(d)
Example #18
 def num_frames(self):
     """
 :rtype: NumbersDict
 """
     d = {"data": self.features.shape[0]}
     d.update({
         k: (v.shape[0] if v.ndim >= 1 else 1)
         for (k, v) in self.targets.items()
     })
     return NumbersDict(d)
Example #19
 def get_seq_length(self, seq_idx):
     """
 :param int seq_idx:
 :rtype: NumbersDict
 :returns the len of the input features and the len of the target sequence.
 """
     assert self.__class__.get_seq_length_2d is not Dataset.get_seq_length_2d, "Override get_seq_length."
     input_len, output_len = self.get_seq_length_2d(seq_idx)
     d = {"data": input_len}
     d.update({k: output_len for k in self.get_target_list()})
     return NumbersDict(d)
Example #20
  def __init__(self,
               seq_list_file, seq_lens_file,
               datasets,
               data_map, data_dims,
               data_dtypes=None,
               window=1, **kwargs):
    """
    :param str seq_list_file: filename. line-separated
    :param str seq_lens_file: filename. json. dict[str,dict[str,int]], seq-tag -> data-key -> len
    :param dict[str,dict[str]] datasets: dataset-key -> dataset-kwargs. including keyword 'class' and maybe 'files'
    :param dict[str,(str,str)] data_map: self-data-key -> (dataset-key, dataset-data-key).
      Should contain 'data' as key. Also defines the target-list, which is all except 'data'.
    :param dict[str,(int,int)] data_dims: self-data-key -> data-dimension, len(shape) (1 ==> sparse repr).
    :param dict[str,str] data_dtypes: self-data-key -> dtype. automatic if not specified
    """
    assert window == 1  # not implemented
    super(MetaDataset, self).__init__(**kwargs)
    assert self.shuffle_frames_of_nseqs == 0  # not implemented. anyway only for non-recurrent nets

    self.seq_list_original = open(seq_list_file).read().splitlines()
    self.tag_idx = {tag: idx for (idx, tag) in enumerate(self.seq_list_original)}
    self._num_seqs = len(self.seq_list_original)

    self.data_map = data_map
    self.dataset_keys = set([m[0] for m in self.data_map.values()]); ":type: set[str]"
    self.data_keys = set(self.data_map.keys()); ":type: set[str]"
    assert "data" in self.data_keys
    self.target_list = sorted(self.data_keys - {"data"})

    data_dims = convert_data_dims(data_dims)
    self.data_dims = data_dims
    assert "data" in data_dims
    for key in self.target_list:
      assert key in data_dims
    self.num_inputs = data_dims["data"][0]
    self.num_outputs = data_dims

    self.data_dtypes = {data_key: _select_dtype(data_key, data_dims, data_dtypes) for data_key in self.data_keys}

    if seq_lens_file:
      seq_lens = load_json(filename=seq_lens_file)
      assert isinstance(seq_lens, dict)
      # dict[str,NumbersDict], seq-tag -> data-key -> len
      self._seq_lens = {tag: NumbersDict(l) for (tag, l) in seq_lens.items()}
    else:
      self._seq_lens = None

    if self._seq_lens:
      self._num_timesteps = sum([self._seq_lens[s] for s in self.seq_list_original])
    else:
      self._num_timesteps = None

    # Will only init the needed datasets.
    self.datasets = {key: init_dataset(datasets[key]) for key in self.dataset_keys}
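
# Hypothetical constructor arguments in the format the docstring above
# describes (all names, files and dims are made up for illustration):
datasets = {"features": {"class": "HDFDataset", "files": ["features.hdf"]},
            "align": {"class": "HDFDataset", "files": ["align.hdf"]}}
data_map = {"data": ("features", "data"),     # defines the input
            "classes": ("align", "classes")}  # everything else is a target
data_dims = {"data": (40, 2),        # dense, 40-dim features
             "classes": (5000, 1)}   # 1 => sparse representation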
Example #21
def analyze_data(config):  # pylint: disable=redefined-outer-name
    """
  :param Config config:
  """
    dss = config.value('analyze_dataset', 'train')
    ds = {"train": train_data, "dev": dev_data, "eval": eval_data}[dss]
    epoch = config.int('epoch', 1)
    print("Analyze dataset", dss, "epoch", epoch, file=log.v1)
    ds.init_seq_order(epoch=epoch)
    stat_prefix = config.value('statistics_save_prefix', 'statistics')
    dtype = config.value('statistics_dtype', 'float64')
    target = config.value('target', 'classes')
    data_key = config.value('data_key', 'data')
    assert ds.is_data_sparse(target), "need for prior calculation"
    assert not ds.is_data_sparse(data_key), "needed for mean/var estimation"
    from Util import inplace_increment, progress_bar_with_time, NumbersDict

    priors = numpy.zeros((ds.get_data_dim(target), ), dtype=dtype)
    mean = numpy.zeros((ds.get_data_dim(data_key), ), dtype=dtype)
    mean_sq = numpy.zeros((ds.get_data_dim(data_key), ), dtype=dtype)
    total_targets_len = 0
    total_data_len = 0

    # Note: This is not stable! See :class:`Util.Stats` for a better alternative.
    seq_idx = 0
    while ds.is_less_than_num_seqs(seq_idx):
        progress_bar_with_time(ds.get_complete_frac(seq_idx))
        ds.load_seqs(seq_idx, seq_idx + 1)
        targets = ds.get_data(seq_idx, target)
        inplace_increment(priors, targets, 1)
        total_targets_len += targets.shape[0]
        data = ds.get_data(seq_idx, data_key)
        new_total_data_len = total_data_len + data.shape[0]
        f = float(total_data_len) / new_total_data_len
        mean = mean * f + numpy.sum(data, axis=0) * (1.0 - f)
        mean_sq = mean_sq * f + numpy.sum(data * data, axis=0) * (1.0 - f)
        total_data_len = new_total_data_len
        seq_idx += 1
    log_priors = numpy.log(priors)
    log_priors -= numpy.log(NumbersDict(ds.get_num_timesteps())[target])
    std_dev = numpy.sqrt(mean_sq - mean * mean)
    print("Finished. %i total target frames, %i total data frames" %
          (total_targets_len, total_data_len),
          file=log.v1)
    priors_fn = stat_prefix + ".log_priors.txt"
    mean_fn = stat_prefix + ".mean.txt"
    std_dev_fn = stat_prefix + ".std_dev.txt"
    print("Dump priors to", priors_fn, file=log.v1)
    numpy.savetxt(priors_fn, log_priors)
    print("Dump mean to", mean_fn, file=log.v1)
    numpy.savetxt(mean_fn, mean)
    print("Dump std dev to", std_dev_fn, file=log.v1)
    numpy.savetxt(std_dev_fn, std_dev)
    print("Done.", file=log.v1)
Example #22
 def get_seq_length(self, seq_idx):
     """
 :rtype: NumbersDict
 """
     lengths = self.get_seq_length_2d(seq_idx)
     d = {"data": lengths[0]}
     for k, l in zip(self.target_keys, lengths[1:]):
         d[k] = l
     #d.update(self.get_output_lengths)
     #d.update({k: output_len for k in self.get_target_list()})
     return NumbersDict(d)
Example #23
 def allocate(self):
   self.devices_batches_idx = self.parent.batches.get_current_batch_idx()
   assert len(self.alloc_devices) == 1
   self.devices_batches = [None] * len(self.alloc_devices)
   self.num_frames = NumbersDict(13)
   batch_dim = 1
   self.alloc_devices[0].alloc_data(shapes={
     "data": (self.num_frames["data"], batch_dim, config.typed_value("num_inputs")),
     "classes": (self.num_frames["classes"], batch_dim)})
   self.parent.num_frames += self.num_frames
   self.allocated = True
Example #24
  def add_frames(self, seq_idx, seq_start_frame, length, frame_dim_corresponds=True):
    """
    Adds frames to all data-batches.
    Will add one data-batch if we don't have one yet.

    :param int seq_idx:
    :param NumbersDict|int seq_start_frame:
    :param NumbersDict length: number of (time) frames
    :param bool frame_dim_corresponds: if the batch frame offset should always be the same (max value) for all keys
    """
    batch_frame_offset = self.max_num_frames_per_slice
    if frame_dim_corresponds:
      batch_frame_offset = NumbersDict(batch_frame_offset.max_value())
      self.max_num_frames_per_slice = NumbersDict(self.max_num_frames_per_slice.max_value())
    self.max_num_frames_per_slice += length
    self.num_slices = max(self.num_slices, 1)
    self.seqs += [BatchSeqCopyPart(seq_idx=seq_idx,
                                   seq_start_frame=seq_start_frame,
                                   seq_end_frame=seq_start_frame + length,
                                   batch_slice=0,
                                   batch_frame_offset=batch_frame_offset)]
Example #25
 def __init__(self,
              task,
              network,
              devices,
              data,
              batches,
              eval_batch_size=0,
              start_batch=0,
              share_batches=False,
              reduction_rate=1.0,
              report_prefix=None,
              exclude=None,
              epoch=None):
     """
   :type task: str
   :type network: Network.LayerNetwork
   :type devices: list[Device.Device]
   :type data: Dataset.Dataset
   :type batches: EngineBatch.BatchSetGenerator
   :type start_batch: int
   :param str report_prefix: such as epoch or so. only for reporting
   """
     threading.Thread.__init__(self, name="TaskThread %s" % task)
     assert len(devices) > 0
     if eval_batch_size == 0:
         eval_batch_size = sys.maxsize
     self.share_batches = share_batches
     self.eval_batch_size = eval_batch_size
     self.eval_batch_idx = 0
     self.start_batch = start_batch
     self.reduction_rate = reduction_rate
     self.devices = devices
     self.network = network
     self.batches = batches
     self.exclude = exclude
     self.task = task
     self.data = data
     self.daemon = True
     self.elapsed = 0
     self.finalized = False
     self.score = {}
     self.error = {}
     self.results = {}
     self.num_frames = NumbersDict(0)
     self.batch_idx = None
     " :type: int | None "
     self.device_crash_batch = None
     " :type: int | None "
     self.report_prefix = report_prefix or self.task
     self.epoch = epoch
     self.lock = threading.Lock()
     self.start()
Example #26
 def get_start_end_frames_full_seq(self, seq_idx):
     """
 :param int seq_idx:
 :return: (start,end) frame, taking context_window into account
 :rtype: (NumbersDict,NumbersDict)
 """
     end = self.get_seq_length(seq_idx)
     start = NumbersDict.constant_like(0, numbers_dict=end)
     ctx_lr = self._get_context_window_left_right()
     if ctx_lr:
         start -= ctx_lr[0]
         end += ctx_lr[1]
     return start, end
Example #27
 def get_start_end_frames_full_seq(self, seq_idx):
   """
   :param int seq_idx:
   :return: (start,end) frame, taking context_window into account
   :rtype: (NumbersDict,NumbersDict)
   """
   end = self.get_seq_length(seq_idx)
   start = NumbersDict.constant_like(0, numbers_dict=end)
   ctx_lr = self._get_context_window_left_right()
   if ctx_lr:
     start -= ctx_lr[0]
     end += ctx_lr[1]
   return start, end
Example #28
 def allocate(self):
   self.devices_batches_idx = self.parent.batches.get_current_batch_idx()
   self.devices_batches = self.parent.allocate_devices(self.alloc_devices)
   self.run_frames = NumbersDict(0)
    for batches, device in zip(self.devices_batches, self.alloc_devices):
      assert batches
      assert batches[0].seqs
      #assert batches[0].seqs[0].frame_length[1] > 0
      device.num_updates += 1 if not device.update_specs['block_size'] else int(
        ceil(sum([len(batch.seqs) for batch in batches]) / float(device.update_specs['block_size'])))
     self.run_frames += sum([batch.get_total_num_frames() for batch in batches])
   if self.parent.share_batches:
     self.run_frames /= len(self.alloc_devices)
   assert self.run_frames.max_value() > 0
   self.allocated = True
Example #29
def analyze_data(config):
    dss = config.value('analyze_dataset', 'train')
    ds = {"train": train_data, "dev": dev_data, "eval": eval_data}[dss]
    epoch = config.int('epoch', 1)
    print >> log.v1, "Analyze dataset", dss, "epoch", epoch
    ds.init_seq_order(epoch=epoch)
    stat_prefix = config.value('statistics_save_prefix', 'statistics')
    dtype = config.value('statistics_dtype', 'float64')
    target = config.value('target', 'classes')
    data_key = config.value('data_key', 'data')
    assert ds.is_data_sparse(target), "need for prior calculation"
    assert not ds.is_data_sparse(data_key), "needed for mean/var estimation"
    from Util import inplace_increment, progress_bar_with_time, NumbersDict

    priors = numpy.zeros((ds.get_data_dim(target), ), dtype=dtype)
    mean = numpy.zeros((ds.get_data_dim(data_key), ), dtype=dtype)
    mean_sq = numpy.zeros((ds.get_data_dim(data_key), ), dtype=dtype)
    total_targets_len = 0
    total_data_len = 0

    seq_idx = 0
    while ds.is_less_than_num_seqs(seq_idx):
        progress_bar_with_time(ds.get_complete_frac(seq_idx))
        ds.load_seqs(seq_idx, seq_idx + 1)
        targets = ds.get_data(seq_idx, target)
        inplace_increment(priors, targets, 1)
        total_targets_len += targets.shape[0]
        data = ds.get_data(seq_idx, data_key)
        new_total_data_len = total_data_len + data.shape[0]
        f = float(total_data_len) / new_total_data_len
        mean = mean * f + numpy.sum(data, axis=0) * (1.0 - f)
        mean_sq = mean_sq * f + numpy.sum(data * data, axis=0) * (1.0 - f)
        total_data_len = new_total_data_len
        seq_idx += 1
    log_priors = numpy.log(priors)
    log_priors -= numpy.log(NumbersDict(ds.get_num_timesteps())[target])
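    # Note: despite the name, this is the standard deviation, not the variance
    # (the newer variant in Example #21 names it std_dev accordingly).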
    var = numpy.sqrt(mean_sq - mean * mean)
    print >> log.v1, "Finished. %i total target frames, %i total data frames" % (
        total_targets_len, total_data_len)
    priors_fn = stat_prefix + ".log_priors.txt"
    mean_fn = stat_prefix + ".mean.txt"
    var_fn = stat_prefix + ".var.txt"
    print >> log.v1, "Dump priors to", priors_fn
    numpy.savetxt(priors_fn, log_priors)
    print >> log.v1, "Dump mean to", mean_fn
    numpy.savetxt(mean_fn, mean)
    print >> log.v1, "Dump var to", var_fn
    numpy.savetxt(var_fn, var)
    print >> log.v1, "Done."
Example #30
 def _get_context_window_left_right(self):
     """
 :return: (ctx_left, ctx_right)
 :rtype: None|(NumbersDict,NumbersDict)
 """
     if self.context_window:
         # One less because the original frame also counts, and context_window=1 means that we just have that single frame.
         # ctx_total is how much frames we add additionally.
         ctx_total = NumbersDict.max([self.context_window, 1]) - 1
         # In case ctx_total is odd / context_window is even, we have to decide where to put one more frame.
         # To keep it consistent with e.g. 1D convolution with a kernel of even size, we add one more to the right.
         # See test_tfconv1d_evensize().
         ctx_left = ctx_total // 2
         ctx_right = ctx_total - ctx_left
         return ctx_left, ctx_right
     else:
         return None
Example #31
 def _get_context_window_left_right(self):
   """
   :return: (ctx_left, ctx_right)
   :rtype: None|(NumbersDict,NumbersDict)
   """
   if self.context_window:
     # One less because the original frame also counts, and context_window=1 means that we just have that single frame.
     # ctx_total is how much frames we add additionally.
     ctx_total = NumbersDict.max([self.context_window, 1]) - 1
     # In case ctx_total is odd / context_window is even, we have to decide where to put one more frame.
     # To keep it consistent with e.g. 1D convolution with a kernel of even size, we add one more to the right.
     # See test_tfconv1d_evensize().
     ctx_left = ctx_total // 2
     ctx_right = ctx_total - ctx_left
     return ctx_left, ctx_right
   else:
     return None
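
# Worked example of the asymmetric split above, for an even context_window
# (plain ints mirroring the NumbersDict arithmetic; hypothetical value):
context_window = 6
ctx_total = max(context_window, 1) - 1  # 5 additional frames
ctx_left = ctx_total // 2               # 2
ctx_right = ctx_total - ctx_left        # 3: the extra frame goes to the right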
Example #32
  def shapes_for_batches(self, batches, data_keys):
    """
    :type batches: list[EngineBatch.Batch]
    :rtype: dict[str,list[int]] | None
    """
    # The final device.data.shape is in format (time,batch,feature).
    shape = [NumbersDict(0), 0]  # time,batch
    for batch in batches:
      shape = [NumbersDict.max([shape[0], batch.max_num_frames_per_slice]), shape[1] + batch.num_slices]
    if shape[1] == 0:
      return None
    assert shape[0].max_value() > 0

    d = {k: [shape[0][k], shape[1]] for k in (set(data_keys) | {"data"})}
    for k in d:
      d[k] += self.get_data_shape(k)
    return d
Example #33
def shapes_for_batches(batches, data_keys, dataset=None, extern_data=None, enforce_min_len1=False):
  """
  :param list[EngineBatch.Batch] batches:
  :param list[str] data_keys:
  :param Dataset dataset:
  :param TFNetwork.ExternData extern_data: detailed data description. only used for TensorFlow
  :param bool enforce_min_len1:
  :rtype: dict[str,list[int]] | None
  """
  assert dataset or extern_data
  all_data_keys = set(data_keys)

  # The final device.data.shape is in format (time,batch,feature) in case of Theano.
  shape = [NumbersDict(0), 0]  # time,batch
  for batch in batches:
    shape = [NumbersDict.max([shape[0], batch.max_num_frames_per_slice]), shape[1] + batch.num_slices]
  if shape[1] == 0:
    return None
  assert shape[0].max_value() > 0
  # Theano has some buggy behaviour with tensors with some shape of zero.
  # We will just use one dummy frame in that case.
  # The index will stay zero in that case. (see EngineUtil.assign_dev_data())
  # However, also see the OutputLayer.output_index() behavior for forwarding.
  if not extern_data or enforce_min_len1:  # not needed if TensorFlow is used
    for k in all_data_keys:
      shape[0][k] = max(shape[0][k], 1)

  if extern_data:
    d = {}
    for k in all_data_keys:
      data_shape = list(extern_data.data[k].batch_shape)
      data_shape[extern_data.data[k].batch_dim_axis] = shape[1]
      if extern_data.data[k].have_time_axis():
        data_shape[extern_data.data[k].time_dim_axis] = shape[0][k]
      assert all([n is not None for n in data_shape]), "data %r" % extern_data.data[k]
      d[k] = data_shape
  else:  # shape via dataset
    d = {k: [shape[0][k], shape[1]] for k in all_data_keys}
    for k in all_data_keys:
      d[k] += dataset.get_data_shape(k)
  return d
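
# Small sketch of the dummy-frame padding above, using per-key item access on
# the time NumbersDict (assuming RETURNN's Util.NumbersDict; hypothetical values):
from Util import NumbersDict

time_shape = NumbersDict({"data": 7, "classes": 0})
for k in ("data", "classes"):
    time_shape[k] = max(time_shape[k], 1)  # ensure at least one (dummy) frame
# time_shape["classes"] is now 1, time_shape["data"] stays 7.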
Example #34
 def __init__(self, parent, devices):
   """
   :type parent: TaskThread
   """
   threading.Thread.__init__(self, name="DeviceThread %s" % " ".join([dev.name for dev in devices]))
   self.alloc_devices = devices
   self.parent = parent
   self.devices_batches_idx = None
   self.run_start_batch_idx = None
   self.eval_info = None; " :type: dict[str] | None "
   self.allocated = False
   self.processing = False
   self.finished = True
   self.crashed = False
   self.num_frames = NumbersDict(0)
   self.run_frames = NumbersDict(0)
   self.daemon = True
   self.active = True
   self.result = { 'batchess': [], 'results': [], 'result_format': None, 'num_frames': 0 }
   if self.alloc_devices:
     self.start()
Example #35
 def __init__(self, tf_session, dataset, batches, capacity=10, tf_queue=None, **kwargs):
   """
   :param tf.Session|tf.InteractiveSession tf_session:
   :param Dataset dataset:
   :param BatchSetGenerator batches:
   :param ExternData extern_data:
   :param set(str)|None data_keys:
   :param int capacity:
   :param TFDataQueues|None tf_queue:
   """
   super(FeedDictDataProvider, self).__init__(**kwargs)
   self.tf_session = tf_session
   self.dataset = dataset
   self.batches = batches
   self.state_change_cond = Condition()
   self.queue = None  # type: Queue
   self.tf_queue = tf_queue
   if not self.tf_queue:
     self.queue = Queue(maxsize=capacity)
   self.thread = None  # type: Thread
   self.num_frames = NumbersDict(0)
   self.thread_finished = False
   self.reached_end = False
Example #36
 def __init__(self, seq_idx, seq_start_frame, seq_end_frame,
              batch_slice, batch_frame_offset):
   """
   :type seq_idx: int
   :type seq_start_frame: NumbersDict | int
   :type seq_end_frame: NumbersDict | int
     Frame idx are input seq, output seq.
   :type batch_slice: int
   :type batch_frame_offset: int | NumbersDict
   """
   self.seq_idx = seq_idx
   self.seq_start_frame = NumbersDict(seq_start_frame)
   self.seq_end_frame = NumbersDict(seq_end_frame)
   self.batch_slice = batch_slice
   self.batch_frame_offset = NumbersDict(batch_frame_offset)
   assert self.seq_start_frame.has_values()
   assert self.seq_end_frame.has_values()
   assert self.batch_frame_offset.has_values()
Example #37
def hdf_dump_from_dataset(dataset, hdf_dataset, parser_args):
    """
  :param Dataset dataset: could be any dataset implemented as child of Dataset
  :type hdf_dataset: h5py._hl.files.File
  :param parser_args: argparse object from main()
  :return:
  """
    print("Work on epoch: %i" % parser_args.epoch, file=log.v3)
    dataset.init_seq_order(parser_args.epoch)

    data_keys = sorted(dataset.get_data_keys())
    print("Data keys:", data_keys, file=log.v3)
    if "orth" in data_keys:
        data_keys.remove("orth")

    # We need to do one run through the dataset to collect some stats like total len.
    print("Collect stats, iterate through all data...", file=log.v3)
    seq_idx = parser_args.start_seq
    seq_idxs = []
    seq_tags = []
    seq_lens = []
    total_seq_len = NumbersDict(0)
    max_tag_len = 0
    dataset_num_seqs = try_run(lambda: dataset.num_seqs,
                               default=None)  # can be unknown
    if parser_args.end_seq != float("inf"):
        if dataset_num_seqs is not None:
            dataset_num_seqs = min(dataset_num_seqs, parser_args.end_seq)
        else:
            dataset_num_seqs = parser_args.end_seq
    if dataset_num_seqs is not None:
        dataset_num_seqs -= parser_args.start_seq
        assert dataset_num_seqs > 0
    while dataset.is_less_than_num_seqs(seq_idx) and seq_idx <= parser_args.end_seq:
        seq_idxs += [seq_idx]
        dataset.load_seqs(seq_idx, seq_idx + 1)
        seq_len = dataset.get_seq_length(seq_idx)
        seq_lens += [seq_len]
        tag = dataset.get_tag(seq_idx)
        seq_tags += [tag]
        max_tag_len = max(len(tag), max_tag_len)
        total_seq_len += seq_len
        if dataset_num_seqs is not None:
            progress_bar_with_time(
                float(seq_idx - parser_args.start_seq) / dataset_num_seqs)
        seq_idx += 1
    num_seqs = len(seq_idxs)

    assert num_seqs > 0
    shapes = {}
    for data_key in data_keys:
        assert data_key in total_seq_len.dict
        shape = [total_seq_len[data_key]]
        shape += dataset.get_data_shape(data_key)
        print("Total len of %r is %s, shape %r, dtype %s" %
              (data_key, human_size(
                  shape[0]), shape, dataset.get_data_dtype(data_key)),
              file=log.v3)
        shapes[data_key] = shape

    print("Set seq tags...", file=log.v3)
    hdf_dataset.create_dataset('seqTags',
                               shape=(num_seqs, ),
                               dtype="S%i" % (max_tag_len + 1))
    for i, tag in enumerate(seq_tags):
        hdf_dataset['seqTags'][i] = numpy.array(tag,
                                                dtype="S%i" %
                                                (max_tag_len + 1))
        progress_bar_with_time(float(i) / num_seqs)

    print("Set seq len info...", file=log.v3)
    hdf_dataset.create_dataset(HDFDataset.attr_seqLengths,
                               shape=(num_seqs, 2),
                               dtype="int32")
    for i, seq_len in enumerate(seq_lens):
        data_len = seq_len["data"]
        targets_len = seq_len["classes"]
        for data_key in dataset.get_target_list():
            if data_key == "orth":
                continue
            assert seq_len[data_key] == targets_len, "different lengths in multi-target not supported"
        if targets_len is None:
            targets_len = data_len
        hdf_dataset[HDFDataset.attr_seqLengths][i] = [data_len, targets_len]
        progress_bar_with_time(float(i) / num_seqs)

    print("Create arrays in HDF...", file=log.v3)
    hdf_dataset.create_group('targets/data')
    hdf_dataset.create_group('targets/size')
    hdf_dataset.create_group('targets/labels')
    for data_key in data_keys:
        if data_key == "data":
            hdf_dataset.create_dataset('inputs',
                                       shape=shapes[data_key],
                                       dtype=dataset.get_data_dtype(data_key))
        else:
            hdf_dataset['targets/data'].create_dataset(
                data_key,
                shape=shapes[data_key],
                dtype=dataset.get_data_dtype(data_key))
            hdf_dataset['targets/size'].attrs[data_key] = dataset.num_outputs[
                data_key]

        if data_key in dataset.labels:
            labels = dataset.labels[data_key]
            assert len(labels) == dataset.num_outputs[data_key][0]
        else:
            labels = [
                "%s-class-%i" % (data_key, i)
                for i in range(dataset.get_data_dim(data_key))
            ]
        print("Labels for %s:" % data_key, labels[:3], "...", file=log.v5)
        max_label_len = max(map(len, labels))
        if data_key != "data":
            hdf_dataset['targets/labels'].create_dataset(
                data_key, (len(labels), ), dtype="S%i" % (max_label_len + 1))
            for i, label in enumerate(labels):
                hdf_dataset['targets/labels'][data_key][i] = numpy.array(
                    label, dtype="S%i" % (max_label_len + 1))

    # Again iterate through dataset, and set the data
    print("Write data...", file=log.v3)
    dataset.init_seq_order(parser_args.epoch)
    offsets = NumbersDict(0)
    for seq_idx, tag in zip(seq_idxs, seq_tags):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        tag_ = dataset.get_tag(seq_idx)
        assert tag == tag_  # Just a check for sanity. We expect the same order.
        seq_len = dataset.get_seq_length(seq_idx)
        for data_key in data_keys:
            if data_key == "data":
                hdf_data = hdf_dataset['inputs']
            else:
                hdf_data = hdf_dataset['targets/data'][data_key]
            data = dataset.get_data(seq_idx, data_key)
            hdf_data[offsets[data_key]:offsets[data_key] +
                     seq_len[data_key]] = data

        progress_bar_with_time(float(offsets["data"]) / total_seq_len["data"])

        offsets += seq_len

    assert offsets == total_seq_len  # Sanity check.

    # Set some old-format attribs. Not needed for newer CRNN versions.
    hdf_dataset.attrs[HDFDataset.attr_inputPattSize] = dataset.num_inputs
    hdf_dataset.attrs[HDFDataset.attr_numLabels] = dataset.num_outputs.get(
        "classes", (0, 0))[0]

    print("All done.", file=log.v3)
Example #38
 def iterate_seqs(self, chunk_size=None, chunk_step=None, used_data_keys=None):
   """
   Takes chunking into consideration.
   :param int|NumbersDict chunk_size:
   :param int|NumbersDict chunk_step:
   :param set(str)|None used_data_keys:
   :return: generator which yields tuples (seq index, seq start, seq end)
   :rtype: list[(int,NumbersDict,NumbersDict)]
   """
   if chunk_size is None:
     chunk_size = self.chunk_size
   if chunk_step is None:
     chunk_step = self.chunk_step
   chunk_size = NumbersDict(chunk_size)
   chunk_step = NumbersDict(chunk_step)
   s = 0
   while self.is_less_than_num_seqs(s):
     length = self.get_seq_length(s)
     if chunk_size == 0:
       yield (s, NumbersDict.constant_like(0, numbers_dict=length), length)
     else:
       default_key = "data"
       if used_data_keys is not None:
         length = NumbersDict({k: length[k] for k in used_data_keys})
         if default_key not in used_data_keys:
           default_key = sorted(used_data_keys)[0]
         if chunk_step[default_key] == 0:  # allow some keys with zero chunk-step
           assert chunk_step.max_value() > 0
           default_key = [key for key in sorted(used_data_keys) if chunk_step[key] > 0][0]
       assert chunk_step[default_key] > 0
       t = NumbersDict.constant_like(0, numbers_dict=length)
       # There are usually the 'data' (input) and 'classes' (targets) data-keys in `length` but there can be others.
       # We expect them all of the same length so that we can do chunking.
       # In case that some length is 0 or 1,
       # we treat it special and always return the full seq repeated for every chunk.
       keys_with_full_seqs = []
       for key in length.keys():
         if chunk_step[key] == chunk_step[default_key]:
           if length[key] == length[default_key]:
             continue  # ok
         if length[key] <= 1:  # special case as explained above
           keys_with_full_seqs.append(key)
           continue
         if chunk_step[key] == chunk_step[default_key]:
           raise Exception("Chunking with multiple data-keys of different length: %r" % length)
         else:
           nr_of_full_chunks_key = (length[key] - chunk_size[key]) // chunk_step[key] + 1
           nr_of_full_chunks_default_key = (
             (length[default_key] - chunk_size[default_key]) // chunk_step[default_key] + 1)
           assert nr_of_full_chunks_key == nr_of_full_chunks_default_key
       while length[default_key] > t[default_key]:
         chunk_start = NumbersDict(t)
         chunk_end = NumbersDict.min([t + chunk_size, length])
         for key in keys_with_full_seqs:
           chunk_start[key] = 0
           chunk_end[key] = length[key]
         if length.value is None:
           chunk_start.value = None
           chunk_end.value = None
         yield (s, chunk_start, chunk_end)
         t += chunk_step
         if length[default_key] - t[default_key] <= self.min_chunk_size:
           break
     s += 1
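
# Toy trace of the chunking loop above for a single key (plain ints,
# hypothetical sizes):
length, chunk_size, chunk_step, min_chunk_size = 10, 4, 3, 1
t, chunks = 0, []
while length > t:
    chunks.append((t, min(t + chunk_size, length)))
    t += chunk_step
    if length - t <= min_chunk_size:
        break
# chunks == [(0, 4), (3, 7), (6, 10)]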
Example #39
    def run_inner(self):
        self.start_time = time.time()
        for device in self.devices:
            device.prepare(epoch=self.epoch, **self.get_device_prepare_args())
        self.initialize()
        terminal_width, _ = terminal_size()
        self.interactive = (log.v[3] and terminal_width >= 0)
        print("starting task", self.task, file=log.v5)

        for device in self.devices:
            device.eval_batch_idx = -1
            device.start_epoch_stats()
            device.num_frames = 0
            device.num_updates = 0
            device.tot = 0

        num_device_runs = 1 if self.share_batches else len(self.devices)
        deviceRuns = [
            self.DeviceBatchRun(
                self,
                [self.devices[i]] if not self.share_batches else self.devices)
            for i in range(num_device_runs)
        ]

        results = {'batchess': [], 'results': [], 'num_frames': NumbersDict(0)}
        run_frames = NumbersDict(0)
        cost_result_format = -1

        crashed = False
        assert num_device_runs > 0

        while True:
            if getattr(sys, "exited", False):
                # This happens when we exit Python.
                # Without this check, this thread would keep running until all exit handlers of Python are done.
                print("%s stopped" % self, file=log.v5)
                crashed = True
                break

            for i in range(num_device_runs):
                if deviceRuns[i].crashed or not deviceRuns[i].is_alive():
                    crashed = True
                    break
                if deviceRuns[i].finished:
                    results['batchess'] += deviceRuns[i].result['batchess'][:]
                    results['results'] += deviceRuns[i].result['results'][:]
                    results['result_format'] = deviceRuns[i].result[
                        'result_format']
                    deviceRuns[i].finished = False
            if crashed:
                break

            if cost_result_format < 0 and deviceRuns[i].result['result_format']:
                for idx, fmt in enumerate(
                        deviceRuns[i].result['result_format']):
                    if fmt and fmt.startswith('cost:'):
                        cost_result_format = idx
            total_cost = 0
            if results['results'] and cost_result_format >= 0:
                total_cost = numpy.asarray(
                    results['results'])[:, cost_result_format].sum()
            if total_cost >= self.eval_batch_size or not self.batches.has_more():
                if all(not (dev.finished or dev.allocated or dev.processing)
                       for dev in deviceRuns):
                    results['num_frames'] = run_frames
                    self.num_frames += run_frames
                    if self.share_batches: run_frames *= len(self.devices)
                    self.reduce(run_frames)
                    self.eval_batch_idx += 1
                    run_frames = NumbersDict(0)
                    results['batchess'] = []
                    results['results'] = []
                    for device in self.devices:
                        device.num_frames = 0
                        device.num_updates = 0
                    if not self.batches.has_more():
                        break
                else:
                    time.sleep(0.01)

            match = True
            while self.batches.has_more() and total_cost < self.eval_batch_size and match:
                self.batch_idx = self.batches.get_current_batch_idx()
                if self.batch_idx < self.start_batch:
                    self.batches.advance(1)
                    break
                match = False
                for i in range(num_device_runs):
                    if not deviceRuns[i].allocated:
                        deviceRuns[i].allocate()
                        run_frames += deviceRuns[i].run_frames
                        match = True
                        break
            if not match:
                time.sleep(0.01)

        for run in deviceRuns:
            run.stop()
        if crashed: return
        for device in self.devices:
            device.finish_epoch_stats()
        self.finalize()
        if self.interactive: progress_bar()
        self.elapsed = (time.time() - self.start_time)
Example #40
  def _generate_batches(self, recurrent_net,
                        batch_size, max_seqs=-1, max_seq_length=sys.maxsize,
                        min_seq_length=0, pruning=0.0,
                        seq_drop=0.0, max_total_num_seqs=-1,
                        used_data_keys=None):
    """
    :param bool recurrent_net: If True, the batch might have a batch seq dimension > 1.
      Otherwise, the batch seq dimension is always 1 and multiple seqs will be concatenated.
    :param int|dict[str,int]|NumbersDict batch_size: Max number of frames in one batch.
    :param int max_seqs: Max number of seqs per batch.
    :param int max_total_num_seqs:
    :param int|dict[str,int]|NumbersDict max_seq_length:
    :param set(str)|None used_data_keys:
    """
    if not batch_size:
      batch_size = sys.maxsize
    batch_size = NumbersDict(batch_size)
    assert not batch_size.any_compare(NumbersDict(0), (lambda a, b: a <= b))
    if max_seqs == -1:
      max_seqs = float('inf')
    if not max_seq_length:
      max_seq_length = sys.maxsize
    if isinstance(max_seq_length, int) and max_seq_length < 0:
      max_seq_length = {"classes": -max_seq_length}
    max_seq_length = NumbersDict(max_seq_length)
    min_seq_length = NumbersDict(min_seq_length)
    assert max_seqs > 0
    assert seq_drop <= 1.0
    if not max_total_num_seqs or max_total_num_seqs < 0:
      max_total_num_seqs = float("inf")
    chunk_size = self.chunk_size
    chunk_step = self.chunk_step
    if not recurrent_net:
      if chunk_size != 0:
        print("Non-recurrent network, chunk size %s:%s ignored" % (chunk_size, chunk_step), file=log.v4)
        chunk_size = 0
    batch = Batch()
    ctx_lr = self._get_context_window_left_right()
    total_num_seqs = 0
    last_seq_idx = -1
    avg_weight = sum([v[0] for v in self.weights.values()]) / (len(self.weights.keys()) or 1)
    for idx in self.weights:
      self.weights[idx][1] = random() * avg_weight * pruning
      self.weights[idx][0] *= (1. + pruning)
    for seq_idx, t_start, t_end in self.iterate_seqs(
          chunk_size=chunk_size, chunk_step=chunk_step, used_data_keys=used_data_keys):
      if not self.sample(seq_idx):
        continue
      if total_num_seqs > max_total_num_seqs:
        break
      if ctx_lr:
        t_start -= ctx_lr[0]
        t_end += ctx_lr[1]
      if recurrent_net:
        length = t_end - t_start
        if length.any_compare(max_seq_length, (lambda a, b: a > b)):
          continue
        if length.any_compare(min_seq_length, (lambda a, b: a < b)):
          continue
        if length.any_compare(batch_size, (lambda a, b: a > b)):
          print("warning: sequence length (%r) larger than limit (%r)" % (length, batch_size), file=log.v4)
        if self.rnd_seq_drop.random() < seq_drop:
          continue
        dt, ds = batch.try_sequence_as_slice(length)
        if ds > 1 and ((dt * ds).any_compare(batch_size, (lambda a, b: a > b)) or ds > max_seqs):
          yield batch
          batch = Batch()
        batch.add_sequence_as_slice(seq_idx=seq_idx, seq_start_frame=t_start, length=length)
      else:  # Not recurrent.
        while t_start.max_value() < t_end.max_value():
          length = t_end - t_start
          num_frames = NumbersDict.min(
            [length, batch_size.copy_like(length) - batch.get_all_slices_num_frames().copy_like(length)])
          assert num_frames.max_value() > 0
          batch.add_frames(seq_idx=seq_idx, seq_start_frame=t_start, length=num_frames)
          if (batch.get_all_slices_num_frames().any_compare(batch_size, (lambda a, b: a >= b))
                  or batch.get_num_seqs() > max_seqs):
            yield batch
            batch = Batch()
          t_start += num_frames
      if seq_idx != last_seq_idx:
        last_seq_idx = seq_idx
        total_num_seqs += 1

    if batch.get_all_slices_num_frames().max_value() > 0:
      yield batch
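
# Sketch of the per-key limit checks above via any_compare (assuming RETURNN's
# Util.NumbersDict; hypothetical numbers):
from Util import NumbersDict

length = NumbersDict({"data": 120, "classes": 15})
max_seq_length = NumbersDict({"classes": 12})  # limit only the targets
if length.any_compare(max_seq_length, (lambda a, b: a > b)):
    pass  # triggers here, since 15 > 12 for "classes"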
Example #41
    class DeviceBatchRun(threading.Thread):
      def __init__(self, parent, devices):
        """
        :type parent: TaskThread
        """
        threading.Thread.__init__(self, name="DeviceThread %s" % " ".join([dev.name for dev in devices]))
        self.alloc_devices = devices
        self.parent = parent
        self.devices_batches_idx = None
        self.run_start_batch_idx = None
        self.eval_info = None; " :type: dict[str] | None "
        self.allocated = False
        self.processing = False
        self.finished = True
        self.crashed = False
        self.num_frames = NumbersDict(0)
        self.run_frames = NumbersDict(0)
        self.daemon = True
        self.active = True
        self.result = { 'batchess': [], 'results': [], 'result_format': None, 'num_frames': 0 }
        if self.alloc_devices:
          self.start()

      def allocate(self):
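        """
        Fetches the next batches from the parent, assigns them to our devices,
        and accumulates the number of frames to run.
        """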
        self.devices_batches_idx = self.parent.batches.get_current_batch_idx()
        self.devices_batches = self.parent.allocate_devices(self.alloc_devices)
        self.run_frames = NumbersDict(0)
        for batches, device in zip(self.devices_batches, self.alloc_devices):
          assert batches
          assert batches[0].seqs
          #assert batches[0].seqs[0].frame_length[1] > 0
          device.num_updates += 1 if not device.update_specs['block_size'] else int(ceil(
            sum([len(batch.seqs) for batch in batches]) / float(device.update_specs['block_size'])))
          self.run_frames += sum([batch.get_total_num_frames() for batch in batches])
        if self.parent.share_batches:
          self.run_frames /= len(self.alloc_devices)
        assert self.run_frames.max_value() > 0
        self.allocated = True

      def finish(self):
        """
        :returns: whether everything is fine.
        """
        device_results, outputs_format = self.device_collect_results()
        if device_results is None:
          if not getattr(sys, "exited", False):
            print("device crashed on batch", self.run_start_batch_idx, file=log.v3)
          self.parent.device_crash_batch = self.run_start_batch_idx
          self.crashed = True
          return False
        assert len(device_results) == len(self.alloc_devices) == len(self.devices_batches)

        if outputs_format and any([k.startswith("gparam:") for k in outputs_format]):
          # WARNING: this code is untested and likely broken!
          for i in range(len(self.alloc_devices)):
            res = Device.make_result_dict(device_results[i], outputs_format)
            self.alloc_devices[i].sync_net_train_params()
            devnet = self.alloc_devices[i].get_net_train_params(self.parent.network)
            vars = self.parent.network.get_all_params_vars()
            for p, q in zip(vars, devnet):
              p.set_value(q)
            gparams = {}
            for p in vars:
              gparams[p] = numpy.zeros(p.get_value(borrow=True, return_internal_type=True).shape, dtype=theano.config.floatX)
            for p in vars:
              q = res["gparam:%s" % p.name]
              if q.shape == p.get_value().shape:
                gparams[p] = q
              elif q.shape:
                print("warning: shape for gradient does not match:", p.get_value().shape, q.shape, file=log.v2)
            self.parent.updater.setNetParamDeltas(gparams)
            self.parent.updater.update()
            self.alloc_devices[i].set_net_params(self.parent.network)

        self.result = { 'batchess': self.devices_batches, 'results': device_results, 'result_format': outputs_format, 'num_frames': self.num_frames }
        self.eval_info = self.parent.evaluate(**self.result)
        self.parent.lock.acquire()
        self.print_process()
        self.parent.lock.release()
        return True

      def run(self):
        try:
          while self.active and not getattr(sys, "exited", False):
            if self.allocated and not self.finished:
              self.device_run()
              self.num_frames = self.run_frames
              self.processing = True
              self.allocated = False
              self.finish()
              self.finished = True
              self.processing = False
            else:
              time.sleep(0.01)
        except BaseException:
          self.crashed = True
          sys.excepthook(*sys.exc_info())
        finally:
          self.finished = True

      def stop(self):
        self.active = False

      def device_run(self):
        batch_idx = self.run_start_batch_idx = self.devices_batches_idx
        assert len(self.alloc_devices) == len(self.devices_batches)
        for device, batches in zip(self.alloc_devices, self.devices_batches):
          if self.parent.network.recurrent:
            print("running", device.targets["data"].shape[1],
                  "sequence slices (%i nts)" % (device.targets["data"].shape[0] * device.targets["data"].shape[1]),
                  end=" ", file=log.v5)
          else:
            print("running", device.targets["data"].shape[0] * device.targets["data"].shape[1], "frames",
                  end=" ", file=log.v5)
          if device.num_batches == 1:
            print("of batch %i" % batch_idx, end=" ", file=log.v5)
          else:
            print("of batches %i-%i" % (batch_idx, batch_idx + device.num_batches - 1), end=" ", file=log.v5)
          print("on device", device.name, file=log.v5)
          device.run(self.parent.task)
          # if not self.parent.share_batches: batch_idx += device.num_batches

      def device_collect_results(self):
        device_results = []
        outputs_format = None
        for i, device in enumerate(self.alloc_devices):
          try:
            result, outputs_format_new = device.result()
          except RuntimeError:
            return None, None
          if result is None:
            return None, None
          assert isinstance(result, list)
          assert len(result) > 0  # we always expect to get some result
          if i >= 1:
            assert outputs_format == outputs_format_new, "We expect to always get the same output format."
          outputs_format = outputs_format_new
          device_results.append(result)
        return device_results, outputs_format

      def device_mem_usage_str(self, devices):
        """
        :type devices: list[Device.Device]
        :rtype: str | None
        """
        if not devices:
          return None
        mem_info = [device.get_memory_info() for device in devices]
        if len(mem_info) == 1 and mem_info[0] is None:
          return None
        mem_usage = [info.used if info else None for info in mem_info]
        s = ["%s MB" % (mem / (1024*1024)) if mem is not None else "unknown" for mem in mem_usage]
        return "/".join(s)

      def print_process(self):
        if not self.parent.interactive and not log.v[5]:
          return
        start_elapsed = time.time() - self.parent.start_time
        complete = self.parent.batches.completed_frac()
        assert complete > 0
        total_time_estimated = start_elapsed / complete
        remaining_estimated = total_time_estimated - start_elapsed
        if log.verbose[5]:
          mem_usage = self.device_mem_usage_str(self.alloc_devices)
          info = [
            self.parent.report_prefix,
            "batch %i" % self.run_start_batch_idx]
          if self.eval_info:  # Such as score.
            info += ["%s %s" % item for item in sorted(self.eval_info.items())]
          info += [
            "elapsed %s" % hms(start_elapsed),
            "exp. remaining %s" % hms(remaining_estimated),
            "complete %.02f%%" % (complete * 100)]
          if mem_usage:
            info += ["memory %s" % mem_usage]
          print >> log.v5, ", ".join(filter(None, info))
        if self.parent.interactive:
          progress_bar(complete, hms(remaining_estimated))
Example #42
    def run_inner(self):
      self.start_time = time.time()
      for device in self.devices:
        device.prepare(epoch=self.epoch, **self.get_device_prepare_args())
      self.initialize()
      terminal_width, _ = terminal_size()
      self.interactive = (log.v[3] and terminal_width >= 0)
      print >> log.v5, "starting task", self.task

      for device in self.devices:
        device.eval_batch_idx = -1
        device.start_epoch_stats()
        device.num_frames = 0
        device.num_updates = 0
        device.tot = 0

      num_device_runs = 1 if self.share_batches else len(self.devices)
      deviceRuns = [
        self.DeviceBatchRun(self, [self.devices[i]] if not self.share_batches else self.devices)
        for i in range(num_device_runs)]

      results = {'batchess': [], 'results': [], 'num_frames': NumbersDict(0)}
      run_frames = NumbersDict(0)

      crashed = False

      while True:
        if getattr(sys, "exited", False):
          # This happens when we exit Python.
          # Without this check, this thread would keep running until all exit handlers of Python are done.
          print >> log.v5, "%s stopped" % self
          crashed = True
          break

        for i in range(num_device_runs):
          if deviceRuns[i].crashed:
            crashed = True
            break
          if deviceRuns[i].finished:
            results['batchess'] += deviceRuns[i].result['batchess'][:]
            results['results'] += deviceRuns[i].result['results'][:]
            results['result_format'] = deviceRuns[i].result['result_format']
            deviceRuns[i].finished = False
        if crashed:
          break

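        # Flush: hand the accumulated results to reduce() once enough frames are collected or the dataset is exhausted.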
        if run_frames.max_value() >= self.eval_batch_size or not self.batches.has_more():
          if all(not (dev.finished or dev.allocated or dev.processing) for dev in deviceRuns):
            results['num_frames'] = run_frames
            self.num_frames += run_frames
            if self.share_batches: run_frames *= len(self.devices)
            self.reduce(run_frames)
            self.eval_batch_idx += 1
            run_frames = NumbersDict(0)
            results['batchess'] = []
            results['results'] = []
            for device in self.devices:
              device.num_frames = 0
              device.num_updates = 0
            if not self.batches.has_more():
              break
          else:
            time.sleep(0.01)

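        # Feed idle device runs; 'match' stays True as long as some run could take the next batch.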
        match = True
        while self.batches.has_more() and run_frames.max_value() < self.eval_batch_size and match:
          self.batch_idx = self.batches.get_current_batch_idx()
          if self.batch_idx < self.start_batch:
            self.batches.advance(1)
            break
          match = False
          for i in range(num_device_runs):
            if not deviceRuns[i].allocated:
              deviceRuns[i].allocate()
              run_frames += deviceRuns[i].run_frames
              match = True
              break
        if not match:
          time.sleep(0.01)

      for run in deviceRuns:
        run.stop()
      if crashed: return
      for device in self.devices:
        device.finish_epoch_stats()
      self.finalize()
      if self.interactive: progress_bar()
      self.elapsed = (time.time() - self.start_time)
Example #43
class Batch:
  """
  A batch can consist of several sequences (= segments).
  This is basically just a list of BatchSeqCopyPart.
  """

  def __init__(self):
    self.max_num_frames_per_slice = NumbersDict(0)
    self.num_slices = 0
    # original data_shape = [0, 0], format (time,batch/slice)
    #          data_shape = [max_num_frames_per_slice, num_slices]
    self.seqs = []; " :type: list[BatchSeqCopyPart] "

  def __repr__(self):
    return "<Batch start_seq:%r, #seqs:%i>" % (self.start_seq, len(self.seqs))

  def try_sequence_as_slice(self, length):
    """
    :param NumbersDict length: number of (time) frames
    :return: new shape which covers the old shape and one more data-batch, format (time,batch)
    :rtype: (NumbersDict,int)
    """
    return [NumbersDict.max([self.max_num_frames_per_slice, length]), self.num_slices + 1]

  def add_sequence_as_slice(self, seq_idx, seq_start_frame, length):
    """
    Adds one data-batch in an additional slice.
    :param NumbersDict length: number of (time) frames
    """
    self.max_num_frames_per_slice, self.num_slices = self.try_sequence_as_slice(length)
    self.seqs += [BatchSeqCopyPart(seq_idx=seq_idx,
                                   seq_start_frame=seq_start_frame,
                                   seq_end_frame=seq_start_frame + length,
                                   batch_slice=self.num_slices - 1,
                                   batch_frame_offset=0)]

  def add_frames(self, seq_idx, seq_start_frame, length):
    """
    Adds frames to all data-batches.
    Will add one data-batch if we don't have one yet.
    :type seq_start_frame: NumbersDict | int
    :param NumbersDict length: number of (time) frames
    """
    self.max_num_frames_per_slice += length
    self.num_slices = max(self.num_slices, 1)
    self.seqs += [BatchSeqCopyPart(seq_idx=seq_idx,
                                   seq_start_frame=seq_start_frame,
                                   seq_end_frame=seq_start_frame + length,
                                   batch_slice=0,
                                   batch_frame_offset=self.max_num_frames_per_slice - length)]

  def get_all_slices_num_frames(self):
    """
    Note that this is only an upper limit in case of data_shape[1] > 1
    because data_shape[0] is the max frame len of all seqs.
    """
    return self.max_num_frames_per_slice.max_value() * self.num_slices

  def get_total_num_frames(self):
    return sum([s.frame_length for s in self.seqs])

  @property
  def start_seq(self):
    if not self.seqs:
      return None
    return min([s.seq_idx for s in self.seqs])

  @property
  def end_seq(self):
    if not self.seqs:
      return None
    return max([s.seq_idx for s in self.seqs]) + 1

  def get_num_seqs(self):
    if not self.seqs:
      return 0
    return self.end_seq - self.start_seq
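
To make the two packing strategies concrete, here is a small sketch. It assumes
NumbersDict supports elementwise arithmetic and comparison with plain ints, as the
code above already relies on.

# Recurrent-style packing: one slice per sequence, padded to the longest one.
b = Batch()
b.add_sequence_as_slice(seq_idx=0, seq_start_frame=0, length=NumbersDict(10))
b.add_sequence_as_slice(seq_idx=1, seq_start_frame=0, length=NumbersDict(7))
assert b.num_slices == 2
assert b.max_num_frames_per_slice == NumbersDict(10)

# Non-recurrent-style packing: frames of all sequences concatenated into one slice.
b = Batch()
b.add_frames(seq_idx=0, seq_start_frame=0, length=NumbersDict(10))
b.add_frames(seq_idx=1, seq_start_frame=0, length=NumbersDict(7))
assert b.num_slices == 1
assert b.max_num_frames_per_slice == NumbersDict(17)
assert b.seqs[1].batch_frame_offset == NumbersDict(10)  # second part starts after the first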
Example #44
 def __init__(self, name=None,
              window=1, context_window=None, chunking=None,
              seq_ordering='default', partition_epoch=None, repeat_epoch=None,
              shuffle_frames_of_nseqs=0, min_chunk_size=0,
              estimated_num_seqs=None,):
   """
   :param str name: e.g. "train" or "eval"
   :param int window: features will be of dimension window * feature_dim, as we add a context-window around.
     Not all datasets support this option.
   :param None|int|dict|NumbersDict context_window: will add this context for each chunk
   :param None|str|int|(int,int)|dict|(dict,dict) chunking: "chunk_size:chunk_step"
   :param str seq_ordering: "batching"-option in config. e.g. "default", "sorted" or "random".
     See self.get_seq_order_for_epoch() for more details.
   :param int|None partition_epoch:
   :param int|None repeat_epoch: Repeat the sequences in an epoch this many times. Useful to scale the dataset
     relative to other datasets, e.g. when used in CombinedDataset. Not allowed to be used in combination with
     partition_epoch.
   :param int shuffle_frames_of_nseqs: shuffles the frames; not always supported.
   :param None|int estimated_num_seqs: for progress reporting in case the real num_seqs is unknown
   """
   self.name = name or ("dataset_id%s" % id(self))
   self.lock = RLock()  # Used when manipulating our data potentially from multiple threads.
   self.rnd_seq_drop = None  # type: typing.Optional[Random]
   self.num_inputs = 0  # usually not used, but num_outputs instead, which is more generic
   self.num_outputs = None  # type: typing.Optional[typing.Dict[str,typing.Tuple[int,int]]]  # tuple is num-classes, len(shape).  # nopep8
   self.window = window
   self.seq_ordering = seq_ordering  # "default", "sorted" or "random". See self.get_seq_order_for_epoch().
   self.partition_epoch = partition_epoch or 1
   self.repeat_epoch = repeat_epoch or 1
   # There is probably no use case for combining the two, so avoid potential misconfiguration.
   assert self.partition_epoch == 1 or self.repeat_epoch == 1, (
     "Combining partition_epoch and repeat_epoch is prohibited.")
   self.timestamps = None
   self.labels = {}  # type: typing.Dict[str,typing.List[str]]
   self.weights = {}
   self.nbytes = 0
   self.num_running_chars = 0  # CTC running chars.
   self._num_timesteps = 0
   self._num_codesteps = None  # type: typing.Optional[int]  # Num output frames, could be different from input, seq2seq, ctc.  # nopep8
   self._num_seqs = 0
   self._estimated_num_seqs = estimated_num_seqs
   self.min_chunk_size = min_chunk_size
   if isinstance(chunking, str):
     if ":" in chunking:
       chunking = tuple(map(int, chunking.split(":")))
     else:
       chunking = int(chunking)
   if not isinstance(chunking, (tuple, list)):
     chunking = (chunking, None)
   chunk_size, chunk_step = chunking
   if chunk_size is None:
     chunk_size = 0
   assert isinstance(chunk_size, (int, dict, NumbersDict))
   chunk_size = NumbersDict(chunk_size)
   assert chunk_size == 0 or chunk_size.min_value() > 0, "chunk size must be positive (for all keys), or 0 to disable chunking"
   self.chunk_size = chunk_size
   if chunk_step in (None, 0):
     chunk_step = self.chunk_size
   assert isinstance(chunk_step, (int, dict, NumbersDict))
   chunk_step = NumbersDict(chunk_step)
   if self.chunk_size != 0:
     assert sorted(chunk_step.keys()) == sorted(chunk_size.keys())
     assert chunk_step.max_value() > 0, "chunking step must be positive (for some key)"
   self.chunk_step = chunk_step
   if context_window is None:
     context_window = NumbersDict(0)
   elif isinstance(context_window, int):
     context_window = NumbersDict(broadcast_value=0, numbers_dict={"data": context_window})
   elif isinstance(context_window, dict):
     context_window = NumbersDict(broadcast_value=0, numbers_dict=context_window)
   assert isinstance(context_window, NumbersDict)
   self.context_window = context_window
   self.shuffle_frames_of_nseqs = shuffle_frames_of_nseqs
   self.epoch = None
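
The normalization of the chunking argument above can be traced with a small standalone
example (arbitrary values, mirroring the constructor code):

# "chunk_size:chunk_step" string form; the step defaults to the size if omitted or 0.
chunking = "50:25"
if isinstance(chunking, str):
  chunking = tuple(map(int, chunking.split(":"))) if ":" in chunking else int(chunking)
if not isinstance(chunking, (tuple, list)):
  chunking = (chunking, None)
chunk_size, chunk_step = chunking
if chunk_step in (None, 0):
  chunk_step = chunk_size
assert (chunk_size, chunk_step) == (50, 25)
# chunking="50" would give (50, 50); in the constructor above, chunking=None
# leads to chunk_size 0, i.e. chunking disabled.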
Example #45
class Batch:
  """
  A batch can consist of several sequences (= segments).
  This is basically just a list of BatchSeqCopyPart.
  """

  def __init__(self):
    self.max_num_frames_per_slice = NumbersDict(0)
    self.num_slices = 0
    # original data_shape = [0, 0], format (time,batch/slice)
    #          data_shape = [max_num_frames_per_slice, num_slices]
    self.seqs = []  # type: typing.List[BatchSeqCopyPart]

  def __repr__(self):
    return "<Batch start_seq:%r, len(seqs):%i>" % (self.start_seq, len(self.seqs))

  def try_sequence_as_slice(self, length):
    """
    :param NumbersDict length: number of (time) frames
    :return: new shape which covers the old shape and one more data-batch, format (time,batch)
    :rtype: (NumbersDict,int)
    """
    return [NumbersDict.max([self.max_num_frames_per_slice, length]), self.num_slices + 1]

  def add_sequence_as_slice(self, seq_idx, seq_start_frame, length):
    """
    Adds one data-batch in an additional slice.

    :param int seq_idx:
    :param NumbersDict|int seq_start_frame:
    :param NumbersDict length: number of (time) frames
    """
    self.max_num_frames_per_slice, self.num_slices = self.try_sequence_as_slice(length)
    self.seqs += [BatchSeqCopyPart(seq_idx=seq_idx,
                                   seq_start_frame=seq_start_frame,
                                   seq_end_frame=seq_start_frame + length,
                                   batch_slice=self.num_slices - 1,
                                   batch_frame_offset=0)]

  def add_frames(self, seq_idx, seq_start_frame, length, frame_dim_corresponds=True):
    """
    Adds frames to all data-batches.
    Will add one data-batch if we don't have one yet.

    :param int seq_idx:
    :param NumbersDict|int seq_start_frame:
    :param NumbersDict length: number of (time) frames
    :param bool frame_dim_corresponds: whether the batch frame offset should always be the same (max value) for all keys
    """
    batch_frame_offset = self.max_num_frames_per_slice
    if frame_dim_corresponds:
      batch_frame_offset = NumbersDict(batch_frame_offset.max_value())
      self.max_num_frames_per_slice = NumbersDict(self.max_num_frames_per_slice.max_value())
    self.max_num_frames_per_slice += length
    self.num_slices = max(self.num_slices, 1)
    self.seqs += [BatchSeqCopyPart(seq_idx=seq_idx,
                                   seq_start_frame=seq_start_frame,
                                   seq_end_frame=seq_start_frame + length,
                                   batch_slice=0,
                                   batch_frame_offset=batch_frame_offset)]

  def init_with_one_full_sequence(self, seq_idx, dataset):
    """
    :param int seq_idx:
    :param Dataset.Dataset dataset:
    """
    assert not self.seqs
    start, end = dataset.get_start_end_frames_full_seq(seq_idx)
    self.add_frames(seq_idx=seq_idx, seq_start_frame=start, length=end - start)

  def get_all_slices_num_frames(self):
    """
    Note that this is only an upper limit in case of data_shape[1] > 1
    because data_shape[0] is the max frame len of all seqs.

    :return: related to the data-key with max length
    :rtype: NumbersDict
    """
    return self.max_num_frames_per_slice * self.num_slices

  def get_total_num_frames(self):
    """
    :rtype: NumbersDict
    """
    return sum([s.frame_length for s in self.seqs])

  @property
  def start_seq(self):
    """
    :rtype: int|None
    """
    if not self.seqs:
      return None
    return min([s.seq_idx for s in self.seqs])

  @property
  def end_seq(self):
    """
    :rtype: int|None
    """
    if not self.seqs:
      return None
    return max([s.seq_idx for s in self.seqs]) + 1

  def get_num_seqs(self):
    """
    :rtype: int
    """
    if not self.seqs:
      return 0
    return self.end_seq - self.start_seq
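
The effect of frame_dim_corresponds is easiest to see with per-key lengths that differ,
e.g. input frames vs. target labels. A sketch, assuming NumbersDict accepts a plain dict
as in the constructor usage above:

b = Batch()
b.add_frames(seq_idx=0, seq_start_frame=0, length=NumbersDict({"data": 10, "classes": 3}))
b.add_frames(seq_idx=1, seq_start_frame=0, length=NumbersDict({"data": 5, "classes": 2}))
# With frame_dim_corresponds=True (the default), the second part gets the same offset
# for all keys, namely the max over keys of the accumulated frames:
assert b.seqs[1].batch_frame_offset == NumbersDict(10)
# With frame_dim_corresponds=False, it would keep per-key offsets instead:
# {"data": 10, "classes": 3}.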
Example #46
 def __init__(self):
   self.max_num_frames_per_slice = NumbersDict(0)
   self.num_slices = 0
   # original data_shape = [0, 0], format (time,batch/slice)
   #          data_shape = [max_num_frames_per_slice, num_slices]
   self.seqs = []  # type: typing.List[BatchSeqCopyPart]