import logging
import random
from pathlib import Path
from typing import List, Optional, Sequence, Tuple, Union

import numpy as np
import pytest
from typeguard import check_argument_types

from espnet2.fileio.read_text import load_num_sequence_text, read_2column_text


def test_load_num_sequence_text_invalid(tmp_path: Path):
    p = tmp_path / "dummy.txt"
    # A malformed numeric value ("12.3.3.") must raise ValueError
    with p.open("w") as f:
        f.write("abc 12.3.3.,4.44\n")
    with pytest.raises(ValueError):
        load_num_sequence_text(p)
    # A duplicated key ("abc") must raise RuntimeError
    with p.open("w") as f:
        f.write("abc 1 2\n")
        f.write("abc 2 4\n")
    with pytest.raises(RuntimeError):
        load_num_sequence_text(p)
def __init__(
    self,
    batch_size: int,
    shape_file: str,
    sort_in_batch: str = "descending",
    sort_batch: str = "ascending",
    drop_last: bool = False,
):
    assert check_argument_types()
    assert batch_size > 0
    self.batch_size = batch_size
    self.shape_file = shape_file
    self.sort_in_batch = sort_in_batch
    self.sort_batch = sort_batch
    self.drop_last = drop_last

    # utt2shape: (Length, ...)
    #    uttA 100,...
    #    uttB 201,...
    utt2shape = load_num_sequence_text(shape_file, loader_type="csv_int")
    if sort_in_batch == "descending":
        # Sort samples in descending order (required by RNN)
        keys = sorted(utt2shape, key=lambda k: -utt2shape[k][0])
    elif sort_in_batch == "ascending":
        # Sort samples in ascending order
        keys = sorted(utt2shape, key=lambda k: utt2shape[k][0])
    else:
        raise ValueError(
            f"sort_in_batch must be either one of "
            f"ascending, descending, or None: {sort_in_batch}"
        )
    if len(keys) == 0:
        raise RuntimeError(f"0 lines found: {shape_file}")

    # Apply max(, 1) to avoid 0-batches
    N = max(len(keys) // batch_size, 1)
    if not self.drop_last:
        # Split keys as evenly as possible. Note that if N != 1,
        # these batches always have at least batch_size samples.
        self.batch_list = [
            keys[i * len(keys) // N : (i + 1) * len(keys) // N]
            for i in range(N)
        ]
    else:
        self.batch_list = [
            tuple(keys[i * batch_size : (i + 1) * batch_size])
            for i in range(N)
        ]

    if len(self.batch_list) == 0:
        logging.warning(f"{shape_file} is empty")

    if sort_in_batch != sort_batch:
        if sort_batch not in ("ascending", "descending"):
            raise ValueError(
                f"sort_batch must be ascending or descending: {sort_batch}"
            )
        self.batch_list.reverse()

    if len(self.batch_list) == 0:
        raise RuntimeError("0 batches")
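# Toy illustration of the even-split indexing used above when
# drop_last=False (a self-contained sketch, not part of the sampler):
# 10 keys with batch_size=3 yield N=3 batches of sizes 3/3/4, so no
# batch falls below batch_size.
_keys = list(range(10))
_N = max(len(_keys) // 3, 1)
_batches = [
    _keys[i * len(_keys) // _N : (i + 1) * len(_keys) // _N] for i in range(_N)
]
assert [len(b) for b in _batches] == [3, 3, 4]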
def test_load_num_sequence_text(loader_type: str, tmp_path: Path):
    p = tmp_path / "dummy.txt"
    if "csv" in loader_type:
        delimiter = ","
    else:
        delimiter = " "
    with p.open("w") as f:
        f.write("abc " + delimiter.join(["0", "1", "2"]) + "\n")
        f.write("def " + delimiter.join(["3", "4", "5"]) + "\n")
    desired = {"abc": np.array([0, 1, 2]), "def": np.array([3, 4, 5])}
    if loader_type == "dummy":
        with pytest.raises(ValueError):
            load_num_sequence_text(p, loader_type=loader_type)
        return
    else:
        target = load_num_sequence_text(p, loader_type=loader_type)
    for k in desired:
        np.testing.assert_array_equal(target[k], desired[k])
def __init__(
    self,
    shape_file: Union[Path, str],
    dtype: Union[str, np.dtype] = "float32",
    loader_type: str = "csv_int",
):
    assert check_argument_types()
    shape_file = Path(shape_file)
    self.utt2shape = load_num_sequence_text(shape_file, loader_type)
    self.dtype = np.dtype(dtype)
def __init__(
    self,
    shape_file: Union[Path, str],
    low: int,
    high: Optional[int] = None,
    dtype: Union[str, np.dtype] = "int64",
    loader_type: str = "csv_int",
):
    assert check_argument_types()
    shape_file = Path(shape_file)
    self.utt2shape = load_num_sequence_text(shape_file, loader_type)
    self.dtype = np.dtype(dtype)
    self.low = low
    self.high = high
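# Sketch of the "csv_int" shape-file contract consumed by the two
# __init__ methods above: each line maps an utterance id to a
# comma-separated integer shape (Length, Dim, ...). The stub loader
# below is illustrative only; the real one is
# espnet2.fileio.read_text.load_num_sequence_text.
def _toy_load_csv_int(lines):
    # "uttA 100,80" -> {"uttA": [100, 80]}
    return {
        key: [int(x) for x in rest.split(",")]
        for key, rest in (line.split(maxsplit=1) for line in lines)
    }

assert _toy_load_csv_int(["uttA 100,80", "uttB 201,80"]) == {
    "uttA": [100, 80],
    "uttB": [201, 80],
}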
def __init__(
    self,
    batch_bins: int,
    shape_files: Union[Tuple[str, ...], List[str]],
    min_batch_size: int = 1,
    sort_in_batch: str = "descending",
    sort_batch: str = "ascending",
    drop_last: bool = False,
    padding: bool = True,
):
    assert check_argument_types()
    assert batch_bins > 0
    if sort_batch != "ascending" and sort_batch != "descending":
        raise ValueError(
            f"sort_batch must be ascending or descending: {sort_batch}"
        )
    if sort_in_batch != "descending" and sort_in_batch != "ascending":
        raise ValueError(
            f"sort_in_batch must be ascending or descending: {sort_in_batch}"
        )

    self.batch_bins = batch_bins
    self.shape_files = shape_files
    self.sort_in_batch = sort_in_batch
    self.sort_batch = sort_batch
    self.drop_last = drop_last

    # utt2shape: (Length, ...)
    #    uttA 100,...
    #    uttB 201,...
    utt2shapes = [
        load_num_sequence_text(s, loader_type="csv_int") for s in shape_files
    ]

    first_utt2shape = utt2shapes[0]
    for s, d in zip(shape_files, utt2shapes):
        if set(d) != set(first_utt2shape):
            raise RuntimeError(
                f"keys are mismatched between {s} != {shape_files[0]}"
            )

    # Sort samples in ascending order
    # (shape order should be like (Length, Dim))
    keys = sorted(first_utt2shape, key=lambda k: first_utt2shape[k][0])
    if len(keys) == 0:
        raise RuntimeError(f"0 lines found: {shape_files[0]}")

    # Decide batch-sizes
    batch_sizes = []
    current_batch_keys = []
    for key in keys:
        current_batch_keys.append(key)
        # shape: (Length, dim1, dim2, ...)
        if padding:
            # bins = bs x max_length; keys are sorted in ascending order,
            # so the current key has the maximum length in the batch
            bins = sum(
                len(current_batch_keys) * sh[key][0] for sh in utt2shapes
            )
        else:
            # bins = sum of lengths
            bins = sum(
                d[k][0] for k in current_batch_keys for d in utt2shapes
            )
        if bins > batch_bins and len(current_batch_keys) >= min_batch_size:
            batch_sizes.append(len(current_batch_keys))
            current_batch_keys = []
    else:
        if len(current_batch_keys) != 0 and (
            not self.drop_last or len(batch_sizes) == 0
        ):
            batch_sizes.append(len(current_batch_keys))

    if len(batch_sizes) == 0:
        # Maybe we can't reach here
        raise RuntimeError("0 batches")

    # If the last batch-size is smaller than minimum batch_size,
    # the samples are redistributed to the other mini-batches
    if len(batch_sizes) > 1 and batch_sizes[-1] < min_batch_size:
        for i in range(batch_sizes.pop(-1)):
            batch_sizes[-(i % len(batch_sizes)) - 1] += 1

    if not self.drop_last:
        # Bug check
        assert sum(batch_sizes) == len(keys), f"{sum(batch_sizes)} != {len(keys)}"

    # Set mini-batch
    self.batch_list = []
    iter_bs = iter(batch_sizes)
    bs = next(iter_bs)
    minibatch_keys = []
    for key in keys:
        minibatch_keys.append(key)
        if len(minibatch_keys) == bs:
            if sort_in_batch == "descending":
                minibatch_keys.reverse()
            elif sort_in_batch == "ascending":
                # Keys are already sorted in ascending order
                pass
            else:
                raise ValueError(
                    "sort_in_batch must be ascending"
                    f" or descending: {sort_in_batch}"
                )
            self.batch_list.append(tuple(minibatch_keys))
            minibatch_keys = []
            try:
                bs = next(iter_bs)
            except StopIteration:
                break

    if sort_batch == "ascending":
        pass
    elif sort_batch == "descending":
        self.batch_list.reverse()
    else:
        raise ValueError(
            f"sort_batch must be ascending or descending: {sort_batch}"
        )
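# Toy check of the tail-redistribution step shared by the samplers in
# this file (self-contained sketch): when the last batch is smaller
# than min_batch_size, its samples are spread backwards over the
# remaining batches.
_batch_sizes = [4, 4, 4, 2]
_min_batch_size = 3
if len(_batch_sizes) > 1 and _batch_sizes[-1] < _min_batch_size:
    for i in range(_batch_sizes.pop(-1)):
        _batch_sizes[-(i % len(_batch_sizes)) - 1] += 1
assert _batch_sizes == [4, 5, 5]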
def __init__(
    self,
    batch_size: int,
    shape_files: Union[Tuple[str, ...], List[str]],
    fold_lengths: Sequence[int],
    min_batch_size: int = 1,
    sort_in_batch: str = "descending",
    sort_batch: str = "ascending",
    drop_last: bool = False,
    utt2category_file: Optional[str] = None,
):
    assert check_argument_types()
    assert batch_size > 0
    if sort_batch != "ascending" and sort_batch != "descending":
        raise ValueError(
            f"sort_batch must be ascending or descending: {sort_batch}"
        )
    if sort_in_batch != "descending" and sort_in_batch != "ascending":
        raise ValueError(
            f"sort_in_batch must be ascending or descending: {sort_in_batch}"
        )

    self.batch_size = batch_size
    self.shape_files = shape_files
    self.sort_in_batch = sort_in_batch
    self.sort_batch = sort_batch
    self.drop_last = drop_last

    # utt2shape: (Length, ...)
    #    uttA 100,...
    #    uttB 201,...
    utt2shapes = [
        load_num_sequence_text(s, loader_type="csv_int") for s in shape_files
    ]

    first_utt2shape = utt2shapes[0]
    for s, d in zip(shape_files, utt2shapes):
        if set(d) != set(first_utt2shape):
            raise RuntimeError(
                f"keys are mismatched between {s} != {shape_files[0]}"
            )

    # Sort samples in ascending order
    # (shape order should be like (Length, Dim))
    keys = sorted(first_utt2shape, key=lambda k: first_utt2shape[k][0])
    if len(keys) == 0:
        raise RuntimeError(f"0 lines found: {shape_files[0]}")

    category2utt = {}
    if utt2category_file is not None:
        utt2category = read_2column_text(utt2category_file)
        if set(utt2category) != set(first_utt2shape):
            raise RuntimeError(
                "keys are mismatched between "
                f"{utt2category_file} != {shape_files[0]}"
            )
        for k in keys:
            category2utt.setdefault(utt2category[k], []).append(k)
    else:
        category2utt["default_category"] = keys

    self.batch_list = []
    for category_keys in category2utt.values():
        # Decide batch-sizes
        start = 0
        batch_sizes = []
        while True:
            k = category_keys[start]
            factor = max(
                int(d[k][0] / m) for d, m in zip(utt2shapes, fold_lengths)
            )
            bs = max(min_batch_size, int(batch_size / (1 + factor)))
            if self.drop_last and start + bs > len(category_keys):
                # This if-block avoids 0-batches
                if len(self.batch_list) > 0:
                    break
            bs = min(len(category_keys) - start, bs)
            batch_sizes.append(bs)
            start += bs
            if start >= len(category_keys):
                break

        if len(batch_sizes) == 0:
            # Maybe we can't reach here
            raise RuntimeError("0 batches")

        # If the last batch-size is smaller than minimum batch_size,
        # the samples are redistributed to the other mini-batches
        if len(batch_sizes) > 1 and batch_sizes[-1] < min_batch_size:
            for i in range(batch_sizes.pop(-1)):
                batch_sizes[-(i % len(batch_sizes)) - 1] += 1

        if not self.drop_last:
            # Bug check
            assert sum(batch_sizes) == len(
                category_keys
            ), f"{sum(batch_sizes)} != {len(category_keys)}"

        # Set mini-batch
        cur_batch_list = []
        start = 0
        for bs in batch_sizes:
            assert len(category_keys) >= start + bs, "Bug"
            minibatch_keys = category_keys[start : start + bs]
            start += bs
            if sort_in_batch == "descending":
                minibatch_keys.reverse()
            elif sort_in_batch == "ascending":
                # Keys are already sorted in ascending order
                pass
            else:
                raise ValueError(
                    "sort_in_batch must be ascending or "
                    f"descending: {sort_in_batch}"
                )
            cur_batch_list.append(tuple(minibatch_keys))

        if sort_batch == "ascending":
            pass
        elif sort_batch == "descending":
            cur_batch_list.reverse()
        else:
            raise ValueError(
                f"sort_batch must be ascending or descending: {sort_batch}"
            )
        self.batch_list.extend(cur_batch_list)
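# Worked example of the fold-length heuristic above (illustration
# only): longer utterances shrink the batch. With batch_size=20,
# fold_length=200 and a 450-frame utterance, factor = int(450/200) = 2,
# so the batch holds int(20/3) = 6 samples.
_batch_size, _min_batch_size = 20, 1
_fold_length, _length = 200, 450
_factor = int(_length / _fold_length)
_bs = max(_min_batch_size, int(_batch_size / (1 + _factor)))
assert (_factor, _bs) == (2, 6)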
def __init__(
    self,
    batch_bins: int,
    shape_files: Union[Tuple[str, ...], List[str]],
    min_batch_size: int = 1,
    sort_in_batch: str = "descending",
    sort_batch: str = "ascending",
    drop_last: bool = False,
    padding: bool = True,
):
    assert check_argument_types()
    assert batch_bins > 0
    if sort_batch != "ascending" and sort_batch != "descending":
        raise ValueError(
            f"sort_batch must be ascending or descending: {sort_batch}"
        )
    if sort_in_batch != "descending" and sort_in_batch != "ascending":
        raise ValueError(
            f"sort_in_batch must be ascending or descending: {sort_in_batch}"
        )

    self.batch_bins = batch_bins
    self.shape_files = shape_files
    self.sort_in_batch = sort_in_batch
    self.sort_batch = sort_batch
    self.drop_last = drop_last

    # utt2shape: (Length, ...)
    #    uttA 100,...
    #    uttB 201,...
    utt2shapes = [
        load_num_sequence_text(s, loader_type="csv_int") for s in shape_files
    ]

    first_utt2shape = utt2shapes[0]
    for s, d in zip(shape_files, utt2shapes):
        if set(d) != set(first_utt2shape):
            raise RuntimeError(
                f"keys are mismatched between {s} != {shape_files[0]}"
            )

    # Sort samples in ascending order
    # (shape order should be like (Length, Dim))
    keys = sorted(first_utt2shape, key=lambda k: first_utt2shape[k][0])
    if len(keys) == 0:
        raise RuntimeError(f"0 lines found: {shape_files[0]}")

    # Decide batch-sizes
    start = 0
    batch_sizes = []
    bs = 1
    while True:
        # shape: (Length, dim1, dim2, ...)
        if padding:
            # Keys are sorted in ascending order, so the last key of the
            # candidate range has the maximum length
            max_lengths = [
                max(d[keys[i]][0] for i in range(start, start + bs))
                for d in utt2shapes
            ]
            # bins = bs x max_length
            bins = sum(bs * lg for lg in max_lengths)
        else:
            # bins = sum of lengths
            bins = sum(
                d[keys[i]][0]
                for i in range(start, start + bs)
                for d in utt2shapes
            )
        if bins > batch_bins and bs >= min_batch_size:
            batch_sizes.append(bs)
            start += bs
            bs = 1
        else:
            bs += 1
        if start >= len(keys):
            break
        if start + bs > len(keys):
            if not self.drop_last or len(batch_sizes) == 0:
                batch_sizes.append(len(keys) - start)
            break

    if len(batch_sizes) == 0:
        # Maybe we can't reach here
        raise RuntimeError("0 batches")

    # If the last batch-size is smaller than minimum batch_size,
    # the samples are redistributed to the other mini-batches
    if len(batch_sizes) > 1 and batch_sizes[-1] < min_batch_size:
        for i in range(batch_sizes.pop(-1)):
            batch_sizes[-(i % len(batch_sizes)) - 1] += 1

    if not self.drop_last:
        # Bug check
        assert sum(batch_sizes) == len(keys), f"{sum(batch_sizes)} != {len(keys)}"

    # Set mini-batch
    self.batch_list = []
    start = 0
    for bs in batch_sizes:
        assert len(keys) >= start + bs, "Bug"
        minibatch_keys = keys[start : start + bs]
        start += bs
        if sort_in_batch == "descending":
            minibatch_keys.reverse()
        elif sort_in_batch == "ascending":
            # Keys are already sorted in ascending order
            pass
        else:
            raise ValueError(
                f"sort_in_batch must be ascending or descending: {sort_in_batch}"
            )
        self.batch_list.append(tuple(minibatch_keys))

    if sort_batch == "ascending":
        pass
    elif sort_batch == "descending":
        self.batch_list.reverse()
    else:
        raise ValueError(
            f"sort_batch must be ascending or descending: {sort_batch}"
        )
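# Toy comparison of the two "bins" measures used in the loop above,
# for a single shape file and a candidate batch of lengths [3, 5, 8]
# (self-contained sketch):
_lengths = [3, 5, 8]
_bins_padded = len(_lengths) * max(_lengths)  # 24: all padded to length 8
_bins_unpadded = sum(_lengths)                # 16: raw element count
assert (_bins_padded, _bins_unpadded) == (24, 16)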
def __init__(
    self,
    batch_bins: int,
    shape_files: Union[Tuple[str, ...], List[str]],
    task_file: Optional[str] = None,
    K: int = 1,
    min_batch_size: int = 1,
    sort_in_batch: str = "descending",
    sort_batch: str = "descending",
    drop_last: bool = False,
    padding: bool = True,
):
    assert check_argument_types()
    assert batch_bins > 0
    if sort_batch != "ascending" and sort_batch != "descending":
        raise ValueError(
            f"sort_batch must be ascending or descending: {sort_batch}"
        )
    if sort_in_batch != "descending" and sort_in_batch != "ascending":
        raise ValueError(
            f"sort_in_batch must be ascending or descending: {sort_in_batch}"
        )
    if task_file is None:
        raise ValueError("task_file is required for curriculum learning")

    self.batch_bins = batch_bins
    self.shape_files = shape_files
    self.sort_in_batch = sort_in_batch
    self.sort_batch = sort_batch
    self.drop_last = drop_last
    self.task_file = task_file
    self.K = K

    # utt2shape: (Length, ...)
    #    uttA 100,...
    #    uttB 201,...
    utt2shapes = [
        load_num_sequence_text(s, loader_type="csv_int") for s in shape_files
    ]
    tasks = load_num_sequence_text(task_file, loader_type="text_int")

    first_utt2shape = utt2shapes[0]
    for s, d in zip(shape_files, utt2shapes):
        if set(d) != set(first_utt2shape):
            raise RuntimeError(
                f"keys are mismatched between {s} != {shape_files[0]}"
            )

    # Check that keys match between the task file and the shape files
    if set(tasks) != set(first_utt2shape):
        raise RuntimeError(
            f"keys are mismatched between {shape_files[0]} != {self.task_file}"
        )

    # Bucket utterance ids by task id
    task_keys = [[] for _ in range(self.K)]
    for utt_id in tasks:
        task_keys[tasks[utt_id][0]].append(utt_id)

    # Sort samples in ascending order within each task
    sorted_task_keys = [
        sorted(t, key=lambda k: first_utt2shape[k][0]) for t in task_keys
    ]
    if len(first_utt2shape) == 0:
        raise RuntimeError(f"0 lines found: {shape_files[0]}")

    if padding:
        # If padding case, the feat-dim must be same over whole corpus,
        # therefore the first sample is referred
        feat_dims = [
            np.prod(d[sorted_task_keys[0][0]][1:]) for d in utt2shapes
        ]
    else:
        feat_dims = None

    self.task_batch_lists = []
    for k in range(self.K):
        # Shuffle keys within each task
        keys = sorted_task_keys[k]
        random.shuffle(keys)

        # Decide batch-sizes
        batch_sizes = []
        current_batch_keys = []
        for key in keys:
            current_batch_keys.append(key)
            # shape: (Length, dim1, dim2, ...)
            if padding:
                for d, s in zip(utt2shapes, shape_files):
                    if tuple(d[key][1:]) != tuple(
                        d[sorted_task_keys[0][0]][1:]
                    ):
                        raise RuntimeError(
                            "If padding=True, the "
                            f"feature dimension must be unified: {s}",
                        )
                bins = sum(
                    len(current_batch_keys) * sh[key][0] * dim
                    for sh, dim in zip(utt2shapes, feat_dims)
                )
            else:
                bins = sum(
                    np.prod(d[k])
                    for k in current_batch_keys
                    for d in utt2shapes
                )
            if bins > batch_bins and len(current_batch_keys) >= min_batch_size:
                batch_sizes.append(len(current_batch_keys))
                current_batch_keys = []
        else:
            if len(current_batch_keys) != 0 and (
                not self.drop_last or len(batch_sizes) == 0
            ):
                batch_sizes.append(len(current_batch_keys))

        if len(batch_sizes) == 0:
            # Maybe we can't reach here
            raise RuntimeError("0 batches")

        # If the last batch-size is smaller than minimum batch_size,
        # the samples are redistributed to the other mini-batches
        if len(batch_sizes) > 1 and batch_sizes[-1] < min_batch_size:
            for i in range(batch_sizes.pop(-1)):
                batch_sizes[-(i % len(batch_sizes)) - 1] += 1

        if not self.drop_last:
            # Bug check
            assert sum(batch_sizes) == len(
                keys
            ), f"{sum(batch_sizes)} != {len(keys)}"

        # Set mini-batch
        batch_list = []
        iter_bs = iter(batch_sizes)
        bs = next(iter_bs)
        minibatch_keys = []
        for key in keys:
            minibatch_keys.append(key)
            if len(minibatch_keys) == bs:
                if sort_in_batch == "descending":
                    minibatch_keys.reverse()
                elif sort_in_batch == "ascending":
                    # Keys are kept in their current (shuffled) order
                    pass
                else:
                    raise ValueError(
                        "sort_in_batch must be ascending"
                        f" or descending: {sort_in_batch}"
                    )
                batch_list.append(tuple(minibatch_keys))
                minibatch_keys = []
                try:
                    bs = next(iter_bs)
                except StopIteration:
                    break

        if sort_batch == "ascending":
            pass
        elif sort_batch == "descending":
            batch_list.reverse()
        else:
            raise ValueError(
                f"sort_batch must be ascending or descending: {sort_batch}"
            )
        self.task_batch_lists.append(batch_list)
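# Sketch of the per-task bucketing performed above, assuming a
# "text_int" task file that maps each utterance id to a task id in
# [0, K) (the names and data here are hypothetical):
_K = 2
_tasks = {"uttA": [0], "uttB": [1], "uttC": [0]}
_task_keys = [[] for _ in range(_K)]
for _utt_id in _tasks:
    _task_keys[_tasks[_utt_id][0]].append(_utt_id)
assert _task_keys == [["uttA", "uttC"], ["uttB"]]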