def wrapper(x, *args, **kwargs):
    input_axis = resolve_deprecation(axis, x.ndim, patch_size, stride)
    local_size, local_stride = broadcast_to_axis(input_axis, patch_size, stride)

    if valid:
        # pad the input so that a whole number of patches covers it exactly
        shape = extract(x.shape, input_axis)
        padded_shape = np.maximum(shape, local_size)
        new_shape = padded_shape + (local_stride - padded_shape + local_size) % local_stride
        x = pad_to_shape(x, new_shape, input_axis, padding_values, ratio)

    # predict each patch of the grid and recombine the results
    patches = pmap(predict, divide(x, local_size, local_stride, input_axis), *args, **kwargs)
    prediction = combine(patches, extract(x.shape, input_axis), local_stride, axis)

    if valid:
        # remove the padding added above
        prediction = crop_to_shape(prediction, shape, axis, ratio)
    return prediction

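# Hedged usage sketch: this ``wrapper`` appears to be the inner function of a grid-patch
# prediction decorator (a ``patches_grid``-style factory; the factory name and ``network``
# below are assumptions, not shown in this snippet). The decorated predictor pads the input
# to a whole number of strides, predicts every patch and recombines the outputs:
#
#     @patches_grid(patch_size=(64, 64, 64), stride=(32, 32, 32))
#     def predict(patch):
#         return network(patch)
#
#     prediction = predict(image)    # spatially aligned with ``image``
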
from sklearn.model_selection import StratifiedKFold, train_test_split


def stratified_train_val_test_split(ids, labels, *, val_size, n_splits, random_state=42):
    """For each of ``n_splits`` stratified CV folds, return a (train_ids, val_ids, test_ids) triple."""
    cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_state)

    train_val_test_ids = []
    for i, (train_val_indices, test_indices) in enumerate(cv.split(ids, labels)):
        train_val_ids = extract(ids, train_val_indices)
        test_ids = extract(ids, test_indices)
        # carve the validation set out of the train+val part of the fold
        train_ids, val_ids = train_test_split(
            train_val_ids, test_size=val_size, random_state=25 + i
        )
        train_val_test_ids.append((train_ids, val_ids, test_ids))

    return train_val_test_ids

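# Hedged usage sketch for the splitter above (the ``_example_*`` name and toy data are
# illustrative only; ``extract`` is assumed to be in scope, as in the function itself).
def _example_stratified_split():
    ids = [f'id_{i}' for i in range(20)]
    labels = [i % 2 for i in range(20)]           # binary target used for stratification
    splits = stratified_train_val_test_split(ids, labels, val_size=0.25, n_splits=5)
    train_ids, val_ids, test_ids = splits[0]      # the first of the n_splits folds
    return train_ids, val_ids, test_ids
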
def get_random_patch(*arrays: np.ndarray, patch_size: AxesLike, axes: AxesLike = None,
                     distribution: Callable = uniform):
    """
    Get a random patch of size ``patch_size`` along the ``axes`` for each of the ``arrays``.
    The patch position is the same for all the ``arrays``.

    Parameters
    ----------
    arrays
    patch_size
    axes
    distribution: Callable(shape)
        function that samples a random number in the range ``[0, n)`` for each axis.
        Defaults to a uniform distribution.
    """
    if not arrays:
        raise ValueError('No arrays given.')

    axes = expand_axes(axes, patch_size)
    check_shape_along_axis(*arrays, axis=axes)
    shape = extract(arrays[0].shape, axes)
    start = distribution(shape_after_convolution(shape, patch_size))
    box = np.array([start, start + patch_size])

    return squeeze_first(tuple(crop_to_box(arr, box, axes) for arr in arrays))

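# Hedged usage sketch: taking the same random spatial patch from an image and its mask
# (the ``_example_*`` name and the toy arrays are illustrative only).
def _example_get_random_patch():
    image = np.random.rand(3, 64, 64)                  # channels-first image
    mask = np.random.randint(0, 2, (3, 64, 64))
    # both patches are (3, 32, 32) and are cut from the same spatial location
    image_patch, mask_patch = get_random_patch(image, mask, patch_size=(32, 32), axes=(1, 2))
    return image_patch, mask_patch
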
def wrapper(x):
    if valid:
        shape = np.array(x.shape)[list(axes)]
        padded_shape = np.maximum(shape, patch_size)
        new_shape = padded_shape + (stride - padded_shape + patch_size) % stride
        x = pad_to_shape(x, new_shape, axes, padding_values, ratio)

    patches = map(predict, divide(x, patch_size, stride, axes))
    prediction = combine(patches, extract(x.shape, axes), stride, axes)

    if valid:
        prediction = crop_to_shape(prediction, shape, axes, ratio)
    return prediction

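# Hedged worked example of the padding arithmetic above: with shape = 100, patch_size = 64
# and stride = 32, padded_shape = max(100, 64) = 100 and (32 - 100 + 64) % 32 = (-4) % 32 = 28,
# so new_shape = 100 + 28 = 128, i.e. 64 + 2 * 32, the smallest size not less than 100 that a
# whole grid of patches covers exactly; after prediction the result is cropped back to 100.
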
from functools import partial

from scipy.ndimage import gaussian_filter, map_coordinates


def elastic_transform(x: np.ndarray, amplitude: float, axes: AxesLike = None, order: int = 1):
    """Apply a gaussian elastic distortion with a given amplitude to a tensor along the given axes."""
    axes = expand_axes(axes, x.shape)
    grid_shape = extract(x.shape, axes)
    # a smoothed random displacement field for each of the distorted axes
    deltas = [
        gaussian_filter(np.random.uniform(-amplitude, amplitude, grid_shape), 1)
        for _ in grid_shape
    ]
    grid = np.mgrid[tuple(map(slice, grid_shape))] + deltas
    return apply_along_axes(partial(map_coordinates, coordinates=grid, order=order), x, axes)

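# Hedged usage sketch: distorting only the spatial axes of a channels-first image
# (the ``_example_*`` name and the toy array are illustrative only).
def _example_elastic_transform():
    image = np.random.rand(3, 64, 64)
    return elastic_transform(image, amplitude=3, axes=(1, 2))    # same shape as ``image``
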
def shape_after_full_convolution(shape: AxesLike, kernel_size: AxesLike, axes: AxesLike = None,
                                 stride: AxesLike = 1, padding: AxesLike = 0, dilation: AxesLike = 1,
                                 valid: bool = True) -> tuple:
    """
    Get the shape of a tensor after applying a convolution with the given parameters along the given axes.
    The dimensions along the remaining axes become singleton.
    """
    axes, *params = broadcast_to_axes(axes, kernel_size, stride, padding, dilation)
    return fill_by_indices(
        np.ones_like(shape),
        shape_after_convolution(extract(shape, axes), *params, valid),
        axes
    )

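# Hedged worked example (``_example_*`` is an illustrative name; the helpers used above are
# assumed to be in scope): a 3x3 kernel with stride 2 along axes (1, 2) of a (3, 20, 30)
# tensor gives floor((20 - 3) / 2) + 1 = 9 and floor((30 - 3) / 2) + 1 = 14, while the
# remaining axis becomes singleton.
def _example_shape_after_full_convolution():
    return shape_after_full_convolution((3, 20, 30), kernel_size=3, axes=(1, 2), stride=2)  # (1, 9, 14)
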
def test_extract(self):
    idx = [2, 5, 3, 9, 0]
    self.assertListEqual(extract(range(15), idx), idx)
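
# For reference, the behaviour this test assumes from ``extract`` (defined elsewhere in the
# codebase) is equivalent to the following minimal sketch (an assumption, not the actual
# implementation):
#
#     def extract(sequence, indices):
#         return [sequence[i] for i in indices]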