def ShiftRight(params, inputs, **kwargs):
  """Layer to shift the tensor to the right by padding on axis 1."""
  del params, kwargs
  pad_widths = [(0, 0), (1, 0)]
  pad_widths += [(0, 0) for _ in range(len(inputs.shape) - 2)]
  padded = np.pad(inputs, pad_widths, mode='constant')
  return padded[:, :-1, ...]
def ShiftRight(x, **unused_kwargs):
  """Layer to shift the tensor to the right by padding on axis 1."""
  pad_widths = [(0, 0)] * len(x.shape)
  pad_widths[1] = (1, 0)  # Padding on axis=1
  padded = np.pad(x, pad_widths, mode='constant',
                  constant_values=x.dtype.type(0))
  return padded[:, :-1]
def ShiftRight(x, mode='train', **unused_kwargs):
  """Layer to shift the tensor to the right by padding on axis 1."""
  if mode == 'predict':
    # Do nothing in predict mode, as then the sequence length is 1.
    return x
  pad_widths = [(0, 0)] * len(x.shape)
  pad_widths[1] = (1, 0)  # Padding on axis=1
  padded = np.pad(x, pad_widths, mode='constant',
                  constant_values=x.dtype.type(0))
  return padded[:, :-1]
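# A minimal usage sketch of the ShiftRight variants above, assuming np is
# plain numpy (the original layers likely ran on a numpy-compatible backend
# such as jax.numpy). Each token moves one position to the right, a zero is
# inserted at position 0, and the last token is dropped.
import numpy as np

x = np.array([[1, 2, 3], [4, 5, 6]])  # [batch, length]
shifted = ShiftRight(x)
assert np.array_equal(shifted, np.array([[0, 1, 2], [0, 4, 5]]))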
def call(self, x, params=(), **kwargs):
  assert self._padding == 'VALID'
  # Left pad with 0s. Applying an unmasked valid convolution on top of this
  # yields a causal convolution.
  # TODO(ddohan): Support strided and dilated convolutions.
  rate = 1
  effective_kernel_size = int((self._kernel_size[0] - 1) * rate + 1)
  pad = effective_kernel_size - 1
  x_leftpad = np.pad(x, pad_width=[[0, 0], [pad, 0], [0, 0]],
                     mode='constant')
  res = super(CausalConv, self).call(x_leftpad, params)
  return res
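# A sketch of the padding arithmetic above, assuming np is plain numpy and
# using np.convolve as a stand-in for the convolution CausalConv inherits:
# left-padding by kernel_size - 1 before a VALID convolution keeps the
# sequence length and makes output position t depend only on inputs at
# positions <= t.
import numpy as np

kernel_size = 3
seq = np.arange(1., 6.)  # [1, 2, 3, 4, 5]
padded = np.pad(seq, (kernel_size - 1, 0), mode='constant')
causal = np.convolve(padded, np.ones(kernel_size), mode='valid')
assert causal.shape == seq.shape
assert causal[0] == seq[0]  # Position 0 sees only input 0.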
def DiagonalGate(x, params, **kwargs):
  """Split channels in 3 parts. Shifts 1st and 3rd sections to left/right."""
  del params
  del kwargs
  # x : [batch, 1, length, depth]
  x = np.pad(x, [(0, 0), (0, 0), (1, 1), (0, 0)],
             mode='constant', constant_values=0.0)
  depth = x.shape[-1] // 3
  assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,
                                    x.shape)
  xs = [
      x[:, :, :-2, :depth],
      x[:, :, 1:-1, depth:2 * depth],
      x[:, :, 2:, 2 * depth:3 * depth]
  ]
  return np.concatenate(xs, axis=3)
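# A hypothetical shape check for DiagonalGate, assuming np is plain numpy:
# the output keeps the [batch, 1, length, depth] shape, with the first third
# of the channels shifted one step along the length axis, the middle third
# unchanged, and the last third shifted one step the other way.
import numpy as np

x = np.arange(24., dtype=np.float32).reshape(1, 1, 4, 6)  # depth 6 = 3 * 2
y = DiagonalGate(x, None)
assert y.shape == x.shape
assert np.array_equal(y[:, :, 1:, :2], x[:, :, :-1, :2])  # 1st third shifted.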
def ShiftRight(x, **unused_kwargs):
  """Layer to shift the tensor to the right by padding on axis 1."""
  if not isinstance(x, (list, tuple)):  # non-chunked inputs
    pad_widths = [(0, 0), (1, 0)]
    padded = np.pad(x, pad_widths, mode='constant')
    return padded[:, :-1]
  # Handling chunked inputs. Recall that the list of chunks represents a big
  # sequence (the concatenation of the chunks). We want to shift that
  # sequence, so we put a 0 in the beginning of the first chunk and the last
  # element of that chunk is used as the new first element of the next chunk,
  # and so on.
  padded = []
  last_value = np.zeros_like(x[0][:, -1])
  for chunk in x:
    padded_chunk = np.concatenate([last_value[:, np.newaxis], chunk], axis=1)
    last_value = chunk[:, -1]
    padded.append(padded_chunk[:, :-1])
  return padded
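# A consistency sketch for the chunked path above, assuming np is plain
# numpy: shifting a chunked sequence should match shifting the concatenated
# sequence and re-chunking it.
import numpy as np

full = np.arange(8).reshape(1, 8)
chunks = [full[:, :4], full[:, 4:]]
shifted_chunks = ShiftRight(chunks)
assert np.array_equal(np.concatenate(shifted_chunks, axis=1),
                      ShiftRight(full))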
def apply_fun(params, inputs, **kwargs):
  del params, kwargs
  pad_widths = [(0, 0), (1, 0)]
  pad_widths += [(0, 0) for _ in range(len(inputs.shape) - 2)]
  padded = np.pad(inputs, pad_widths, mode='constant')
  return padded[:, :-1, ...]
def pad_input(x):
  # Note: pad_len is a free variable here, supplied by the enclosing scope.
  pad_widths = [(0, 0)] * len(x.shape)
  pad_widths[-2] = (0, pad_len)  # Padding on axis=-2
  return np.pad(x, pad_widths, mode='constant',
                constant_values=x.dtype.type(0))
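# A hypothetical usage sketch, assuming np is plain numpy and that pad_len
# rounds the axis=-2 length up to a multiple of some chunk size (an
# assumption; pad_len is just a free variable in the original helper).
import numpy as np

chunk_size = 4
x = np.ones((2, 6, 8))
pad_len = -x.shape[-2] % chunk_size  # 2 more rows to reach a multiple of 4.
assert pad_input(x).shape == (2, 8, 8)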
def ShiftRight(x, **unused_kwargs):
  """Layer to shift the tensor to the right by padding on axis 1."""
  pad_widths = [(0, 0), (1, 0)]
  pad_widths += [(0, 0) for _ in range(len(x.shape) - 2)]
  padded = np.pad(x, pad_widths, mode='constant')
  return padded[:, :-1, ...]