def index_one_hot_ellipsis(tensor, dim, index):
    """Select one entry along dimension `dim` per leading-batch element.

    Equivalent to `tensor[:, :, index, ...]` when `dim == 2`: the tensor is
    flattened to (leading, dim_size, trailing), one slice is gathered along
    the middle axis per row of the flattened batch, and the result is viewed
    back without the indexed dimension.
    """
    original_shape = tensor.size()
    leading = prod(original_shape[:dim])
    trailing = prod(original_shape[dim + 1:])
    flat = tensor.view(leading, original_shape[dim], trailing)
    # One index per flattened leading position.
    assert flat.size(0) == index.size(0)
    gather_index = index.unsqueeze(-1).unsqueeze(-1).expand(flat.size(0), 1, flat.size(2))
    picked = flat.gather(1, gather_index)
    return picked.view(original_shape[:dim] + original_shape[dim + 1:])
def forward(self, input):
    """Normalize `input` over dimension `self.dim` (mean 0, biased std 1).

    The input is flattened to (leading, normalized_dim, trailing), statistics
    are computed over the middle axis, and — when `self.affine` is set — the
    result is scaled by `self.weight` and shifted by `self.bias` (both
    broadcast via `_unsqueeze_ft`). The original shape is restored on return.

    NOTE(review): divides by the raw std with no epsilon — a constant slice
    yields inf/nan; confirm callers guarantee non-degenerate input.
    """
    # Resolve a negative axis to its positive equivalent.
    dim = self.dim if self.dim >= 0 else self.dim + input.dim()
    original_shape = input.size()
    flat = input.view(
        prod(original_shape[:dim]),
        original_shape[dim],
        prod(original_shape[dim + 1:]),
    )
    mean = flat.mean(1, keepdim=True)
    std = flat.std(1, keepdim=True, unbiased=False)  # biased (population) std
    # Keep the exact arithmetic order of each branch: float results are
    # sensitive to the association of the multiply/divide.
    if self.affine:
        output = (flat - mean) * (_unsqueeze_ft(self.weight) / std) + _unsqueeze_ft(self.bias)
    else:
        output = (flat - mean) / std
    return output.view(original_shape)
def index_one_hot_ellipsis(tensor, dim, index):
    """Select one entry along dimension `dim` for each leading-batch element.

    Args:
        tensor (Tensor): input.
        dim (int): the dimension to index along.
        index (LongTensor): the tensor containing the indices along the
            `dim` dimension; its length must equal the product of the
            dimensions of `tensor` before `dim`.

    Returns:
        Tensor: `tensor[:, :, index, ...]` (example shown for `dim == 2`);
        the result has the shape of `tensor` with dimension `dim` removed.
    """
    tensor_shape = tensor.size()
    # Flatten to (leading, dim_size, trailing) so gather can work on axis 1.
    tensor = tensor.view(prod(tensor_shape[:dim]), tensor_shape[dim], prod(tensor_shape[dim+1:]))
    # One index per flattened leading position.
    assert tensor.size(0) == index.size(0)
    # (N,) -> (N, 1, 1) -> (N, 1, trailing): same index replicated across trailing.
    index = index.unsqueeze(-1).unsqueeze(-1)
    index = index.expand(tensor.size(0), 1, tensor.size(2))
    tensor = tensor.gather(1, index)
    # Drop the indexed dimension from the output shape.
    return tensor.view(tensor_shape[:dim] + tensor_shape[dim+1:])