def flatten(inputs, name=None):
    """Flattens the input while maintaining the batch_size.

    Assumes that the first dimension represents the batch.

    Args:
        inputs: A tensor of size [batch_size, ...].
        name: Optional name for the operation.
            NOTE(review): currently unused — it is not forwarded to
            ``ops.Flatten``; confirm whether the op should receive it.

    Returns:
        A flattened tensor with shape [batch_size, k].
    """
    return ops.Flatten(inputs, axis=1)
def flatten(x, outdim=1):
    """Flatten ``x`` while preserving its leading dimensions.

    Parameters
    ----------
    x : Tensor
        The tensor to flatten.
    outdim : int
        How many leading dimensions to keep intact.

    Returns
    -------
    Tensor
        The flattened tensor.
    """
    flattened = ops.Flatten(x, keep_axes=outdim)
    return flattened
def to_one_hot(y, nb_class, **kwargs):
    """Build a matrix whose rows are the one hot encodings of ``y``.

    ``y`` should be a 1d vector. Extra keyword arguments are accepted
    for API compatibility but are ignored.

    Parameters
    ----------
    y : Tensor
        The input tensor.
    nb_class : int
        The number of classes (the one hot depth).

    Returns
    -------
    Tensor
        The one hot matrix.
    """
    # Flatten down to a vector first, then expand to the one hot matrix.
    vec = ops.Flatten(y, keep_axes=1)
    one_hot = ops.OneHot(vec, depth=nb_class)
    return one_hot
def flatten(inputs, name=None):
    """Flatten ``inputs``, keeping the leading (batch) axis.

    Parameters
    ----------
    inputs : Tensor
        The tensor to flatten.
    name : str, optional
        Accepted for API compatibility; not forwarded to the op.

    Returns
    -------
    Tensor
        The flattened tensor.
    """
    flat = ops.Flatten(inputs, axis=1)
    return flat
def LayerSetup(self, bottom):
    """Build the Flatten op for this layer from its stored arguments."""
    kwargs = self.arguments
    return _ops.Flatten(bottom, **kwargs)
def flatten(inputs, outputs_collections=None, scope=None):
    """Flatten ``inputs`` into a 2d tensor.

    Parameters
    ----------
    inputs : Tensor
        The tensor to flatten.
    outputs_collections : optional
        Accepted for API compatibility; unused here.
    scope : optional
        Accepted for API compatibility; unused here.

    Returns
    -------
    Tensor
        The flattened tensor.
    """
    # NOTE(review): presumably axis=0 with keep_axes=2 collapses everything
    # after the first axis into one — confirm against op_lib.Flatten, since
    # sibling wrappers in this file use axis=1 instead.
    result = op_lib.Flatten(inputs, axis=0, keep_axes=2)
    return result
def Setup(self, bottom):
    """Run the base layer setup, then build the Flatten op.

    ``bottom`` may be a single blob or a list of blobs; only the first
    blob is flattened.
    """
    super(FlattenLayer, self).Setup(bottom)
    if isinstance(bottom, list):
        blob = bottom[0]
    else:
        blob = bottom
    return ops.Flatten(blob, **self._param)