import torch
import torch.nn as nn
import torch.nn.functional as F


def _conv_linear(args,
                 filter_size,
                 num_features,
                 bias,
                 bias_start=0.0):
    """Convolution analogue of a linear map: conv2d(concat(args)) (+ bias).

    Args:
      args: a 4D Tensor or a list of 4D Tensors of shape [batch, depth, h, w].
      filter_size: int tuple of filter height and width.
      num_features: int, number of output channels.
      bias: whether to add a bias term.
      bias_start: starting value to initialize the bias; 0.0 by default.

    Returns:
      A 4D Tensor with shape [batch, num_features, h, w].

    Raises:
      ValueError: if some of the arguments has an unspecified or wrong shape.
    """
    # Calculate the total channel depth of the arguments (dimension 1).
    total_arg_size_depth = 0
    shapes = [list(a.shape) for a in args]

    for shape in shapes:
        if len(shape) != 4:
            raise ValueError("Linear is expecting 4D arguments: %s" %
                             str(shapes))

        if not shape[1]:
            raise ValueError("Linear expects a known shape[1] for all "
                             "arguments: %s" % str(shapes))

        total_arg_size_depth += shape[1]

    dtype = args[0].dtype

    # Convolution weights, [out_channels, in_channels, kh, kw] for F.conv2d.
    matrix = nn.Parameter(torch.empty(num_features,
                                      total_arg_size_depth,
                                      filter_size[0],
                                      filter_size[1],
                                      dtype=dtype))
    nn.init.xavier_uniform_(matrix)

    if len(args) == 1:
        res = F.conv2d(args[0], matrix, stride=1, padding='same')

    else:
        res = F.conv2d(torch.cat(args, dim=1),
                       matrix,
                       stride=1,
                       padding='same')

    if not bias:
        return res

    # Constant-initialized bias, broadcast over batch and spatial dims.
    bias_term = nn.Parameter(torch.full((num_features,), bias_start,
                                        dtype=dtype))

    return res + bias_term.view(1, -1, 1, 1)
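
A quick smoke test for the helper above. This is a hypothetical sketch, not part of the original source: it assumes NCHW tensors and PyTorch >= 1.9 (for padding='same'), and the names and shapes are illustrative only.

x = torch.randn(2, 8, 16, 16)   # [batch, depth, h, w]
h = torch.randn(2, 4, 16, 16)   # e.g. a recurrent state with its own depth
out = _conv_linear([x, h], (3, 3), num_features=32, bias=True)
print(out.shape)  # torch.Size([2, 32, 16, 16])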
Example #2
    # Requires: import numpy as np; import torch; import torch.nn.functional as F
    def forward(self, _input, **kwargs):
        image = _input[0]
        filters = _input[1]

        filter_size = self.filter_size
        # Resolve 'same' padding into explicit (pad_h, pad_w) for conv2d.
        border_mode = self.pad
        if border_mode == 'same':
            border_mode = tuple(s // 2 for s in filter_size[:2])

        if self.grouping:
            # Identity kernel that expands every local patch of one channel
            # into its own feature map (assumes a 2D filter_size here).
            filter_localexpand_np = np.reshape(
                np.eye(np.prod(filter_size)),
                (np.prod(filter_size), 1, filter_size[0], filter_size[1]))
            filter_localexpand = torch.from_numpy(
                filter_localexpand_np.astype('float32')).to(image.device)
            if self.flip_filters:
                # conv2d is a cross-correlation; flip for a true convolution.
                filter_localexpand = torch.flip(filter_localexpand, dims=[2, 3])

            outputs = []

            for i in range(3):
                input_localexpand = F.conv2d(image[:, [i], :, :],
                                             filter_localexpand,
                                             stride=self.stride,
                                             padding=border_mode)
                # Per-pixel weighted sum of each patch with its dynamic filter.
                output = torch.sum(input_localexpand * filters[i], dim=1,
                                   keepdim=True)
                outputs.append(output)

            output = torch.cat(outputs, dim=1)
        else:
            # Same expansion over all channels at once; filter_size is
            # (height, width, channels) in this branch.
            filter_localexpand_np = np.reshape(
                np.eye(np.prod(filter_size)),
                (np.prod(filter_size), filter_size[2],
                 filter_size[0], filter_size[1]))
            filter_localexpand = torch.from_numpy(
                filter_localexpand_np.astype('float32')).to(image.device)
            input_localexpand = F.conv2d(image, filter_localexpand,
                                         padding=border_mode)
            output = torch.sum(input_localexpand * filters, dim=1, keepdim=True)

        return output
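
A hedged smoke test for the layer above. The class is not shown in the source, so the attribute values, tensor shapes, and the SimpleNamespace standing in for self are all assumptions for illustration; forward is invoked as a plain function here.

import types
import numpy as np  # needed by forward() above
import torch

# Hypothetical instance attributes; not taken from the original source.
self_stub = types.SimpleNamespace(filter_size=(5, 5), stride=1, pad='same',
                                  grouping=True, flip_filters=False)

image = torch.randn(2, 3, 32, 32)                         # a batch of RGB frames
filters = [torch.randn(2, 25, 32, 32) for _ in range(3)]  # per-pixel 5x5 filters, one set per channel
output = forward(self_stub, [image, filters])
print(output.shape)  # torch.Size([2, 3, 32, 32])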