Example #1
    def __init__(self, model, input_dim, output_dim, hidden_dim, activation='sigmoid'):
        super().__init__(model, input_dim, output_dim, hidden_dim)

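        # For the MLP inference method, append the requested activation as the final module of self.layer.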
        if self.method is NeuralLogicInferenceMethod.MLP:
            self.layer.add_module(str(len(self.layer)), get_activation(activation))
        else:
            raise NotImplementedError('Unknown logic inference method: {}.'.format(self.method))
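
The pattern in this snippet: the parent constructor builds the MLP and stores it as self.layer (an nn.Sequential), and this subclass appends the requested output activation as one more child module. A minimal self-contained sketch of the same idea in plain PyTorch; the class names MLPInference and SigmoidMLPInference are illustrative stand-ins, not the classes defined around this snippet:

    import torch
    import torch.nn as nn

    class MLPInference(nn.Module):
        """Hypothetical stand-in for the parent class: a small two-layer MLP."""
        def __init__(self, input_dim, output_dim, hidden_dim):
            super().__init__()
            self.layer = nn.Sequential(
                nn.Linear(input_dim, hidden_dim), nn.ReLU(),
                nn.Linear(hidden_dim, output_dim),
            )

        def forward(self, x):
            return self.layer(x)

    class SigmoidMLPInference(MLPInference):
        def __init__(self, input_dim, output_dim, hidden_dim):
            super().__init__(input_dim, output_dim, hidden_dim)
            # Append the output activation as the next numbered child, mirroring
            # `self.layer.add_module(str(len(self.layer)), ...)` in the snippet.
            self.layer.add_module(str(len(self.layer)), nn.Sigmoid())

    print(SigmoidMLPInference(8, 4, 16)(torch.randn(2, 8)).shape)  # torch.Size([2, 4])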
Example #2
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding_mode='default', padding=0, border_mode='zero',
                 dilation=1, groups=1,
                 batch_norm=None, dropout=None, bias=None, activation=None):

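        # By default, use a bias term only when no batch normalization follows.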
        if bias is None:
            bias = (batch_norm is None)

        nr_dims = type(self).__nr_dims__
        clz_name = 'Conv{}d'.format(nr_dims)
        modules = [getattr(conv, clz_name)(
            in_channels, out_channels, kernel_size,
            stride=stride, padding_mode=padding_mode, padding=padding, border_mode=border_mode,
            dilation=dilation, groups=groups, bias=bias
        )]

        if batch_norm is not None and batch_norm is not False:
            modules.append(get_batcnnorm(batch_norm, out_channels, nr_dims))
        if dropout is not None and dropout is not False:
            modules.append(get_dropout(dropout, nr_dims))
        if activation is not None and activation is not False:
            modules.append(get_activation(activation))

        super().__init__(*modules)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding_mode = padding_mode
        self.padding = padding
        self.border_mode = border_mode
        self.dilation = dilation
        self.groups = groups
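
This constructor assembles convolution, optional batch normalization, dropout, and activation into one sequential block (the module list is passed to super().__init__), then records the configuration on the instance. A plain-PyTorch sketch of the same composition, with standard torch.nn modules standing in for the snippet's conv, get_batcnnorm, get_dropout, and get_activation helpers:

    import torch
    import torch.nn as nn

    def conv_block(in_channels, out_channels, kernel_size, *,
                   batch_norm=False, dropout=None, activation=None):
        # As in the snippet, the conv bias is dropped when batch norm follows,
        # since the norm's own shift parameter makes it redundant.
        bias = not batch_norm
        modules = [nn.Conv2d(in_channels, out_channels, kernel_size,
                             padding=kernel_size // 2, bias=bias)]
        if batch_norm:
            modules.append(nn.BatchNorm2d(out_channels))
        if dropout:
            modules.append(nn.Dropout2d(dropout))
        if activation is not None:
            modules.append(activation())
        return nn.Sequential(*modules)

    block = conv_block(3, 16, 3, batch_norm=True, dropout=0.1, activation=nn.ReLU)
    x = torch.randn(8, 3, 32, 32)
    print(block(x).shape)  # torch.Size([8, 16, 32, 32])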
Example #3
    def __init__(self, in_channels, out_channels, kernel_size, stride=None,
                 padding_mode='same', padding=0, border_mode=None,
                 dilation=1, groups=1,
                 output_size=None, scale_factor=None, resize_mode='nearest',
                 batch_norm=None, dropout=None, bias=None, activation=None,
                 algo='resizeconv'):

        super().__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding_mode = padding_mode
        self.padding = padding
        self.border_mode = border_mode
        self.dilation = dilation
        self.groups = groups

        if bias is None:
            bias = (batch_norm is None)
        self.algo = algo = DeconvAlgo.from_string(algo)

        nr_dims = type(self).__nr_dims__
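        # Two strategies: a transposed convolution, or a resize followed by a regular convolution.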
        if algo is DeconvAlgo.CONVTRANSPOSE:
            clz_name = 'ConvTranspose{}d'.format(nr_dims)
            assert scale_factor is not None
            assert stride is None, 'Cannot set stride for Conv-Transpose based Deconv.'
            assert border_mode is None, 'Cannot set border_mode for Conv-Transpose based Deconv.'
            self.deconv = getattr(conv, clz_name)(
                in_channels, out_channels, kernel_size, stride=scale_factor,
                padding_mode=padding_mode, padding=padding, border_mode='zero',
                dilation=dilation, groups=groups, bias=bias
            )
        elif algo is DeconvAlgo.RESIZECONV:
            clz_name = 'ResizeConv{}d'.format(nr_dims)
            stride = stride or 1
            border_mode = border_mode or 'replicate'
            self.deconv = getattr(conv, clz_name)(
                in_channels, out_channels, kernel_size, stride=stride,
                padding_mode=padding_mode, padding=padding, border_mode=border_mode,
                dilation=dilation, groups=groups, bias=bias,
                output_size=output_size, scale_factor=scale_factor, resize_mode=resize_mode
            )

        self.output_size = output_size
        self.scale_factor = scale_factor

        post_modules = []
        if batch_norm is not None and batch_norm is not False:
            post_modules.append(get_batcnnorm(batch_norm, out_channels, nr_dims))
        if dropout is not None and dropout is not False:
            post_modules.append(get_dropout(dropout, nr_dims))
        if activation is not None and activation is not False:
            post_modules.append(get_activation(activation))
        self.post_process = nn.Sequential(*post_modules)
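
Here algo chooses between two upsampling strategies: DeconvAlgo.CONVTRANSPOSE uses a transposed convolution whose stride is the scale factor, while DeconvAlgo.RESIZECONV resizes the feature map and then applies an ordinary convolution; both are followed by the same batch-norm/dropout/activation post-processing. A rough plain-PyTorch sketch of the two strategies; the function and argument names below are illustrative, not the snippet's API:

    import torch
    import torch.nn as nn

    def deconv_block(in_channels, out_channels, kernel_size, scale_factor,
                     algo='resizeconv'):
        if algo == 'convtranspose':
            # Transposed convolution: the scale factor plays the role of the stride.
            return nn.ConvTranspose2d(in_channels, out_channels, kernel_size,
                                      stride=scale_factor,
                                      padding=(kernel_size - 1) // 2,
                                      output_padding=scale_factor - 1)
        elif algo == 'resizeconv':
            # Resize-then-convolve: upsample first, then apply an ordinary
            # convolution, a common way to avoid checkerboard artifacts.
            return nn.Sequential(
                nn.Upsample(scale_factor=scale_factor, mode='nearest'),
                nn.Conv2d(in_channels, out_channels, kernel_size,
                          padding=kernel_size // 2),
            )
        raise ValueError('Unknown deconv algo: {}.'.format(algo))

    x = torch.randn(2, 16, 8, 8)
    print(deconv_block(16, 8, 3, scale_factor=2)(x).shape)                        # torch.Size([2, 8, 16, 16])
    print(deconv_block(16, 8, 3, scale_factor=2, algo='convtranspose')(x).shape)  # torch.Size([2, 8, 16, 16])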
Example #4
    def __init__(self, in_features, out_features, batch_norm=None, dropout=None, bias=None, activation=None):
        if bias is None:
            bias = (batch_norm is None)

        modules = [nn.Linear(in_features, out_features, bias=bias)]
        if batch_norm is not None and batch_norm is not False:
            modules.append(get_batcnnorm(batch_norm, out_features, 1))
        if dropout is not None and dropout is not False:
            modules.append(get_dropout(dropout, 1))
        if activation is not None and activation is not False:
            modules.append(get_activation(activation))
        super().__init__(*modules)

        self.in_features = in_features
        self.out_features = out_features
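
Example #4 applies the same compose-and-wrap pattern to a fully connected layer: nn.Linear followed by optional batch norm, dropout, and activation. A plain-PyTorch equivalent sketch, again with standard modules standing in for get_batcnnorm, get_dropout, and get_activation:

    import torch
    import torch.nn as nn

    def linear_block(in_features, out_features, *,
                     batch_norm=False, dropout=None, activation=None):
        bias = not batch_norm  # mirrors `bias = (batch_norm is None)` above
        modules = [nn.Linear(in_features, out_features, bias=bias)]
        if batch_norm:
            modules.append(nn.BatchNorm1d(out_features))
        if dropout:
            modules.append(nn.Dropout(dropout))
        if activation is not None:
            modules.append(activation())
        return nn.Sequential(*modules)

    fc = linear_block(128, 64, batch_norm=True, dropout=0.5, activation=nn.ReLU)
    print(fc(torch.randn(32, 128)).shape)  # torch.Size([32, 64])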