Example no. 1
    def __init__(self, num_input_features, args):
        """Build the layer: norm + 1x1 bottleneck conv, then norm + masked k x k conv.

        Args:
            num_input_features: channel count of the incoming feature map.
            args: namespace carrying the model hyper-parameters read below.
        """
        super().__init__()
        self.memory_efficient = args.memory_efficient
        self.drop_rate = args.convolution_dropout

        k = args.kernel_size
        growth = args.growth_rate
        bottleneck_width = args.bn_size * growth

        self.norm1 = PervasiveBatchNorm(num_input_features)
        # 1x1 conv reduces/expands channels before the spatial convolution.
        self.conv1 = nn.Conv2d(num_input_features, bottleneck_width,
                               kernel_size=1, stride=1, bias=False)
        self.norm2 = PervasiveBatchNorm(bottleneck_width)

        # "Same" padding per axis: target axis first, source axis second.
        padding = (args.target_dilation * (k - 1) // 2,
                   args.source_dilation * (k - 1) // 2)

        self.mconv2 = MaskedConvolution(bottleneck_width, growth, k, args,
                                        padding=padding)
Example no. 2
    def __init__(self, num_input_features, args):
        """Build the layer: 1x1 bottleneck conv, masked k x k conv, then a 2-layer FFN.

        Args:
            num_input_features: channel count of the incoming feature map.
            args: namespace carrying the model hyper-parameters read below.
        """
        super().__init__()
        self.memory_efficient = args.memory_efficient
        self.drop_rate = args.convolution_dropout

        k = args.kernel_size
        growth = args.growth_rate
        bottleneck_width = args.bn_size * growth

        # 1x1 conv reduces/expands channels before the spatial convolution.
        self.conv1 = nn.Conv2d(num_input_features, bottleneck_width,
                               kernel_size=1, stride=1, bias=False)

        # "Same" padding per axis: target axis first, source axis second.
        padding = (args.target_dilation * (k - 1) // 2,
                   args.source_dilation * (k - 1) // 2)

        self.mconv2 = MaskedConvolution(bottleneck_width, growth, k, args,
                                        padding=padding)

        # Position-wise feed-forward on top of the convolution output.
        self.fc1 = Linear(growth, args.ffn_dim)
        self.fc2 = Linear(args.ffn_dim, growth)
Example no. 3
    def __init__(self, num_input_features, kernel_size,
                 growth_rate, bn_size, drop_rate, args, bias=False):
        """Dense layer: BN + ReLU + 1x1 bottleneck conv, then a masked k x k conv block.

        Args:
            num_input_features: channel count of the incoming feature map.
            kernel_size: spatial size of the masked convolution.
            growth_rate: output channels produced by the masked convolution.
            bn_size: bottleneck multiplier (bottleneck = bn_size * growth_rate).
            drop_rate: dropout probability applied after the masked conv.
            args: extra hyper-parameters (unused here).
            bias: whether the convolutions carry a bias term.
        """
        super().__init__()

        self.drop_rate = drop_rate
        bottleneck = bn_size * growth_rate

        # NOTE(review): `self.layer` is created but never populated below —
        # looks vestigial; confirm against the forward() before removing.
        self.layer = nn.Sequential()
        self.bn1 = nn.BatchNorm2d(num_input_features)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(num_input_features, bottleneck,
                               kernel_size=1, stride=1, bias=bias)

        self.layer2 = nn.Sequential()
        self.layer2.add_module('bn2', nn.BatchNorm2d(bottleneck))
        self.layer2.add_module('relu2', nn.ReLU(inplace=True))

        # Zero every row strictly below the kernel's center row so the
        # convolution cannot look ahead along that axis.
        mask = torch.ones(growth_rate, bottleneck, kernel_size, kernel_size)
        if kernel_size > 1:
            mask[:, :, kernel_size // 2 + 1:, :] = 0

        self.layer2.add_module(
            'mconv2',
            MaskedConvolution(mask, bottleneck, growth_rate,
                              kernel_size, (kernel_size - 1) // 2, bias=bias))
        self.layer2.add_module('do1', nn.Dropout(p=self.drop_rate))
Example no. 4
    def __init__(self, num_features, kernel_size, args):
        """Residual conv layer: 1x1 reduction conv followed by a masked k x k conv.

        Args:
            num_features: channel count of the incoming (and outgoing) feature map.
            kernel_size: spatial size of the masked convolution.
            args: namespace carrying the model hyper-parameters read below.

        Raises:
            ValueError: if resolution must be maintained but conv_stride != 1.
        """
        super().__init__()
        self.drop_rate = args.convolution_dropout
        mid_features = args.reduce_dim
        stride = args.conv_stride  # source dimension stride
        dilsrc = args.source_dilation
        diltrg = args.target_dilation

        if args.maintain_resolution:
            # Full resolution is only possible without striding.
            if stride != 1:
                raise ValueError('Could not maintain the resolution with stride=%d' % stride)
            # "Same" padding on both axes: (target, source).
            padding = (diltrg * (kernel_size - 1) // 2,
                       dilsrc * (kernel_size - 1) // 2)
        else:
            # Only the target axis keeps its resolution; source may shrink.
            padding = (diltrg * (kernel_size - 1) // 2, 0)

        # Reduce dim should be dividible by groups
        self.conv1 = nn.Conv2d(num_features, mid_features,
                               kernel_size=1, stride=1, bias=args.conv_bias)

        self.mconv2 = MaskedConvolution(mid_features, num_features,
                                        kernel_size, args, padding=padding)
        self.scale = 0.5 ** .5  # 1/sqrt(2) residual scaling
        self.nonzero_padding = args.nonzero_padding
Example no. 5
    def __init__(self, num_features, kernel_size, args, source_dilation,
                 target_dilation):
        """Residual conv layer: 1x1 reduction conv, masked k x k conv, and a 2-layer FFN.

        Args:
            num_features: channel count of the incoming (and outgoing) feature map.
            kernel_size: spatial size of the masked convolution.
            args: namespace carrying the model hyper-parameters read below.
            source_dilation: dilation along the source axis.
            target_dilation: dilation along the target axis.

        Raises:
            ValueError: if resolution must be maintained but conv_stride != 1.
        """
        super().__init__()
        self.drop_rate = args.convolution_dropout
        ffn_dim = args.ffn_dim
        mid_features = args.reduce_dim
        stride = args.conv_stride  # source dimension stride
        resolution = args.maintain_resolution
        if resolution:
            if not stride == 1:
                raise ValueError(
                    'Could not maintain the resolution with stride=%d' %
                    stride)

            # choose the padding accordingly:
            # BUGFIX: the dilations were swapped — target-axis padding must use
            # target_dilation and source-axis padding source_dilation, matching
            # the sibling layers in this file.
            padding_trg = target_dilation * (kernel_size - 1) // 2
            padding_src = source_dilation * (kernel_size - 1) // 2
            padding = (padding_trg, padding_src)
        else:
            # must maintain the target resolution:
            # BUGFIX: `diltrg` was undefined here (NameError); use the
            # target_dilation parameter.
            padding = (target_dilation * (kernel_size - 1) // 2, 0)

        # Reduce dim should be dividible by groups
        self.conv1 = nn.Conv2d(num_features,
                               mid_features,
                               kernel_size=1,
                               stride=1,
                               bias=False)

        self.mconv2 = MaskedConvolution(
            mid_features,
            num_features,
            kernel_size,
            args,
            padding=padding,
            source_dilation=source_dilation,
            target_dilation=target_dilation,
        )
        self.fc1 = Linear(num_features, ffn_dim)
        self.fc2 = Linear(ffn_dim, num_features)
        self.scale = 0.5**.5  # 1/sqrt(2) residual scaling
Example no. 6
    def __init__(self, num_features, kernel_size, args):
        """Conv layer with LayerNorms: 1x1 reduction conv, separable masked conv, FFN.

        Args:
            num_features: channel count of the incoming (and outgoing) feature map.
            kernel_size: spatial size of the masked convolution.
            args: namespace carrying the model hyper-parameters read below.
        """
        super().__init__()
        self.drop_rate = args.convolution_dropout

        # 1x1 conv reduces channels before the spatial convolution.
        self.conv1 = nn.Conv2d(num_features, args.reduce_dim,
                               kernel_size=1, stride=1, bias=False)

        self.mconv2 = MaskedConvolution(args.reduce_dim, num_features,
                                        kernel_size, bias=False,
                                        separable=True)

        # LayerNorm + position-wise feed-forward + LayerNorm.
        self.ln1 = nn.LayerNorm(num_features)
        self.fc1 = Linear(num_features, args.ffn_dim)
        self.fc2 = Linear(args.ffn_dim, num_features)
        self.ln2 = nn.LayerNorm(num_features)
        self.scale = 2**.5  # sqrt(2) residual scaling