Example 1
    @classmethod
    def make_conv(cls, cin, cout, f=2, stride=2):
        # Channel split for octave convolution: a full-width high-frequency
        # branch and a 1/f-width low-frequency branch.
        cin2 = cin // f
        cout2 = cout // f
        conv1 = lambda x, y, s: SequenceWise(ConvLayer(x, y, stride=s))
        conv2 = lambda x, y, s: SequenceWise(ConvLayer(x, y, stride=s))
        return OctConv((cin, cin2), (cout, cout2), (conv1, conv2),
                       sequence_upsample, stride)
Example 2
    def __init__(self,
                 in_channels,
                 num_anchors,
                 num_classes,
                 act='sigmoid',
                 n_layers=0):
        super(BoxHead, self).__init__()
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.num_anchors = num_anchors

        self.aspect_ratios = []
        self.act = act

        conv_func = lambda x, y: ConvLayer(
            x, y, norm='none', activation='ReLU')
        # 4 regression outputs per anchor, and one score per anchor-class pair
        self.box_head = self._make_head(in_channels, self.num_anchors * 4,
                                        n_layers, conv_func)
        self.cls_head = self._make_head(in_channels,
                                        self.num_anchors * self.num_classes,
                                        n_layers, conv_func)

        def initialize_layer(layer):
            if isinstance(layer, nn.Conv2d):
                nn.init.normal_(layer.weight, std=0.01)
                if layer.bias is not None:
                    nn.init.constant_(layer.bias, val=0)

        self.cls_head.apply(initialize_layer)
        self.box_head.apply(initialize_layer)

        if self.act == 'softmax':
            softmax_init(self.cls_head[-1], self.num_anchors, self.num_classes)
        else:
            sigmoid_init(self.cls_head[-1], self.num_anchors, self.num_classes)
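The final sigmoid_init/softmax_init calls are repo helpers. A hedged sketch of the RetinaNet-style prior initialization such helpers typically perform (per the focal-loss paper), written for a raw nn.Conv2d with pi as the assumed foreground prior; the repo's actual helpers may differ:

import math
import torch.nn as nn

def sigmoid_init_sketch(conv, num_anchors, num_classes, pi=0.01):
    # Bias the last layer so every anchor starts near P(foreground) = pi,
    # which keeps early training from drowning in false positives.
    nn.init.normal_(conv.weight, std=0.01)
    nn.init.constant_(conv.bias, -math.log((1 - pi) / pi))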
Example 3
    def __init__(self,
                 in_channels,
                 hidden_dim,
                 kernel_size=3,
                 hard=False,
                 local=False):
        super(ConvPlastic, self).__init__(hard)
        self.hidden_dim = hidden_dim

        self.conv_x2h = SequenceWise(
            ConvLayer(in_channels,
                      hidden_dim,
                      kernel_size=5,
                      stride=2,
                      dilation=1,
                      padding=2,
                      activation='Identity'))

        self.K = kernel_size
        self.P = kernel_size // 2
        self.C = hidden_dim
        self.CK2 = self.C * self.K**2  # flattened kernel volume per output channel

        # fixed part of weights
        self.fixed_weights = nn.Parameter(
            .01 * torch.randn(self.C, self.C, self.K, self.K).float())

        # fixed modulation of plastic weights
        self.alpha = nn.Parameter(
            .01 * torch.randn(self.C, self.C, self.K, self.K).float())

        self.eta = nn.Parameter(.01 * torch.ones(1).float())
        self.reset()
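fixed_weights, alpha, and eta match the differentiable-plasticity recipe (Miconi et al.): the effective kernel is the fixed part plus an alpha-gated Hebbian trace updated online at rate eta. A minimal sketch of one step under that assumption; the repo's actual update rule may differ, and hebb and the correlation estimate below are illustrative:

import torch
import torch.nn.functional as F

def plastic_conv_step(x, fixed_weights, alpha, hebb, eta, K, P):
    # x: (N, C, H, W); fixed_weights, alpha, hebb: (C, C, K, K); eta: (1,)
    w_eff = fixed_weights + alpha * hebb            # fixed + plastic kernel
    y = F.conv2d(x, w_eff, padding=P)
    # Hebbian trace: running average of the post/pre activity correlation,
    # accumulated per kernel position via unfold.
    pre = F.unfold(x, K, padding=P)                 # (N, C*K*K, H*W)
    post = y.flatten(2)                             # (N, C, H*W)
    corr = torch.einsum('ncl,nkl->ck', post, pre) / (pre.shape[0] * pre.shape[-1])
    hebb = (1 - eta) * hebb + eta * corr.view_as(hebb)
    return y, hebb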
Example 4
def ff_preact_stem(cin, base):
    return SequenceWise(
        nn.Sequential(
            ConvLayer(cin, base * 2, kernel_size=7, stride=2, padding=3),
            PreActBlock(base * 2, base * 4, stride=2),
            PreActBlock(base * 4, base * 8, stride=2),
        ))
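SequenceWise appears in almost every example here. A minimal sketch of the usual pattern (as in deepspeech.pytorch), assuming inputs are frame sequences shaped (T, N, C, H, W): fold time into the batch, apply the wrapped module once, and unfold back. The repo's version may handle shapes differently.

import torch.nn as nn

class SequenceWiseSketch(nn.Module):
    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, x):
        t, n = x.shape[:2]
        y = self.module(x.flatten(0, 1))   # (T*N, C, H, W)
        return y.view(t, n, *y.shape[1:])  # back to (T, N, C', H', W')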
Example 5
    def __init__(self, channel_list, mode='cat'):
        down = partial(ConvLSTMCell, stride=2)
        up = partial(ConvLSTMCell, stride=1)
        skip = partial(nn.Conv2d, kernel_size=3, stride=1, padding=1)
        resize = lambda x, y: F.interpolate(
            x, size=y.shape[-2:], mode='nearest')
        super(ONet, self).__init__(channel_list, mode, down, up, skip, resize)
        self.feedback = ConvLayer(self.ups[-1].out_channels,
                                  2 * self.downs[0].out_channels,
                                  stride=2)
Example 6
    def __init__(self, backbone, out_channels=256, add_p6p7=True):
        super(BackboneWithP6P7, self).__init__()
        self.bb = backbone
        self.levels = 5
        self.add_p6p7 = add_p6p7
        self.out_channel_list = list(backbone.out_channel_list)
        if add_p6p7:
            self.p6 = SequenceWise(
                ConvLayer(backbone.out_channel_list[-1],
                          out_channels,
                          norm='none',
                          stride=2))
            self.p7 = SequenceWise(
                ConvLayer(out_channels,
                          out_channels,
                          stride=2,
                          norm='none',
                          activation='Identity'))
            # only advertise the extra pyramid levels when they actually exist
            self.out_channel_list += [out_channels, out_channels]
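A hedged sketch of how p6/p7 typically extend the feature list downstream (RetinaNet convention: each extra level downsamples the previous coarsest one); features here is an assumed list of backbone outputs, not the repo's forward:

def extend_with_p6p7(features, p6, p7):
    p6_out = p6(features[-1])            # stride-2 conv on the coarsest map
    return list(features) + [p6_out, p7(p6_out)]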
Example 7
    def __init__(self,
                 in_channels_list,
                 out_channels,
                 up=lambda x, y: SequenceWise(nn.Conv2d(x, y, 3, 1, 1)),
                 add_p6p7=True):
        super(FeaturePyramidNetwork, self).__init__()

        skip = lambda x, y: SequenceWise(nn.Conv2d(x, y, 1, 1, 0))

        self.inner_blocks = nn.ModuleList()
        self.layer_blocks = nn.ModuleList()
        for in_channels in in_channels_list:
            if in_channels == 0:
                continue
            inner_block_module = skip(in_channels, out_channels)
            layer_block_module = up(out_channels, out_channels)
            self.inner_blocks.append(inner_block_module)
            self.layer_blocks.append(layer_block_module)

        self.add_p6p7 = add_p6p7

        if add_p6p7:
            self.p6 = SequenceWise(
                ConvLayer(in_channels_list[-1],
                          out_channels,
                          stride=2,
                          norm='InstanceNorm2d',
                          activation='ReLU'))
            self.p7 = SequenceWise(
                ConvLayer(out_channels,
                          out_channels,
                          stride=2,
                          norm='InstanceNorm2d',
                          activation='Identity'))

        # initialize conv parameters now to avoid modifying the initialization
        # of top_blocks; the convolutions above are wrapped in SequenceWise and
        # ModuleList, so iterate modules() rather than children()
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight, a=1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
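For reference, a hedged sketch of the standard FPN top-down pass these blocks implement (Lin et al., 2017), written for plain 4D (N, C, H, W) maps for clarity; the repo's SequenceWise-wrapped forward will differ in shape handling:

import torch.nn.functional as F

def fpn_forward_sketch(features, inner_blocks, layer_blocks):
    last = inner_blocks[-1](features[-1])          # project coarsest level
    outs = [layer_blocks[-1](last)]
    for feat, inner, layer in zip(features[-2::-1], inner_blocks[-2::-1],
                                  layer_blocks[-2::-1]):
        lateral = inner(feat)                      # 1x1 lateral projection
        last = lateral + F.interpolate(last, size=lateral.shape[-2:])
        outs.insert(0, layer(last))                # 3x3 smoothing block
    return outs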
Example 8
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 padding=1,
                 dilation=1,
                 bias=True,
                 conv_func=nn.Conv2d,
                 hard=False,
                 feedback_channels=None):
        super(ConvLSTMCell, self).__init__(hard)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # one convolution produces all four LSTM gates (i, f, g, o) at once
        self.conv_x2h = ConvLayer(in_channels,
                                  self.out_channels * 4,
                                  activation='Identity',
                                  stride=stride)

        self.conv_h2h = conv_func(in_channels=self.out_channels,
                                  out_channels=4 * self.out_channels,
                                  kernel_size=kernel_size,
                                  stride=1,
                                  padding=padding,
                                  dilation=dilation,
                                  bias=bias)

        if feedback_channels is not None:
            self.conv_fb2h = conv_func(in_channels=feedback_channels,
                                       out_channels=4 * self.out_channels,
                                       kernel_size=kernel_size,
                                       stride=1,
                                       padding=padding,
                                       dilation=dilation,
                                       bias=bias)

        self.stride = stride

        self.reset()
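The 4x channel factor above packs the four LSTM gates into a single convolution. A sketch of the standard ConvLSTM gate arithmetic this implies; the repo's forward may order the gates differently:

import torch

def convlstm_gates(x2h, h2h, c):
    # split the stacked pre-activations into input/forget/cell/output gates
    i, f, g, o = torch.chunk(x2h + h2h, 4, dim=1)
    c = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
    h = torch.sigmoid(o) * torch.tanh(c)
    return h, c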