Code Example #1
    def __init__(self, encoder, n_classes, final_bias=0., chs=256, n_anchors=9, flatten=True, chip_size=(256,256), n_bands=3):

        # chs: number of channels for the top-down layers of the FPN
        
        super().__init__()
        self.n_classes, self.flatten = n_classes, flatten
        self.chip_size = chip_size

        # Fetch the sizes of the backbone's activation layers with a dummy forward pass
        sfs_szs = model_sizes(encoder, size=self.chip_size)

        # Hook the encoder's intermediate outputs for the top-down connections
        hooks = hook_outputs(encoder)

        self.encoder = encoder
        # 1x1 conv from C5 to P5, plus stride-2 convs that create the extra P6 and P7 levels
        self.c5top5 = conv2d(sfs_szs[-1][1], chs, ks=1, bias=True)
        self.c5top6 = conv2d(sfs_szs[-1][1], chs, stride=2, bias=True)
        self.p6top7 = nn.Sequential(nn.ReLU(), conv2d(chs, chs, stride=2, bias=True))
        # Top-down lateral merges that turn C4 and C3 into P4 and P3
        self.merges = nn.ModuleList([LateralUpsampleMerge(chs, szs[1], hook)
                                     for szs, hook in zip(sfs_szs[-2:-4:-1], hooks[-2:-4:-1])])
        # 3x3 convs that smooth the merged feature maps
        self.smoothers = nn.ModuleList([conv2d(chs, chs, 3, bias=True) for _ in range(3)])
        # Shared classification and box-regression subnets applied to every pyramid level
        self.classifier = self._head_subnet(n_classes, n_anchors, final_bias, chs=chs)
        self.box_regressor = self._head_subnet(4, n_anchors, 0., chs=chs)

        # Pass a dummy batch through the model to record the spatial size of each pyramid level
        x_dummy = torch.rand(n_bands, self.chip_size[0], self.chip_size[1]).unsqueeze(0)
        p_states = self._create_p_states(x_dummy)
        self.sizes = [[p.size(2), p.size(3)] for p in p_states]
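
Example #1 relies on fastai v1's hook utilities to size the feature pyramid before any real data is seen. Below is a minimal sketch of that dummy-forward pattern, assuming fastai v1 and torchvision; the ResNet-18 body is purely illustrative and is not the encoder used in the example above.

import torch
from torchvision.models import resnet18
from fastai.callbacks.hooks import model_sizes

# Illustrative encoder: the convolutional body of a ResNet-18.
encoder = torch.nn.Sequential(*list(resnet18(pretrained=False).children())[:-2])

# model_sizes runs a dummy batch through the encoder and returns the
# activation shape produced by each child module.
sfs_szs = model_sizes(encoder, size=(256, 256))
print(sfs_szs[-1])     # e.g. torch.Size([1, 512, 8, 8]) for the deepest feature map
print(sfs_szs[-1][1])  # 512, the channel count used to build c5top5 above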
Code Example #2
 def __init__(self, hooks, nc):
     super(Hcolumns, self).__init__()
     self.hooks = hooks
     self.n = len(self.hooks)
     self.factorization = None
     if nc is not None:
         # For each hooked feature map, two 3x3 convs project nc[i] channels down to nc[-1]
         self.factorization = nn.ModuleList()
         for i in range(self.n):
             self.factorization.append(nn.Sequential(
                 conv2d(nc[i], nc[-1], 3, padding=1, bias=True),
                 conv2d(nc[-1], nc[-1], 3, padding=1, bias=True)))
Code Example #3
File: cs_fpn.py Project: omriKramer/csPose
 def __init__(self, body, out_dims, fpn_dim=256, emb_type='conv'):
     super().__init__()
     self.ifn, self.bu, self.td, self.fusion, ch = build_fpn(body, fpn_dim, bu_in_lateral=True,
                                                             out_dim=out_dims['object'])
     self.embedding = resolve_embedding(emb_type, out_dims.keys(), ch[-1])
     # One prediction head per output key: a 3x3 conv_layer followed by a 1x1 conv2d
     head = {key: nn.Sequential(nnlayers.conv_layer(fpn_dim, fpn_dim), conv2d(fpn_dim, fn, ks=1, bias=True))
             for key, fn in out_dims.items()}
     self.head = nn.ModuleDict(head)
Code Example #4
File: nnlayers.py Project: omriKramer/csPose
 def __init__(self, in_dim, out_dims):
     super().__init__()
     d = {
         key: nn.Sequential(conv_layer(in_dim, in_dim),
                            conv2d(in_dim, fn, ks=1, bias=True))
         for key, fn in out_dims.items()
     }
     self.heads = nn.ModuleDict(d)
Code Example #5
 def _head_subnet(self,
                  n_classes,
                  n_anchors,
                  final_bias=0.,
                  n_conv=4,
                  chs=256):
     layers = [conv_layer(chs, chs, bias=True) for _ in range(n_conv)]
     layers += [conv2d(chs, n_classes * n_anchors, bias=True)]
     layers[-1].bias.data.zero_().add_(final_bias)
     layers[-1].weight.data.fill_(0)
     return nn.Sequential(*layers)
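
The last two lines of _head_subnet zero the weights of the final prediction layer and set its bias to final_bias. This mirrors the initialization used for the RetinaNet classification head, where the bias is chosen as -log((1 - pi) / pi) so that every anchor starts with a small predicted foreground probability pi (0.01 in the paper), keeping the focal loss stable early in training.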
Code Example #6
File: taskmod.py Project: omriKramer/csPose
 def __init__(self, instructor, fpn, fpn_dim=512):
     classifier = nn.Sequential(nnlayers.conv_layer(fpn_dim, fpn_dim), conv2d(fpn_dim, 1, ks=1, bias=True))
     super().__init__(instructor, fpn, classifier)
Code Example #7
 def __init__(self, ch, ch_lat, hook):
     super().__init__()
     self.hook = hook
     # 1x1 conv that projects the hooked lateral feature map to ch channels
     self.conv_lat = conv2d(ch_lat, ch, ks=1, bias=True)
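
All of the snippets above build small prediction heads out of fastai v1's conv2d (an initialized nn.Conv2d whose padding defaults to ks//2) and conv_layer (a 3x3 conv followed by ReLU and batch norm). Below is a minimal self-contained sketch of the 1x1 prediction-head pattern from examples #3, #4 and #6, assuming fastai v1; the channel counts are illustrative.

import torch
import torch.nn as nn
from fastai.layers import conv_layer, conv2d

fpn_dim, n_out = 256, 21  # illustrative sizes
head = nn.Sequential(
    conv_layer(fpn_dim, fpn_dim),             # 3x3 conv + ReLU + batch norm
    conv2d(fpn_dim, n_out, ks=1, bias=True),  # 1x1 projection to n_out channels
)

x = torch.rand(1, fpn_dim, 64, 64)  # dummy feature map
print(head(x).shape)                # torch.Size([1, 21, 64, 64])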