Example #1
 def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
     # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
     super(CrossConv, self).__init__()
     c_ = int(c2 * e)  # hidden channels
     self.cv1 = Conv(c1, c_, (1, k), (1, s))
     self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
     self.add = shortcut and c1 == c2
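Only the constructor is shown above; a minimal sketch of the matching forward pass (the 1xk and kx1 factorized convolutions with the optional residual add), assuming the same Conv wrapper:

 def forward(self, x):
     # 1xk conv followed by kx1 conv; add the input back when shortcut is set and c1 == c2
     return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))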
Example #2
 def __init__(self,
              c1,
              c2,
              k=1,
              s=1,
              g=1,
              act=True):  # ch_in, ch_out, kernel, stride, groups
     super(GhostConv, self).__init__()
     c_ = c2 // 2  # hidden channels
     self.cv1 = Conv(c1, c_, k, s, g, act)
     self.cv2 = Conv(c_, c_, 5, 1, c_, act)
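For completeness, a hedged sketch of the forward pass this constructor implies (the primary branch plus a cheap depthwise branch, concatenated; assumes torch is imported as in the rest of the module):

 def forward(self, x):
     y = self.cv1(x)                         # primary features, c2 // 2 channels
     return torch.cat([y, self.cv2(y)], 1)   # append cheap 5x5 depthwise features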
Example #3
 def __init__(self,
              c1,
              c2,
              n=1,
              shortcut=True,
              g=1,
              e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
     super(C3, self).__init__()
     c_ = int(c2 * e)  # hidden channels
     self.cv1 = Conv(c1, c_, 1, 1)
     self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
     self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
     self.cv4 = Conv(2 * c_, c2, 1, 1)
     self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
     self.act = nn.LeakyReLU(0.1, inplace=True)
     self.m = nn.Sequential(
         *[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])
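This is a CSP-style block: one branch runs through the CrossConv stack, the other through the 1x1 shortcut, and the two are fused after the shared BatchNorm. A forward sketch consistent with the layers above:

 def forward(self, x):
     y1 = self.cv3(self.m(self.cv1(x)))  # main branch through the CrossConv stack
     y2 = self.cv2(x)                    # shortcut branch
     return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))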
Example #4
 def __init__(self, c1, c2, k, s):
     super(GhostBottleneck, self).__init__()
     c_ = c2 // 2
     self.conv = nn.Sequential(
         GhostConv(c1, c_, 1, 1),  # pw
         DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(),  # dw
         GhostConv(c_, c2, 1, 1, act=False))  # pw-linear
     self.shortcut = nn.Sequential(DWConv(
         c1, c1, k, s, act=False), Conv(
             c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity()
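Since both branches are built in the constructor, the forward pass reduces to their sum; a minimal sketch:

 def forward(self, x):
     return self.conv(x) + self.shortcut(x)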
Example #5
 def __init__(self, mask=1, ch=()):
     super(DenseMask, self).__init__()
     self.proj1 = Conv(ch[0] // 2, 1, k=3)
     self.proj2 = nn.ConvTranspose2d(ch[1], 1, 4, stride=2, padding=1,
                                     output_padding=0, groups=1, bias=False)
     self.proj3 = nn.ConvTranspose2d(ch[2], 1, 8, stride=4, padding=2,
                                     output_padding=0, groups=1, bias=False)
     self.sigmoid = nn.Sigmoid()
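The constructor only defines the projection layers. A hypothetical forward pass (an assumption, not code from the original source) would bring each feature level to a single-channel map at a common resolution, merge them, and squash with the sigmoid; it assumes the input is a list of three feature maps whose first entry already has ch[0] // 2 channels:

 def forward(self, x):
     # x: [p1, p2, p3], with p2 and p3 at 1/2 and 1/4 of p1's resolution (assumed)
     m1 = self.proj1(x[0])
     m2 = self.proj2(x[1])  # stride-2 transposed conv upsamples to p1's size
     m3 = self.proj3(x[2])  # stride-4 transposed conv upsamples to p1's size
     return self.sigmoid(m1 + m2 + m3)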
Example #6
    def __init__(self, id_embedding=256, ch=()):
        super(SAAN, self).__init__()
        self.proj1 = nn.Sequential(Conv(ch[0] // 2, 256, k=3),
                                   SAAN_Attention(k_size=3, ch=256, s_state=True, c_state=False))
        self.proj2 = nn.Sequential(Conv(ch[1], 256, k=3),
                                   nn.ConvTranspose2d(256, 256, 4, stride=2,
                                                      padding=1, output_padding=0,
                                                      groups=256, bias=False),
                                   SAAN_Attention(k_size=3, ch=256, s_state=True, c_state=False))
        self.proj3 = nn.Sequential(Conv(ch[2], 256, k=3),
                                   nn.ConvTranspose2d(256, 256, 8, stride=4,
                                                      padding=2, output_padding=0,
                                                      groups=256, bias=False),
                                   SAAN_Attention(k_size=3, ch=256, s_state=True, c_state=False))

        self.node = nn.Sequential(SAAN_Attention(k_size=3, ch=256 * 3, s_state=False, c_state=True),
                                  Conv(256 * 3, 256, k=3),
                                  nn.Conv2d(256, id_embedding,
                                            kernel_size=1, stride=1,
                                            padding=0, bias=True))
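Likewise, a hypothetical forward pass for SAAN (again an assumption) would project the three levels to a common 256-channel resolution, concatenate them, and let self.node emit the id_embedding-dimensional map:

    def forward(self, x):
        p1 = self.proj1(x[0])
        p2 = self.proj2(x[1])  # stride-2 deconv brings it to p1's spatial size
        p3 = self.proj3(x[2])  # stride-4 deconv brings it to p1's spatial size
        return self.node(torch.cat([p1, p2, p3], dim=1))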
Example #7
    def __init__(self, k_size=3, ch=256, s_state=False, c_state=False):
        super(SAAN_Attention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.sigmoid = nn.Sigmoid()
        # self.conv1 = Conv(ch, ch, k=1)

        self.s_state = s_state
        self.c_state = c_state

        if c_state:
            self.c_attention = nn.Sequential(nn.Conv1d(1, 1, kernel_size=k_size,
                                                       padding=(k_size - 1) // 2, bias=False),
                                             nn.LayerNorm([1, ch]),
                                             nn.LeakyReLU(0.3, inplace=True),
                                             nn.Linear(ch, ch, bias=False))

        if s_state:
            self.conv_s = nn.Sequential(Conv(ch, ch // 4, k=1))
            self.s_attention = nn.Conv2d(2, 1, 7, padding=3, bias=False)
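Only the attention sub-modules are defined here; a CBAM-style forward sketch that is consistent with these layers (an assumption about how they are wired, not the original code) gates the input first by channel and then by spatial attention:

    def forward(self, x):
        if self.c_state:
            # channel attention: global pooling, then the 1-D conv / linear stack over the channel axis
            y = (self.avg_pool(x) + self.max_pool(x)).squeeze(-1).transpose(-1, -2)     # (B, 1, C)
            c_attn = self.sigmoid(self.c_attention(y)).transpose(-1, -2).unsqueeze(-1)  # (B, C, 1, 1)
            x = x * c_attn
        if self.s_state:
            # spatial attention: reduce channels, pool over them, score each location with the 7x7 conv
            s = self.conv_s(x)
            s = torch.cat([torch.mean(s, dim=1, keepdim=True),
                           torch.max(s, dim=1, keepdim=True)[0]], dim=1)  # (B, 2, H, W)
            x = x * self.sigmoid(self.s_attention(s))
        return x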