def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)):
    """Build an SPP-CSP block: CSP split around a spatial-pyramid-pooling core.

    c1/c2 are input/output channels; e scales the hidden width; k lists the
    max-pool kernel sizes of the pyramid. n, shortcut and g are accepted for
    signature compatibility with sibling CSP blocks.
    """
    super().__init__()
    hidden = int(2 * c2 * e)  # channel width of the internal branches
    self.cv1 = Conv(c1, hidden, 1, 1)
    # plain Conv2d (no BN/act): the shortcut branch is normalized jointly below
    self.cv2 = nn.Conv2d(c1, hidden, 1, 1, bias=False)
    self.cv3 = Conv(hidden, hidden, 3, 1)
    self.cv4 = Conv(hidden, hidden, 1, 1)
    # pooling pyramid; stride 1 with padding size // 2 keeps the spatial dims
    self.m = nn.ModuleList(
        [nn.MaxPool2d(kernel_size=size, stride=1, padding=size // 2) for size in k]
    )
    self.cv5 = Conv(4 * hidden, hidden, 1, 1)  # fuse input + len(k) pooled maps
    self.cv6 = Conv(hidden, hidden, 3, 1)
    self.bn = nn.BatchNorm2d(2 * hidden)  # normalizes the concatenated branches
    self.act = Mish()
    self.cv7 = Conv(2 * hidden, c2, 1, 1)
def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
    """Build a CSP2 block: n Bottlenecks on one branch, a 1x1 conv shortcut
    on the other, fused through shared BN + Mish.

    c1/c2: in/out channels; n: bottleneck count; shortcut/g forwarded to the
    Bottlenecks; e is accepted for signature parity but unused here.
    """
    super().__init__()
    hidden = int(c2)  # hidden channel width (no expansion in CSP2)
    self.cv1 = Conv(c1, hidden, 1, 1)
    # bias-free plain conv on the shortcut branch; BN below covers both branches
    self.cv2 = nn.Conv2d(hidden, hidden, 1, 1, bias=False)
    self.cv3 = Conv(2 * hidden, c2, 1, 1)
    self.bn = nn.BatchNorm2d(2 * hidden)
    self.act = Mish()
    stages = [Bottleneck(hidden, hidden, shortcut, g, e=1.0) for _ in range(n)]
    self.m = nn.Sequential(*stages)
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
    """Bias-free Conv2d followed by Mish, or by identity when act is False.

    c1/c2: in/out channels; k/s/p: kernel, stride, padding (p=None lets
    autopad pick 'same'-style padding); g: conv groups.
    """
    super().__init__()
    self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
    if act:
        self.act = Mish()
    else:
        self.act = nn.Identity()
def __init__(self, d=3, k=10, emb_dims=1024, dropout=0.5):
    """Build a DGCNN-style point network head.

    d: input point dimensionality; k: neighbor count (stored for use by the
    forward pass — presumably a kNN graph, confirm against forward); emb_dims:
    global embedding width; dropout: stored rate (not instantiated here).
    Final conv9 emits 2 channels per point.
    """
    super().__init__()
    self.emb_dims = emb_dims
    self.k = k
    self.dropout = dropout
    self.d = d

    # Local factories for the repeated Conv + BN + Mish triples.
    def edge_block(cin, cout):
        # 2-D 1x1 conv block operating on edge features
        return nn.Sequential(
            nn.Conv2d(cin, cout, kernel_size=1, bias=False),
            nn.BatchNorm2d(cout),
            Mish(),
        )

    def point_block(cin, cout):
        # 1-D 1x1 conv block operating on per-point features
        return nn.Sequential(
            nn.Conv1d(cin, cout, kernel_size=1, bias=False),
            nn.BatchNorm1d(cout),
            Mish(),
        )

    # Edge-feature stages; inputs double where features are concatenated.
    self.conv1 = edge_block(2 * self.d, 64)
    self.conv2 = edge_block(64, 64)
    self.conv3 = edge_block(64 * 2, 64)
    self.conv4 = edge_block(64, 64)
    self.conv5 = edge_block(64 * 2, 64)
    # Global embedding over the three 64-wide stage outputs concatenated.
    self.conv6 = point_block(64 * 3, self.emb_dims)
    # Per-point head: embedding + skip features, narrowed to 2 logits.
    self.conv7 = point_block(self.emb_dims + 64 * 3, 512)
    self.conv8 = point_block(512, 256)
    self.conv9 = nn.Conv1d(256, 2, kernel_size=1, bias=False)
print(opt) # Input img = torch.zeros((opt.batch_size, 3, *opt.img_size)) # image size(1,3,320,192) iDetection # Load PyTorch model model = attempt_load(opt.weights, map_location=torch.device('cpu')) # load FP32 model # Update model for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatability if isinstance(m, models.common.Conv) and isinstance( m.act, models.common.Mish): m.act = Mish() # assign activation if isinstance(m, models.common.BottleneckCSP) or isinstance(m, models.common.BottleneckCSP2) \ or isinstance(m, models.common.SPPCSP): if isinstance(m.bn, nn.SyncBatchNorm): bn = nn.BatchNorm2d(m.bn.num_features, eps=m.bn.eps, momentum=m.bn.momentum) bn.training = False bn._buffers = m.bn._buffers bn._non_persistent_buffers_set = set() m.bn = bn if isinstance(m.act, models.common.Mish): m.act = Mish() # assign activation # if isinstance(m, models.yolo.Detect): # m.forward = m.forward_export # assign forward (optional)