import math

import torch
import torch.nn as nn

# `Activation` and `make_divisible` are repo-local helpers (an activation
# factory and a channel-rounding utility); they are assumed to be importable
# from this repo's own modules.


def vgg(cfg, i, batch_norm=False):
    """Build the VGG-16 backbone layer list; `i` is the input channel count.

    In `cfg`, 'M' adds a 2x2 max pool and 'C' adds a ceil-mode max pool;
    integer entries add a 3x3 conv with that many output channels.
    """
    layers = []
    in_channels = i
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        elif v == 'C':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [
                    conv2d,
                    nn.BatchNorm2d(v),
                    Activation('relu', auto_optimize=True)
                ]
            else:
                layers += [conv2d, Activation('relu', auto_optimize=True)]
            in_channels = v
    # pool5 plus the dilated conv6/conv7 pair that replaces VGG's fc layers.
    pool5 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
    conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=3, dilation=3)
    conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
    layers += [
        pool5, conv6,
        Activation('relu', auto_optimize=True), conv7,
        Activation('relu', auto_optimize=True)
    ]
    return layers
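
# Example usage, assuming the standard SSD/RefineDet VGG-16 config; the
# exact cfg this repo uses is not shown in this excerpt:
#
#     vgg_cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C',
#                512, 512, 512, 'M', 512, 512, 512]
#     backbone = nn.ModuleList(vgg(vgg_cfg, 3))
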
class GhostModule(nn.Module):

    def __init__(self, in_c, out_c, kernel_size=1, ratio=2, dw_size=3,
                 stride=1, act=True, act_type='relu'):
        super(GhostModule, self).__init__()
        if ratio != 2:
            print("Please change the output channels manually.")
        # Split out_c between a dense primary conv and cheap depthwise ops.
        init_c = math.ceil(out_c / ratio)
        new_c = init_c * (ratio - 1)
        self.primary_conv = nn.Sequential(
            nn.Conv2d(in_c, init_c, kernel_size, stride, kernel_size // 2,
                      bias=False),
            nn.BatchNorm2d(init_c),
            Activation(act_type) if act else nn.Identity())
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(init_c, new_c, dw_size, 1, dw_size // 2,
                      groups=init_c, bias=False),
            nn.BatchNorm2d(new_c),
            Activation(act_type) if act else nn.Identity())
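    # A minimal forward sketch, assuming the standard GhostNet behavior:
    # run the cheap depthwise branch on the primary output and concatenate
    # the two along the channel axis. (The reference GhostNet also trims
    # the result to out_c when ratio does not divide out_c evenly; out_c is
    # not stored by this __init__, so no trim is shown here.)
    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        return torch.cat([x1, x2], dim=1)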
class Head(nn.Module):

    def __init__(self, in_c, mid_c, out_c, dropout, act_type='relu'):
        super(Head, self).__init__()
        # Global pool -> 1x1 conv expansion -> dropout -> linear classifier.
        self.head = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_c, mid_c, 1, bias=True),
            Activation(act_type),
            nn.Flatten(),
            nn.Dropout(dropout) if dropout > 0 else nn.Identity(),
            nn.Linear(mid_c, out_c))
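    # Trivial forward, included so the class is runnable as written (the
    # repo's own forward is not part of this excerpt):
    def forward(self, x):
        return self.head(x)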
class GhostNet600(nn.Module):

    def __init__(self, num_classes=1000, width=1.75, dropout=0.8):
        super(GhostNet600, self).__init__()
        assert dropout >= 0, "Use 0 to disable dropout or a value > 0 to enable it."
        self.width = width
        stem_c = make_divisible(16 * width, 4)
        self.stem = Stem(stem_c, 'h_swish')
        # GhostBottleneck args: in_c, mid_c, out_c, kernel_size, stride,
        # then a 0.1 per-block hyperparameter whose meaning is defined by
        # GhostBottleneck (not shown in this excerpt), and the act type.
        self.stage = nn.Sequential(
            # stage1
            GhostBottleneck(stem_c, self.get_c(16), self.get_c(16), 3, 1, 0.1, 'h_swish'),
            # stage2
            GhostBottleneck(self.get_c(16), self.get_c(48), self.get_c(24), 3, 2, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(24), self.get_c(72), self.get_c(24), 3, 1, 0.1, 'h_swish'),
            # stage3
            GhostBottleneck(self.get_c(24), self.get_c(72), self.get_c(40), 5, 2, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(40), self.get_c(120), self.get_c(40), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(40), self.get_c(120), self.get_c(40), 3, 1, 0.1, 'h_swish'),
            # stage4
            GhostBottleneck(self.get_c(40), self.get_c(240), self.get_c(80), 3, 2, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(80), self.get_c(200), self.get_c(80), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(80), self.get_c(200), self.get_c(80), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(80), self.get_c(200), self.get_c(80), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(80), self.get_c(480), self.get_c(112), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(112), self.get_c(672), self.get_c(112), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(112), self.get_c(672), self.get_c(112), 3, 1, 0.1, 'h_swish'),
            # stage5
            GhostBottleneck(self.get_c(112), self.get_c(672), self.get_c(160), 5, 2, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(160), self.get_c(960), self.get_c(160), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(160), self.get_c(960), self.get_c(160), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(160), self.get_c(960), self.get_c(160), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(160), self.get_c(960), self.get_c(160), 3, 1, 0.1, 'h_swish'),
            GhostBottleneck(self.get_c(160), self.get_c(960), self.get_c(160), 3, 1, 0.1, 'h_swish'),
            # conv-bn-act
            nn.Conv2d(self.get_c(160), self.get_c(960), 1, bias=False),
            nn.BatchNorm2d(self.get_c(960)),
            Activation('h_swish'),
        )
        self.head = Head(self.get_c(960), 1400, num_classes, dropout, 'h_swish')
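    # `get_c` is used above but not shown in this excerpt. A minimal
    # sketch, assuming it rounds width-scaled channel counts the same way
    # the stem does (make_divisible with divisor 4):
    def get_c(self, c):
        return make_divisible(c * self.width, 4)

    # Minimal forward sketch (stem -> stages -> head); an assumption, since
    # the forward is not part of this excerpt:
    def forward(self, x):
        return self.head(self.stage(self.stem(x)))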
def add_tcb_(cfg):
    """Build the RefineDet-style TCB layer lists, one triple per source in cfg."""
    feature_scale_layers = []
    feature_upsample_layers = []
    feature_pred_layers = []
    for k, v in enumerate(cfg):
        feature_scale_layers += [
            nn.Conv2d(v, 256, 3, padding=1),
            Activation('h_swish', auto_optimize=True),
            nn.Conv2d(256, 256, 3, padding=1)
        ]
        feature_pred_layers += [
            Activation('h_swish', auto_optimize=True),
            nn.Conv2d(256, 256, 3, padding=1),
            Activation('h_swish', auto_optimize=True)
        ]
        if k != 0:
            # The deeper feature must be upsampled 2x before the top-down
            # add; a stride-2 nn.Conv2d here would downsample instead, so a
            # ConvTranspose2d is used (matching the RefineDet reference).
            feature_upsample_layers += [nn.ConvTranspose2d(256, 256, 2, 2)]
    return (feature_scale_layers, feature_upsample_layers,
            feature_pred_layers)
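
# A sketch of how these three lists are typically consumed, assuming the
# RefineDet-style top-down fusion: `sources` holds per-level feature maps
# ordered shallow to deep, each level half the size of the previous one.
# `tcb_forward` is a hypothetical helper name, not part of this repo.
def tcb_forward(sources, scale_layers, upsample_layers, pred_layers):
    outputs = []
    prev = None
    for k in range(len(sources) - 1, -1, -1):  # deepest level first
        s = sources[k]
        for layer in scale_layers[3 * k:3 * k + 3]:
            s = layer(s)
        if prev is not None:
            s = s + upsample_layers[k](prev)  # merge the deeper level
        for layer in pred_layers[3 * k:3 * k + 3]:
            s = layer(s)
        prev = s
        outputs.insert(0, s)
    return outputs
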
class MobileNetBottleneck(nn.Module):

    def __init__(self, in_c, expansion, out_c, kernel_size, stride, se=False,
                 activation='relu6', first_conv=True, skip=True, linear=True):
        super(MobileNetBottleneck, self).__init__()
        self.linear = linear
        # Fix for the bug flagged in the original ("no use when
        # linear=True"): only build a real activation when it will be used.
        self.act = nn.Identity() if linear else Activation(
            activation, auto_optimize=True)
        hidden_c = round(in_c * expansion)
        self.skip = stride == 1 and in_c == out_c and skip
        seq = []
        # Optional 1x1 expansion conv (skipped when it would be a no-op).
        if first_conv and in_c != hidden_c:
            seq.append(nn.Conv2d(in_c, hidden_c, 1, 1, bias=False))
            seq.append(nn.BatchNorm2d(hidden_c))
            seq.append(Activation(activation, auto_optimize=True))
        # Depthwise conv.
        seq.append(
            nn.Conv2d(hidden_c, hidden_c, kernel_size, stride,
                      kernel_size // 2, groups=hidden_c, bias=False))
        seq.append(nn.BatchNorm2d(hidden_c))
        seq.append(Activation(activation, auto_optimize=True))
        if se:
            seq.append(SE_Module(hidden_c))
        # Linear 1x1 projection back down to out_c.
        seq.append(nn.Conv2d(hidden_c, out_c, 1, 1, bias=False))
        seq.append(nn.BatchNorm2d(out_c))
        self.seq = nn.Sequential(*seq)
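    # Minimal forward sketch, assuming the usual inverted-residual behavior
    # (the forward is not part of this excerpt):
    def forward(self, x):
        out = self.seq(x)
        if self.skip:
            out = out + x
        return self.act(out)  # nn.Identity() when linear=True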
class SELayer(nn.Module):

    def __init__(self, in_c, reduction_c, act='relu'):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.lin1 = nn.Linear(in_c, reduction_c)
        self.act = Activation(act, auto_optimize=True)
        self.lin2 = nn.Linear(reduction_c, in_c)
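    # Minimal forward sketch, assuming the standard squeeze-and-excitation
    # formulation (sigmoid-gated channel reweighting); the repo's own
    # forward is not part of this excerpt:
    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.lin2(self.act(self.lin1(y)))
        y = torch.sigmoid(y).view(b, c, 1, 1)
        return x * y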
class Stem(nn.Module):

    def __init__(self, out_c, act_type='relu'):
        super(Stem, self).__init__()
        # 3x3 stride-2 conv stem: 3-channel input -> out_c feature maps.
        self.stem = nn.Sequential(
            nn.Conv2d(3, out_c, 3, 2, 1, bias=False),
            nn.BatchNorm2d(out_c),
            Activation(act_type))
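    # Trivial forward, included so the class is runnable as written (the
    # repo's own forward is not part of this excerpt):
    def forward(self, x):
        return self.stem(x)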