import torch
import torch.nn as nn

# SeparableConvBlock is defined later in this section; MemoryEfficientSwish, Swish,
# Conv2dStaticSamePadding and MaxPool2dStaticSamePadding come from the repo's
# EfficientNet utility modules.


class Classifier(nn.Module):
    def __init__(self, in_channels, num_anchors, num_classes, num_layers, onnx_export=False):
        super(Classifier, self).__init__()
        self.num_anchors = num_anchors
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.conv_list = nn.ModuleList([
            SeparableConvBlock(in_channels, in_channels, norm=False, activation=False)
            for i in range(num_layers)
        ])
        # The convs are shared across the 5 pyramid levels, but each level keeps
        # its own BatchNorm statistics, hence the 5 x num_layers grid.
        self.bn_list = nn.ModuleList([
            nn.ModuleList([
                nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3)
                for i in range(num_layers)
            ])
            for j in range(5)
        ])
        self.header = SeparableConvBlock(in_channels, num_anchors * num_classes, norm=False, activation=False)
        self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
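# A hedged sketch of how these pieces fit together (the forward pass is not part
# of this section, and `classifier_forward_sketch` is an illustrative name, not
# the repo's method): the shared conv_list runs on every pyramid level, each
# level draws BatchNorms from its own row of bn_list, and the header emits
# num_anchors * num_classes channels (e.g. 9 * 90 = 810 for a COCO-style setup).
# Reshaping to per-anchor class scores and the sigmoid are omitted here.
def classifier_forward_sketch(self, inputs):  # inputs assumed to be (P3, ..., P7)
    outs = []
    for feat, bn_level in zip(inputs, self.bn_list):    # one BN row per level
        for conv, bn in zip(self.conv_list, bn_level):  # shared convs, per-level BN
            feat = self.swish(bn(conv(feat)))
        outs.append(self.header(feat))                  # num_anchors * num_classes channels
    return outs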
class Regressor(nn.Module):
    def __init__(self, in_channels, num_anchors, num_layers, onnx_export=False):
        """
        num_layers is chosen from [3, 3, 3, 4, 4, 4, 5, 5] (one value per compound coefficient)
        num_anchors = len(self.aspect_ratios) * self.num_scales
        aspect_ratios = kwargs.get('ratios', [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)])
        num_scales = len(kwargs.get('scales', [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]))
        """
        super(Regressor, self).__init__()
        self.num_layers = num_layers
        self.conv_list = nn.ModuleList([
            SeparableConvBlock(in_channels, in_channels, norm=False, activation=False)
            for i in range(num_layers)
        ])
        self.bn_list = nn.ModuleList([
            nn.ModuleList([
                nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3)
                for i in range(num_layers)
            ])
            for j in range(5)
        ])
        # 4 regression targets per anchor, hence num_anchors * 4 output channels
        self.header = SeparableConvBlock(in_channels, num_anchors * 4, norm=False, activation=False)
        self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
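# Worked example of the anchor arithmetic quoted in the docstring above, using
# the default ratios and scales it lists (pure arithmetic, no repo code assumed):
aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)]
num_scales = len([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])
num_anchors = len(aspect_ratios) * num_scales  # 3 * 3 = 9 anchors per location
header_channels = num_anchors * 4              # 36 box-offset channels from the header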
class SeparableConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels=None, norm=True, activation=False, onnx_export=False):
        super(SeparableConvBlock, self).__init__()
        if out_channels is None:
            out_channels = in_channels

        # Q: should a separable conv share a bias between depthwise_conv and
        #    pointwise_conv, or should only pointwise_conv apply a bias?
        # A: Confirmed, only pointwise_conv applies a bias; depthwise_conv has no bias.
        self.depthwise_conv = Conv2dStaticSamePadding(in_channels, in_channels,
                                                      kernel_size=3, stride=1, groups=in_channels, bias=False)
        self.pointwise_conv = Conv2dStaticSamePadding(in_channels, out_channels, kernel_size=1, stride=1)

        self.norm = norm
        if self.norm:
            # Warning: pytorch momentum differs from tensorflow's:
            # momentum_pytorch = 1 - momentum_tensorflow
            self.bn = nn.BatchNorm2d(num_features=out_channels, momentum=0.01, eps=1e-3)

        self.activation = activation
        if self.activation:
            self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
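# Sketch of the momentum conversion flagged in the warning above. PyTorch's
# BatchNorm momentum weights the *new* batch statistic, TensorFlow's weights
# the *running* statistic:
#   running = (1 - m_pt) * running + m_pt * batch   # PyTorch
#   running = m_tf * running + (1 - m_tf) * batch   # TensorFlow
# The TF value of 0.99 below is an assumption based on the original TF
# implementation; it maps to the momentum=0.01 used throughout this file.
momentum_tensorflow = 0.99
momentum_pytorch = 1 - momentum_tensorflow  # = 0.01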
class BiFPN(nn.Module):
    def __init__(self, num_channels, conv_channels, first_time=False, epsilon=1e-4,
                 onnx_export=False, attention=True):
        """
        Args:
            num_channels:
            conv_channels:
            first_time: whether the input comes directly from the efficientnet backbone;
                if True, down-channel it first, and downsample P5 to generate P6 then P7
            epsilon: epsilon of the fast weighted attention sum of BiFPN, not the BN's epsilon
            onnx_export: if True, use Swish instead of MemoryEfficientSwish
        """
        super(BiFPN, self).__init__()
        self.epsilon = epsilon

        # Conv layers
        self.conv6_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv5_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv4_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv3_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv4_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv5_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv6_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv7_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)

        # Feature scaling layers
        self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p4_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p3_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p4_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.p5_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.p6_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.p7_downsample = MaxPool2dStaticSamePadding(3, 2)

        self.swish = MemoryEfficientSwish() if not onnx_export else Swish()

        self.first_time = first_time
        if self.first_time:
            self.p5_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p4_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p3_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[0], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )

            self.p5_to_p6 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
                MaxPool2dStaticSamePadding(3, 2),
            )
            self.p6_to_p7 = nn.Sequential(
                MaxPool2dStaticSamePadding(3, 2),
            )

            self.p4_down_channel_2 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p5_down_channel_2 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )

        # Weight: fast normalized fusion weights for the top-down pathway (2 inputs each)
        self.p6_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p6_w1_relu = nn.ReLU()
        self.p5_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p5_w1_relu = nn.ReLU()
        self.p4_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p4_w1_relu = nn.ReLU()
        self.p3_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p3_w1_relu = nn.ReLU()
        # ... and for the bottom-up pathway (3 inputs each, 2 for the P7 node)
        self.p4_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p4_w2_relu = nn.ReLU()
        self.p5_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p5_w2_relu = nn.ReLU()
        self.p6_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p6_w2_relu = nn.ReLU()
        self.p7_w2 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p7_w2_relu = nn.ReLU()

        # With attention disabled, the fusion weights stay frozen at their initial ones
        if not attention:
            self.p6_w1.requires_grad = False
            self.p5_w1.requires_grad = False
            self.p4_w1.requires_grad = False
            self.p3_w1.requires_grad = False
            self.p4_w2.requires_grad = False
            self.p5_w2.requires_grad = False
            self.p6_w2.requires_grad = False
            self.p7_w2.requires_grad = False
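# Hedged sketch of how these weights and self.epsilon are consumed (the forward
# pass is not shown in this section; `fast_normalized_fusion` is an illustrative
# helper, not a repo function). Each fusion node applies the BiFPN paper's fast
# normalized fusion: relu the weights, normalize them with epsilon guarding the
# division, then take a weighted sum of the input features.
def fast_normalized_fusion(w_relu, epsilon, *features):
    w = w_relu / (torch.sum(w_relu, dim=0) + epsilon)  # weights now sum to ~1
    return sum(w[i] * f for i, f in enumerate(features))

# For example, the first top-down node fuses P6 with upsampled P7 (sketch):
#   fused = fast_normalized_fusion(self.p6_w1_relu(self.p6_w1), self.epsilon,
#                                  p6_in, self.p6_upsample(p7_in))
#   p6_up = self.conv6_up(self.swish(fused))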