def __init__(self, planes, stride=1, downsample=None, *args, **kwargs):
    super(BasicBlock, self).__init__()
    # xnn layers infer their input size lazily: xnn.Conv2d takes out_channels
    # as its first positional argument, and xnn.BatchNorm2d takes no channel
    # argument (consistent with how they are used throughout this file)
    self.body = nn.Sequential(
        xnn.Conv2d(planes, 3, stride, 1, bias=False),
        xnn.BatchNorm2d(),
        nn.ReLU(inplace=True),
        xnn.Conv2d(planes, 3, 1, 1, bias=False),
        xnn.BatchNorm2d())
    self.downsample = downsample
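
# Not part of the original excerpt: a minimal sketch of the forward pass this
# block implies, assuming the standard residual pattern (body plus shortcut,
# then ReLU); the actual method is not shown here.
import torch.nn.functional as F

def forward(self, x):
    # shortcut: identity, or the projection supplied by the caller
    identity = x if self.downsample is None else self.downsample(x)
    return F.relu(self.body(x) + identity, inplace=True)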

def __init__(self, plane, num_anchors, num_classes):
    super(PredictHead, self).__init__()
    self.num_classes = num_classes
    self.body = nn.Sequential(
        xnn.Conv2d(plane, 3, 1, 1, bias=False),
        xnn.BatchNorm2d(),
        nn.ReLU(inplace=True),
        xnn.Conv2d(num_anchors * num_classes, 3, 1, 1))
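
# Not part of the original excerpt: a hedged sketch of how a head like this is
# typically applied, flattening the (N, A*C, H, W) map into per-anchor scores.
# The actual forward is not shown; the reshape layout is an assumption.
def forward(self, x):
    x = self.body(x)                        # (N, num_anchors * num_classes, H, W)
    x = x.permute(0, 2, 3, 1).contiguous()  # (N, H, W, num_anchors * num_classes)
    return x.view(x.size(0), -1, self.num_classes)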

def _make_layer(self, block, planes, blocks, stride=1, dilation=1, is_first=True):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        down_layers = []
        if self.avg_down:
            # ResNet-D shortcut: average-pool first so the strided 1x1 conv
            # does not discard activations
            if dilation == 1:
                down_layers.append(nn.AvgPool2d(kernel_size=stride, stride=stride,
                                                ceil_mode=True, count_include_pad=False))
            else:
                down_layers.append(nn.AvgPool2d(kernel_size=1, stride=1,
                                                ceil_mode=True, count_include_pad=False))
            down_layers.append(xnn.Conv2d(planes * block.expansion, kernel_size=1,
                                          stride=1, bias=False))
        else:
            down_layers.append(xnn.Conv2d(planes * block.expansion, kernel_size=1,
                                          stride=stride, bias=False))
        down_layers.append(xnn.BatchNorm2d())
        downsample = nn.Sequential(*down_layers)

    layers = []
    if dilation == 1 or dilation == 2:
        layers.append(block(planes, stride, downsample, self.radix, self.cardinality,
                            self.bottleneck_width, self.avd, self.avd_first, 1, is_first))
    elif dilation == 4:
        layers.append(block(planes, stride, downsample, self.radix, self.cardinality,
                            self.bottleneck_width, self.avd, self.avd_first, 2, is_first))
    else:
        raise RuntimeError("=> unknown dilation size: {}".format(dilation))

    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(block(planes, 1, None, self.radix, self.cardinality,
                            self.bottleneck_width, self.avd, self.avd_first, dilation))
    return nn.Sequential(*layers)

def __init__(self, depth, plane):
    super(FeaturePyramidNet, self).__init__()
    self.link = nn.ModuleList()
    self.fuse = nn.ModuleList()
    for i in range(depth):
        self.link.append(nn.Sequential(
            xnn.Conv2d(plane, 1, 1, 0, bias=False),
            xnn.BatchNorm2d()))
        # was `if i != depth:`, which is always true inside range(depth);
        # the top pyramid level has no higher-level feature to fuse, so
        # only depth - 1 fuse modules are needed
        if i != depth - 1:
            self.fuse.append(nn.Sequential(
                nn.ReLU(inplace=True),
                xnn.Conv2d(plane, 3, 1, 1, bias=False),
                xnn.BatchNorm2d()))
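
# Not part of the original excerpt: a hedged sketch of the top-down pass the
# link/fuse module lists imply. Assumes `features` is ordered from the
# highest-resolution level to the lowest and nearest-neighbor upsampling;
# the repo's actual forward may differ in details.
import torch.nn.functional as F

def forward(self, features):
    laterals = [link(f) for link, f in zip(self.link, features)]
    outs = [laterals[-1]]  # top level passes through without fusion
    for i in range(len(laterals) - 2, -1, -1):
        up = F.interpolate(outs[0], size=laterals[i].shape[-2:], mode='nearest')
        outs.insert(0, self.fuse[i](laterals[i] + up))
    return outs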

def __init__(self, out_channels, kernel_size, stride=1, padding=0, dilation=1,
             groups=1, bias=True, padding_mode='zeros', radix=2, reduction_factor=4):
    super(SplAtConv2d, self).__init__()
    inter_channels = max(out_channels * radix // reduction_factor, 32)
    self.radix = radix
    self.conv = xnn.Conv2d(out_channels * radix, kernel_size, stride, padding,
                           dilation, groups * radix, bias, padding_mode)
    self.bn0 = xnn.BatchNorm2d()
    self.relu = nn.ReLU(inplace=True)
    self.fc1 = xnn.Conv2d(inter_channels, 1, groups=groups)
    self.bn1 = xnn.BatchNorm2d()
    self.fc2 = xnn.Conv2d(out_channels * radix, 1, groups=groups)
    self.rsoftmax = rSoftMax(radix, groups)
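
# Not part of the original excerpt: a forward sketch of split attention that
# follows the reference ResNeSt implementation; the repo's actual forward is
# not shown and may differ in details.
import torch
import torch.nn.functional as F

def forward(self, x):
    x = self.relu(self.bn0(self.conv(x)))      # (N, out_channels * radix, H, W)
    batch, rchannel = x.shape[:2]
    if self.radix > 1:
        splits = torch.split(x, rchannel // self.radix, dim=1)
        gap = sum(splits)                      # element-wise sum over the splits
    else:
        gap = x
    gap = F.adaptive_avg_pool2d(gap, 1)        # global context vector
    gap = self.relu(self.bn1(self.fc1(gap)))
    atten = self.rsoftmax(self.fc2(gap)).view(batch, -1, 1, 1)
    if self.radix > 1:
        attens = torch.split(atten, rchannel // self.radix, dim=1)
        return sum(a * s for a, s in zip(attens, splits)).contiguous()
    return (atten * x).contiguous()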

def __init__(self, planes, stride=1, downsample=None, radix=1, cardinality=1,
             bottleneck_width=64, avd=False, avd_first=False, dilation=1,
             is_first=False):
    super(Bottleneck, self).__init__()
    group_width = int(planes * (bottleneck_width / 64)) * cardinality
    # AvD (ResNeSt): only move the stride into an average pool on strided
    # blocks or the first block of a stage
    avd = avd and (stride > 1 or is_first)
    body = [
        xnn.Conv2d(group_width, kernel_size=1, bias=False),
        xnn.BatchNorm2d(),
        nn.ReLU(inplace=True)
    ]
    if avd:
        avd_layer = nn.AvgPool2d(3, stride, padding=1)
        stride = 1
        if avd_first:
            body.append(avd_layer)
    if radix > 1:
        # SplAtConv2d applies its own BN + ReLU internally
        body.append(SplAtConv2d(group_width, 3, stride, dilation, dilation,
                                cardinality, bias=False, radix=radix))
    else:
        body.append(xnn.Conv2d(group_width, 3, stride, dilation, dilation,
                               cardinality, bias=False))
        body.append(xnn.BatchNorm2d())
        body.append(nn.ReLU(inplace=True))
    if avd and not avd_first:
        body.append(avd_layer)
    body.append(xnn.Conv2d(planes * self.expansion, 1, bias=False))
    body.append(xnn.BatchNorm2d())
    self.body = nn.Sequential(*body)
    self.downsample = downsample
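
# `self.expansion` above is referenced but not defined in this excerpt; for a
# standard bottleneck (as in torchvision and ResNeSt) it is presumably a class
# attribute:
#
#     class Bottleneck(nn.Module):
#         expansion = 4
#
# The implied forward pass is the same residual pattern sketched after
# BasicBlock above.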

def __init__(self, backbone, cfg):
    super(RDD, self).__init__()
    cfg.setdefault('iou_thresh', [0.4, 0.5])
    cfg.setdefault('variance', [0.1, 0.2, 0.1])
    cfg.setdefault('balance', 0.5)
    cfg.setdefault('conf_thresh', 0.01)
    cfg.setdefault('nms_thresh', 0.5)
    cfg.setdefault('top_n', None)
    cfg.setdefault('extra', 0)
    cfg.setdefault('fpn_plane', 256)
    cfg.setdefault('extra_plane', 512)

    self.backbone = backbone
    self.prior_box = LFUPriorBox(cfg['prior_box'])
    self.num_levels = self.prior_box.num_levels
    self.num_classes = cfg['num_classes']
    self.iou_thresh = cfg['iou_thresh']
    self.variance = cfg['variance']
    self.balance = cfg['balance']
    self.conf_thresh = cfg['conf_thresh']
    self.nms_thresh = cfg['nms_thresh']
    self.top_n = cfg['top_n']
    self.extra = cfg['extra']
    self.fpn_plane = cfg['fpn_plane']
    self.extra_plane = cfg['extra_plane']

    self.fpn = FeaturePyramidNet(self.num_levels, self.fpn_plane)
    self.predict = DetPredict(self.num_levels, self.fpn_plane,
                              self.prior_box.num_prior_boxes, self.num_classes, 5)
    if self.extra > 0:
        self.extra_layers = nn.ModuleList()
        for i in range(self.extra):
            self.extra_layers.append(nn.Sequential(
                xnn.Conv2d(self.extra_plane, 3, 2, 1, bias=False),
                xnn.BatchNorm2d(),
                nn.ReLU(inplace=True)))
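
# Not part of the original excerpt: a hypothetical construction sketch showing
# the keys consumed above. 'num_classes' and 'prior_box' are required (no
# defaults are set for them); the structure of cfg['prior_box'] depends on
# LFUPriorBox, which is not shown in this excerpt.
prior_box_cfg = {}  # placeholder: LFUPriorBox's expected keys are not shown here
cfg = {
    'prior_box': prior_box_cfg,
    'num_classes': 2,   # required; no setdefault for it above
    'extra': 2,         # append two stride-2 extra stages after the backbone
}
model = RDD(backbone, cfg)  # `backbone` built via the Backbone class below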

def CBR(plane, kernel_size, stride=1, padding=0):
    return nn.Sequential(
        xnn.Conv2d(plane, kernel_size, stride, padding, bias=False),
        xnn.BatchNorm2d(),
        nn.ReLU(inplace=True))
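
# Usage example: a 3x3, stride-2 Conv-BN-ReLU stage producing 256 channels
# (the input channel count is inferred by xnn on the first forward pass).
down = CBR(256, 3, 2, 1)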

def __init__(self, block, layers, name=None, fetch_feature=False,
             radix=1, groups=1, bottleneck_width=64,
             dilated=False, dilation=1,
             deep_stem=False, stem_width=64, avg_down=False,
             avd=False, avd_first=False):
    self.cardinality = groups
    self.bottleneck_width = bottleneck_width
    # ResNet-D params
    self.inplanes = stem_width * 2 if deep_stem else 64
    self.avg_down = avg_down
    # ResNeSt params
    self.radix = radix
    self.avd = avd
    self.avd_first = avd_first
    super(Backbone, self).__init__()
    self.name = name
    self.fetch_feature = fetch_feature

    if deep_stem:
        # ResNet-D deep stem: three 3x3 convs instead of a single 7x7
        head = [
            xnn.Conv2d(stem_width, kernel_size=3, stride=2, padding=1, bias=False),
            xnn.BatchNorm2d(),
            nn.ReLU(inplace=True),
            xnn.Conv2d(stem_width, kernel_size=3, stride=1, padding=1, bias=False),
            xnn.BatchNorm2d(),
            nn.ReLU(inplace=True),
            xnn.Conv2d(stem_width * 2, kernel_size=3, stride=1, padding=1, bias=False)
        ]
    else:
        head = [xnn.Conv2d(64, kernel_size=7, stride=2, padding=3, bias=False)]
    self.head = nn.Sequential(
        *head,
        xnn.BatchNorm2d(),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

    self.layer1 = self._make_layer(block, 64, layers[0], is_first=False)
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
    if dilated or dilation == 4:
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4)
    elif dilation == 2:
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilation=1)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=2)
    else:
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
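
# Not part of the original excerpt: a hypothetical instantiation sketch. The
# settings mirror the published ResNeSt-50 configuration (layer counts
# [3, 4, 6, 3], radix 2, deep stem with stem_width 32, average-pool
# downsampling, AvD); the `name` string is illustrative only.
backbone = Backbone(Bottleneck, [3, 4, 6, 3], name='resnest50', fetch_feature=True,
                    radix=2, deep_stem=True, stem_width=32, avg_down=True,
                    avd=True, avd_first=False)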