def __init__(self,
             num_channels: int,
             num_filters: int,
             stride: int = 1,
             has_se: bool = False,
             downsample: bool = False,
             name: str = None):
    """Residual basic block: a 3x3 ConvBNReLU followed by a 3x3 ConvBN,
    with an optional 1x1 projection on the shortcut path and an optional
    squeeze-and-excitation layer.

    Args:
        num_channels (int): Number of input channels.
        num_filters (int): Number of output channels for both 3x3 convs.
        stride (int, optional): Stride of the first 3x3 conv. Default: 1.
        has_se (bool, optional): Whether to append an SE layer. Default: False.
        downsample (bool, optional): Whether to project the shortcut with a
            1x1 conv (needed when input/output channels differ). Default: False.
        name (str, optional): Name prefix used for the SE sublayer.
            NOTE(review): when ``has_se`` is True, ``name`` must not be None —
            ``name + '_fc'`` would raise TypeError; confirm callers always pass it.
    """
    super(BasicBlock, self).__init__()
    self.has_se = has_se
    self.downsample = downsample
    # Main path: conv1 carries the (possibly strided) spatial reduction.
    self.conv1 = layers.ConvBNReLU(
        in_channels=num_channels,
        out_channels=num_filters,
        kernel_size=3,
        stride=stride,
        padding='same',
        bias_attr=False)
    # conv2 has no ReLU; activation presumably applied after the residual add.
    self.conv2 = layers.ConvBN(
        in_channels=num_filters,
        out_channels=num_filters,
        kernel_size=3,
        padding='same',
        bias_attr=False)
    if self.downsample:
        # 1x1 projection to match the main-path channel count on the shortcut.
        self.conv_down = layers.ConvBNReLU(
            in_channels=num_channels,
            out_channels=num_filters,
            kernel_size=1,
            padding='same',
            bias_attr=False)
    if self.has_se:
        self.se = SELayer(
            num_channels=num_filters,
            num_filters=num_filters,
            reduction_ratio=16,
            name=name + '_fc')
def __init__(self, in_channels: List[int], out_channels: List[int],
             name: str = None):
    """Transition layer between HRNet stages: adapts the per-branch channel
    counts and creates any new, lower-resolution branches.

    Args:
        in_channels (List[int]): Channel count of each incoming branch.
        out_channels (List[int]): Channel count of each outgoing branch; may
            be longer than ``in_channels`` (extra entries create new branches).
        name (str, optional): Name prefix for registered sublayers.
    """
    super(TransitionLayer, self).__init__()
    num_in = len(in_channels)
    num_out = len(out_channels)
    self.conv_bn_func_list = []
    for i in range(num_out):
        # ``None`` marks an identity branch (same channels, no conv needed);
        # the forward pass is expected to pass the input through unchanged.
        residual = None
        if i < num_in:
            if in_channels[i] != out_channels[i]:
                # Existing branch with a channel mismatch: 3x3 conv to adapt.
                residual = self.add_sublayer(
                    "transition_{}_layer_{}".format(name, i + 1),
                    layers.ConvBNReLU(
                        in_channels=in_channels[i],
                        out_channels=out_channels[i],
                        kernel_size=3,
                        padding='same',
                        bias_attr=False))
        else:
            # New branch: strided 3x3 conv from the last (lowest-resolution)
            # input branch to halve resolution.
            residual = self.add_sublayer(
                "transition_{}_layer_{}".format(name, i + 1),
                layers.ConvBNReLU(
                    in_channels=in_channels[-1],
                    out_channels=out_channels[i],
                    kernel_size=3,
                    stride=2,
                    padding='same',
                    bias_attr=False))
        self.conv_bn_func_list.append(residual)
def __init__(self,
             in_channels: List[int],
             out_channels: List[int],
             multi_scale_output: bool = True,
             name: str = None,
             align_corners: bool = False):
    """Cross-resolution fusion layers for one HRNet module: builds the convs
    that map every branch ``j`` onto every output branch ``i``.

    Args:
        in_channels (List[int]): Channel count of each input branch.
        out_channels (List[int]): Channel count of each output branch.
        multi_scale_output (bool, optional): If False, only the first
            (highest-resolution) output branch is produced. Default: True.
        name (str, optional): Name prefix for registered sublayers.
        align_corners (bool, optional): Stored for use by the forward-pass
            upsampling (not used during construction). Default: False.
    """
    super(FuseLayers, self).__init__()
    self._actual_ch = len(in_channels) if multi_scale_output else 1
    self._in_channels = in_channels
    self.align_corners = align_corners
    self.residual_func_list = []
    for i in range(self._actual_ch):
        for j in range(len(in_channels)):
            if j > i:
                # Lower-resolution source: 1x1 conv to match channels
                # (spatial upsampling presumably happens in forward).
                residual_func = self.add_sublayer(
                    "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
                    layers.ConvBN(
                        in_channels=in_channels[j],
                        out_channels=out_channels[i],
                        kernel_size=1,
                        padding='same',
                        bias_attr=False))
                self.residual_func_list.append(residual_func)
            elif j < i:
                # Higher-resolution source: chain of strided 3x3 convs,
                # one per factor-of-2 reduction.
                pre_num_filters = in_channels[j]
                for k in range(i - j):
                    if k == i - j - 1:
                        # Final reduction step: no ReLU, target channels.
                        residual_func = self.add_sublayer(
                            "residual_{}_layer_{}_{}_{}".format(
                                name, i + 1, j + 1, k + 1),
                            layers.ConvBN(
                                in_channels=pre_num_filters,
                                out_channels=out_channels[i],
                                kernel_size=3,
                                stride=2,
                                padding='same',
                                bias_attr=False))
                        pre_num_filters = out_channels[i]
                    else:
                        # Intermediate step keeps out_channels[j].
                        # NOTE(review): asymmetry with the final step's
                        # out_channels[i] looks intentional (matches common
                        # HRNet implementations) — confirm.
                        residual_func = self.add_sublayer(
                            "residual_{}_layer_{}_{}_{}".format(
                                name, i + 1, j + 1, k + 1),
                            layers.ConvBNReLU(
                                in_channels=pre_num_filters,
                                out_channels=out_channels[j],
                                kernel_size=3,
                                stride=2,
                                padding='same',
                                bias_attr=False))
                        pre_num_filters = out_channels[j]
                    self.residual_func_list.append(residual_func)
def __init__(self,
             num_classes: int,
             backbone_indices: Tuple[int] = (-1, ),
             backbone_channels: Tuple[int] = (270, ),
             channels: int = None):
    """Simple FCN segmentation head: a 1x1 ConvBNReLU projection followed by
    a 1x1 classifier conv.

    Args:
        num_classes (int): Number of target classes.
        backbone_indices (Tuple[int], optional): Indices of the backbone
            feature maps this head consumes. Default: (-1, ).
        backbone_channels (Tuple[int], optional): Channel counts of those
            feature maps. Default: (270, ).
        channels (int, optional): Width of the intermediate projection; when
            None, the backbone feature width is reused. Default: None.
    """
    super(FCNHead, self).__init__()
    self.num_classes = num_classes
    self.backbone_indices = backbone_indices
    # Fall back to the backbone feature width when no explicit width is given.
    mid_channels = backbone_channels[0] if channels is None else channels
    self.conv_1 = layers.ConvBNReLU(
        in_channels=backbone_channels[0],
        out_channels=mid_channels,
        kernel_size=1,
        stride=1,
        padding='same')
    # Final per-pixel classifier (logits, no activation).
    self.cls = nn.Conv2D(
        in_channels=mid_channels,
        out_channels=num_classes,
        kernel_size=1,
        stride=1,
        padding=0)
def __init__(self,
             stage1_num_modules: int = 1,
             stage1_num_blocks: List[int] = [4],
             stage1_num_channels: List[int] = [64],
             stage2_num_modules: int = 1,
             stage2_num_blocks: List[int] = [4, 4],
             stage2_num_channels: List[int] = [48, 96],
             stage3_num_modules: int = 4,
             stage3_num_blocks: List[int] = [4, 4, 4],
             stage3_num_channels: List[int] = [48, 96, 192],
             stage4_num_modules: int = 3,
             stage4_num_blocks: List[int] = [4, 4, 4, 4],
             stage4_num_channels: List[int] = [48, 96, 192, 384],
             has_se: bool = False,
             align_corners: bool = False):
    """HRNet-W48 backbone: a stem of two strided convs, then four stages
    connected by transition layers that add progressively lower-resolution
    branches. Defaults correspond to the W48 configuration.

    Args:
        stage*_num_modules (int): Number of HighResolutionModules per stage.
        stage*_num_blocks (List[int]): Blocks per branch within each module.
        stage*_num_channels (List[int]): Channel width of each branch.
        has_se (bool, optional): Whether blocks use squeeze-and-excitation.
            Default: False.
        align_corners (bool, optional): Interpolation flag forwarded to the
            fusion layers. Default: False.

    NOTE(review): the list defaults are mutable but are only read here, so the
    shared-default pitfall is not triggered as long as callers never mutate them.
    """
    super(HRNet_W48, self).__init__()
    self.stage1_num_modules = stage1_num_modules
    self.stage1_num_blocks = stage1_num_blocks
    self.stage1_num_channels = stage1_num_channels
    self.stage2_num_modules = stage2_num_modules
    self.stage2_num_blocks = stage2_num_blocks
    self.stage2_num_channels = stage2_num_channels
    self.stage3_num_modules = stage3_num_modules
    self.stage3_num_blocks = stage3_num_blocks
    self.stage3_num_channels = stage3_num_channels
    self.stage4_num_modules = stage4_num_modules
    self.stage4_num_blocks = stage4_num_blocks
    self.stage4_num_channels = stage4_num_channels
    self.has_se = has_se
    self.align_corners = align_corners
    # The head consumes the concatenation of all stage-4 branches.
    self.feat_channels = [sum(stage4_num_channels)]

    # Stem: two stride-2 3x3 convs -> 1/4 input resolution, 64 channels.
    self.conv_layer1_1 = layers.ConvBNReLU(
        in_channels=3,
        out_channels=64,
        kernel_size=3,
        stride=2,
        padding='same',
        bias_attr=False)
    self.conv_layer1_2 = layers.ConvBNReLU(
        in_channels=64,
        out_channels=64,
        kernel_size=3,
        stride=2,
        padding='same',
        bias_attr=False)

    # Stage 1 (single high-resolution branch of bottleneck-style blocks).
    # NOTE(review): the sublayer name "layer2" follows the upstream naming
    # scheme rather than the stage number — confirm intentional.
    self.la1 = Layer1(
        num_channels=64,
        num_blocks=self.stage1_num_blocks[0],
        num_filters=self.stage1_num_channels[0],
        has_se=has_se,
        name="layer2")

    # Transition 1: stage-1 output width is expanded 4x by its blocks
    # (bottleneck expansion), hence the * 4 here.
    self.tr1 = TransitionLayer(
        in_channels=[self.stage1_num_channels[0] * 4],
        out_channels=self.stage2_num_channels,
        name="tr1")
    self.st2 = Stage(
        num_channels=self.stage2_num_channels,
        num_modules=self.stage2_num_modules,
        num_blocks=self.stage2_num_blocks,
        num_filters=self.stage2_num_channels,
        has_se=self.has_se,
        name="st2",
        align_corners=align_corners)

    self.tr2 = TransitionLayer(
        in_channels=self.stage2_num_channels,
        out_channels=self.stage3_num_channels,
        name="tr2")
    self.st3 = Stage(
        num_channels=self.stage3_num_channels,
        num_modules=self.stage3_num_modules,
        num_blocks=self.stage3_num_blocks,
        num_filters=self.stage3_num_channels,
        has_se=self.has_se,
        name="st3",
        align_corners=align_corners)

    self.tr3 = TransitionLayer(
        in_channels=self.stage3_num_channels,
        out_channels=self.stage4_num_channels,
        name="tr3")
    self.st4 = Stage(
        num_channels=self.stage4_num_channels,
        num_modules=self.stage4_num_modules,
        num_blocks=self.stage4_num_blocks,
        num_filters=self.stage4_num_channels,
        has_se=self.has_se,
        name="st4",
        align_corners=align_corners)