Example #1
    def __init__(self, block, layers, num_classes=1000):
        super(ResNet, self).__init__()
        self.inplanes = 128
        # Deep stem: three 3x3 convolutions (the first with stride 2) in place
        # of the usual single 7x7 convolution.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He (Kaiming) initialization for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
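The snippet assumes the usual imports (math, torch.nn as nn) and a conv3x3 helper. A minimal sketch of that helper, matching the torchvision-style signature the stem calls it with (positional in/out planes plus an optional stride):

import math
import torch.nn as nn

def conv3x3(in_planes, out_planes, stride=1):
    # 3x3 convolution with padding=1 and no bias (a BatchNorm follows each conv).
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)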
Example #2
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARDenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        unit_class = DenseUnit if bottleneck else DenseSimpleUnit

        self.features = tf.keras.Sequential(name="features")
        self.features.add(
            conv3x3(in_channels=in_channels,
                    out_channels=init_block_channels,
                    data_format=data_format,
                    name="init_block"))
        in_channels = init_block_channels
        # Dense stages: every stage after the first starts with a transition
        # block that halves the channel count.
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            if i != 0:
                stage.add(
                    TransitionBlock(in_channels=in_channels,
                                    out_channels=(in_channels // 2),
                                    data_format=data_format,
                                    name="trans{}".format(i + 1)))
                in_channels = in_channels // 2
            for j, out_channels in enumerate(channels_per_stage):
                stage.add(
                    unit_class(in_channels=in_channels,
                               out_channels=out_channels,
                               dropout_rate=dropout_rate,
                               data_format=data_format,
                               name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(
            PreResActivation(in_channels=in_channels,
                             data_format=data_format,
                             name="post_activ"))
        self.features.add(
            nn.AveragePooling2D(pool_size=8,
                                strides=1,
                                data_format=data_format,
                                name="final_pool"))

        self.output1 = nn.Dense(units=classes,
                                input_dim=in_channels,
                                name="output1")
Example #3
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilations,
                 data_format="channels_last",
                 **kwargs):
        super(ESPBlock, self).__init__(**kwargs)
        num_branches = len(dilations)
        assert (out_channels % num_branches == 0)
        self.downsample = (strides != 1)
        mid_channels = out_channels // num_branches

        self.reduce_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            groups=num_branches,
            activation=(lambda: PReLU2()),
            data_format=data_format,
            name="reduce_conv")

        self.branches = HierarchicalConcurrent(
            data_format=data_format,
            name="branches")
        for i in range(num_branches):
            self.branches.add(conv3x3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                padding=dilations[i],
                dilation=dilations[i],
                groups=mid_channels,
                data_format=data_format,
                name="branch{}".format(i + 1)))

        self.merge_conv = conv1x1_block(
            in_channels=out_channels,
            out_channels=out_channels,
            groups=num_branches,
            activation=None,
            data_format=data_format,
            name="merge_conv")
        self.preactiv = PreActivation(
            in_channels=out_channels,
            data_format=data_format,
            name="preactiv")
        if not self.downsample:
            self.activ = PReLU2()
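A hypothetical instantiation, just to make the shape bookkeeping concrete (the dilation list is illustrative):

block = ESPBlock(in_channels=64,
                 out_channels=64,
                 strides=1,
                 dilations=[1, 2, 4, 8])
# mid_channels = 64 // 4 = 16: each branch is a depthwise (groups=mid_channels)
# dilated 3x3 conv on 16 channels, and padding == dilation preserves the spatial
# size, so the four branch outputs concatenate back to 64 channels.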
Example #4
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 data_format="channels_last",
                 **kwargs):
        super(CIFARPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

        self.features = tf.keras.Sequential(name="features")
        self.features.add(conv3x3(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(PreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="post_activ"))
        self.features.add(nn.AveragePooling2D(
            pool_size=8,
            strides=1,
            data_format=data_format,
            name="final_pool"))

        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
Example #5
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 return_heatmap=False,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17,
                 data_format="channels_last",
                 **kwargs):
        super(AlphaPose, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.data_format = data_format

        self.backbone = backbone
        self.backbone._name = "backbone"  # force a stable layer name for the passed-in backbone

        self.decoder = tf.keras.Sequential(name="decoder")
        self.decoder.add(PixelShuffle(
            scale_factor=2,
            data_format=data_format,
            name="init_block"))
        in_channels = backbone_out_channels // 4
        for i, out_channels in enumerate(channels):
            self.decoder.add(DucBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                scale_factor=2,
                data_format=data_format,
                name="unit{}".format(i + 1)))
            in_channels = out_channels
        self.decoder.add(conv3x3(
            in_channels=in_channels,
            out_channels=keypoints,
            use_bias=True,
            data_format=data_format,
            name="final_block"))

        self.heatmap_max_det = HeatmapMaxDetBlock(
            data_format=data_format,
            name="heatmap_max_det")
Example #6
    def __init__(self,
                 in_channels,
                 out_channels,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleInitBlock, self).__init__(**kwargs)
        self.conv = conv3x3(in_channels=in_channels,
                            out_channels=out_channels,
                            strides=2,
                            data_format=data_format,
                            name="conv")
        self.bn = BatchNorm(
            # in_channels=out_channels,
            data_format=data_format,
            name="bn")
        self.activ = nn.ReLU()
        self.pool = MaxPool2d(pool_size=3,
                              strides=2,
                              padding=1,
                              data_format=data_format,
                              name="pool")
Example #7
    def __init__(self,
                 enc_channels,
                 dec_channels,
                 init_block_channels,
                 layers,
                 int_bends,
                 use_preresnet,
                 in_channels=3,
                 in_size=(640, 640),
                 data_format="channels_last",
                 **kwargs):
        super(LFFD, self).__init__(**kwargs)
        self.in_size = in_size
        self.data_format = data_format
        unit_class = PreResUnit if use_preresnet else ResUnit
        use_bias = True
        use_bn = False

        self.encoder = MultiOutputSequential(return_last=False)
        self.encoder.add(conv3x3_block(
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            padding=0,
            use_bias=use_bias,
            use_bn=use_bn,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(enc_channels):
            layers_per_stage = layers[i]
            int_bends_per_stage = int_bends[i]
            stage = MultiOutputSequential(
                multi_output=False,
                dual_output=True,
                name="stage{}".format(i + 1))
            stage.add(conv3x3(
                in_channels=in_channels,
                out_channels=channels_per_stage,
                strides=2,
                padding=0,
                use_bias=use_bias,
                data_format=data_format,
                name="trans{}".format(i + 1)))
            for j in range(layers_per_stage):
                unit = unit_class(
                    in_channels=channels_per_stage,
                    out_channels=channels_per_stage,
                    strides=1,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    bottleneck=False,
                    data_format=data_format,
                    name="unit{}".format(j + 1))
                # The last `int_bends_per_stage` units of the stage are
                # intermediate bends whose outputs also feed the decoder.
                if layers_per_stage - j <= int_bends_per_stage:
                    unit.do_output = True
                stage.add(unit)
            final_activ = nn.ReLU(name="final_activ")
            final_activ.do_output = True
            stage.add(final_activ)
            stage.do_output2 = True
            in_channels = channels_per_stage
            self.encoder.add(stage)

        self.decoder = ParallelConcurent()
        k = 0
        for i, channels_per_stage in enumerate(enc_channels):
            layers_per_stage = layers[i]
            int_bends_per_stage = int_bends[i]
            for j in range(layers_per_stage):
                if layers_per_stage - j <= int_bends_per_stage:
                    self.decoder.add(LffdDetectionBlock(
                        in_channels=channels_per_stage,
                        mid_channels=dec_channels,
                        use_bias=use_bias,
                        use_bn=use_bn,
                        data_format=data_format,
                        name="unit{}".format(k + 1)))
                    k += 1
            self.decoder.add(LffdDetectionBlock(
                in_channels=channels_per_stage,
                mid_channels=dec_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                data_format=data_format,
                name="unit{}".format(k + 1)))
            k += 1
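Note how the encoder and decoder stay in sync: each stage contributes one output per intermediate bend plus one for its final activation, and the decoder adds exactly one LffdDetectionBlock per output, so:

num_decoder_blocks = sum(int_bends) + len(enc_channels)
# e.g. for a hypothetical int_bends = [0, 0, 1, 2] over 4 stages: 3 + 4 = 7 blocks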
Example #8
    def __init__(self,
                 in_channels,
                 out_channels,
                 side_channels,
                 groups,
                 downsample,
                 ignore_group,
                 data_format="channels_last",
                 **kwargs):
        super(MEUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.downsample = downsample
        mid_channels = out_channels // 4

        if downsample:
            # On downsample the avg-pooled identity is concatenated back in,
            # so the residual branch produces only the remaining channels.
            out_channels -= in_channels

        # residual branch
        self.compress_conv1 = conv1x1(in_channels=in_channels,
                                      out_channels=mid_channels,
                                      groups=(1 if ignore_group else groups),
                                      data_format=data_format,
                                      name="compress_conv1")
        self.compress_bn1 = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="compress_bn1")
        self.c_shuffle = ChannelShuffle(channels=mid_channels,
                                        groups=groups,
                                        data_format=data_format,
                                        name="c_shuffle")
        self.dw_conv2 = depthwise_conv3x3(
            channels=mid_channels,
            strides=(2 if self.downsample else 1),
            data_format=data_format,
            name="dw_conv2")
        self.dw_bn2 = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="dw_bn2")
        self.expand_conv3 = conv1x1(in_channels=mid_channels,
                                    out_channels=out_channels,
                                    groups=groups,
                                    data_format=data_format,
                                    name="expand_conv3")
        self.expand_bn3 = BatchNorm(
            # in_channels=out_channels,
            data_format=data_format,
            name="expand_bn3")
        if downsample:
            self.avgpool = AvgPool2d(pool_size=3,
                                     strides=2,
                                     padding=1,
                                     data_format=data_format,
                                     name="avgpool")
        self.activ = nn.ReLU()

        # fusion branch
        self.s_merge_conv = conv1x1(in_channels=mid_channels,
                                    out_channels=side_channels,
                                    data_format=data_format,
                                    name="s_merge_conv")
        self.s_merge_bn = BatchNorm(
            # in_channels=side_channels,
            data_format=data_format,
            name="s_merge_bn")
        self.s_conv = conv3x3(in_channels=side_channels,
                              out_channels=side_channels,
                              strides=(2 if self.downsample else 1),
                              data_format=data_format,
                              name="s_conv")
        self.s_conv_bn = BatchNorm(
            # in_channels=side_channels,
            data_format=data_format,
            name="s_conv_bn")
        self.s_evolve_conv = conv1x1(in_channels=side_channels,
                                     out_channels=mid_channels,
                                     data_format=data_format,
                                     name="s_evolve_conv")
        self.s_evolve_bn = BatchNorm(
            # in_channels=mid_channels,
            data_format=data_format,
            name="s_evolve_bn")