Example #1
 def __init__(self, qconfig):
     super().__init__()
     self.conv1 = nn.Conv2d(3, 2, 1, bias=None).to(dtype=torch.float)
     self.bn1 = nn.BatchNorm2d(2).to(dtype=torch.float)
     self.relu1 = nn.ReLU(inplace=True).to(dtype=torch.float)
     self.sub1 = SubModelForFusion()
     self.sub2 = SubModelWithoutFusion()
     self.fc = nn.Linear(36, 10).to(dtype=torch.float)
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     self.qconfig = qconfig
     self.conv2 = nn.Conv3d(3, 2, (1, 1, 1), bias=None).to(dtype=torch.float)
     self.relu2 = nn.ReLU(inplace=False).to(dtype=torch.float)
     self.bn2 = nn.BatchNorm3d(2).to(dtype=torch.float)
     self.relu3 = nn.ReLU(inplace=True).to(dtype=torch.float)
     self.conv3 = nn.Conv1d(3, 3, 2).to(dtype=torch.float)
     self.bn3 = nn.BatchNorm1d(3).to(dtype=torch.float)
     self.relu4 = nn.ReLU(inplace=True).to(dtype=torch.float)
     # don't quantize sub2
     self.sub2.qconfig = None
     self.fc.qconfig = None
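These stubs and per-module qconfig assignments are what eager-mode post-training quantization keys off. A minimal, self-contained sketch of the usual fuse/prepare/calibrate/convert flow under that assumption (the tiny model and names below are illustrative stand-ins, not the original test fixture):

import torch
import torch.nn as nn
from torch.quantization import (QuantStub, DeQuantStub, default_qconfig,
                                fuse_modules, prepare, convert)

class TinyFusableModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()          # float -> quantized at the input
        self.conv = nn.Conv2d(3, 2, 1, bias=False)
        self.bn = nn.BatchNorm2d(2)
        self.relu = nn.ReLU()
        self.fc = nn.Linear(2, 2)
        self.dequant = DeQuantStub()      # quantized -> float at the output
        self.qconfig = default_qconfig
        self.fc.qconfig = None            # keep the classifier in float, like fc above

    def forward(self, x):
        x = self.quant(x)
        x = self.relu(self.bn(self.conv(x)))
        x = self.dequant(x)
        return self.fc(x.mean([2, 3]))

m = TinyFusableModel().eval()
fuse_modules(m, [['conv', 'bn', 'relu']], inplace=True)   # Conv+BN+ReLU -> ConvReLU2d
prepare(m, inplace=True)                                  # insert observers where a qconfig is set
m(torch.randn(1, 3, 8, 8))                                # calibration pass
convert(m, inplace=True)                                  # swap observed modules for quantized ones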
Example #2
    def __init__(self, num_classes):
        super(Decoder, self).__init__()

        self.layers = nn.ModuleList()

        self.layers.append(UpsamplerBlock(128, 64))
        self.layers.append(non_bottleneck_1d(64, 0, 1))
        self.layers.append(non_bottleneck_1d(64, 0, 1))

        self.layers.append(UpsamplerBlock(64, 16))
        self.layers.append(non_bottleneck_1d(16, 0, 1))
        self.layers.append(non_bottleneck_1d(16, 0, 1))

        self.output_conv = nn.ConvTranspose2d(16,
                                              num_classes,
                                              2,
                                              stride=2,
                                              padding=0,
                                              output_padding=0,
                                              bias=True)
        self.dequant = DeQuantStub()
        self.quant = QuantStub()
Example #3
 def __init__(self):
     super().__init__()
     self.block = resnet.conv1x1BN(in_planes=64,
                                   out_planes=64,
                                   stride=1,
                                   require_relu=True)
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     # weight_range = math.pow(2.0, 0.0)
     weight_range = math.pow(2.0, 4.0)
     for m in self.modules():
         if isinstance(m, nn.Conv2d):
             m.weight = torch.nn.Parameter(
                 torch.rand(size=m.weight.size()) * 1.9 * weight_range -
                 weight_range)
             if m.bias is not None:
                 nn.init.zeros_(m.bias)
         elif isinstance(m, nn.BatchNorm2d):
             m.weight = torch.nn.Parameter(
                 torch.rand(size=m.weight.size()) * 1.9 * weight_range -
                 weight_range)
             nn.init.zeros_(m.bias)
Example #4
    def __init__(self,
                 in_chs,
                 out_chs,
                 kernel_size,
                 stride=1,
                 pad_type='',
                 act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d,
                 norm_kwargs=None):
        super(ConvBnAct, self).__init__()
        assert stride in [1, 2]
        norm_kwargs = norm_kwargs or {}
        self.conv = select_conv2d(in_chs,
                                  out_chs,
                                  kernel_size,
                                  stride=stride,
                                  padding=pad_type)
        self.bn1 = norm_layer(out_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

        self.quant = QuantStub()
        self.dequant = DeQuantStub()
Example #5
    def __init__(self, classes=20, p=2, q=3):
        super().__init__()
        self.encoder = ESPNet_Encoder(classes, p, q)      
        # load the encoder modules

        # light-weight decoder
        self.level3_C = C(128 + 3, classes, 1, 1)
        self.b = CB(classes, classes, 1, 1)
        self.conv = CBR(19 + classes, classes, 3, 1)
        self.up_l3 = CBR(classes, classes, 1, stride=1, padding=0, bias=False)
        
        self.combine_l2_l3 = DilatedParllelResidualBlockB(2 * classes, classes, add=False)
        self.up_l2 = CBR(classes, classes, 1, stride=1, padding=0, bias=False)

        #self.classifier = C(classes, classes, 1, stride=1, padding=0)
        
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.quant_cat1 = nn.quantized.FloatFunctional()
        self.quant_cat2 = nn.quantized.FloatFunctional()
        self.quant_cat3 = nn.quantized.FloatFunctional()
        self.quant_cat4 = nn.quantized.FloatFunctional()
        self.quant_cat5 = nn.quantized.FloatFunctional()
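The FloatFunctional modules above presumably back the feature concatenations in the forward pass: after convert(), FloatFunctional is swapped for nn.quantized.QFunctional, which carries the output scale and zero-point that a bare torch.cat on quantized tensors would lack. A minimal, self-contained illustration of the pattern (not the ESPNet forward itself):

import torch
import torch.nn as nn

class CatBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant_cat = nn.quantized.FloatFunctional()

    def forward(self, a, b):
        # Channel-wise concatenation routed through FloatFunctional so that an
        # observer can be attached to the result during prepare().
        return self.quant_cat.cat([a, b], dim=1)

block = CatBlock()
print(block(torch.randn(1, 2, 8, 8), torch.randn(1, 3, 8, 8)).shape)  # torch.Size([1, 5, 8, 8])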
Example #6
    def __init__(self):
        super(VGG16, self).__init__()
        self.conv1_1 = cm.ConvReLU(in_planes=3, out_planes=64, kernel_size=3, stride=1, bias=True)
        self.conv1_2 = cm.ConvReLU(in_planes=64, out_planes=64, kernel_size=3, stride=1, bias=True)
        self.maxpool1 = cm.MaxPool2dRelu(kernel_size=2, stride=2, relu=False)
        self.conv2_1 = cm.ConvReLU(in_planes=64, out_planes=128, kernel_size=3, stride=1, bias=True)
        self.conv2_2 = cm.ConvReLU(in_planes=128, out_planes=128, kernel_size=3, stride=1, bias=True)
        self.maxpool2 = cm.MaxPool2dRelu(kernel_size=2, stride=2, relu=False)
        self.conv3_1 = cm.ConvReLU(in_planes=128, out_planes=256, kernel_size=3, stride=1, bias=True)
        self.conv3_2 = cm.ConvReLU(in_planes=256, out_planes=256, kernel_size=3, stride=1, bias=True)
        self.conv3_3 = cm.ConvReLU(in_planes=256, out_planes=256, kernel_size=3, stride=1, bias=True)
        self.maxpool3 = cm.MaxPool2dRelu(kernel_size=2, stride=2, relu=False)
        self.conv4_1 = cm.ConvReLU(in_planes=256, out_planes=512, kernel_size=3, stride=1, bias=True)
        self.conv4_2 = cm.ConvReLU(in_planes=512, out_planes=512, kernel_size=3, stride=1, bias=True)
        self.conv4_3 = cm.ConvReLU(in_planes=512, out_planes=512, kernel_size=3, stride=1, bias=True)
        self.maxpool4 = cm.MaxPool2dRelu(kernel_size=2, stride=2, relu=False)
        self.conv5_1 = cm.ConvReLU(in_planes=512, out_planes=512, kernel_size=3, stride=1, bias=True)
        self.conv5_2 = cm.ConvReLU(in_planes=512, out_planes=512, kernel_size=3, stride=1, bias=True)
        self.conv5_3 = cm.ConvReLU(in_planes=512, out_planes=512, kernel_size=3, stride=1, bias=True)
        self.maxpool5 = cm.MaxPool2dRelu(kernel_size=2, stride=2, relu=False)
        '''
        Remove the average pooling. When IW/IH = 224/224, the average pooling essentially disappears.
        The original model uses adaptive 2D average pooling with target OW/OH = 7/7.
        To see this, take a look at the formulae for adaptive 2D average pooling:
        https://stackoverflow.com/a/58694174
        '''
        #self.avgpool = cm.AvgPool2dRelu(kernel_size=7, divisor_override=64, relu=False)
        self.fc1 = cm.LinearReLU(in_features=512*7*7, out_features=4096, bias=True)
        self.fc2 = cm.LinearReLU(in_features=4096, out_features=4096, bias=True)
        self.fc3 = nn.Linear(in_features=4096, out_features=1000, bias=True)

        # Utils
        self.flatten = cm.Flatten()
        self.quant = QuantStub()
        self.deQuant = DeQuantStub()
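For a head this linear-heavy (fc1 through fc3 above), dynamic quantization is a common alternative to the static, stub-based flow: only the Linear weights are quantized ahead of time and activations are quantized per batch. A self-contained sketch of that option on a stand-in stack (not the VGG16 class itself):

import torch
import torch.nn as nn

head = nn.Sequential(nn.Linear(512 * 7 * 7, 4096), nn.ReLU(),
                     nn.Linear(4096, 4096), nn.ReLU(),
                     nn.Linear(4096, 1000))
# Quantize Linear weights to int8 ahead of time; activations are quantized on the fly at run time.
qhead = torch.quantization.quantize_dynamic(head, {nn.Linear}, dtype=torch.qint8)
print(qhead(torch.randn(1, 512 * 7 * 7)).shape)   # torch.Size([1, 1000])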
Example #7
 def __init__(self):
     super().__init__()
     self.float_op = nn.quantized.FloatFunctional()
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
Example #8
    def __init__(self,
                 num_classes=1000,
                 width_mult=1.0,
                 inverted_residual_setting=None,
                 round_nearest=8):
        """
        MobileNet V2 main class

        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280

        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]

        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(
                inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(
                                 inverted_residual_setting))

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult,
                                        round_nearest)
        self.last_channel = _make_divisible(
            last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                stride = s if i == 0 else 1
                features.append(
                    block(input_channel,
                          output_channel,
                          stride,
                          expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(
            ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, num_classes),
        )

        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
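In the torchvision reference that this constructor closely mirrors, ConvBNReLU is an nn.Sequential of Conv2d, BatchNorm2d, and ReLU, and fusion is done by child index before observers are inserted. A self-contained sketch under that assumption (a stand-in block, not the model above):

import torch
import torch.nn as nn
from torch.quantization import fuse_modules

block = nn.Sequential(nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False),
                      nn.BatchNorm2d(32),
                      nn.ReLU(inplace=True)).eval()
# Children of an nn.Sequential are named '0', '1', '2', so the fuse list uses those indices.
fuse_modules(block, ['0', '1', '2'], inplace=True)   # Conv2d+BatchNorm2d+ReLU -> ConvReLU2d
print(block)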
Example #9
 def __init__(self):
     super(ManualQuantModel, self).__init__()
     self.qconfig = default_qconfig
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
Example #10
    def __init__(self, input_nc, output_nc, n_residual_blocks=9, use_bn=False):
        super(Generator, self).__init__()
        if use_bn:
            norm = nn.BatchNorm2d
        else:
            norm = nn.InstanceNorm2d

        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        # Initial convolution block
        model = [  # nn.ReflectionPad2d(3),
            ConvNormReLU(input_nc, 64, 7, padding=3, use_bn=use_bn)
        ]
        # nn.Conv2d(input_nc, 64, 7),
        # norm(64),
        # nn.ReLU(inplace=True) ]

        # Downsampling
        in_features = 64
        out_features = in_features * 2
        for _ in range(2):
            model += [
                ConvNormReLU(in_features,
                             out_features,
                             3,
                             stride=2,
                             padding=1,
                             use_bn=use_bn)
            ]
            #nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
            #norm(out_features),
            #nn.ReLU(inplace=True) ]
            in_features = out_features
            out_features = in_features * 2

        # Residual blocks
        for _ in range(n_residual_blocks):
            model += [ResidualBlock(in_features, use_bn)]

        # Upsampling
        out_features = in_features // 2
        for _ in range(2):
            model += [
                nn.Upsample(scale_factor=2),
                #  nn.Conv2d(in_features, out_features, 3, stride=1, padding=1),
                # #  nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1),
                #  norm(out_features),
                # nn.ReLU(inplace=True)
                ConvNormReLU(in_features,
                             out_features,
                             3,
                             stride=1,
                             padding=1,
                             use_bn=use_bn)
            ]
            in_features = out_features
            out_features = in_features // 2

        # Output layer
        model += [  # nn.ReflectionPad2d(3),
            nn.Conv2d(64, output_nc, 7, padding=3),
            nn.Tanh()
        ]

        self.model = nn.Sequential(*model)
Example #11
    def __init__(self,
                 block: Union[BasicBlock, BottleneckBlock],
                 num_blocks: List[int],
                 family: str = 'cifar10',
                 _flagTest: bool = False,
                 _stage_planes_override=None,
                 _stage_strides_override=None,
                 _avgpool_2d_size_override=None):
        """
        Construct a resnet according to the specifications
        :param block:
        :param num_blocks: Number of residual blocks in each stage
        :param family: The ResNet family. Either 'cifar10', 'imagenet1k', or 'test'.
        Family 'test' constrains the input plane to be 32 by 32
        """
        super(ResNet, self).__init__()
        assert family == 'cifar10' or family == 'imagenet1k' or family == 'test', \
            'Supported families are confined to [cifar10, imagenet1k, test]'
        self.family = family
        if family == 'cifar10':
            self.stage_planes = [16, 32, 64]
            self.stage_strides = [1, 2, 2]
            self.num_classes = 10
            self.fc_input_number = 64
            self.in_planes = 16
            self.in_kernel_size = 3
            self.in_stride = 1
            self.avgpool_2d_size = 8
        elif family == 'imagenet1k':
            self.stage_planes = [64, 128, 256, 512]
            self.stage_strides = [1, 2, 2, 2]
            self.num_classes = 1000
            self.fc_input_number = 512 * block.expansion
            self.in_planes = 64
            self.in_kernel_size = 7
            self.in_stride = 2
            self.avgpool_2d_size = 7
        else:
            raise ValueError('Unsupported ResNet variant')

        if _flagTest:
            assert (_avgpool_2d_size_override is not None) and \
                   isinstance(_stage_planes_override, list) and isinstance(_stage_strides_override, list), \
                'Insufficient arguments provided for the test ResNet variant'
            self.stage_planes = deepcopy(_stage_planes_override)
            self.stage_strides = deepcopy(_stage_strides_override)
            self.num_classes = 1000 if family == 'imagenet1k' else 10
            self.fc_input_number = _stage_planes_override[-1] * block.expansion
            self.in_planes = _stage_planes_override[0]
            self.in_kernel_size = 3
            self.in_stride = 1
            self.avgpool_2d_size = _avgpool_2d_size_override

        self.avgpool_divisor = math.ceil(
            math.pow(2.0,
                     math.log2(self.avgpool_2d_size * self.avgpool_2d_size)))

        self.inputConvBNReLU = cm.ConvBNReLU(in_planes=3,
                                             out_planes=self.in_planes,
                                             stride=self.in_stride,
                                             kernel_size=self.in_kernel_size)
        self.relu = nn.ReLU(inplace=True)
        self.maxpoolrelu = cm.MaxPool2dRelu(
            kernel_size=3, stride=2, padding=1,
            relu=False) if family == 'imagenet1k' else None
        self.fc = nn.Linear(self.fc_input_number, self.num_classes)
        self.averagePool = cm.AvgPool2dRelu(
            kernel_size=self.avgpool_2d_size,
            divisor_override=self.avgpool_divisor,
            relu=False)

        assert (len(self.stage_planes) == len(num_blocks)) and (len(self.stage_planes) == len(self.stage_strides)), \
            'Incompatible num_blocks, stage_planes, stage_strides'
        num_stages = len(self.stage_planes)

        self.stage1 = None
        if num_stages > 0:
            self.stage1 = self._make_stage(block=block,
                                           planes=self.stage_planes[0],
                                           num_block=num_blocks[0],
                                           stride=self.stage_strides[0])

        self.stage2 = None
        if num_stages > 1:
            self.stage2 = self._make_stage(block=block,
                                           planes=self.stage_planes[1],
                                           num_block=num_blocks[1],
                                           stride=self.stage_strides[1])

        self.stage3 = None
        if num_stages > 2:
            self.stage3 = self._make_stage(block=block,
                                           planes=self.stage_planes[2],
                                           num_block=num_blocks[2],
                                           stride=self.stage_strides[2])

        self.stage4 = None
        if num_stages > 3:
            self.stage4 = self._make_stage(block=block,
                                           planes=self.stage_planes[3],
                                           num_block=num_blocks[3],
                                           stride=self.stage_strides[3])

        self.quant = QuantStub()
        self.deQuant = DeQuantStub()
        self.flatten = cm.Flatten()

        # Parameter initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

        # Initialize the last BN in each residual layer s.t.
        # initially inference flows through the short-cuts
        for m in self.modules():
            if isinstance(m, BottleneckBlock):
                nn.init.constant_(m.convBN3[1].weight, 0)
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.convBN2[1].weight, 0)
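ResNet-style stacks like this one are usually quantized on x86 with the fbgemm qconfig, whose default pairs per-channel weight observers with reduced-range histogram activation observers. A short, self-contained way to inspect that choice:

import torch
from torch.quantization import get_default_qconfig

qconfig = get_default_qconfig('fbgemm')
print(qconfig.activation())   # HistogramObserver with reduce_range=True
print(qconfig.weight())       # per-channel symmetric weight observer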
Example #12
    def __init__(self,
                 input_ch=16,
                 final_ch=180,
                 width_mult=1.0,
                 depth_mult=1.0,
                 classes=1000,
                 use_se=True,
                 se_ratio=12,
                 dropout_ratio=0.2,
                 bn_momentum=0.9):
        super(ReXNetV1, self).__init__()

        layers = [1, 2, 2, 3, 3, 5]
        strides = [1, 2, 2, 2, 1, 2]
        layers = [ceil(element * depth_mult) for element in layers]
        strides = sum([[element] + [1] * (layers[idx] - 1)
                       for idx, element in enumerate(strides)], [])
        ts = [1] * layers[0] + [6] * sum(layers[1:])
        self.depth = sum(layers[:]) * 3

        stem_channel = 32 / width_mult if width_mult < 1.0 else 32
        inplanes = input_ch / width_mult if width_mult < 1.0 else input_ch

        self.quant = QuantStub()
        self.dequant = DeQuantStub()

        features = []
        in_channels_group = []
        channels_group = []

        _add_conv_swish(features,
                        3,
                        int(round(stem_channel * width_mult)),
                        kernel=3,
                        stride=2,
                        pad=1)

        # The following channel configuration is a simple instance to make each layer become an expand layer.
        for i in range(self.depth // 3):
            if i == 0:
                in_channels_group.append(int(round(stem_channel * width_mult)))
                channels_group.append(int(round(inplanes * width_mult)))
            else:
                in_channels_group.append(int(round(inplanes * width_mult)))
                inplanes += final_ch / (self.depth // 3 * 1.0)
                channels_group.append(int(round(inplanes * width_mult)))

        if use_se:
            use_ses = [False] * (layers[0] + layers[1]) + [True] * sum(
                layers[2:])
        else:
            use_ses = [False] * sum(layers[:])

        for block_idx, (in_c, c, t, s, se) in enumerate(
                zip(in_channels_group, channels_group, ts, strides, use_ses)):
            features.append(
                LinearBottleneck(in_channels=in_c,
                                 channels=c,
                                 t=t,
                                 stride=s,
                                 use_se=se,
                                 se_ratio=se_ratio))

        pen_channels = int(1280 * width_mult)
        _add_conv_swish(features, c, pen_channels)

        features.append(nn.AdaptiveAvgPool2d(1))
        self.features = nn.Sequential(*features)
        self.output = nn.Sequential(
            nn.Dropout(dropout_ratio),
            nn.Conv2d(pen_channels, classes, 1, bias=True))
Example #13
 def __init__(self, **kwargs: bool) -> None:
     """Initialize."""
     super(QuantizableMixNet, self).__init__(**kwargs)
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
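This is the "quantizable subclass" pattern: reuse the float model's layers and forward unchanged and only add the stubs. A self-contained sketch of the idea with a stand-in base class (not the real MixNet):

import torch
import torch.nn as nn
from torch.quantization import QuantStub, DeQuantStub

class FloatNet(nn.Module):
    def __init__(self):
        super().__init__()
        self.body = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU())

    def forward(self, x):
        return self.body(x)

class QuantizableNet(FloatNet):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        # Bracket the float forward with the stubs; prepare()/convert() handle the rest.
        return self.dequant(super().forward(self.quant(x)))

print(QuantizableNet()(torch.randn(1, 3, 8, 8)).shape)   # torch.Size([1, 8, 8, 8])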
Example #14
 def __init__(self):
     super(ModelWithNoQconfigPropagation, self).__init__()
     self.fc1 = nn.Linear(5, 5).to(dtype=torch.float)
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     self.no_quant_module = self.ListOutModule()
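Whether a child ends up observed is decided by qconfig propagation: a parent's qconfig flows down to children unless a child overrides it, for example with None as the examples above do. A minimal, self-contained sketch of that behaviour:

import torch.nn as nn
from torch.quantization import propagate_qconfig_, default_qconfig

class Parent(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(5, 5)
        self.fc2 = nn.Linear(5, 5)
        self.fc2.qconfig = None      # opt this child out of quantization

parent = Parent()
parent.qconfig = default_qconfig
propagate_qconfig_(parent)
print(parent.fc1.qconfig is not None, parent.fc2.qconfig is None)   # True True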
Example #15
 def __init__(self):
     super().__init__()
     self.qconfig = torch.quantization.get_default_qconfig("qnnpack")
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     self.fc = torch.nn.Linear(5, 5).to(dtype=torch.float)
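When the qconfig targets the qnnpack backend as it does here, the quantized engine should be set to match before the converted model runs. A self-contained sketch of that flow on a stand-in model (it assumes qnnpack is compiled into the PyTorch build being used):

import torch
import torch.nn as nn
from torch.quantization import QuantStub, DeQuantStub, prepare, convert

class TinyFC(nn.Module):
    def __init__(self):
        super().__init__()
        self.qconfig = torch.quantization.get_default_qconfig('qnnpack')
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.fc = nn.Linear(5, 5)

    def forward(self, x):
        return self.dequant(self.fc(self.quant(x)))

torch.backends.quantized.engine = 'qnnpack'   # match the qconfig's backend
m = TinyFC().eval()
prepare(m, inplace=True)
m(torch.randn(4, 5))                           # calibration pass
convert(m, inplace=True)
print(m(torch.randn(4, 5)).shape)              # torch.Size([4, 5])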
Example #16
 def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
              return_indices=False, ceil_mode=False, relu=False):
     super().__init__(kernel_size, stride, padding, dilation,
              return_indices, ceil_mode)
     self.relu = relu
     self.quant = QuantStub()
Example #17
    def __init__(self,
                 ver='ver1',
                 ver_new_cfg=None,
                 run_type='train',
                 num_classes=100,
                 wide_factor=1,
                 depth_factor=1,
                 add_se=True,
                 Activation='ReLU',
                 Batchnorm='BatchNorm',
                 device='cpu'):

        super(MicroNet, self).__init__()
        # NOTE: change conv1 stride 2 -> 1 for CIFAR10
        '''
        wide_factor: ratio to expand channel
        depth_factor: ratio to expand depth
        '''
        # cfg entry: (expansion, out_planes, num_blocks, [cba1, cba2, cba3, shortcut, se]),
        # where cba1/cba2/cba3/shortcut are [kernel, stride, padding, bias] and se is [kernel, bias]
        if ver == 'ver1':
            self.cfg = [[
                3, 16, 2,
                [[1, 1, 0, False], [3, 1, 1, False], [1, 1, 0, False],
                 [1, 1, 0, False], [1, True]]
            ],
                        [
                            3, 32, 1,
                            [[1, 1, 0, False], [3, 2, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            3, 32, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            3, 48, 3,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            3, 72, 1,
                            [[1, 1, 0, False], [3, 2, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            3, 72, 4,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            3, 80, 1,
                            [[1, 1, 0, False], [3, 2, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            3, 88, 2,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            3, 106, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ]]
        elif ver == 'ver2':
            self.cfg = [[
                2.5, 20, 2,
                [[1, 1, 0, False], [3, 1, 1, False], [1, 1, 0, False],
                 [1, 1, 0, False], [1, True]]
            ],
                        [
                            2.5, 36, 1,
                            [[1, 1, 0, False], [3, 2, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            2.5, 36, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            2.5, 56, 3,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            2.5, 80, 1,
                            [[1, 1, 0, False], [3, 2, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            2.5, 80, 4,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            2.5, 88, 1,
                            [[1, 1, 0, False], [3, 2, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            2.5, 96, 2,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ],
                        [
                            2.5, 114, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, True]]
                        ]]
        elif ver == 'ver3':  #imagenet-scaled
            self.cfg = [[
                1, 16, 2,
                [[1, 1, 0, False], [3, 1, 1, False], [1, 1, 0, False],
                 [1, 1, 0, False], [1, False]]
            ],
                        [
                            3, 24, 1,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 24, 2,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 40, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 40, 2,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 80, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 80, 2,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 96, 2,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 192, 1,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 192, 3,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 320, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ]]
        # cfg entry: (expansion, out_planes, num_blocks, [cba1, cba2, cba3, shortcut, se]),
        # where cba1/cba2/cba3/shortcut are [kernel, stride, padding, bias] and se is [kernel, bias]
        elif ver == 'ver4':  #large imagenet-scaled
            self.cfg = [[
                1, 16, 2,
                [[1, 1, 0, False], [3, 1, 1, False], [1, 1, 0, False],
                 [1, 1, 0, False], [1, False]]
            ],
                        [
                            3, 24, 1,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 24, 2,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 40, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 40, 2,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 80, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 80, 2,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            5, 96, 2,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            5, 192, 1,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            5, 245, 3,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            5, 490, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ]]
        elif ver == 'ver5':  #large imagenet-scaled
            self.cfg = [[
                1, 32, 2,
                [[1, 1, 0, False], [3, 1, 1, False], [1, 1, 0, False],
                 [1, 1, 0, False], [1, False]]
            ],
                        [
                            3, 32, 1,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 64, 2,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            3, 64, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            5, 96, 2,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            5, 96, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            5, 128, 2,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            7, 128, 2,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            7, 192, 1,
                            [[1, 1, 0, False], [5, 2, 2, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            7, 245, 3,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ],
                        [
                            7, 980, 1,
                            [[1, 1, 0, False], [3, 1, 1, False],
                             [1, 1, 0, False], [1, 1, 0, False], [1, False]]
                        ]]
        else:
            #print("MicroNet's new net_cfg:", ver_new_cfg)
            self.cfg = ver_new_cfg

        self.change_cfg(wide_factor, depth_factor)

        self.device = device

        self.run_type = run_type

        #construct network
        self.add_se = add_se
        self.act = Activation
        self.input_channel = 32
        self.num_classes = num_classes

        #construct batchnorm
        self.norm = Batchnorm
        if self.norm == 'GhostBatchNorm':
            self.bn = GhostBatchNorm
        else:
            self.bn = BatchNorm

        #quantization
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

        self.cb = ConvBN(3,
                         self.input_channel,
                         kernel_size=3,
                         stride=1,
                         padding=1,
                         bias=False,
                         momentum=0.01,
                         norm_layer=self.bn)

        self.layers = self._make_layers(in_planes=self.input_channel,
                                        run_type=self.run_type)

        self.avg = nn.AdaptiveAvgPool2d(1)
        self.dropout = nn.Dropout(p=0.3)
        self.linear = nn.Linear(self.cfg[-1][1], self.num_classes)

        self.act = Activation
        if self.act == 'HSwish':
            self.stem_act = HSwish()
        elif self.act == 'Mish':
            self.stem_act = HSwish()  # NOTE: the 'Mish' branch also falls back to HSwish here
        else:
            self.stem_act = nn.ReLU()

        #initialize the parameters
        self.reset_parameters()

        #initialize the custom parameters
        self.reset_custom_parameters()
Example #18
def restructure_model(model):
    model = nn.Sequential(QuantStub(), model, DeQuantStub())
    return model
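A hedged usage sketch for this helper, relying on the restructure_model defined above: after wrapping, the qconfig goes on the wrapper and the usual prepare/calibrate/convert flow applies to the whole Sequential (the inner model below is a stand-in):

import torch
import torch.nn as nn
from torch.quantization import default_qconfig, prepare, convert

float_model = nn.Sequential(nn.Conv2d(3, 4, 3), nn.ReLU())
wrapped = restructure_model(float_model).eval()   # QuantStub -> model -> DeQuantStub
wrapped.qconfig = default_qconfig
prepare(wrapped, inplace=True)                    # observers on the stub and the inner layers
wrapped(torch.randn(1, 3, 8, 8))                  # calibration pass
convert(wrapped, inplace=True)                    # Conv2d -> quantized Conv2d, stubs -> Quantize/DeQuantize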
Example #19
    def __init__(self,
                 nclass=1000,
                 mode='large',
                 width_mult=1.0,
                 dilated=False,
                 norm_layer=nn.BatchNorm2d,
                 RE=False,
                 **kwargs):
        super(MobileNetV3, self).__init__()
        if RE:
            if mode == 'large':
                layer1_setting = [
                    # k, exp_size, c, se, nl, s
                    [3, 16, 16, False, 'RE', 1],
                    [3, 64, 24, False, 'RE', 2],
                    [3, 72, 24, False, 'RE', 1],
                ]
                layer2_setting = [
                    [5, 72, 40, True, 'RE', 2],
                    [5, 120, 40, True, 'RE', 1],
                    [5, 120, 40, True, 'RE', 1],
                ]

                layer3_setting = [
                    [3, 240, 80, False, 'RE', 2],
                    [3, 200, 80, False, 'RE', 1],
                    [3, 184, 80, False, 'RE', 1],
                    [3, 184, 80, False, 'RE', 1],
                    [3, 480, 112, True, 'RE', 1],
                    [3, 672, 112, True, 'RE', 1],
                ]
                if dilated:  #Reduce by Factor of 2
                    layer4_setting = [
                        [5, 672, 160, True, 'RE', 2],
                        [5, 960, 160, True, 'RE', 1],
                        [5, 960 // 2, 160 // 2, True, 'RE', 1],
                    ]
                else:
                    layer4_setting = [
                        [5, 672, 160, True, 'RE', 2],
                        [5, 960, 160, True, 'RE', 1],
                        [5, 960, 160, True, 'RE', 1],
                    ]

            elif mode == 'small':
                layer1_setting = [
                    # k, exp_size, c, se, nl, s
                    [3, 16, 16, True, 'RE', 2],
                ]

                layer2_setting = [
                    [3, 72, 24, False, 'RE', 2],
                    [3, 88, 24, False, 'RE', 1],
                ]
                layer3_setting = [
                    [5, 96, 40, True, 'RE', 2],
                    [5, 240, 40, True, 'RE', 1],
                    [5, 240, 40, True, 'RE', 1],
                    [5, 120, 48, True, 'RE', 1],
                    [5, 144, 48, True, 'RE', 1],
                ]
                if dilated:
                    layer4_setting = [
                        [5, 288, 96, True, 'RE', 2],
                        [5, 576, 96, True, 'RE', 1],
                        [5, 576 // 2, 96 // 2, True, 'RE', 1],
                    ]
                else:
                    layer4_setting = [
                        [5, 288, 96, True, 'RE', 2],
                        [5, 576, 96, True, 'RE', 1],
                        [5, 576, 96, True, 'RE', 1],
                    ]
            else:
                raise ValueError('Unknown mode.')
        else:
            if mode == 'large':
                layer1_setting = [
                    # k, exp_size, c, se, nl, s
                    [3, 16, 16, False, 'RE', 1],
                    [3, 64, 24, False, 'RE', 2],
                    [3, 72, 24, False, 'RE', 1],
                ]
                layer2_setting = [
                    [5, 72, 40, True, 'RE', 2],
                    [5, 120, 40, True, 'RE', 1],
                    [5, 120, 40, True, 'RE', 1],
                ]

                layer3_setting = [
                    [3, 240, 80, False, 'HS', 2],
                    [3, 200, 80, False, 'HS', 1],
                    [3, 184, 80, False, 'HS', 1],
                    [3, 184, 80, False, 'HS', 1],
                    [3, 480, 112, True, 'HS', 1],
                    [3, 672, 112, True, 'HS', 1],
                ]
                if dilated:  #Reduce by Factor of 2
                    layer4_setting = [
                        [5, 672, 160, True, 'HS', 2],
                        [5, 960, 160, True, 'HS', 1],
                        [5, 960 // 2, 160 // 2, True, 'HS', 1],
                    ]
                else:
                    layer4_setting = [
                        [5, 672, 160, True, 'HS', 2],
                        [5, 960, 160, True, 'HS', 1],
                        [5, 960, 160, True, 'HS', 1],
                    ]

            elif mode == 'small':
                layer1_setting = [
                    # k, exp_size, c, se, nl, s
                    [3, 16, 16, True, 'RE', 2],
                ]

                layer2_setting = [
                    [3, 72, 24, False, 'RE', 2],
                    [3, 88, 24, False, 'RE', 1],
                ]
                layer3_setting = [
                    [5, 96, 40, True, 'HS', 2],
                    [5, 240, 40, True, 'HS', 1],
                    [5, 240, 40, True, 'HS', 1],
                    [5, 120, 48, True, 'HS', 1],
                    [5, 144, 48, True, 'HS', 1],
                ]
                if dilated:
                    layer4_setting = [
                        [5, 288, 96, True, 'HS', 2],
                        [5, 576, 96, True, 'HS', 1],
                        [5, 576 // 2, 96 // 2, True, 'HS', 1],
                    ]
                else:
                    layer4_setting = [
                        [5, 288, 96, True, 'HS', 2],
                        [5, 576, 96, True, 'HS', 1],
                        [5, 576, 96, True, 'HS', 1],
                    ]
            else:
                raise ValueError('Unknown mode.')

        # building first layer
        self.in_channels = int(16 * width_mult) if width_mult > 1.0 else 16
        if RE:
            self.conv1 = _ConvBNReLU(3,
                                     self.in_channels,
                                     3,
                                     2,
                                     1,
                                     norm_layer=norm_layer)
        else:
            self.conv1 = _ConvBNHswish(3,
                                       self.in_channels,
                                       3,
                                       2,
                                       1,
                                       norm_layer=norm_layer)

        # building bottleneck blocks
        self.layer1 = self._make_layer(Bottleneck,
                                       layer1_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        self.layer2 = self._make_layer(Bottleneck,
                                       layer2_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        self.layer3 = self._make_layer(Bottleneck,
                                       layer3_setting,
                                       width_mult,
                                       norm_layer=norm_layer)
        if dilated:
            self.layer4 = self._make_layer(Bottleneck,
                                           layer4_setting,
                                           width_mult,
                                           dilation=2,
                                           norm_layer=norm_layer)
        else:
            self.layer4 = self._make_layer(Bottleneck,
                                           layer4_setting,
                                           width_mult,
                                           norm_layer=norm_layer)

        # building last several layers
        classifier = list()
        if mode == 'large':
            if dilated:
                last_bneck_channels = int(
                    960 // 2 * width_mult) if width_mult > 1.0 else 960 // 2
            else:
                last_bneck_channels = int(
                    960 * width_mult) if width_mult > 1.0 else 960
            if RE:
                self.layer5 = _ConvBNReLU(self.in_channels,
                                          last_bneck_channels,
                                          1,
                                          norm_layer=norm_layer)
            else:
                self.layer5 = _ConvBNHswish(self.in_channels,
                                            last_bneck_channels,
                                            1,
                                            norm_layer=norm_layer)
            if not dilated:
                classifier.append(nn.AdaptiveAvgPool2d(1))
                classifier.append(nn.Conv2d(last_bneck_channels, 1280, 1))
                classifier.append(_Hswish(True))
                classifier.append(nn.Conv2d(1280, nclass, 1))
        elif mode == 'small':
            if dilated:
                last_bneck_channels = int(
                    576 // 2 * width_mult) if width_mult > 1.0 else 576 // 2
            else:
                last_bneck_channels = int(
                    576 * width_mult) if width_mult > 1.0 else 576
            if RE:
                self.layer5 = _ConvBNReLU(self.in_channels,
                                          last_bneck_channels,
                                          1,
                                          norm_layer=norm_layer)
            else:
                self.layer5 = _ConvBNHswish(self.in_channels,
                                            last_bneck_channels,
                                            1,
                                            norm_layer=norm_layer)
            if not dilated:
                classifier.append(SEModule(last_bneck_channels))
                classifier.append(nn.AdaptiveAvgPool2d(1))
                classifier.append(nn.Conv2d(last_bneck_channels, 1024, 1))
                classifier.append(_Hswish(True))
                classifier.append(nn.Conv2d(1024, nclass, 1))
        else:
            raise ValueError('Unknown mode.')
        self.mode = mode
        if not dilated:
            self.classifier = nn.Sequential(*classifier)
        self.dilated = dilated
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self._init_weights()
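Architectures like the MobileNetV3 above are often quantized with quantization-aware training rather than pure post-training calibration. A minimal, self-contained QAT sketch with a stand-in model (not the class above):

import torch
import torch.nn as nn
from torch.quantization import (QuantStub, DeQuantStub, get_default_qat_qconfig,
                                prepare_qat, convert)

class TinyQATModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.quant = QuantStub()
        self.conv = nn.Conv2d(3, 4, 3, padding=1)
        self.relu = nn.ReLU()
        self.dequant = DeQuantStub()

    def forward(self, x):
        return self.dequant(self.relu(self.conv(self.quant(x))))

m = TinyQATModel()
m.qconfig = get_default_qat_qconfig('fbgemm')
prepare_qat(m.train(), inplace=True)           # insert fake-quant modules
opt = torch.optim.SGD(m.parameters(), lr=0.01)
for _ in range(3):                             # stand-in training loop
    opt.zero_grad()
    m(torch.randn(2, 3, 8, 8)).sum().backward()
    opt.step()
convert(m.eval(), inplace=True)                # fold fake-quant into real int8 modules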
Example #20
    def __init__(self,
                 input_channels,
                 base_num_features,
                 num_classes,
                 num_pool,
                 num_conv_per_stage=2,
                 feat_map_mul_on_downscale=2,
                 conv_op=nn.Conv2d,
                 norm_op=nn.BatchNorm2d,
                 norm_op_kwargs=None,
                 dropout_op=nn.Dropout2d,
                 dropout_op_kwargs=None,
                 nonlin=nn.LeakyReLU,
                 nonlin_kwargs=None,
                 deep_supervision=True,
                 dropout_in_localization=False,
                 final_nonlin=softmax_helper,
                 weightInitializer=InitWeights_He(1e-2),
                 pool_op_kernel_sizes=None,
                 conv_kernel_sizes=None,
                 upscale_logits=False,
                 convolutional_pooling=False,
                 convolutional_upsampling=False,
                 max_num_features=None,
                 basic_block=ConvDropoutNormNonlin,
                 seg_output_use_bias=False):
        """
        basically more flexible than v1, architecture is the same

        Does this look complicated? Nah bro. Functionality > usability

        This does everything you need, including world peace.

        Questions? -> [email protected]
        """
        super(Generic_UNet, self).__init__()
        self.convolutional_upsampling = convolutional_upsampling
        self.convolutional_pooling = convolutional_pooling
        self.upscale_logits = upscale_logits
        if nonlin_kwargs is None:
            nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
        if dropout_op_kwargs is None:
            dropout_op_kwargs = {'p': 0.5, 'inplace': True}
        if norm_op_kwargs is None:
            norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}

        self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}

        self.nonlin = nonlin
        self.nonlin_kwargs = nonlin_kwargs
        self.dropout_op_kwargs = dropout_op_kwargs
        self.norm_op_kwargs = norm_op_kwargs
        self.weightInitializer = weightInitializer
        self.conv_op = conv_op
        self.norm_op = norm_op
        self.dropout_op = dropout_op
        self.num_classes = num_classes
        self.final_nonlin = final_nonlin
        self._deep_supervision = deep_supervision
        self.do_ds = deep_supervision

        if conv_op == nn.Conv2d:
            upsample_mode = 'bilinear'
            pool_op = nn.MaxPool2d
            transpconv = nn.ConvTranspose2d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3)] * (num_pool + 1)
        elif conv_op == nn.Conv3d:
            upsample_mode = 'trilinear'
            pool_op = nn.MaxPool3d
            transpconv = nn.ConvTranspose3d
            if pool_op_kernel_sizes is None:
                pool_op_kernel_sizes = [(2, 2, 2)] * num_pool
            if conv_kernel_sizes is None:
                conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)
        else:
            raise ValueError(
                "unknown convolution dimensionality, conv op: %s" %
                str(conv_op))

        self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes,
                                                        0,
                                                        dtype=np.int64)
        self.pool_op_kernel_sizes = pool_op_kernel_sizes
        self.conv_kernel_sizes = conv_kernel_sizes

        self.conv_pad_sizes = []
        for krnl in self.conv_kernel_sizes:
            self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])

        if max_num_features is None:
            if self.conv_op == nn.Conv3d:
                self.max_num_features = self.MAX_NUM_FILTERS_3D
            else:
                self.max_num_features = self.MAX_FILTERS_2D
        else:
            self.max_num_features = max_num_features

        self.conv_blocks_context = []
        self.conv_blocks_localization = []
        self.td = []
        self.tu = []
        self.seg_outputs = []

        output_features = base_num_features
        input_features = input_channels

        for d in range(num_pool):
            # determine the first stride
            if d != 0 and self.convolutional_pooling:
                first_stride = pool_op_kernel_sizes[d - 1]
            else:
                first_stride = None

            self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]
            self.conv_kwargs['padding'] = self.conv_pad_sizes[d]
            # add convolutions
            self.conv_blocks_context.append(
                StackedConvLayers(input_features,
                                  output_features,
                                  num_conv_per_stage,
                                  self.conv_op,
                                  self.conv_kwargs,
                                  self.norm_op,
                                  self.norm_op_kwargs,
                                  self.dropout_op,
                                  self.dropout_op_kwargs,
                                  self.nonlin,
                                  self.nonlin_kwargs,
                                  first_stride,
                                  basic_block=basic_block))
            if not self.convolutional_pooling:
                self.td.append(pool_op(pool_op_kernel_sizes[d]))
            input_features = output_features
            output_features = int(
                np.round(output_features * feat_map_mul_on_downscale))

            output_features = min(output_features, self.max_num_features)

        # now the bottleneck.
        # determine the first stride
        if self.convolutional_pooling:
            first_stride = pool_op_kernel_sizes[-1]
        else:
            first_stride = None

        # the output of the last conv must match the number of features from the skip connection if we are not using
        # convolutional upsampling. If we use convolutional upsampling then the reduction in feature maps will be
        # done by the transposed conv
        if self.convolutional_upsampling:
            final_num_features = output_features
        else:
            final_num_features = self.conv_blocks_context[-1].output_channels

        self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]
        self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]
        self.conv_blocks_context.append(
            nn.Sequential(
                StackedConvLayers(input_features,
                                  output_features,
                                  num_conv_per_stage - 1,
                                  self.conv_op,
                                  self.conv_kwargs,
                                  self.norm_op,
                                  self.norm_op_kwargs,
                                  self.dropout_op,
                                  self.dropout_op_kwargs,
                                  self.nonlin,
                                  self.nonlin_kwargs,
                                  first_stride,
                                  basic_block=basic_block),
                StackedConvLayers(output_features,
                                  final_num_features,
                                  1,
                                  self.conv_op,
                                  self.conv_kwargs,
                                  self.norm_op,
                                  self.norm_op_kwargs,
                                  self.dropout_op,
                                  self.dropout_op_kwargs,
                                  self.nonlin,
                                  self.nonlin_kwargs,
                                  basic_block=basic_block)))

        # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here
        if not dropout_in_localization:
            old_dropout_p = self.dropout_op_kwargs['p']
            self.dropout_op_kwargs['p'] = 0.0

        # now lets build the localization pathway
        for u in range(num_pool):
            nfeatures_from_down = final_num_features
            nfeatures_from_skip = self.conv_blocks_context[-(
                2 + u
            )].output_channels  # self.conv_blocks_context[-1] is bottleneck, so start with -2
            n_features_after_tu_and_concat = nfeatures_from_skip * 2

            # the first conv reduces the number of features to match those of skip
            # the following convs work on that number of features
            # if not convolutional upsampling then the final conv reduces the num of features again
            if u != num_pool - 1 and not self.convolutional_upsampling:
                final_num_features = self.conv_blocks_context[-(
                    3 + u)].output_channels
            else:
                final_num_features = nfeatures_from_skip

            if not self.convolutional_upsampling:
                self.tu.append(
                    Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)],
                             mode=upsample_mode))
            else:
                self.tu.append(
                    transpconv(nfeatures_from_down,
                               nfeatures_from_skip,
                               pool_op_kernel_sizes[-(u + 1)],
                               pool_op_kernel_sizes[-(u + 1)],
                               bias=False))

            self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[-(u + 1)]
            self.conv_kwargs['padding'] = self.conv_pad_sizes[-(u + 1)]
            self.conv_blocks_localization.append(
                nn.Sequential(
                    StackedConvLayers(n_features_after_tu_and_concat,
                                      nfeatures_from_skip,
                                      num_conv_per_stage - 1,
                                      self.conv_op,
                                      self.conv_kwargs,
                                      self.norm_op,
                                      self.norm_op_kwargs,
                                      self.dropout_op,
                                      self.dropout_op_kwargs,
                                      self.nonlin,
                                      self.nonlin_kwargs,
                                      basic_block=basic_block),
                    StackedConvLayers(nfeatures_from_skip,
                                      final_num_features,
                                      1,
                                      self.conv_op,
                                      self.conv_kwargs,
                                      self.norm_op,
                                      self.norm_op_kwargs,
                                      self.dropout_op,
                                      self.dropout_op_kwargs,
                                      self.nonlin,
                                      self.nonlin_kwargs,
                                      basic_block=basic_block)))

        for ds in range(len(self.conv_blocks_localization)):
            self.seg_outputs.append(
                conv_op(self.conv_blocks_localization[ds][-1].output_channels,
                        num_classes, 1, 1, 0, 1, 1, seg_output_use_bias))

        self.upscale_logits_ops = []
        cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes),
                                  axis=0)[::-1]
        for usl in range(num_pool - 1):
            if self.upscale_logits:
                self.upscale_logits_ops.append(
                    Upsample(scale_factor=tuple(
                        [int(i) for i in cum_upsample[usl + 1]]),
                             mode=upsample_mode))
            else:
                self.upscale_logits_ops.append(lambda x: x)

        if not dropout_in_localization:
            self.dropout_op_kwargs['p'] = old_dropout_p

        # register all modules properly
        self.conv_blocks_localization = nn.ModuleList(
            self.conv_blocks_localization)
        self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)
        self.td = nn.ModuleList(self.td)
        self.tu = nn.ModuleList(self.tu)
        self.seg_outputs = nn.ModuleList(self.seg_outputs)
        if self.upscale_logits:
            self.upscale_logits_ops = nn.ModuleList(
                self.upscale_logits_ops
            )  # lambda x:x is not a Module so we need to distinguish here

        if self.weightInitializer is not None:
            self.apply(self.weightInitializer)
            # self.apply(print_module_training_status)

        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        for u in range(len(self.tu)):
            setattr(self, 'cat_quant' + str(u), QuantStub())
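The quant/dequant stubs and the per-level cat_quant stubs registered at the end of this constructor are only markers; they take effect once the model is passed through the eager-mode prepare/calibrate/convert cycle. A minimal sketch of that cycle, assuming a hypothetical build_unet() factory and calibration_batches iterable (neither is defined in this example):

import torch
import torch.quantization as tq

net = build_unet().eval()                          # hypothetical constructor for the network above
net.qconfig = tq.get_default_qconfig('fbgemm')     # observers for the x86 backend
tq.prepare(net, inplace=True)                      # insert observers at the stubs and weighted modules
with torch.no_grad():
    for batch in calibration_batches:              # hypothetical iterable of representative inputs
        net(batch)
tq.convert(net, inplace=True)                      # swap float modules for quantized kernels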
Example #21
0
 def __init__(self):
     super().__init__()
     self.relu = QuantWrapper(nn.ReLU())
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
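Example #21 gets its quantization boundary from QuantWrapper rather than from hand-placed stubs around the ReLU. QuantWrapper stores the wrapped module together with its own quant/dequant pair; a short sketch of the equivalent call order:

import torch.nn as nn
from torch.quantization import QuantWrapper

wrapped = QuantWrapper(nn.ReLU())
# wrapped(x) behaves like: wrapped.dequant(wrapped.module(wrapped.quant(x)))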
Example #22
0
 def __init__(self,resgroups=1,expansion=6, num_levels=4, 
              down_depth = [1,2,2,2], up_depth = [1,1,1],
              filters=[16,24,32,48],endchannels=[16,1],groupings=(1,1),
              upkern=3,use_JPU=False,dilate_channels=32,bias_ll=False):
     super().__init__()
     self.useJPU = False #use_JPU
     self.fused = False
     
     assert num_levels >= 3 and num_levels <= 6
     assert len(filters) == num_levels
     assert len(down_depth) == num_levels
     assert len(up_depth) == num_levels-1
     
     
     self.num_levels = num_levels
     
     # drop to 1/2
     self.encoder0 = Sequential(Conv(3,filters[0],DWS=False,stride=2))
     for j in range(down_depth[0]):
         name = "DownIR_{}_{}".format(0,j)
         self.encoder0.add_module(name,InvertedResidual(filters[0],
                                                        filters[0],
                                                        expansion))
     self.decoder0 = Sequential(InvertedResidual(2*filters[0],filters[0],
                                                 expansion))
     for j in range(up_depth[0]):
         name = "UpIR_{}_{}".format(0,j)
         self.decoder0.add_module(name,InvertedResidual(filters[0],
                                                        filters[0],
                                                        expansion))
     if upkern==3:
         self.upconv0 = UpConvUS(filters[0],endchannels[0],upsample=2,DWS=True)
     else:
         self.upconv0 = UpConv(filters[0],endchannels[0],upsample=2,DWS=True)
         
     # drop to 1/4
     i = 1
     self.encoder1 = Sequential(InvertedResidual(filters[i-1],
                                                 filters[i],
                                                 expansion,
                                                 stride=2))
     for j in range(down_depth[i]):
         name = "DownIR_{}_{}".format(i,j)
         self.encoder1.add_module(name,InvertedResidual(filters[i],
                                                        filters[i],
                                                        expansion))
     self.decoder1 = Sequential(InvertedResidual(2*filters[i],
                                                 filters[i],
                                                 expansion))
     for j in range(up_depth[i]):
         name = "UpIR_{}_{}".format(i,j)
         self.decoder1.add_module(name,InvertedResidual(filters[i],
                                                        filters[i],
                                                        expansion))
     if upkern==3:
         self.upconv1 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
     else:
         self.upconv1 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
     
     # drop to 1/8
     i = 2
     self.encoder2 = Sequential(InvertedResidual(filters[i-1],
                                                 filters[i],
                                                 expansion,
                                                 stride=2))
     for j in range(down_depth[i]):
         name = "DownIR_{}_{}".format(i,j)
         self.encoder2.add_module(name,InvertedResidual(filters[i],
                                                        filters[i],
                                                        expansion))
     if upkern==3:
         self.upconv2 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
     else:
         self.upconv2 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
     
     if num_levels > 3:
         # note: decoders only need one fewer level
         self.decoder2 = Sequential(InvertedResidual(2*filters[i],
                                                 filters[i],
                                                 expansion))
         for j in range(up_depth[i]):
             name = "UpIR_{}_{}".format(i,j)
             self.decoder2.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         
         # drop to 1/16
         i = 3
         self.encoder3 = Sequential(InvertedResidual(filters[i-1],
                                                     filters[i],
                                                     expansion,
                                                     stride=2))
         for j in range(down_depth[i]):
             name = "DownIR_{}_{}".format(i,j)
             self.encoder3.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         if upkern==3:
             self.upconv3 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
         else:
             self.upconv3 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
             
     if num_levels > 4:
         self.decoder3 = Sequential(InvertedResidual(2*filters[i],
                                                     filters[i],
                                                     expansion))
         for j in range(up_depth[i]):
             name = "UpIR_{}_{}".format(i,j)
             self.decoder3.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         
         # drop to 1/32
         i = 4
         self.encoder4 = Sequential(InvertedResidual(filters[i-1],
                                                     filters[i],
                                                     expansion,
                                                     stride=2))
         for j in range(down_depth[i]):
             name = "DownIR_{}_{}".format(i,j)
             self.encoder4.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         if upkern==3:
             self.upconv4 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
         else:
             self.upconv4 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
     
     if num_levels > 5:
         self.decoder4 = Sequential(InvertedResidual(2*filters[i],
                                                     filters[i],
                                                     expansion))
         for j in range(up_depth[i]):
             name = "UpIR_{}_{}".format(i,j)
             self.decoder4.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
             
         # drop to 1/64
         i = 5
         self.encoder5 = Sequential(InvertedResidual(filters[i-1],
                                                     filters[i],
                                                     expansion,
                                                     stride=2))
         for j in range(down_depth[i]):
             name = "DownIR_{}_{}".format(i,j)
             self.encoder5.add_module(name,InvertedResidual(filters[i],
                                                            filters[i],
                                                            expansion))
         if upkern==3:
             self.upconv5 = UpConvUS(filters[i],filters[i-1],upsample=2,DWS=True)
         else:
             self.upconv5 = UpConv(filters[i],filters[i-1],upsample=2,DWS=True)
     
         
     self.pred = Conv(endchannels[0],endchannels[1],DWS=False,bias=bias_ll)
     self.edge = Conv(endchannels[0],endchannels[1],DWS=False,bias=bias_ll)
     
     self.quant = QuantStub()
     self.dequant = DeQuantStub()    
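The self.fused = False flag above suggests a later conv/bn/relu fusion pass before quantization. A heavily hedged sketch of what such a pass could look like; the module paths in the list are assumptions about the internals of Conv/InvertedResidual, not names taken from this example:

import torch.quantization as tq

model = model.eval()   # fusion for post-training quantization is done in eval mode
# Module paths below are hypothetical; list the model's actual conv/bn/relu triples here.
tq.fuse_modules(model, [['encoder0.0.conv', 'encoder0.0.bn', 'encoder0.0.relu']], inplace=True)
model.fused = True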
Example #23
0
 def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
              count_include_pad=True, divisor_override=None, relu=False):
     super().__init__(kernel_size, stride, padding, ceil_mode,
              count_include_pad, divisor_override)
     self.relu = relu
     self.quant = QuantStub()
Example #24
0
File: qnn_test.py  Project: wenxcs/tvm
 def __init__(self, inputsize=(128, 128)):
     super(QuantizableBackbone, self).__init__()
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     self.backbone = Backbone()
Example #25
0
 def __init__(self):
     super(AnnotatedConvModel, self).__init__()
     self.qconfig = default_qconfig
     self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
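Example #25 only shows the constructor; presumably the stubs bracket the single convolution in forward. A minimal hedged sketch of that pattern (the actual forward is not shown in this example):

def forward(self, x):
    x = self.quant(x)       # identity before convert(), float -> quantized after
    x = self.conv(x)
    return self.dequant(x)  # quantized -> float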
Example #26
0
 def __init__(self, relu=False):
     super().__init__()
     self.relu = relu
     self.quant = QuantStub()
     self.leftInputQuant = QuantStub()
     self.rightInputQuant = QuantStub()
Example #27
0
 def __init__(self, qengine):
     super().__init__()
     self.qconfig = torch.quantization.get_default_qconfig(qengine)
     self.conv = torch.nn.Conv2d(3, 5, 3, bias=False).to(dtype=torch.float)
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
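Example #27 parameterizes the qconfig by quantization engine; the chosen string also has to match the active backend when the converted model runs. A short sketch using the two engine names PyTorch ships ('fbgemm' for x86, 'qnnpack' for ARM/mobile):

import torch

torch.backends.quantized.engine = 'fbgemm'                   # or 'qnnpack' on ARM targets
qconfig = torch.quantization.get_default_qconfig('fbgemm')   # must agree with the engine above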
Example #28
0
 def __init__(self, add_stub=False):
     super().__init__()
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     self.add_stub = add_stub
     self.hswish = nn.Hardswish()
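The add_stub flag in Example #28 implies the stubs are applied conditionally; a hedged guess at the corresponding forward (not shown in this example):

def forward(self, x):
    if self.add_stub:
        x = self.quant(x)
    x = self.hswish(x)
    if self.add_stub:
        x = self.dequant(x)
    return x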
Example #29
0
 def __init__(self):
     super().__init__()
     self.fc1 = nn.Linear(5, 5).to(dtype=torch.float)
     self.quant = QuantStub()
     self.dequant = DeQuantStub()
     self.no_quant_module = self.ListOutModule()
Example #30
0
 def __init__(self):
     super().__init__()
     self.quant = QuantStub()
     self.dequant = DeQuantStub()