Example #1
    def __init__(self, C_in, C_out, norm_layer, affine=True, input_size=None):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0
        self.conv_1 = Conv2d(C_in,
                             C_out // 2,
                             1,
                             stride=2,
                             padding=0,
                             bias=False)
        self.conv_2 = Conv2d(C_in,
                             C_out // 2,
                             1,
                             stride=2,
                             padding=0,
                             bias=False)
        self.bn = norm_layer(C_out, affine=affine)

        self.flops = self.get_flop([1, 1], 2, C_in, C_out, affine,
                                   input_size[0], input_size[1])
        # using Kaiming init
        for layer in [self.conv_1, self.conv_2]:
            for m in layer.modules():
                if isinstance(m, nn.Conv2d):
                    weight_init.kaiming_init(m, mode='fan_in')
                elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
                    if m.weight is not None:
                        nn.init.constant_(m.weight, 1)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
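The snippet above only shows the constructor. In DARTS-style implementations of FactorizedReduce, the forward pass concatenates the two stride-2 1x1 convolutions, with the second one reading a grid shifted by one pixel, before applying the norm layer. A minimal plain-PyTorch sketch of that idea, using torch.nn directly instead of the project's Conv2d/norm_layer wrappers (the forward behaviour is an assumption, since it is not part of the excerpt):

import torch
import torch.nn as nn

class SimpleFactorizedReduce(nn.Module):
    # Halve the spatial resolution with two offset stride-2 1x1 convs (sketch only).
    def __init__(self, c_in, c_out):
        super().__init__()
        assert c_out % 2 == 0
        self.conv_1 = nn.Conv2d(c_in, c_out // 2, 1, stride=2, bias=False)
        self.conv_2 = nn.Conv2d(c_in, c_out // 2, 1, stride=2, bias=False)
        self.bn = nn.BatchNorm2d(c_out)

    def forward(self, x):
        # conv_2 samples a grid shifted by one pixel, so the two paths see complementary pixels
        out = torch.cat([self.conv_1(x), self.conv_2(x[:, :, 1:, 1:])], dim=1)
        return self.bn(out)

x = torch.randn(2, 16, 32, 32)
print(SimpleFactorizedReduce(16, 32)(x).shape)  # torch.Size([2, 32, 16, 16])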
Example #2
 def __init__(self,
              C_in,
              C_out,
              kernel_size,
              stride,
              padding,
              norm_layer,
              affine=True,
              input_size=None):
     super(BasicResBlock, self).__init__()
     self.op = Conv2d(C_in,
                      C_out,
                      kernel_size,
                      stride=stride,
                      padding=padding,
                      bias=False,
                      norm=get_norm(norm_layer, C_out))
     self.flops = self.get_flop([kernel_size, kernel_size], stride, C_in,
                                C_out, affine, input_size[0], input_size[1])
     # using Kaiming init
     for m in self.op.modules():
         if isinstance(m, nn.Conv2d):
             weight_init.kaiming_init(m, mode='fan_in')
         elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
             if m.weight is not None:
                 nn.init.constant_(m.weight, 1)
             if m.bias is not None:
                 nn.init.constant_(m.bias, 0)
Example #3
 def __init__(self,
              C_in,
              C_out,
              kernel_size,
              stride,
              padding,
              norm_layer,
              affine=True,
              input_size=None):
     super(SepConv, self).__init__()
     self.op = nn.Sequential(
         # depthwise
         Conv2d(C_in,
                C_in,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                groups=C_in,
                bias=False),
         # pointwise
         Conv2d(C_in,
                C_in,
                kernel_size=1,
                padding=0,
                bias=False,
                norm=get_norm(norm_layer, C_in),
                activation=nn.ReLU()),
         # second depthwise-separable conv in the stack
         Conv2d(C_in,
                C_in,
                kernel_size=kernel_size,
                stride=1,
                padding=padding,
                groups=C_in,
                bias=False),
         Conv2d(C_in,
                C_out,
                kernel_size=1,
                padding=0,
                bias=False,
                norm=get_norm(norm_layer, C_out)))
     self.flops = self.get_flop([kernel_size, kernel_size], stride, C_in,
                                C_out, affine, input_size[0], input_size[1])
     # using Kaiming init
     for m in self.op.modules():
         if isinstance(m, nn.Conv2d):
             weight_init.kaiming_init(m, mode='fan_in')
         elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
             if m.weight is not None:
                 nn.init.constant_(m.weight, 1)
             if m.bias is not None:
                 nn.init.constant_(m.bias, 0)
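SepConv stacks two depthwise-separable convolutions, each factoring a dense k x k convolution into a grouped (depthwise) k x k convolution followed by a 1x1 pointwise convolution; that factorization is where the parameter and FLOP savings come from. A quick plain-PyTorch check of the saving for one such stage (channel and kernel sizes are arbitrary illustration values):

import torch.nn as nn

c, k = 64, 3
dense = nn.Conv2d(c, c, k, padding=1, bias=False)
separable = nn.Sequential(
    nn.Conv2d(c, c, k, padding=1, groups=c, bias=False),  # depthwise: k*k weights per channel
    nn.Conv2d(c, c, 1, bias=False),                        # pointwise: mixes channels
)

count = lambda m: sum(p.numel() for p in m.parameters())
print(count(dense), count(separable))  # 36864 vs 4672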
Example #4
 def __init__(self,
              C_in,
              C_out,
              kernel_size,
              stride,
              padding,
              norm_layer,
              expansion=4,
              affine=True,
              input_size=None):
     super(MBConv, self).__init__()
     self.hidden_dim = expansion * C_in
     self.op = nn.Sequential(
         # pw
         Conv2d(C_in,
                self.hidden_dim,
                1,
                1,
                0,
                bias=False,
                norm=get_norm(norm_layer, self.hidden_dim),
                activation=nn.ReLU()),
         # dw
         Conv2d(self.hidden_dim,
                self.hidden_dim,
                kernel_size,
                stride,
                padding,
                groups=self.hidden_dim,
                bias=False,
                norm=get_norm(norm_layer, self.hidden_dim),
                activation=nn.ReLU()),
         # pw-linear without ReLU!
         Conv2d(self.hidden_dim,
                C_out,
                1,
                1,
                0,
                bias=False,
                norm=get_norm(norm_layer, C_out)))
     self.flops = self.get_flop([kernel_size, kernel_size], stride, C_in,
                                C_out, affine, input_size[0], input_size[1])
     # using Kaiming init
     for m in self.op.modules():
         if isinstance(m, nn.Conv2d):
             weight_init.kaiming_init(m, mode='fan_in')
         elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
             if m.weight is not None:
                 nn.init.constant_(m.weight, 1)
             if m.bias is not None:
                 nn.init.constant_(m.bias, 0)
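MBConv follows the MobileNetV2-style inverted-residual pattern: a 1x1 expansion to expansion * C_in channels, a depthwise convolution at the expanded width, and a linear 1x1 projection with no ReLU after it (the linear bottleneck the comment points out). A plain-PyTorch sketch of the same expand -> depthwise -> project pipeline, without the project's Conv2d/get_norm wrappers:

import torch
import torch.nn as nn

def mbconv_sketch(c_in, c_out, kernel_size=3, stride=1, expansion=4):
    # Expand -> depthwise -> linear project, mirroring the block above (sketch only).
    hidden = expansion * c_in
    return nn.Sequential(
        nn.Conv2d(c_in, hidden, 1, bias=False),
        nn.BatchNorm2d(hidden), nn.ReLU(),
        nn.Conv2d(hidden, hidden, kernel_size, stride, kernel_size // 2,
                  groups=hidden, bias=False),
        nn.BatchNorm2d(hidden), nn.ReLU(),
        nn.Conv2d(hidden, c_out, 1, bias=False),
        nn.BatchNorm2d(c_out),  # no ReLU here: linear bottleneck
    )

x = torch.randn(1, 32, 56, 56)
print(mbconv_sketch(32, 64, stride=2)(x).shape)  # torch.Size([1, 64, 28, 28])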
Example #5
 def __init__(self, C_in, C_out, norm_layer, affine=True, input_size=None):
     super(Identity, self).__init__()
     if C_in == C_out:
         self.change = False
         self.flops = 0.0
     else:
         self.change = True
         self.op = Conv2d(C_in,
                          C_out,
                          kernel_size=1,
                          padding=0,
                          bias=False,
                          norm=get_norm(norm_layer, C_out))
         self.flops = self.get_flop([1, 1], 1, C_in, C_out, affine,
                                    input_size[0], input_size[1])
         # using Kaiming init
         for m in self.op.modules():
             if isinstance(m, nn.Conv2d):
                 weight_init.kaiming_init(m, mode='fan_in')
             elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
                 if m.weight is not None:
                     nn.init.constant_(m.weight, 1)
                 if m.bias is not None:
                     nn.init.constant_(m.bias, 0)
Example #6
    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super().__init__()
        # fmt: off
        self.in_features = cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES
        feature_strides = {k: v.stride
                           for k, v in input_shape.items()}  # noqa:F841
        feature_channels = {k: v.channels for k, v in input_shape.items()}
        feature_resolution = {
            k: np.array([v.height, v.width])
            for k, v in input_shape.items()
        }
        self.ignore_value = cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE
        num_classes = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        norm = cfg.MODEL.SEM_SEG_HEAD.NORM
        self.loss_weight = cfg.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT
        self.cal_flops = cfg.MODEL.CAL_FLOPS
        self.real_flops = 0.0
        # fmt: on

        self.layer_decoder_list = nn.ModuleList()
        # set affine in BatchNorm
        if 'Sync' in norm:
            affine = True
        else:
            affine = False
        # use simple decoder
        for _feat in self.in_features:
            res_size = feature_resolution[_feat]
            in_channel = feature_channels[_feat]
            if _feat == 'layer_0':
                out_channel = in_channel
            else:
                out_channel = in_channel // 2
            conv_1x1 = Conv2d(in_channel,
                              out_channel,
                              kernel_size=1,
                              stride=1,
                              padding=0,
                              bias=False,
                              norm=get_norm(norm, out_channel),
                              activation=nn.ReLU())
            self.real_flops += cal_op_flops.count_ConvBNReLU_flop(
                res_size[0],
                res_size[1],
                in_channel,
                out_channel, [1, 1],
                is_affine=affine)
            self.layer_decoder_list.append(conv_1x1)
        # using Kaiming init
        for layer in self.layer_decoder_list:
            for m in layer.modules():
                if isinstance(m, nn.Conv2d):
                    weight_init.kaiming_init(m, mode='fan_in')
                elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
                    if m.weight is not None:
                        nn.init.constant_(m.weight, 1)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
        in_channel = feature_channels['layer_0']
        # the output layer
        self.predictor = Conv2d(in_channels=in_channel,
                                out_channels=num_classes,
                                kernel_size=3,
                                stride=1,
                                padding=1)
        self.real_flops += cal_op_flops.count_Conv_flop(
            feature_resolution['layer_0'][0], feature_resolution['layer_0'][1],
            in_channel, num_classes, [3, 3])
        # using Kaiming init
        for m in self.predictor.modules():
            if isinstance(m, nn.Conv2d):
                weight_init.kaiming_init(m, mode='fan_in')
            elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
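The head accumulates its theoretical cost through the cal_op_flops.count_* helpers as it builds each 1x1 decoder conv and the final 3x3 predictor. Their exact implementation is not shown here, but a Conv+BN+ReLU count reduces to simple arithmetic over the output resolution; the stand-alone function below is a hypothetical illustration of that arithmetic, not the project's actual helper:

def conv_bn_relu_flops(h, w, c_in, c_out, kernel, stride=1, groups=1, is_affine=True):
    # Hypothetical rough operation count for Conv -> BN -> ReLU on an h x w input.
    out_h, out_w = h // stride, w // stride
    conv = out_h * out_w * (c_in // groups) * c_out * kernel[0] * kernel[1]
    bn = out_h * out_w * c_out * (2 if is_affine else 1)  # scale (and shift if affine)
    relu = out_h * out_w * c_out
    return conv + bn + relu

# e.g. one 1x1 decoder conv on a hypothetical 64x128 feature map, 256 channels in and out
print(conv_bn_relu_flops(64, 128, 256, 256, [1, 1]))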
Example #7
    def __init__(  # noqa:C901
        self, C_in, C_out, norm, allow_up, allow_down, input_size,
        cell_type, cal_flops=True, using_gate=False,
        small_gate=False, gate_bias=1.5, affine=True
    ):
        super(Cell, self).__init__()
        self.channel_in = C_in
        self.channel_out = C_out
        self.allow_up = allow_up
        self.allow_down = allow_down
        self.cal_flops = cal_flops
        self.using_gate = using_gate
        self.small_gate = small_gate

        self.cell_ops = Mixed_OP(
            inplanes=self.channel_in, outplanes=self.channel_out,
            stride=1, cell_type=cell_type, norm=norm,
            affine=affine, input_size=input_size
        )
        self.cell_flops = self.cell_ops.flops
        # resolution keep
        self.res_keep = nn.ReLU()
        self.res_keep_flops = cal_op_flops.count_ReLU_flop(
            input_size[0], input_size[1], self.channel_out
        )
        # resolution up and dim down
        if self.allow_up:
            self.res_up = nn.Sequential(
                nn.ReLU(),
                Conv2d(
                    self.channel_out, self.channel_out // 2, kernel_size=1,
                    stride=1, padding=0, bias=False,
                    norm=get_norm(norm, self.channel_out // 2),
                    activation=nn.ReLU()
                )
            )
            # calculate Flops
            self.res_up_flops = cal_op_flops.count_ReLU_flop(
                input_size[0], input_size[1], self.channel_out
            ) + cal_op_flops.count_ConvBNReLU_flop(
                input_size[0], input_size[1], self.channel_out,
                self.channel_out // 2, [1, 1], is_affine=affine
            )
            # using Kaiming init
            for m in self.res_up.modules():
                if isinstance(m, nn.Conv2d):
                    weight_init.kaiming_init(m, mode='fan_in')
                elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
                    if m.weight is not None:
                        nn.init.constant_(m.weight, 1)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
        # resolution down and dim up
        if self.allow_down:
            self.res_down = nn.Sequential(
                nn.ReLU(),
                Conv2d(
                    self.channel_out, 2 * self.channel_out,
                    kernel_size=1, stride=2, padding=0, bias=False,
                    norm=get_norm(norm, 2 * self.channel_out),
                    activation=nn.ReLU()
                )
            )
            # calculate Flops
            self.res_down_flops = cal_op_flops.count_ReLU_flop(
                input_size[0], input_size[1], self.channel_out
            ) + cal_op_flops.count_ConvBNReLU_flop(
                input_size[0], input_size[1], self.channel_out,
                2 * self.channel_out, [1, 1], stride=2, is_affine=affine
            )
            # using Kaiming init
            for m in self.res_down.modules():
                if isinstance(m, nn.Conv2d):
                    weight_init.kaiming_init(m, mode='fan_in')
                elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
                    if m.weight is not None:
                        nn.init.constant_(m.weight, 1)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
        if self.allow_up and self.allow_down:
            self.gate_num = 3
        elif self.allow_up or self.allow_down:
            self.gate_num = 2
        else:
            self.gate_num = 1
        if self.using_gate:
            self.gate_conv_beta = nn.Sequential(
                Conv2d(
                    self.channel_in, self.channel_in // 2, kernel_size=1,
                    stride=1, padding=0, bias=False,
                    norm=get_norm(norm, self.channel_in // 2),
                    activation=nn.ReLU()
                ),
                nn.AdaptiveAvgPool2d((1, 1)),
                Conv2d(
                    self.channel_in // 2, self.gate_num, kernel_size=1,
                    stride=1, padding=0, bias=True
                )
            )
            if self.small_gate:
                input_size = input_size // 4
            self.gate_flops = cal_op_flops.count_ConvBNReLU_flop(
                input_size[0], input_size[1], self.channel_in,
                self.channel_in // 2, [1, 1], is_affine=affine
            ) + cal_op_flops.count_Pool2d_flop(
                input_size[0], input_size[1], self.channel_in // 2, [1, 1], 1
            ) + cal_op_flops.count_Conv_flop(
                1, 1, self.channel_in // 2, self.gate_num, [1, 1]
            )
            # using Kaiming init and predefined bias for gate
            for m in self.gate_conv_beta.modules():
                if isinstance(m, nn.Conv2d):
                    weight_init.kaiming_init(m, mode='fan_in', bias=gate_bias)
                elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
                    if m.weight is not None:
                        nn.init.constant_(m.weight, 1)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
        else:
            self.register_buffer(
                'gate_weights_beta', torch.ones(1, self.gate_num, 1, 1).cuda()
            )
            self.gate_flops = 0.0
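When using_gate is enabled, gate_conv_beta maps the cell input to gate_num logits per sample: a 1x1 bottleneck, global average pooling, then a 1x1 convolution whose bias is initialised to gate_bias so the routing gates start open. A plain-PyTorch sketch of that branch in isolation (how the logits become soft gates happens in the forward pass, which is not part of this excerpt, so the sigmoid below is an assumption):

import torch
import torch.nn as nn

def make_gate(c_in, gate_num, gate_bias=1.5):
    # 1x1 bottleneck -> global pool -> 1x1 conv producing one logit per routing path (sketch).
    gate = nn.Sequential(
        nn.Conv2d(c_in, c_in // 2, 1, bias=False),
        nn.BatchNorm2d(c_in // 2), nn.ReLU(),
        nn.AdaptiveAvgPool2d((1, 1)),
        nn.Conv2d(c_in // 2, gate_num, 1, bias=True),
    )
    nn.init.constant_(gate[-1].bias, gate_bias)  # positive bias so gates start open
    return gate

x = torch.randn(4, 64, 32, 32)
logits = make_gate(64, gate_num=3)(x)        # shape (4, 3, 1, 1): up / keep / down paths
print(torch.sigmoid(logits).flatten(1))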
Example #8
    def __init__(
        self, in_channels=3, mid_channels=64, out_channels=64,
        input_res=None, sept_stem=True, norm="BN", affine=True
    ):
        """
        Build basic STEM for Dynamic Network.
        Args:
            norm (str or callable): a callable that takes the number of
                channels and returns a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}).
        """
        super().__init__()

        self.real_flops = 0.0
        # start with 3 stem layers that downsample the input by 4 overall.
        self.stem_1 = Conv2d(
            in_channels, mid_channels, kernel_size=3, stride=2,
            bias=False, norm=get_norm(norm, mid_channels),
            activation=nn.ReLU()
        )
        self.real_flops += cal_op_flops.count_ConvBNReLU_flop(
            input_res[0], input_res[1], 3, mid_channels,
            [3, 3], stride=2, is_affine=affine
        )
        # stem 2
        input_res = input_res // 2
        if not sept_stem:
            self.stem_2 = Conv2d(
                mid_channels, mid_channels, kernel_size=3,
                stride=1, padding=1, bias=False,
                norm=get_norm(norm, mid_channels),
                activation=nn.ReLU()
            )
            self.real_flops += cal_op_flops.count_ConvBNReLU_flop(
                input_res[0], input_res[1], mid_channels,
                mid_channels, [3, 3], is_affine=affine
            )
        else:
            self.stem_2 = nn.Sequential(
                Conv2d(
                    mid_channels, mid_channels, kernel_size=3, stride=1,
                    padding=1, groups=mid_channels, bias=False
                ),
                Conv2d(
                    mid_channels, mid_channels, kernel_size=1,
                    stride=1, padding=0, bias=False,
                    norm=get_norm(norm, mid_channels),
                    activation=nn.ReLU()
                )
            )
            self.real_flops += (
                cal_op_flops.count_Conv_flop(
                    input_res[0], input_res[1], mid_channels,
                    mid_channels, [3, 3], groups=mid_channels
                ) + cal_op_flops.count_ConvBNReLU_flop(
                    input_res[0], input_res[1], mid_channels,
                    mid_channels, [1, 1], is_affine=affine
                )
            )
        # stem 3
        if not sept_stem:
            self.stem_3 = Conv2d(
                mid_channels, out_channels, kernel_size=3,
                stride=2, padding=1, bias=False,
                norm=get_norm(norm, out_channels),
                activation=nn.ReLU()
            )
            self.real_flops += cal_op_flops.count_ConvBNReLU_flop(
                input_res[0], input_res[1], mid_channels, out_channels,
                [3, 3], stride=2, is_affine=affine
            )
        else:
            self.stem_3 = nn.Sequential(
                Conv2d(
                    mid_channels, mid_channels, kernel_size=3, stride=2,
                    padding=1, groups=mid_channels, bias=False
                ),
                Conv2d(
                    mid_channels, out_channels, kernel_size=1, padding=0,
                    bias=False, norm=get_norm(norm, out_channels),
                    activation=nn.ReLU()
                )
            )
            self.real_flops += (
                cal_op_flops.count_Conv_flop(
                    input_res[0], input_res[1], mid_channels,
                    mid_channels, [3, 3], stride=2, groups=mid_channels
                ) + cal_op_flops.count_ConvBNReLU_flop(
                    input_res[0] // 2, input_res[1] // 2, mid_channels,
                    out_channels, [1, 1], is_affine=affine
                )
            )
        self.out_res = input_res // 2
        self.out_cha = out_channels
        # using Kaiming init
        for layer in [self.stem_1, self.stem_2, self.stem_3]:
            for name, m in layer.named_modules():
                if isinstance(m, nn.Conv2d):
                    weight_init.kaiming_init(m, mode='fan_in')
                elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)):
                    if m.weight is not None:
                        nn.init.constant_(m.weight, 1)
                    if m.bias is not None:
                        nn.init.constant_(m.bias, 0)
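Overall the stem downsamples by 4: stride 2 in stem_1, stride 1 in stem_2, and stride 2 again in stem_3, which is why out_res is set to input_res // 2 after input_res has already been halved once. A small plain-PyTorch shape check of the separable variant (torch.nn modules stand in for the project's Conv2d/get_norm wrappers):

import torch
import torch.nn as nn

mid, out = 64, 64
stem = nn.Sequential(
    # stem 1: dense 3x3, stride 2
    nn.Conv2d(3, mid, 3, stride=2, bias=False), nn.BatchNorm2d(mid), nn.ReLU(),
    # stem 2: depthwise 3x3 + pointwise 1x1, stride 1
    nn.Conv2d(mid, mid, 3, stride=1, padding=1, groups=mid, bias=False),
    nn.Conv2d(mid, mid, 1, bias=False), nn.BatchNorm2d(mid), nn.ReLU(),
    # stem 3: depthwise 3x3, stride 2, then pointwise 1x1
    nn.Conv2d(mid, mid, 3, stride=2, padding=1, groups=mid, bias=False),
    nn.Conv2d(mid, out, 1, bias=False), nn.BatchNorm2d(out), nn.ReLU(),
)

print(stem(torch.randn(1, 3, 512, 1024)).shape)  # torch.Size([1, 64, 128, 256]), i.e. 4x smaller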