Example 1
    def __init__(self):
        super(ModelConv, self).__init__()
        with supernet(kernel_size=(3, 5, 7),
                      channel=((4, 8, 12), (8, 12, 16), (8, 12, 16),
                               (8, 12, 16))) as ofa_super:
            models = []
            models += [nn.Conv2D(3, 4, 3, padding=1)]
            models += [nn.InstanceNorm(4)]
            models += [ReLU()]
            models += [nn.Conv2D(4, 4, 3, groups=4)]
            models += [nn.InstanceNorm(4)]
            models += [ReLU()]
            models += [
                nn.Conv2DTranspose(4,
                                   4,
                                   3,
                                   groups=4,
                                   padding=1,
                                   use_cudnn=True)
            ]
            models += [nn.BatchNorm(4)]
            models += [ReLU()]
            models += [nn.Conv2D(4, 3, 3)]
            models += [ReLU()]
            models = ofa_super.convert(models)

        models += [
            Block(SuperSeparableConv2D(3,
                                       6,
                                       1,
                                       padding=1,
                                       candidate_config={'channel': (3, 6)}),
                  fixed=True)
        ]
        with supernet(kernel_size=(3, 5, 7),
                      expand_ratio=(1, 2, 4)) as ofa_super:
            models1 = []
            models1 += [nn.Conv2D(6, 4, 3)]
            models1 += [nn.BatchNorm(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2D(4, 4, 3, groups=2)]
            models1 += [nn.InstanceNorm(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3, groups=2)]
            models1 += [nn.BatchNorm(4)]
            models1 += [ReLU()]
            models1 += [nn.Conv2DTranspose(4, 4, 3)]
            models1 += [nn.BatchNorm(4)]
            models1 += [ReLU()]
            models1 = ofa_super.convert(models1)

        models += models1

        self.models = paddle.nn.Sequential(*models)
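
Note: the supernet context manager above comes from PaddleSlim's once-for-all (OFA) NAS module. A minimal usage sketch, assuming PaddleSlim is installed, that ModelConv defines a forward over self.models, and that the import path matches your PaddleSlim version:

import paddle
from paddleslim.nas.ofa import OFA

model = ModelConv()
ofa_model = OFA(model)            # wraps the supernet for once-for-all training
x = paddle.randn([1, 3, 32, 32])  # NCHW input; the size is illustrative
out = ofa_model(x)                # each forward pass samples a sub-network
                                  # (return structure varies by PaddleSlim version)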
Example 2
File: tpn.py Project: ruyijidan/TPN
    def __init__(
        self,
        in_channels=[1024, 1024],
        mid_channels=[1024, 1024],
        out_channels=2048,
        ds_scales=[(1, 1, 1), (1, 1, 1)],
    ):
        super(LevelFusion, self).__init__()

        ops = []
        num_ins = len(in_channels)
        for i in range(num_ins):
            op = Downampling(in_channels[i],
                             mid_channels[i],
                             kernel_size=(1, 1, 1),
                             stride=(1, 1, 1),
                             padding=(0, 0, 0),
                             bias=False,
                             groups=32,
                             norm=True,
                             activation=True,
                             downsample_position='before',
                             downsample_scale=ds_scales[i])
            ops.append(op)
        # build the Sequential once, after all per-level ops are collected
        self.ops = Sequential(*ops)

        in_dims = np.sum(mid_channels)
        self.fusion_conv = Sequential(
            nn.Conv3D(in_dims, out_channels, 1, 1, 0, bias_attr=False),
            nn.BatchNorm(out_channels), Relu())
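
The fusion step itself (in the forward pass, not shown here) amounts to concatenating the per-level features along the channel axis and mixing them with a 1x1x1 convolution; that is why in_dims sums mid_channels. A self-contained sketch of the pattern in the Paddle 2.x paddle.nn API, with illustrative shapes:

import paddle
import paddle.nn as nn

feats = [paddle.randn([2, 1024, 4, 7, 7]),  # level 0: N, C, T, H, W
         paddle.randn([2, 1024, 4, 7, 7])]  # level 1
x = paddle.concat(feats, axis=1)            # channels: 1024 + 1024 = 2048
fuse = nn.Sequential(nn.Conv3D(2048, 2048, 1, 1, 0, bias_attr=False),
                     nn.BatchNorm3D(2048),
                     nn.ReLU())
out = fuse(x)                               # [2, 2048, 4, 7, 7]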
Example 3
File: tpn.py Project: ruyijidan/TPN
    def __init__(
            self,
            inplanes,
            planes,
            kernel_size=(3, 1, 1),
            stride=(1, 1, 1),
            padding=(1, 0, 0),
            bias=False,
            groups=1,
            norm=False,
            activation=False,
            downsample_position='after',
            downsample_scale=(1, 2, 2),
    ):
        super(Downampling, self).__init__()

        self.conv = nn.Conv3D(inplanes,
                              planes,
                              kernel_size,
                              stride,
                              padding,
                              bias_attr=bias,
                              groups=groups)
        self.norm = nn.BatchNorm(planes) if norm else None
        self.relu = Relu() if activation else None
        assert downsample_position in ['before', 'after']
        self.downsample_position = downsample_position
        self.pool = MaxPool3D(downsample_scale,
                              downsample_scale, (0, 0, 0),
                              ceil_mode=True)
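
The downsample_position flag only decides whether max-pooling runs before or after the convolution. A minimal sketch of the two orderings in the Paddle 2.x paddle.nn API (shapes are assumptions):

import paddle
import paddle.nn as nn

conv = nn.Conv3D(16, 32, (3, 1, 1), stride=(1, 1, 1), padding=(1, 0, 0),
                 bias_attr=False)
pool = nn.MaxPool3D(kernel_size=(1, 2, 2), stride=(1, 2, 2), ceil_mode=True)
x = paddle.randn([2, 16, 8, 56, 56])  # N, C, T, H, W
y_before = conv(pool(x))              # downsample_position='before'
y_after = pool(conv(x))               # downsample_position='after'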
Example 4
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 is_bias=True,
                 is_bn=True,
                 is_relu=True,
                 is_test=False):
        super(FCBNReluLayer, self).__init__()
        self.is_bn = is_bn
        self.is_relu = is_relu

        if is_bias:
            bias_init = fluid.ParamAttr(
                initializer=fluid.initializer.ConstantInitializer(0.))
        else:
            bias_init = False
        self.linear = nn.Linear(in_channels * in_size * in_size,
                                out_channels,
                                bias_attr=bias_init)
        self.bn = nn.BatchNorm(out_channels,
                               param_attr=norm_weight_init(),
                               bias_attr=norm_bias_init(),
                               act=None,
                               momentum=0.9,
                               use_global_stats=is_test)
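
The Linear layer expects its input flattened to in_channels * in_size * in_size features, so callers reshape the conv feature map first. A self-contained sketch of the flatten -> Linear -> BatchNorm pattern in the Paddle 2.x API (sizes illustrative):

import paddle
import paddle.nn as nn

x = paddle.randn([8, 64, 6, 6])          # N, C, H, W from a conv stack
flat = paddle.flatten(x, start_axis=1)   # -> [8, 64 * 6 * 6]
fc = nn.Linear(64 * 6 * 6, 256)
bn = nn.BatchNorm1D(256)
out = nn.ReLU()(bn(fc(flat)))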
Example 5
    def __init__(self,
                 in_channels,
                 out_channels,
                 filter_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bn_init_constant=1.0):
        super(ConvBNLayer, self).__init__()

        self.conv = nn.Conv2D(num_channels=in_channels,
                              filter_size=filter_size,
                              num_filters=out_channels,
                              stride=stride,
                              padding=padding,
                              dilation=dilation,
                              groups=groups,
                              param_attr=weight_init(),
                              bias_attr=False)
        self.bn = nn.BatchNorm(out_channels,
                               param_attr=norm_weight_init(bn_init_constant),
                               bias_attr=norm_bias_init(),
                               act=None,
                               momentum=0.1,
                               use_global_stats=True)
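
Two details worth noting here: use_global_stats=True makes BatchNorm normalize with its stored moving statistics even during training (a "frozen" BN), and Paddle's momentum is the decay applied to the moving statistics (0.9 is typical), not the PyTorch-style update rate, so momentum=0.1 makes the moving averages track each batch almost entirely. A frozen layer in the Paddle 2.x API would look like this (a sketch of the intended behavior, assuming a Paddle release where BatchNorm2D exposes use_global_stats):

import paddle.nn as nn

# moving_mean/moving_var are used in both train and eval modes
frozen_bn = nn.BatchNorm2D(64, momentum=0.9, use_global_stats=True)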
Example 6
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   spatial_stride=1,
                   temporal_stride=1,
                   dilation=1,
                   style='pytorch',
                   inflate_freq=1,
                   inflate_style='3x1x1',
                   nonlocal_freq=1,
                   nonlocal_cfg=None,
                   with_cp=False):
    inflate_freq = inflate_freq if not isinstance(inflate_freq, int) else (inflate_freq,) * blocks
    nonlocal_freq = nonlocal_freq if not isinstance(nonlocal_freq, int) else (nonlocal_freq,) * blocks
    assert len(inflate_freq) == blocks
    assert len(nonlocal_freq) == blocks
    downsample = None
    if spatial_stride != 1 or inplanes != planes * block.expansion:
        downsample = Sequential(
            nn.Conv3D(
                inplanes,
                planes * block.expansion,
                filter_size=1,
                stride=(temporal_stride, spatial_stride, spatial_stride),
                bias_attr=False),
            nn.BatchNorm(planes * block.expansion),
        )

    layers = []
    layers.append(
        block(
            inplanes,
            planes,
            spatial_stride,
            temporal_stride,
            dilation,
            downsample,
            style=style,
            if_inflate=(inflate_freq[0] == 1),
            inflate_style=inflate_style,
            if_nonlocal=(nonlocal_freq[0] == 1),
            nonlocal_cfg=nonlocal_cfg,
            with_cp=with_cp))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes,
                  planes,
                  1, 1,
                  dilation,
                  style=style,
                  if_inflate=(inflate_freq[i] == 1),
                  inflate_style=inflate_style,
                  if_nonlocal=(nonlocal_freq[i] == 1),
                  nonlocal_cfg=nonlocal_cfg,
                  with_cp=with_cp))

    return Sequential(*layers)
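
Hypothetical usage, assuming the Bottleneck class from Example 12 (expansion = 4) is in scope; the argument values are illustrative:

# one ResNet stage: the first block strides spatially and gets the 1x1x1
# projection shortcut, the remaining blocks keep stride 1
res3 = make_res_layer(Bottleneck,
                      inplanes=256,
                      planes=128,
                      blocks=4,
                      spatial_stride=2,
                      inflate_freq=(1, 0, 1, 0))  # inflate blocks 0 and 2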
Example 7
    def __init__(self, num_channels, num_filters, is_test=False):
        super(AdjustLayer, self).__init__()
        self.conv = nn.Conv2D(num_channels=num_channels,
                              num_filters=num_filters,
                              filter_size=1,
                              param_attr=weight_init(),
                              bias_attr=False)
        self.bn = nn.BatchNorm(num_channels=num_filters,
                               param_attr=norm_weight_init(),
                               bias_attr=norm_bias_init(),
                               momentum=0.9,
                               act=None,
                               use_global_stats=is_test)
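
The same 1x1-convolution channel adapter, as a self-contained sketch in the Paddle 2.x paddle.nn API (channel counts illustrative):

import paddle
import paddle.nn as nn

adjust = nn.Sequential(nn.Conv2D(512, 256, 1, bias_attr=False),
                       nn.BatchNorm2D(256))
y = adjust(paddle.randn([1, 512, 31, 31]))  # -> [1, 256, 31, 31]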
Example 8
File: tpn.py Project: ruyijidan/TPN
    def __init__(
        self,
        inplanes,
        planes,
        kernel_size,
        stride,
        padding,
        bias=False,
        groups=1,
    ):
        super(ConvModule, self).__init__()
        self.conv = nn.Conv3D(inplanes,
                              planes,
                              kernel_size,
                              stride,
                              padding,
                              bias_attr=bias,
                              groups=groups)
        self.bn = nn.BatchNorm(planes)
        self.relu = Relu()
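
Hypothetical usage, assuming ConvModule and Relu are in scope; groups must divide both inplanes and planes:

# a 32-group 1x1x1 projection, in the style of the LevelFusion ops in Example 2
m = ConvModule(64, 128,
               kernel_size=(1, 1, 1),
               stride=(1, 1, 1),
               padding=(0, 0, 0),
               groups=32)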
Example 9
    def __init__(self,
                 in_channels,
                 out_channels,
                 filter_size,
                 stride=1,
                 groups=1,
                 padding=1,
                 is_test=False):
        super(ConvBNReluLayer, self).__init__()

        self.conv = nn.Conv2D(num_channels=in_channels,
                              filter_size=filter_size,
                              num_filters=out_channels,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              bias_attr=bias_init(),
                              param_attr=weight_init())
        self.bn = nn.BatchNorm(out_channels,
                               param_attr=norm_weight_init(),
                               bias_attr=norm_bias_init(),
                               act=None,
                               momentum=0.9,
                               use_global_stats=is_test)
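
A quick note on groups: grouped convolution runs groups independent convolutions over channel slices, so both in_channels and out_channels must be divisible by groups. A self-contained sketch in the Paddle 2.x API:

import paddle
import paddle.nn as nn

# depthwise-style 3x3 conv: one group per input channel
dw = nn.Conv2D(32, 32, 3, stride=1, padding=1, groups=32)
y = dw(paddle.randn([1, 32, 28, 28]))  # -> [1, 32, 28, 28]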
Example 10
File: tpn.py Project: ruyijidan/TPN
    def __init__(self,
                 in_channels=[256, 512, 1024, 2048],
                 out_channels=256,
                 spatial_modulation_config=None,
                 temporal_modulation_config=None,
                 upsampling_config=None,
                 downsampling_config=None,
                 level_fusion_config=None,
                 aux_head_config=None,
                 mode=None):
        super(TPN, self).__init__()
        assert isinstance(in_channels, list)
        assert isinstance(out_channels, int)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.mode = mode
        # spatial_modulation_config = Config(spatial_modulation_config) if isinstance(spatial_modulation_config,
        #                                                                             dict) else spatial_modulation_config
        # temporal_modulation_config = Config(temporal_modulation_config) if isinstance(temporal_modulation_config,
        #                                                                               dict) else temporal_modulation_config
        # upsampling_config = Config(upsampling_config) if isinstance(upsampling_config, dict) else upsampling_config
        # downsampling_config = Config(downsampling_config) if isinstance(downsampling_config,
        #                                                                 dict) else downsampling_config
        # aux_head_config = Config(aux_head_config) if isinstance(aux_head_config, dict) else aux_head_config
        # level_fusion_config = Config(level_fusion_config) if isinstance(level_fusion_config,
        #                                                                 dict) else level_fusion_config

        # self.temporal_modulation_ops = nn.ModuleList()
        # self.upsampling_ops = nn.ModuleList()
        # self.downsampling_ops = nn.ModuleList()

        temp_modulation_ops = []
        temp_upsampling_ops = []
        temp_downsampling_ops = []
        for i in range(self.num_ins):
            inplanes = in_channels[-1]
            planes = out_channels

            if temporal_modulation_config is not None:
                # overwrite the temporal_modulation_config with the
                # per-level scale and channel counts
                temporal_modulation_config['param'][
                    'downsample_scale'] = temporal_modulation_config['scales'][i]
                temporal_modulation_config['param']['inplanes'] = inplanes
                temporal_modulation_config['param']['planes'] = planes
                temporal_modulation = TemporalModulation(
                    **temporal_modulation_config['param'])
                temp_modulation_ops.append(temporal_modulation)

            if i < self.num_ins - 1:
                if upsampling_config is not None:
                    # overwrite the upsampling_config
                    upsampling = Upsampling(**upsampling_config)
                    temp_upsampling_ops.append(upsampling)
                if downsampling_config is not None:
                    # overwrite the downsampling_config
                    downsampling_config['param']['inplanes'] = planes
                    downsampling_config['param']['planes'] = planes
                    downsampling_config['param'][
                        'downsample_scale'] = downsampling_config['scales']
                    downsampling = Downampling(**downsampling_config['param'])
                    temp_downsampling_ops.append(downsampling)

        # build the Sequential containers once, after the loop has
        # collected all per-level ops
        self.temporal_modulation_ops = Sequential(*temp_modulation_ops)
        self.upsampling_ops = Sequential(*temp_upsampling_ops)
        self.downsampling_ops = Sequential(*temp_downsampling_ops)

        self.level_fusion_op = LevelFusion()  # **level_fusion_config
        self.spatial_modulation = SpatialModulation()  # **spatial_modulation_config
        out_dims = level_fusion_config['out_channels']

        # Two pyramids
        self.level_fusion_op2 = LevelFusion(**level_fusion_config)

        self.pyramid_fusion_op = Sequential(
            nn.Conv3D(out_dims * 2, 2048, 1, 1, 0, bias_attr=False),
            nn.BatchNorm(2048), Relu())

        # overwrite aux_head_config
        if aux_head_config is not None:
            aux_head_config['inplanes'] = self.in_channels[-2]
            self.aux_head = AuxHead(**aux_head_config)
        else:
            self.aux_head = None
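
A hypothetical instantiation, with the config keys inferred from the reads above: temporal_modulation_config needs 'scales' plus a 'param' dict that the loop completes with inplanes/planes/downsample_scale, and level_fusion_config must be supplied because out_channels is read from it unconditionally. All values below are illustrative, not from the repo:

tpn = TPN(in_channels=[1024, 2048],
          out_channels=1024,
          temporal_modulation_config={'scales': (32, 32), 'param': {}},
          level_fusion_config={'in_channels': [1024, 1024],
                               'mid_channels': [1024, 1024],
                               'out_channels': 2048,
                               'ds_scales': [(1, 1, 1), (1, 1, 1)]})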
Example 11
    def layer_init(self):
        # for conv1
        self.conv1 = nn.Conv2D(num_channels=3,
                               num_filters=96,
                               filter_size=11,
                               stride=2,
                               padding=0,
                               groups=1,
                               param_attr=self.weight_init(),
                               bias_attr=self.bias_init())
        self.bn1 = nn.BatchNorm(num_channels=96,
                                is_test=self.is_test,
                                param_attr=self.norm_weight_init(),
                                bias_attr=self.bias_init(),
                                use_global_stats=self.is_test)
        self.pool1 = nn.Pool2D(pool_size=3,
                               pool_type="max",
                               pool_stride=2,
                               pool_padding=0)
        # for conv2
        self.conv2 = nn.Conv2D(num_channels=96,
                               num_filters=256,
                               filter_size=5,
                               stride=1,
                               padding=0,
                               groups=2,
                               param_attr=self.weight_init(),
                               bias_attr=self.bias_init())
        self.bn2 = nn.BatchNorm(num_channels=256,
                                is_test=self.is_test,
                                param_attr=self.norm_weight_init(),
                                bias_attr=self.bias_init(),
                                use_global_stats=self.is_test)
        self.pool2 = nn.Pool2D(pool_size=3,
                               pool_type="max",
                               pool_stride=2,
                               pool_padding=0)
        # for conv3
        self.conv3 = nn.Conv2D(num_channels=256,
                               num_filters=384,
                               filter_size=3,
                               stride=1,
                               padding=0,
                               groups=1,
                               param_attr=self.weight_init(),
                               bias_attr=self.bias_init())
        self.bn3 = nn.BatchNorm(num_channels=384,
                                is_test=self.is_test,
                                param_attr=self.norm_weight_init(),
                                bias_attr=self.bias_init(),
                                use_global_stats=self.is_test)
        # for conv4
        self.conv4 = nn.Conv2D(num_channels=384,
                               num_filters=384,
                               filter_size=3,
                               stride=1,
                               padding=0,
                               groups=2,
                               param_attr=self.weight_init(),
                               bias_attr=self.bias_init())
        self.bn4 = nn.BatchNorm(num_channels=384,
                                is_test=self.is_test,
                                param_attr=self.norm_weight_init(),
                                bias_attr=self.bias_init(),
                                use_global_stats=self.is_test)
        # for conv5
        self.conv5 = nn.Conv2D(num_channels=384,
                               num_filters=256,
                               filter_size=3,
                               stride=1,
                               padding=0,
                               groups=2,
                               param_attr=self.weight_init(),
                               bias_attr=self.bias_init())
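
For reference, the fluid-era Pool2D used above maps onto paddle.nn.MaxPool2D in Paddle 2.x; a sketch of the port (not repo code):

import paddle.nn as nn

# Paddle 2.x counterpart of Pool2D(pool_size=3, pool_type="max", pool_stride=2)
pool = nn.MaxPool2D(kernel_size=3, stride=2, padding=0)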
Example 12
    def __init__(self,
                 inplanes,
                 planes,
                 spatial_stride=1,
                 temporal_stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 if_inflate=True,
                 inflate_style='3x1x1',
                 if_nonlocal=True,
                 nonlocal_cfg=None,
                 with_cp=False):
        """Bottleneck block for ResNet.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert inflate_style in ['3x1x1', '3x3x3']
        self.inplanes = inplanes
        self.planes = planes

        if style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = spatial_stride
            self.conv1_stride_t = 1
            self.conv2_stride_t = temporal_stride
        else:
            self.conv1_stride = spatial_stride
            self.conv2_stride = 1
            self.conv1_stride_t = temporal_stride
            self.conv2_stride_t = 1
        if if_inflate:
            if inflate_style == '3x1x1':
                self.conv1 = nn.Conv3D(
                    inplanes,
                    planes,
                    filter_size=(3, 1, 1),
                    stride=(self.conv1_stride_t, self.conv1_stride, self.conv1_stride),
                    padding=(1, 0, 0),
                    bias_attr=False)
                self.conv2 = nn.Conv3D(
                    planes,
                    planes,
                    filter_size=(1, 3, 3),
                    stride=(self.conv2_stride_t, self.conv2_stride, self.conv2_stride),
                    padding=(0, dilation, dilation),
                    dilation=(1, dilation, dilation),
                    bias_attr=False)
            else:
                self.conv1 = nn.Conv3D(
                    inplanes,
                    planes,
                    filter_size=1,
                    stride=(self.conv1_stride_t, self.conv1_stride, self.conv1_stride),
                    bias_attr=False)
                self.conv2 = nn.Conv3D(
                    planes,
                    planes,
                    filter_size=3,
                    stride=(self.conv2_stride_t, self.conv2_stride, self.conv2_stride),
                    padding=(1, dilation, dilation),
                    dilation=(1, dilation, dilation),
                    bias_attr=False)
        else:
            self.conv1 = nn.Conv3D(
                inplanes,
                planes,
                filter_size=1,
                stride=(1, self.conv1_stride, self.conv1_stride),
                bias_attr=False)
            self.conv2 = nn.Conv3D(
                planes,
                planes,
                filter_size=(1, 3, 3),
                stride=(1, self.conv2_stride, self.conv2_stride),
                padding=(0, dilation, dilation),
                dilation=(1, dilation, dilation),
                bias_attr=False)

        self.bn1 = nn.BatchNorm(planes)
        self.bn2 = nn.BatchNorm(planes)
        self.conv3 = nn.Conv3D(
            planes, planes * self.expansion, filter_size=1, bias_attr=False)
        self.bn3 = nn.BatchNorm(planes * self.expansion)
        self.relu = Relu()
        self.downsample = downsample
        self.spatial_stride = spatial_stride
        self.temporal_stride = temporal_stride
        self.dilation = dilation
        self.with_cp = with_cp
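
The two inflate styles trade parameter count against spatio-temporal coupling: '3x1x1' factorizes the kernel into a temporal conv followed by a spatial conv, while '3x3x3' applies one full spatio-temporal kernel after a 1x1x1 reduction. A standalone comparison in the paddle.nn API (channel count illustrative):

import paddle.nn as nn

c = 64
# '3x1x1' style: (3 + 9) * c * c weights across the two convs
t_conv = nn.Conv3D(c, c, (3, 1, 1), padding=(1, 0, 0), bias_attr=False)
s_conv = nn.Conv3D(c, c, (1, 3, 3), padding=(0, 1, 1), bias_attr=False)
# '3x3x3' style: 27 * c * c weights in a single conv
full_conv = nn.Conv3D(c, c, (3, 3, 3), padding=(1, 1, 1), bias_attr=False)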