Example #1
    def __init__(self, name_scope, groups=1):
        super(Res3, self).__init__(name_scope)

        self._conv1 = Conv3D(num_channels=96,
                             num_filters=128,
                             filter_size=3,
                             stride=1,
                             padding=1,
                             groups=groups,
                             act=None,
                             bias_attr=False)
        self._batch_norm1 = BatchNorm(128, act='relu')

        self._conv2 = Conv3D(num_channels=128,
                             num_filters=128,
                             filter_size=3,
                             stride=1,
                             padding=1,
                             groups=groups,
                             act=None,
                             bias_attr=False)
        self._batch_norm2 = BatchNorm(128, act='relu')

        self._conv3 = Conv3D(num_channels=128,
                             num_filters=128,
                             filter_size=3,
                             stride=1,
                             padding=1,
                             groups=groups,
                             act=None,
                             bias_attr=False)
        self._batch_norm3 = BatchNorm(128, act='relu')
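Only the constructor is shown; a minimal forward sketch for this block (an assumption, since the source forward is not included) chains each Conv3D with its BatchNorm and adds an element-wise skip connection before the final BatchNorm:

    def forward(self, inputs):
        # hypothetical forward: conv -> bn, conv -> bn, conv, then an assumed
        # element-wise skip connection feeding the final bn (act='relu')
        res3a = self._batch_norm1(self._conv1(inputs))
        res3b = self._batch_norm2(self._conv2(res3a))
        res3b = self._conv3(res3b)
        return self._batch_norm3(fluid.layers.elementwise_add(res3a, res3b))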
Example #2
    def __init__(self, name_scope, groups=1):
        super(Res5, self).__init__(name_scope)

        self._conv1 = Conv3D(num_channels=256,
                             num_filters=512,
                             filter_size=3,
                             stride=2,
                             padding=1,
                             groups=groups,
                             act=None,
                             bias_attr=False)
        self._batch_norm1 = BatchNorm(512, act='relu')

        self._conv2 = Conv3D(num_channels=512,
                             num_filters=512,
                             filter_size=3,
                             stride=1,
                             padding=1,
                             groups=groups,
                             act=None,
                             bias_attr=False)

        self._conv3 = Conv3D(num_channels=256,
                             num_filters=512,
                             filter_size=3,
                             stride=2,
                             padding=1,
                             groups=groups,
                             act=None,
                             bias_attr=False)
        self._batch_norm2 = BatchNorm(512, act='relu')
        # res5b branch
        self._conv4 = Conv3D(num_channels=512,
                             num_filters=512,
                             filter_size=3,
                             stride=1,
                             padding=1,
                             groups=groups,
                             act=None,
                             bias_attr=False)
        self._batch_norm3 = BatchNorm(512, act='relu')

        self._conv5 = Conv3D(num_channels=512,
                             num_filters=512,
                             filter_size=3,
                             stride=1,
                             padding=1,
                             groups=groups,
                             act=None,
                             bias_attr=False)

        self._batch_norm4 = BatchNorm(512, act='relu')
Example #3
 def __init__(self, dim_in, dim_out, batch_size, prefix, dim_inner, cfg, \
                    test_mode = False, max_pool_stride = 2):
     super(spacetime_nonlocal, self).__init__()
     self.cfg = cfg
     self.prefix = prefix
     self.dim_inner = dim_inner
     self.max_pool_stride = max_pool_stride
     self.conv3d_1 =  Conv3D(
             num_channels=dim_in,
             num_filters=dim_inner,
             filter_size=1,
             param_attr=ParamAttr(initializer=fluid.initializer.Normal(loc=0.0, scale=cfg.NONLOCAL.conv_init_std)),
             bias_attr=ParamAttr(initializer=fluid.initializer.Constant(value=0.)))
     
     self.conv3d_2 = Conv3D(
             num_channels=dim_in,
             num_filters=dim_inner,
             filter_size=1,
             param_attr=ParamAttr(initializer=fluid.initializer.Normal(loc=0.0, scale=cfg.NONLOCAL.conv_init_std)),
             bias_attr=ParamAttr(initializer=fluid.initializer.Constant(value=0.)))
 
     self.conv3d_3 = Conv3D(
                 num_channels=dim_in,
                 num_filters=dim_inner,
                 filter_size=1,
                 param_attr=ParamAttr(initializer=fluid.initializer.Normal(loc=0.0, scale=cfg.NONLOCAL.conv_init_std)),
                 bias_attr=ParamAttr(initializer=fluid.initializer.Constant(value=0.)))
 
     self.conv3d_4 = Conv3D(
             num_channels=dim_inner,
             num_filters=dim_out,
             filter_size=1,
             param_attr=ParamAttr(initializer=fluid.initializer.Normal(loc=0.0, scale=cfg.NONLOCAL.conv_init_std)),
             bias_attr=ParamAttr(initializer=fluid.initializer.Constant(value=0.)))
     
     self.bn = BatchNorm(
             num_channels=dim_out,
             is_test=test_mode,
             momentum=cfg.NONLOCAL.bn_momentum,
             epsilon=cfg.NONLOCAL.bn_epsilon,
             param_attr=ParamAttr(
                 initializer=fluid.initializer.Constant(
                     value=cfg.NONLOCAL.bn_init_gamma),
                 regularizer=fluid.regularizer.L2Decay(
                     cfg.TRAIN.weight_decay_bn)),
             bias_attr=ParamAttr(
                 regularizer=fluid.regularizer.L2Decay(
                     cfg.TRAIN.weight_decay_bn)))
Example #4
def conv3x3x3(in_planes, out_planes, stride=1):
    return Conv3D(in_planes,
                  out_planes,
                  filter_size=3,
                  stride=stride,
                  padding=1,
                  param_attr=ParamAttr(initializer=Xavier()))
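For context, a usage sketch of this helper in fluid dygraph mode (the input sizes below are illustrative, not from the source):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    conv = conv3x3x3(in_planes=64, out_planes=128, stride=1)
    # NCDHW input: batch=2, 64 channels, depth=8, 32x32 spatial
    x = fluid.dygraph.to_variable(
        np.random.random((2, 64, 8, 32, 32)).astype('float32'))
    y = conv(x)  # padding=1, stride=1 keeps the 8x32x32 extent; 128 output channels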
Example #5
    def __init__(self,
                 inplanes,
                 planes,
                 kernel_size,
                 stride,
                 padding,
                 bias=False,
                 groups=1):

        super(ConvModule, self).__init__()
        self.conv = Conv3D(
            num_channels=inplanes,
            num_filters=planes,
            filter_size=kernel_size,
            stride=stride,
            padding=padding,
            bias_attr=bias,
            groups=groups,
            param_attr=fluid.initializer.XavierInitializer(uniform=True,
                                                           fan_in=None,
                                                           fan_out=None,
                                                           seed=0),
        )
        self.bn = BatchNorm(
            num_channels=planes,
            act='relu',
            param_attr=fluid.initializer.ConstantInitializer(value=1.0,
                                                             force_cpu=False),
            bias_attr=fluid.initializer.ConstantInitializer(value=0.0,
                                                            force_cpu=False))
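Only the layers are defined above; a plausible forward for this module (assumed, not shown in the source) simply composes them:

    def forward(self, x):
        # conv -> batch norm; ReLU is fused into BatchNorm via act='relu'
        return self.bn(self.conv(x))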
Example #6
    def __init__(
        self,
        in_channels=[1024, 1024],
        mid_channels=[1024, 1024],
        out_channels=2048,
        ds_scales=[(1, 1, 1), (1, 1, 1)],
    ):

        super(LevelFusion, self).__init__()
        self.ops = fluid.dygraph.LayerList()
        num_ins = len(in_channels)
        for i in range(num_ins):
            op = Downampling(in_channels[i],
                             mid_channels[i],
                             kernel_size=(1, 1, 1),
                             stride=(1, 1, 1),
                             padding=(0, 0, 0),
                             bias=False,
                             groups=32,
                             norm=True,
                             activation=True,
                             downsample_position='before',
                             downsample_scale=ds_scales[i])
            self.ops.append(op)

        in_dims = np.sum(mid_channels)
        self.fusion_conv = fluid.dygraph.Sequential(
            Conv3D(in_dims, out_channels, 1, 1, 0, bias_attr=False),
            BatchNorm(out_channels, act='relu'),
        )
Example #7
 def __init__(
         self,
         inplanes,
         planes,
         kernel_size=(3, 1, 1),
         stride=(1, 1, 1),
         padding=(1, 0, 0),
         bias=False,
         groups=1,
         norm=False,
         activation=False,
         downsample_position='after',
         downsample_scale=(1, 2, 2),
 ):
     super(Downampling, self).__init__()
     self.conv = Conv3D(inplanes,
                        planes,
                        kernel_size,
                        stride,
                        padding,
                        bias_attr=bias,
                        groups=groups)
     self.norm1 = norm
     self.norm = BatchNorm(planes) if norm else None
     self.activation = activation
     assert (downsample_position in ['before', 'after'])
     self.downsample_position = downsample_position
     self.downsample_scale = downsample_scale
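Only __init__ appears above; a forward sketch assuming the downsampling step is a max pool with pool size and stride equal to downsample_scale, applied before or after the conv according to downsample_position:

 def forward(self, x):
      if self.downsample_position == 'before':
          x = fluid.layers.pool3d(x, pool_size=self.downsample_scale,
                                  pool_type='max', pool_stride=self.downsample_scale)
      x = self.conv(x)
      if self.norm is not None:
          x = self.norm(x)
      if self.activation:
          x = fluid.layers.relu(x)
      if self.downsample_position == 'after':
          x = fluid.layers.pool3d(x, pool_size=self.downsample_scale,
                                  pool_type='max', pool_stride=self.downsample_scale)
      return x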
Example #8
    def __init__(self,
                 name_scope,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act='relu',
                 is_3d=False):
        super(ConvBNLayer, self).__init__(name_scope)

        self._conv = None
        if is_3d:
            self._conv = Conv3D(num_channels=num_channels,
                                num_filters=num_filters,
                                filter_size=filter_size,
                                stride=stride,
                                padding=(filter_size - 1) // 2,
                                act=None,
                                bias_attr=False)
        else:
            self._conv = Conv2D(num_channels=num_channels,
                                num_filters=num_filters,
                                filter_size=filter_size,
                                stride=stride,
                                padding=(filter_size - 1) // 2,
                                act=None,
                                bias_attr=False)

        self._batch_norm = BatchNorm(num_filters, act=act)
Example #9
 def forward(self, x):
     if not self.fcn_testing:
         if len(x.shape) == 4:
             x = fluid.layers.unsqueeze(x, axes=2)
         assert x.shape[1] == self.in_channels
         assert x.shape[2] == self.temporal_feature_size
         assert x.shape[3] == self.spatial_feature_size
         assert x.shape[4] == self.spatial_feature_size
         if self.with_avg_pool:
             x = fluid.layers.pool3d(x, pool_size=(self.temporal_feature_size, self.spatial_feature_size, self.spatial_feature_size), pool_type='avg',
                                     pool_stride=(1, 1, 1), pool_padding=(0, 0, 0))
         if self.dropout_ratio != 0:
             x = fluid.layers.dropout(x, self.dropout_ratio)
         x = fluid.layers.reshape(x, (x.shape[0], -1))
         cls_score = self.fc_cls(x)
         return cls_score
     else:
         if self.with_avg_pool:
             x = fluid.layers.pool3d(x, pool_size=(self.temporal_feature_size, self.spatial_feature_size, self.spatial_feature_size), pool_type='avg',
                                     pool_stride=(1, 1, 1), pool_padding=(0, 0, 0))
         if self.new_cls is None:
             self.new_cls = Conv3D(self.in_channels, self.num_classes, 1, 1, 0)
              # TODO: this part needs more thought
             self.fc_cls = None
         class_map = self.new_cls(x)
         return class_map
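The TODO above concerns turning the trained fc classifier into the 1x1x1 Conv3D for fully convolutional testing; a hedged sketch of that branch (the weight layout and reshape are assumptions, not taken from the source):

          if self.new_cls is None:
              self.new_cls = Conv3D(self.in_channels, self.num_classes, 1, 1, 0)
              # copy fc weights (in_channels, num_classes) into the conv kernel
              # (num_classes, in_channels, 1, 1, 1); layout assumed
              fc_w = self.fc_cls.weight.numpy().transpose(1, 0)
              self.new_cls.weight.set_value(
                  fc_w.reshape(self.num_classes, self.in_channels, 1, 1, 1))
              self.new_cls.bias.set_value(self.fc_cls.bias.numpy())
              self.fc_cls = None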
Example #10
 def __init__(self, inplanes, planes, downsample_scale=8):
     super(TemporalModulation, self).__init__()
     self.conv = Conv3D(inplanes,
                        planes, (3, 1, 1), (1, 1, 1), (1, 0, 0),
                        bias_attr=False,
                        groups=32)
     self.downsample_scale = downsample_scale
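A forward sketch for this module (assumed; only __init__ is shown), applying the grouped temporal conv and then max pooling along the time axis by downsample_scale:

 def forward(self, x):
      x = self.conv(x)
      x = fluid.layers.pool3d(x,
                              pool_size=(self.downsample_scale, 1, 1),
                              pool_type='max',
                              pool_stride=(self.downsample_scale, 1, 1),
                              pool_padding=(0, 0, 0))
      return x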
Example #11
def conv1x1x1(in_planes, out_planes, stride=1):
    return Conv3D(in_planes,
                  out_planes,
                  filter_size=1,
                  stride=stride,
                  bias_attr=False,
                  param_attr=fluid.initializer.MSRAInitializer(uniform=False,
                                                               fan_in=None,
                                                               seed=10))
Example #12
def conv3d(in_planes, out_planes, filter_size=3, stride=1, padding=1):
    # data = numpy.random.random((5, 3, 12, 32, 32)).astype('float32')
    # conv3d = fluid.dygraph.nn.Conv3D(
    #       'Conv3D', num_filters=2, filter_size=3, act="relu")
    # ret = conv3d(fluid.dygraph.base.to_variable(data))
    # 3*3 conv with padding
    return Conv3D(
        num_channels=in_planes,
        num_filters=out_planes,
        filter_size=filter_size,
        stride=stride,
        padding=padding,
    )
Example #13
    def __init__(self,
                 block,
                 layers,
                 block_inplanes,
                 n_input_channels=3,
                 conv1_t_size=7,
                 conv1_t_stride=1,
                 no_max_pool=False,
                 shortcut_type='B',
                 widen_factor=1.0,
                 n_classes=400):
        super(ResNet, self).__init__()
        block_inplanes = [int(x * widen_factor) for x in block_inplanes]

        self.in_planes = block_inplanes[0]
        self.no_max_pool = no_max_pool
        self.conv1 = Conv3D(n_input_channels,
                            self.in_planes,
                            filter_size=(conv1_t_size, 7, 7),
                            stride=(conv1_t_stride, 2, 2),
                            padding=(conv1_t_size // 2, 3, 3))
        self.bn1 = BatchNorm(self.in_planes)
        # self.maxpool = MaxPool3D(filter_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],
                                       shortcut_type)

        self.layer2 = self._make_layer(block,
                                       block_inplanes[1],
                                       layers[1],
                                       shortcut_type,
                                       stride=2)
        self.layer3 = self._make_layer(block,
                                       block_inplanes[2],
                                       layers[2],
                                       shortcut_type,
                                       stride=2)
        self.layer4 = self._make_layer(block,
                                       block_inplanes[3],
                                       layers[3],
                                       shortcut_type,
                                       stride=2)

        # self.avgpool = AdaptiveAvgPool3d(1, 1, 1)
        self.fc = Linear(block_inplanes[3] * block.expansion,
                         n_classes,
                         param_attr=ParamAttr(initializer=Xavier()),
                         act='sigmoid')
Example #14
File: models.py  Project: Desny/ECO
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 padding=0,
                 act=None):
        super(Conv3DBNLayer, self).__init__()
        self._conv = Conv3D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=padding,
                            act=act)

        self._batch_norm = BatchNorm(num_filters, act=act)
Example #15
 def __init__(self,
              name_scope,
              num_channels,
              num_filters,
              filter_size,
              stride=1,
              groups=1,
              act=None):
     super(ConvBNLayer, self).__init__(name_scope)
      # convolution op
     self._conv = Conv3D(num_channels=num_channels,
                         num_filters=num_filters,
                         filter_size=filter_size,
                         stride=stride,
                         padding=(filter_size - 1) // 2,
                         groups=groups,
                         act=None,
                         bias_attr=False)
      # classic residual-block pattern: a convolution followed by batch norm
     self._batch_norm = BatchNorm(num_filters, act=act)
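A minimal forward sketch for this layer (assumed, not shown in the source):

 def forward(self, inputs):
      # convolution followed by batch norm, per the comment above
      return self._batch_norm(self._conv(inputs))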
Example #16
    def __init__(self, name, cfg, mode='train'):
        super(NonLocal, self).__init__()
        self.name = name
        self.cfg = cfg
        self.mode = mode
        self.is_training = (mode == 'train')
        self.linear = Linear(10, 10)
        self.get_config()

        self.use_temp_convs_set, self.temp_strides_set, self.pool_stride = resnet_video.obtain_arc(
            cfg.MODEL.video_arc_choice, cfg[mode.upper()]['video_length'])
        self.conv3d = Conv3D(
            num_channels=3,
            num_filters=64,
            filter_size=[1 + self.use_temp_convs_set[0][0] * 2, 7, 7],
            stride=[self.temp_strides_set[0][0], 2, 2],
            padding=[self.use_temp_convs_set[0][0], 3, 3],
            param_attr=ParamAttr(initializer=fluid.initializer.MSRA()),
            bias_attr=False)

        self.test_mode = False if (mode == 'train') else True
        self.bn_conv1 = BatchNorm(
            num_channels=64,
            is_test=self.test_mode,
            momentum=cfg.MODEL.bn_momentum,
            epsilon=cfg.MODEL.bn_epsilon,
            param_attr=ParamAttr(regularizer=fluid.regularizer.L2Decay(
                cfg.TRAIN.weight_decay_bn)),
            bias_attr=ParamAttr(regularizer=fluid.regularizer.L2Decay(
                cfg.TRAIN.weight_decay_bn)),
            moving_mean_name="bn_conv1_mean",
            moving_variance_name="bn_conv1_variance")

        self.fc = Linear(
            2048,
            cfg.MODEL.num_classes,
            param_attr=ParamAttr(initializer=fluid.initializer.Normal(
                loc=0.0, scale=cfg.MODEL.fc_init_std)),
            bias_attr=ParamAttr(initializer=fluid.initializer.Constant(
                value=0.)))
Example #17
    def __init__(
        self,
        name_scope,
        num_channels,
        num_filters,
        filter_size=7,
        stride=1,
        padding=0,
        groups=1,
        act=None,
    ):
        super(ConvBNLayer, self).__init__(name_scope)

        self._conv = Conv3D(num_channels=num_channels,
                            num_filters=num_filters,
                            filter_size=filter_size,
                            stride=stride,
                            padding=padding,
                            groups=groups,
                            act=None,
                            bias_attr=False)

        self._batch_norm = BatchNorm(num_filters,
                                     act=act)  # note: this is the 2D BatchNorm, needs attention later
Example #18
    def __init__(self,
                 inplanes,
                 planes,
                 spatial_stride=1,
                 temporal_stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 if_inflate=True,
                 inflate_style='3x1x1',
                 if_nonlocal=True,
                 nonlocal_cfg=None,
                 with_cp=False):
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert inflate_style in ['3x1x1', '3x3x3']
        self.inplanes = inplanes
        self.planes = planes

        if style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = spatial_stride
            self.conv1_stride_t = 1
            self.conv2_stride_t = temporal_stride
        else:
            self.conv1_stride = spatial_stride
            self.conv2_stride = 1
            self.conv1_stride_t = temporal_stride
            self.conv2_stride_t = 1
        if if_inflate:
            if inflate_style == '3x1x1':
                self.conv1 = Conv3D(num_channels=inplanes,
                                    num_filters=planes,
                                    filter_size=(3, 1, 1),
                                    stride=(self.conv1_stride_t,
                                            self.conv1_stride,
                                            self.conv1_stride),
                                    padding=(1, 0, 0),
                                    bias_attr=False)
                self.conv2 = Conv3D(num_channels=planes,
                                    num_filters=planes,
                                    filter_size=(1, 3, 3),
                                    stride=(self.conv2_stride_t,
                                            self.conv2_stride,
                                            self.conv2_stride),
                                    padding=(0, dilation, dilation),
                                    dilation=(1, dilation, dilation),
                                    bias_attr=False)
            else:
                self.conv1 = Conv3D(num_channels=inplanes,
                                    num_filters=planes,
                                    filter_size=1,
                                    stride=(self.conv1_stride_t,
                                            self.conv1_stride,
                                            self.conv1_stride),
                                    bias_attr=False)
                self.conv2 = Conv3D(num_channels=planes,
                                    num_filters=planes,
                                    filter_size=3,
                                    stride=(self.conv2_stride_t,
                                            self.conv2_stride,
                                            self.conv2_stride),
                                    padding=(1, dilation, dilation),
                                    dilation=(1, dilation, dilation),
                                    bias_attr=False)
        else:
            self.conv1 = Conv3D(num_channels=inplanes,
                                num_filters=planes,
                                filter_size=1,
                                stride=(1, self.conv1_stride,
                                        self.conv1_stride),
                                bias_attr=False)
            self.conv2 = Conv3D(num_channels=planes,
                                num_filters=planes,
                                filter_size=(1, 3, 3),
                                stride=(1, self.conv2_stride,
                                        self.conv2_stride),
                                padding=(0, dilation, dilation),
                                dilation=(1, dilation, dilation),
                                bias_attr=False)

        self.bn1 = BatchNorm(planes, act='relu')
        self.bn2 = BatchNorm(planes, act='relu')
        self.conv3 = Conv3D(planes,
                            planes * self.expansion,
                            filter_size=1,
                            bias_attr=False)
        self.bn3 = BatchNorm(planes * self.expansion)
        self.downsample = downsample
        self.spatial_tride = spatial_stride
        self.temporal_tride = temporal_stride
        self.dilation = dilation
        self.with_cp = with_cp

        if if_nonlocal and nonlocal_cfg is not None:
            nonlocal_cfg_ = nonlocal_cfg.copy()
            nonlocal_cfg_['in_channels'] = planes * self.expansion
            self.nonlocal_block = None  #build_nonlocal_block(nonlocal_cfg_)
        else:
            self.nonlocal_block = None
Example #19
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   spatial_stride=1,
                   temporal_stride=1,
                   dilation=1,
                   style='pytorch',
                   inflate_freq=1,
                   inflate_style='3x1x1',
                   nonlocal_freq=1,
                   nonlocal_cfg=None,
                   with_cp=False):

    inflate_freq = inflate_freq if not isinstance(
        inflate_freq, int) else (inflate_freq, ) * blocks
    nonlocal_freq = nonlocal_freq if not isinstance(
        nonlocal_freq, int) else (nonlocal_freq, ) * blocks
    assert len(inflate_freq) == blocks
    assert len(nonlocal_freq) == blocks
    downsample = None
    if spatial_stride != 1 or inplanes != planes * block.expansion:
        downsample = fluid.dygraph.Sequential(
            Conv3D(num_channels=inplanes,
                   num_filters=planes * block.expansion,
                   filter_size=1,
                   stride=(temporal_stride, spatial_stride, spatial_stride),
                   bias_attr=False),
            BatchNorm(planes * block.expansion),
        )

    layers = []
    layers.append(
        block(inplanes,
              planes,
              spatial_stride,
              temporal_stride,
              dilation,
              downsample,
              style=style,
              if_inflate=(inflate_freq[0] == 1),
              inflate_style=inflate_style,
              if_nonlocal=(nonlocal_freq[0] == 1),
              nonlocal_cfg=nonlocal_cfg,
              with_cp=with_cp))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes,
                  planes,
                  1,
                  1,
                  dilation,
                  style=style,
                  if_inflate=(inflate_freq[i] == 1),
                  inflate_style=inflate_style,
                  if_nonlocal=(nonlocal_freq[i] == 1),
                  nonlocal_cfg=nonlocal_cfg,
                  with_cp=with_cp))

    return fluid.dygraph.Sequential(*layers)
Example #20
    def __init__(self,
                 depth,
                 pretrained=None,
                 pretrained2d=True,
                 num_stages=4,
                 spatial_strides=(1, 2, 2, 2),
                 temporal_strides=(1, 1, 1, 1),
                 dilations=(1, 1, 1, 1),
                 out_indices=(0, 1, 2, 3),
                 conv1_kernel_t=5,
                 conv1_stride_t=2,
                 pool1_kernel_t=1,
                 pool1_stride_t=2,
                 style='pytorch',
                 frozen_stages=-1,
                 inflate_freq=(1, 1, 1, 1),
                 inflate_stride=(1, 1, 1, 1),
                 inflate_style='3x1x1',
                 nonlocal_stages=(-1, ),
                 nonlocal_freq=(0, 1, 1, 0),
                 nonlocal_cfg=None,
                 bn_eval=False,
                 bn_frozen=False,
                 partial_bn=False,
                 with_cp=False):
        super(ResNet_I3D, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
        self.depth = depth
        self.pretrained = pretrained
        self.pretrained2d = pretrained2d
        self.num_stages = num_stages
        assert num_stages >= 1 and num_stages <= 4
        self.spatial_strides = spatial_strides
        self.temporal_strides = temporal_strides
        self.dilations = dilations
        assert len(spatial_strides) == len(temporal_strides) == len(
            dilations) == num_stages
        self.out_indices = out_indices
        assert max(out_indices) < num_stages
        self.style = style
        self.frozen_stages = frozen_stages
        self.inflate_freqs = inflate_freq if not isinstance(
            inflate_freq, int) else (inflate_freq, ) * num_stages
        self.inflate_style = inflate_style
        self.nonlocal_stages = nonlocal_stages
        self.nonlocal_freqs = nonlocal_freq if not isinstance(
            nonlocal_freq, int) else (nonlocal_freq, ) * num_stages
        self.nonlocal_cfg = nonlocal_cfg
        self.bn_eval = bn_eval
        self.bn_frozen = bn_frozen
        self.partial_bn = partial_bn
        self.with_cp = with_cp
        self.pool1_kernel_t = pool1_kernel_t
        self.pool1_stride_t = pool1_stride_t

        self.block, stage_blocks = self.arch_settings[depth]
        self.stage_blocks = stage_blocks[:num_stages]
        self.inplanes = 64

        self.conv1 = Conv3D(3,
                            64,
                            filter_size=(conv1_kernel_t, 7, 7),
                            stride=(conv1_stride_t, 2, 2),
                            padding=((conv1_kernel_t - 1) // 2, 3, 3),
                            bias_attr=False)
        self.bn1 = BatchNorm(64, act='relu')
        #self.maxpool = nn.MaxPool3d(kernel_size=(pool1_kernel_t, 3, 3), stride=(pool1_stride_t, 2, 2),
        #                            padding=(pool1_kernel_t // 2, 1, 1))

        self.res_layers = []
        for i, num_blocks in enumerate(self.stage_blocks):
            spatial_stride = spatial_strides[i]
            temporal_stride = temporal_strides[i]
            dilation = dilations[i]
            planes = 64 * 2**i
            res_layer = make_res_layer(self.block,
                                       self.inplanes,
                                       planes,
                                       num_blocks,
                                       spatial_stride=spatial_stride,
                                       temporal_stride=temporal_stride,
                                       dilation=dilation,
                                       style=self.style,
                                       inflate_freq=self.inflate_freqs[i],
                                       inflate_style=self.inflate_style,
                                       nonlocal_freq=self.nonlocal_freqs[i],
                                       nonlocal_cfg=self.nonlocal_cfg
                                       if i in self.nonlocal_stages else None,
                                       with_cp=with_cp)
            self.inplanes = planes * self.block.expansion
            layer_name = 'layer{}'.format(i + 1)
            self.add_sublayer(layer_name, res_layer)
            self.res_layers.append(layer_name)

        self.feat_dim = self.block.expansion * 64 * 2**(
            len(self.stage_blocks) - 1)
Example #21
    def __init__(self,
                 name_scope,
                 layers=50,
                 class_dim=102,
                 seg_num=10,
                 weight_devay=None):
        super(ResNet3D, self).__init__(name_scope)

        self.layers = layers
        self.seg_num = seg_num
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, "supported layers are {} but input layer is {}".format(
            supported_layers, layers)

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        # before the residual stages, apply a 7*7*7 convolution and 3*3 max pooling to the input
        # self.conv = ConvBNLayer( self.full_name(), num_channels=3, num_filters=64, filter_size=7, stride=2, act='relu')
        self.conv = Conv3D(num_channels=3,
                           num_filters=64,
                           filter_size=(7, 7, 7),
                           stride=(1, 2, 2),
                           padding=(7 // 2, 3, 3),
                           bias_attr=False)
        self.bn = BatchNorm(num_channels=64, act='relu')

        # pool3d has no dygraph layer, so a custom Pool3D class (subclassing layers.Layer)
        # wraps the arguments and implements forward (see the sketch after this example)
        self.pool3d_max = Pool3D.Pool3D(pool_size=3,
                                        pool_stride=2,
                                        pool_padding=1,
                                        pool_type='max')

        # residual stages
        self.bottleneck_block_list = []
        num_channels = 64
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                bottleneck_block = self.add_sublayer(
                    "bb_%d_%d" % (block, i),
                    BottleneckBlock(self.full_name(),
                                    num_channels=num_channels,
                                    num_filters=num_filters[block],
                                    stride=2 if i == 0 and block != 0 else 1,
                                    shortcut=shortcut))
                num_channels = bottleneck_block._num_channels_out
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        # average pooling after the residual stages
        self.pool3d_avg = Pool3D.Pool3D(pool_size=7,
                                        pool_type='avg',
                                        global_pooling=True)

        import math
        stdv = 1.0 / math.sqrt(2048 * 1.0)

        # final fc layer
        self.fc = Linear(
            input_dim=num_channels,
            output_dim=class_dim,
            act='softmax',
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv)))
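As noted in the comment above, this fluid version has no dygraph pool3d layer, so it is wrapped in a small custom sublayer. A minimal sketch of such a wrapper, assuming it only forwards its arguments to fluid.layers.pool3d (the actual Pool3D module referenced above may differ):

import paddle.fluid as fluid

class Pool3D(fluid.dygraph.Layer):
    def __init__(self, pool_size=2, pool_type='max', pool_stride=1,
                 pool_padding=0, global_pooling=False):
        super(Pool3D, self).__init__()
        self.pool_size = pool_size
        self.pool_type = pool_type
        self.pool_stride = pool_stride
        self.pool_padding = pool_padding
        self.global_pooling = global_pooling

    def forward(self, x):
        # delegate to the functional pool3d op
        return fluid.layers.pool3d(x,
                                   pool_size=self.pool_size,
                                   pool_type=self.pool_type,
                                   pool_stride=self.pool_stride,
                                   pool_padding=self.pool_padding,
                                   global_pooling=self.global_pooling)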
Example #22
    def __init__(self, name_scope):
        super(ECO3dNet, self).__init__(name_scope)
        self.res3a_2 = ConvBNLayer(self.full_name(),
                                   num_channels=96,
                                   num_filters=128,
                                   filter_size=3,
                                   stride=1,
                                   act='relu',
                                   is_3d=True)

        self.res3b_1 = ConvBNLayer(self.full_name(),
                                   num_channels=128,
                                   num_filters=128,
                                   filter_size=3,
                                   stride=1,
                                   act='relu',
                                   is_3d=True)

        self.res3b_2 = ConvBNLayer(self.full_name(),
                                   num_channels=128,
                                   num_filters=128,
                                   filter_size=3,
                                   stride=1,
                                   act='relu',
                                   is_3d=True)

        self.res3b_2 = ConvBNLayer(self.full_name(),
                                   num_channels=128,
                                   num_filters=128,
                                   filter_size=3,
                                   stride=1,
                                   act='relu',
                                   is_3d=True)

        self.res4a_1 = ConvBNLayer(self.full_name(),
                                   num_channels=128,
                                   num_filters=256,
                                   filter_size=3,
                                   stride=2,
                                   act='relu',
                                   is_3d=True)

        self.res4a_2 = Conv3D(num_channels=256,
                              num_filters=256,
                              filter_size=3,
                              stride=1,
                              padding=1,
                              act=None,
                              bias_attr=False)

        self.res4a_down = Conv3D(num_channels=128,
                                 num_filters=256,
                                 filter_size=3,
                                 stride=2,
                                 padding=1,
                                 act=None,
                                 bias_attr=False)

        self.res_4a_bn = BatchNorm(256, act="relu")

        self.res4b_1 = ConvBNLayer(self.full_name(),
                                   num_channels=256,
                                   num_filters=256,
                                   filter_size=3,
                                   stride=1,
                                   act='relu',
                                   is_3d=True)

        self.res4b_2 = Conv3D(num_channels=256,
                              num_filters=256,
                              filter_size=3,
                              stride=1,
                              padding=1,
                              act=None,
                              bias_attr=False)

        self.res_4b_bn = BatchNorm(256, act="relu")

        self.res5a_1 = ConvBNLayer(self.full_name(),
                                   num_channels=256,
                                   num_filters=512,
                                   filter_size=3,
                                   stride=2,
                                   act='relu',
                                   is_3d=True)

        self.res5a_2 = Conv3D(num_channels=512,
                              num_filters=512,
                              filter_size=3,
                              stride=1,
                              padding=1,
                              act=None,
                              bias_attr=False)

        self.res5a_down = Conv3D(num_channels=256,
                                 num_filters=512,
                                 filter_size=3,
                                 stride=2,
                                 padding=1,
                                 act=None,
                                 bias_attr=False)

        self.res5a_bn = BatchNorm(512, act="relu")

        self.res5b_1 = ConvBNLayer(self.full_name(),
                                   num_channels=512,
                                   num_filters=512,
                                   filter_size=3,
                                   stride=1,
                                   act='relu',
                                   is_3d=True)

        self.res5b_2 = Conv3D(num_channels=512,
                              num_filters=512,
                              filter_size=3,
                              stride=1,
                              padding=1,
                              act=None,
                              bias_attr=False)

        self.res5b_bn = BatchNorm(512, act="relu")
Example #23
    def __init__(
        self,
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        spatial_modulation_config=None,
        temporal_modulation_config=None,
        upsampling_config=None,
        downsampling_config=None,
        level_fusion_config=None,
        aux_head_config=None,
    ):
        super(TPN, self).__init__()
        assert isinstance(in_channels, list)
        assert isinstance(out_channels, int)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)

        spatial_modulation_config = spatial_modulation_config
        temporal_modulation_config = temporal_modulation_config
        upsampling_config = upsampling_config
        downsampling_config = downsampling_config
        aux_head_config = aux_head_config
        level_fusion_config = level_fusion_config
        self.temporal_modulation_ops = fluid.dygraph.LayerList()
        self.upsampling_ops = fluid.dygraph.LayerList()
        self.downsampling_ops = fluid.dygraph.LayerList()
        self.level_fusion_op = LevelFusion(**level_fusion_config)
        self.spatial_modulation = SpatialModulation(
            **spatial_modulation_config)
        for i in range(0, self.num_ins, 1):
            inplanes = in_channels[-1]
            planes = out_channels

            if temporal_modulation_config is not None:
                param = temporal_modulation_config.param
                param.downsample_scale = temporal_modulation_config.scales[i]
                param.inplanes = inplanes
                param.planes = planes
                print(inplanes, planes)
                print(param)
                temporal_modulation = TemporalModulation(**param)
                self.temporal_modulation_ops.append(temporal_modulation)

            if i < self.num_ins - 1:
                if upsampling_config is not None:

                    upsampling = Upsampling(**upsampling_config)
                    self.upsampling_ops.append(upsampling)

                if downsampling_config is not None:
                    param = downsampling_config.param
                    param.inplanes = planes
                    param.planes = planes
                    param.downsample_scale = downsampling_config.scales
                    downsampling = Downampling(**param)
                    self.downsampling_ops.append(downsampling)

        out_dims = level_fusion_config.out_channels

        # Two pyramids
        self.level_fusion_op2 = LevelFusion(**level_fusion_config)

        self.pyramid_fusion_op = fluid.dygraph.Sequential(
            Conv3D(out_dims * 2, 2048, 1, 1, 0, bias_attr=False),
            BatchNorm(2048, act='relu'))

        if aux_head_config is not None:
            aux_head_config.inplanes = self.in_channels[-2]
            self.aux_head = AuxHead(**aux_head_config)
        else:
            self.aux_head = None
Example #24
    def __init__(self,
                 name_scope,
                 channels=1,
                 bottleneck=32,
                 params=[1, 1, 1, 1, 1],
                 n_iter=10,
                 act=None):
        super(FlowLayer, self).__init__(name_scope)
        self.name_scope = name_scope
        self.n_iter = n_iter
        self._batch_norm = fluid.BatchNorm(channels, act=act)
        self._conv = Conv2D(32,
                            num_filters=32,
                            filter_size=1,
                            stride=1,
                            padding=0,
                            groups=1,
                            act=None,
                            bias_attr=False)
        # self.bottleneck = nn.Conv3d(channels, bottleneck, stride=1, padding=0, bias=False, kernel_size=1)

        self.bottleneck = Conv3D(bottleneck,
                                 channels,
                                 1,
                                 stride=1,
                                 padding=0,
                                 bias_attr=None,
                                 act=None)
        # self.unbottleneck = nn.Conv3d(bottleneck*2, channels, stride=1, padding=0, bias=False, kernel_size=1)
        self.unbottleneck = Conv3D(bottleneck * 2,
                                   channels,
                                   stride=1,
                                   padding=0,
                                   bias_attr=False,
                                   filter_size=1)
        # self.bn = nn.BatchNorm3d(channels)
        self.bn = BatchNorm(channels)
        #print(channels)
        channels = bottleneck

        if params[0]:
            #self.img_grad = nn.Parameter(torch.FloatTensor([[[[-0.5,0,0.5]]]]).repeat(channels,channels,1,1))
             # when stop_gradient is True, gradients will not flow back through this variable;
             # set var.stop_gradient = False to allow backprop
            #self.img_grad2 = nn.Parameter(torch.FloatTensor([[[[-0.5,0,0.5]]]]).transpose(3,2).repeat(channels,channels,1,1))
            img_grad_temp = np.array([[[[-0.5, 0, 0.5]]]
                                      ]).astype("float32").repeat(channels, 1)
            img_grad_temp2 = np.array([[[[-0.5, 0,
                                          0.5]]]]).astype("float32").transpose(
                                              0, 1, 3, 2).repeat(channels, 1)
            img_grad_temp = np.reshape(img_grad_temp, [channels, 1, 1, 3])
            img_grad_temp2 = np.reshape(img_grad_temp2, [channels, 1, 3, 1])
            self.img_grad = self.create_parameter(shape=[channels, 1, 1, 3],
                                                  dtype="float32")
            self.img_grad2 = self.create_parameter(shape=[channels, 1, 3, 1],
                                                   dtype="float32")
            self.img_grad.set_value(img_grad_temp)
            self.img_grad2.set_value(img_grad_temp2)
        else:
            #self.img_grad = nn.Parameter(torch.FloatTensor([[[[-0.5,0,0.5]]]]).repeat(channels,channels,1,1), requires_grad=False)
            img_grad_temp = np.array([[[[-0.5, 0, 0.5]]]
                                      ]).astype("float32").repeat(channels, 1)
            img_grad_temp2 = np.array([[[[-0.5, 0,
                                          0.5]]]]).astype("float32").transpose(
                                              0, 1, 3, 2).repeat(channels, 1)
            img_grad_temp = np.reshape(img_grad_temp, [channels, 1, 1, 3])
            img_grad_temp2 = np.reshape(img_grad_temp2, [channels, 1, 3, 1])
            self.img_grad = self.create_parameter(shape=[channels, 1, 1, 3],
                                                  dtype="float32")
            self.img_grad2 = self.create_parameter(shape=[channels, 1, 3, 1],
                                                   dtype="float32")
            self.img_grad.set_value(img_grad_temp)
            self.img_grad2.set_value(img_grad_temp2)
            self.img_grad.stop_gradient = True
            self.img_grad2.stop_gradient = True

        self.conv2dimg_grad = Conv2D(
            32,
            num_filters=1,
            filter_size=(1, 3),
            stride=1,
            padding=(0, 1),
            groups=32,
            act=None,
            bias_attr=False)  #param_attr=self.img_grad,
        self.conv2dimg_grad2 = Conv2D(
            32,
            num_filters=1,
            filter_size=(3, 1),
            stride=1,
            padding=(1, 0),
            groups=32,
            act=None,
            bias_attr=False)  #param_attr=self.img_grad2,
        self.conv2dimg_grad.weight = self.img_grad
        self.conv2dimg_grad2.weight = self.img_grad2
        self.prelu = fluid.dygraph.PRelu(mode='all')
        if params[1]:
            #self.f_grad  = nn.Parameter(torch.FloatTensor([[[[-1], [1]]]]).repeat(channels, channels, 1, 1))
            #self.f_grad2 = nn.Parameter(torch.FloatTensor([[[[-1], [1]]]]).repeat(channels, channels, 1, 1))
            #self.div     = nn.Parameter(torch.FloatTensor([[[[-1], [1]]]]).repeat(channels, channels, 1, 1))
            #self.div2    = nn.Parameter(torch.FloatTensor([[[[-1], [1]]]]).repeat(channels, channels, 1, 1))
            img_grad_temp = np.array([[[[-1, 1]]]
                                      ]).astype("float32").repeat(channels,
                                                                  axis=0)
            img_grad_temp = np.reshape(img_grad_temp, [channels, 1, 1, 2])
            img_grad_temp2 = np.array([[[[-1], [1]]]
                                       ]).astype("float32").repeat(channels,
                                                                   axis=0)
            img_grad_temp2 = np.reshape(img_grad_temp2, [channels, 1, 2, 1])
            self.f_grad = self.create_parameter(shape=[channels, 1, 1, 2],
                                                dtype="float32")

            self.f_grad2 = self.create_parameter(shape=[channels, 1, 2, 1],
                                                 dtype="float32")
            self.div = self.create_parameter(shape=[channels, 1, 1, 2],
                                             dtype="float32")
            self.div2 = self.create_parameter(shape=[channels, 1, 2, 1],
                                              dtype="float32")
            self.f_grad.set_value(img_grad_temp)
            self.f_grad2.set_value(img_grad_temp2)
            self.div.set_value(img_grad_temp)
            self.div2.set_value(img_grad_temp2)

        else:
            img_grad_temp = np.array([[[[-1], [1]]]
                                      ]).astype("float32").repeat(channels,
                                                                  axis=0)
            img_grad_temp = np.reshape(img_grad_temp, [channels, 1, 2, 1])
            self.f_grad = self.create_parameter(shape=[channels, 1, 2, 1],
                                                dtype="float32")
            self.f_grad2 = self.create_parameter(shape=[channels, 1, 2, 1],
                                                 dtype="float32")
            self.div = self.create_parameter(shape=[channels, 1, 2, 1],
                                             dtype="float32")
            self.div2 = self.create_parameter(shape=[channels, 1, 2, 1],
                                              dtype="float32")
            self.f_grad.set_value(img_grad_temp)
            self.f_grad2.set_value(img_grad_temp)
            self.div.set_value(img_grad_temp)
            self.div2.set_value(img_grad_temp)
            self.f_grad.stop_gradient = True
            self.f_grad2.stop_gradient = True
            self.div.stop_gradient = True
            self.div2.stop_gradient = True
            print('stop_gradient')
        self.conv2df_grad = Conv2D(32,
                                   num_filters=1,
                                   filter_size=(1, 2),
                                   stride=1,
                                   padding=(0, 0),
                                   groups=32,
                                   act=None,
                                   bias_attr=False)  #param_attr=self.f_grad,
        self.conv2df_grad2 = Conv2D(32,
                                    num_filters=1,
                                    filter_size=(2, 1),
                                    stride=1,
                                    padding=(0, 0),
                                    groups=32,
                                    act=None,
                                    bias_attr=False)  #param_attr=self.f_grad2,
        self.conv2ddiv = Conv2D(32,
                                num_filters=1,
                                filter_size=(1, 2),
                                stride=1,
                                padding=(0, 0),
                                groups=32,
                                act=None,
                                bias_attr=False)  #param_attr=self.div,
        self.conv2ddiv2 = Conv2D(32,
                                 num_filters=1,
                                 filter_size=(2, 1),
                                 stride=1,
                                 padding=(0, 0),
                                 groups=32,
                                 act=None,
                                 bias_attr=False)  #param_attr=self.div2,
        self.conv2df_grad.weight = self.f_grad
        self.conv2df_grad2.weight = self.f_grad2
        self.conv2ddiv.weight = self.div
        self.conv2ddiv2.weight = self.div2
        self.channels = channels
        self.t1 = np.array([0.3]).astype("float32")
        self.l1 = np.array([0.15]).astype("float32")
        self.a1 = np.array([0.25]).astype("float32")

        if params[2]:  # theta
            #self.t = nn.Parameter(torch.FloatTensor([self.t]))
            self.t = self.create_parameter(shape=[1], dtype="float32")
            self.t.set_value(self.t1)
            #print(self.t)
        if params[3]:  # TAU
            #self.l = nn.Parameter(torch.FloatTensor([self.l]))
            self.l = self.create_parameter(shape=[1], dtype="float32")
            self.l.set_value(self.l1)
            #print(self.l)
        if params[4]:  # lambda
            #self.a = nn.Parameter(torch.FloatTensor([self.a]))
            self.a = self.create_parameter(shape=[1], dtype="float32")
            self.a.set_value(self.a1)
Example #25
File: models.py  Project: Desny/ECO
    def __init__(self):
        super(C3D, self).__init__()
        self.res3a_2 = Conv3DBNLayer(num_channels=96,
                                     num_filters=128,
                                     padding=1,
                                     filter_size=3,
                                     act='relu')
        self.res3b_1 = Conv3DBNLayer(num_channels=128,
                                     num_filters=128,
                                     padding=1,
                                     filter_size=3,
                                     act='relu')
        self.res3b_2 = Conv3DBNLayer(num_channels=128,
                                     num_filters=128,
                                     padding=1,
                                     filter_size=3,
                                     act='relu')
        # res3b<=Eltwise<=res3b_2,res3a_2
        self.res3b_bn = BatchNorm(num_channels=128, act='relu')

        self.res4a_1 = Conv3DBNLayer(num_channels=128,
                                     num_filters=256,
                                     padding=1,
                                     filter_size=3,
                                     stride=2,
                                     act='relu')
        self.res4a_2 = Conv3D(num_channels=256,
                              num_filters=256,
                              padding=1,
                              filter_size=3)
        # res4a_down<=Conv3d<=res3b_bn
        self.res4a_down = Conv3D(num_channels=128,
                                 num_filters=256,
                                 padding=1,
                                 filter_size=3,
                                 stride=2)
        # res4a<=Eltwise<=res4a_2,res4a_down
        self.res4a = BatchNorm(num_channels=256, act='relu')

        self.res4b_1 = Conv3DBNLayer(num_channels=256,
                                     num_filters=256,
                                     padding=1,
                                     filter_size=3,
                                     act='relu')
        self.res4b_2 = Conv3DBNLayer(num_channels=256,
                                     num_filters=256,
                                     padding=1,
                                     filter_size=3,
                                     act='relu')
        # res4b<=Eltwise<=res4b_2,res4a
        self.res4b_bn = BatchNorm(num_channels=256, act='relu')

        self.res5a_1 = Conv3DBNLayer(num_channels=256,
                                     num_filters=512,
                                     padding=1,
                                     stride=2,
                                     filter_size=3,
                                     act='relu')
        self.res5a_2 = Conv3D(num_channels=512,
                              num_filters=512,
                              padding=1,
                              filter_size=3,
                              act='relu')
        # res5a_down<=Conv3d<=res4b_bn
        self.res5a_down = Conv3D(num_channels=256,
                                 num_filters=512,
                                 padding=1,
                                 stride=2,
                                 filter_size=3)
        # res5a<=Eltwise<=res5a_2,res5a_down
        self.res5a = BatchNorm(num_channels=512, act='relu')

        self.res5b_1 = Conv3DBNLayer(num_channels=512,
                                     num_filters=512,
                                     padding=1,
                                     filter_size=3,
                                     act='relu')
        self.res5b_2 = Conv3D(num_channels=512,
                              num_filters=512,
                              padding=1,
                              filter_size=3)
        # res5b<=Eltwise<=res5b_2,res5a
        self.res5b = BatchNorm(num_channels=512, act='relu')
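The inline comments describe the element-wise (Eltwise) skip connections between the named layers; a partial forward sketch for the res3 and res4a stages, assuming exactly that wiring (the remaining stages follow the same pattern):

    def forward(self, x):
        # res3b <= Eltwise <= res3b_2, res3a_2
        res3a_2 = self.res3a_2(x)
        res3b = self.res3b_2(self.res3b_1(res3a_2))
        res3b = self.res3b_bn(fluid.layers.elementwise_add(res3a_2, res3b))
        # res4a <= Eltwise <= res4a_2, res4a_down
        res4a = fluid.layers.elementwise_add(self.res4a_2(self.res4a_1(res3b)),
                                             self.res4a_down(res3b))
        return self.res4a(res4a)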