Example no. 1
File: tpn.py Project: ruyijidan/TPN
    def __init__(
        self,
        in_channels=[1024, 1024],
        mid_channels=[1024, 1024],
        out_channels=2048,
        ds_scales=[(1, 1, 1), (1, 1, 1)],
    ):
        super(LevelFusion, self).__init__()

        ops = []
        num_ins = len(in_channels)
        for i in range(num_ins):
            op = Downampling(in_channels[i],
                             mid_channels[i],
                             kernel_size=(1, 1, 1),
                             stride=(1, 1, 1),
                             padding=(0, 0, 0),
                             bias=False,
                             groups=32,
                             norm=True,
                             activation=True,
                             downsample_position='before',
                             downsample_scale=ds_scales[i])
            ops.append(op)
        self.ops = Sequential(*ops)

        in_dims = np.sum(mid_channels)
        self.fusion_conv = Sequential(
            nn.Conv3D(in_dims, out_channels, 1, 1, 0, bias_attr=False),
            nn.BatchNorm(out_channels), Relu())
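
A minimal sketch of the fusion step above (assuming the PaddlePaddle 1.x dygraph API these snippets use): features from the two levels are concatenated along the channel axis and merged by a 1x1x1 Conv3D plus BatchNorm, mirroring fusion_conv. The tensor shapes are made up for illustration only.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import BatchNorm, Conv3D, Sequential, to_variable

with fluid.dygraph.guard():
    # 1024 + 1024 concatenated channels in, 2048 fused channels out
    fusion_conv = Sequential(
        Conv3D(num_channels=2048, num_filters=2048, filter_size=1,
               stride=1, padding=0, bias_attr=False),
        BatchNorm(2048, act='relu'),
    )
    a = np.random.rand(1, 1024, 4, 7, 7).astype('float32')  # level-0 features
    b = np.random.rand(1, 1024, 4, 7, 7).astype('float32')  # level-1 features
    x = fluid.layers.concat([to_variable(a), to_variable(b)], axis=1)
    print(fusion_conv(x).shape)  # [1, 2048, 4, 7, 7]
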
Example no. 2
File: tpn.py Project: ruyijidan/TPN
    def __init__(
        self,
        inplanes=[1024, 2048],
        planes=2048,
    ):
        super(SpatialModulation, self).__init__()
        op = []
        for i, dim in enumerate(inplanes):

            ds_factor = planes // dim
            ds_num = int(np.log2(ds_factor))
            # print('ds_num',ds_num)
            # if ds_num < 1:
            #     # None
            #     op.append(Identity())
            #
            # else:
            #     for dsi in range(ds_num):
            #         print('dsi',dsi)
            #         in_factor = 2 ** dsi
            #         out_factor = 2 ** (dsi + 1)
            #         op.append(ConvModule(dim * in_factor, dim * out_factor, kernel_size=(1, 3, 3), stride=(1, 2, 2),
            #                              padding=(0, 1, 1), bias=False))

            # sub_layers.append(op)
            for j in range(ds_num):
                op.append(
                    ConvModule(1024,
                               2048,
                               kernel_size=(1, 3, 3),
                               stride=(1, 2, 2),
                               padding=(0, 1, 1),
                               bias=False))
                op = Sequential(*op)
        self.spatial_modulation = Sequential(op)
Example no. 3
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 kernel_size,
                 num_layers,
                 batch_first=True,
                 return_all_layers=False,
                 training=True):
        super(ConvLSTM, self).__init__()
        self.cell_list = Sequential()
        self._check_kernel_size_consistency(kernel_size)

        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_channels = self._extend_for_multilayer(hidden_channels,
                                                      num_layers)
        if not len(kernel_size) == len(hidden_channels) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.input_dim = in_channels
        self.hidden_dim = hidden_channels
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.return_all_layers = return_all_layers
        for i in range(0, self.num_layers):
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]

            self.cell_list.add_sublayer(name='{}'.format(i),
                                        sublayer=ConvLSTMCell(
                                            in_channels=cur_input_dim,
                                            hidden_channels=self.hidden_dim[i],
                                            kernel_size=self.kernel_size[i],
                                            training=training))
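
A short sketch of the add_sublayer pattern used here (assuming fluid 1.x dygraph, with a plain Linear standing in for ConvLSTMCell): each cell is registered under a string name, and the container can later be indexed by that name, which is how the forward pass in Example no. 29 retrieves the cells.

import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear, Sequential

with fluid.dygraph.guard():
    cell_list = Sequential()
    for i in range(3):
        cell_list.add_sublayer(name='{}'.format(i), sublayer=Linear(8, 8))
    print(cell_list['0'])                # sublayer retrieved by its registered name
    print(len(cell_list.parameters()))   # 6: one weight and one bias per Linear
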
Example no. 4
def make_res_layer(block,
                   inplanes,
                   planes,
                   blocks,
                   spatial_stride=1,
                   temporal_stride=1,
                   dilation=1,
                   style='pytorch',
                   inflate_freq=1,
                   inflate_style='3x1x1',
                   nonlocal_freq=1,
                   nonlocal_cfg=None,
                   with_cp=False):
    inflate_freq = inflate_freq if not isinstance(inflate_freq, int) else (inflate_freq,) * blocks
    nonlocal_freq = nonlocal_freq if not isinstance(nonlocal_freq, int) else (nonlocal_freq,) * blocks
    assert len(inflate_freq) == blocks
    assert len(nonlocal_freq) == blocks
    downsample = None
    if spatial_stride != 1 or inplanes != planes * block.expansion:
        downsample = Sequential(
            nn.Conv3D(
                inplanes,
                planes * block.expansion,
                filter_size=1,
                stride=(temporal_stride, spatial_stride, spatial_stride),
                bias_attr=False),
            nn.BatchNorm(planes * block.expansion),
        )

    layers = []
    layers.append(
        block(
            inplanes,
            planes,
            spatial_stride,
            temporal_stride,
            dilation,
            downsample,
            style=style,
            if_inflate=(inflate_freq[0] == 1),
            inflate_style=inflate_style,
            if_nonlocal=(nonlocal_freq[0] == 1),
            nonlocal_cfg=nonlocal_cfg,
            with_cp=with_cp))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(inplanes,
                  planes,
                  1, 1,
                  dilation,
                  style=style,
                  if_inflate=(inflate_freq[i] == 1),
                  inflate_style=inflate_style,
                  if_nonlocal=(nonlocal_freq[i] == 1),
                  nonlocal_cfg=nonlocal_cfg,
                  with_cp=with_cp))

    return Sequential(*layers)
Example no. 5
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = Sequential(
                Conv2D(self.inplanes, planes * block.expansion, filter_size=1, stride=stride, bias_attr=False),
                BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return Sequential(*layers)
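
A self-contained sketch (fluid 1.x dygraph assumed) of the projection shortcut built above: a strided 1x1 Conv2D plus BatchNorm wrapped in Sequential and applied to a dummy feature map. The channel counts are illustrative, not taken from any particular network.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import BatchNorm, Conv2D, Sequential, to_variable

with fluid.dygraph.guard():
    downsample = Sequential(
        Conv2D(64, 256, filter_size=1, stride=2, bias_attr=False),
        BatchNorm(256),
    )
    x = to_variable(np.random.rand(2, 64, 56, 56).astype('float32'))
    print(downsample(x).shape)  # [2, 256, 28, 28] -- channels up, resolution halved
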
Example no. 6
    def __init__(self, sequence_list):

        super(LinConPoo, self).__init__()
        self.__sequence_list = copy.deepcopy(sequence_list)

        if not isinstance(self.__sequence_list, list):
            raise ValueError('sequence_list error')

        self._layers_squence = Sequential()
        self._layers_list = []

        LAYLIST = [ConvBNLayer, Conv2D, Linear, Pool2D]
        for i, layer_arg in enumerate(self.__sequence_list):

            if isinstance(layer_arg, dict):

                layer_class = layer_arg.pop('type')

                if layer_class not in LAYLIST:
                    raise KeyError(
                        "each layer type in sequence_list must be one of `[ConvBNLayer, Conv2D, Linear, Pool2D]`"
                    )

                layer_obj = layer_class(**layer_arg)

            elif isinstance(layer_arg, list):

                layer_class = layer_arg.pop(0)

                if layer_class not in LAYLIST:
                    raise KeyError(
                        "each layer type in sequence_list must be one of `[ConvBNLayer, Conv2D, Linear, Pool2D]`"
                    )

                layer_obj = layer_class(*layer_arg)

            else:
                raise ValueError("sequence_list error")

            layer_name = layer_class.__name__ + str(i)

            self._layers_list.append((layer_name, layer_obj))

            self._layers_squence.add_sublayer(layer_name, layer_obj)

        self._layers_squence = Sequential(*self._layers_list)
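
A brief sketch (fluid 1.x dygraph assumed) of the tuple form of Sequential used on the last line above: passing (name, layer) pairs registers each sublayer under an explicit name rather than a numeric index, so it can be looked up by name later.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear, Sequential, to_variable

with fluid.dygraph.guard():
    named_seq = Sequential(
        ('Linear0', Linear(16, 32)),
        ('Linear1', Linear(32, 8)),
    )
    x = to_variable(np.ones((1, 16), dtype='float32'))
    print(named_seq(x).shape)    # [1, 8] -- layers run in registration order
    print(named_seq['Linear1'])  # sublayer retrieved by its registered name
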
Example no. 7
    def architecture_init(self):
        # shared layers
        ch_in = self.channels
        shared_sub_layers = [
            Conv2D(num_channels=3,
                   num_filters=ch_in,
                   filter_size=3,
                   padding=1,
                   param_attr=weight_initializer,
                   bias_attr=bias_initializer,
                   stride=1,
                   act=None)
        ]
        for i in range(self.repeat_num):
            ch_out = min(ch_in * 2, self.max_conv_dim)
            sub_layer = ResBlock(ch_in,
                                 ch_out,
                                 normalize=False,
                                 downsample=True,
                                 sn=self.sn,
                                 act=None)
            ch_in = ch_out
            shared_sub_layers.append(sub_layer)
        shared_sub_layers.append(Leaky_Relu(alpha=0.2))
        shared_sub_layers.append(
            Conv2D(num_channels=self.max_conv_dim,
                   num_filters=self.max_conv_dim,
                   filter_size=4,
                   padding=0,
                   param_attr=weight_initializer,
                   bias_attr=bias_initializer,
                   stride=1,
                   act=None))
        shared_sub_layers.append(Leaky_Relu(alpha=0.2))

        shared_layers = Sequential(*shared_sub_layers)
        # unshared layers
        unshared_sub_layers = []
        for _ in range(self.num_domains):
            sub_layer = Linear(self.max_conv_dim,
                               self.style_dim,
                               param_attr=weight_initializer,
                               bias_attr=bias_initializer,
                               act=None)
            unshared_sub_layers.append(sub_layer)

        unshared_layers = Sequential(*unshared_sub_layers)
        return shared_layers, unshared_layers
Example no. 8
    def __init__(self, dim, use_bias):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2d([1, 1, 1, 1]),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim),
            ReLU(True)
        ]

        # TODO: what is the point of adding another block here without the ReLU?
        conv_block += [
            ReflectionPad2d([1, 1, 1, 1]),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim)
        ]

        self.conv_block = Sequential(*conv_block)
Example no. 9
    def __init__(self, dim, use_bias):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2d(pad=1),
            Conv2D(num_channels=dim,
                   num_filters=dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim),
            ReLU(inplace=True)
        ]

        conv_block += [
            ReflectionPad2d(pad=1),
            Conv2D(num_channels=dim,
                   num_filters=dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim)
        ]

        self.conv_block = Sequential(*conv_block)
Example no. 10
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        model = [ReflectionPad2d([1,1,1,1]),
                # TODO: spectral normalization
                 Conv2D(input_nc, ndf, filter_size=4, stride=2, padding=0, bias_attr=True),
                 LeakyReLU(0.2, True)]

        for i in range(1, n_layers - 2):
            mult = 2 ** (i - 1)
            model += [ReflectionPad2d([1,1,1,1]),
                      Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=2, padding=0, bias_attr=True),
                      LeakyReLU(0.2, True)]

        mult = 2 ** (n_layers - 2 - 1)
        model += [ReflectionPad2d([1,1,1,1]),
                  Conv2D(ndf * mult, ndf * mult * 2, filter_size=4, stride=1, padding=0, bias_attr=True),
                  LeakyReLU(0.2, True)]

        # Class Activation Map
        mult = 2 ** (n_layers - 2)
        self.gap_fc = Linear(ndf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ndf * mult, 1, bias_attr=False)
        self.conv1x1 = Conv2D(ndf * mult * 2, ndf * mult, filter_size=1, stride=1, bias_attr=True)
        self.leaky_relu = LeakyReLU(0.2, True)

        self.pad = ReflectionPad2d([1,1,1,1])
        self.conv = Conv2D(ndf * mult, 1, filter_size=4, stride=1, padding=0, bias_attr=False)

        self.model = Sequential(*model)
Example no. 11
    def __init__(self, dim, use_bias):
        super(ResnetBlock, self).__init__()
        conv_block = []

        conv_block += [
            ReflectionPad2D(1),
            Conv2D(dim,
                   num_filters=dim,
                   filter_size=3,
                   stride=1,
                   bias_attr=use_bias),
            Instancenorm(),
            ReLU()
        ]

        conv_block += [
            ReflectionPad2D(1),
            Conv2D(dim,
                   num_filters=dim,
                   filter_size=3,
                   stride=1,
                   bias_attr=use_bias),
            Instancenorm()
        ]

        self.conv_block = Sequential(*conv_block)
Example no. 12
    def __init__(self, in_nc=64, out_nc=64, light=True, use_bias=True):
        super(MLP, self).__init__()
        # ops for  Gamma, Beta block
        self.light = light

        FC = [
            Linear(in_nc,
                   out_nc,
                   param_attr=init_w(),
                   bias_attr=init_bias(use_bias),
                   act='relu'),
            Linear(out_nc,
                   out_nc,
                   param_attr=init_w(),
                   bias_attr=init_bias(use_bias),
                   act='relu')
        ]

        self.gamma = Linear(out_nc,
                            out_nc,
                            param_attr=init_w(),
                            bias_attr=init_bias(use_bias))  # FC256
        self.beta = Linear(out_nc,
                           out_nc,
                           param_attr=init_w(),
                           bias_attr=init_bias(use_bias))  # FC256
        self.FC = Sequential(*FC)
Example no. 13
    def __init__(self, dim, use_bias=True):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   param_attr=init_w(),
                   bias_attr=init_bias(use_bias)),
            InstanceNorm(dim),
            ReLU()
        ]

        conv_block += [
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   param_attr=init_w(),
                   bias_attr=init_bias(use_bias)),
            InstanceNorm(dim)
        ]

        self.conv_block = Sequential(*conv_block)
Example no. 14
    def __init__(self, dim, use_bias):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim),
            PRelu(mode="all")
        ]

        conv_block += [
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim)
        ]

        self.conv_block = Sequential(*conv_block)
Example no. 15
    def __init__(self, dim, use_bias):
        super(ResnetBlock, self).__init__()
        conv_block = []
        conv_block += [
            #fluid.layers.pad2d(1),
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim),
            ReLU(False)
            #BatchNorm(dim,act='relu')
        ]

        conv_block += [
            #fluid.layers.pad2d(1),
            ReflectionPad2d(1),
            Conv2D(dim,
                   dim,
                   filter_size=3,
                   stride=1,
                   padding=0,
                   bias_attr=use_bias),
            InstanceNorm(dim)
        ]

        self.conv_block = Sequential(*conv_block)
Example no. 16
    def architecture_init(self):
        layers = []
        layers.append(BatchNorm(self.in_planes))
        layers.append(Relu())
        layers.append(Conv2D(self.in_planes, self.out_planes, 1, 1, bias_attr=bias_initializer_1x1))

        downsample = Sequential(*layers)
        return downsample
Example no. 17
    def architecture_init(self):
        ch_in = self.channels
        ch_out = self.channels
        encoder = []
        decoder = []
        # down/up-sampling blocks
        for i in range(self.repeat_num):
            ch_out = min(ch_in * 2, self.max_conv_dim)
            encoder.append(
                ResBlock(ch_in,
                         ch_out,
                         normalize=True,
                         downsample=True,
                         sn=self.sn))
            decoder.insert(0,
                           AdainResBlock(ch_out,
                                         ch_in,
                                         upsample=True,
                                         sn=self.sn,
                                         w_hpf=self.w_hpf))  # stack-like
            ch_in = ch_out
        # bottleneck blocks
        for i in range(2):
            encoder.append(ResBlock(ch_out, ch_out, normalize=True,
                                    sn=self.sn))
            decoder.insert(
                0, AdainResBlock(ch_out, ch_out, sn=self.sn, w_hpf=self.w_hpf))
        to_rgb_layer = []
        to_rgb_layer.append(InstanceNorm(self.style_dim))
        to_rgb_layer.append(Leaky_Relu(alpha=0.2))
        to_rgb_layer.append(
            Conv2D(num_channels=self.style_dim,
                   num_filters=self.img_ch,
                   filter_size=1,
                   padding=0,
                   param_attr=weight_initializer,
                   bias_attr=bias_initializer,
                   stride=1,
                   act=None))

        encoders = Sequential(*encoder)
        decoders = Sequential(*decoder)
        to_rgb = Sequential(*to_rgb_layer)

        return encoders, decoders, to_rgb
Example no. 18
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        model = [
            ReflectionPad2D(1),
            Spectralnorm(layer=Conv2D(input_nc,
                                      num_filters=ndf,
                                      filter_size=4,
                                      stride=2,
                                      bias_attr=True)),
            LeakyReLU(alpha=0.2)
        ]

        for i in range(1, n_layers - 2):
            mult = 2**(i - 1)
            model += [
                ReflectionPad2D(1),
                Spectralnorm(layer=Conv2D(ndf * mult,
                                          num_filters=ndf * mult * 2,
                                          filter_size=4,
                                          stride=2,
                                          bias_attr=True)),
                LeakyReLU(alpha=0.2)
            ]

        mult = 2**(n_layers - 2 - 1)
        model += [
            ReflectionPad2D(1),
            Spectralnorm(layer=Conv2D(ndf * mult,
                                      num_filters=ndf * mult * 2,
                                      filter_size=4,
                                      stride=1,
                                      bias_attr=True)),
            LeakyReLU(alpha=0.2)
        ]

        # Class Activation Map
        mult = 2**(n_layers - 2)
        self.gap_fc = Spectralnorm(layer=Linear(ndf *
                                                mult, 1, bias_attr=False))
        self.gmp_fc = Spectralnorm(layer=Linear(ndf *
                                                mult, 1, bias_attr=False))
        self.conv1x1 = Conv2D(ndf * mult * 2,
                              num_filters=ndf * mult,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.leaky_relu = LeakyReLU(alpha=0.2)

        self.pad = ReflectionPad2D(1)

        self.conv = Spectralnorm(layer=Conv2D(ndf * mult,
                                              num_filters=1,
                                              filter_size=4,
                                              stride=1,
                                              bias_attr=False))

        self.model = Sequential(*model)
Example no. 19
    def __make_layer(self, in_dim, cfg):
        in_planes = in_dim
        layer_list = []
        for layer in cfg:
            for out_planes in layer:
                layer_list.append(BasicConv(in_planes, out_planes))
                in_planes = out_planes
            layer_list.append(
                Pool2D(pool_size=2, pool_type='max', pool_stride=2))
        return Sequential(*layer_list)
Example no. 20
    def __init__(self, channels, stride):
        '''
        channels: in_channels == out_channels for MixedOp
        '''
        super().__init__()
        self._ops = LayerList()
        for prim_op in PRIMITIVES:
            op = OPS[prim_op](channels, stride, False)
            if 'pool' in prim_op:
                gama, beta = bn_param_config()
                bn = BatchNorm(channels, param_attr=gama, bias_attr=beta)
                op = Sequential(op, bn)
            self._ops.append(op)
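
A hedged sketch (fluid 1.x dygraph assumed) contrasting LayerList with Sequential as used above: LayerList only registers the candidate ops, and the caller decides how to combine their outputs, whereas Sequential chains them. The two convolutions stand in for entries of OPS.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Conv2D, LayerList, to_variable

with fluid.dygraph.guard():
    ops = LayerList()
    for k in (1, 3):  # two candidate ops with matching output shapes
        ops.append(Conv2D(8, 8, filter_size=k, padding=k // 2))
    x = to_variable(np.random.rand(1, 8, 16, 16).astype('float32'))
    y = ops[0](x) + ops[1](x)  # mix the candidate outputs explicitly
    print(y.shape)  # [1, 8, 16, 16]
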
Example no. 21
    def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            if shortcut_type == 'A':
                # Here the downsampling would be done with a 1x1 conv; not implemented yet
                pass
            else:
                downsample = Sequential(
                    conv1x1x1(self.in_planes, planes * block.expansion, stride),
                    BatchNorm(planes * block.expansion))

        layers = []
        layers.append(
            block(in_planes=self.in_planes,
                  planes=planes,
                  stride=stride,
                  downsample=downsample))
        self.in_planes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.in_planes, planes))

        return Sequential(*layers)
Example no. 22
    def architecture_init(self):
        shared_sub_layers = [
            Linear(self.latent_dim,
                   self.hidden_dim,
                   param_attr=weight_initializer,
                   bias_attr=bias_initializer,
                   act=None)
        ]
        shared_sub_layers.append(Relu())
        for i in range(3):
            shared_sub_layers.append(
                Linear(self.hidden_dim,
                       self.hidden_dim,
                       param_attr=weight_initializer,
                       bias_attr=bias_initializer,
                       act=None))
            shared_sub_layers.append(Relu())
        shared_layers = Sequential(*shared_sub_layers)
        unshared_layer = []
        for n_d in range(self.num_domains):
            unshared_sub_layers = []
            for i in range(3):
                unshared_sub_layers.append(
                    Linear(self.hidden_dim,
                           self.hidden_dim,
                           param_attr=weight_initializer,
                           bias_attr=bias_initializer,
                           act=None))
                unshared_sub_layers.append(Relu())
            unshared_sub_layers.append(
                Linear(self.hidden_dim,
                       self.style_dim,
                       param_attr=weight_initializer,
                       bias_attr=bias_initializer,
                       act=None))
            unshared_layer.append(Sequential(*unshared_sub_layers))
        unshared_layers = Sequential(*unshared_layer)

        return shared_layers, unshared_layers
Example no. 23
    def __init__(self,
                 gene,
                 in_channels,
                 init_node_c,
                 out_channels,
                 depth,
                 n_nodes,
                 drop_rate=0):
        '''
        gene: Genotype, searched architecture of a cell
        in_channels: number of input (RGB) channels.
        init_node_c: initial number of filters (output channels) for each node.
        out_channels: number of target classes.
        depth: Number of cells.
        n_nodes: Number of nodes in each cell.
        drop_rate: dropout rate.
        '''
        super().__init__()
        stem_c = min(in_channels, n_nodes) * init_node_c  # stem out_channels
        self.stem = Sequential(
            Conv2D(in_channels,
                   stem_c,
                   3,
                   padding=1,
                   param_attr=ParamAttr(initializer=MSRAInitializer()),
                   bias_attr=False), BatchNorm(stem_c))
        c0 = c1 = stem_c
        node_c = init_node_c  # node out_channels
        self.cells = LayerList()
        reduction_prev = False
        reduce_layers = [depth // 3, 2 * depth // 3]
        for i in range(depth):
            if i in reduce_layers:
                node_c *= 2
                reduction = True
            else:
                reduction = False
            cell = SearchedCell(gene, n_nodes, c0, c1, node_c, reduction,
                                reduction_prev, drop_rate)
            reduction_prev = reduction
            self.cells.append(cell)
            c0, c1 = c1, cell.out_channels

        self.global_pooling = Pool2D(pool_type='avg', global_pooling=True)
        self.classifier = Linear(
            input_dim=c1,
            output_dim=out_channels,
            param_attr=ParamAttr(initializer=MSRAInitializer()),
            bias_attr=ParamAttr(initializer=MSRAInitializer()))
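
A compact sketch (fluid 1.x dygraph assumed) of the classification head defined above: global average pooling followed by a Linear classifier. The channel count 64 and class count 10 are made-up values for illustration.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear, Pool2D, to_variable

with fluid.dygraph.guard():
    global_pooling = Pool2D(pool_type='avg', global_pooling=True)
    classifier = Linear(input_dim=64, output_dim=10)
    x = to_variable(np.random.rand(2, 64, 8, 8).astype('float32'))
    feat = global_pooling(x)                                  # (2, 64, 1, 1)
    logits = classifier(fluid.layers.reshape(feat, [2, 64]))  # flatten, then classify
    print(logits.shape)  # [2, 10]
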
Example no. 24
    def __init__(self, name_scope, out_chs=20, in_chs=1024, inter_chs=512):
        super(DANet, self).__init__(name_scope)
        name_scope = self.full_name()
        self.in_chs = in_chs
        self.out_chs = out_chs
        self.inter_chs = inter_chs if inter_chs else in_chs

        self.backbone = ResNet(50)
        self.conv5p = Sequential(
            Conv2D(self.in_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'),
        )
        self.conv5c = Sequential(
            Conv2D(self.in_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'),
        )

        self.sp = PAM_module(self.inter_chs)
        self.sc = CAM_module(self.inter_chs)

        self.conv6p = Sequential(
            Conv2D(self.inter_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'),
        )
        self.conv6c = Sequential(
            Conv2D(self.inter_chs, self.inter_chs, 3, padding=1),
            BatchNorm(self.inter_chs, act='relu'),
        )

        self.conv7p = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1),
        )
        self.conv7c = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1),
        )
        self.conv7pc = Sequential(
            Dropout(0.1),
            Conv2D(self.inter_chs, self.out_chs, 1),
        )
Example no. 25
    def __init__(self,
                 input_nc,
                 output_nc,
                 ngf=64,
                 n_blocks=6,
                 img_size=256,
                 light=False):
        assert (n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        DownBlock += [
            #fluid.layers.pad2d(3),
            ReflectionPad2d(3),
            Conv2D(num_channels=input_nc,
                   num_filters=ngf,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            InstanceNorm(ngf),
            ReLU(False)
            #BatchNorm(ngf,act='relu')
            #fluid.layers.instance_norm(ngf)
        ]
        # self.conv1=Conv2D(input_nc, ngf, 7)
        # self.instance_norm=InstanceNorm(ngf)
        #self.n_downsampling=n_downsampling
        # Down-Sampling
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                #fluid.layers.pad2d(1),
                ReflectionPad2d(1),
                Conv2D(ngf * mult,
                       ngf * mult * 2,
                       filter_size=3,
                       stride=2,
                       padding=0,
                       bias_attr=False),
                InstanceNorm(ngf * mult * 2),
                ReLU(False)
                #BatchNorm(ngf * mult * 2,act='relu')
                #fluid.layers.instance_norm(ngf * mult * 2)
            ]

        # Down-Sampling Bottleneck
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        #self.renetblock=ResnetBlock(ngf * mult, use_bias=False)
        # Class Activation Map
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False)
        self.conv1x1 = Conv2D(ngf * mult * 2,
                              ngf * mult,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.relu = ReLU(False)

        # Gamma, Beta block
        if self.light:
            FC = [
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu'),
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu')
            ]
        else:
            FC = [
                Linear(img_size // mult * img_size // mult * ngf * mult,
                       ngf * mult,
                       bias_attr=False,
                       act='relu'),
                Linear(ngf * mult, ngf * mult, bias_attr=False, act='relu')
            ]
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i + 1),
                    ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [  #nn.Upsample(scale_factor=2, mode='nearest'),
                #fluid.layers.pad2d(1),
                Upsample(),
                ReflectionPad2d(1),
                Conv2D(ngf * mult,
                       int(ngf * mult / 2),
                       filter_size=3,
                       stride=1,
                       padding=0,
                       bias_attr=False),
                ILN(int(ngf * mult / 2)),
                ReLU(False)
            ]

        UpBlock2 += [
            #fluid.layers.pad2d(3),
            ReflectionPad2d(3),
            Conv2D(ngf,
                   output_nc,
                   filter_size=7,
                   stride=1,
                   padding=0,
                   bias_attr=False),
            Tanh()
        ]

        self.DownBlock = Sequential(*DownBlock)
        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)
Example no. 26
    def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256, light=False):
        '''
        Args:
            input_nc: number of input channels
            output_nc: number of output channels (both are 3 here)
            ngf: base channel number per layer
            n_blocks: number of ResnetBlocks
        '''
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        # A reflection pad of 3 on each side offsets the following 7x7 conv (keeps the spatial size)
        # TODO: Paddle's pad2d lives in fluid.layers and cannot be defined as a layer object,
        # which is awkward, so plain padding is used here for now
        DownBlock += [ReflectionPad2d([3,3,3,3]),
                      Conv2D(input_nc, ngf, filter_size=7, stride=1, padding=0, bias_attr=False),
                      InstanceNorm(ngf),
                      # TODO: Paddle has no standalone ReLU layer; PReLU is used for now,
                      # and its constant will be set to 0 later
                      ReLU(True)]

        # Down-Sampling
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            # channels double each step; stride 2 halves the spatial size
            DownBlock += [ReflectionPad2d([1,1,1,1]),
                          Conv2D(ngf * mult, ngf * mult * 2, filter_size=3, stride=2, padding=0, bias_attr=False),
                          InstanceNorm(ngf * mult * 2),
                          ReLU(True)]

        # Down-Sampling Bottleneck
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False)
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False)
        self.conv1x1 = Conv2D(ngf * mult * 2, ngf * mult, filter_size=1, stride=1, padding=0, bias_attr=True)
        self.relu = ReLU(True)

        # Gamma, Beta block
        if self.light:
            FC = [Linear(ngf * mult, ngf * mult, bias_attr=False),
                  ReLU(True),
                  Linear(ngf * mult, ngf * mult, bias_attr=False),
                  ReLU(True)]
        else:
            # TODO: this part is not fully understood yet
            FC = [Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias_attr=False),
                  ReLU(True),
                  Linear(ngf * mult, ngf * mult, bias_attr=False),
                  ReLU(True)]
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [Upsample(scale_factor=2),
                         ReflectionPad2d([1,1,1,1]),
                         Conv2D(ngf * mult, int(ngf * mult / 2), filter_size=3, stride=1, padding=0, bias_attr=False),
                         ILN(int(ngf * mult / 2)),
                         ReLU(True)]

        UpBlock2 += [ReflectionPad2d([3,3,3,3]),
                     Conv2D(ngf, output_nc, filter_size=7, stride=1, padding=0, bias_attr=False),
                     Tanh(False)]

        self.DownBlock = Sequential(*DownBlock)
        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)
Example no. 27
File: tpn.py Project: ruyijidan/TPN
    def __init__(self,
                 in_channels=[256, 512, 1024, 2048],
                 out_channels=256,
                 spatial_modulation_config=None,
                 temporal_modulation_config=None,
                 upsampling_config=None,
                 downsampling_config=None,
                 level_fusion_config=None,
                 aux_head_config=None,
                 mode=None):
        super(TPN, self).__init__()
        assert isinstance(in_channels, list)
        assert isinstance(out_channels, int)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.mode = mode
        # spatial_modulation_config = Config(spatial_modulation_config) if isinstance(spatial_modulation_config,
        #                                                                             dict) else spatial_modulation_config
        # temporal_modulation_config = Config(temporal_modulation_config) if isinstance(temporal_modulation_config,
        #                                                                               dict) else temporal_modulation_config
        # upsampling_config = Config(upsampling_config) if isinstance(upsampling_config, dict) else upsampling_config
        # downsampling_config = Config(downsampling_config) if isinstance(downsampling_config,
        #                                                                 dict) else downsampling_config
        # aux_head_config = Config(aux_head_config) if isinstance(aux_head_config, dict) else aux_head_config
        # level_fusion_config = Config(level_fusion_config) if isinstance(level_fusion_config,
        #                                                                 dict) else level_fusion_config

        # self.temporal_modulation_ops = nn.ModuleList()
        # self.upsampling_ops = nn.ModuleList()
        # self.downsampling_ops = nn.ModuleList()

        temp_modulation_ops = []
        temp_upsampling_ops = []
        temp_downsampling_ops = []
        for i in range(0, self.num_ins, 1):
            inplanes = in_channels[-1]
            planes = out_channels

            if temporal_modulation_config is not None:
                # overwrite the temporal_modulation_config
                # print(temporal_modulation_config)

                temporal_modulation_config['param']['downsample_scale'] = (
                    temporal_modulation_config['scales'][i])
                temporal_modulation_config['param']['inplanes'] = inplanes
                temporal_modulation_config['param']['planes'] = planes
                temporal_modulation = TemporalModulation(
                    **temporal_modulation_config['param'])
                temp_modulation_ops.append(temporal_modulation)
            self.temporal_modulation_ops = Sequential(*temp_modulation_ops)

            if i < self.num_ins - 1:
                if upsampling_config is not None:
                    # overwrite the upsampling_config
                    upsampling = Upsampling(**upsampling_config)
                    temp_upsampling_ops.append(upsampling)
                self.upsampling_ops = Sequential(*temp_upsampling_ops)
                if downsampling_config is not None:
                    # overwrite the downsampling_config
                    downsampling_config['param']['inplanes'] = planes
                    downsampling_config['param']['planes'] = planes
                    downsampling_config['param'][
                        'downsample_scale'] = downsampling_config['scales']
                    downsampling = Downampling(**downsampling_config['param'])
                    temp_downsampling_ops.append(downsampling)
                self.downsampling_ops = Sequential(*temp_downsampling_ops)

        self.level_fusion_op = LevelFusion()  # **level_fusion_config
        self.spatial_modulation = SpatialModulation()  # **spatial_modulation_config
        out_dims = level_fusion_config['out_channels']

        # Two pyramids
        self.level_fusion_op2 = LevelFusion(**level_fusion_config)

        self.pyramid_fusion_op = Sequential(
            nn.Conv3D(out_dims * 2, 2048, 1, 1, 0, bias_attr=False),
            nn.BatchNorm(2048), Relu())

        # overwrite aux_head_config
        if aux_head_config is not None:
            aux_head_config['inplanes'] = self.in_channels[-2]
            self.aux_head = AuxHead(**aux_head_config)
        else:
            self.aux_head = None
Example no. 28
    def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, img_size=256, light=False):
        
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()

        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.n_blocks = n_blocks
        self.img_size = img_size
        self.light = light

        DownBlock = []
        DownBlock += [
                    ReflectionPad2D(3),
                    Conv2D(num_channels=input_nc, num_filters=ngf, filter_size=7, stride=1, padding=0, bias_attr=False),
                    InstanceNorm(self.ngf),
                    Relu(),
            ]

        # Down-Sampling
        n_downsampling = 2
        for i in range(n_downsampling):
            mult = 2**i
            DownBlock += [
                ReflectionPad2D(1),
                Conv2D(num_channels=ngf * mult, num_filters=ngf * mult * 2, filter_size=3, stride=2, padding=0, bias_attr=False),
                InstanceNorm(ngf * mult * 2),
                Relu(),
            ]

        # Down-Sampling Bottleneck
        mult = 2**n_downsampling
        for i in range(n_blocks):
            DownBlock += [ResnetBlock(ngf * mult, use_bias=False)]

        # Class Activation Map
        self.gap_fc = Linear(ngf * mult, 1, bias_attr=False, act='sigmoid')
        self.gmp_fc = Linear(ngf * mult, 1, bias_attr=False, act='sigmoid')

        self.conv1x1 = Conv2D(ngf * mult * 2, ngf * mult, filter_size=1, stride=1, bias_attr=True)
        self.relu = Relu()

        # Gamma, Beta block
        if self.light:
            FC = [
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                Relu(),
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                Relu(),
            ]
        else:
            FC = [
                Linear(img_size // mult * img_size // mult * ngf * mult, ngf * mult, bias_attr=False),
                Relu(),
                Linear(ngf * mult, ngf * mult, bias_attr=False),
                Relu(),
            ]
        self.gamma = Linear(ngf * mult, ngf * mult, bias_attr=False)
        self.beta = Linear(ngf * mult, ngf * mult, bias_attr=False)

        # Up-Sampling Bottleneck
        for i in range(n_blocks):
            setattr(self, 'UpBlock1_' + str(i+1), ResnetAdaILNBlock(ngf * mult, use_bias=False))

        # Up-Sampling
        UpBlock2 = []
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            UpBlock2 += [
                    UpSample(2),
                    ReflectionPad2D(1),
                    Conv2D(num_channels=ngf * mult, num_filters=int(ngf * mult / 2),
                        filter_size=3, stride=1, padding=0, bias_attr=False),
                    ILN(int(ngf * mult / 2)),
                    Relu(),
            ]

        UpBlock2 += [
                    ReflectionPad2D(3),
                    Conv2D(num_channels=ngf, num_filters=output_nc,
                        filter_size=7, stride=1, padding=0, bias_attr=False),
                    Tanh(),
            ]

        self.DownBlock = Sequential(*DownBlock)
        self.DownBlock_list = DownBlock
        self.FC = Sequential(*FC)
        self.UpBlock2 = Sequential(*UpBlock2)
Example no. 29
class ConvLSTM(fluid.dygraph.Layer):
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 kernel_size,
                 num_layers,
                 batch_first=True,
                 return_all_layers=False,
                 training=True):
        super(ConvLSTM, self).__init__()
        self.cell_list = Sequential()
        self._check_kernel_size_consistency(kernel_size)

        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_channels = self._extend_for_multilayer(hidden_channels,
                                                      num_layers)
        if not len(kernel_size) == len(hidden_channels) == num_layers:
            raise ValueError('Inconsistent list length.')

        self.input_dim = in_channels
        self.hidden_dim = hidden_channels
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.return_all_layers = return_all_layers
        for i in range(0, self.num_layers):
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]

            self.cell_list.add_sublayer(name='{}'.format(i),
                                        sublayer=ConvLSTMCell(
                                            in_channels=cur_input_dim,
                                            hidden_channels=self.hidden_dim[i],
                                            kernel_size=self.kernel_size[i],
                                            training=training))

    def forward(self, input_tensor, hidden_state=None):
        """
        Parameters
        ----------
        input_tensor:
            5-D Tensor of shape (b, t, c, h, w)
        hidden_state:
            must currently be None; stateful operation is not implemented yet
        Returns
        -------
        last_state_list, layer_output
        """
        # Implement stateful ConvLSTM
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            b, _, _, h, w = input_tensor.shape
            hidden_state = self._init_hidden(b, h, w)

        layer_output_list = []
        last_state_list = []

        seq_len = input_tensor.shape[1]
        cur_layer_input = input_tensor

        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]
            output_inner = []
            for t in range(seq_len):
                h, c = self.cell_list['{}'.format(layer_idx)](
                    input_tensor=cur_layer_input[:, t, :, :, :],
                    cur_state=[h, c])
                output_inner.append(h)
            layer_output = fluid.layers.stack(output_inner, axis=1)
            cur_layer_input = layer_output
            layer_output_list.append(layer_output)
            last_state_list.append([h, c])

        if not self.return_all_layers:
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]

        return layer_output_list, last_state_list

    def _init_hidden(self, b, h, w):
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(b, h, w))
        return init_states

    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        if not (isinstance(kernel_size, tuple) or
                (isinstance(kernel_size, list)
                 and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')

    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
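
A small sketch (fluid 1.x dygraph assumed) of the stacking step in the forward pass above: per-timestep outputs are collected in a Python list and stacked into a (b, t, c, h, w) tensor with fluid.layers.stack, just as layer_output is built.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable

with fluid.dygraph.guard():
    # five timesteps of (b, c, h, w) feature maps, here random placeholders
    steps = [to_variable(np.random.rand(2, 4, 8, 8).astype('float32'))
             for _ in range(5)]
    seq = fluid.layers.stack(steps, axis=1)  # -> (b, t, c, h, w)
    print(seq.shape)  # [2, 5, 4, 8, 8]
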
Example no. 30
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Discriminator, self).__init__()
        #model = [fluid.layers.pad2d(1),
        #nn.utils.spectral_norm(
        #nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)),
        #nn.LeakyReLU(0.2, True)]
        model = [
            #fluid.layers.pad2d(1),
            ReflectionPad2d(1),
            Spectralnorm(
                Conv2D(input_nc,
                       ndf,
                       filter_size=4,
                       stride=2,
                       padding=0,
                       bias_attr=True)),
            LeakyReLU(0.2)
        ]

        #for i in range(1, n_layers - 2):
        #mult = 2 ** (i - 1)
        #model += [fluid.layers.pad2d(1),
        #nn.utils.spectral_norm(
        #nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)),
        #nn.LeakyReLU(0.2, True)]
        for i in range(1, n_layers - 2):
            mult = 2**(i - 1)
            model += [
                #fluid.layers.pad2d(1),
                ReflectionPad2d(1),
                Spectralnorm(
                    Conv2D(ndf * mult,
                           ndf * mult * 2,
                           filter_size=4,
                           stride=2,
                           padding=0,
                           bias_attr=True)),
                LeakyReLU(0.2)
            ]

        mult = 2**(n_layers - 2 - 1)
        #model += [fluid.layers.pad2d(1),
        #nn.utils.spectral_norm(
        #nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)),
        #nn.LeakyReLU(0.2, True)]
        model += [
            #fluid.layers.pad2d(1),
            ReflectionPad2d(1),
            Spectralnorm(
                Conv2D(ndf * mult,
                       ndf * mult * 2,
                       filter_size=4,
                       stride=1,
                       padding=0,
                       bias_attr=True)),
            LeakyReLU(0.2)
        ]

        # Class Activation Map
        mult = 2**(n_layers - 2)
        #self.gap_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
        #self.gmp_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
        #self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True)
        #self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.gap_fc = Spectralnorm(Linear(ndf * mult, 1, bias_attr=False))
        self.gmp_fc = Spectralnorm(Linear(ndf * mult, 1, bias_attr=False))
        self.conv1x1 = Conv2D(ndf * mult * 2,
                              ndf * mult,
                              filter_size=1,
                              stride=1,
                              bias_attr=True)
        self.leaky_relu = LeakyReLU(0.2)
        #self.pad = fluid.layers.pad2d(1)
        self.pad = ReflectionPad2d(1)
        #self.conv = nn.utils.spectral_norm(
        #nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))
        self.conv = Spectralnorm(
            Conv2D(ndf * mult,
                   1,
                   filter_size=4,
                   stride=1,
                   padding=0,
                   bias_attr=False))

        self.model = Sequential(*model)