Example #1
    def __init__(self, dim_in, dim_out, mode=None):
        super(ResidualBlock, self).__init__()
        if mode == 't':
            weight_attr = False
            bias_attr = False
        elif mode == 'p' or (mode is None):
            weight_attr = None
            bias_attr = None
        else:
            # guard against silently undefined attrs for any other mode
            raise ValueError("mode must be 't', 'p', or None, got %r" % mode)

        self.main = nn.Sequential(
            nn.Conv2d(dim_in,
                      dim_out,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=False),
            nn.InstanceNorm2d(dim_out,
                              weight_attr=weight_attr,
                              bias_attr=bias_attr), nn.ReLU(),
            nn.Conv2d(dim_out,
                      dim_out,
                      kernel_size=3,
                      stride=1,
                      padding=1,
                      bias_attr=False),
            nn.InstanceNorm2d(dim_out,
                              weight_attr=weight_attr,
                              bias_attr=bias_attr))
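
The snippet stops at __init__; a minimal sketch of the usual residual forward for this block (an assumption, not code from the project):

    def forward(self, x):
        # identity shortcut plus the conv-norm stack; assumes dim_in == dim_out
        return x + self.main(x)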
Example #2
    def __init__(self, conv_dim=64, repeat_num=3, w=0.01):
        super(MANet, self).__init__()
        self.encoder = TNetDown(conv_dim=conv_dim, repeat_num=repeat_num)
        curr_dim = conv_dim * 4
        self.w = w
        self.beta = nn.Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
        self.gamma = nn.Conv2d(curr_dim, curr_dim, kernel_size=3, padding=1)
        self.simple_spade = GetMatrix(curr_dim, 1)  # get the makeup matrix
        self.repeat_num = repeat_num
        for i in range(repeat_num):
            setattr(self, "bottlenecks_" + str(i),
                    ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, mode='t'))
        # Up-Sampling
        self.upsamplers = []
        self.up_betas = []
        self.up_gammas = []
        self.up_acts = []
        y_dim = curr_dim
        for i in range(2):
            layers = []
            layers.append(
                nn.ConvTranspose2d(curr_dim,
                                   curr_dim // 2,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1,
                                   bias_attr=False))
            layers.append(
                nn.InstanceNorm2d(curr_dim // 2,
                                  weight_attr=False,
                                  bias_attr=False))

            setattr(self, "up_acts_" + str(i), nn.ReLU())
            setattr(
                self, "up_betas_" + str(i),
                nn.ConvTranspose2d(y_dim,
                                   curr_dim // 2,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1))
            setattr(
                self, "up_gammas_" + str(i),
                nn.ConvTranspose2d(y_dim,
                                   curr_dim // 2,
                                   kernel_size=4,
                                   stride=2,
                                   padding=1))
            setattr(self, "up_samplers_" + str(i), nn.Sequential(*layers))
            curr_dim = curr_dim // 2
        self.img_reg = [
            nn.Conv2d(curr_dim,
                      3,
                      kernel_size=7,
                      stride=1,
                      padding=3,
                      bias_attr=False)
        ]
        self.img_reg = nn.Sequential(*self.img_reg)
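
Sublayers registered with setattr under generated names are usually fetched back with getattr in the forward pass; a hedged sketch of that retrieval pattern (hypothetical, the project's forward is not shown):

        # hypothetical: run the features through each named bottleneck in order
        for i in range(self.repeat_num):
            x = getattr(self, "bottlenecks_" + str(i))(x)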
Example #3
 def __init__(self, nf=64, gc=32, bias=True):
     super(ResidualDenseBlock_5C, self).__init__()
     # gc: growth channel, i.e. intermediate channels
     self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1, bias_attr=bias)
     self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1, bias_attr=bias)
     self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1, bias_attr=bias)
     self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1, bias_attr=bias)
     self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1, bias_attr=bias)
     self.lrelu = nn.LeakyReLU(negative_slope=0.2)
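
Only __init__ appears here; under the standard ESRGAN dense-block formulation (an assumption about this project, with paddle imported), each conv consumes the concatenation of all earlier outputs and the result is residually scaled:

 def forward(self, x):
     # dense connectivity: conv_k sees x plus all previous intermediate maps
     x1 = self.lrelu(self.conv1(x))
     x2 = self.lrelu(self.conv2(paddle.concat([x, x1], axis=1)))
     x3 = self.lrelu(self.conv3(paddle.concat([x, x1, x2], axis=1)))
     x4 = self.lrelu(self.conv4(paddle.concat([x, x1, x2, x3], axis=1)))
     x5 = self.conv5(paddle.concat([x, x1, x2, x3, x4], axis=1))
     return x5 * 0.2 + x  # 0.2 is the conventional ESRGAN residual scale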
Example #4
    def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
        super(ProjectionHead, self).__init__()

        if proj == 'linear':
            self.proj = nn.Conv2d(dim_in, proj_dim, kernel_size=1)
        elif proj == 'convmlp':
            self.proj = nn.Sequential(
                nn.Conv2d(dim_in, dim_in, kernel_size=1),
                nn.SyncBatchNorm(dim_in), nn.ReLU(),
                nn.Conv2d(dim_in, proj_dim, kernel_size=1))
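
Projection heads like this are typically followed by L2 normalization over the channel axis before a contrastive loss; a hedged forward sketch (F here is assumed to be paddle.nn.functional):

    def forward(self, x):
        # unit-normalize the projected embedding along channels (axis=1)
        return F.normalize(self.proj(x), p=2, axis=1)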
Example #5
File: nlayers.py Project: tty33/PaddleGAN
    def __init__(self, input_nc, ndf=64, n_layers=3, norm_type='instance'):
        """Construct a PatchGAN discriminator

        Args:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_type (str)      -- normalization layer type
        """
        super(NLayerDiscriminator, self).__init__()
        norm_layer = build_norm_layer(norm_type)
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm
        else:
            use_bias = norm_layer == nn.InstanceNorm

        kw = 4
        padw = 1
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2)
        ]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev,
                          ndf * nf_mult,
                          kernel_size=kw,
                          stride=2,
                          padding=padw,
                          bias_attr=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev,
                      ndf * nf_mult,
                      kernel_size=kw,
                      stride=1,
                      padding=padw,
                      bias_attr=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2)
        ]

        sequence += [
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]
        self.model = nn.Sequential(*sequence)
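
A hedged smoke test, assuming the full NLayerDiscriminator class from PaddleGAN and a Paddle build exposing these APIs:

# hypothetical usage: a 256x256 RGB batch yields a 30x30 patch score map
# with the defaults above (kw=4, three stride-2 convs, two stride-1 convs)
disc = NLayerDiscriminator(input_nc=3)
scores = disc(paddle.randn([1, 3, 256, 256]))
print(scores.shape)  # [1, 1, 30, 30]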
Example #6
    def __init__(self, num_classes=10, classifier_activation='softmax'):
        super(LeNetDygraph, self).__init__()
        self.num_classes = num_classes
        self.features = nn.Sequential(nn.Conv2d(1, 6, 3, stride=1, padding=1),
                                      nn.ReLU(), nn.Pool2D(2, 'max', 2),
                                      nn.Conv2d(6, 16, 5, stride=1, padding=0),
                                      nn.ReLU(), nn.Pool2D(2, 'max', 2))

        if num_classes > 0:
            self.fc = nn.Sequential(nn.Linear(400, 120), nn.Linear(120, 84),
                                    nn.Linear(84, num_classes),
                                    nn.Softmax())  # TODO: accept any activation
Example #7
 def __init__(self, in_features, kernel_size, padding):
     super(ResBlock2d, self).__init__()
     self.conv1 = nn.Conv2d(in_channels=in_features,
                            out_channels=in_features,
                            kernel_size=kernel_size,
                            padding=padding)
     self.conv2 = nn.Conv2d(in_channels=in_features,
                            out_channels=in_features,
                            kernel_size=kernel_size,
                            padding=padding)
     self.norm1 = nn.BatchNorm2d(in_features)
     self.norm2 = nn.BatchNorm2d(in_features)
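
The forward is not shown; first-order-motion-style models usually apply a pre-activation residual pattern here (an assumption, with F as paddle.nn.functional):

 def forward(self, x):
     # pre-activation residual: norm -> relu -> conv, twice, plus the skip
     out = self.conv1(F.relu(self.norm1(x)))
     out = self.conv2(F.relu(self.norm2(out)))
     return out + x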
Example #8
 def __init__(self, dim_in, dim_out):
     super(GetMatrix, self).__init__()
     self.get_gamma = nn.Conv2d(dim_in,
                                dim_out,
                                kernel_size=1,
                                stride=1,
                                padding=0,
                                bias_attr=False)
     self.get_beta = nn.Conv2d(dim_in,
                               dim_out,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               bias_attr=False)
Example #9
    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout,
                         use_bias):
        """Construct a convolutional block.

        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not

        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)

        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias),
            norm_layer(dim),
            nn.ReLU()
        ]
        if use_dropout:
            conv_block += [Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' %
                                      padding_type)
        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias_attr=use_bias),
            norm_layer(dim)
        ]

        return nn.Sequential(*conv_block)
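
A hedged example of calling build_conv_block from the enclosing ResnetBlock-style layer (hypothetical values; ReflectionPad2d and the norm layer are assumed to be in scope as in the snippet):

        # hypothetical call: reflect padding, instance norm, no dropout
        self.conv_block = self.build_conv_block(dim=256,
                                                padding_type='reflect',
                                                norm_layer=nn.InstanceNorm2d,
                                                use_dropout=False,
                                                use_bias=False)
        # the enclosing block then typically computes: out = x + self.conv_block(x)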
Example #10
File: darknet.py Project: heavengate/hapi
    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size=3,
                 stride=1,
                 groups=1,
                 padding=0,
                 act="leaky",
                 name=None):
        super(ConvBNLayer, self).__init__()

        self.conv = nn.Conv2d(in_channels=ch_in,
                              out_channels=ch_out,
                              kernel_size=filter_size,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              weight_attr=ParamAttr(name=name +
                                                    '.conv.weights'),
                              bias_attr=False)
        bn_name = name + '.bn'
        self.batch_norm = nn.BatchNorm2d(
            ch_out,
            weight_attr=ParamAttr(name=bn_name + '.scale',
                                  regularizer=L2Decay(0.)),
            bias_attr=ParamAttr(name=bn_name + '.offset',
                                regularizer=L2Decay(0.)))

        self.act = act
Example #11
 def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):
     super(BiSeNetOutput, self).__init__()
     self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
     self.conv_out = nn.Conv2d(mid_chan,
                               n_classes,
                               kernel_size=1,
                               bias_attr=False)
Example #12
    def __init__(self,
                 ch_in: int,
                 ch_out: int,
                 filter_size: int = 3,
                 stride: int = 1,
                 groups: int = 1,
                 padding: int = 0,
                 act: str = 'leaky',
                 is_test: bool = False):
        super(ConvBNLayer, self).__init__()

        self.conv = nn.Conv2d(
            ch_in,
            ch_out,
            filter_size,
            padding=padding,
            stride=stride,
            groups=groups,
            weight_attr=paddle.ParamAttr(initializer=Normal(0., 0.02)),
            bias_attr=False)

        self.batch_norm = nn.BatchNorm(num_channels=ch_out,
                                       is_test=is_test,
                                       param_attr=paddle.ParamAttr(
                                           initializer=Normal(0., 0.02),
                                           regularizer=L2Decay(0.)))
        self.act = act
Example #13
 def __init__(self, in_chan, out_chan, *args, **kwargs):
     super(FeatureFusionModule, self).__init__()
     self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)
     self.conv1 = nn.Conv2d(out_chan,
                            out_chan // 4,
                            kernel_size=1,
                            stride=1,
                            padding=0,
                            bias_attr=False)
     self.conv2 = nn.Conv2d(out_chan // 4,
                            out_chan,
                            kernel_size=1,
                            stride=1,
                            padding=0,
                            bias_attr=False)
     self.relu = nn.ReLU()
     self.sigmoid = nn.Sigmoid()
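
The fusion forward is not part of the snippet; the standard BiSeNet pattern (assumed here, with F as paddle.nn.functional) concatenates the two paths, then reweights with channel attention:

 def forward(self, fsp, fcp):
     # concat spatial/context paths -> 1x1 ConvBNReLU -> channel attention
     fcat = paddle.concat([fsp, fcp], axis=1)
     feat = self.convblk(fcat)
     atten = F.adaptive_avg_pool2d(feat, 1)
     atten = self.sigmoid(self.conv2(self.relu(self.conv1(atten))))
     return feat + feat * atten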
Example #14
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes,
                     out_planes,
                     kernel_size=3,
                     stride=stride,
                     padding=1,
                     bias_attr=False)
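
Since conv3x3 is self-contained, a quick usage check (assuming a Paddle build where nn.Conv2d accepts bias_attr, as the snippets here do):

import paddle

layer = conv3x3(in_planes=3, out_planes=64, stride=2)
y = layer(paddle.randn([1, 3, 32, 32]))
print(y.shape)  # [1, 64, 16, 16]: padding=1 keeps sizes, stride=2 halves them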
Example #15
 def __init__(self, in_chan, out_chan, *args, **kwargs):
     super(AttentionRefinementModule, self).__init__()
     self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)
     self.conv_atten = nn.Conv2d(out_chan,
                                 out_chan,
                                 kernel_size=1,
                                 bias_attr=False)
     self.bn_atten = nn.BatchNorm(out_chan)
     self.sigmoid_atten = nn.Sigmoid()
Example #16
 def __init__(
     self,
     in_channels,
     out_channels,
     **kwargs
 ):
     super(BasicConv2d, self).__init__()
     # Paddle's Conv2d/BatchNorm2d take bias_attr/epsilon rather than
     # torch-style bias/eps, matching the other snippets on this page
     self.conv = nn.Conv2d(in_channels, out_channels, bias_attr=False, **kwargs)
     self.bn = nn.BatchNorm2d(out_channels, epsilon=0.001)
Example #17
    def __init__(self,
                 block_expansion,
                 num_kp,
                 num_channels,
                 max_features,
                 num_blocks,
                 temperature,
                 estimate_jacobian=False,
                 scale_factor=1,
                 single_jacobian_map=False,
                 pad=0):
        super(KPDetector, self).__init__()

        self.predictor = Hourglass(block_expansion,
                                   in_features=num_channels,
                                   max_features=max_features,
                                   num_blocks=num_blocks)

        self.kp = nn.Conv2d(in_channels=self.predictor.out_filters,
                            out_channels=num_kp,
                            kernel_size=(7, 7),
                            padding=pad)

        if estimate_jacobian:
            self.num_jacobian_maps = 1 if single_jacobian_map else num_kp
            self.jacobian = nn.Conv2d(in_channels=self.predictor.out_filters,
                                      out_channels=4 * self.num_jacobian_maps,
                                      kernel_size=(7, 7),
                                      padding=pad)
            # self.jacobian.weight.data.zero_()
            # self.jacobian.bias.data.copy_(paddle.tensor([1, 0, 0, 1] * self.num_jacobian_maps, dtype='float32'))
        else:
            self.jacobian = None

        self.temperature = temperature
        self.scale_factor = scale_factor
        if self.scale_factor != 1:
            self.down = AntiAliasInterpolation2d(num_channels,
                                                 self.scale_factor)
Example #18
 def __init__(self,
              in_features,
              out_features,
              groups=1,
              kernel_size=3,
              padding=1):
     super(SameBlock2d, self).__init__()
     self.conv = nn.Conv2d(in_channels=in_features,
                           out_channels=out_features,
                           kernel_size=kernel_size,
                           padding=padding,
                           groups=groups)
     self.norm = nn.BatchNorm2d(out_features)
Example #19
    def __init__(self, conv_dim=64, repeat_num=3):
        super(TNetDown, self).__init__()

        layers = []
        layers.append(
            nn.Conv2d(3,
                      conv_dim,
                      kernel_size=7,
                      stride=1,
                      padding=3,
                      bias_attr=False))
        layers.append(
            nn.InstanceNorm2d(conv_dim, weight_attr=False, bias_attr=False))

        layers.append(nn.ReLU())

        # Down-Sampling
        curr_dim = conv_dim
        for i in range(2):
            layers.append(
                nn.Conv2d(curr_dim,
                          curr_dim * 2,
                          kernel_size=4,
                          stride=2,
                          padding=1,
                          bias_attr=False))
            layers.append(
                nn.InstanceNorm2d(curr_dim * 2,
                                  weight_attr=False,
                                  bias_attr=False))
            layers.append(nn.ReLU())
            curr_dim = curr_dim * 2

        # Bottleneck
        for i in range(repeat_num):
            layers.append(
                ResidualBlock(dim_in=curr_dim, dim_out=curr_dim, mode='t'))

        self.main = nn.Sequential(*layers)
Example #20
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     super(Bottleneck, self).__init__()
     self.conv1 = nn.Conv2d(inplanes,
                            planes,
                            kernel_size=1,
                            bias_attr=False)
     self.bn1 = nn.BatchNorm(planes)
     self.conv2 = nn.Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias_attr=False)
     self.bn2 = nn.BatchNorm(planes)
     self.conv3 = nn.Conv2d(planes,
                            planes * 4,
                            kernel_size=1,
                            bias_attr=False)
     self.bn3 = nn.BatchNorm(planes * 4)
     self.relu = nn.ReLU()
     self.downsample = downsample
     self.stride = stride
Example #21
File: modeling.py Project: heavengate/hapi
    def __init__(self, num_classes=80, model_mode='train'):
        super(YOLOv3, self).__init__()
        self.num_classes = num_classes
        assert str.lower(model_mode) in ['train', 'eval', 'test'], \
            "model_mode should be 'train' 'eval' or 'test', but got " \
            "{}".format(model_mode)
        self.model_mode = str.lower(model_mode)
        self.anchors = [
            10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198,
            373, 326
        ]
        self.anchor_masks = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
        self.valid_thresh = 0.005
        self.nms_thresh = 0.45
        self.nms_topk = 400
        self.nms_posk = 100
        self.draw_thresh = 0.5

        self.backbone = darknet53(pretrained=(model_mode == 'train'))
        self.block_outputs = []
        self.yolo_blocks = []
        self.route_blocks = []

        for idx, num_chan in enumerate([1024, 768, 384]):
            yolo_block = self.add_sublayer(
                "yolo_detecton_block_{}".format(idx),
                YoloDetectionBlock(num_chan, 512 // (2**idx)))
            self.yolo_blocks.append(yolo_block)

            num_filters = len(self.anchor_masks[idx]) * (self.num_classes + 5)

            block_out = self.add_sublayer(
                "block_out_{}".format(idx),
                nn.Conv2d(in_channels=1024 // (2**idx),
                          out_channels=num_filters,
                          kernel_size=1,
                          weight_attr=ParamAttr(
                              initializer=nn.initializer.Normal(0., 0.02)),
                          bias_attr=ParamAttr(
                              initializer=nn.initializer.Constant(0.0),
                              regularizer=L2Decay(0.))))
            self.block_outputs.append(block_out)
            if idx < 2:
                route = self.add_sublayer(
                    "route2_{}".format(idx),
                    ConvBNLayer(ch_in=512 // (2**idx),
                                ch_out=256 // (2**idx),
                                filter_size=1,
                                act='leaky_relu'))
                self.route_blocks.append(route)
Example #22
 def __init__(self,
              in_features,
              out_features,
              kernel_size=3,
              padding=1,
              groups=1):
     super(DownBlock2d, self).__init__()
     self.conv = nn.Conv2d(in_channels=in_features,
                           out_channels=out_features,
                           kernel_size=kernel_size,
                           padding=padding,
                           groups=groups)
     self.norm = nn.BatchNorm2d(out_features)
     self.pool = nn.AvgPool2d(kernel_size=(2, 2))
Example #23
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 padding=0,
                 norm_layer=InstanceNorm,
                 use_bias=True,
                 scale_factor=1,
                 stddev=0.02):
        super(SeparableConv2D, self).__init__()

        self.conv = nn.LayerList([
            nn.Conv2d(
                in_channels=num_channels,
                out_channels=num_channels * scale_factor,
                kernel_size=filter_size,
                stride=stride,
                padding=padding,
                groups=num_channels,
                weight_attr=paddle.ParamAttr(
                    initializer=nn.initializer.Normal(loc=0.0, scale=stddev)),
                bias_attr=use_bias)
        ])

        self.conv.extend([norm_layer(num_channels * scale_factor)])

        self.conv.extend([
            nn.Conv2d(
                in_channels=num_channels * scale_factor,
                out_channels=num_filters,
                kernel_size=1,
                stride=1,
                weight_attr=paddle.ParamAttr(
                    initializer=nn.initializer.Normal(loc=0.0, scale=stddev)),
                bias_attr=use_bias)
        ])
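
The LayerList holds the depthwise conv, the norm layer, and the 1x1 pointwise conv in order, so a forward pass is just sequential application; a hedged sketch:

    def forward(self, x):
        # depthwise conv -> norm -> pointwise conv, in registration order
        for sublayer in self.conv:
            x = sublayer(x)
        return x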
Example #24
    def __init__(self,
                 block_expansion,
                 num_blocks,
                 max_features,
                 num_kp,
                 num_channels,
                 estimate_occlusion_map=False,
                 scale_factor=1,
                 kp_variance=0.01):
        super(DenseMotionNetwork, self).__init__()
        self.hourglass = Hourglass(block_expansion=block_expansion,
                                   in_features=(num_kp + 1) *
                                   (num_channels + 1),
                                   max_features=max_features,
                                   num_blocks=num_blocks)

        self.mask = nn.Conv2d(self.hourglass.out_filters,
                              num_kp + 1,
                              kernel_size=(7, 7),
                              padding=(3, 3))

        if estimate_occlusion_map:
            self.occlusion = nn.Conv2d(self.hourglass.out_filters,
                                       1,
                                       kernel_size=(7, 7),
                                       padding=(3, 3))
        else:
            self.occlusion = None

        self.num_kp = num_kp
        self.scale_factor = scale_factor
        self.kp_variance = kp_variance

        if self.scale_factor != 1:
            self.down = AntiAliasInterpolation2d(num_channels,
                                                 self.scale_factor)
Example #25
 def __init__(self):
     super(Resnet18, self).__init__()
     self.conv1 = nn.Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias_attr=False)
     self.bn1 = nn.BatchNorm(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
     self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)
     self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)
     self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)
     self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)
Example #26
 def __init__(self,
              in_chan,
              out_chan,
              ks=3,
              stride=1,
              padding=1,
              *args,
              **kwargs):
     super(ConvBNReLU, self).__init__()
     self.conv = nn.Conv2d(in_chan,
                           out_chan,
                           kernel_size=ks,
                           stride=stride,
                           padding=padding,
                           bias_attr=False)
     self.bn = nn.BatchNorm2d(out_chan)
     self.relu = nn.ReLU()
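
A hedged sketch of the implied composition (bias_attr=False is safe here because BatchNorm supplies the affine shift):

 def forward(self, x):
     # conv (no bias) -> batch norm -> ReLU
     return self.relu(self.bn(self.conv(x)))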
Example #27
 def paddle_nn_layer(self):
     x_var = dg.to_variable(self.input)
     conv = nn.Conv2d(self.num_channels,
                      self.num_filters,
                      self.filter_size,
                      padding=self.padding,
                      padding_mode=self.padding_mode,
                      stride=self.stride,
                      dilation=self.dilation,
                      groups=self.groups,
                      data_format=self.data_format)
     conv.weight.set_value(self.weight)
     if not self.no_bias:
         conv.bias.set_value(self.bias)
     y_var = conv(x_var)
     y_np = y_var.numpy()
     return y_np
Example #28
 def __init__(self, in_chan, out_chan, stride=1):
     super(BasicBlock, self).__init__()
     self.conv1 = conv3x3(in_chan, out_chan, stride)
     self.bn1 = nn.BatchNorm(out_chan)
     self.conv2 = conv3x3(out_chan, out_chan)
     self.bn2 = nn.BatchNorm(out_chan)
     self.relu = nn.ReLU()
     self.downsample = None
     if in_chan != out_chan or stride != 1:
         self.downsample = nn.Sequential(
             nn.Conv2d(in_chan,
                       out_chan,
                       kernel_size=1,
                       stride=stride,
                       bias_attr=False),
             nn.BatchNorm(out_chan),
         )
Example #29
 def __init__(self, block, layers, num_classes=1000):
     self.inplanes = 64
     super(ResNet, self).__init__()
     self.conv1 = nn.Conv2d(3,
                            64,
                            kernel_size=7,
                            stride=2,
                            padding=3,
                            bias_attr=False)
     self.bn1 = nn.BatchNorm(64)
     self.relu = nn.ReLU()
     self.maxpool = nn.Pool2D(pool_size=3, pool_stride=2, pool_padding=1)
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
     self.avgpool = nn.Pool2D(7, pool_stride=1, pool_type='avg')
     self.fc = nn.Linear(512 * block.expansion, num_classes)
Example #30
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias_attr=False),
                nn.BatchNorm(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)
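
For reference, the canonical depth configurations this _make_layer is driven by (standard ResNet recipes, assuming a Bottleneck class that defines expansion = 4, like Example #20's):

# hypothetical constructors; layers[i] is the number of blocks per stage
resnet50 = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000)
resnet101 = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=1000)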