Example #1
 def __init__(self, version='1_0', num_classes=1000):
     super(SqueezeNet, self).__init__()
     self.num_classes = num_classes
     if (version == '1_0'):
         self.features = nn.Sequential(
             nn.Conv(3, 96, kernel_size=7, stride=2), nn.Relu(),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(96, 16, 64, 64), Fire(128, 16, 64, 64),
             Fire(128, 32, 128, 128),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(256, 32, 128, 128), Fire(256, 48, 192, 192),
             Fire(384, 48, 192, 192), Fire(384, 64, 256, 256),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(512, 64, 256, 256))
     elif (version == '1_1'):
         self.features = nn.Sequential(
             nn.Conv(3, 64, kernel_size=3, stride=2), nn.Relu(),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(64, 16, 64, 64), Fire(128, 16, 64, 64),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(128, 32, 128, 128), Fire(256, 32, 128, 128),
             nn.Pool(kernel_size=3, stride=2, ceil_mode=True, op='maximum'),
             Fire(256, 48, 192, 192), Fire(384, 48, 192, 192),
             Fire(384, 64, 256, 256), Fire(512, 64, 256, 256))
     else:
         raise ValueError(
             'Unsupported SqueezeNet version {version}: 1_0 or 1_1 expected'.
             format(version=version))
     final_conv = nn.Conv(512, self.num_classes, kernel_size=1)
     self.classifier = nn.Sequential(nn.Dropout(p=0.5),
                                     final_conv, nn.Relu(),
                                     nn.AdaptiveAvgPool2d((1, 1)))
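Only the constructor is quoted; in Jittor the forward pass lives in execute (the equivalent of PyTorch's forward). A minimal sketch of the usual SqueezeNet head, an assumption rather than part of the quoted source — note the classifier is convolutional, so the flatten comes after it:

def execute(self, x):
    x = self.features(x)
    x = self.classifier(x)               # final 1x1 conv + ReLU + global avg pool
    return x.reshape([x.shape[0], -1])   # (N, num_classes)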
Example #2
 def __init__(self, alpha, num_classes=1000, dropout=0.2):
     super(MNASNet, self).__init__()
     assert (alpha > 0.0)
     self.alpha = alpha
     self.num_classes = num_classes
     depths = _get_depths(alpha)
     layers = [
         nn.Conv(3, 32, 3, padding=1, stride=2, bias=False),
         nn.BatchNorm(32, momentum=_BN_MOMENTUM),
         nn.Relu(),
         nn.Conv(32, 32, 3, padding=1, stride=1, groups=32, bias=False),
         nn.BatchNorm(32, momentum=_BN_MOMENTUM),
         nn.Relu(),
         nn.Conv(32, 16, 1, padding=0, stride=1, bias=False),
         nn.BatchNorm(16, momentum=_BN_MOMENTUM),
         _stack(16, depths[0], 3, 2, 3, 3, _BN_MOMENTUM),
         _stack(depths[0], depths[1], 5, 2, 3, 3, _BN_MOMENTUM),
         _stack(depths[1], depths[2], 5, 2, 6, 3, _BN_MOMENTUM),
         _stack(depths[2], depths[3], 3, 1, 6, 2, _BN_MOMENTUM),
         _stack(depths[3], depths[4], 5, 2, 6, 4, _BN_MOMENTUM),
         _stack(depths[4], depths[5], 3, 1, 6, 1, _BN_MOMENTUM),
         nn.Conv(depths[5], 1280, 1, padding=0, stride=1, bias=False),
         nn.BatchNorm(1280, momentum=_BN_MOMENTUM),
         nn.Relu()
     ]
     self.layers = nn.Sequential(*layers)
     self.classifier = nn.Sequential(nn.Dropout(p=dropout),
                                     nn.Linear(1280, num_classes))
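As above, only the constructor is shown. A hedged sketch of the matching execute, assuming a global average pool feeds the linear classifier:

def execute(self, x):
    x = self.layers(x)
    x = nn.AdaptiveAvgPool2d(1)(x)      # (N, 1280, 1, 1)
    x = x.reshape([x.shape[0], -1])     # (N, 1280)
    return self.classifier(x)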
Example #3
 def __init__(self, inplanes, squeeze_planes, expand1x1_planes,
              expand3x3_planes):
     super(Fire, self).__init__()
     self.inplanes = inplanes
     self.squeeze = nn.Conv(inplanes, squeeze_planes, kernel_size=1)
     self.squeeze_activation = nn.Relu()
     self.expand1x1 = nn.Conv(squeeze_planes,
                              expand1x1_planes,
                              kernel_size=1)
     self.expand1x1_activation = nn.Relu()
     self.expand3x3 = nn.Conv(squeeze_planes,
                              expand3x3_planes,
                              kernel_size=3,
                              padding=1)
     self.expand3x3_activation = nn.Relu()
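The Fire module squeezes with a 1x1 conv, then expands through parallel 1x1 and 3x3 branches whose outputs are concatenated on the channel axis. A minimal execute sketch, assuming import jittor as jt:

def execute(self, x):
    x = self.squeeze_activation(self.squeeze(x))
    return jt.concat([
        self.expand1x1_activation(self.expand1x1(x)),
        self.expand3x3_activation(self.expand3x3(x)),
    ], dim=1)   # e.g. 64 + 64 channels -> 128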
Example #4
    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 downsample=None,
                 groups=1,
                 base_width=64,
                 dilation=1,
                 norm_layer=None,
                 *,
                 reduction=16):
        super(Bottleneck, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm
        width = (int((planes * (base_width / 64.0))) * groups)
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, (planes * self.expansion))
        self.bn3 = norm_layer((planes * self.expansion))
        self.relu = nn.Relu()
        #self.se = SELayer((planes * self.expansion), reduction)
        self.at = Attention((planes * self.expansion),
                            num_heads=4,
                            kdim=(planes * self.expansion),
                            vdim=(planes * self.expansion),
                            self_attention=True)

        self.downsample = downsample
        self.stride = stride
Example #5
 def __init__(self,
              inplanes,
              planes,
              stride=1,
              downsample=None,
              groups=1,
              base_width=64,
              dilation=1,
              norm_layer=None):
     super(BasicBlock, self).__init__()
     if (norm_layer is None):
         norm_layer = nn.BatchNorm
     if ((groups != 1) or (base_width != 64)):
         raise ValueError(
             'BasicBlock only supports groups=1 and base_width=64')
     if (dilation > 1):
         raise NotImplementedError(
             'Dilation > 1 not supported in BasicBlock')
     self.conv1 = conv3x3(inplanes, planes, stride)
     self.bn1 = norm_layer(planes)
     self.relu = nn.Relu()
     self.conv2 = conv3x3(planes, planes)
     self.bn2 = norm_layer(planes)
     self.downsample = downsample
     self.stride = stride
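For reference, the residual execute that pairs with this constructor — a sketch of the standard ResNet pattern, not quoted from the project:

def execute(self, x):
    identity = x
    out = self.relu(self.bn1(self.conv1(x)))
    out = self.bn2(self.conv2(out))
    if self.downsample is not None:
        identity = self.downsample(x)   # match shape when stride/channels change
    return self.relu(out + identity)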
Example #6
 def __init__(self, channel, reduction=16):
     super(SELayer, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool2d(1)
     self.fc = nn.Sequential(
         nn.Linear(channel, channel // reduction, bias=False),
         nn.Relu(),
         nn.Linear(channel // reduction, channel, bias=False),
         nn.Sigmoid()
     )
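Squeeze-and-excitation pools each channel to a scalar, passes the vector through the bottleneck MLP, and rescales the input. A minimal execute sketch under that assumption:

def execute(self, x):
    b, c = x.shape[0], x.shape[1]
    y = self.avg_pool(x).reshape([b, c])      # squeeze: (N, C)
    y = self.fc(y).reshape([b, c, 1, 1])      # excite: per-channel weights
    return x * y                              # broadcast over H, W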
Example #7
File: model.py Project: diviswen/PMP-Net
    def __init__(self, step=1, in_channel=256):
        super(Unit, self).__init__()
        self.step = step
        if step == 1:
            return

        self.conv_z = Conv1d(in_channel * 2, in_channel, if_bn=True, activation_fn=nn.Sigmoid())
        self.conv_r = Conv1d(in_channel * 2, in_channel, if_bn=True, activation_fn=nn.Sigmoid())
        self.conv_h = Conv1d(in_channel * 2, in_channel, if_bn=True, activation_fn=nn.Relu())
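The three gated Conv1d layers follow GRU-style gating: sigmoid update and reset gates and a ReLU candidate state. A hedged sketch of how such gates typically combine (the argument names cur_x / prev_s are hypothetical, and jt is import jittor as jt):

def execute(self, cur_x, prev_s):
    if self.step == 1:
        return cur_x, cur_x
    z = self.conv_z(jt.concat([cur_x, prev_s], dim=1))           # update gate
    r = self.conv_r(jt.concat([cur_x, prev_s], dim=1))           # reset gate
    h_hat = self.conv_h(jt.concat([cur_x, r * prev_s], dim=1))   # candidate state
    h = (1 - z) * cur_x + z * h_hat
    return h, h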
Example #8
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv(3, 32, 3, 1)  # no padding
        self.conv2 = nn.Conv(32, 64, 3, 1)
        self.bn = nn.BatchNorm(64)

        self.max_pool = nn.Pool(2, 2)
        self.relu = nn.Relu()
        self.fc1 = nn.Linear(64 * 12 * 12, 256)
        self.fc2 = nn.Linear(256, 10)
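The 64 * 12 * 12 flatten size implies 28x28 inputs: two unpadded 3x3 convs shrink 28 -> 26 -> 24, and the 2x2 pool halves that to 12. A sketch of a matching execute (the exact layer order is an assumption):

def execute(self, x):                      # x: (N, 3, 28, 28)
    x = self.relu(self.conv1(x))           # (N, 32, 26, 26)
    x = self.relu(self.bn(self.conv2(x)))  # (N, 64, 24, 24)
    x = self.max_pool(x)                   # (N, 64, 12, 12)
    x = x.reshape([x.shape[0], -1])        # (N, 9216)
    x = self.relu(self.fc1(x))
    return self.fc2(x)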
Example #9
 def __init__(self,
              block,
              layers,
              num_classes=1000,
              zero_init_residual=False,
              groups=1,
              width_per_group=64,
              replace_stride_with_dilation=None,
              norm_layer=None):
     super(ResNet, self).__init__()
     if (norm_layer is None):
         norm_layer = nn.BatchNorm
     self._norm_layer = norm_layer
     self.inplanes = 64
     self.dilation = 1
     if (replace_stride_with_dilation is None):
         replace_stride_with_dilation = [False, False, False]
     if (len(replace_stride_with_dilation) != 3):
         raise ValueError(
             'replace_stride_with_dilation should be None or a 3-element tuple, got {}'
             .format(replace_stride_with_dilation))
     self.groups = groups
     self.base_width = width_per_group
     self.conv1 = nn.Conv(3,
                          self.inplanes,
                          kernel_size=7,
                          stride=2,
                          padding=3,
                          bias=False)
     jt.init.relu_invariant_gauss_(self.conv1.weight, mode="fan_out")
     self.bn1 = norm_layer(self.inplanes)
     self.relu = nn.Relu()
     self.maxpool = nn.Pool(kernel_size=3,
                            stride=2,
                            padding=1,
                            op='maximum')
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block,
                                    128,
                                    layers[1],
                                    stride=2,
                                    dilate=replace_stride_with_dilation[0])
     self.layer3 = self._make_layer(block,
                                    256,
                                    layers[2],
                                    stride=2,
                                    dilate=replace_stride_with_dilation[1])
     self.layer4 = self._make_layer(block,
                                    512,
                                    layers[3],
                                    stride=2,
                                    dilate=replace_stride_with_dilation[2])
     self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
     self.fc = nn.Linear((512 * block.expansion), num_classes)
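The standard ResNet execute that goes with this constructor (a sketch of the usual pipeline):

def execute(self, x):
    x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
    x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
    x = self.avgpool(x).reshape([x.shape[0], -1])
    return self.fc(x)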
Example #10
 def __init__(self, inp, oup, stride):
     super(InvertedResidual, self).__init__()
     if (not (1 <= stride <= 3)):
         raise ValueError('illegal stride value')
     self.stride = stride
     branch_features = (oup // 2)
     assert ((self.stride != 1) or (inp == (branch_features << 1)))
     if (self.stride > 1):
         self.branch1 = nn.Sequential(
             self.depthwise_conv(inp,
                                 inp,
                                 kernel_size=3,
                                 stride=self.stride,
                                 padding=1), nn.BatchNorm(inp),
             nn.Conv(inp,
                     branch_features,
                     kernel_size=1,
                     stride=1,
                     padding=0,
                     bias=False), nn.BatchNorm(branch_features), nn.Relu())
     else:
         self.branch1 = nn.Sequential()
     self.branch2 = nn.Sequential(
         nn.Conv((inp if (self.stride > 1) else branch_features),
                 branch_features,
                 kernel_size=1,
                 stride=1,
                 padding=0,
                 bias=False), nn.BatchNorm(branch_features), nn.Relu(),
         self.depthwise_conv(branch_features,
                             branch_features,
                             kernel_size=3,
                             stride=self.stride,
                             padding=1), nn.BatchNorm(branch_features),
         nn.Conv(branch_features,
                 branch_features,
                 kernel_size=1,
                 stride=1,
                 padding=0,
                 bias=False), nn.BatchNorm(branch_features), nn.Relu())
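In ShuffleNetV2 the stride-1 block splits the channels, transforms one half, and re-concatenates; both cases end with a channel shuffle. A hedged sketch, where channel_shuffle is the usual helper and is not shown in this snippet:

def execute(self, x):
    if self.stride == 1:
        c = x.shape[1] // 2
        x1, x2 = x[:, :c], x[:, c:]                           # channel split
        out = jt.concat([x1, self.branch2(x2)], dim=1)
    else:
        out = jt.concat([self.branch1(x), self.branch2(x)], dim=1)
    return channel_shuffle(out, 2)                            # assumed helper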
Example #11
 def __init__(self,
              stages_repeats,
              stages_out_channels,
              num_classes=1000,
              inverted_residual=InvertedResidual):
     super(ShuffleNetV2, self).__init__()
     if (len(stages_repeats) != 3):
         raise ValueError(
             'expected stages_repeats as list of 3 positive ints')
     if (len(stages_out_channels) != 5):
         raise ValueError(
             'expected stages_out_channels as list of 5 positive ints')
     self._stage_out_channels = stages_out_channels
     input_channels = 3
     output_channels = self._stage_out_channels[0]
     self.conv1 = nn.Sequential(
         nn.Conv(input_channels, output_channels, 3, 2, 1, bias=False),
         nn.BatchNorm(output_channels), nn.Relu())
     input_channels = output_channels
     self.maxpool = nn.Pool(kernel_size=3,
                            stride=2,
                            padding=1,
                            op='maximum')
     stage_names = ['stage{}'.format(i) for i in [2, 3, 4]]
     for (name, repeats,
          output_channels) in zip(stage_names, stages_repeats,
                                  self._stage_out_channels[1:]):
         seq = [inverted_residual(input_channels, output_channels, 2)]
         for i in range((repeats - 1)):
             seq.append(
                 inverted_residual(output_channels, output_channels, 1))
         setattr(self, name, nn.Sequential(*seq))
         input_channels = output_channels
     output_channels = self._stage_out_channels[(-1)]
     self.conv5 = nn.Sequential(
         nn.Conv(input_channels, output_channels, 1, 1, 0, bias=False),
         nn.BatchNorm(output_channels), nn.Relu())
     self.fc = nn.Linear(output_channels, num_classes)
Example #12
    def __init__(self):
        super(Discriminator, self).__init__()
        self.down = nn.Sequential(
            nn.Conv(opt.channels, 64, 3, 2, 1), 
            nn.Relu()
        )
        self.down_size = (opt.img_size // 2)
        down_dim = (64 * ((opt.img_size // 2) ** 2))
        self.fc = nn.Sequential(
            nn.Linear(down_dim, 32), 
            nn.BatchNorm1d(32, 0.8), 
            nn.Relu(), 
            nn.Linear(32, down_dim), 
            nn.BatchNorm1d(down_dim), 
            nn.Relu()
        )
        self.up = nn.Sequential(
            nn.Upsample(scale_factor=2), 
            nn.Conv(64, opt.channels, 3, 1, 1)
        )

        for m in self.modules():
            weights_init_normal(m)
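This is a BEGAN-style autoencoder discriminator: encode, bottleneck through the fc block, decode. A sketch of the usual execute, assumed from that pattern:

def execute(self, img):
    out = self.down(img)                                  # (N, 64, s, s), s = down_size
    out = self.fc(out.reshape([out.shape[0], -1]))
    out = out.reshape([out.shape[0], 64, self.down_size, self.down_size])
    return self.up(out)                                   # reconstruct the image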
Example #13
File: resnet.py Project: yuntaolu/jittor
 def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, base_width=64, dilation=1, norm_layer=None):
     super(Bottleneck, self).__init__()
     if (norm_layer is None):
         norm_layer = nn.BatchNorm
     width = (int((planes * (base_width / 64.0))) * groups)
     self.conv1 = conv1x1(inplanes, width)
     self.bn1 = norm_layer(width)
     self.conv2 = conv3x3(width, width, stride, groups, dilation)
     self.bn2 = norm_layer(width)
     self.conv3 = conv1x1(width, (planes * self.expansion))
     self.bn3 = norm_layer((planes * self.expansion))
     self.relu = nn.Relu()
     self.downsample = downsample
     self.stride = stride
Example #14
    def __init__(self,
                 cardinality,
                 depth,
                 nlabels,
                 base_width,
                 widen_factor=4):
        """ Constructor

        Args:
            cardinality: number of convolution groups.
            depth: number of layers.
            nlabels: number of classes
            base_width: base number of channels in each group.
            widen_factor: factor to adjust the channel dimensionality
        """
        super(CifarResNeXt, self).__init__()
        self.cardinality = cardinality
        self.depth = depth
        self.block_depth = (self.depth - 2) // 9
        self.base_width = base_width
        self.widen_factor = widen_factor
        self.nlabels = nlabels
        self.output_size = 64
        self.stages = [
            64, 64 * self.widen_factor, 128 * self.widen_factor,
            256 * self.widen_factor
        ]

        self.conv_1_3x3 = nn.Conv(3, 64, 3, 1, 1, bias=False)
        self.bn_1 = nn.BatchNorm(64)
        self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
        self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
        self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
        self.classifier = nn.Linear(self.stages[3], nlabels)

        self.pool = nn.Pool(8, 1, op="mean")

        self.relu = nn.Relu()
        init.relu_invariant_gauss_(self.classifier.weight)

        for param in self.parameters():
            key = param.name()
            if key.split('.')[-1] == 'weight':
                if 'Conv' in key:
                    init.relu_invariant_gauss_(param, mode='fan_out')
                if 'BatchNorm' in key:
                    init.constant_(param, value=1.0)
            elif key.split('.')[-1] == 'bias':
                init.constant_(param, value=0.0)
Example #15
    def __init__(self, in_channels, out_channels, stride, cardinality,
                 base_width, widen_factor):
        """ Constructor

        Args:
            in_channels: input channel dimensionality
            out_channels: output channel dimensionality
            stride: conv stride. Replaces pooling layer.
            cardinality: num of convolution groups.
            base_width: base number of channels in each group.
            widen_factor: factor to reduce the input dimensionality before convolution.
        """
        super(ResNeXtBottleneck, self).__init__()
        width_ratio = out_channels / (widen_factor * 64.)
        D = cardinality * int(base_width * width_ratio)
        self.conv_reduce = nn.Conv(in_channels,
                                   D,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0,
                                   bias=False)
        self.bn_reduce = nn.BatchNorm(D)
        self.conv_conv = nn.Conv(D,
                                 D,
                                 kernel_size=3,
                                 stride=stride,
                                 padding=1,
                                 groups=cardinality,
                                 bias=False)
        self.bn = nn.BatchNorm(D)
        self.conv_expand = nn.Conv(D,
                                   out_channels,
                                   kernel_size=1,
                                   stride=1,
                                   padding=0,
                                   bias=False)
        self.bn_expand = nn.BatchNorm(out_channels)
        self.relu = nn.Relu()

        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut.append(
                nn.Conv(in_channels,
                        out_channels,
                        kernel_size=1,
                        stride=stride,
                        padding=0,
                        bias=False))
            self.shortcut.append(nn.BatchNorm(out_channels))
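A sketch of the grouped-convolution residual execute this constructor implies (the standard ResNeXt pattern, not quoted from the project); the empty Sequential shortcut acts as an identity when the channel counts already match:

def execute(self, x):
    out = self.relu(self.bn_reduce(self.conv_reduce(x)))
    out = self.relu(self.bn(self.conv_conv(out)))   # grouped 3x3
    out = self.bn_expand(self.conv_expand(out))
    return self.relu(out + self.shortcut(x))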
Example #16
 def __init__(self,
              in_ch,
              out_ch,
              kernel_size,
              stride,
              expansion_factor,
              bn_momentum=0.1):
     super(_InvertedResidual, self).__init__()
     assert (stride in [1, 2])
     assert (kernel_size in [3, 5])
     mid_ch = (in_ch * expansion_factor)
     self.apply_residual = ((in_ch == out_ch) and (stride == 1))
     self.layers = nn.Sequential(
         nn.Conv(in_ch, mid_ch, 1, bias=False),
         nn.BatchNorm(mid_ch, momentum=bn_momentum), nn.Relu(),
         nn.Conv(mid_ch,
                 mid_ch,
                 kernel_size,
                 padding=(kernel_size // 2),
                 stride=stride,
                 groups=mid_ch,
                 bias=False), nn.BatchNorm(mid_ch, momentum=bn_momentum),
         nn.Relu(), nn.Conv(mid_ch, out_ch, 1, bias=False),
         nn.BatchNorm(out_ch, momentum=bn_momentum))
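The residual is applied only when shapes match (same channels, stride 1), which is exactly what apply_residual records; a minimal execute sketch:

def execute(self, x):
    if self.apply_residual:
        return self.layers(x) + x
    return self.layers(x)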
Example #17
File: resnet.py Project: zzmcdc/jittor
 def __init__(self, block, layers, num_classes=1000):
     self.inplanes = 64
     self.conv1 = nn.Conv(3,
                          64,
                          kernel_size=7,
                          stride=2,
                          padding=3,
                          bias=False)
     self.bn1 = nn.BatchNorm(64)
     self.relu = nn.Relu()
     self.maxpool = nn.Pool(kernel_size=3, stride=2, padding=1)
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
     self.avgpool = nn.Pool(7, stride=1, op="mean")
     self.fc = nn.Linear(512 * block.expansion, num_classes)
Example #18
File: resnet.py Project: zzmcdc/jittor
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     self.conv1 = nn.Conv(inplanes, planes, kernel_size=1, bias=False)
     self.bn1 = nn.BatchNorm(planes)
     self.conv2 = nn.Conv(planes,
                          planes,
                          kernel_size=3,
                          stride=stride,
                          padding=1,
                          bias=False)
     self.bn2 = nn.BatchNorm(planes)
     self.conv3 = nn.Conv(planes,
                          planes * self.expansion,
                          kernel_size=1,
                          bias=False)
     self.bn3 = nn.BatchNorm(planes * self.expansion)
     self.relu = nn.Relu()
     self.downsample = downsample
     self.stride = stride
Example #19
 def __init__(self, num_classes=1000):
     super(AlexNet, self).__init__()
     self.features = nn.Sequential(
         nn.Conv(3, 64, kernel_size=11, stride=4, padding=2), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'),
         nn.Conv(64, 192, kernel_size=5, padding=2), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'),
         nn.Conv(192, 384, kernel_size=3, padding=1), nn.Relu(),
         nn.Conv(384, 256, kernel_size=3, padding=1), nn.Relu(),
         nn.Conv(256, 256, kernel_size=3, padding=1), nn.Relu(),
         nn.Pool(kernel_size=3, stride=2, op='maximum'))
     self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
     self.classifier = nn.Sequential(nn.Dropout(),
                                     nn.Linear(((256 * 6) * 6), 4096),
                                     nn.Relu(), nn.Dropout(),
                                     nn.Linear(4096, 4096), nn.Relu(),
                                     nn.Linear(4096, num_classes))
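Unlike SqueezeNet in Example #1, AlexNet's classifier is fully connected, so the flatten happens before it. A sketch of the usual execute:

def execute(self, x):
    x = self.features(x)
    x = self.avgpool(x)                   # (N, 256, 6, 6)
    x = x.reshape([x.shape[0], -1])       # (N, 9216)
    return self.classifier(x)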
Example #20
    def make_conv(
        in_channels, out_channels, kernel_size, stride=1, dilation=1
    ):
        conv = Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=dilation * (kernel_size - 1) // 2,
            dilation=dilation,
            bias=False if use_gn else True
        )

        init.kaiming_uniform_(conv.weight, a=1)
        if not use_gn:
            nn.init.constant_(conv.bias, 0)
        module = [conv,]
        if use_gn:
            module.append(group_norm(out_channels))
        if use_relu:
            module.append(nn.Relu())
        if len(module) > 1:
            return nn.Sequential(*module)
        return conv
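Note that use_gn and use_relu are free variables here; make_conv is normally defined inside a factory that fixes them in its closure (as in maskrcnn-benchmark's conv_with_kaiming_uniform). A hypothetical call, assuming such a closure with both flags set:

block = make_conv(256, 256, kernel_size=3)   # -> Sequential(conv, group norm, ReLU)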
Example #21
 def __init__(self, num_classes):
     self.base_net = seresnet152(pretrained=True)
     self.fc = nn.Sequential(nn.Dropout(), nn.Linear(1000, 4096), nn.Relu(),
                             nn.Dropout(), nn.Linear(4096, 4096), nn.Relu(),
                             nn.Linear(4096, num_classes))
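The pretrained seresnet152 backbone emits 1000-way logits, which the fc head remaps to num_classes; a one-line execute sketch:

def execute(self, x):
    return self.fc(self.base_net(x))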
Example #22
File: model.py Project: diviswen/PMP-Net
 def __init__(self, in_channel, out_channel, kernel_size=1, stride=1,  if_bn=True, activation_fn=nn.Relu()):
     super(Conv1d, self).__init__()
     self.conv = nn.Conv1d(in_channel, out_channel, kernel_size, stride=stride)
     # self.conv = nn.Linear(in_channel, out_channel)
     self.if_bn = if_bn
     if self.if_bn:
         self.bn = nn.BatchNorm1d(out_channel)
     self.activation_fn = activation_fn
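A sketch of the execute implied by the flags: conv, optional batch norm, optional activation:

def execute(self, x):
    out = self.conv(x)
    if self.if_bn:
        out = self.bn(out)
    if self.activation_fn is not None:
        out = self.activation_fn(out)
    return out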
Example #23
File: model.py Project: diviswen/PMP-Net
    def __init__(self, step=1, if_noise=False, noise_dim=3, noise_stdv=1e-2, dim_tail=32):
        super(StepModel, self).__init__()
        self.step = step
        self.noise_dim = noise_dim
        self.noise_stdv = noise_stdv
        self.if_noise = if_noise
        self.dim_tail = dim_tail

        self.sa_module_1 = PointnetModule([3 + (self.noise_dim if self.if_noise else 0), 64, 64, 128], n_points=512, radius=0.2, n_samples=32)
        self.sa_module_2 = PointnetModule([128, 128, 128, 256], n_points=128, radius=0.4, n_samples=32)
        self.sa_module_3 = PointnetModule([256, 256, 512, 1024], n_points=None, radius=0.2, n_samples=32)

        self.fp_module_3 = PointNetFeaturePropagation(1024+256, [256, 256])
        self.fp_module_2 = PointNetFeaturePropagation(256+128, [256, 128])
        self.fp_module_1 = PointNetFeaturePropagation(128+6, [128, 128, 128])

        self.unit_3 = Unit(step=step, in_channel=256)
        self.unit_2 = Unit(step=step, in_channel=128)
        self.unit_1 = Unit(step=step, in_channel=128)

        mlp = [128, 64, 3]
        last_channel = 128 + self.dim_tail
        mlp_conv = []
        for out_channel in mlp[:-1]:
            mlp_conv.append(Conv1d(last_channel, out_channel, if_bn=True, activation_fn=nn.Relu()))
            last_channel = out_channel
        mlp_conv.append(Conv1d(last_channel, mlp[-1], if_bn=False, activation_fn=None))
        self.mlp_conv = nn.Sequential(*mlp_conv)

        self.tanh = nn.Tanh()
Example #24
 def __init__(self, input_size):
     self.linear1 = nn.Linear(input_size, 10)
     self.relu1 = nn.Relu()
     self.linear2 = nn.Linear(10, 1)
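The matching execute is a single chain (sketch):

def execute(self, x):
    return self.linear2(self.relu1(self.linear1(x)))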
Example #25
 def __init__(self, in_size, out_size):
     super(UNetUp, self).__init__()
     self.model = nn.Sequential(nn.Upsample(scale_factor=2), nn.Conv(in_size, out_size, 3, stride=1, padding=1, bias=False), nn.BatchNorm(out_size, 0.8), nn.Relu())
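U-Net decoder blocks usually take a skip connection from the encoder and concatenate it after upsampling; a hedged sketch under that assumption (jt is import jittor as jt):

def execute(self, x, skip_input):
    x = self.model(x)                            # upsample + conv + BN + ReLU
    return jt.concat([x, skip_input], dim=1)     # fuse with encoder features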