Code Example #1
File: mobilenetv2.py  Project: hellowaywewe/read_doc
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              stride=1,
              groups=1):
     super(ConvBNReLU, self).__init__()
     padding = (kernel_size - 1) // 2  # 'same'-style padding for an odd kernel
     if groups == 1:
         conv = layers.Conv2d(in_channels,
                              out_channels,
                              kernel_size,
                              stride,
                              pad_mode='pad',
                              padding=padding)
     else:
         # Depthwise convolution: group=in_channels gives one filter per
         # input channel (callers pass in_channels == out_channels).
         conv = layers.Conv2d(in_channels,
                              in_channels,
                              kernel_size,
                              stride,
                              pad_mode='pad',
                              padding=padding,
                              group=in_channels)
     self.features = layers.SequentialLayer(
         [conv, layers.BatchNorm2d(out_channels),
          layers.ReLU6()])
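For reference, here is a minimal runnable sketch of what the depthwise branch above builds, assuming TinyMS's tinyms.layers API (the same layers module this file imports); sizes are illustrative:

import tinyms as ts
from tinyms import layers

# Depthwise 3x3 block as produced by the groups != 1 branch:
# group=in_channels convolves every channel independently.
dw = layers.Conv2d(32, 32, 3, 1, pad_mode='pad', padding=1, group=32)
block = layers.SequentialLayer([dw, layers.BatchNorm2d(32), layers.ReLU6()])
out = block(ts.ones((1, 32, 56, 56)))  # spatial size preserved: (1, 32, 56, 56)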
Code Example #2
 def __init__(self,
              in_planes=3,
              ndf=64,
              n_layers=3,
              alpha=0.2,
              norm_mode='batch'):
     super(Discriminator, self).__init__()
     kernel_size = 4
     layer_list = [
         layers.Conv2d(in_planes,
                       ndf,
                       kernel_size,
                       2,
                       pad_mode='pad',
                       padding=1),
         layers.LeakyReLU(alpha)
     ]
     nf_mult = ndf
     for i in range(1, n_layers):
         nf_mult_prev = nf_mult
         nf_mult = min(2**i, 8) * ndf  # double channels, capped at 8 * ndf
         layer_list.append(
             ConvNormReLU(nf_mult_prev,
                          nf_mult,
                          kernel_size,
                          2,
                          alpha,
                          norm_mode,
                          padding=1))
     nf_mult_prev = nf_mult
     nf_mult = min(2**n_layers, 8) * ndf
     layer_list.append(
         ConvNormReLU(nf_mult_prev,
                      nf_mult,
                      kernel_size,
                      1,
                      alpha,
                      norm_mode,
                      padding=1))
     layer_list.append(
         layers.Conv2d(nf_mult,
                       1,
                       kernel_size,
                       1,
                       pad_mode='pad',
                       padding=1))
     self.features = layers.SequentialLayer(layer_list)
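The class above follows the pix2pix/CycleGAN PatchGAN design: stride-2 stages double the channel count up to a cap of 8 * ndf, one stride-1 stage follows, and a final convolution emits a one-channel map in which each logit classifies a single image patch as real or fake. A hedged shape walk-through with the defaults (ndf=64, n_layers=3); it assumes ConvNormReLU from Code Example #6 is in scope and that the full class definition applies self.features:

net = Discriminator(in_planes=3, ndf=64, n_layers=3)
x = ts.ones((1, 3, 256, 256))
# Three stride-2 convs: 256 -> 128 -> 64 -> 32; the stride-1 stage gives
# 31, the final conv 30, so the result is a (1, 1, 30, 30) patch map.
patch_logits = net.features(x)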
Code Example #3
 def __init__(self,
              in_planes=3,
              ngf=64,
              n_layers=9,
              alpha=0.2,
              norm_mode='batch',
              dropout=True,
              pad_mode="CONSTANT"):
     super(ResNetGenerator, self).__init__()
     self.conv_in = ConvNormReLU(in_planes,
                                 ngf,
                                 7,
                                 1,
                                 alpha=alpha,
                                 norm_mode=norm_mode,
                                 pad_mode=pad_mode)
     self.down_1 = ConvNormReLU(ngf, ngf * 2, 3, 2, alpha, norm_mode)
     self.down_2 = ConvNormReLU(ngf * 2, ngf * 4, 3, 2, alpha, norm_mode)
     # Build n_layers independent blocks; [block] * n_layers would reuse a
     # single ResidualBlock (and its parameters) n_layers times.
     layer_list = [
         ResidualBlock(
             ngf * 4, norm_mode, dropout=dropout, pad_mode=pad_mode)
         for _ in range(n_layers)
     ]
     self.residuals = layers.SequentialLayer(layer_list)
     self.up_2 = ConvTransposeNormReLU(ngf * 4, ngf * 2, 3, 2, alpha,
                                       norm_mode)
     self.up_1 = ConvTransposeNormReLU(ngf * 2, ngf, 3, 2, alpha, norm_mode)
     if pad_mode == "CONSTANT":
         self.conv_out = layers.Conv2d(ngf,
                                       3,
                                       kernel_size=7,
                                       stride=1,
                                       pad_mode='pad',
                                       padding=3)
     else:
         pad = layers.Pad(paddings=((0, 0), (0, 0), (3, 3), (3, 3)),
                          mode=pad_mode)
         conv = layers.Conv2d(ngf,
                              3,
                              kernel_size=7,
                              stride=1,
                              pad_mode='pad')
         self.conv_out = layers.SequentialLayer([pad, conv])
     self.activate = Tanh()
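The non-CONSTANT branch at the end is worth spelling out: an explicit Pad layer supplies the 3-pixel border (e.g. by reflection), so the 7x7 convolution runs with pad_mode='pad' and its default padding of 0. A minimal sketch, assuming pad_mode="REFLECT" and TinyMS's layers module:

import tinyms as ts
from tinyms import layers

pad = layers.Pad(paddings=((0, 0), (0, 0), (3, 3), (3, 3)), mode="REFLECT")
conv = layers.Conv2d(64, 3, kernel_size=7, stride=1, pad_mode='pad')
conv_out = layers.SequentialLayer([pad, conv])
y = conv_out(ts.ones((1, 64, 256, 256)))  # -> (1, 3, 256, 256)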
Code Example #4
File: alexnet.py  Project: hellowaywewe/tinyms
def _conv5x5(in_channel, out_channel, stride=1):
    weight_shape = (out_channel, in_channel, 5, 5)
    weight = _weight_variable(weight_shape)
    return layers.Conv2d(in_channel,
                         out_channel,
                         kernel_size=5,
                         stride=stride,
                         padding=2,
                         pad_mode='pad',
                         weight_init=weight)
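With kernel_size=5, padding=2 and stride=1 the output keeps the input's spatial size. A hedged equivalent call without the file's private _weight_variable helper (weight_init='normal' is a stand-in for it, not the source's initializer):

import tinyms as ts
from tinyms import layers

conv = layers.Conv2d(64, 192, kernel_size=5, stride=1,
                     padding=2, pad_mode='pad', weight_init='normal')
y = conv(ts.ones((1, 64, 27, 27)))  # (27 + 2*2 - 5) + 1 = 27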
Code Example #5
File: alexnet.py  Project: hellowaywewe/tinyms
def _conv7x7(in_channel, out_channel, stride=1):
    weight_shape = (out_channel, in_channel, 7, 7)
    weight = _weight_variable(weight_shape)
    return layers.Conv2d(in_channel,
                         out_channel,
                         kernel_size=7,
                         stride=stride,
                         padding=0,
                         pad_mode='same',
                         weight_init=weight)
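Unlike _conv5x5, this helper uses pad_mode='same', where the convolution computes its own padding (the explicit padding argument must be 0 in this mode) and the output spatial size is ceil(input / stride). A sketch under the same stand-in initializer assumption:

import tinyms as ts
from tinyms import layers

conv = layers.Conv2d(3, 64, kernel_size=7, stride=2,
                     padding=0, pad_mode='same', weight_init='normal')
y = conv(ts.ones((1, 3, 224, 224)))  # 'same': ceil(224 / 2) = 112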
Code Example #6
 def __init__(self,
              in_planes,
              out_planes,
              kernel_size=4,
              stride=2,
              alpha=0.2,
              norm_mode='batch',
              pad_mode='CONSTANT',
              use_relu=True,
              padding=None):
     super(ConvNormReLU, self).__init__()
     norm = layers.BatchNorm2d(out_planes)
     if norm_mode == 'instance':
         # Use BatchNorm2d with batch size 1, affine=False, training=True
         # as a stand-in for InstanceNorm2d
         norm = layers.BatchNorm2d(out_planes, affine=False)
     has_bias = (norm_mode == 'instance')
     if padding is None:
         padding = (kernel_size - 1) // 2
     if pad_mode == 'CONSTANT':
         conv = layers.Conv2d(in_planes,
                              out_planes,
                              kernel_size,
                              stride,
                              pad_mode='pad',
                              has_bias=has_bias,
                              padding=padding)
         layer_list = [conv, norm]
     else:
         paddings = ((0, 0), (0, 0), (padding, padding), (padding, padding))
         pad = layers.Pad(paddings=paddings, mode=pad_mode)
         conv = layers.Conv2d(in_planes,
                              out_planes,
                              kernel_size,
                              stride,
                              pad_mode='pad',
                              has_bias=has_bias)
         layer_list = [pad, conv, norm]
     if use_relu:
         relu = layers.ReLU()
         if alpha > 0:
             relu = layers.LeakyReLU(alpha)
         layer_list.append(relu)
     self.features = layers.SequentialLayer(layer_list)
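The instance-norm stand-in relies on per-sample statistics: with batch size 1 and affine=False, a training-mode BatchNorm2d normalizes each sample on its own, which is also why the convolution keeps its bias (has_bias=True) only in that mode. A minimal runnable sketch of the 'CONSTANT' branch with those settings:

import tinyms as ts
from tinyms import layers

conv = layers.Conv2d(64, 128, 4, 2, pad_mode='pad', padding=1, has_bias=True)
norm = layers.BatchNorm2d(128, affine=False)  # instance-norm stand-in
block = layers.SequentialLayer([conv, norm, layers.LeakyReLU(0.2)])
y = block(ts.ones((1, 64, 128, 128)))  # -> (1, 128, 64, 64)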
Code Example #7
def test_sequential():
    context.set_context(mode=context.GRAPH_MODE, device_target="CPU")

    net = layers.SequentialLayer([
        layers.Conv2d(1, 6, 5, pad_mode='valid', weight_init="ones"),
        layers.ReLU(),
        layers.MaxPool2d(kernel_size=2, stride=2)
    ])
    model = Model(net)
    model.compile()
    z = model.predict(ts.ones((1, 1, 32, 32)))
    print(z.asnumpy())
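For reference, the expected output shape: the 'valid' 5x5 convolution maps the 32x32 input to 28x28 with 6 channels, and the 2x2 stride-2 max-pool halves that to 14x14. A one-line check that could follow the predict call:

assert z.asnumpy().shape == (1, 6, 14, 14)  # 32 -(5x5 valid)-> 28 -(2x2 pool)-> 14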
Code Example #8
File: unet.py  Project: huxiaoman7/tinyms
    def __init__(self, outer_nc, inner_nc, in_planes=None, dropout=False,
                 submodule=None, outermost=False, innermost=False, alpha=0.2, norm_mode='batch'):
        super(UnetSkipConnectionBlock, self).__init__()
        downnorm = layers.BatchNorm2d(inner_nc)
        upnorm = layers.BatchNorm2d(outer_nc)
        use_bias = False
        if norm_mode == 'instance':
            downnorm = layers.BatchNorm2d(inner_nc, affine=False)
            upnorm = layers.BatchNorm2d(outer_nc, affine=False)
            use_bias = True
        if in_planes is None:
            in_planes = outer_nc
        downconv = layers.Conv2d(in_planes, inner_nc, kernel_size=4,
                                 stride=2, padding=1, has_bias=use_bias,
                                 pad_mode='pad')
        downrelu = layers.LeakyReLU(alpha)
        uprelu = layers.ReLU()

        if outermost:
            upconv = layers.Conv2dTranspose(inner_nc * 2, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, pad_mode='pad')
            down = [downconv]
            up = [uprelu, upconv, layers.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = layers.Conv2dTranspose(inner_nc, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, has_bias=use_bias, pad_mode='pad')
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = layers.Conv2dTranspose(inner_nc * 2, outer_nc,
                                            kernel_size=4, stride=2,
                                            padding=1, has_bias=use_bias, pad_mode='pad')
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            model = down + [submodule] + up
            if dropout:
                model.append(layers.Dropout(0.5))

        self.model = layers.SequentialLayer(model)
        self.skip_connections = not outermost
        self.concat = Concat(axis=1)
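This block is designed to be nested recursively, innermost first, as in the standard pix2pix U-Net generator: every non-outermost level concatenates its input with its submodule's output along the channel axis (hence inner_nc * 2 on those upconvs). A hedged construction sketch; it assumes the full class definition and shows only the nesting pattern, not the exact generator from the source project:

ngf = 64
block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, innermost=True)
block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, submodule=block)
block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, submodule=block)
block = UnetSkipConnectionBlock(ngf, ngf * 2, submodule=block)
unet = UnetSkipConnectionBlock(3, ngf, submodule=block, outermost=True)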
Code Example #9
File: mobilenetv2.py  Project: hellowaywewe/read_doc
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        residual_layers = []
        if expand_ratio != 1:
            residual_layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        residual_layers.extend([
            ConvBNReLU(hidden_dim,
                       hidden_dim,
                       stride=stride,
                       groups=hidden_dim),
            layers.Conv2d(hidden_dim,
                          oup,
                          kernel_size=1,
                          stride=1,
                          has_bias=False),
            layers.BatchNorm2d(oup),
        ])
        self.conv = layers.SequentialLayer(residual_layers)
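The block follows the MobileNetV2 recipe: an optional 1x1 expansion to hidden_dim = round(inp * expand_ratio) channels, a depthwise convolution, then a 1x1 projection with BatchNorm but no activation (the linear bottleneck); the skip connection is used only when stride == 1 and inp == oup. A hedged instantiation, assuming ConvBNReLU from Code Example #1 and the full class are in scope:

blk = InvertedResidual(inp=24, oup=32, stride=2, expand_ratio=6)
# hidden_dim = round(24 * 6) = 144; use_res_connect is False (stride 2, 24 != 32)
res = InvertedResidual(inp=32, oup=32, stride=1, expand_ratio=6)
# use_res_connect is True: construct would add the input back (not shown here)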