Example #1
    def __init__(self, in_channels, out_channels, stride, dilates):
        super(ESPBlock, self).__init__()
        num_branches = len(dilates)
        assert out_channels % num_branches == 0
        self.downsample = (stride != 1)
        mid_channels = out_channels // num_branches

        with self.init_scope():
            self.reduce_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=num_branches,
                activation=(lambda: L.PReLU(shape=(mid_channels, ))))

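            # one dilated depthwise 3x3 branch per rate in dilates; the
            # HierarchicalConcurrent container sums each branch's output
            # with the previous one before concatenating (ESPNet-style
            # hierarchical feature fusion)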
            self.branches = HierarchicalConcurrent()
            with self.branches.init_scope():
                for i in range(num_branches):
                    setattr(
                        self.branches, "branch{}".format(i + 1),
                        conv3x3(in_channels=mid_channels,
                                out_channels=mid_channels,
                                stride=stride,
                                pad=dilates[i],
                                dilate=dilates[i],
                                groups=mid_channels))

            self.merge_conv = conv1x1_block(in_channels=out_channels,
                                            out_channels=out_channels,
                                            groups=num_branches,
                                            activation=None)
            self.preactiv = PreActivation(in_channels=out_channels)
            self.activ = L.PReLU(shape=(out_channels, ))
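
Note the recurring activation=(lambda: L.PReLU(shape=(...,))) idiom: the conv block helpers take a factory rather than a ready-made link, so each block can construct its own PReLU inside its own init_scope. As a minimal standalone sketch of the link itself (assuming only stock chainer and numpy; the shapes are illustrative):

    import numpy as np
    import chainer.links as L

    # L.PReLU(shape=(c,)) allocates one learnable slope per channel
    # (initialized to 0.25); negative inputs are scaled by that slope.
    prelu = L.PReLU(shape=(3,))
    x = np.random.randn(4, 3).astype(np.float32)  # illustrative batch
    y = prelu(x)  # chainer.Variable with y.shape == (4, 3)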
Example #2
 def __init__(self, n_in, n_middle, n_turn):
     super(NaiveFCColorPainter, self).__init__(
         l1=L.Linear(n_middle, n_middle),
         l2=L.Linear(n_middle, n_middle),
         l3=L.Linear(n_middle, n_middle),
         l4=L.Linear(n_middle, n_in),
         act1=L.PReLU(n_middle),
         act2=L.PReLU(n_middle),
         act3=L.PReLU(n_middle),
         bn_list2=chainer.ChainList(*[
             L.BatchNormalization(n_middle, use_cudnn=False)
             for i in range(n_turn)
         ]),
         bn_list3=chainer.ChainList(*[
             L.BatchNormalization(n_middle, use_cudnn=False)
             for i in range(n_turn)
         ]),
         l1_attention=L.Linear(n_middle, n_middle),
         act1_attention=L.PReLU(n_middle),
         l2_attention=L.Linear(n_middle, n_in),
     )
     field = n_in // 3
     rang = int(field**0.5)
     self.image_shape = (3, rang, rang)
     self.image_size = n_in
Example #3
    def __init__(self,
                 channels,
                 in_size,
                 **kwargs):
        super(DiceBaseBlock, self).__init__(**kwargs)
        mid_channels = 3 * channels

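        # DiCE unit: three dimension-wise branches (a channel-wise 3x3 conv
        # plus height-wise and width-wise branches) run in parallel and are
        # concatenated, which is why mid_channels is 3 * channels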
        with self.init_scope():
            self.convs = Concurrent()
            with self.convs.init_scope():
                setattr(self.convs, "ch_conv", conv3x3(
                    in_channels=channels,
                    out_channels=channels,
                    groups=channels))
                setattr(self.convs, "h_conv", SpatialDiceBranch(
                    sp_size=in_size[0],
                    is_height=True))
                setattr(self.convs, "w_conv", SpatialDiceBranch(
                    sp_size=in_size[1],
                    is_height=False))

            self.norm_activ = NormActivation(
                in_channels=mid_channels,
                activation=(lambda: L.PReLU(shape=(mid_channels,))))
            self.shuffle = ChannelShuffle(
                channels=mid_channels,
                groups=3)
            self.squeeze_conv = conv1x1_block(
                in_channels=mid_channels,
                out_channels=channels,
                groups=channels,
                activation=(lambda: L.PReLU(shape=(channels,))))
Example #4
    def __init__(self,
                 Conv,
                 scale,
                 n_feats,
                 bn=False,
                 act=False,
                 nobias=False):
        m = []
        if (scale & (scale - 1)) == 0:  # Is scale = 2^n?
            for _ in range(int(np.log2(scale))):
                m.append(Conv(n_feats, 4 * n_feats, 3, nobias))
                m.append(partial(F.depth2space, r=2))
                if bn:
                    m.append(L.BatchNormalization(n_feats))

                if act == 'relu':
                    m.append(F.relu)
                elif act == 'prelu':
                    m.append(L.PReLU(n_feats))
        elif scale == 3:
            m.append(Conv(n_feats, 9 * n_feats, 3, nobias))
            m.append(partial(F.depth2space, r=3))
            if bn:
                m.append(L.BatchNormalization(n_feats))

            if act == 'relu':
                m.append(F.relu)
            elif act == 'prelu':
                m.append(L.PReLU(n_feats))

        else:
            raise NotImplementedError('scale number not implemented')

        super(Upsampler, self).__init__(*m)
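
The Upsampler above is a sub-pixel (pixel-shuffle) upscaler: each x2 stage expands the channels 4x with a 3x3 conv and then F.depth2space folds them back into space. A quick shape check (illustrative sizes only):

    import numpy as np
    import chainer.functions as F

    # depth2space with r=2 turns (B, 4*C, H, W) into (B, C, 2*H, 2*W)
    x = np.zeros((1, 4 * 16, 8, 8), dtype=np.float32)
    y = F.depth2space(x, r=2)
    print(y.shape)  # (1, 16, 16, 16)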
Example #5
    def __init__(self, in_channels, out_channels, ksize, scale_factor, size,
                 bn_eps):
        super(SBBlock, self).__init__()
        self.use_scale = (scale_factor > 1)

        with self.init_scope():
            if self.use_scale:
                self.down_scale = partial(F.average_pooling_2d,
                                          ksize=scale_factor,
                                          stride=scale_factor)
                self.up_scale = InterpolationBlock(scale_factor=scale_factor,
                                                   out_size=size)

            use_fdw = (scale_factor > 0)
            if use_fdw:
                fdwconv_class = fdwconv3x3_block if ksize == 3 else fdwconv5x5_block
                self.conv1 = fdwconv_class(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    bn_eps=bn_eps,
                    activation=(lambda: L.PReLU(shape=(in_channels, ))))
            else:
                self.conv1 = dwconv3x3_block(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    bn_eps=bn_eps,
                    activation=(lambda: L.PReLU(shape=(in_channels, ))))

            self.conv2 = conv1x1(in_channels=in_channels,
                                 out_channels=out_channels)

            self.bn = L.BatchNormalization(size=out_channels, eps=bn_eps)
Example #6
    def __init__(self, n_in, n_middle, n_units, n_turn,
                 sensor, language, painter, reconstructor=None):
        super(NaiveListener, self).__init__(
            sensor=sensor,
            painter=painter,
            language=language,
            l1_language=L.Linear(n_units, n_middle),
            l1_canvas=L.Linear(n_middle, n_middle),
            l2=L.Linear(n_middle, n_middle),
            l3=L.Linear(n_middle, n_middle),
            bn_list2=chainer.ChainList(*[
                L.BatchNormalization(n_middle, use_cudnn=False)
                for i in range(n_turn)
            ]),
            bn_list3=chainer.ChainList(*[
                L.BatchNormalization(n_middle, use_cudnn=False)
                for i in range(n_turn)
            ]),
            act1=L.PReLU(n_middle),
            act2=L.PReLU(n_middle),
            act3=L.PReLU(n_middle),
        )

        if reconstructor:
            self.add_link('reconstructor', reconstructor)
        else:
            self.reconstructor = None

        self.act = F.relu
Example #7
 def __init__(self, n_in, n_middle):
     super(NaiveFCReconstructor, self).__init__(
         l1=L.Linear(n_middle, n_middle),
         l2=L.Linear(n_middle, n_middle),
         l3=L.Linear(n_middle, n_in),
         act1=L.PReLU(n_middle),
         act2=L.PReLU(n_middle),
     )
Example #8
 def __init__(self, n_in, n_middle, n_units):
     super(ImageDecoder, self).__init__(
         l1=L.Linear(n_units, n_middle),
         l2=L.Linear(n_middle, n_middle),
         l3=L.Linear(n_middle, n_in),
         act1=L.PReLU(n_middle),
         act2=L.PReLU(n_middle),
     )
     self.act3 = F.sigmoid
Example #9
 def __init__(self, n_in, n_middle, n_units):
     super(ImageEncoder, self).__init__(
         l1=L.Linear(n_in, n_middle),
         l2=L.Linear(n_middle, n_middle),
         l3=L.Linear(n_middle, n_units),
         act1=L.PReLU(n_middle),
         act2=L.PReLU(n_middle),
         bn1=L.BatchNormalization(n_middle, use_cudnn=False),
         bn2=L.BatchNormalization(n_middle, use_cudnn=False),
         bn3=L.BatchNormalization(n_units, use_cudnn=False),
     )
     self.act3 = F.tanh
Example #10
 def __init__(self, n_in, n_middle, n_turn):
     super(NaiveFCPainter, self).__init__(
         l1=L.Linear(n_middle, n_middle),
         l2=L.Linear(n_middle, n_middle),
         l3=L.Linear(n_middle, n_in),
         l3_gate=L.Linear(n_middle, n_in),
         act1=L.PReLU(n_middle),
         act2=L.PReLU(n_middle),
         bn_list2=chainer.ChainList(*[
             L.BatchNormalization(n_middle, use_cudnn=False)
             for i in range(n_turn)
         ]))
Example #11
 def __init__(self, in_channels, out_channels, final_groups):
     super(ESPFinalBlock, self).__init__()
     with self.init_scope():
         self.conv1 = conv3x3_block(
             in_channels=in_channels,
             out_channels=in_channels,
             groups=in_channels,
             activation=(lambda: L.PReLU(shape=(in_channels, ))))
         self.conv2 = conv1x1_block(
             in_channels=in_channels,
             out_channels=out_channels,
             groups=final_groups,
             activation=(lambda: L.PReLU(shape=(out_channels, ))))
Example #12
 def __init__(self,
              channels,
              **kwargs):
     super(StridedDiceLeftBranch, self).__init__(**kwargs)
     with self.init_scope():
         self.conv1 = conv3x3_block(
             in_channels=channels,
             out_channels=channels,
             stride=2,
             groups=channels,
             activation=(lambda: L.PReLU(shape=(channels,))))
         self.conv2 = conv1x1_block(
             in_channels=channels,
             out_channels=channels,
             activation=(lambda: L.PReLU(shape=(channels,))))
Example #13
 def __init__(self, n_in, middle_units, n_units, n_turn, drop_ratio=0.):
     super(EricFCSensor, self).__init__(
         l1=L.Linear(n_in, middle_units),
         #l2=L.Linear(middle_units, n_units),
         l2=L.Linear(middle_units, middle_units),
         #l3=L.Linear(middle_units, n_units),
         l3=L.Linear(middle_units, middle_units),
         # bn=L.BatchNormalization(n_units),
         bn1=L.BatchNormalization(middle_units, use_cudnn=False),
         bn2=L.BatchNormalization(middle_units, use_cudnn=False),
         bn3=L.BatchNormalization(n_units, use_cudnn=False),
         act1=L.PReLU(middle_units),
         act2=L.PReLU(middle_units),
     )
     self.drop_ratio = drop_ratio
Example #14
    def __init__(self, in_channels, out_channels, dilate, se_reduction, down,
                 bn_eps, **kwargs):
        super(CGBlock, self).__init__(**kwargs)
        self.down = down
        if self.down:
            mid1_channels = out_channels
            mid2_channels = 2 * out_channels
        else:
            mid1_channels = out_channels // 2
            mid2_channels = out_channels

        with self.init_scope():
            if self.down:
                self.conv1 = conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    stride=2,
                    bn_eps=bn_eps,
                    activation=(lambda: L.PReLU(out_channels)))
            else:
                self.conv1 = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid1_channels,
                    bn_eps=bn_eps,
                    activation=(lambda: L.PReLU(mid1_channels)))

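            # context-guided pair: a plain depthwise 3x3 (local features)
            # alongside a dilated depthwise 3x3 (surrounding context); the
            # Concurrent container concatenates them, doubling the channels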
            self.branches = Concurrent()
            with self.branches.init_scope():
                setattr(self.branches, "branches1",
                        depthwise_conv3x3(channels=mid1_channels))
                setattr(
                    self.branches, "branches2",
                    depthwise_conv3x3(channels=mid1_channels,
                                      pad=dilate,
                                      dilate=dilate))

            self.norm_activ = NormActivation(
                in_channels=mid2_channels,
                bn_eps=bn_eps,
                activation=(lambda: L.PReLU(mid2_channels)))

            if self.down:
                self.conv2 = conv1x1(in_channels=mid2_channels,
                                     out_channels=out_channels)

            self.se = SEBlock(channels=out_channels,
                              reduction=se_reduction,
                              use_conv=False)
Example #15
 def __init__(self, n_in, n_middle, n_units, n_turn, drop_ratio=0.0):
     super(EricFCPainter, self).__init__(
         #l1=L.Linear(n_units, n_middle),
         l1=L.Linear(n_middle, n_middle),
         l2=L.Linear(n_middle, n_middle),
         l3=L.Linear(n_middle, n_in),
         bn1=L.BatchNormalization(n_middle, use_cudnn=False),
         bn2=L.BatchNormalization(n_middle, use_cudnn=False),
         bn3=L.BatchNormalization(n_in, use_cudnn=False),
         act1=L.PReLU(n_middle),
         act2=L.PReLU(n_middle),
     )
     self.drop_ratio = drop_ratio
     if n_turn >= 2:
         self.add_link('lo_tanh', L.Linear(n_middle, n_in))
         self.add_link('bn_lo', L.BatchNormalization(n_in, use_cudnn=False))
Example #16
 def __init__(self, dim_g, dim_emb):
     n_h = round((dim_g + dim_emb) * 0.5)
     super(Observable, self).__init__(l1=L.Linear(dim_emb, n_h),
                                      p1=L.PReLU(),
                                      b1=L.BatchNormalization(n_h),
                                      l2=L.Linear(n_h, dim_g))
     self.add_persistent('dim_g', dim_g)
Example #17
 def __init__(self, dim2, classes, out_size, bn_eps):
     super(SBDecoder, self).__init__()
     with self.init_scope():
         self.decode1 = SBDecodeBlock(
             channels=classes,
             out_size=((out_size[0] // 8,
                        out_size[1] // 8) if out_size else None),
             bn_eps=bn_eps)
         self.decode2 = SBDecodeBlock(
             channels=classes,
             out_size=((out_size[0] // 4,
                        out_size[1] // 4) if out_size else None),
             bn_eps=bn_eps)
         self.conv3c = conv1x1_block(
             in_channels=dim2,
             out_channels=classes,
             bn_eps=bn_eps,
             activation=(lambda: L.PReLU(shape=(classes, ))))
         self.output = L.Deconvolution2D(
             in_channels=classes,
             out_channels=classes,
             ksize=2,
             stride=2,
             pad=0,
             # output_pad=0,
             nobias=True)
         self.up = InterpolationBlock(scale_factor=2)
Example #18
    def __init__(self, x_channels, y_in_channels, y_out_channels, layers,
                 dilate, se_reduction, bn_eps, **kwargs):
        super(CGStage, self).__init__(**kwargs)
        self.use_x = (x_channels > 0)
        self.use_unit = (layers > 0)

        with self.init_scope():
            if self.use_x:
                self.x_down = partial(F.average_pooling_2d,
                                      ksize=3,
                                      stride=2,
                                      pad=1)

            if self.use_unit:
                self.unit = CGUnit(in_channels=y_in_channels,
                                   out_channels=(y_out_channels - x_channels),
                                   layers=layers,
                                   dilate=dilate,
                                   se_reduction=se_reduction,
                                   bn_eps=bn_eps)

            self.norm_activ = NormActivation(
                in_channels=y_out_channels,
                bn_eps=bn_eps,
                activation=(lambda: L.PReLU(y_out_channels)))
Example #19
 def __init__(self,
              in_channels):
     super(PreActivation, self).__init__()
     with self.init_scope():
         self.bn = L.BatchNormalization(
             size=in_channels,
             eps=1e-5)
         self.activ = L.PReLU(shape=(in_channels,))
Example #20
 def __init__(self):
     # Specify the initial values for the weights
     w1 = chainer.initializers.Normal(scale=0.0378, dtype=None)
     w2 = chainer.initializers.Normal(scale=0.3536, dtype=None)
     w3 = chainer.initializers.Normal(scale=0.1179, dtype=None)
     w4 = chainer.initializers.Normal(scale=0.189, dtype=None)
     w5 = chainer.initializers.Normal(scale=0.0001, dtype=None)
     super(SuperResolution_NN, self).__init__()
     # Define all the layers
     with self.init_scope():
         self.c1 = L.Convolution2D(1, 56, ksize=5, stride=1, pad=0, initialW=w1)
         self.l1 = L.PReLU()
         self.c2 = L.Convolution2D(56, 12, ksize=1, stride=1, pad=0, initialW=w2)
         self.l2 = L.PReLU()
         self.c3 = L.Convolution2D(12, 12, ksize=3, stride=1, pad=1, initialW=w3)
         self.l3 = L.PReLU()
         self.c4 = L.Convolution2D(12, 12, ksize=3, stride=1, pad=1, initialW=w3)
         self.l4 = L.PReLU()
         self.c5 = L.Convolution2D(12, 12, ksize=3, stride=1, pad=1, initialW=w3)
         self.l5 = L.PReLU()
         self.c6 = L.Convolution2D(12, 12, ksize=3, stride=1, pad=1, initialW=w3)
         self.l6 = L.PReLU()
         # 1x1 expansion layer: a 1x1 kernel needs no padding
         self.c7 = L.Convolution2D(12, 56, ksize=1, stride=1, pad=0, initialW=w4)
         self.l7 = L.PReLU()
         self.c8 = L.Deconvolution2D(56, 1, ksize=9, stride=3, pad=4, initialW=w5)
Example #21
 def __init__(self, in_channels, mid_channels, out_channels, bn_eps):
     super(SBEncoderInitBlock, self).__init__()
     with self.init_scope():
         self.conv1 = conv3x3_block(
             in_channels=in_channels,
             out_channels=mid_channels,
             stride=2,
             bn_eps=bn_eps,
             activation=(lambda: L.PReLU(shape=(mid_channels, ))))
         self.conv2 = dwsconv3x3_block(
             in_channels=mid_channels,
             out_channels=out_channels,
             stride=2,
             dw_use_bn=False,
             bn_eps=bn_eps,
             dw_activation=None,
             pw_activation=(lambda: L.PReLU(shape=(out_channels, ))),
             se_reduction=1)
Example #22
 def __init__(self, in_channels, out_channels):
     super(ESPInitBlock, self).__init__()
     with self.init_scope():
         self.conv = conv3x3_block(
             in_channels=in_channels,
             out_channels=out_channels,
             stride=2,
             activation=(lambda: L.PReLU(shape=(out_channels, ))))
         self.pool = partial(F.average_pooling_2d, ksize=3, stride=2, pad=1)
Example #23
 def __init__(self, in_ch, out_ch, ksize, stride=1, pad=1, dilation=1,
              nobias=False, upsample=None):
     super(SymmetricConvPReLU, self).__init__()
     with self.init_scope():
         self.conv1 = L.Convolution2D(
             in_ch, out_ch, (ksize, 1), stride, pad, nobias=nobias)
         self.conv2 = L.Convolution2D(
             in_ch, out_ch, (1, ksize), stride, pad, nobias=nobias)
         self.prelu = L.PReLU()
Example #24
    def __init__(self):
        w = chainer.initializers.HeNormal(scale=0.9701425)
        super(Generator, self).__init__()

        with self.init_scope():
            self.conv1 = L.ConvolutionND(ndim=3, in_channels=1, out_channels=32, ksize=5, pad=2, initialW=w)
            self.prelu1 = L.PReLU()
            self.resblock = BlockB(in_channels=32, out_channels=32, ksize=3, num_of_layer=8)
            self.conv2 = L.ConvolutionND(ndim=3, in_channels=32, out_channels=1, ksize=5, pad=2, initialW=w)
Example #25
 def __init__(self, in_channels, out_channels, bn_eps, **kwargs):
     super(DABInitBlock, self).__init__(**kwargs)
     with self.init_scope():
         self.conv1 = conv3x3_block(
             in_channels=in_channels,
             out_channels=out_channels,
             stride=2,
             bn_eps=bn_eps,
             activation=(lambda: L.PReLU(out_channels)))
         self.conv2 = conv3x3_block(
             in_channels=out_channels,
             out_channels=out_channels,
             bn_eps=bn_eps,
             activation=(lambda: L.PReLU(out_channels)))
         self.conv3 = conv3x3_block(
             in_channels=out_channels,
             out_channels=out_channels,
             bn_eps=bn_eps,
             activation=(lambda: L.PReLU(out_channels)))
Example #26
 def __init__(self, in_channels, out_channels):
     super(ShortcutBlock, self).__init__()
     with self.init_scope():
         self.conv1 = conv3x3_block(
             in_channels=in_channels,
             out_channels=in_channels,
             activation=(lambda: L.PReLU(shape=(in_channels, ))))
         self.conv2 = conv1x1_block(in_channels=in_channels,
                                    out_channels=out_channels,
                                    activation=None)
Example #27
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 use_bias=False,
                 dw_use_bn=True,
                 pw_use_bn=True,
                 bn_eps=1e-5,
                 dw_activation=(lambda: F.relu),
                 pw_activation=(lambda: F.relu),
                 se_reduction=0):
        super(DwsConvBlock, self).__init__()
        self.use_se = se_reduction > 0

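        # depthwise-separable order: depthwise conv, optional
        # squeeze-and-excitation (with PReLU inside), then pointwise 1x1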
        with self.init_scope():
            self.dw_conv = dwconv_block(
                in_channels=in_channels,
                out_channels=in_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                dilate=dilate,
                use_bias=use_bias,
                use_bn=dw_use_bn,
                bn_eps=bn_eps,
                activation=dw_activation)
            if self.use_se:
                self.se = SEBlock(
                    channels=in_channels,
                    reduction=se_reduction,
                    round_mid=False,
                    mid_activation=(lambda: L.PReLU(shape=(in_channels // se_reduction,))),
                    out_activation=(lambda: L.PReLU(shape=(in_channels,))))
            self.pw_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=pw_use_bn,
                bn_eps=bn_eps,
                activation=pw_activation)
Example #28
    def __init__(self, n_in, n_middle, n_units, n_turn,
                 sensor, language, painter, reconstructor=None):
        super(EricListener, self).__init__(
            sensor=sensor,
            painter=painter,
            language=language,
            l1=L.Linear(n_units, n_middle),
            act1=L.PReLU(n_middle),
            l2=L.Linear(n_middle, n_middle),
            act2=L.PReLU(n_middle),
            l3=L.Linear(n_middle, n_middle),
            act3=L.PReLU(n_middle),
            bn1=L.BatchNormalization(n_middle, use_cudnn=False),
        )

        if reconstructor:
            self.add_link('reconstructor', reconstructor)
        else:
            self.reconstructor = None
Example #29
 def __init__(self, in_channels=64, hidden_channels=None, out_channels=64, ksize=3):
     w = chainer.initializers.HeNormal(scale=0.9701425)
     super(BottleNeck, self).__init__()
     hidden_channels = in_channels if hidden_channels is None else hidden_channels
     with self.init_scope():
         self.conv1 = L.ConvolutionND(ndim=3, in_channels=in_channels, out_channels=hidden_channels, ksize=ksize, pad=get_valid_padding(ksize), initialW=w)
         self.bn1 = L.BatchNormalization(hidden_channels)
         self.prelu = L.PReLU()
         self.conv2 = L.ConvolutionND(ndim=3, in_channels=hidden_channels, out_channels=out_channels, ksize=ksize, pad=get_valid_padding(ksize), initialW=w)
         self.bn2 = L.BatchNormalization(out_channels)
Example #30
    def setUp(self):
        self.link = links.PReLU(shape=(3,))
        W = self.link.W.data
        W[...] = numpy.random.uniform(-1, 1, W.shape)
        self.link.cleargrads()

        self.W = W.copy()  # fixed on CPU

        # Avoid instability of the numerical gradient: keep |x| in [0.5, 1]
        # (with random sign) so no input sits near PReLU's kink at zero.
        self.x = numpy.random.uniform(.5, 1, (4, 3, 2)).astype(numpy.float32)
        self.x *= numpy.random.randint(2, size=(4, 3, 2)) * 2 - 1
        self.gy = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
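
With this setUp in place, the test body typically invokes Chainer's gradient checker; a hedged sketch of that call (assuming the stock chainer.gradient_check API, with an illustrative tolerance):

    from chainer import gradient_check

    # compare analytic gradients (w.r.t. x and the slope parameter W)
    # against numerically estimated ones
    gradient_check.check_backward(
        self.link, self.x, self.gy, (self.link.W,), atol=1e-4)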