def __init__(self, channels, blocks, ksize):
    """Feature network: RGB projections around a residual trunk.

    :param channels: number of feature channels used throughout the trunk.
    :param blocks: number of stacked residual blocks.
    :param ksize: kernel size passed to each residual block.
    """
    super().__init__()
    with self.init_scope():
        # 1x1 convs map between RGB space (3 channels) and feature space.
        self.frgb = Convolution2D(3, channels, ksize=1)
        self.trgb = Convolution2D(channels, 3, ksize=1)
        # `blocks` independently initialized copies of the residual block.
        self.resnet = Sequential(ResidualBlock(channels, ksize)).repeat(blocks)
def __init__(self, ch, norm, initialW=None):
    """Two (3x3 conv, normalization) stages on a fixed channel count.

    Channel counts of flat blocks are fixed: every conv maps ch -> ch.

    :param ch: number of input and output channels.
    :param norm: normalization-link factory (e.g. BatchNormalization).
    :param initialW: weight initializer shared by both convolutions.
    """
    super(ResBlock, self).__init__()
    with self.init_scope():
        # Register conv1/bn1 and conv2/bn2 — both stages are identical.
        for stage in (1, 2):
            setattr(self, 'conv{}'.format(stage),
                    Convolution2D(ch, ch, 3, 1, 1, initialW=initialW,
                                  nobias=True))
            setattr(self, 'bn{}'.format(stage), norm(ch))
def __init__(self, in_channels, out_channels):
    """Downsampling residual block: stride-2 shortcut plus two-conv branch.

    :param in_channels: channels of the incoming feature map.
    :param out_channels: channels produced by both branches.
    """
    super(ResidualBlockB, self).__init__()
    with self.init_scope():
        # Projection shortcut: 1x1 stride-2 conv.
        # (`initial` is the module-level initializer; was HeNormal().)
        self.res_branch1 = Convolution2D(
            in_channels, out_channels, ksize=1, stride=2, initialW=initial)
        self.bn_branch1 = BatchNormalization(out_channels)
        # Main branch: 3x3 stride-2 conv followed by a 3x3 conv.
        self.res_branch2a = Convolution2D(
            in_channels, out_channels, ksize=3, stride=2, pad=1,
            initialW=initial)
        self.bn_branch2a = BatchNormalization(out_channels)
        self.res_branch2b = Convolution2D(
            out_channels, out_channels, ksize=3, pad=1, initialW=initial)
        self.bn_branch2b = BatchNormalization(out_channels)
def __init__(self, in_channels, out_channels, ksize, stride=1, pad=0,
             dilate=1, nobias=False, dw_initialW=None, pw_initialW=None,
             dw_initial_bias=None, pw_initial_bias=None,
             dw_activ=identity, pw_activ=relu, bn_kwargs={}):
    """Depthwise-separable convolution, each stage followed by BN.

    The depthwise conv keeps `in_channels` maps (groups=in_channels); the
    1x1 pointwise conv then mixes them into `out_channels` maps.

    :param dw_activ: activation after the depthwise stage (None -> identity).
    :param pw_activ: activation after the pointwise stage (None -> identity).
    :param bn_kwargs: extra BatchNormalization kwargs; if it contains a
        'comm' key, MultiNodeBatchNormalization is used instead.

    NOTE(review): `dw_initial_bias`/`pw_initial_bias` are accepted but never
    used (both convs would need `initial_bias=`) — preserved for interface
    compatibility; confirm whether they should be forwarded.
    """
    self.dw_activ = identity if dw_activ is None else dw_activ
    self.pw_activ = identity if pw_activ is None else pw_activ
    super(SeparableConv2DBNActiv, self).__init__()
    with self.init_scope():
        self.depthwise = Convolution2D(
            in_channels, in_channels, ksize=ksize, stride=stride,
            pad=pad, dilate=dilate, groups=in_channels,
            nobias=nobias, initialW=dw_initialW)
        self.pointwise = Convolution2D(
            in_channels, out_channels, 1,
            nobias=nobias, initialW=pw_initialW)
        if 'comm' in bn_kwargs:
            # BUG FIX: the depthwise conv outputs `in_channels` maps, so its
            # BN must be sized with in_channels (was out_channels, which
            # breaks whenever in_channels != out_channels; the non-comm
            # branch below already used in_channels).
            self.dw_bn = MultiNodeBatchNormalization(
                in_channels, **bn_kwargs)
            self.pw_bn = MultiNodeBatchNormalization(
                out_channels, **bn_kwargs)
        else:
            self.dw_bn = BatchNormalization(in_channels, **bn_kwargs)
            self.pw_bn = BatchNormalization(out_channels, **bn_kwargs)
def __init__(self, num_blocks=18, nc32=16, nc16=32, nc8=64):
    """
    :param num_blocks: the number of resnet blocks per stage. There are 3 stages,
        for feature map width 32, 16, 8. Total number of layers is 6 * num_blocks + 2
    :param nc32: the number of feature maps in the first stage (where feature maps are 32x32)
    :param nc16: the number of feature maps in the second stage (where feature maps are 16x16)
    :param nc8: the number of feature maps in the third stage (where feature maps are 8x8)
    """
    # NOTE(review): this uses a very old Chainer API — positional
    # `add_link`, the removed `wscale` argument and `F.BatchNormalization`.
    # It will not run on modern Chainer; confirm the pinned version.
    ksize = 3
    pad = 1
    ws = sqrt(2.)  # This makes the initialization equal to that of He et al.
    super(ResNet, self).__init__()
    # The first layer is always a convolution.
    self.add_link(
        Convolution2D(in_channels=3, out_channels=nc32, ksize=ksize,
                      stride=1, pad=pad, wscale=ws)
    )
    # Add num_blocks ResBlocks (2 * num_blocks layers) for the size 32x32 feature maps
    for i in range(num_blocks):
        self.add_link(
            ResBlock2D(
                in_channels=nc32, out_channels=nc32, ksize=ksize,
                fiber_map='id', stride=1, pad=pad, wscale=ws
            )
        )
    # Add num_blocks ResBlocks (2 * num_blocks layers) for the size 16x16 feature maps
    # The first convolution uses stride 2 (spatial downsampling) and a
    # 'linear' fiber map to widen nc32 -> nc16 channels.
    for i in range(num_blocks):
        stride = 1 if i > 0 else 2
        fiber_map = 'id' if i > 0 else 'linear'
        nc_in = nc16 if i > 0 else nc32
        self.add_link(
            ResBlock2D(
                in_channels=nc_in, out_channels=nc16, ksize=ksize,
                fiber_map=fiber_map, stride=stride, pad=pad, wscale=ws
            )
        )
    # Add num_blocks ResBlocks (2 * num_blocks layers) for the size 8x8 feature maps
    # The first convolution uses stride 2
    for i in range(num_blocks):
        stride = 1 if i > 0 else 2
        fiber_map = 'id' if i > 0 else 'linear'
        nc_in = nc8 if i > 0 else nc16
        self.add_link(
            ResBlock2D(
                in_channels=nc_in, out_channels=nc8, ksize=ksize,
                fiber_map=fiber_map, stride=stride, pad=pad, wscale=ws
            )
        )
    # Add BN and final layer
    # We do ReLU and average pooling between BN and final layer,
    # but since these are stateless they don't require a Link.
    self.add_link(F.BatchNormalization(size=nc8))
    # Final 1x1 conv maps nc8 feature maps to the 10 class scores.
    self.add_link(Convolution2D(in_channels=nc8, out_channels=10,
                                ksize=1, stride=1, pad=0, wscale=ws))
def __init__(self, in_channels, out_channels, ksize=3, pad=1,
             activation=F.relu):
    """First discriminator block: two convs plus a 1x1 shortcut conv.

    :param activation: activation applied between the convolutions.
    """
    super(OptimizedBlock, self).__init__()
    self.activation = activation
    # Main path gets gain sqrt(2) (for the ReLU); shortcut uses plain Glorot.
    w_main = chainer.initializers.GlorotUniform(math.sqrt(2))
    w_shortcut = chainer.initializers.GlorotUniform()
    with self.init_scope():
        self.c1 = Convolution2D(in_channels, out_channels,
                                ksize=ksize, pad=pad, initialW=w_main)
        self.c2 = Convolution2D(out_channels, out_channels,
                                ksize=ksize, pad=pad, initialW=w_main)
        self.c_sc = Convolution2D(in_channels, out_channels,
                                  ksize=1, pad=0, initialW=w_shortcut)
def __init__(self, channels, ksize):
    """Residual block body: two (conv, BN, leaky-ReLU) stages.

    :param channels: input and output channel count of both convs.
    :param ksize: convolution kernel size (also stored on the instance).
    """
    super().__init__()
    self.ksize = ksize
    with self.init_scope():
        # Register c1/b1/a1 and c2/b2/a2 — the two stages are identical.
        for stage in (1, 2):
            setattr(self, 'c%d' % stage,
                    Convolution2D(channels, channels, ksize))
            setattr(self, 'b%d' % stage, BatchNormalization(channels))
            setattr(self, 'a%d' % stage, LeakyReluLink())
def __init__(self, channels, ksize, repeat):
    """Tail net: `repeat` unpadded conv+relu stages, then a 1x1 to-RGB conv.

    :param channels: feature channel count of every conv stage.
    :param ksize: kernel size of the repeated convs.
    :param repeat: how many conv+relu stages to stack.
    """
    super().__init__()
    self.channels = channels
    self.ksize = ksize
    self.repeat = repeat
    with self.init_scope():
        # One conv+relu stage, replicated `repeat` times (fresh inits).
        stage = Sequential(
            Convolution2D(channels, channels, ksize=ksize, stride=1, pad=0),
            relu)
        self.convs = stage.repeat(repeat)
        self.to_rgb = Convolution2D(channels, 3, ksize=1, stride=1, pad=0)
def __init__(self, channels, blocks, ksize, tblocks, tksize):
    """Multi-level net: from-RGB head, four levels, a tail, and to-RGB.

    :param channels: feature channels used by every level.
    :param blocks, ksize: per-level residual block count and kernel size.
    :param tblocks, tksize: tail network block count and kernel size.
    """
    super().__init__()
    with self.init_scope():
        self.frgb = Convolution2D(3, channels, ksize=3)
        self.r1 = LeakyReluLink()
        # Four successive levels l1..l4, all identically configured.
        for idx in range(1, 5):
            setattr(self, 'l%d' % idx, Level(channels, blocks, ksize))
        self.tail = TailNet(channels, tblocks, tksize)
        self.trgb = Convolution2D(channels, 3, ksize=1)
def __init__(self, in_channels, out_channels):
    """Plain residual block branch: two He-initialized 3x3 convolutions."""
    super(ResidualBlock, self).__init__()
    with self.init_scope():
        self.res_branch2a = Convolution2D(
            in_channels, out_channels, ksize=3, pad=1, initialW=HeNormal())
        self.res_branch2b = Convolution2D(
            out_channels, out_channels, ksize=3, pad=1, initialW=HeNormal())
def __init__(self):
    """CNN that shrinks a 64px input to 22px, then deconvolves back to 64px.

    The trailing comments track the spatial size after each layer,
    assuming a 64x64 input.
    """
    super().__init__()
    with self.init_scope():
        self.c1 = Convolution2D(3, 64, ksize=17, stride=1, pad=0)   # 64 -> 48
        self.a1 = PReLU()
        self.c2 = Convolution2D(64, 12, ksize=1, stride=1, pad=0)   # 48 -> 48
        self.a2 = PReLU()
        self.c3 = Convolution2D(12, 12, ksize=3, stride=1, pad=1)   # 48 -> 48
        self.a3 = PReLU()
        self.c4 = Convolution2D(12, 12, ksize=7, stride=1, pad=1)   # 48 -> 44
        self.a4 = PReLU()
        self.c5 = Convolution2D(12, 64, ksize=4, stride=2, pad=1)   # 44 -> 22
        self.a5 = PReLU()
        # Deconv upsamples 3x back to the original size: 22 -> 64.
        self.d6 = Deconvolution2D(64, 3, ksize=9, stride=3, pad=4)
def __init__(self, in_channels, out_channels):
    """Stem block: 7x7 stride-2 He-initialized conv plus batch norm."""
    super(ConvolutionBlock, self).__init__()
    with self.init_scope():
        self.conv = Convolution2D(in_channels, out_channels, ksize=7,
                                  stride=2, pad=3, initialW=HeNormal())
        self.bn_conv = BatchNormalization(out_channels)
def __init__(self, in_channels, out_channels, ksize=None, stride=1,
             pad='SAME', nobias=False, initialW=None, initial_bias=None,
             **kwargs):
    """Convolution2D wrapper supporting TensorFlow-style padding.

    :param pad: 'SAME'/'VALID' for TF-compatible dynamic padding, or an
        int for ordinary Chainer padding.
    """
    super(TFConvolution2D, self).__init__()
    # Chainer shorthand: when ksize is omitted, the first two positional
    # arguments are actually (out_channels, ksize) and in_channels is
    # inferred from the first input.
    if ksize is None:
        out_channels, ksize, in_channels = in_channels, out_channels, None

    if pad in ('SAME', 'VALID'):  # TF compatible pad
        # Pad dynamically per input via _tf_padding; the wrapped conv then
        # runs with pad=0. The lambda reads self.conv lazily, so it is safe
        # to define it before self.conv exists.
        self.padding = lambda x: _tf_padding(x, _pair(self.conv.ksize),
                                             _pair(self.conv.stride), pad)
        conv_pad = 0
    else:
        self.padding = None
        assert isinstance(pad, int)
        conv_pad = pad

    with self.init_scope():
        self.conv = Convolution2D(in_channels, out_channels, ksize, stride,
                                  conv_pad, nobias, initialW, initial_bias,
                                  **kwargs)
def __init__(self, n_fg_class=None, pretrained_model=None):
    """YOLOv3 detector.

    :param n_fg_class: number of foreground classes (may come from the
        pretrained-model spec instead).
    :param pretrained_model: model name or npz path resolved by
        utils.prepare_pretrained_model against self._models.
    """
    super(YOLOv3, self).__init__()
    param, path = utils.prepare_pretrained_model(
        {'n_fg_class': n_fg_class}, pretrained_model, self._models)
    self.n_fg_class = param['n_fg_class']
    self.use_preset('visualize')

    with self.init_scope():
        self.extractor = Darknet53Extractor()
        self.subnet = chainer.ChainList()

    # One detection head per feature scale; each predicts
    # (4 box coords + 1 objectness + n_fg_class) values per anchor.
    for i, n in enumerate((512, 256, 128)):
        self.subnet.append(chainer.Sequential(
            Conv2DBNActiv(n * 2, 3, pad=1, activ=_leaky_relu),
            Convolution2D(
                len(self._anchors[i]) * (4 + 1 + self.n_fg_class), 1)))

    # Precompute one default box (v, u, h, w) per anchor per grid cell,
    # plus the pixel step of its grid, in prediction order.
    default_bbox = []
    step = []
    for k, grid in enumerate(self.extractor.grids):
        for v, u in itertools.product(range(grid), repeat=2):
            for h, w in self._anchors[k]:
                default_bbox.append((v, u, h, w))
                step.append(self.insize / grid)
    self._default_bbox = np.array(default_bbox, dtype=np.float32)
    self._step = np.array(step, dtype=np.float32)

    if path:
        # strict=False: pretrained file may omit some params.
        chainer.serializers.load_npz(path, self, strict=False)
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
             dilate=1, groups=1, nobias=True, initialW=None,
             initial_bias=None, activ=relu, bn_kwargs={}):
    """Conv -> BN -> activation.

    If bn_kwargs contains a 'comm' key, MultiNodeBatchNormalization is
    used instead of BatchNormalization.
    """
    # Shorthand: Conv2DBNActiv(out_channels, ksize) infers in_channels.
    if ksize is None:
        out_channels, ksize, in_channels = in_channels, out_channels, None
    self.activ = activ
    super(Conv2DBNActiv, self).__init__()
    with self.init_scope():
        self.conv = Convolution2D(
            in_channels, out_channels, ksize, stride, pad, nobias,
            initialW, initial_bias, dilate=dilate, groups=groups)
        bn_cls = (MultiNodeBatchNormalization if 'comm' in bn_kwargs
                  else BatchNormalization)
        self.bn = bn_cls(out_channels, **bn_kwargs)
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
             dilate=1, nobias=False, initialW=None, initial_bias=None,
             activ=relu):
    """Conv -> activation; uses DilatedConvolution2D when dilate > 1."""
    # Shorthand: Conv2DActiv(out_channels, ksize) infers in_channels.
    if ksize is None:
        out_channels, ksize, in_channels = in_channels, out_channels, None
    self.activ = activ
    super(Conv2DActiv, self).__init__()
    with self.init_scope():
        if dilate > 1:
            self.conv = DilatedConvolution2D(
                in_channels, out_channels, ksize, stride, pad, dilate,
                nobias, initialW, initial_bias)
        else:
            self.conv = Convolution2D(
                in_channels, out_channels, ksize, stride, pad,
                nobias, initialW, initial_bias)
def __init__(self, channels, blocks, ksize):
    """One pyramid level: residual trunk, fuse conv, leaky ReLU, upsampler."""
    super().__init__()
    with self.init_scope():
        self.resnet = Sequential(
            ResidualBlock(channels, ksize)).repeat(blocks)
        # Fuses features concatenated with an RGB image (hence the +3).
        self.c = Convolution2D(channels + 3, channels, ksize=3)
        self.r = LeakyReluLink()
        self.up = Upsampler()
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
             scale=1, dilate=1, groups=1, nobias=True, initialW=None,
             initial_bias=None, activ=relu, bn_kwargs={}, aa_kwargs={}):
    """Conv -> BN -> activation with selectable conv flavor.

    The conv is chosen by priority: attention-augmented conv when
    aa_kwargs is non-empty, Res2Net split conv when scale > 1, otherwise
    a plain Convolution2D.

    :param aa_kwargs: attention settings; expects keys 'k', 'v'
        (fractions of out_channels for key/value depth), 'Nh' (heads) and
        'relative' (positional encoding flag).
    :param bn_kwargs: BN kwargs; a 'comm' key selects multi-node BN.
    """
    # Shorthand: Conv2DBNActiv(out_channels, ksize) infers in_channels.
    if ksize is None:
        out_channels, ksize, in_channels = in_channels, out_channels, None
    self.activ = activ
    super(Conv2DBNActiv, self).__init__()
    with self.init_scope():
        if len(aa_kwargs) > 0:
            # NOTE(review): stride/pad/dilate/groups are ignored on this
            # path — AugmentedConv presumably fixes stride=1 and pads
            # internally; confirm against its definition.
            self.conv = AugmentedConv(
                in_channels, out_channels, ksize,
                int(out_channels * aa_kwargs['k']),
                int(out_channels * aa_kwargs['v']),
                aa_kwargs['Nh'], aa_kwargs['relative'], initialW=initialW)
        elif scale > 1:
            self.conv = Res2NetConv(
                in_channels, out_channels, ksize, stride, pad, scale,
                nobias, initialW, initial_bias,
                dilate=dilate, groups=groups)
        else:
            self.conv = Convolution2D(
                in_channels, out_channels, ksize, stride, pad, nobias,
                initialW, initial_bias, dilate=dilate, groups=groups)
        if 'comm' in bn_kwargs:
            self.bn = MultiNodeBatchNormalization(out_channels, **bn_kwargs)
        else:
            self.bn = BatchNormalization(out_channels, **bn_kwargs)
def __init__(self):
    """Test-fixture model: one 3x3 conv with all-ones weights, no bias."""
    super(DummyCNNModel, self).__init__()
    # Deterministic weights make the model's output exactly predictable.
    ones_kernel = numpy.ones((1, 1, 3, 3), numpy.float32)
    with self.init_scope():
        self.l1 = Convolution2D(1, 1, ksize=3, initialW=ones_kernel,
                                nobias=True)
def __init__(self, in_channels, out_channels, ksize, pad):
    """Convolution with a mask zeroing kernel columns right of center.

    The mask has shape (kh, kw) with columns > kw//2 set to 0, i.e. each
    output position may only see its own column and those to its left.
    """
    super().__init__()
    with self.init_scope():
        self.conv = Convolution2D(in_channels=in_channels,
                                  out_channels=out_channels,
                                  ksize=ksize, pad=pad)
        # Build the mask from the realized kernel shape.
        _, _, h, w = self.conv.W.shape
        _mask = np.ones(shape=(h, w))
        _mask[:, w // 2 + 1:] = 0
        # NOTE(review): the mask is float64 while conv weights are float32
        # — multiplying them will upcast; confirm this is intended.
        # NOTE(review): stored as a plain Variable, not a Parameter, so it
        # is not trained and not serialized — verify that is the intent.
        self.mask = Variable(_mask)
def __init__(self, path_glb, in_ch=None, out_ch=3, ins_norm=False,
             num_resblock=3, input_size=(512, 1024)):
    """Local enhancer wrapping a pretrained global generator.

    :param path_glb: npz path of pretrained GlobalGenerator weights,
        loaded immediately after construction.
    :param ins_norm: use InstanceNormalization instead of BatchNormalization.
    :param input_size: (H, W) target size for the upsampling deconv.
    """
    super(LocalEnhancer, self).__init__()
    with self.init_scope():
        self.global_network = GlobalGenerator(in_ch)
        # Load the pretrained global network right away, before the local
        # enhancer layers are registered.
        serializers.load_npz(path_glb, self.global_network)
        self.num_resblock = num_resblock
        if ins_norm:
            norm = InstanceNormalization
        else:
            norm = BatchNormalization
        # Local branch: 7x7 flat conv, stride-2 downsample, residual
        # blocks, stride-2 upsample back to input_size, final to-RGB conv.
        self.flat1 = Convolution2D(in_ch, 32, 7, 1, 3)
        self.flat1_bn = norm(32)
        self.down1 = Convolution2D(32, 64, 3, 2, 1)
        self.down1_bn = norm(64)
        for i in range(self.num_resblock):
            self.add_link('res_{}'.format(i), ResBlock(64, norm=norm))
        self.up1 = Deconvolution2D(64, 32, 3, 2, 1, outsize=input_size)
        self.up1_bn = norm(32)
        self.flat2 = Convolution2D(32, out_ch, 7, 1, 3)
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
             dilate=1, groups=1, nobias=True, initialW=None,
             initial_bias=None, activ=relu, use_bn=True, bn_kwargs={}):
    """Conv (+ optional BN) + activation, recording every hyperparameter.

    :param use_bn: when False, self.bn is None and BN is skipped.
    :param bn_kwargs: BN kwargs; a 'comm' key selects multi-node BN.
    """
    super().__init__()
    # Shorthand: (out_channels, ksize) positional form infers in_channels.
    if ksize is None:
        out_channels, ksize, in_channels = in_channels, out_channels, None
    # Keep the complete configuration on the instance for introspection.
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.ksize = ksize
    self.stride = stride
    self.pad = pad
    self.dilate = dilate
    self.groups = groups
    self.nobias = nobias
    self.initialW = initialW
    self.initial_bias = initial_bias
    self.use_bn = use_bn
    self.bn_kwargs = bn_kwargs
    self.activ = activ
    with self.init_scope():
        self.conv = Convolution2D(
            in_channels, out_channels, ksize=ksize, stride=stride, pad=pad,
            nobias=nobias, initialW=initialW, initial_bias=initial_bias,
            dilate=dilate, groups=groups)
        # TODO: allow passing customized BN
        if not use_bn:
            self.bn = None
        elif 'comm' in bn_kwargs:
            self.bn = MultiNodeBatchNormalization(out_channels, **bn_kwargs)
        else:
            self.bn = BatchNormalization(out_channels, **bn_kwargs)
def __init__(self, in_ch=None, out_ch=3, ins_norm=False,
             input_size=(512, 1024), num_resblock=9):
    """Global generator: 4x stride-2 encoder, residual core, 4x decoder.

    :param ins_norm: use InstanceNormalization instead of BatchNormalization.
    :param input_size: (H, W) of the full-resolution output; each deconv's
        outsize is derived by dividing by 8/4/2/1.
    :param num_resblock: residual blocks at the 1024-channel bottleneck.
    """
    super(GlobalGenerator, self).__init__()
    with self.init_scope():
        self.num_resblock = num_resblock
        if ins_norm:
            norm = InstanceNormalization
        else:
            norm = BatchNormalization
        # Encoder: 7x7 flat conv, then four stride-2 downsampling convs
        # doubling channels 64 -> 128 -> 256 -> 512 -> 1024.
        self.flat1 = Convolution2D(in_ch, 64, 7, 1, 3,
                                   initialW=None, nobias=True)
        self.flat1_bn = norm(64)
        self.down1 = Convolution2D(64, 128, 3, 2, 1,
                                   initialW=None, nobias=True)
        self.down1_bn = norm(128)
        self.down2 = Convolution2D(128, 256, 3, 2, 1,
                                   initialW=None, nobias=True)
        self.down2_bn = norm(256)
        self.down3 = Convolution2D(256, 512, 3, 2, 1,
                                   initialW=None, nobias=True)
        self.down3_bn = norm(512)
        self.down4 = Convolution2D(512, 1024, 3, 2, 1,
                                   initialW=None, nobias=True)
        self.down4_bn = norm(1024)
        # Residual bottleneck at 1024 channels.
        for i in range(self.num_resblock):
            self.add_link('res_{}'.format(i), ResBlock(1024, norm=norm))
        # Decoder: mirror of the encoder; each deconv's outsize pins the
        # exact spatial size (input_size / 8, / 4, / 2, then full size).
        self.up0 = Deconvolution2D(1024, 512, 3, 2, 1, initialW=None,
                                   nobias=True,
                                   outsize=[int(x / 8) for x in input_size])
        self.up0_bn = norm(512)
        self.up1 = Deconvolution2D(512, 256, 3, 2, 1, initialW=None,
                                   nobias=True,
                                   outsize=[int(x / 4) for x in input_size])
        self.up1_bn = norm(256)
        self.up2 = Deconvolution2D(256, 128, 3, 2, 1, initialW=None,
                                   nobias=True,
                                   outsize=[int(x / 2) for x in input_size])
        self.up2_bn = norm(128)
        self.up3 = Deconvolution2D(128, 64, 3, 2, 1, initialW=None,
                                   nobias=True, outsize=input_size)
        self.up3_bn = norm(64)
        self.flat2 = Convolution2D(64, out_ch, 7, 1, 3,
                                   initialW=None, nobias=True)
def __init__(self, in_ch=3, out_ch=3):
    """U-shaped encoder/decoder: four stride-2 downs, four stride-2 ups.

    Channels double on the way down (32..512) and halve on the way up.
    """
    super(Encoder, self).__init__()
    with self.init_scope():
        # Entry: 7x7 conv keeping spatial size.
        self.flat1 = Convolution2D(in_ch, 32, 7, 1, 3)
        self.flat1_bn = BatchNormalization(32)
        # Downsampling path (3x3, stride 2).
        self.down1 = Convolution2D(32, 64, 3, 2, 1)
        self.down1_bn = BatchNormalization(64)
        self.down2 = Convolution2D(64, 128, 3, 2, 1)
        self.down2_bn = BatchNormalization(128)
        self.down3 = Convolution2D(128, 256, 3, 2, 1)
        self.down3_bn = BatchNormalization(256)
        self.down4 = Convolution2D(256, 512, 3, 2, 1)
        self.down4_bn = BatchNormalization(512)
        # Upsampling path (4x4 deconv, stride 2).
        self.up1 = Deconvolution2D(512, 256, 4, 2, 1)
        self.up1_bn = BatchNormalization(256)
        self.up2 = Deconvolution2D(256, 128, 4, 2, 1)
        self.up2_bn = BatchNormalization(128)
        self.up3 = Deconvolution2D(128, 64, 4, 2, 1)
        self.up3_bn = BatchNormalization(64)
        self.up4 = Deconvolution2D(64, 32, 4, 2, 1)
        self.up4_bn = BatchNormalization(32)
        # Exit: 7x7 stride-1 deconv (size-preserving) to out_ch channels.
        self.flat2 = Deconvolution2D(32, out_ch, 7, 1, 3)
    self.out_ch = out_ch
def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3,
             pad=1, normalization=None, activation=F.relu, downsample=False):
    """Residual GAN block with optional normalization and shortcut conv.

    :param normalization: 'batchnorm', 'groupnorm', or None (identity).
    :param downsample: whether the block downsamples (forces a learned
        shortcut even when channel counts match).
    """
    super(Block, self).__init__()
    self.normalization = normalization
    self.activation = activation
    self.downsample = downsample
    # The shortcut needs parameters when the shape changes.
    self.learnable_sc = (in_channels != out_channels) or downsample
    if hidden_channels is None:
        hidden_channels = in_channels
    w_main = chainer.initializers.GlorotUniform(math.sqrt(2))
    w_sc = chainer.initializers.GlorotUniform()
    with self.init_scope():
        self.c1 = Convolution2D(in_channels, hidden_channels, ksize=ksize,
                                pad=pad, initialW=w_main)
        self.c2 = Convolution2D(hidden_channels, out_channels, ksize=ksize,
                                pad=pad, initialW=w_main)
        if self.learnable_sc:
            self.c_sc = Convolution2D(in_channels, out_channels, ksize=1,
                                      pad=0, initialW=w_sc)
        if self.normalization == 'batchnorm':
            self.b1 = L.BatchNormalization(in_channels)
            self.b2 = L.BatchNormalization(hidden_channels)
        elif self.normalization == 'groupnorm':
            self.b1 = L.GroupNormalization(NUMGROUPS, in_channels)
            self.b2 = L.GroupNormalization(NUMGROUPS, hidden_channels)
        else:
            # No normalization: plain pass-through callables.
            self.b1 = self.b2 = lambda x: x
def __init__(self, k, layer_num, f0, growth=4, dropout_ratio=0.5):
    """DenseNet-style unit: BN, 1x1 bottleneck, BN, masked kxk conv.

    :param k: masked-conv kernel size (padded k//2 to keep spatial size).
    :param layer_num: 1-based index of this layer within the dense block;
        determines the accumulated input channel count.
    :param f0: channel count entering the dense block.
    :param growth: channels added by this layer (growth rate).
    """
    super().__init__()
    # Dense connectivity: inputs accumulate `growth` channels per layer.
    in_ch = f0 + (layer_num - 1) * growth
    bottleneck = 4 * growth
    with self.init_scope():
        self.bn1 = BatchNormalization(size=in_ch)
        self.bn2 = BatchNormalization(size=bottleneck)
        self.conv1 = Convolution2D(in_channels=in_ch,
                                   out_channels=bottleneck, ksize=1)
        self.conv2 = MaskedConv2D(in_channels=bottleneck,
                                  out_channels=growth, ksize=k, pad=k // 2)
    self.dropout_ratio = dropout_ratio
def __init__(self, in_channels, out_channels, ksize, dk, dv, Nh, relative,
             initialW=None):
    """Attention-augmented convolution layer.

    :param dk: total key/query depth across all heads.
    :param dv: total value depth (channels produced by attention).
    :param Nh: number of attention heads.
    :param relative: whether relative positional encodings are used.
    """
    super(AugmentedConv, self).__init__()
    self.dk = dk
    self.dv = dv
    self.Nh = Nh
    self.relative = relative
    same_pad = ksize // 2  # keeps spatial size at stride 1
    with self.init_scope():
        # The plain conv supplies the remaining out_channels - dv maps.
        self.conv = Convolution2D(in_channels, out_channels - dv, ksize,
                                  stride=1, pad=same_pad, nobias=True,
                                  initialW=initialW)
        # One conv jointly emits queries, keys and values (2*dk + dv maps).
        self.conv_qkv = Convolution2D(in_channels, 2 * dk + dv, ksize,
                                      stride=1, pad=same_pad, nobias=True,
                                      initialW=initialW)
        # 1x1 projection applied to the attention output.
        self.conv_attn = Convolution2D(dv, dv, 1, nobias=True,
                                       initialW=initialW)
def __init__(self, in_channels, out_channels, ksize, stride, pad, scale,
             nobias=True, initialW=None, initial_bias=None, dilate=1,
             groups=1):
    """Res2Net split convolution: one conv per split except the first.

    The input is split into `scale` groups of in_channels // scale maps;
    splits 2..scale each get a conv (links k2..k{scale}); split 1 is
    passed through unchanged by the caller.
    """
    assert scale > 1
    assert in_channels % scale == 0
    self.scale = scale
    super(Res2NetConv, self).__init__()
    split_ch = in_channels // scale
    with self.init_scope():
        for idx in range(2, scale + 1):
            self.add_link('k{}'.format(idx), Convolution2D(
                split_ch, split_ch, ksize, stride, pad, nobias,
                initialW, initial_bias, dilate=dilate, groups=groups))
def __init__(self, in_channels, out_channels, ksize=None, stride=1, pad=0,
             nobias=True, initialW=None, initial_bias=None, activ=relu,
             bn_kwargs=dict()):
    """Conv -> BN -> activation (simple variant, single BN flavor)."""
    # Shorthand: Conv2DBNActiv(out_channels, ksize) infers in_channels.
    if ksize is None:
        out_channels, ksize, in_channels = in_channels, out_channels, None
    self.activ = activ
    super(Conv2DBNActiv, self).__init__()
    with self.init_scope():
        self.conv = Convolution2D(in_channels, out_channels, ksize, stride,
                                  pad, nobias, initialW, initial_bias)
        self.bn = BatchNormalization(out_channels, **bn_kwargs)
def __init__(self, n_fg_class=None, pretrained_model=None):
    """YOLOv2 detector.

    :param n_fg_class: number of foreground classes (may come from the
        pretrained-model spec instead).
    :param pretrained_model: model name or npz path resolved by
        utils.prepare_pretrained_model against self._models.
    """
    super(YOLOv2, self).__init__()
    param, path = utils.prepare_pretrained_model(
        {'n_fg_class': n_fg_class}, pretrained_model, self._models)
    self.n_fg_class = param['n_fg_class']
    self.use_preset('visualize')

    with self.init_scope():
        self.extractor = Darknet19Extractor()
        # Single head predicting (4 box coords + 1 objectness +
        # n_fg_class) values per anchor via a 1x1 conv.
        self.subnet = Convolution2D(
            len(self._anchors) * (4 + 1 + self.n_fg_class), 1)

    # Precompute one default box (v, u, h, w) per anchor per grid cell,
    # in prediction order.
    default_bbox = []
    for v, u in itertools.product(range(self.extractor.grid), repeat=2):
        for h, w in self._anchors:
            default_bbox.append((v, u, h, w))
    self._default_bbox = np.array(default_bbox, dtype=np.float32)

    if path:
        # strict=False: pretrained file may omit some params.
        chainer.serializers.load_npz(path, self, strict=False)