# Assumed imports for this excerpt (Chainer); define_conv, define_atrous_conv,
# and define_deconv are project helpers defined elsewhere in the repository.
import chainer.functions as F
import chainer.links as L
from chainer.initializers import HeNormal, Normal


def __init__(self, opt, input_ch, input_resolution=65):
    super().__init__()
    # Get options.
    nf = opt.aspp_nf
    # Dilation rates from the original paper (6, 12, 18 at 65x65),
    # rescaled to the actual input resolution.
    x65_rate = [6, 12, 18]
    rate = [round(x * input_resolution / 65) for x in x65_rate]
    he_w = HeNormal()
    with self.init_scope():
        self.x1 = define_conv(opt)(input_ch, nf, ksize=1, initialW=he_w)
        self.x1_bn = L.BatchNormalization(nf)
        self.x3_small = define_atrous_conv(opt)(input_ch, nf, ksize=3, rate=rate[0], initialW=he_w)
        self.x3_small_bn = L.BatchNormalization(nf)
        self.x3_middle = define_atrous_conv(opt)(input_ch, nf, ksize=3, rate=rate[1], initialW=he_w)
        self.x3_middle_bn = L.BatchNormalization(nf)
        self.x3_large = define_atrous_conv(opt)(input_ch, nf, ksize=3, rate=rate[2], initialW=he_w)
        self.x3_large_bn = L.BatchNormalization(nf)
        self.sum_func = define_conv(opt)(nf * 4, input_ch, ksize=3, pad=1, initialW=he_w)
    self.activation = F.relu
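# A minimal sketch of the corresponding forward pass (not shown in this
# excerpt): each branch is assumed to be conv -> BN -> activation, the four
# branch outputs are concatenated along the channel axis (nf * 4 channels,
# matching sum_func's input), and sum_func fuses them back to input_ch.
def __call__(self, x):
    h1 = self.activation(self.x1_bn(self.x1(x)))
    h2 = self.activation(self.x3_small_bn(self.x3_small(x)))
    h3 = self.activation(self.x3_middle_bn(self.x3_middle(x)))
    h4 = self.activation(self.x3_large_bn(self.x3_large(x)))
    h = F.concat((h1, h2, h3, h4), axis=1)
    return self.sum_func(h)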
def __init__(self, opt):
    super().__init__()
    he_w = HeNormal()
    xavier_w = Normal()
    ndf = opt.ndf
    with self.init_scope():
        # [input] nclass x 256 x 256
        self.c1 = define_conv(opt)(opt.class_num, ndf, ksize=3, stride=2, pad=1, initialW=he_w)
        # [input] ndf x 128 x 128
        self.c2 = define_conv(opt)(ndf, ndf * 2, ksize=3, stride=2, pad=1, initialW=he_w)
        self.c2_norm = L.BatchNormalization(size=ndf * 2)
        # [input] ndf*2 x 64 x 64
        self.c3 = define_conv(opt)(ndf * 2, ndf * 4, ksize=3, stride=2, pad=1, initialW=he_w)
        self.c3_norm = L.BatchNormalization(size=ndf * 4)
        # [input] ndf*4 x 32 x 32
        self.c4 = define_conv(opt)(ndf * 4, ndf * 8, ksize=3, stride=2, pad=1, initialW=he_w)
        self.c4_norm = L.BatchNormalization(size=ndf * 8)
        # [input] ndf*8 x 16 x 16
        self.c5 = define_conv(opt)(ndf * 8, 1, ksize=3, stride=2, pad=1, initialW=he_w)
        # [input] 1 x 8 x 8
        self.upscale = define_deconv(opt)(1, 1, ksize=32, stride=32, initialW=xavier_w)
        # [output] 1 x 256 x 256
    self.activation = F.leaky_relu
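# A minimal sketch of the assumed forward pass: five stride-2 convolutions
# with leaky ReLU (BN on the middle three), then the learned x32 deconvolution
# returns the 8x8 patch scores at the 256x256 input resolution.
def __call__(self, x):
    h = self.activation(self.c1(x))
    h = self.activation(self.c2_norm(self.c2(h)))
    h = self.activation(self.c3_norm(self.c3(h)))
    h = self.activation(self.c4_norm(self.c4(h)))
    h = self.c5(h)
    return self.upscale(h)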
def __init__(self, opt, input_ch, output_ch, rate=2):
    super().__init__()
    he_w = HeNormal()
    output_ch = output_ch * rate ** 2
    with self.init_scope():
        self.c = define_conv(opt)(input_ch, output_ch, ksize=3, stride=1, pad=1, initialW=he_w)
    self.ps_func = lambda x: F.depth2space(x, rate)
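# A minimal sketch of the assumed forward pass: the 3x3 conv expands to
# output_ch * rate**2 channels, then depth2space (pixel shuffle) trades those
# channel groups for a rate-times larger spatial grid.
def __call__(self, x):
    return self.ps_func(self.c(x))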
def __init__(self, opt):
    super().__init__()
    he_w = HeNormal()
    with self.init_scope():
        # This ResNet101 uses a pre-trained Caffe model that can be downloaded from GitHub:
        # <https://github.com/KaimingHe/deep-residual-networks>
        self.resnet101 = L.ResNet101Layers()
        self.use_layer = ('res3', 512)
        nf = self.use_layer[1]
        self.c1 = define_atrous_conv(opt)(nf, nf, ksize=3, rate=2, initialW=he_w)
        self.norm1 = L.BatchNormalization(nf)
        self.c2 = define_atrous_conv(opt)(nf, nf, ksize=3, rate=4, initialW=he_w)
        self.norm2 = L.BatchNormalization(nf)
        self.aspp = ASPP(opt, nf, input_resolution=32)
        self.up1 = PixelShuffler(opt, nf, nf // 2, rate=2)       # 32 -> 64
        self.up2 = PixelShuffler(opt, nf // 2, nf // 4, rate=2)  # 64 -> 128
        self.up3 = PixelShuffler(opt, nf // 4, nf // 8, rate=2)  # 128 -> 256
        self.to_class = define_conv(opt)(nf // 8, opt.class_num, ksize=3, pad=1, initialW=he_w)
    self.activation = F.leaky_relu
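# A minimal sketch of the assumed forward pass. L.ResNet101Layers can return
# intermediate activations by name, so the 'res3' feature map (512 channels,
# 1/8 resolution, i.e. 32x32 for a 256x256 input) is assumed to feed the
# atrous convs, ASPP, and the three pixel-shuffle upsamplers.
def __call__(self, x):
    layer_name = self.use_layer[0]
    h = self.resnet101(x, layers=[layer_name])[layer_name]
    h = self.activation(self.norm1(self.c1(h)))
    h = self.activation(self.norm2(self.c2(h)))
    h = self.aspp(h)
    h = self.activation(self.up1(h))
    h = self.activation(self.up2(h))
    h = self.activation(self.up3(h))
    return self.to_class(h)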
def __init__(self, opt):
    super().__init__()
    he_w = HeNormal()
    down_sampling_num = 3
    ngf = opt.ngf
    with self.init_scope():
        # [input] 3 x 256 x 256
        self.c1 = define_conv(opt)(opt.img_shape[0], ngf, ksize=4, stride=2, pad=1, initialW=he_w)
        self.norm1 = L.BatchNormalization(ngf)
        # [input] ngf x 128 x 128
        self.c2 = define_conv(opt)(ngf, ngf * 2, ksize=4, stride=2, pad=1, initialW=he_w)
        self.norm2 = L.BatchNormalization(ngf * 2)
        # [input] ngf*2 x 64 x 64
        self.c3 = define_conv(opt)(ngf * 2, ngf * 4, ksize=4, stride=2, pad=1, initialW=he_w)
        self.norm3 = L.BatchNormalization(ngf * 4)
        # [input] ngf*4 x 32 x 32
        self.a1 = define_atrous_conv(opt)(ngf * 4, ngf * 4, ksize=3, rate=2, initialW=he_w)
        self.norm4 = L.BatchNormalization(ngf * 4)
        # [input] ngf*4 x 32 x 32
        self.a2 = define_atrous_conv(opt)(ngf * 4, ngf * 4, ksize=3, rate=4, initialW=he_w)
        self.norm5 = L.BatchNormalization(ngf * 4)
        # [input] ngf*4 x 32 x 32
        resolution = max(opt.img_shape[1], opt.img_shape[2]) // 2 ** down_sampling_num
        self.aspp = ASPP(opt, ngf * 4, input_resolution=resolution)
        # [input] ngf*4 x 32 x 32
        self.up1 = PixelShuffler(opt, ngf * 4, ngf * 2, rate=2)  # 32 -> 64
        self.up2 = PixelShuffler(opt, ngf * 2, ngf, rate=2)      # 64 -> 128
        self.to_class = define_conv(opt)(ngf, opt.class_num, ksize=3, pad=1, initialW=he_w)
        # [output] class_num x 128 x 128 (two pixel shufflers undo only two of
        # the three stride-2 downsamplings, so the output is half the input size)
    self.activation = F.relu
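# A minimal sketch of the assumed forward pass: three stride-2 downsampling
# convs, two atrous convs at the bottleneck, ASPP, then two pixel-shuffle
# upsamplers before the per-class projection.
def __call__(self, x):
    h = self.activation(self.norm1(self.c1(x)))
    h = self.activation(self.norm2(self.c2(h)))
    h = self.activation(self.norm3(self.c3(h)))
    h = self.activation(self.norm4(self.a1(h)))
    h = self.activation(self.norm5(self.a2(h)))
    h = self.aspp(h)
    h = self.activation(self.up1(h))
    h = self.activation(self.up2(h))
    return self.to_class(h)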
def __init__(self, opt):
    super().__init__()
    he_w = HeNormal()
    ngf = opt.ngf
    with self.init_scope():
        # Encoder
        # [input] 3 x 256 x 256
        self.e1 = define_conv(opt)(opt.input_ch, ngf, ksize=3, stride=1, pad=1, initialW=he_w)
        self.e1_bn = L.BatchNormalization(ngf)
        # [input] ngf x 256 x 256
        self.e2 = define_conv(opt)(ngf, ngf * 2, ksize=4, stride=2, pad=1, initialW=he_w)
        self.e2_bn = L.BatchNormalization(ngf * 2)
        # [input] ngf*2 x 128 x 128
        self.e3 = define_conv(opt)(ngf * 2, ngf * 4, ksize=4, stride=2, pad=1, initialW=he_w)
        self.e3_bn = L.BatchNormalization(ngf * 4)
        # [input] ngf*4 x 64 x 64
        self.e4 = define_conv(opt)(ngf * 4, ngf * 8, ksize=4, stride=2, pad=1, initialW=he_w)
        self.e4_bn = L.BatchNormalization(ngf * 8)
        # [input] ngf*8 x 32 x 32
        self.e5 = define_conv(opt)(ngf * 8, ngf * 16, ksize=4, stride=2, pad=1, initialW=he_w)
        self.e5_bn = L.BatchNormalization(ngf * 16)

        # Decoder
        # [input] ngf*16 x 16 x 16
        self.d1 = L.Deconvolution2D(ngf * 16, ngf * 8, ksize=4, stride=2, pad=1, initialW=he_w)
        self.d1_bn = L.BatchNormalization(ngf * 8)
        # [input] ngf*8*2 x 32 x 32 (concatenated with e4's output)
        self.d2 = L.Deconvolution2D(ngf * 8 * 2, ngf * 4, ksize=4, stride=2, pad=1, initialW=he_w)
        self.d2_bn = L.BatchNormalization(ngf * 4)
        # [input] ngf*4*2 x 64 x 64 (concatenated with e3's output)
        self.d3 = L.Deconvolution2D(ngf * 4 * 2, ngf * 2, ksize=4, stride=2, pad=1, initialW=he_w)
        self.d3_bn = L.BatchNormalization(ngf * 2)
        # [input] ngf*2*2 x 128 x 128 (concatenated with e2's output)
        self.d4 = L.Deconvolution2D(ngf * 2 * 2, ngf, ksize=4, stride=2, pad=1, initialW=he_w)
        self.d4_bn = L.BatchNormalization(ngf)
        # [input] ngf x 256 x 256
        self.to_class = define_conv(opt)(ngf, opt.nclass, ksize=3, pad=1, initialW=he_w)
        # [output] nclass x 256 x 256
    self.activation = F.relu
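# A minimal sketch of the assumed U-Net forward pass: encoder activations are
# kept and concatenated onto the matching decoder stage, which is what the
# doubled input channels (ngf*8*2, ngf*4*2, ngf*2*2) of d2-d4 imply.
def __call__(self, x):
    e1 = self.activation(self.e1_bn(self.e1(x)))
    e2 = self.activation(self.e2_bn(self.e2(e1)))
    e3 = self.activation(self.e3_bn(self.e3(e2)))
    e4 = self.activation(self.e4_bn(self.e4(e3)))
    e5 = self.activation(self.e5_bn(self.e5(e4)))
    d1 = self.activation(self.d1_bn(self.d1(e5)))
    d2 = self.activation(self.d2_bn(self.d2(F.concat((d1, e4), axis=1))))
    d3 = self.activation(self.d3_bn(self.d3(F.concat((d2, e3), axis=1))))
    d4 = self.activation(self.d4_bn(self.d4(F.concat((d3, e2), axis=1))))
    return self.to_class(d4)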