def __init__(self, opt, input_ch, input_resolution=65):
    """Atrous Spatial Pyramid Pooling (ASPP) module, DeepLab-style.

    Builds one 1x1 branch and three dilated 3x3 branches (each followed by
    batch normalization) plus a 3x3 fusion convolution that maps the four
    concatenated branches back to ``input_ch`` channels.

    Args:
        opt: options object; ``opt.aspp_nf`` is the per-branch channel count.
        input_ch: channel count of the incoming feature map.
        input_resolution: spatial size the dilation rates are scaled to —
            the reference rates 6/12/18 from the original paper assume a
            65x65 feature map.
    """
    super().__init__()
    branch_ch = opt.aspp_nf
    # Rescale the paper's dilation rates (defined for 65x65 inputs)
    # proportionally to the actual feature-map resolution.
    scale = input_resolution / 65
    small, middle, large = (round(r * scale) for r in (6, 12, 18))
    w_init = HeNormal()
    with self.init_scope():
        self.x1 = define_conv(opt)(input_ch, branch_ch, ksize=1, initialW=w_init)
        self.x1_bn = L.BatchNormalization(branch_ch)
        self.x3_small = define_atrous_conv(opt)(input_ch, branch_ch, ksize=3, rate=small, initialW=w_init)
        self.x3_small_bn = L.BatchNormalization(branch_ch)
        self.x3_middle = define_atrous_conv(opt)(input_ch, branch_ch, ksize=3, rate=middle, initialW=w_init)
        self.x3_middle_bn = L.BatchNormalization(branch_ch)
        self.x3_large = define_atrous_conv(opt)(input_ch, branch_ch, ksize=3, rate=large, initialW=w_init)
        self.x3_large_bn = L.BatchNormalization(branch_ch)
        # Fuses the four concatenated branch outputs back to input_ch channels.
        self.sum_func = define_conv(opt)(branch_ch * 4, input_ch, ksize=3, pad=1, initialW=w_init)
    self.activation = F.relu
def __init__(self, opt):
    """Segmentation network built on a pre-trained ResNet-101 backbone.

    Takes the backbone's 'res3' feature map (512 channels), refines it with
    two dilated convolutions and an ASPP block, upsamples it through three
    2x pixel-shuffle stages, and projects to ``opt.class_num`` channels.

    Args:
        opt: options object; ``opt.class_num`` gives the output class count.
    """
    super().__init__()
    w_init = HeNormal()
    with self.init_scope():
        # Backbone weights come from the pre-trained caffemodel published at
        # https://github.com/KaimingHe/deep-residual-networks
        self.resnet101 = L.ResNet101Layers()
        self.use_layer = ('res3', 512)
        nf = self.use_layer[1]
        # Two dilated convolutions widen the receptive field without
        # changing the spatial resolution.
        self.c1 = define_atrous_conv(opt)(nf, nf, ksize=3, rate=2, initialW=w_init)
        self.norm1 = L.BatchNormalization(nf)
        self.c2 = define_atrous_conv(opt)(nf, nf, ksize=3, rate=4, initialW=w_init)
        self.norm2 = L.BatchNormalization(nf)
        self.aspp = ASPP(opt, nf, input_resolution=32)
        # Three 2x pixel-shuffle upsamplers: 32 -> 64 -> 128 -> 256,
        # halving the channel count at each stage.
        self.up1 = PixelShuffler(opt, nf, nf // 2, rate=2)
        self.up2 = PixelShuffler(opt, nf // 2, nf // 4, rate=2)
        self.up3 = PixelShuffler(opt, nf // 4, nf // 8, rate=2)
        self.to_class = define_conv(opt)(nf // 8, opt.class_num, ksize=3, pad=1, initialW=w_init)
    self.activation = F.leaky_relu
def __init__(self, opt):
    """Encoder-decoder segmentation network with dilated convs and ASPP.

    Encodes the input image with three stride-2 convolutions, enlarges the
    receptive field with two dilated convolutions and an ASPP block, then
    decodes with pixel-shuffle upsampling down to ``opt.class_num`` maps.

    Args:
        opt: options object providing ``img_shape`` (C, H, W), ``ngf``
            (base filter count) and ``class_num``.
    """
    super().__init__()
    w_init = HeNormal()
    n_downsample = 3
    ngf = opt.ngf
    with self.init_scope():
        # Encoder: three 4x4 stride-2 convolutions, doubling channels while
        # halving resolution each time (3 x 256 x 256 -> ngf*4 x 32 x 32).
        self.c1 = define_conv(opt)(opt.img_shape[0], ngf, ksize=4, stride=2, pad=1, initialW=w_init)
        self.norm1 = L.BatchNormalization(ngf)
        self.c2 = define_conv(opt)(ngf, ngf * 2, ksize=4, stride=2, pad=1, initialW=w_init)
        self.norm2 = L.BatchNormalization(ngf * 2)
        self.c3 = define_conv(opt)(ngf * 2, ngf * 4, ksize=4, stride=2, pad=1, initialW=w_init)
        self.norm3 = L.BatchNormalization(ngf * 4)
        # Dilated convolutions widen the receptive field at fixed resolution.
        self.a1 = define_atrous_conv(opt)(ngf * 4, ngf * 4, ksize=3, rate=2, initialW=w_init)
        self.norm4 = L.BatchNormalization(ngf * 4)
        self.a2 = define_atrous_conv(opt)(ngf * 4, ngf * 4, ksize=3, rate=4, initialW=w_init)
        self.norm5 = L.BatchNormalization(ngf * 4)
        # ASPP dilation rates are scaled to the post-encoder resolution.
        resolution = max(opt.img_shape[1], opt.img_shape[2]) // 2 ** n_downsample
        self.aspp = ASPP(opt, ngf * 4, input_resolution=resolution)
        # Decoder: two 2x pixel-shuffle stages.
        # NOTE(review): three stride-2 downsamplings but only two 2x
        # upsamplers — the output is half the input resolution unless the
        # forward pass upsamples elsewhere; confirm against the caller.
        self.up1 = PixelShuffler(opt, ngf * 4, ngf * 2, rate=2)
        self.up2 = PixelShuffler(opt, ngf * 2, ngf, rate=2)
        self.to_class = define_conv(opt)(ngf, opt.class_num, ksize=3, pad=1, initialW=w_init)
    self.activation = F.relu