Code example #1
0
 def __init__(self, hp):
     """Build a patch-based multi-class discriminator.

     A convolutional trunk (``cnn_f``) downsamples the input while doubling
     channels (capped at 1024), then a 1x1 conv head (``cnn_c``) emits
     per-patch class logits.

     Args:
         hp: hyper-parameter dict with keys ``'n_res_blks'`` (even int;
             two res-blocks per stage), ``'nf'`` (base channel count) and
             ``'num_classes'`` (output channels of the head).
     """
     super(GPPatchMcResDis, self).__init__()
     assert hp['n_res_blks'] % 2 == 0, 'n_res_blk must be multiples of 2'
     self.n_layers = hp['n_res_blks'] // 2
     nf = hp['nf']
     # Stem: 7x7 conv with reflect padding, spatial size preserved.
     cnn_f = [Conv2dBlock(3, nf, 7, 1, 3,
                          pad_type='reflect',
                          norm='none',
                          activation='none')]
     # Downsampling stages: two act-first res-blocks, then 3x3 avg-pool
     # with stride 2 (reflect-padded), halving the spatial resolution.
     for _ in range(self.n_layers - 1):
         # builtin min() replaces np.min([...]) — no array needed for
         # clamping a scalar.
         nf_out = min(nf * 2, 1024)
         cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
         cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
         cnn_f += [nn.ReflectionPad2d(1)]
         cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
         nf = nf_out  # reuse the clamped value instead of recomputing it
     # Final stage: two more res-blocks, no pooling.
     nf_out = min(nf * 2, 1024)
     cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
     cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
     # 1x1 conv head: per-patch logits, one channel per class.
     cnn_c = [Conv2dBlock(nf_out, hp['num_classes'], 1, 1,
                          norm='none',
                          activation='lrelu',
                          activation_first=True)]
     self.cnn_f = nn.Sequential(*cnn_f)
     self.cnn_c = nn.Sequential(*cnn_c)
Code example #2
0
 def __init__(self, num_writers):
     """Build a writer-identity classifier over single-channel images.

     A conv trunk (``cnn_f``) downsamples 5 times while doubling channels
     (capped at 1024); the head (``cnn_c``) is a conv whose kernel spans
     the remaining feature map, producing one logit per writer.

     Args:
         num_writers: number of writer classes (output channels of the head).
     """
     super(WriterClaModel, self).__init__()
     self.n_layers = 6
     nf = 16
     # Stem: 7x7 conv on a 1-channel input, reflect padding.
     cnn_f = [Conv2dBlock(1, nf, 7, 1, 3,
                          pad_type='reflect',
                          norm='none',
                          activation='none')]
     # Downsampling stages: two act-first res-blocks then stride-2 avg-pool.
     for _ in range(self.n_layers - 1):
         # builtin min() replaces np.min([...]) for scalar clamping
         nf_out = min(nf * 2, 1024)
         cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
         cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
         cnn_f += [nn.ReflectionPad2d(1)]
         cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
         nf = nf_out  # reuse the clamped value instead of recomputing it
     nf_out = min(nf * 2, 1024)
     cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
     cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
     # Head kernel sized from module-level IMG_HEIGHT/IMG_WIDTH so it covers
     # the whole downsampled map (width gets +1, presumably for rounding
     # after the 5 pooling steps — NOTE(review): verify against input size).
     cnn_c = [Conv2dBlock(nf_out, num_writers, IMG_HEIGHT//(2**(self.n_layers-1)), IMG_WIDTH//(2**(self.n_layers-1))+1,
                          norm='none',
                          activation='lrelu',
                          activation_first=True)]
     self.cnn_f = nn.Sequential(*cnn_f)
     self.cnn_c = nn.Sequential(*cnn_c)
     self.cross_entropy = nn.CrossEntropyLoss()
Code example #3
0
 def __init__(self):
     """Build a real/fake discriminator over single-channel images.

     Same trunk as the writer classifier: five stride-2 downsampling stages
     with channels doubling up to 1024, then a conv head (``cnn_c``) whose
     kernel spans the remaining feature map, producing ``final_size``
     channels. A BCE-with-logits loss is attached for training.
     """
     super(DisModel, self).__init__()
     self.n_layers = 6
     self.final_size = 1024  # output channels of the head
     nf = 16
     # Stem: 7x7 conv on a 1-channel input, reflect padding.
     cnn_f = [Conv2dBlock(1, nf, 7, 1, 3,
                          pad_type='reflect',
                          norm='none',
                          activation='none')]
     # Downsampling stages: two act-first res-blocks then stride-2 avg-pool.
     for _ in range(self.n_layers - 1):
         # builtin min() replaces np.min([...]) for scalar clamping
         nf_out = min(nf * 2, 1024)
         cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
         cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
         cnn_f += [nn.ReflectionPad2d(1)]
         cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
         nf = nf_out  # reuse the clamped value instead of recomputing it
     nf_out = min(nf * 2, 1024)
     cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
     cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
     # Head kernel sized from module-level IMG_HEIGHT/IMG_WIDTH so it covers
     # the downsampled map (width +1, presumably for pooling rounding —
     # NOTE(review): verify against actual input size).
     cnn_c = [Conv2dBlock(nf_out, self.final_size, IMG_HEIGHT//(2**(self.n_layers-1)), IMG_WIDTH//(2**(self.n_layers-1))+1,
                          norm='none',
                          activation='lrelu',
                          activation_first=True)]
     self.cnn_f = nn.Sequential(*cnn_f)
     self.cnn_c = nn.Sequential(*cnn_c)
     self.bce = nn.BCEWithLogitsLoss()
Code example #4
0
File: networks.py — Project: CALM-LMU/FUNIT
    def __init__(self, hp):
        """Build a patch-based multi-class discriminator (FUNIT variant
        with a configurable number of input channels).

        A conv trunk (``cnn_f``) downsamples while doubling channels
        (capped at 1024), then a 1x1 conv head (``cnn_c``) emits per-patch
        class logits.

        Args:
            hp: hyper-parameter dict with keys ``'n_res_blks'`` (even int),
                ``'nf'`` (base channel count), ``'input_nc'`` (input
                channels) and ``'num_classes'``.
        """
        super(GPPatchMcResDis, self).__init__()
        assert hp['n_res_blks'] % 2 == 0, 'n_res_blk must be multiples of 2'
        self.n_layers = hp['n_res_blks'] // 2
        nf = hp['nf']
        input_channels = hp['input_nc']
        # Stem: KERNEL_SIZE_7 conv with reflect padding, size preserved.
        cnn_f = [
            Conv2dBlock(input_channels,
                        nf,
                        KERNEL_SIZE_7,
                        1,
                        3,
                        pad_type='reflect',
                        norm='none',
                        activation='none')
        ]
        # Downsampling stages: two act-first res-blocks then stride-2
        # avg-pool (reflect-padded).
        for _ in range(self.n_layers - 1):
            # builtin min() replaces np.min([...]) for scalar clamping
            nf_out = min(nf * 2, 1024)
            cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
            cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
            cnn_f += [nn.ReflectionPad2d(1)]
            cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
            nf = nf_out  # reuse the clamped value instead of recomputing it
        # Final stage: two more res-blocks, no pooling.
        nf_out = min(nf * 2, 1024)
        cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
        cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
        # 1x1 conv head: per-patch logits, one channel per class.
        cnn_c = [
            Conv2dBlock(nf_out,
                        hp['num_classes'],
                        1,
                        1,
                        norm='none',
                        activation='lrelu',
                        activation_first=True)
        ]
        self.cnn_f = nn.Sequential(*cnn_f)
        self.cnn_c = nn.Sequential(*cnn_c)

        # Project-local debugging helper; purpose not visible here —
        # NOTE(review): confirm Debugger has no training-time side effects.
        self.debug = Debugger()
Code example #5
0
 def __init__(self, hp):
     """Build a patch-based multi-class discriminator.

     A conv trunk (``cnn_f``) extracts and downsamples features, then a
     1x1 conv head (``cnn_c``) emits per-patch class logits.

     Args:
         hp: hyper-parameter dict with keys ``'n_res_blks'`` (even int),
             ``'nf'`` (base channel count) and ``'num_classes'``.
     """
     super(GPPatchMcResDis, self).__init__()
     assert hp['n_res_blks'] % 2 == 0, 'n_res_blk must be multiples of 2'
     self.n_layers = hp['n_res_blks'] // 2  # e.g. 5 in the original paper
     nf = hp['nf']  # e.g. 64
     # First conv extracts features; under the paper's settings the output
     # is batch x 128 x 128 x 64.
     cnn_f = [
         Conv2dBlock(3,
                     nf,
                     7,
                     1,
                     3,
                     pad_type='reflect',
                     norm='none',
                     activation='none')
     ]
     # Channels grow 128 -> 256 -> 512 -> 1024; under the paper's settings
     # the trunk output is batch x 8 x 8 x 1024.
     for _ in range(self.n_layers - 1):
         # builtin min() replaces np.min([...]) for scalar clamping
         nf_out = min(nf * 2, 1024)
         cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
         cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
         cnn_f += [nn.ReflectionPad2d(1)]
         cnn_f += [nn.AvgPool2d(kernel_size=3, stride=2)]
         nf = nf_out  # reuse the clamped value instead of recomputing it
     nf_out = min(nf * 2, 1024)  # 1024
     # Two more res-blocks bring the depth to 1024.
     cnn_f += [ActFirstResBlock(nf, nf, None, 'lrelu', 'none')]
     cnn_f += [ActFirstResBlock(nf, nf_out, None, 'lrelu', 'none')]
     # Final 1x1 conv yields per-class scores: a 4-D tensor of shape
     # batch x patch x patch x num_classes.
     cnn_c = [
         Conv2dBlock(nf_out,
                     hp['num_classes'],
                     1,
                     1,
                     norm='none',
                     activation='lrelu',
                     activation_first=True)
     ]
     self.cnn_f = nn.Sequential(*cnn_f)
     self.cnn_c = nn.Sequential(*cnn_c)