Example No. 1
    def __init__(self, actc='ReLU', acto='Softmax'):
        super(C3D_DISC, self).__init__()

        cact = lbuild.act(actc)
        oact = lbuild.act(acto)

        # C3D-style 3-D conv / max-pool feature extractor
        feat = [
            ('conv1', nn.Conv3d(3, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
            ('conv1_act', cact()),
            ('pool1', nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))),
            ('conv2', nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
            ('conv2_act', cact()),
            ('pool2', nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))),
            ('conv3a', nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
            ('conv3a_act', cact()),
            ('conv3b', nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
            ('conv3b_act', cact()),
            ('pool3', nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))),
            ('conv4a', nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
            ('conv4a_act', cact()),
            ('conv4b', nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
            ('conv4b_act', cact()),
            ('pool4', nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))),
            ('conv5a', nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
            ('conv5a_act', cact()),
            ('conv5b', nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
            ('conv5b_act', cact()),
            ('pool5', nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2), padding=(0, 1, 1))),
        ]

        # Fully connected classifier head (487 output classes)
        linear = [
            ('fc1', nn.Linear(8192, 4096)),
            ('fc1_act', cact()),
            ('do1', nn.Dropout(p=0.5)),
            ('fc2', nn.Linear(4096, 4096)),
            ('fc2_act', cact()),
            ('do2', nn.Dropout(p=0.5)),
            ('fc3', nn.Linear(4096, 487)),
            ('fc3_act', oact()),
        ]

        self.feature = nn.Sequential(OrderedDict(feat))
        self.fc = nn.Sequential(OrderedDict(linear))
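A hedged usage sketch for this C3D-style discriminator, assuming lbuild.act('ReLU') / lbuild.act('Softmax') resolve to the matching torch.nn activations and that the (not shown) forward() flattens the 3-D features before the fully connected head. For a 16-frame 112x112 clip the flattened feature size is 512 * 1 * 4 * 4 = 8192, which matches fc1:

import torch

disc = C3D_DISC()                        # defaults: ReLU conv blocks, Softmax output
clip = torch.randn(2, 3, 16, 112, 112)   # (batch, channels, frames, height, width)
feats = disc.feature(clip)               # -> (2, 512, 1, 4, 4)
scores = disc.fc(feats.flatten(1))       # 8192 features -> (2, 487) class scores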
Example No. 2
    def __init__(self,
                 ndf,
                 n_layer,
                 inch=3,
                 norm='bn',
                 actc='lrelu',
                 use_sigmoid=True):
        super(PatchDiscriminator, self).__init__()
        self.advloss = GANLoss(use_lsgan=False)
        #self.cuda_objects += [self.advloss]

        cact = lbuild.act(actc)
        norml = lbuild.norm(norm)

        seq = [('l1_conv',
                nn.Conv2d(in_channels=inch,
                          out_channels=ndf,
                          kernel_size=4,
                          stride=2,
                          padding=1,
                          bias=False)), ('l1_act', cact())]

        nin = ndf
        # Intermediate blocks: strided 4x4 convs that double the channel count
        for i in range(2, n_layer):
            nout = (2**(i - 1)) * ndf
            seq += [('l%s_conv' % str(i),
                     nn.Conv2d(in_channels=nin,
                               out_channels=nout,
                               kernel_size=4,
                               stride=2,
                               padding=1,
                               bias=False)),
                    ('l%s_norm' % str(i), norml(nout)),
                    ('l%s_act' % str(i), cact())]
            nin = nout

        # Final conv maps to a single-channel patch map of real/fake scores
        seq += [('l%s_conv' % str(n_layer),
                 nn.Conv2d(in_channels=nin,
                           out_channels=1,
                           kernel_size=4,
                           padding=1,
                           bias=False))]

        if use_sigmoid: seq += [('l%s_act' % str(n_layer), nn.Sigmoid())]

        self.model = nn.Sequential(OrderedDict(seq))
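A hedged usage sketch, assuming lbuild.act('lrelu') and lbuild.norm('bn') map to nn.LeakyReLU and nn.BatchNorm2d. With ndf=64 and n_layer=3, a 256x256 image passes through two stride-2 convs and a final stride-1 conv (kernel 4, padding 1), giving a 63x63 map of per-patch real/fake scores:

import torch

disc = PatchDiscriminator(ndf=64, n_layer=3)
img = torch.randn(1, 3, 256, 256)
patches = disc.model(img)                # -> (1, 1, 63, 63), in (0, 1) after the sigmoid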
Example No. 3
    def __init__(self,
                 ngf=64,
                 norm='bn',
                 use_dropout=False,
                 inch=3,
                 outch=3,
                 intemp=1,
                 padding='reflect',
                 uplayer='iconv',
                 actc='ReLU',
                 actd='LeakyReLU',
                 acto='Tanh'):
        super(C3DGAN, self).__init__()

        cact = lbuild.act(actc)
        dact = lbuild.act(actd)
        oact = lbuild.act(acto)

        # Encoder: C3D-style 3-D conv / max-pool stack
        seq = [('conv1',
                nn.Conv3d(3, 32, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
               ('conv1_act', cact()),
               ('pool1', nn.MaxPool3d(kernel_size=(1, 2, 2),
                                      stride=(1, 2, 2))),
               ('conv2',
                nn.Conv3d(32, 32, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
               ('conv2_act', cact()),
               ('pool2', nn.MaxPool3d(kernel_size=(2, 1, 1),
                                      stride=(2, 1, 1))),
               ('conv3a',
                nn.Conv3d(32, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
               ('conv3a_act', cact()),
               ('conv3b',
                nn.Conv3d(64, 64, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
               ('conv3b_act', cact()),
               ('pool3', nn.MaxPool3d(kernel_size=(2, 2, 2),
                                      stride=(2, 2, 2))),
               ('conv4a',
                nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
               ('conv4a_act', cact()),
               ('conv4b',
                nn.Conv3d(128, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1))),
               ('conv4b_act', cact()),
               ('pool4', nn.MaxPool3d(kernel_size=(2, 2, 2),
                                      stride=(2, 2, 2))),
               # Decoder: transposed 3-D convs restore the spatial resolution
               ('up1',
                nn.ConvTranspose3d(128,
                                   64,
                                   kernel_size=(1, 3, 3),
                                   stride=(1, 2, 2),
                                   padding=(0, 1, 1),
                                   output_padding=(0, 1, 1))),
               ('up1_act', dact()),
               ('up2',
                nn.ConvTranspose3d(64,
                                   32,
                                   kernel_size=(1, 3, 3),
                                   stride=(1, 2, 2),
                                   padding=(0, 1, 1),
                                   output_padding=(0, 1, 1))),
               ('up2_act', dact()),
               ('up3',
                nn.ConvTranspose3d(32,
                                   16,
                                   kernel_size=(1, 3, 3),
                                   stride=(1, 2, 2),
                                   padding=(0, 1, 1),
                                   output_padding=(0, 1, 1))),
               ('up3_act', dact()),
               ('out',
                nn.Conv3d(16, 3, kernel_size=(2, 3, 3), padding=(0, 1, 1))),
               ('out_act', oact())]

        self.model = nn.Sequential(OrderedDict(seq))
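A shape sketch, assuming forward() (not shown) simply applies self.model. The encoder halves the spatial size three times (pool1, pool3, pool4) and the temporal size three times (pool2, pool3, pool4); the decoder restores the spatial size, and the final (2, 3, 3) convolution with no temporal padding collapses the last two frames, so a 16-frame clip is mapped to a single frame:

import torch

gan = C3DGAN()
clip = torch.randn(1, 3, 16, 128, 128)   # (batch, channels, frames, height, width)
frame = gan.model(clip)                  # -> (1, 3, 1, 128, 128): one generated frame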
Example No. 4
    def __init__(self,
                 n_downs,
                 n_blocks,
                 n_up,
                 ngf=64,
                 norm='bn',
                 use_dropout=False,
                 inch=3,
                 outch=3,
                 padding='reflect',
                 uplayer='iconv',
                 actc='lrelu',
                 actd='lrelu',
                 acto='tanh'):
        super(ResNet, self).__init__()

        cact = lbuild.act(actc)
        dact = lbuild.act(actd)
        oact = lbuild.act(acto)

        pad = lbuild.pad(padding)
        norml = lbuild.norm(norm)

        deconv = lbuild.deconv(uplayer,
                               iconv_args={
                                   'kernel_size': 3,
                                   'padding': 1
                               },
                               tconv_args={
                                   'kernel_size': 3,
                                   'stride': 2,
                                   'padding': 1,
                                   'output_padding': 1
                               })

        if use_dropout: raise Exception('Dropout is not implemented yet')

        # Input layers
        seq = [('in_pad', pad(3)),
               ('in_conv',
                nn.Conv2d(in_channels=inch, out_channels=ngf, kernel_size=7)),
               ('in_norm', norml(ngf)), ('in_act', cact())]

        # Downsampling layers
        nin = ngf
        for i in range(1, n_downs + 1):
            nout = ngf * (2**i)
            seq += [('down%s_conv' % str(i),
                     nn.Conv2d(in_channels=nin,
                               out_channels=nout,
                               kernel_size=3,
                               stride=2,
                               padding=1)),
                    ('down%s_norm' % str(i), norml(nout)),
                    ('down%s_act' % str(i), cact())]
            nin = nout

        # Residual blocks
        for i in range(n_blocks):
            seq += [('resblock%s' % str(i + 1),
                     ResBlock(nin, pad=pad, cact=cact, norml=norml))]

        # Upsampling layers
        for i in range(1, n_up + 1):
            nout = ngf * (2**(n_downs - i))
            seq += [('up%s_deconv' % i,
                     deconv(in_channels=nin, out_channels=nout)),
                    ('up%s_norm' % i, norml(nout)), ('up%s_act' % i, dact())]
            nin = nout

        # Out layer
        seq += [('out_pad', pad(3)),
                ('out_conv',
                 nn.Conv2d(in_channels=nin, out_channels=outch,
                           kernel_size=7)), ('out_act', oact())]

        self.model = nn.Sequential(OrderedDict(seq))
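A hedged usage sketch for this ResNet-style generator, assuming lbuild maps 'lrelu', 'tanh', 'bn', and 'reflect' to nn.LeakyReLU, nn.Tanh, nn.BatchNorm2d, and nn.ReflectionPad2d, and that the 'iconv' deconv (interpolation followed by the 3x3 conv configured above) upsamples by a factor of two. With n_downs == n_up the output then keeps the input resolution:

import torch

gen = ResNet(n_downs=2, n_blocks=6, n_up=2)
x = torch.randn(1, 3, 256, 256)
y = gen.model(x)                         # expected (1, 3, 256, 256), in (-1, 1) after tanh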
Example No. 5
    def __init__(self,
                 n_downs,
                 n_blocks,
                 n_up,
                 ngf=64,
                 norm='bn',
                 use_dropout=False,
                 inch=3,
                 outch=3,
                 padding='reflect',
                 uplayer='iconv',
                 actc='lrelu',
                 actd='lrelu',
                 acto='tanh'):
        super(ResNetPartial, self).__init__()

        cact = lbuild.act(actc)
        dact = lbuild.act(actd)
        oact = lbuild.act(acto)

        pad = lbuild.pad(padding)
        norml = lbuild.norm(norm)

        if use_dropout: raise Exception('Dropout is not implemented yet')
        if uplayer != 'iconv':
            raise Exception('only iconv is implemented for uplayer')

        # Input layers
        seq = [
            PConv(in_channels=inch, out_channels=ngf, kernel_size=7,
                  padding=3),
            norml(ngf),
            cact()
        ]

        # Downsampling layers
        nin = ngf
        for i in range(1, n_downs + 1):
            nout = ngf * (2**i)
            seq += [
                PConv(in_channels=nin,
                      out_channels=nout,
                      kernel_size=3,
                      stride=2,
                      padding=1),
                norml(nout),
                cact()
            ]
            nin = nout

        # Residual blocks
        for i in range(n_blocks):
            seq += [PartialResBlock(nin, pad=pad, cact=cact, norml=norml)]

        # Upsampling layers
        up_seq = []
        for i in range(1, n_up + 1):
            nout = ngf * (2**(n_downs - i))
            up_seq += [
                #deconv(in_channels=nin, out_channels=nout),
                PConv(in_channels=nin,
                      out_channels=nout,
                      kernel_size=3,
                      stride=1,
                      padding=1),
                norml(nout),
                dact()
            ]
            nin = nout

        # Out layer
        out_seq = [
            PConv(in_channels=nin,
                  out_channels=outch,
                  kernel_size=7,
                  padding=3),
            oact()
        ]

        self.down_model = nn.ModuleList(seq)
        self.up_model = nn.ModuleList(up_seq)
        self.out_model = nn.ModuleList(out_seq)
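The layers here are stored in nn.ModuleList rather than nn.Sequential, presumably because the partial-convolution layers (PConv, PartialResBlock) pass a (features, mask) pair that nn.Sequential cannot thread through the plain norm/activation layers. A hypothetical forward helper under that assumption (PConv's real signature is not shown in this snippet):

def run_partial(modules, x, mask):
    # Sketch only: assumes partial-conv layers take and return (x, mask),
    # while norm and activation layers operate on the features alone.
    for layer in modules:
        if isinstance(layer, (PConv, PartialResBlock)):
            x, mask = layer(x, mask)
        else:
            x = layer(x)
    return x, mask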