コード例 #1
0
    def __init__(self,
                 dimension,
                 deconv=UpsampleDeConv,
                 activation=nn.LeakyReLU(),
                 drop_out: float = 0.5):
        """Build the fast Stanford-style generator: a 3-stage encoder, a
        residual bottleneck, and a 2-stage decoder with a refinement tail.

        Args:
            dimension: number of input (and output) image channels.
            deconv: factory called as ``deconv(in_ch, out_ch)`` to build
                each upsampling module.
            activation: activation module instance shared by all blocks.
            drop_out: dropout probability forwarded to the conv blocks.
        """
        super(StanfordFastGenerator, self).__init__()
        # Number of residual blocks in the bottleneck (was `int(3)` —
        # the literal is already an int).
        self.DEPTH_SIZE = 3
        # Encoder: 9x9 stem for a wide receptive field, then two
        # stride-2 downsampling blocks.
        self.conv1 = BaseBlock(dimension,
                               LATENT_SPACE_8,
                               9,
                               1,
                               activation=activation,
                               drop_out=drop_out)
        self.conv2 = BaseBlock(LATENT_SPACE_8,
                               LATENT_SPACE_4,
                               3,
                               2,
                               activation=activation,
                               drop_out=drop_out)
        self.conv3 = BaseBlock(LATENT_SPACE_4,
                               LATENT_SPACE_2,
                               3,
                               2,
                               activation=activation,
                               drop_out=drop_out)
        self.residual_blocks = nn.Sequential()

        for i in range(self.DEPTH_SIZE):
            self.residual_blocks.add_module(
                str(i),
                ResidualBlock(LATENT_SPACE_2,
                              LATENT_SPACE_2,
                              stride=1,
                              activation=activation))

        # Decoder: two upsampling stages mirroring the encoder widths.
        self.deconv1 = deconv(LATENT_SPACE_2, LATENT_SPACE_4)
        self.norm1 = torch.nn.BatchNorm2d(LATENT_SPACE_4, affine=True)
        self.deconv2 = deconv(LATENT_SPACE_4, LATENT_SPACE_8)
        self.norm2 = torch.nn.BatchNorm2d(LATENT_SPACE_8, affine=True)
        # Full-resolution refinement: two conv/BN/activation stages.
        self.refinement = nn.Sequential(
            ConvLayer(LATENT_SPACE_8, LATENT_SPACE_8, kernel_size=3, stride=1),
            torch.nn.BatchNorm2d(LATENT_SPACE_8, affine=True),
            activation,
            ConvLayer(LATENT_SPACE_8, LATENT_SPACE_8, kernel_size=3, stride=1),
            torch.nn.BatchNorm2d(LATENT_SPACE_8, affine=True),
            activation,
        )
        self.final = ConvLayer(LATENT_SPACE_8,
                               dimension,
                               kernel_size=9,
                               stride=1)
        self.activation = activation
コード例 #2
0
    def __init__(self,
                 dimension,
                 deconv=UpsampleDeConv,
                 activation=nn.LeakyReLU(),
                 drop_out: float = 0.5):
        """Build an encoder from pretrained SqueezeNet 1.1 features,
        adapting the first conv layer to `dimension` input channels.

        Args:
            dimension: number of input image channels (1 or 3 enables
                reuse of the pretrained first-layer weights).
            deconv: upsampling factory forwarded to the base class.
            activation: activation module forwarded to the base class.
            drop_out: dropout probability forwarded to the base class.
        """
        super(FreiburgSqueezeGenerator, self).__init__(dimension, deconv,
                                                       activation, drop_out)
        pretrained_features = models.squeezenet1_1(pretrained=True).features
        # Replacement stem conv; its weights are seeded from the
        # pretrained first conv below when the channel count allows it.
        conv = ConvLayer(dimension, 64, kernel_size=3, stride=1, bias=True)

        if dimension == 1 or dimension == 3:
            weight = torch.FloatTensor(64, dimension, 3, 3)
            # parameters[0]/[1] are the first conv's weight and bias;
            # assumes the pretrained feature stack starts with that conv.
            parameters = list(pretrained_features.parameters())
            for i in range(64):
                if dimension == 1:
                    # Grayscale input: average the RGB filter channels.
                    weight[i, :, :, :] = parameters[0].data[i].mean(0)
                else:
                    # RGB input: copy the pretrained filters verbatim.
                    weight[i, :, :, :] = parameters[0].data[i]
            conv.weight.data.copy_(weight)
            conv.bias.data.copy_(parameters[1].data)

        self.enc1 = nn.Sequential(conv)
        # NOTE(review): range(1, 2) appends only features[1]; presumably
        # intentional to skip the pretrained stem conv at index 0.
        for x in range(1, 2):
            self.enc1.add_module(str(x), pretrained_features[x])

        self.enc2 = torch.nn.Sequential()
        self.enc3 = torch.nn.Sequential()
        self.enc4 = torch.nn.Sequential()

        # Slice the remaining pretrained stages into three encoder
        # groups; the skipped indices (2, 5, 8) are the pool layers —
        # TODO confirm against torchvision's squeezenet1_1 layout.
        for x in range(3, 5):
            self.enc2.add_module(str(x), pretrained_features[x])
        for x in range(6, 8):
            self.enc3.add_module(str(x), pretrained_features[x])
        for x in range(9, 13):
            self.enc4.add_module(str(x), pretrained_features[x])
コード例 #3
0
 def __init__(self,
              dimension,
              deconv=UpsampleDeConv,
              activation=nn.LeakyReLU(),
              drop_out: float = 0.5):
     """Build an FPN-backed generator with four identical prediction
     heads (one per pyramid level) and two smoothing blocks.

     Args:
         dimension: number of input (and output) image channels.
         deconv: factory called as ``deconv(in_ch, out_ch)`` for the
             final upsampling layer.
         activation: activation module instance shared by all blocks.
         drop_out: dropout probability forwarded to the conv blocks.
     """
     super(TexasGenerator, self).__init__()
     self.fpn = FPN(dimension=dimension,
                    activation=activation,
                    pretrained=False,
                    drop_out=drop_out)

     def _make_head():
         # Two 3x3 conv + activation stages. Each call builds fresh
         # conv layers; the single activation instance is shared,
         # matching the original per-head construction.
         return nn.Sequential(
             ConvLayer(LATENT_SPACE, LATENT_SPACE, kernel_size=3,
                       bias=False), activation,
             ConvLayer(LATENT_SPACE, LATENT_SPACE, kernel_size=3,
                       bias=False), activation)

     # One head per FPN level; all four share the same architecture.
     self.head1 = _make_head()
     self.head2 = _make_head()
     self.head3 = _make_head()
     self.head4 = _make_head()
     # Fuse the four concatenated head outputs back to LATENT_SPACE.
     self.smooth1 = BaseBlock(4 * LATENT_SPACE,
                              LATENT_SPACE,
                              kernel_size=3,
                              stride=1,
                              bias=True,
                              activation=activation,
                              drop_out=drop_out)
     self.smooth2 = BaseBlock(1 * LATENT_SPACE,
                              LATENT_SPACE,
                              kernel_size=3,
                              stride=1,
                              bias=True,
                              activation=activation,
                              drop_out=drop_out)
     self.deconv1 = deconv(LATENT_SPACE, dimension)
     self.activation = activation
コード例 #4
0
 def __init__(self, dimension, deconv = UpsampleDeConv, activation = nn.LeakyReLU(), drop_out : float = 0.5):
     """Build a U-Net-style generator: five downsampling encoder stages,
     a center block, and four skip-concatenating decoder stages.

     Args:
         dimension: number of input (and output) image channels.
         deconv: factory called as ``deconv(in_ch, out_ch)`` for the
             upsampling layers.
         activation: activation module instance shared by all blocks.
         drop_out: dropout probability forwarded to the conv blocks.
     """
     super(MovaviGenerator, self).__init__()
     # NOTE(review): this stores the deconv FACTORY itself, not an
     # instance like the other generators' ``deconv(in, out)`` calls —
     # verify against forward(); it may be instantiated there or unused.
     self.deconv1 = deconv
     self.activation = activation
     # Encoder: each stage is two BaseBlocks followed by a stride-2
     # ConvLayer that halves resolution; channels 16->32->64->128->128.
     self.enc1   = BaseBlock(dimension, 16, 3, 2, activation=activation, drop_out=drop_out)
     self.enc2   = nn.Sequential(BaseBlock(16,  16, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(16,  16, 3, 1, activation=activation, drop_out=drop_out), ConvLayer(16,   32, 3, 2))
     self.enc3   = nn.Sequential(BaseBlock(32,  32, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(32,  32, 3, 1, activation=activation, drop_out=drop_out), ConvLayer(32,   64, 3, 2))
     self.enc4   = nn.Sequential(BaseBlock(64,  64, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(64,  64, 3, 1, activation=activation, drop_out=drop_out), ConvLayer(64,  128, 3, 2))
     self.enc5   = nn.Sequential(BaseBlock(128,128, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(128,128, 3, 1, activation=activation, drop_out=drop_out), ConvLayer(128, 128, 3, 2))
     self.center = nn.Sequential(BaseBlock(128,128, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(128,128, 3, 1, activation=activation, drop_out=drop_out))
     # Decoder: input widths (256/192/96/48) suggest concatenation with
     # encoder skip features — TODO confirm against forward().
     self.dec6   = nn.Sequential(BaseBlock(256,128, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(128,128, 3, 1, activation=activation, drop_out=drop_out), deconv(128, 64))
     self.dec7   = nn.Sequential(BaseBlock(192, 64, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(64,  64, 3, 1, activation=activation, drop_out=drop_out), deconv(64,  32))
     self.dec8   = nn.Sequential(BaseBlock(96,  32, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(32,  32, 3, 1, activation=activation, drop_out=drop_out), deconv(32,  16))
     self.dec9   = nn.Sequential(BaseBlock(48,  16, 3, 1, activation=activation, drop_out=drop_out), BaseBlock(16,  16, 3, 1, activation=activation, drop_out=drop_out), deconv(16,  16))
     self.dec10 = deconv(16, 16)
     self.final = ConvLayer(16, dimension, 3, 1)
コード例 #5
0
    def __init__(self,
                 dimension,
                 deconv=UpsampleDeConv,
                 activation=nn.LeakyReLU(),
                 drop_out: float = 0.5):
        """Build a U-Net-style Freiburg generator (4 encoder stages,
        center block, 4 decoder stages with skip concatenation).

        Args:
            dimension: number of input (and output) image channels.
            deconv: factory called as ``deconv(in_ch, out_ch)`` to build
                each upsampling module.
            activation: activation module instance shared by all blocks.
            drop_out: dropout probability forwarded to the conv blocks.
        """
        super(FreiburgGenerator, self).__init__()
        self.enc1 = FreiburgDoubleBlock(dimension,
                                        64,
                                        activation,
                                        drop_out=drop_out)
        self.enc2 = FreiburgDoubleBlock(64, 128, activation, drop_out=drop_out)
        self.enc3 = FreiburgDoubleBlock(128,
                                        256,
                                        activation,
                                        drop_out=drop_out)
        self.enc4 = FreiburgDoubleBlock(256,
                                        512,
                                        activation,
                                        drop_out=drop_out)

        # Consistency fix: forward activation/drop_out like every other
        # block (the original relied on FreiburgDoubleBlock's defaults
        # for the center only).
        self.center = FreiburgDoubleBlock(512,
                                          1024,
                                          activation,
                                          drop_out=drop_out)

        # Decoder input widths are 2x the deconv output because skip
        # features are concatenated before each FreiburgSingleBlock.
        self.deconv4 = deconv(1024, 512)
        self.dec4 = FreiburgSingleBlock(1024,
                                        512,
                                        activation=activation,
                                        drop_out=drop_out)
        self.deconv3 = deconv(512, 256)
        self.dec3 = FreiburgSingleBlock(512,
                                        256,
                                        activation=activation,
                                        drop_out=drop_out)
        self.deconv2 = deconv(256, 128)
        self.dec2 = FreiburgSingleBlock(256,
                                        128,
                                        activation=activation,
                                        drop_out=drop_out)
        self.deconv1 = deconv(128, 64)
        self.dec1 = FreiburgSingleBlock(128,
                                        64,
                                        activation=activation,
                                        drop_out=drop_out)
        self.final = ConvLayer(64, dimension, 1)
        self.activation = activation
        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2)
コード例 #6
0
    def __init__(self, dimension, deconv = UpsampleDeConv, activation = nn.LeakyReLU(), drop_out : float = 0.5):
        """Residual variant of the Movavi generator: five stride-2
        residual encoder stages, a residual center, and four decoder
        stages that consume concatenated skip features.

        Args:
            dimension: number of input (and output) image channels.
            deconv: factory called as ``deconv(in_ch, out_ch)``.
            activation: activation module shared by all blocks.
            drop_out: dropout probability forwarded to every block.
        """
        super(MovaviStrongGenerator, self).__init__(dimension, deconv, activation, drop_out)

        # Encoder: each stage halves spatial resolution (stride 2).
        self.enc1 = ResidualBlock(dimension, 32, activation=activation, stride=2, drop_out=drop_out)
        self.enc2 = ResidualBlock(32, 64, activation=activation, stride=2, drop_out=drop_out)
        self.enc3 = ResidualBlock(64, 128, activation=activation, stride=2, drop_out=drop_out)
        self.enc4 = ResidualBlock(128, 256, activation=activation, stride=2, drop_out=drop_out)
        self.enc5 = ResidualBlock(256, 256, activation=activation, stride=2, drop_out=drop_out)
        self.center = ResidualBlock(256, 256, activation=activation, stride=1, drop_out=drop_out)

        def build_decoder(cat_channels, width, up_channels):
            # Two BaseBlocks that squeeze the concatenated skip input
            # down to `width`, then an upsampling layer to `up_channels`.
            return nn.Sequential(
                BaseBlock(cat_channels, width, 3, 1, activation=activation, drop_out=drop_out),
                BaseBlock(width, width, 3, 1, activation=activation, drop_out=drop_out),
                deconv(width, up_channels))

        self.dec6 = build_decoder(512, 256, 128)
        self.dec7 = build_decoder(384, 128, 64)
        self.dec8 = build_decoder(192, 64, 32)
        self.dec9 = build_decoder(96, 32, 32)
        self.dec10 = deconv(32, 32)
        self.final = ConvLayer(32, dimension, 3, 1)
コード例 #7
0
    def __init__(self,
                 dimension,
                 deconv=UpsampleDeConv,
                 activation=nn.LeakyReLU(),
                 drop_out: float = 0.5):
        """Build a half-width variant of the Freiburg U-Net generator
        (channel counts halved relative to FreiburgGenerator).

        Args:
            dimension: number of input (and output) image channels.
            deconv: factory called as ``deconv(in_ch, out_ch)`` to build
                each upsampling module.
            activation: activation module instance shared by all blocks.
            drop_out: dropout probability forwarded to the conv blocks.
        """
        super(FreiburgFastGenerator, self).__init__(dimension, deconv,
                                                    activation, drop_out)
        self.enc1 = FreiburgDoubleBlock(dimension,
                                        32,
                                        activation,
                                        drop_out=drop_out)
        self.enc2 = FreiburgDoubleBlock(32, 64, activation, drop_out=drop_out)
        self.enc3 = FreiburgDoubleBlock(64, 128, activation, drop_out=drop_out)
        self.enc4 = FreiburgDoubleBlock(128,
                                        256,
                                        activation,
                                        drop_out=drop_out)

        # Consistency fix: forward activation/drop_out like every other
        # block (the original relied on FreiburgDoubleBlock's defaults
        # for the center only).
        self.center = FreiburgDoubleBlock(256,
                                          512,
                                          activation,
                                          drop_out=drop_out)

        # Decoder input widths are 2x the deconv output because skip
        # features are concatenated before each FreiburgSingleBlock.
        self.deconv4 = deconv(512, 256)
        self.dec4 = FreiburgSingleBlock(512,
                                        256,
                                        activation=activation,
                                        drop_out=drop_out)
        self.deconv3 = deconv(256, 128)
        self.dec3 = FreiburgSingleBlock(256,
                                        128,
                                        activation=activation,
                                        drop_out=drop_out)
        self.deconv2 = deconv(128, 64)
        self.dec2 = FreiburgSingleBlock(128,
                                        64,
                                        activation=activation,
                                        drop_out=drop_out)
        self.deconv1 = deconv(64, 32)
        self.dec1 = FreiburgSingleBlock(64,
                                        32,
                                        activation=activation,
                                        drop_out=drop_out)
        self.final = ConvLayer(32, dimension, 1)
コード例 #8
0
    def __init__(self,
                 dimension,
                 deconv=UpsampleDeConv,
                 activation=nn.LeakyReLU(),
                 drop_out: float = 0.5):
        """Build the deep Stanford-style generator: a 3-stage encoder,
        a 9-block residual bottleneck, and a 2-stage decoder.

        Args:
            dimension: number of input (and output) image channels.
            deconv: factory called as ``deconv(in_ch, out_ch)`` to build
                each upsampling module.
            activation: activation module instance shared by all blocks.
            drop_out: dropout probability forwarded to the conv blocks.
        """
        super(StanfordSupremeGenerator, self).__init__()
        # Number of residual blocks in the bottleneck (was `int(9)` —
        # the literal is already an int).
        self.DEPTH_SIZE = 9
        # Encoder: 9x9 stem, then two stride-2 downsampling blocks.
        self.conv1 = BaseBlock(dimension,
                               64,
                               9,
                               1,
                               activation=activation,
                               drop_out=drop_out)
        self.conv2 = BaseBlock(64,
                               128,
                               3,
                               2,
                               activation=activation,
                               drop_out=drop_out)
        self.conv3 = BaseBlock(128,
                               256,
                               3,
                               2,
                               activation=activation,
                               drop_out=drop_out)
        self.residual_blocks = nn.Sequential()

        for i in range(self.DEPTH_SIZE):
            self.residual_blocks.add_module(
                str(i), ResidualBlock(256,
                                      256,
                                      stride=1,
                                      activation=activation))

        # Decoder: deconv inputs are 2x the encoder widths, which
        # suggests skip concatenation in forward() — see forward().
        self.deconv1 = deconv(512, 128)
        self.norm1 = torch.nn.BatchNorm2d(128, affine=True)
        self.deconv2 = deconv(256, 64)
        self.norm2 = torch.nn.BatchNorm2d(64, affine=True)
        self.final = ConvLayer(64, dimension, kernel_size=9, stride=1)
        self.activation = activation
コード例 #9
0
    def __init__(self,
                 dimension,
                 deconv=UpsampleDeConv,
                 activation=nn.LeakyReLU(),
                 drop_out: float = 0.5):
        """Build a TernausNet-style generator with a pretrained VGG11
        encoder, adapting the first conv layer to `dimension` channels.

        Args:
            dimension: number of input (and output) image channels
                (1 or 3 enables reuse of the pretrained stem weights).
            deconv: factory called as ``deconv(in_ch, out_ch)`` to build
                each upsampling module.
            activation: activation module instance shared by all blocks.
            drop_out: dropout probability forwarded to the conv blocks.
        """
        super(FreiburgTernauGenerator, self).__init__()
        self.activation = activation
        base_model = models.vgg11(pretrained=True).features
        conv = nn.Conv2d(dimension, LATENT_SPACE, kernel_size=3, padding=1)

        if dimension == 1 or dimension == 3:
            # Fix: size the buffer with LATENT_SPACE instead of a
            # hard-coded 64 so it always matches both the replacement
            # conv's out_channels and the fill loop below.
            weight = torch.FloatTensor(LATENT_SPACE, dimension, 3, 3)
            # parameters[0]/[1] are the pretrained stem conv's weight
            # and bias.
            parameters = list(base_model.parameters())
            for i in range(LATENT_SPACE):
                if dimension == 1:
                    # Grayscale input: average the RGB filter channels.
                    weight[i, :, :, :] = parameters[0].data[i].mean(0)
                else:
                    # RGB input: copy the pretrained filters verbatim.
                    weight[i, :, :, :] = parameters[0].data[i]
            conv.weight.data.copy_(weight)
            conv.bias.data.copy_(parameters[1].data)

        # Slice the VGG11 feature stack into five encoder stages,
        # replacing the stem conv (index 0) with the adapted one.
        self.encoder1 = nn.Sequential(conv)
        for x in range(1, 3):
            self.encoder1.add_module(str(x), base_model[x])

        self.encoder2 = nn.Sequential()
        for x in range(3, 6):
            self.encoder2.add_module(str(x), base_model[x])

        self.encoder3 = nn.Sequential()
        for x in range(6, 11):
            self.encoder3.add_module(str(x), base_model[x])

        self.encoder4 = nn.Sequential()
        for x in range(11, 16):
            self.encoder4.add_module(str(x), base_model[x])

        self.encoder5 = nn.Sequential()
        for x in range(16, 21):
            self.encoder5.add_module(str(x), base_model[x])

        self.deconv1 = deconv(512, 512)
        self.center = FreiburgSingleBlock(512,
                                          256,
                                          activation=activation,
                                          drop_out=drop_out)

        # Decoder: deconv input widths (768/384/192) suggest skip
        # concatenation with encoder features in forward().
        self.deconv5 = deconv(768, 512)
        self.decoder5 = FreiburgSingleBlock(512,
                                            256,
                                            activation=activation,
                                            drop_out=drop_out)
        self.deconv4 = deconv(768, 512)
        self.decoder4 = FreiburgSingleBlock(512,
                                            128,
                                            activation=activation,
                                            drop_out=drop_out)
        self.deconv3 = deconv(384, 256)
        self.decoder3 = FreiburgSingleBlock(256,
                                            64,
                                            activation=activation,
                                            drop_out=drop_out)
        self.deconv2 = deconv(192, 128)
        self.decoder2 = FreiburgSingleBlock(128,
                                            32,
                                            activation=activation,
                                            drop_out=drop_out)
        self.decoder1 = nn.Conv2d(96, 32, 3, padding=1)

        self.final = ConvLayer(32, dimension, 1)
        # (Removed a duplicate `self.activation = activation` that
        # repeated the assignment made at the top of this method.)
コード例 #10
0
    def __init__(self,
                 dimension,
                 deconv=UpsampleDeConv,
                 activation=nn.LeakyReLU(),
                 drop_out: float = 0.5):
        """Build a generator with a pretrained ResNet-18 encoder,
        adapting the stem conv to `dimension` input channels.

        Args:
            dimension: number of input (and output) image channels
                (1 or 3 enables reuse of the pretrained stem weights).
            deconv: factory called as ``deconv(in_ch, out_ch)`` to build
                each upsampling module.
            activation: activation module instance shared by all blocks.
            drop_out: dropout probability forwarded to the conv blocks.
        """
        super(FreiburgResidualGenerator, self).__init__()
        self.activation = activation
        self.max_pool = nn.MaxPool2d(2, 2)
        base_model = models.resnet18(pretrained=True)
        # Replacement stem conv matching ResNet-18's 7x7/stride-2 stem.
        conv = nn.Conv2d(dimension,
                         LATENT_SPACE,
                         kernel_size=7,
                         stride=2,
                         padding=3,
                         bias=False)

        if dimension == 1 or dimension == 3:
            # Fix: size the buffer with LATENT_SPACE instead of a
            # hard-coded 64 so it always matches both the replacement
            # conv's out_channels and the fill loop below.
            weight = torch.FloatTensor(LATENT_SPACE, dimension, 7, 7)
            # parameters[0] is the pretrained stem conv's weight.
            parameters = list(base_model.parameters())
            for i in range(LATENT_SPACE):
                if dimension == 1:
                    # Grayscale input: average the RGB filter channels.
                    weight[i, :, :, :] = parameters[0].data[i].mean(0)
                else:
                    # RGB input: copy the pretrained filters verbatim.
                    weight[i, :, :, :] = parameters[0].data[i]
            conv.weight.data.copy_(weight)

        self.encoder0 = nn.Sequential(
            conv,
            base_model.bn1,
            activation,
            base_model.maxpool,
        )

        # Reuse the four pretrained residual stages as encoder stages.
        self.encoder1 = base_model.layer1
        self.encoder2 = base_model.layer2
        self.encoder3 = base_model.layer3
        self.encoder4 = base_model.layer4

        self.deconv_center = deconv(512, 512)
        self.center = FreiburgSingleBlock(512,
                                          256,
                                          activation=activation,
                                          drop_out=drop_out)

        # Decoder: the 768/384 deconv input widths suggest skip
        # concatenation with encoder features in forward().
        self.deconv5 = deconv(768, 512)
        self.decoder5 = FreiburgSingleBlock(512,
                                            256,
                                            activation=activation,
                                            drop_out=drop_out)
        self.deconv4 = deconv(512, 512)
        self.decoder4 = FreiburgSingleBlock(512,
                                            256,
                                            activation=activation,
                                            drop_out=drop_out)
        self.deconv3 = deconv(384, 256)
        self.decoder3 = FreiburgSingleBlock(256,
                                            64,
                                            activation=activation,
                                            drop_out=drop_out)
        self.deconv2 = deconv(128, 128)
        self.decoder2 = FreiburgSingleBlock(128,
                                            128,
                                            activation=activation,
                                            drop_out=drop_out)
        self.deconv1 = deconv(128, 128)
        self.decoder1 = FreiburgSingleBlock(128,
                                            32,
                                            activation=activation,
                                            drop_out=drop_out)
        self.decoder0 = nn.Conv2d(32, 32, 3, padding=1)
        self.final = ConvLayer(32, dimension, 1)
        # (Removed a duplicate `self.activation = activation` that
        # repeated the assignment made at the top of this method.)
コード例 #11
0
    def __init__(self,
                 dimension,
                 activation,
                 pretrained=True,
                 drop_out: float = 0.5):
        """Build a Feature Pyramid Network on a ResNet-18 backbone,
        adapting the stem conv to `dimension` input channels.

        Args:
            dimension: number of input image channels (1 or 3 enables
                reuse of the pretrained stem weights).
            activation: activation module instance shared by all blocks.
            pretrained: load ImageNet weights for the backbone.
            drop_out: dropout probability forwarded to the conv blocks.
        """
        super(FPN, self).__init__()
        self.activation = activation
        self.max_pool = nn.MaxPool2d(2, 2)
        base_model = models.resnet18(pretrained=pretrained)
        # Replacement stem conv matching ResNet-18's 7x7/stride-2 stem.
        conv = nn.Conv2d(dimension,
                         LATENT_SPACE,
                         kernel_size=7,
                         stride=2,
                         padding=3,
                         bias=False)

        if dimension == 1 or dimension == 3:
            # Fix: size the buffer with LATENT_SPACE instead of a
            # hard-coded 64 so it always matches both the replacement
            # conv's out_channels and the fill loop below.
            weight = torch.FloatTensor(LATENT_SPACE, dimension, 7, 7)
            # parameters[0] is the pretrained stem conv's weight.
            parameters = list(base_model.parameters())
            for i in range(LATENT_SPACE):
                if dimension == 1:
                    # Grayscale input: average the RGB filter channels.
                    weight[i, :, :, :] = parameters[0].data[i].mean(0)
                else:
                    # RGB input: copy the pretrained filters verbatim.
                    weight[i, :, :, :] = parameters[0].data[i]
            conv.weight.data.copy_(weight)

        self.encoder0 = nn.Sequential(
            conv,
            base_model.bn1,
            activation,
            base_model.maxpool,
        )

        # Bottom-up pathway: reuse the four pretrained residual stages.
        self.encoder1 = base_model.layer1
        self.encoder2 = base_model.layer2
        self.encoder3 = base_model.layer3
        self.encoder4 = base_model.layer4

        # Top-down smoothing blocks, one per merge step.
        self.td1 = BaseBlock(LATENT_SPACE,
                             LATENT_SPACE,
                             kernel_size=3,
                             stride=1,
                             bias=True,
                             activation=activation,
                             drop_out=drop_out)
        self.td2 = BaseBlock(LATENT_SPACE,
                             LATENT_SPACE,
                             kernel_size=3,
                             stride=1,
                             bias=True,
                             activation=activation,
                             drop_out=drop_out)
        self.td3 = BaseBlock(LATENT_SPACE,
                             LATENT_SPACE,
                             kernel_size=3,
                             stride=1,
                             bias=True,
                             activation=activation,
                             drop_out=drop_out)

        # 1x1 lateral projections from each backbone stage width
        # (ResNet-18: 64/64/128/256/512) down to LATENT_SPACE.
        self.lateral4 = ConvLayer(512, LATENT_SPACE, kernel_size=1, bias=False)
        self.lateral3 = ConvLayer(256, LATENT_SPACE, kernel_size=1, bias=False)
        self.lateral2 = ConvLayer(128, LATENT_SPACE, kernel_size=1, bias=False)
        self.lateral1 = ConvLayer(64, LATENT_SPACE, kernel_size=1, bias=False)
        self.lateral0 = ConvLayer(64, LATENT_SPACE, kernel_size=1, bias=False)

        # Explicitly unfreeze everything, including the pretrained
        # backbone (requires_grad is True by default; kept for clarity).
        for param in self.parameters():
            param.requires_grad = True