def __init__(self, dimension, deconv=UpsampleDeConv, activation=nn.LeakyReLU(), drop_out: float = 0.5):
     """Build the five stride-2 residual encoder stages of the Adelaide residual generator.

     :param dimension: channel count of the network input, fed to the first stage.
     :param deconv: deconvolution factory, forwarded to the parent constructor.
     :param activation: activation module shared by every encoder block.
     :param drop_out: dropout rate, forwarded to the parent constructor.
     """
     super(AdelaideResidualGenerator, self).__init__(dimension, deconv, activation, drop_out)
     # enc1..enc5: progressively wider stride-2 residual stages. Attribute assignment
     # via setattr registers each block on the module exactly like a direct assignment.
     stage_widths = ((dimension, 64), (64, 128), (128, 256), (256, 512), (512, 512))
     for index, (c_in, c_out) in enumerate(stage_widths, start=1):
          setattr(self, "enc%d" % index, ResidualBlock(c_in, c_out, stride=2, activation=activation))
 def __init__(self, dimension, deconv=UpsampleDeConv, activation=nn.LeakyReLU(), drop_out: float = 0.5):
     """Assemble the light-weight encoder/decoder pair of the Adelaide fast generator.

     :param dimension: channel count of the network input (and of the last decoder's output).
     :param deconv: deconvolution factory passed to every SimpleDecoder.
     :param activation: activation module shared by every encoder block.
     :param drop_out: dropout rate, forwarded to the parent constructor.
     """
     super(AdelaideFastGenerator, self).__init__(dimension, deconv, activation, drop_out)
     # One width table serves both directions: enc1..enc5 walk it forward with
     # stride-2 residual blocks, dec1..dec5 walk it back down with decoders.
     widths = (dimension, 16, 32, 64, 128, 256)
     for step in range(5):
         setattr(self, "enc%d" % (step + 1), ResidualBlock(widths[step], widths[step + 1], stride=2, activation=activation))
     for step in range(5):
         setattr(self, "dec%d" % (step + 1), SimpleDecoder(widths[5 - step], widths[4 - step], deconv=deconv))
    def __init__(self, dimension, deconv=UpsampleDeConv, activation=nn.LeakyReLU(), drop_out: float = 0.5):
        """Build the Stanford fast generator: conv head, residual bottleneck, deconv tail.

        :param dimension: channel count of the input (the final conv maps back to it).
        :param deconv: factory used for the two upsampling layers.
        :param activation: activation module reused throughout; also kept on ``self``.
        :param drop_out: dropout rate handed to every BaseBlock.
        """
        super(StanfordFastGenerator, self).__init__()
        self.DEPTH_SIZE = int(3)  # number of residual blocks in the bottleneck
        # Head: BaseBlock positional args are (in, out, 9, 1) / (…, 3, 2) — by analogy
        # with ConvLayer these look like kernel size and stride; confirm in BaseBlock.
        self.conv1 = BaseBlock(dimension, LATENT_SPACE_8, 9, 1, activation=activation, drop_out=drop_out)
        self.conv2 = BaseBlock(LATENT_SPACE_8, LATENT_SPACE_4, 3, 2, activation=activation, drop_out=drop_out)
        self.conv3 = BaseBlock(LATENT_SPACE_4, LATENT_SPACE_2, 3, 2, activation=activation, drop_out=drop_out)
        # Bottleneck: nn.Sequential names its children "0".."2", identical to the
        # add_module(str(i), ...) loop this replaces.
        self.residual_blocks = nn.Sequential(*(
            ResidualBlock(LATENT_SPACE_2, LATENT_SPACE_2, stride=1, activation=activation)
            for _ in range(self.DEPTH_SIZE)))
        # Tail: two deconv + batch-norm steps back up the width ladder.
        self.deconv1 = deconv(LATENT_SPACE_2, LATENT_SPACE_4)
        self.norm1 = torch.nn.BatchNorm2d(LATENT_SPACE_4, affine=True)
        self.deconv2 = deconv(LATENT_SPACE_4, LATENT_SPACE_8)
        self.norm2 = torch.nn.BatchNorm2d(LATENT_SPACE_8, affine=True)
        # Refinement: two conv / norm / activation rounds at constant width.
        self.refinement = nn.Sequential(
            ConvLayer(LATENT_SPACE_8, LATENT_SPACE_8, kernel_size=3, stride=1),
            torch.nn.BatchNorm2d(LATENT_SPACE_8, affine=True),
            activation,
            ConvLayer(LATENT_SPACE_8, LATENT_SPACE_8, kernel_size=3, stride=1),
            torch.nn.BatchNorm2d(LATENT_SPACE_8, affine=True),
            activation,
        )
        self.final = ConvLayer(LATENT_SPACE_8, dimension, kernel_size=9, stride=1)
        self.activation = activation
 def __init__(self, dimension, deconv=UpsampleDeConv, activation=nn.LeakyReLU(), drop_out: float = 0.5):
     """Set up the Movavi residual encoder (enc1..enc5) plus a stride-1 center block.

     :param dimension: channel count of the input, fed to the first encoder stage.
     :param deconv: deconvolution factory, forwarded to the parent constructor.
     :param activation: activation module shared by all blocks created here.
     :param drop_out: dropout rate used by enc2..enc5 and the center block.
     """
     super(MovaviResidualGenerator, self).__init__(dimension, deconv, activation, drop_out)
     ladder = ((dimension, 16), (16, 32), (32, 64), (64, 128), (128, 128))
     for pos, (c_in, c_out) in enumerate(ladder, start=1):
         # NOTE(review): enc1 passed no drop_out in the original, unlike the later
         # stages — preserved here; confirm that asymmetry is intentional.
         extra = {} if pos == 1 else {"drop_out": drop_out}
         setattr(self, "enc%d" % pos, ResidualBlock(c_in, c_out, stride=2, activation=activation, **extra))
     self.center = ResidualBlock(128, 128, stride=1, activation=activation, drop_out=drop_out)
 def __init__(self, dimension, deconv=UpsampleDeConv, activation=nn.LeakyReLU(), drop_out: float = 0.5):
     """Create the six stride-2 residual layers of the Berkeley residual generator.

     :param dimension: channel count of the input, fed to layer1.
     :param deconv: deconvolution factory, forwarded to the parent constructor.
     :param activation: activation module shared by every layer.
     :param drop_out: dropout rate, forwarded to the parent constructor.
     """
     super(BerkeleyResidualGenerator, self).__init__(dimension, deconv, activation, drop_out)
     # layer1..layer6 follow one width ladder; widths plateau at 512 from layer4 on.
     widths = (dimension, 64, 128, 256, 512, 512, 512)
     for i in range(6):
         setattr(self, "layer%d" % (i + 1), ResidualBlock(widths[i], widths[i + 1], stride=2, activation=activation))
    def __init__(self, dimension, deconv=UpsampleDeConv, activation=nn.LeakyReLU(), drop_out: float = 0.5):
        """Build the Stanford supreme generator: conv head, 9-block residual core, deconv tail.

        :param dimension: channel count of the input (the final conv maps back to it).
        :param deconv: factory used for the two upsampling layers.
        :param activation: activation module reused throughout; also kept on ``self``.
        :param drop_out: dropout rate handed to every BaseBlock.
        """
        super(StanfordSupremeGenerator, self).__init__()
        self.DEPTH_SIZE = int(9)  # residual blocks in the bottleneck
        self.conv1 = BaseBlock(dimension, 64, 9, 1, activation=activation, drop_out=drop_out)
        self.conv2 = BaseBlock(64, 128, 3, 2, activation=activation, drop_out=drop_out)
        self.conv3 = BaseBlock(128, 256, 3, 2, activation=activation, drop_out=drop_out)
        # nn.Sequential names its children "0".."8", identical to the
        # add_module(str(i), ...) loop this replaces.
        self.residual_blocks = nn.Sequential(*(
            ResidualBlock(256, 256, stride=1, activation=activation)
            for _ in range(self.DEPTH_SIZE)))
        # Deconv input widths are double the matching encoder outputs (512 vs 256,
        # 256 vs 128) — presumably skip connections are concatenated in forward();
        # confirm against the forward pass.
        self.deconv1 = deconv(512, 128)
        self.norm1 = torch.nn.BatchNorm2d(128, affine=True)
        self.deconv2 = deconv(256, 64)
        self.norm2 = torch.nn.BatchNorm2d(64, affine=True)
        self.final = ConvLayer(64, dimension, kernel_size=9, stride=1)
        self.activation = activation
    def __init__(self, dimension, deconv=UpsampleDeConv, activation=nn.LeakyReLU(), drop_out: float = 0.5):
        """Build the Movavi strong generator: 5-stage residual encoder, center block,
        and four two-conv+deconv decoder stages followed by a final upsample and conv.

        :param dimension: channel count of the input (and of the final conv's output).
        :param deconv: deconvolution factory used by every decoder stage.
        :param activation: activation module shared by all blocks created here.
        :param drop_out: dropout rate used by every encoder/decoder block.
        """
        super(MovaviStrongGenerator, self).__init__(dimension, deconv, activation, drop_out)
        # Encoder: enc1..enc5 along one width table, all stride 2.
        taps = (dimension, 32, 64, 128, 256, 256)
        for k in range(5):
            setattr(self, "enc%d" % (k + 1), ResidualBlock(taps[k], taps[k + 1], stride=2, activation=activation, drop_out=drop_out))
        self.center = ResidualBlock(256, 256, stride=1, activation=activation, drop_out=drop_out)

        def up(cat_in, width, out):
            # One decoder stage: two 3x3 BaseBlocks, then an upsampling deconv.
            # cat_in exceeds the previous stage's output — presumably a skip
            # connection is concatenated in forward(); confirm there.
            return nn.Sequential(
                BaseBlock(cat_in, width, 3, 1, activation=activation, drop_out=drop_out),
                BaseBlock(width, width, 3, 1, activation=activation, drop_out=drop_out),
                deconv(width, out))

        self.dec6 = up(512, 256, 128)
        self.dec7 = up(384, 128, 64)
        self.dec8 = up(192, 64, 32)
        self.dec9 = up(96, 32, 32)
        self.dec10 = deconv(32, 32)
        self.final = ConvLayer(32, dimension, 3, 1)
 def __init__(self, dimension, deconv=UpsampleDeConv, activation=nn.LeakyReLU(), drop_out: float = 0.5):
     """Build the Berkeley fast generator: six stride-2 residual encoder layers
     (layer1..layer6) mirrored by six SimpleDecoder layers (layer7..layer12).

     :param dimension: channel count of the input (and of layer12's output).
     :param deconv: deconvolution factory passed to every SimpleDecoder.
     :param activation: activation module shared by every encoder layer.
     :param drop_out: dropout rate, forwarded to the parent constructor.
     """
     super(BerkeleyFastGenerator, self).__init__(dimension, deconv, activation, drop_out)
     enc_widths = (dimension, 16, 32, 64, 128, 256, 512)
     for i in range(6):
         setattr(self, "layer%d" % (i + 1), ResidualBlock(enc_widths[i], enc_widths[i + 1], stride=2, activation=activation))
     # Decoder inputs for layer8..layer11 are twice the previous decoder's output
     # (the original wrote them as 256*2, 128*2, ... ) — presumably skip
     # connections are concatenated in forward(); confirm there.
     dec_in = (512, 512, 256, 128, 64, 16)
     dec_out = (256, 128, 64, 32, 16, dimension)
     for i in range(6):
         setattr(self, "layer%d" % (i + 7), SimpleDecoder(dec_in[i], dec_out[i], deconv=deconv))