Example #1
    def __init__(self):
        super(Discriminator, self).__init__()

        self.conv_block = nn.Sequential(
            # [Conv2d]
            # H_out = (H_in + 2 x padding - dilation x (kernel_size -1) -1) / stride + 1
            # [-1, 3, 64, 64] -> [-1, 64, 32, 32]
            nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),

            # [-1, 64, 32, 32] -> [-1, 128, 16, 16]
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),

            # [-1, 128, 16, 16] -> [-1, 256, 8, 8]
            nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),

            # [-1, 256, 8, 8] -> [-1, 512, 4, 4]
            nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),

            # [-1, 512, 4, 4] -> [-1, 1, 1, 1]
            nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0),
            nn.Sigmoid())

        utils.weights_init_normal(self)
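
All ten examples call a repo-local utils.weights_init_normal, whose implementation is not shown on this page. A minimal sketch of the common DCGAN-style recipe, assuming a variant that walks a whole module (an assumption, not the actual utils code), is:

import torch.nn as nn

def weights_init_normal(model, mean=0.0, std=0.02):
    # DCGAN-style init: walk every submodule and draw conv/linear
    # weights from N(mean, std); BatchNorm scale from N(1.0, std),
    # BatchNorm shift set to zero.
    for m in model.modules():
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
            nn.init.normal_(m.weight, mean, std)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0.0)
        elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
            nn.init.normal_(m.weight, 1.0, std)
            nn.init.constant_(m.bias, 0.0)

A function like this covers both the whole-model calls (weights_init_normal(self)) and the per-module calls in Examples #7 and #10; Examples #4 and #5 instead pass a list of layers, which is sketched after Example #5.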
Example #2
    def __init__(self, latent_dim, classes, channels):
        super(Generator, self).__init__()
        self.latent_dim = latent_dim
        self.channels = channels
        self.classes = classes

        # image_size = 64
        img_shape = (channels, 64, 64)

        self.nn_block = nn.Sequential(
            # [Linear] fully connected stack
            # [-1, latent_dim + classes] -> [-1, 128] -> [-1, 256]
            # -> [-1, 512] -> [-1, 1024] -> [-1, channels x 64 x 64]
            nn.Linear(latent_dim + classes, 128),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 1024),
            nn.BatchNorm1d(1024, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, int(np.prod(img_shape))),
            nn.Tanh())

        utils.weights_init_normal(self)
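
This generator is class-conditional: the input to nn_block is the noise vector concatenated with a one-hot label vector, and the flat output must be reshaped to img_shape. The forward pass is not part of the snippet; a minimal sketch of the usual wiring (labels_onehot is an illustrative name, and torch is assumed imported in the source file) is:

    def forward(self, noise, labels_onehot):
        # noise: [-1, latent_dim], labels_onehot: [-1, classes]
        gen_input = torch.cat((noise, labels_onehot), dim=-1)
        out = self.nn_block(gen_input)
        # [-1, channels x 64 x 64] -> [-1, channels, 64, 64]
        return out.view(out.size(0), self.channels, 64, 64)

Note also that the second positional argument of nn.BatchNorm1d is eps, so BatchNorm1d(256, 0.8) sets eps=0.8 rather than momentum; a momentum of 0.8 would have to be passed as momentum=0.8.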
Example #3
    def __init__(self, latent_dim, channels):
        super(Generator, self).__init__()
        self.latent_dim = latent_dim
        self.channels = channels

        self.conv_block = nn.Sequential(
            # [ConvTranspose2d]
            # H_out = (H_in - 1) x stride - 2 x padding + kernel_size + output_padding
            # [-1, latent_dim, 1, 1] -> [-1, 512, 4, 4]
            nn.ConvTranspose2d(latent_dim, 512, kernel_size=4, stride=1, padding=0),
            nn.BatchNorm2d(512),
            nn.ReLU(True),

            # [-1, 512, 4, 4] -> [-1, 256, 8, 8]
            nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(True),

            # [-1, 256, 8, 8] -> [-1, 128, 16, 16]
            nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(True),

            # [-1, 128, 16, 16] -> [-1, 64, 32, 32]
            nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(True),

            # [-1, 64, 32, 32] -> [-1, channels, 64, 64]
            nn.ConvTranspose2d(64, channels, kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )

        utils.weights_init_normal(self)
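
Plugging the layer settings into the H_out formula quoted in the comments confirms the shape annotations: the first layer (H_in = 1, stride = 1, padding = 0, kernel_size = 4) gives H_out = (1 - 1) x 1 - 2 x 0 + 4 = 4, and each following layer (stride = 2, padding = 1, kernel_size = 4) gives H_out = (H_in - 1) x 2 - 2 x 1 + 4 = 2 x H_in, doubling the spatial size 4 -> 8 -> 16 -> 32 -> 64.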
Example #4
    def __init__(self, N_STATES, N_ACTIONS, H1Size, H2Size):
        super(Q_Net, self).__init__()
        # build network layers
        self.fc1 = nn.Linear(N_STATES, H1Size)
        self.fc2 = nn.Linear(H1Size, H2Size)
        self.out = nn.Linear(H2Size, N_ACTIONS)

        # initialize layers
        utils.weights_init_normal([self.fc1, self.fc2, self.out], 0.0, 0.1)
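
Only the constructor is shown; a typical forward pass for this two-hidden-layer Q-network (an assumed sketch, with torch.nn.functional imported as F) applies a nonlinearity after each hidden layer and returns one Q-value per action:

    def forward(self, x):
        # x: [-1, N_STATES] -> Q-values: [-1, N_ACTIONS]
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.out(x)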
Example #5
    def __init__(self, N_STATES, N_ACTIONS, H1Size, H2Size):
        super(EnvModel, self).__init__()
        # build network layers
        self.fc1 = nn.Linear(N_STATES + N_ACTIONS, H1Size)
        self.fc2 = nn.Linear(H1Size, H2Size)
        self.statePrime = nn.Linear(H2Size, N_STATES)
        self.reward = nn.Linear(H2Size, 1)
        self.done = nn.Linear(H2Size, 1)

        # initialize layers
        utils.weights_init_normal(
            [self.fc1, self.fc2, self.statePrime, self.reward, self.done], 0.0,
            0.1)
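
Unlike the earlier examples, Examples #4 and #5 pass a plain list of layers plus an explicit mean and standard deviation, so the utility in this repo must accept that calling convention. A minimal sketch of such a variant (an assumption about the unseen utils module) is:

import torch.nn as nn

def weights_init_normal(layers, mean=0.0, std=0.02):
    # Accept a single layer or a list/tuple of layers and draw their
    # weights from N(mean, std), zeroing any bias.
    if not isinstance(layers, (list, tuple)):
        layers = [layers]
    for layer in layers:
        nn.init.normal_(layer.weight, mean, std)
        if layer.bias is not None:
            nn.init.constant_(layer.bias, 0.0)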
Example #6
    def __init__(self, categorical_dim):
        super(Discriminator, self).__init__()
        self.categorical_dim = categorical_dim

        self.conv_block = nn.Sequential(
            # [Conv2d]
            # H_out = (H_in + 2 x padding - dilation x (kernel_size -1) -1) / stride + 1
            # [-1, channels, 64, 64] -> [-1, 64, 32, 32]
            nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.1, inplace=True),

            # [-1, 64, 32, 32] -> [-1, 128, 16, 16]
            nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(128),
            nn.LeakyReLU(0.1, inplace=True),

            # [-1, 128, 16, 16] -> [-1, 256, 8, 8]
            nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.LeakyReLU(0.1, inplace=True),

            # [-1, 256, 8, 8] -> [-1, 512, 4, 4]
            nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.1, inplace=True),

            # [-1, 512, 4, 4] -> [-1, 64, 1, 1]
            # Is it okay for the number of channels to drop this abruptly?
            nn.Conv2d(512, 64, kernel_size=4, stride=1, padding=0),
        )

        self.adv_layer = nn.Sequential(nn.Linear(64, 1), nn.Sigmoid())

        self.aux_layer = nn.Sequential(nn.Linear(64, categorical_dim),
                                       nn.Softmax(dim=1))

        self.latent_layer = nn.Sequential(nn.Linear(64, 1))

        utils.weights_init_normal(self)
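
This InfoGAN-style discriminator ends in three heads: a real/fake score (adv_layer), a categorical prediction over categorical_dim classes (aux_layer), and a continuous latent estimate (latent_layer). The forward pass is not shown; a sketch of how the 64-channel 1x1 feature map is typically flattened and routed to the heads (an assumption) is:

    def forward(self, img):
        # [-1, 3, 64, 64] -> [-1, 64, 1, 1] -> [-1, 64]
        features = self.conv_block(img).view(img.size(0), -1)
        validity = self.adv_layer(features)        # [-1, 1]
        label = self.aux_layer(features)           # [-1, categorical_dim]
        latent_code = self.latent_layer(features)  # [-1, 1]
        return validity, label, latent_code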
Example #7
    def weight_init(self, mean=0.0, std=0.02):
        for m in self.modules():
            utils.weights_init_normal(m, mean=mean, std=std)
Example #8
netg_b2a = G_net(input_channel, output_channel, ngf, g_layer).to(device)
netd_a = D_net(input_channel, ndf, d_layer).to(device)
netd_b = D_net(input_channel, ndf, d_layer).to(device)

criterionGAN = PatchLoss().to(device)
criterionL1 = nn.L1Loss().to(device)
# criterionMSE = nn.MSELoss().to(device)

optimzer_g = torch.optim.SGD(itertools.chain(netg_b2a.parameters(),
                                             netg_a2b.parameters()),
                             lr=lr)
optimzerd_a = torch.optim.SGD(netd_a.parameters(), lr)
optimzerd_b = torch.optim.SGD(netd_b.parameters(), lr)
if not os.path.exists(check):
    print('init param')
    weights_init_normal(optimzer_g)
    weights_init_normal(optimzerd_a)
    weights_init_normal(optimzerd_b)


def train():
    for epoch in range(epochs):
        avg_loss_g_a2b = AverageMeter()
        avg_loss_g_b2a = AverageMeter()
        avg_loss_g = AverageMeter()
        avg_loss_d_a = AverageMeter()
        avg_loss_d_b = AverageMeter()
        min_loss_g = float('inf')
        min_loss_d = float('inf')
        for i, data in enumerate(train_loader):
            img_a, img_b = data[0].to(device), data[1].to(device)
Example #9
criterionL1 = nn.L1Loss().to(device)
criterionBCE = PatchLoss(nn.BCEWithLogitsLoss()).to(device)

optimzer_g = torch.optim.Adam(itertools.chain(netg_b2a.parameters(),
                                              netg_a2b.parameters()),
                              lr=lr,
                              betas=(0.5, 0.999),
                              weight_decay=weight_decay)
optimzer_d = torch.optim.Adam(itertools.chain(netd_a.parameters(),
                                              netd_b.parameters()),
                              lr=lr,
                              betas=(0.5, 0.999),
                              weight_decay=weight_decay)
if not os.path.exists(check):
    print('init param')
    weights_init_normal(optimzer_g)
    weights_init_normal(optimzer_d)


def train():
    for epoch in range(epochs):
        avg_loss_g_a = AverageMeter()
        avg_loss_g_b = AverageMeter()
        avg_loss_d_a = AverageMeter()
        avg_loss_d_b = AverageMeter()
        min_loss_g = float('inf')
        min_loss_d = float('inf')
        for i, data in enumerate(train_loader):
            img_a, img_b = data[0].to(device), data[1].to(device)

            #### update generator
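
In Examples #8 and #9, weights_init_normal is called on the optimizers; whether that repo's utility unwraps optimizer param_groups is not visible here. The more common pattern is to apply a per-module init function to the networks themselves, e.g. (a sketch, assuming a weights_init_normal(m) that handles a single module as in Examples #7 and #10):

if not os.path.exists(check):
    print('init param')
    for net in (netg_a2b, netg_b2a, netd_a, netd_b):
        net.apply(weights_init_normal)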
Example #10
    def weight_init(self):
        for m in self.modules():
            utils.weights_init_normal(m)