def main():
    disc = Discriminator(in_channels=3).to(config.DEVICE)
    gen = Generator(in_channels=3).to(config.DEVICE)
    opt_disc = optim.Adam(disc.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))
    opt_gen = optim.Adam(gen.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))
    BCE = nn.BCEWithLogitsLoss()
    L1_LOSS = nn.L1Loss()
    if config.LOAD_MODEL:
        load_checkpoint(config.CHECKPOINT_GEN, gen, opt_gen, config.LEARNING_RATE)
        load_checkpoint(config.CHECKPOINT_DISC, disc, opt_disc, config.LEARNING_RATE)
    
    train_dataset = MapDataset(config.TRAIN_DIR)
    train_loader = DataLoader(train_dataset, batch_size=config.BATCH_SIZE, shuffle=True, num_workers=config.NUM_WORKERS)
    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()
    val_dataset = MapDataset(config.VAL_DIR)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    for epoch in range(config.NUM_EPOCHS):
        train_fn(disc, gen, train_loader, opt_disc, opt_gen, L1_LOSS, BCE, g_scaler, d_scaler)

        if config.SAVE_MODEL and epoch % 5 == 0:
            save_checkpoint(disc, opt_disc, filename=config.CHECKPOINT_DISC)
            save_checkpoint(gen, opt_gen, filename=config.CHECKPOINT_GEN)
        
        save_some_examples(gen, val_loader, epoch, folder="evaluation")
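
All of these examples call save_checkpoint/load_checkpoint helpers that are imported from a utils module and not shown here. A minimal sketch of helpers matching the call signatures used above, assuming checkpoints hold the model and optimizer state dicts (the actual utils implementation may differ):

def save_checkpoint(model, optimizer, filename="checkpoint.pth.tar"):
    # Persist both model and optimizer state so training can resume later.
    checkpoint = {
        "state_dict": model.state_dict(),
        "optimizer": optimizer.state_dict(),
    }
    torch.save(checkpoint, filename)

def load_checkpoint(checkpoint_file, model, optimizer, lr):
    # Restore model/optimizer state, then reset the learning rate, since the
    # saved optimizer state would otherwise carry the old lr along.
    checkpoint = torch.load(checkpoint_file, map_location=config.DEVICE)
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr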
Example #2
def main():
    disc = Discriminator(in_channels=3, feature=64).to(config.device)
    gen = GeneratorUNET(in_channels=3, feature=64).to(config.device)
    opt_disc = optim.Adam(disc.parameters(),
                          lr=config.learning_rate,
                          betas=(0.5, 0.999))
    opt_gen = optim.Adam(gen.parameters(),
                         lr=config.learning_rate,
                         betas=(0.5, 0.999))
    BCE = nn.BCEWithLogitsLoss()
    L1_loss = nn.L1Loss()

    if config.Load_model:
        load_checkpoint(
            config.CHECKPOINT_GEN,
            gen,
            opt_gen,
            config.learning_rate,
        )
        load_checkpoint(
            config.CHECKPOINT_DISC,
            disc,
            opt_disc,
            config.learning_rate,
        )
    train_dataset = Anime_Dataset(root_dir=config.train_dir)
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_worker,
    )
    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()
    val_dataset = Anime_Dataset(root_dir=config.val_dir)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    for epoch in range(config.num_epochs):
        train_fn(
            disc,
            gen,
            train_loader,
            opt_disc,
            opt_gen,
            L1_loss,
            BCE,
            g_scaler,
            d_scaler,
        )

        if config.Save_model and epoch % 5 == 0:
            save_checkpoint(gen, opt_gen, filename=config.CHECKPOINT_GEN)
            save_checkpoint(disc, opt_disc, filename=config.CHECKPOINT_DISC)

        save_some_examples(gen, val_loader, epoch, folder="evaluation")
Example #3
def train_fn(disc, gen, loader, opt_disc, opt_gen, l1_loss, bce, val_loader):
    loop = tqdm(loader, leave=True)

    for idx, (x, y) in enumerate(loop):
        x = x.to(config.DEVICE)
        y = y.to(config.DEVICE)

        # Train the discriminator
        # with torch.cuda.amp.autocast():  # automatic mixed precision
        y_fake = gen(x)
        d_real = disc(x, y)
        d_real_loss = bce(d_real, torch.ones_like(d_real))
        d_fake = disc(x, y_fake.detach())
        d_fake_loss = bce(d_fake, torch.zeros_like(d_fake))
        d_loss = (d_real_loss + d_fake_loss) / 2

        disc.zero_grad()
        # d_scaler.scale(d_loss).backward()  # no CUDA available, so AMP would be wasted effort
        # d_scaler.step(opt_disc)
        # d_scaler.update()
        d_loss.backward()
        opt_disc.step()

        # Train the generator
        # with torch.cuda.amp.autocast():
        g_fake = disc(x, y_fake)
        g_fake_loss = bce(g_fake, torch.ones_like(g_fake))
        L1 = l1_loss(y_fake, y) * config.L1_LAMBDA
        g_loss = g_fake_loss + L1

        gen.zero_grad()
        # g_scaler.scale(g_loss).backward()
        # g_scaler.step(opt_gen)
        # g_scaler.update()
        g_loss.backward()
        opt_gen.step()

        if idx % 10 == 0:
            loop.set_postfix(
                d_real=torch.sigmoid(d_real).mean().item(),
                d_fake=torch.sigmoid(d_fake).mean().item(),
            )

        # the original `idx % 1 == 0` guard is always true, so this logs
        # and saves on every batch
        print(f'Batch {idx}/{len(loop)} Loss D: {d_loss.item():.4f}, '
              f'loss G: {g_loss.item():.4f}')

        save_some_examples(gen,
                           val_loader,
                           epoch=1,  # epoch is not passed into train_fn, so it is hardcoded
                           idx=idx,
                           folder="evaluation")
Example #4
def main():
    disc = Discriminator(in_channels=3).to(config.DEVICE)
    gen = Generator(in_channels=3).to(config.DEVICE)
    opt_disc = optim.Adam(disc.parameters(),
                          lr=config.LEARNING_RATE,
                          betas=(0.5, 0.999))
    opt_gen = optim.Adam(gen.parameters(),
                         lr=config.LEARNING_RATE,
                         betas=(0.5, 0.999))
    bce_loss = nn.BCEWithLogitsLoss()
    l1_loss = nn.L1Loss()
    if config.LOAD_MODEL:
        load_checkpoint(config.CHECKPOINT_DISC, disc, opt_disc,
                        config.LEARNING_RATE)
        load_checkpoint(config.CHECKPOINT_GEN, gen, opt_gen,
                        config.LEARNING_RATE)

    train_dataset = MapDataSet("datasets/maps/train")
    train_loader = DataLoader(train_dataset,
                              batch_size=config.BATCH_SIZE,
                              shuffle=True,
                              num_workers=config.NUM_WORKERS)
    # optional: GradScalers for float16 (AMP) training; None disables mixed precision
    gen_scaler = None  # torch.cuda.amp.GradScaler()
    disc_scaler = None  # torch.cuda.amp.GradScaler()
    val_dataset = MapDataSet("datasets/maps/val")
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    for epoch in range(config.NUM_EPOCHS):
        train(disc, gen, train_loader, opt_disc, opt_gen, l1_loss, bce_loss,
              disc_scaler, gen_scaler)
        if config.SAVE_MODEL and epoch % 10 == 0:
            save_checkpoint(gen, opt_gen, filename=config.CHECKPOINT_GEN)
            save_checkpoint(disc, opt_disc, filename=config.CHECKPOINT_DISC)

        save_some_examples(gen, val_loader, epoch, folder="savedevaluations")
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from utils import load_checkpoint  # save_some_examples is redefined locally below
import config
from dataset import MapDataset
from generator import Generator
from torchvision.utils import save_image

val_dataset = MapDataset(config.VAL_DIR)
val_loader = DataLoader(val_dataset, batch_size=4, shuffle=False)
gen = Generator(in_channels=3).to(config.DEVICE)
opt_gen = optim.Adam(gen.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))
load_checkpoint(config.CHECKPOINT_GEN, gen, opt_gen, config.LEARNING_RATE)

def save_some_examples(gen, val_loader, folder):
    for idx, (x, y) in enumerate(val_loader):
        x, y = x.to(config.DEVICE), y.to(config.DEVICE)
        gen.eval()
        with torch.no_grad():
            y_fake = gen(x)
            y_fake = y_fake * 0.5 + 0.5  # undo the [-1, 1] normalization
            save_image(y_fake, folder + f"/y_gen_{idx}.png")
            save_image(x * 0.5 + 0.5, folder + f"/input_{idx}.png")
        gen.train()


if __name__=="__main__":
    save_some_examples(gen,val_loader,folder='evaluation')
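
The MapDataset these scripts import is also not shown. In the pix2pix maps data, each file is a single image with the photo and the map side by side, so a minimal sketch could split each image down the middle (the [-1, 1] normalization matches the * 0.5 + 0.5 de-normalization above; resizing and augmentation are omitted as assumptions about the real dataset class):

import os
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset

class MapDataset(Dataset):
    # Sketch only: assumes each file holds the input on the left half
    # and the target on the right half, as in the pix2pix maps dataset.
    def __init__(self, root_dir):
        self.root_dir = root_dir
        self.list_files = os.listdir(root_dir)

    def __len__(self):
        return len(self.list_files)

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.list_files[index])
        image = np.array(Image.open(img_path).convert("RGB"), dtype=np.float32)
        w = image.shape[1] // 2
        # HWC -> CHW and scale from [0, 255] to [-1, 1]
        x = torch.from_numpy(image[:, :w, :]).permute(2, 0, 1) / 127.5 - 1.0
        y = torch.from_numpy(image[:, w:, :]).permute(2, 0, 1) / 127.5 - 1.0
        return x, y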