Example #1
File: train.py Project: Patryk-Sl/ai_art
def main():
    disc = Discriminator(in_channels=3).to(config.DEVICE)
    gen = Generator(in_channels=3, features=64).to(config.DEVICE)
    opt_disc = optim.Adam(
        disc.parameters(),
        lr=config.LEARNING_RATE,
        betas=(0.5, 0.999),
    )
    opt_gen = optim.Adam(gen.parameters(),
                         lr=config.LEARNING_RATE,
                         betas=(0.5, 0.999))
    BCE = nn.BCEWithLogitsLoss()  # adversarial loss
    L1_LOSS = nn.L1Loss()  # pixel-wise reconstruction loss

    if config.LOAD_MODEL:
        load_checkpoint(
            config.CHECKPOINT_GEN,
            gen,
            opt_gen,
            config.LEARNING_RATE,
        )
        load_checkpoint(
            config.CHECKPOINT_DISC,
            disc,
            opt_disc,
            config.LEARNING_RATE,
        )

    train_dataset = MapDataset(root_dir=config.TRAIN_DIR)
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.BATCH_SIZE,
        shuffle=True,
        num_workers=config.NUM_WORKERS,
    )
    # GradScalers for mixed-precision (float16) training
    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()
    val_dataset = MapDataset(root_dir=config.VAL_DIR)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    for epoch in range(config.NUM_EPOCHS):
        train_fn(
            disc,
            gen,
            train_loader,
            opt_disc,
            opt_gen,
            L1_LOSS,
            BCE,
            g_scaler,
            d_scaler,
        )

        if config.SAVE_MODEL and epoch % 5 == 0:
            save_checkpoint(gen, opt_gen, filename=config.CHECKPOINT_GEN)
            save_checkpoint(disc, opt_disc, filename=config.CHECKPOINT_DISC)

        save_some_examples(gen, val_loader, epoch, folder="evaluation")
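Examples #1 through #5 all call save_checkpoint/load_checkpoint without showing them. A minimal sketch consistent with the call signatures used above; the dict keys and the learning-rate reset are assumptions, not the projects' actual code:

import torch

def save_checkpoint(model, optimizer, filename="checkpoint.pth.tar"):
    # persist both model and optimizer state so training can resume
    checkpoint = {
        "state_dict": model.state_dict(),
        "optimizer": optimizer.state_dict(),
    }
    torch.save(checkpoint, filename)

def load_checkpoint(checkpoint_file, model, optimizer, lr):
    # restore model/optimizer state, then reset the learning rate,
    # because the loaded optimizer state carries the old one
    checkpoint = torch.load(checkpoint_file)
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr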
Example #2
def main():
    disc = Discriminator(in_channels=3, feature=64).to(config.device)
    gen = GeneratorUNET(in_channels=3, feature=64).to(config.device)
    opt_disc = optim.Adam(disc.parameters(),
                          lr=config.learning_rate,
                          betas=(0.5, 0.999))
    opt_gen = optim.Adam(gen.parameters(),
                         lr=config.learning_rate,
                         betas=(0.5, 0.999))
    BCE = nn.BCEWithLogitsLoss()
    L1_loss = nn.L1Loss()

    if config.Load_model:
        load_checkpoint(
            config.CHECKPOINT_GEN,
            gen,
            opt_gen,
            config.learning_rate,
        )
        load_checkpoint(
            config.CHECKPOINT_DISC,
            disc,
            opt_disc,
            config.learning_rate,
        )
    train_dataset = Anime_Dataset(root_dir=config.train_dir)
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_worker,
    )
    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()
    val_dataset = Anime_Dataset(root_dir=config.val_dir)
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    for epoch in range(config.num_epochs):
        train_fn(
            disc,
            gen,
            train_loader,
            opt_disc,
            opt_gen,
            L1_loss,
            BCE,
            g_scaler,
            d_scaler,
        )

        if config.Save_model and epoch % 5 == 0:
            save_checkpoint(gen, opt_gen, filename=config.CHECKPOINT_GEN)
            save_checkpoint(disc, opt_disc, filename=config.CHECKPOINT_DISC)

        save_some_examples(gen, val_loader, epoch, folder="evaluation")
Example #3
def main():
    disc_H = Discriminator(in_channels=3).to(config.DEVICE)
    disc_Z = Discriminator(in_channels=3).to(config.DEVICE)
    gen_H = Generator(img_channels=3, num_residuals=9).to(config.DEVICE)
    gen_Z = Generator(img_channels=3, num_residuals=9).to(config.DEVICE)

    opt_disc = optim.Adam(
        list(disc_H.parameters()) + list(disc_Z.parameters()),
        lr=config.LEARNING_RATE,
        betas=(0.5, 0.999),
    )

    opt_gen = optim.Adam(
        list(gen_Z.parameters()) + list(gen_H.parameters()),
        lr=config.LEARNING_RATE,
        betas=(0.5, 0.999),
    )

    L1 = nn.L1Loss()
    mse = nn.MSELoss()

    if config.LOAD_MODEL:
        load_checkpoint(
            config.CHECKPOINT_GEN_H, gen_H, opt_gen, config.LEARNING_RATE,
        )
        load_checkpoint(
            config.CHECKPOINT_GEN_Z, gen_Z, opt_gen, config.LEARNING_RATE,
        )
        load_checkpoint(
            config.CHECKPOINT_CRITIC_H, disc_H, opt_disc, config.LEARNING_RATE,
        )
        load_checkpoint(
            config.CHECKPOINT_CRITIC_Z, disc_Z, opt_disc, config.LEARNING_RATE,
        )

    dataset = HorseZebraDataset(
        root_horse=config.TRAIN_DIR + "/horses",
        root_zebra=config.TRAIN_DIR + "/zebras",
        transform=config.transforms,
    )

    loader = DataLoader(
        dataset,
        batch_size=config.BATCH_SIZE,
        shuffle=True,
        num_workers=config.NUM_WORKERS,
        pin_memory=True
    )

    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()

    for epoch in range(config.NUM_EPOCHS):
        train_fn(disc_H, disc_Z, gen_Z, gen_H, loader, opt_disc, opt_gen,
                 L1, mse, d_scaler, g_scaler)

        if config.SAVE_MODEL:
            save_checkpoint(gen_H, opt_gen, filename=config.CHECKPOINT_GEN_H)
            save_checkpoint(gen_Z, opt_gen, filename=config.CHECKPOINT_GEN_Z)
            save_checkpoint(disc_H, opt_disc, filename=config.CHECKPOINT_CRITIC_H)
            save_checkpoint(disc_Z, opt_disc, filename=config.CHECKPOINT_CRITIC_Z)
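The train_fn here is CycleGAN-specific and not shown. A minimal sketch of the generator half of one update, illustrating why both mse (the LSGAN adversarial loss) and L1 (the cycle-consistency loss) are constructed above; the function shape and lambda_cycle are assumptions:

import torch

def generator_step(gen_H, gen_Z, disc_H, disc_Z, horse, zebra,
                   opt_gen, L1, mse, lambda_cycle=10):
    # adversarial loss: each generator tries to fool its discriminator
    fake_horse = gen_H(zebra)
    fake_zebra = gen_Z(horse)
    d_h_fake = disc_H(fake_horse)
    d_z_fake = disc_Z(fake_zebra)
    loss_adv = (mse(d_h_fake, torch.ones_like(d_h_fake))
                + mse(d_z_fake, torch.ones_like(d_z_fake)))
    # cycle consistency: translating back should recover the input
    loss_cycle = (L1(zebra, gen_Z(fake_horse))
                  + L1(horse, gen_H(fake_zebra)))
    loss = loss_adv + lambda_cycle * loss_cycle
    opt_gen.zero_grad()
    loss.backward()
    opt_gen.step()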
Example #4
def main():
    disc = Discriminator(in_channels=3).to(config.DEVICE)
    gen = Generator(in_channels=3).to(config.DEVICE)
    opt_disc = optim.Adam(disc.parameters(),
                          lr=config.LEARNING_RATE,
                          betas=(0.5, 0.999))
    opt_gen = optim.Adam(gen.parameters(),
                         lr=config.LEARNING_RATE,
                         betas=(0.5, 0.999))
    BCE = nn.BCEWithLogitsLoss()
    L1_LOSS = nn.L1Loss()

    if config.LOAD_MODEL:
        load_checkpoint(config.CHECKPOINTS_GEN, gen, opt_gen,
                        config.LEARNING_RATE)
        load_checkpoint(config.CHECKPOINTS_DISC, disc, opt_disc,
                        config.LEARNING_RATE)

    # raw string: in '..\data\maps\train' the '\t' would otherwise be a tab
    train_dataset = MapDataset(r'..\data\maps\train')
    train_loader = DataLoader(train_dataset,
                              batch_size=config.BATCH_SIZE,
                              shuffle=True,
                              num_workers=config.NUM_WORKERS)
    g_scaler = torch.cuda.amp.GradScaler()
    d_scaler = torch.cuda.amp.GradScaler()

    val_dataset = MapDataset(r'..\data\maps\test')  # raw string, as above
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    for epoch in range(config.NUM_EPOCHS):
        train_fn(disc, gen, train_loader, opt_disc, opt_gen, L1_LOSS, BCE,
                 g_scaler, d_scaler)

        if config.SAVE_MODEL and epoch % 5 == 0:
            save_checkpoint(gen, opt_gen, filename=config.CHECKPOINTS_GEN)
            save_checkpoint(disc, opt_disc, filename=config.CHECKPOINTS_DISC)
Example #5
def main():
    disc = Discriminator(in_channels=3).to(config.DEVICE)
    gen = Generator(in_channels=3).to(config.DEVICE)
    opt_disc = optim.Adam(disc.parameters(),
                          lr=config.LEARNING_RATE,
                          betas=(0.5, 0.999))
    opt_gen = optim.Adam(gen.parameters(),
                         lr=config.LEARNING_RATE,
                         betas=(0.5, 0.999))
    bce_loss = nn.BCEWithLogitsLoss()
    l1_loss = nn.L1Loss()
    if config.LOAD_MODEL:
        load_checkpoint(config.CHECKPOINT_DISC, disc, opt_disc,
                        config.LEARNING_RATE)
        load_checkpoint(config.CHECKPOINT_GEN, gen, opt_gen,
                        config.LEARNING_RATE)

    train_dataset = MapDataSet("datasets/maps/train")
    train_loader = DataLoader(train_dataset,
                              batch_size=config.BATCH_SIZE,
                              shuffle=True,
                              num_workers=config.NUM_WORKERS)
    # GradScalers enable optional float16 (mixed-precision) training;
    # left as None here, so training runs in plain float32
    gen_scaler = None  # torch.cuda.amp.GradScaler()
    disc_scaler = None  # torch.cuda.amp.GradScaler()
    val_dataset = MapDataSet("datasets/maps/val")
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    for epoch in range(config.NUM_EPOCHS):
        train(disc, gen, train_loader, opt_disc, opt_gen, l1_loss, bce_loss,
              disc_scaler, gen_scaler)
        if config.SAVE_MODEL and epoch % 10 == 0:
            save_checkpoint(gen, opt_gen, filename=config.CHECKPOINT_GEN)
            save_checkpoint(disc, opt_disc, filename=config.CHECKPOINT_DISC)

        save_some_examples(gen, val_loader, epoch, folder="savedevaluations")
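Because the scalers above may be None, the train() function has to branch on their presence. A minimal sketch of one optimizer update under that assumption; the helper name is hypothetical:

def backward_step(loss, optimizer, scaler):
    # hypothetical helper: one update that works with or without
    # a torch.cuda.amp.GradScaler
    optimizer.zero_grad()
    if scaler is not None:  # float16 / mixed-precision path
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
    else:  # plain float32 path
        loss.backward()
        optimizer.step()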
Example #6
    # inside the training loop: periodically log image grids to TensorBoard
    if iter_number % SAVE_IMAGE_EVERY_ITER == 0:
        writer.add_image("fake", vutils.make_grid(gen_output_vec.data[:64]),
                         iter_number)
        writer.add_image("real", vutils.make_grid(batch_vec.data[:64]),
                         iter_number)


if __name__ == '__main__':
    device = torch.device("cuda")
    envs = [
        InputWrapper(gym.make(name))
        for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')
    ]
    input_shape = output_shape = envs[0].observation_space.shape

    net_discr = Discriminator(input_shape).to(device)
    net_gener = Generator(output_shape).to(device)

    writer = SummaryWriter()
    objective = nn.BCELoss()
    gen_optimizer = optim.Adam(params=net_gener.parameters(),
                               lr=LEARNING_RATE,
                               betas=(0.5, 0.999))
    dis_optimizer = optim.Adam(params=net_discr.parameters(),
                               lr=LEARNING_RATE,
                               betas=(0.5, 0.999))

    gen_losses = []
    dis_losses = []
    iter_number = 0
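The snippet relies on an InputWrapper that is not shown. A plausible sketch, assuming it resizes Atari frames and moves channels first so the GAN sees fixed-size CHW float tensors; IMAGE_SIZE is an assumption:

import cv2
import gym
import numpy as np

IMAGE_SIZE = 64  # assumed; the real constant lives elsewhere in the script

class InputWrapper(gym.ObservationWrapper):
    # resize observations and convert HWC uint8 -> CHW float32
    def __init__(self, env):
        super().__init__(env)
        old_space = self.observation_space
        self.observation_space = gym.spaces.Box(
            self.observation(old_space.low),
            self.observation(old_space.high),
            dtype=np.float32,
        )

    def observation(self, observation):
        obs = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE))
        return np.moveaxis(obs, 2, 0).astype(np.float32)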
Example #7

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

trainset = MNIST(root='../../../', train=True, download=False, transform=transform)
trainloader = DataLoader(trainset, batch_size=32, shuffle=True)
testset = MNIST(root='../../../', train=False, download=False, transform=transform)
testloader = DataLoader(testset, batch_size=100, shuffle=True)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

classifier = Classifier().to(device)
discriminator = Discriminator().to(device)
generator = Generator().to(device)

discriminator.load_state_dict(torch.load('dis.pth'))
generator.load_state_dict(torch.load('gen.pth'))
classifier.load_state_dict(torch.load('class.pth'))

criterion = nn.CrossEntropyLoss()
c_lr = 2e-4
d_lr = 2e-4
g_lr = 2e-4
c_optim = optim.Adadelta(classifier.parameters(), lr=c_lr, weight_decay=0.01)
d_optim = optim.Adadelta(discriminator.parameters(), lr=d_lr, weight_decay=0.01)
g_optim = optim.Adadelta(generator.parameters(), lr=g_lr, weight_decay=0.01)

def train_classifier(imgs, labels):
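The source excerpt cuts off at the train_classifier definition above. A minimal body consistent with the objects already set up (classifier, criterion, c_optim, device) would be a plain supervised step; this is a sketch, not the original code:

def train_classifier(imgs, labels):
    # one supervised update of the classifier
    imgs, labels = imgs.to(device), labels.to(device)
    c_optim.zero_grad()
    logits = classifier(imgs)
    loss = criterion(logits, labels)
    loss.backward()
    c_optim.step()
    return loss.item()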
Example #8
sys.path.append("../attacks/")
from carlini_attack import carlini_l2_attack

transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, ), (0.5, ))])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

testset = MNIST(root='../../../',
                train=False,
                download=False,
                transform=transform)
testloader = DataLoader(testset, batch_size=100, shuffle=True)

classifier = Classifier().to(device)
discriminator = Discriminator().to(device)
generator = Generator().to(device)

discriminator.load_state_dict(
    torch.load('dis.pth', map_location={'cuda:0': 'cpu'}))
generator.load_state_dict(
    torch.load('gen.pth', map_location={'cuda:0': 'cpu'}))
classifier.load_state_dict(
    torch.load('class.pth', map_location={'cuda:0': 'cpu'}))


def predict(imgs):
    # combine the two halves of the discriminator's 20-way output
    # into 10 per-digit class scores
    p = discriminator(generator(imgs))
    return p[:, :10] + p[:, 10:]
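A hypothetical usage of predict() on the test loader defined above, measuring clean accuracy (all names reused from the snippet; torch is assumed imported):

correct = 0
with torch.no_grad():
    for imgs, labels in testloader:
        imgs, labels = imgs.to(device), labels.to(device)
        preds = predict(imgs).argmax(dim=1)
        correct += (preds == labels).sum().item()
print(f"accuracy: {correct / len(testset):.4f}")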