Example #1
0
def run_GAN(n_epoch=2,
            batch_size=50,
            use_gpu=False,
            dis_lr=1e-4,
            gen_lr=1e-3,
            n_update_dis=1,
            n_update_gen=1,
            update_max=None):
    """Set up and train a GAN.

    Loads the dataset, builds the discriminator/generator pair (optionally
    on GPU), attaches a BCE loss and an SGD optimizer (momentum 0.9) to
    each network, and delegates the training loop to ``train_GAN``.
    """
    # Data pipeline; the test split is fetched but only the train loader
    # is passed on to the training loop.
    trainloader, testloader = load_dataset(batch_size=batch_size)

    # Build the adversarial pair.
    discriminator = Discriminator()
    generator = Generator()
    if use_gpu:
        discriminator = discriminator.cuda()
        generator = generator.cuda()

    # One BCE criterion and one SGD optimizer per network, with separate
    # learning rates for D and G.
    d_criterion = torch.nn.BCELoss()
    g_criterion = torch.nn.BCELoss()
    d_optimizer = optim.SGD(discriminator.parameters(), lr=dis_lr, momentum=0.9)
    g_optimizer = optim.SGD(generator.parameters(), lr=gen_lr, momentum=0.9)

    train_GAN(discriminator,
              generator,
              d_criterion,
              g_criterion,
              d_optimizer,
              g_optimizer,
              trainloader,
              n_epoch,
              batch_size,
              n_update_dis,
              n_update_gen,
              update_max=update_max)
Example #2
0
    # NOTE(review): fragment — this indented line belongs to an enclosing
    # block (likely a first-run setup guard) that is outside this view.
    # os.makedirs raises FileExistsError if the directory already exists.
    os.makedirs(opt.dataroot + '/out')

###### Definition of variables ######
# Networks
# Two generators with mirrored channel counts (A->B and B->A); the naming
# suggests a CycleGAN-style setup — TODO confirm against the Generator class.
netG_A2B = Generator(opt.input_nc, opt.output_nc)
netG_B2A = Generator(opt.output_nc, opt.input_nc)
# Discriminators are only needed while training.
if opt.mode == 'train':
    netD_A = Discriminator(opt.input_nc)
    netD_B = Discriminator(opt.output_nc)

# Move every instantiated network to the GPU when requested.
if opt.cuda:
    torch.cuda.empty_cache()
    netG_A2B.cuda()
    netG_B2A.cuda()
    if opt.mode == 'train':
        netD_A.cuda()
        netD_B.cuda()

if opt.mode == 'train':
    # Data augmentation and prep
    # Each construct() call produces one large volume; sampling() carves
    # patches from it into opt.dataroot/data.
    # NOTE(review): the relation between the 200 divisor and rand_num=56 is
    # not visible here — verify against sampling()'s implementation.
    for numm in range(int(opt.n_data / 200)):
        bigimage, biglabel = construct('../train3D')
        sampling(bigimage,
                 biglabel,
                 numm + 1,
                 opt.dataroot + '/data',
                 rand_num=56)
    if opt.resume == 'Y':
        # Load state dicts
        # Resume both generators from checkpoint paths supplied via opt.
        netG_A2B.load_state_dict(torch.load(opt.generator_A2B))
        netG_B2A.load_state_dict(torch.load(opt.generator_B2A))
Example #3
0
def train():
    """Train a photo-to-cartoon GAN with SGD.

    Reads up to 1000 photo/cartoon file paths from ``real_face.txt`` and
    ``cartoon_face.txt``, then for each epoch updates the generator on
    every batch and the discriminator on selected batches.  Per-epoch
    losses are appended to ``log.txt``, a generator checkpoint is saved
    under ``model/`` each epoch, and loss curves are drawn via ``draw``.

    Relies on module-level names: ``Generator``, ``Discriminator``,
    ``GenertorData``, ``generate_label``, ``draw``, ``use_cuda``,
    ``device``, ``batch_size`` and ``input_size``.
    """
    input_channels = 3
    lr = 0.01
    momentum = 0.5
    epochs = 200
    lambda_pixel = 300  # weight of the L1 pixel term in the generator loss

    gen_model = Generator(input_channels, input_channels)
    disc_model = Discriminator(input_channels, 2)
    optimizer_G = optim.SGD(gen_model.parameters(), lr=lr, momentum=momentum)
    optimizer_D = optim.SGD(disc_model.parameters(), lr=lr, momentum=momentum)
    pixel_loss = nn.L1Loss()           # generator reconstruction term
    disc_loss = nn.CrossEntropyLoss()  # real/fake classification term
    if use_cuda:
        gen_model = gen_model.cuda()
        disc_model = disc_model.cuda()
        pixel_loss = pixel_loss.cuda()
        disc_loss = disc_loss.cuda()

    # Fix: the original leaked these file handles; close them deterministically.
    with open('real_face.txt', 'r') as f:
        real_lines = f.readlines()[:1000]
    with open('cartoon_face.txt', 'r') as f:
        cartoon_lines = f.readlines()[:1000]
    train_loader = GenertorData(real_lines, cartoon_lines, batch_size,
                                input_size)

    epoch_g_loss = []
    epoch_d_loss = []
    # Fix: the log file is now closed (and flushed) even if training raises.
    with open('log.txt', 'w') as fw_log:
        for epoch in range(epochs):
            train_loss_G = 0
            train_loss_D = 0
            for batch_idx in range(len(train_loader)):
                data, target = train_loader[batch_idx]
                data, target = data.to(device), target.to(device)
                real_target, fake_target = generate_label(data.size(0))

                # --- generator step (every batch) ---
                optimizer_G.zero_grad()
                fake = gen_model(data)
                real_pred = disc_model(target)
                fake_pred = disc_model(fake)
                disc_loss_real = disc_loss(real_pred, real_target)
                disc_loss_fake = disc_loss(fake_pred, fake_target)
                loss_D = disc_loss_real + disc_loss_fake
                # NOTE(review): the generator objective adds the full
                # discriminator loss, and the L1 term is called as
                # (target, fake) — unusual, but preserved as written.
                loss_G = pixel_loss(target, fake)
                loss_G = loss_D + lambda_pixel * loss_G
                loss_G.backward()
                optimizer_G.step()
                train_loss_G += loss_G.item()

                # --- discriminator step (only on selected batches) ---
                # NOTE(review): float-division equality makes this fire
                # rarely; looks like it may have meant '//' — preserved.
                if (batch_idx / 50) == epoch % (len(train_loader) / 50):
                    optimizer_D.zero_grad()
                    fake = gen_model(data)
                    real_pred = disc_model(target)
                    fake_pred = disc_model(fake)
                    disc_loss_real = disc_loss(real_pred, real_target)
                    disc_loss_fake = disc_loss(fake_pred, fake_target)
                    loss_D = disc_loss_real + disc_loss_fake
                    loss_D.backward()
                    optimizer_D.step()
                # NOTE(review): '=' (not '+='): tracks the most recent
                # discriminator loss, preserved as written.
                train_loss_D = loss_D.item()
                if batch_idx % 50 == 0:
                    print("GAN train Epochs %d %d/%d G_loss %.6f D_loss %.6f" %
                          (epoch, batch_idx, len(train_loader), loss_G.item(),
                           train_loss_D))

            epoch_g_loss.append(loss_G.item())
            epoch_d_loss.append(train_loss_D)
            torch.save(
                gen_model.state_dict(),
                "model/gen_cartoon_model_epoch_" + str(epoch) + '_gloss' +
                str(loss_G.item())[:6] + '_d_loss' + str(train_loss_D)[:6] + ".pt")
            fw_log.write(str(epoch) + ' ' + str(epoch_g_loss) + '\n')
            fw_log.write(str(epoch) + ' ' + str(epoch_d_loss) + '\n')
            draw(epoch_g_loss, epoch_d_loss)
# NOTE(review): dangling 'elif' — the matching 'if dataset == ...' branches
# are outside this view; this fragment cannot run on its own.
elif dataset == 'fake':
    # Synthetic RGB images (no download) for smoke-testing the pipeline.
    dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),
                                transform=transforms.ToTensor())
    nc=3

# NOTE(review): 'assert' is stripped under python -O; an explicit check
# would validate the dataset choice more robustly.
assert dataset


print('Saving Features')
# Extract and cache features only if the cache file does not exist yet.
if not os.path.exists(feature_file):
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size,
                    shuffle=True, num_workers=opt.num_workers)

    
    # Rebuild the discriminator and load the epoch-10 checkpoint; it is used
    # as a fixed feature extractor (hence eval(), no training here).
    netD = Discriminator(opt.ndf, opt.nc, opt.filters, opt.strides, opt.padding)
    netD.cuda()
    
    epoch = 10
    netD.load_state_dict(torch.load(opt.model_path + 'netD_epoch_{}.pth'.format(epoch)))
    
    print(netD)
    netD.eval()
    n_features = 4096 # 1024x2x2
    save_features(dataloader, opt.batch_size, n_features, feature_file)

print('Load Features')
# float16 keeps the (potentially large) feature matrix compact in memory.
data = np.loadtxt(feature_file, dtype=np.float16)

# Last column is the label; everything before it is the feature vector.
features, labels = data[:, : -1], data[:, -1: ]
shape = features.shape