Code Example #1
File: dcgan.py  Project: Angelowin/deeplearning
                                     shuffle=True)

###############   MODEL   ####################
ndf = opt.ndf
ngf = opt.ngf
nc = 1

netD = Discriminator(nc, ndf)
netG = Generator(nc, ngf, opt.nz)
if opt.cuda:
    netD.cuda()
    netG.cuda()

###########   LOSS & OPTIMIZER   ##########
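# binary cross-entropy: the standard real/fake DCGAN objective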
criterion = nn.BCELoss()
optimizerD = torch.optim.Adam(netD.parameters(),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))
optimizerG = torch.optim.Adam(netG.parameters(),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))

##########   GLOBAL VARIABLES   ###########
#noise_all = torch.FloatTensor(20,opt.nz,1,1)
noise = torch.FloatTensor(opt.batchSize, opt.nz, 1, 1)
real = torch.FloatTensor(opt.batchSize, nc, opt.imageSize, opt.imageSize)
label = torch.FloatTensor(opt.batchSize)
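# target values for the BCE loss: 1 for real images, 0 for generated ones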
real_label = 1
fake_label = 0
#noise_all = Variable(noise_all)
noise = Variable(noise)
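# NOTE: torch.autograd.Variable has been a no-op since PyTorch 0.4;
# plain tensors can be used directly in modern code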
Code Example #2
File: main.py  Project: jungwon-choi/WGAN-pytorch
def main(args):
    #===========================================================================
    # Set the file name format
    FILE_NAME_FORMAT = "{0}_{1}_{2:d}_{3:d}_{4:d}_{5:f}{6}".format(
        args.model, args.dataset, args.epochs, args.obj_step, args.batch_size,
        args.lr, args.flag)

    # Set the results file path
    RESULT_FILE_NAME = FILE_NAME_FORMAT + '_results.pkl'
    RESULT_FILE_PATH = os.path.join(RESULTS_PATH, RESULT_FILE_NAME)
    # Set the checkpoint file path
    CHECKPOINT_FILE_NAME = FILE_NAME_FORMAT + '.ckpt'
    CHECKPOINT_FILE_PATH = os.path.join(CHECKPOINT_PATH, CHECKPOINT_FILE_NAME)
    BEST_CHECKPOINT_FILE_NAME = FILE_NAME_FORMAT + '_best.ckpt'
    BEST_CHECKPOINT_FILE_PATH = os.path.join(CHECKPOINT_PATH,
                                             BEST_CHECKPOINT_FILE_NAME)

    # Set the same random seed everywhere for reproducibility
    random.seed(190811)
    torch.manual_seed(190811)
    torch.cuda.manual_seed_all(190811)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
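    # NOTE: deterministic cuDNN kernels trade some speed for reproducibility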

    # Step1 ====================================================================
    # Load dataset
    if args.dataset == 'CelebA':
        dataloader = CelebA_Dataloader()
    else:
        assert False, "Please select the proper dataset."

    train_loader = dataloader.get_train_loader(batch_size=args.batch_size,
                                               num_workers=args.num_workers)
    print('==> DataLoader ready.')

    # Step2 ====================================================================
    # Make the model
    if args.model in ['WGAN', 'DCGAN']:
        generator = Generator(BN=True)
        discriminator = Discriminator(BN=True)
    elif args.model in ['WGAN_noBN', 'DCGAN_noBN']:
        generator = Generator(BN=False)
        discriminator = Discriminator(BN=False)
    else:
        assert False, "Please select the proper model."

    # Check DataParallel available
    if torch.cuda.device_count() > 1:
        generator = nn.DataParallel(generator)
        discriminator = nn.DataParallel(discriminator)

    # Check CUDA available
    if torch.cuda.is_available():
        generator.cuda()
        discriminator.cuda()
    print('==> Model ready.')

    # Step3 ====================================================================
    # Set loss function and optimizer
    if args.model in ['DCGAN', 'DCGAN_noBN']:
        criterion = nn.BCELoss()
    else:
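        # WGAN variants use the raw critic output as the objective,
        # so no fixed criterion is needed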
        criterion = None
    optimizer_G = torch.optim.RMSprop(generator.parameters(), lr=args.lr)
    optimizer_D = torch.optim.RMSprop(discriminator.parameters(), lr=args.lr)
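    # RMSprop follows the original WGAN paper (Arjovsky et al., 2017),
    # which reported instability with momentum-based optimizers such as Adam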
    step_counter = StepCounter(args.obj_step)
    print('==> Criterion and optimizer ready.')

    # Step4 ====================================================================
    # Train and validate the model
    start_epoch = 0
    best_metric = float("inf")
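    # a fixed noise batch, reused at every validation step so that generated
    # samples stay comparable across training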
    validate_noise = torch.randn(args.batch_size, 100, 1, 1)

    # Initialize the result lists
    train_loss_G = []
    train_loss_D = []
    train_distance = []

    if args.resume:
        assert os.path.exists(CHECKPOINT_FILE_PATH), 'No checkpoint file!'
        checkpoint = torch.load(CHECKPOINT_FILE_PATH)
        generator.load_state_dict(checkpoint['generator_state_dict'])
        discriminator.load_state_dict(checkpoint['discriminator_state_dict'])
        optimizer_G.load_state_dict(checkpoint['optimizer_G_state_dict'])
        optimizer_D.load_state_dict(checkpoint['optimizer_D_state_dict'])
        start_epoch = checkpoint['epoch']
        step_counter.current_step = checkpoint['current_step']
        train_loss_G = checkpoint['train_loss_G']
        train_loss_D = checkpoint['train_loss_D']
        train_distance = checkpoint['train_distance']
        best_metric = checkpoint['best_metric']

    # Save the training information
    result_data = {}
    result_data['model'] = args.model
    result_data['dataset'] = args.dataset
    result_data['target_epoch'] = args.epochs
    result_data['batch_size'] = args.batch_size

    # Check the directory of the file path
    if not os.path.exists(os.path.dirname(RESULT_FILE_PATH)):
        os.makedirs(os.path.dirname(RESULT_FILE_PATH))
    if not os.path.exists(os.path.dirname(CHECKPOINT_FILE_PATH)):
        os.makedirs(os.path.dirname(CHECKPOINT_FILE_PATH))

    print('==> Train ready.')

    # Validate before training (step 0)
    val(generator, validate_noise, step_counter, FILE_NAME_FORMAT)

    for epoch in range(args.epochs):
        # Skip epochs already covered by the checkpoint
        if epoch < start_epoch:
            continue
        print("\n[Epoch: {:3d}/{:3d}]".format(epoch + 1, args.epochs))
        epoch_time = time.time()
        #=======================================================================
        # train the model (+ validate the model)
        tloss_G, tloss_D, tdist = train(generator, discriminator, train_loader,
                                        criterion, optimizer_G, optimizer_D,
                                        args.clipping, args.num_critic,
                                        step_counter, validate_noise,
                                        FILE_NAME_FORMAT)
        train_loss_G.extend(tloss_G)
        train_loss_D.extend(tloss_D)
        train_distance.extend(tdist)
        #=======================================================================
        current = time.time()

        # Calculate average loss
        avg_loss_G = sum(tloss_G) / len(tloss_G)
        avg_loss_D = sum(tloss_D) / len(tloss_D)
        avg_distance = sum(tdist) / len(tdist)

        # Save the current result
        result_data['current_epoch'] = epoch
        result_data['train_loss_G'] = train_loss_G
        result_data['train_loss_D'] = train_loss_D
        result_data['train_distance'] = train_distance

        # Save result_data as pkl file
        with open(RESULT_FILE_PATH, 'wb') as pkl_file:
            pickle.dump(result_data,
                        pkl_file,
                        protocol=pickle.HIGHEST_PROTOCOL)

        # Save the best checkpoint
        # if avg_distance < best_metric:
        #     best_metric = avg_distance
        #     torch.save({
        #         'epoch': epoch+1,
        #         'generator_state_dict': generator.state_dict(),
        #         'discriminator_state_dict': discriminator.state_dict(),
        #         'optimizer_G_state_dict': optimizer_G.state_dict(),
        #         'optimizer_D_state_dict': optimizer_D.state_dict(),
        #         'current_step': step_counter.current_step,
        #         'best_metric': best_metric,
        #         }, BEST_CHECKPOINT_FILE_PATH)

        # Save the current checkpoint
        torch.save(
            {
                'epoch': epoch + 1,
                'generator_state_dict': generator.state_dict(),
                'discriminator_state_dict': discriminator.state_dict(),
                'optimizer_G_state_dict': optimizer_G.state_dict(),
                'optimizer_D_state_dict': optimizer_D.state_dict(),
                'current_step': step_counter.current_step,
                'train_loss_G': train_loss_G,
                'train_loss_D': train_loss_D,
                'train_distance': train_distance,
                'best_metric': best_metric,
            }, CHECKPOINT_FILE_PATH)

        # Print the information on the console
        print("model                : {}".format(args.model))
        print("dataset              : {}".format(args.dataset))
        print("batch_size           : {}".format(args.batch_size))
        print("current step         : {:d}".format(step_counter.current_step))
        print("current lrate        : {:f}".format(args.lr))
        print("gen/disc loss        : {:f}/{:f}".format(
            avg_loss_G, avg_loss_D))
        print("distance metric      : {:f}".format(avg_distance))
        print("epoch time           : {0:.3f} sec".format(current -
                                                          epoch_time))
        print("Current elapsed time : {0:.3f} sec".format(current - start))

        # If iteration step has been satisfied
        if step_counter.exit_signal:
            break

    print('==> Train done.')

    print(' '.join(['Results have been saved at', RESULT_FILE_PATH]))
    print(' '.join(['Checkpoints have been saved at', CHECKPOINT_FILE_PATH]))
Code Example #3
    model.load_state_dict(torch.load(load_path))
    print('model loaded')
    torch.cuda.empty_cache()

model = model.cuda()
discriminator = discriminator.cuda()

vgg_model = models.vgg16(pretrained=True)
vgg_model.cuda()

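# frozen VGG16 features -- presumably used as a perceptual loss network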
loss_network = utils.LossNetwork(vgg_model)
loss_network.eval()

# Optimizers & LR schedulers
optimizer_G = optim.Adam(model.parameters(), lr=opt.lr, betas=(0.5, 0.999))
optimizer_D = optim.Adam(discriminator.parameters(),
                         lr=opt.lr,
                         betas=(0.5, 0.999))

# Dataset loader
trainloader = loaddata.getTrainingData(opt.batchSize, size=128)
testloader = loaddata.getTestingData(1, size=128)

criteria_recon_l2 = nn.MSELoss()
criteria_recon_l1 = nn.L1Loss()

###################################
###### Training ######

with open('./output_' + opt.output_str + '/loss.csv', 'w',
          newline='') as csvfile:
Code Example #4
File: main.py  Project: djkim1991/DCGAN
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import Adam

from model.Discriminator import Discriminator
from model.Generator import Generator

from loaders.MNISTLoader import MNIST
from util.ImageUtil import ImageUtil

# create model objects
discriminator = Discriminator()
generator = Generator()

# set data loader
dataLoader = MNIST()
train_loader, test_loader = dataLoader.train_loader, dataLoader.test_loader

# optimizer
D_optimizer = Adam(params=discriminator.parameters(), lr=0.001)
G_optimizer = Adam(params=generator.parameters(), lr=0.001)
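# NOTE: the DCGAN paper (Radford et al., 2016) recommends Adam with
# lr=0.0002 and beta1=0.5; the defaults used here may train less stably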

# loss function
D_loss_function = nn.BCELoss()  # Binary Cross Entropy loss
G_loss_function = nn.BCELoss()  # Binary Cross Entropy loss

imageUtil = ImageUtil()

epoch_size = 10000
for epoch in range(epoch_size):
    for i, data in enumerate(train_loader):
        real_data, real_label = data
        real_data, real_label = Variable(real_data), Variable(real_label)

        if torch.cuda.is_available():
Code Example #5
G_BA.apply(weights_init)

D_A.apply(weights_init)
D_B.apply(weights_init)

if cuda:
    D_A.cuda()
    D_B.cuda()
    G_AB.cuda()
    G_BA.cuda()

###########   LOSS & OPTIMIZER   ##########
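# NOTE: despite the name, criterionMSE below is an L1 loss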
criterionMSE = nn.L1Loss()
criterion = nn.MSELoss()
# itertools.chain is used so a single optimizer updates both generators
optimizerD_A = torch.optim.Adam(D_A.parameters(), lr=0.0002,
                                betas=(0.5, 0.999), weight_decay=1e-4)
optimizerD_B = torch.optim.Adam(D_B.parameters(), lr=0.0002,
                                betas=(0.5, 0.999), weight_decay=1e-4)
optimizerG = torch.optim.Adam(chain(G_AB.parameters(), G_BA.parameters()),
                              lr=0.0002, betas=(0.5, 0.999))


real_A = torch.FloatTensor(batchSize, input_nc, fineSize, fineSize)
AB = torch.FloatTensor(batchSize, input_nc, fineSize, fineSize)
real_B = torch.FloatTensor(batchSize, output_nc, fineSize, fineSize)
BA = torch.FloatTensor(batchSize, output_nc, fineSize, fineSize)
label = torch.FloatTensor(batchSize)

if cuda:
    real_A = real_A.cuda()
    real_B = real_B.cuda()
    label = label.cuda()
    AB = AB.cuda()
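
# NOTE: preallocating input tensors and filling them each batch was a common
# pre-0.4 PyTorch idiom; modern code would move each batch with .to(device)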
Code Example #6
    G_BA.apply(weights_init)

if opt.cuda:
    D_A.cuda()
    D_B.cuda()
    G_AB.cuda()
    G_BA.cuda()

D_A.apply(weights_init)
D_B.apply(weights_init)

###########   LOSS & OPTIMIZER   ##########
criterionMSE = nn.MSELoss()
criterion = nn.BCELoss()
# itertools.chain lets one optimizer update two networks at once: the two
# discriminators share optimizerD, and the two generators share optimizerG
optimizerD = torch.optim.Adam(chain(D_A.parameters(), D_B.parameters()),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999),
                              weight_decay=opt.weight_decay)
optimizerG = torch.optim.Adam(chain(G_AB.parameters(), G_BA.parameters()),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))

###########   GLOBAL VARIABLES   ###########
input_nc = opt.input_nc
output_nc = opt.output_nc
fineSize = opt.fineSize

real_A = torch.FloatTensor(opt.batchSize, input_nc, fineSize, fineSize)
real_B = torch.FloatTensor(opt.batchSize, output_nc, fineSize, fineSize)
label = torch.FloatTensor(opt.batchSize)
Code Example #7
    D_A.cuda()
    D_B.cuda()
    G_AB.cuda()
    G_BA.cuda()

D_A.apply(weights_init)
D_B.apply(weights_init)

###########   LOSS & OPTIMIZER   ##########
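# NOTE: despite the name, criterionMSE below is an L1 loss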
criterionMSE = nn.L1Loss()
if opt.loss_type == 'bce':
    criterion = nn.BCELoss()
else:
    criterion = nn.MSELoss()
# itertools.chain is used so a single optimizer updates both generators
optimizerD_A = torch.optim.Adam(D_A.parameters(),
                                lr=opt.lr,
                                betas=(opt.beta1, 0.999),
                                weight_decay=opt.weight_decay)
optimizerD_B = torch.optim.Adam(D_B.parameters(),
                                lr=opt.lr,
                                betas=(opt.beta1, 0.999),
                                weight_decay=opt.weight_decay)
optimizerG = torch.optim.Adam(chain(G_AB.parameters(), G_BA.parameters()),
                              lr=opt.lr,
                              betas=(opt.beta1, 0.999))

###########   GLOBAL VARIABLES   ###########
input_nc = opt.input_nc
output_nc = opt.output_nc
fineSize = opt.fineSize