Example #1
    def __init__(self, args):
        # Generator: 100-dim noise vector -> flattened 1024-dim image in [-1, 1]
        self.G = nn.Sequential(nn.Linear(100, 256), nn.LeakyReLU(0.2),
                               nn.Linear(256, 512), nn.LeakyReLU(0.2),
                               nn.Linear(512, 1024), nn.LeakyReLU(0.2),
                               nn.Tanh())

        # Discriminator: flattened 1024-dim image -> real/fake probability
        self.D = nn.Sequential(nn.Linear(1024, 512), nn.LeakyReLU(0.2),
                               nn.Linear(512, 256), nn.LeakyReLU(0.2),
                               nn.Linear(256, 1), nn.Sigmoid())

        self.cuda = False
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Binary cross-entropy loss and Adam optimizers
        self.loss = nn.BCELoss()
        self.d_optimizer = torch.optim.Adam(self.D.parameters(),
                                            lr=0.0002,
                                            weight_decay=0.00001)
        self.g_optimizer = torch.optim.Adam(self.G.parameters(),
                                            lr=0.0002,
                                            weight_decay=0.00001)

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10
        self.epochs = args.epochs
        self.batch_size = args.batch_size
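
The example above only shows the constructor. A minimal sketch of how one training step could use these attributes is given below; the method name train_step, the flattened (batch_size, 1024) real_images tensor, and the 100-dim noise input are assumptions chosen to match the layer sizes above, not part of the original example, and device handling (check_cuda) is omitted.

    def train_step(self, real_images):
        # Hypothetical helper; real_images is assumed to be a
        # (batch_size, 1024) float tensor scaled to [-1, 1].
        batch_size = real_images.size(0)
        real_labels = torch.ones(batch_size, 1)
        fake_labels = torch.zeros(batch_size, 1)

        # Discriminator step: push D(real) towards 1 and D(fake) towards 0
        z = torch.randn(batch_size, 100)  # matches nn.Linear(100, 256)
        fake_images = self.G(z)
        d_loss = (self.loss(self.D(real_images), real_labels) +
                  self.loss(self.D(fake_images.detach()), fake_labels))
        self.d_optimizer.zero_grad()
        d_loss.backward()
        self.d_optimizer.step()

        # Generator step: push D(G(z)) towards 1
        g_loss = self.loss(self.D(self.G(z)), real_labels)
        self.g_optimizer.zero_grad()
        g_loss.backward()
        self.g_optimizer.step()
        return d_loss.item(), g_loss.item()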
Example #2
    def __init__(self, args):
        print("WGAN_GradientPenalty init model.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels

        # Check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 1e-4
        self.b1 = 0.5
        self.b2 = 0.999
        self.batch_size = 64

        # WGAN with gradient penalty uses Adam
        self.d_optimizer = optim.Adam(self.D.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))
        self.g_optimizer = optim.Adam(self.G.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5
        self.lambda_term = 10
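
In the constructor above, self.lambda_term = 10 is the gradient-penalty coefficient and self.critic_iter = 5 the number of critic updates per generator update. A minimal sketch of the penalty term follows; the method name, the (N, C, H, W) image shape, and the interpolation details are assumptions based on the standard WGAN-GP formulation rather than the original example.

    def calculate_gradient_penalty(self, real_images, fake_images):
        # Hypothetical helper; assumes 4D image tensors and `import torch`.
        batch_size = real_images.size(0)
        eta = torch.rand(batch_size, 1, 1, 1).expand_as(real_images)

        # Random points on straight lines between real and fake samples
        interpolated = (eta * real_images +
                        (1 - eta) * fake_images.detach()).requires_grad_(True)
        prob_interpolated = self.D(interpolated)

        gradients = torch.autograd.grad(outputs=prob_interpolated,
                                        inputs=interpolated,
                                        grad_outputs=torch.ones_like(prob_interpolated),
                                        create_graph=True,
                                        retain_graph=True)[0]
        # Penalise deviation of the gradient norm from 1, scaled by lambda_term
        grad_norm = gradients.view(batch_size, -1).norm(2, dim=1)
        return self.lambda_term * ((grad_norm - 1) ** 2).mean()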
Example #3
    def __init__(self, args):
        print("WGAN_CP init model.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels

        # check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 0.00005

        self.batch_size = 64
        self.weight_cliping_limit = 0.01

        # WGAN with weight clipping uses RMSprop instead of Adam
        self.d_optimizer = torch.optim.RMSprop(self.D.parameters(),
                                               lr=self.learning_rate)
        self.g_optimizer = torch.optim.RMSprop(self.G.parameters(),
                                               lr=self.learning_rate)

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5
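
Here self.weight_cliping_limit = 0.01 reproduces the weight clipping of the original WGAN paper. A minimal sketch of how that limit is typically applied after each self.d_optimizer.step() (critic_iter times per generator update) is shown below; the helper name is an assumption, while the attribute names come from the constructor above.

    def clip_critic_weights(self):
        # Hypothetical helper: clamp every critic parameter into
        # [-weight_cliping_limit, weight_cliping_limit].
        for p in self.D.parameters():
            p.data.clamp_(-self.weight_cliping_limit, self.weight_cliping_limit)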
Example #4
    def __init__(self, args):
        print("DCGAN model initalization.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels

        # Binary cross-entropy loss
        self.loss = nn.BCELoss()

        self.cuda = False
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Use a lower learning rate (lr=0.0002) than the Adam authors suggest, and beta_1 = 0.5 instead of 0.9; both work better in practice [Radford2015]
        self.d_optimizer = torch.optim.Adam(self.D.parameters(),
                                            lr=0.0002,
                                            betas=(0.5, 0.999))
        self.g_optimizer = torch.optim.Adam(self.G.parameters(),
                                            lr=0.0002,
                                            betas=(0.5, 0.999))

        self.epochs = args.epochs
        self.batch_size = args.batch_size

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10
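
All of the examples above call self.check_cuda(args.cuda), whose body is not shown. A plausible sketch for this DCGAN example is given below; it is an assumption about what that method does, not the original implementation.

    def check_cuda(self, cuda_flag=False):
        # Assumed implementation: move the networks and the BCE loss to the
        # GPU at self.cuda_index when requested and available.
        if cuda_flag and torch.cuda.is_available():
            self.cuda = True
            self.D = self.D.cuda(self.cuda_index)
            self.G = self.G.cuda(self.cuda_index)
            self.loss = self.loss.cuda(self.cuda_index)
        else:
            self.cuda = False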
Example #5
    def __init__(self, args):
        print("DCGAN model initalization.")
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels)
        self.C = args.channels
        self.mode = args.mode

        self.name = ('res/_mode_' + str(args.mode) +
                     '_beta_g_' + str(args.beta_g) +
                     '_beta_d_' + str(args.beta_d) +
                     '_lr_g_' + str(args.lr_g) +
                     '_lr_d_' + str(args.lr_d) +
                     '_alpha_d_vjp_' + str(args.alpha_d_vjp) +
                     '_alpha_g_vjp_' + str(args.alpha_g_vjp) +
                     '_alpha_d_grad_' + str(args.alpha_d_grad) +
                     '_alpha_g_grad_' + str(args.alpha_g_grad))
        print(self.name)
        if not os.path.exists(self.name):
            os.makedirs(self.name)
        # Binary cross-entropy (with logits) loss
        self.loss = nn.BCEWithLogitsLoss()

        self.cuda = False
        self.cuda_index = 0
        # check if cuda is available
        self.check_cuda(args.cuda)

        # Use a lower learning rate (lr=0.0002) than the Adam authors suggest, and beta_1 = 0.5 instead of 0.9; both work better in practice [Radford2015]
        if self.mode == 'adam':
            self.d_optimizer = torch.optim.Adam(self.D.parameters(),
                                                lr=0.0002,
                                                betas=(0.5, 0.999))
            self.g_optimizer = torch.optim.Adam(self.G.parameters(),
                                                lr=0.0002,
                                                betas=(0.5, 0.999))
        elif self.mode == 'adam_vjp':
            self.d_optimizer = optim.VJP_Adam(self.D.parameters(),
                                              lr=args.lr_d,
                                              betas=(args.beta_d, 0.999),
                                              alpha_vjp=args.alpha_d_vjp,
                                              alpha_grad=args.alpha_d_grad)
            self.g_optimizer = optim.VJP_Adam(self.G.parameters(),
                                              lr=args.lr_g,
                                              betas=(args.beta_g, 0.999),
                                              alpha_vjp=args.alpha_g_vjp,
                                              alpha_grad=args.alpha_g_grad)
        self.epochs = args.epochs
        self.batch_size = args.batch_size

        # Set the logger
        self.logger = Logger('./logs')
        self.number_of_images = 10
Example #6
    def __init__(self, args):
        print("init model.")
        print(args)
        self.G = Generator(args.channels)
        self.D = Discriminator(args.channels, args.ssup)
        self.C = args.channels
        self.ssup = args.ssup
        self.loss_type = args.loss

        if self.ssup:
            self.save_path = 'sslgan_gp_ssup'
        else:
            self.save_path = 'sslgan_gp'

        # Check if cuda is available
        self.check_cuda(args.cuda)

        # WGAN values from paper
        self.learning_rate = 1e-4
        self.b1 = 0.5
        self.b2 = 0.999
        self.batch_size = 64

        # WGAN with gradient penalty uses Adam
        self.d_optimizer = optim.Adam(self.D.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))
        self.g_optimizer = optim.Adam(self.G.parameters(),
                                      lr=self.learning_rate,
                                      betas=(self.b1, self.b2))

        # Set the logger
        self.logger = Logger('./logs')
        self.logger.writer.flush()
        self.number_of_images = 10

        self.generator_iters = args.generator_iters
        self.critic_iter = 5
        self.lambda_term = 10
        self.weight_rotation_loss_d = 1.0
        self.weight_rotation_loss_g = 0.5
        self.print_iter = 50
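
The attributes self.weight_rotation_loss_d = 1.0 and self.weight_rotation_loss_g = 0.5 presumably weight an auxiliary rotation-prediction loss on the discriminator and generator, as in self-supervised GANs (the args.ssup flag is also passed to the Discriminator). A minimal sketch of the rotation task itself is given below; the helper name and the use of torch.rot90 are assumptions.

    def make_rotation_batch(self, images):
        # Hypothetical helper: rotate an (N, C, H, W) batch by 0/90/180/270
        # degrees and return the matching rotation-class labels for the
        # auxiliary self-supervision head.
        rotated = torch.cat([torch.rot90(images, k, dims=[2, 3]) for k in range(4)], dim=0)
        labels = torch.arange(4).repeat_interleave(images.size(0))
        return rotated, labels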
Example #7
        if os.path.isfile(args.checkpoint):

            print("=> loading checkpoint '{}'".format(args.checkpoint))
            checkpoint = torch.load(args.checkpoint)
            optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'".format(args.checkpoint))
        else:
            print("=> no checkpoint found at '{}'".format(args.checkpoint))

    checkpoint_saver = nn_module_utils.CheckPointSaver([
        'epoch_loss', 'epoch_accuracy', 'batch_loss', 'batch_accuracy',
        'test_loss'
    ], checkpoint_dir)

    logger = Logger('./logs')
    os.makedirs("./results", exist_ok=True)

    def train(epoch):
        model.train()
        train_loss = 0
        train_reconstruction_loss = 0
        train_kld_loss = 0
        step = (epoch - 1) * len(train_loader.dataset) + 1
        for batch_idx, (input, ground_truth, _) in enumerate(train_loader):
            input = Variable(input)
            ground_truth = Variable(ground_truth)
            if args.cuda:
                input = input.cuda()
                ground_truth = ground_truth.cuda()
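
Example #7 only shows the loading side of the checkpoint logic (the body of train() is truncated here). A checkpoint containing the two keys it reads back, 'state_dict' and 'optimizer', could be written roughly as below; this helper is an assumption, not part of the original example.

    def save_checkpoint(state, filename):
        # Hypothetical counterpart to the loading code above, e.g. called once
        # per epoch with {'state_dict': model.state_dict(),
        #                 'optimizer': optimizer.state_dict()}.
        torch.save(state, filename)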