Example #1
    def _init_model(self):

        self._enc_model = models.Encoder(density=8, size=self._image_size,
                                         latent_size=self._latent_size)
        self._gen_model = models.Generator(density=8, size=self._image_size,
                                           latent_size=self._latent_size)
        self._dis_model = models.Discriminator(density=8, size=self._image_size)

        self._enc_dis_model = models.Encoder(density=8, size=self._image_size,
                                             latent_size=self._latent_size)
        self._gen_dis_model = models.Generator(density=8, size=self._image_size,
                                               latent_size=self._latent_size)

        self._optimizer_enc = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_enc.setup(self._enc_model)
        self._optimizer_enc.add_hook(chainer.optimizer.WeightDecay(0.00001))
        self._optimizer_gen = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_gen.setup(self._gen_model)
        self._optimizer_gen.add_hook(chainer.optimizer.WeightDecay(0.00001))

        self._optimizer_enc_dis = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_enc_dis.setup(self._enc_dis_model)
        self._optimizer_enc_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))
        self._optimizer_gen_dis = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_gen_dis.setup(self._gen_dis_model)
        self._optimizer_gen_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))

        self._optimizer_dis = optimizers.Adam(alpha=0.0001, beta1=0.5)
        self._optimizer_dis.setup(self._dis_model)
        self._optimizer_dis.add_hook(chainer.optimizer.WeightDecay(0.00001))

        self._enc_model.to_gpu(self._gpu)  # send to main GPU
        self._gen_model.to_gpu(self._gpu)
        self._dis_model.to_gpu(self._gpu)
Example #2
def test(dataset="datasets/horse2zebra", batch=1, imSize=128, inputChannels=3, outputChannels=3, cuda=False,
         cpus=-1, genXtoY="minecraftday2night/weights/netXtoY.pth", genYtoX="minecraftday2night/weights/netYtoX.pth"):
    print(dataset, batch, imSize, inputChannels, outputChannels, cuda, cpus, genXtoY, genYtoX)
    if cpus <= 0:
        threads = multiprocessing.cpu_count()
    else:
        threads = cpus
    XtoY = models.Generator(inputChannels, outputChannels)
    YtoX = models.Generator(inputChannels, outputChannels)

    if cuda:
        XtoY.cuda()
        YtoX.cuda()

    XtoY.load_state_dict(torch.load(genXtoY))
    YtoX.load_state_dict(torch.load(genYtoX))

    XtoY.eval()
    YtoX.eval()

    Tensor = torch.cuda.FloatTensor if cuda else torch.Tensor
    input_x = Tensor(batch, inputChannels, imSize, imSize)
    input_y = Tensor(batch, outputChannels, imSize, imSize)

    transformList = [transforms.Resize(int(imSize), Image.ANTIALIAS),
                     transforms.CenterCrop(imSize),
                     transforms.ToTensor(),
                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]

    dataloader = DataLoader(utils.LoadDataset(dataset, transformList=transformList, mode='test'),
                            batch_size=batch, shuffle=False, num_workers=threads)

    if not os.path.exists('minecraftday2night/x'):
        os.makedirs('minecraftday2night/x')
    if not os.path.exists('minecraftday2night/y'):
        os.makedirs('minecraftday2night/y')

    for i, data in enumerate(dataloader):
        currentBatch_x = Variable(input_x.copy_(data['x']))
        currentBatch_y = Variable(input_y.copy_(data['y']))

        # Generate minecraftday2night
        fake_y = 0.5 * (XtoY(currentBatch_x).data + 1.0)
        fake_x = 0.5 * (YtoX(currentBatch_y).data + 1.0)

        save_image(fake_x, 'minecraftday2night/x/%04d.jpg' % (i + 1))
        save_image(fake_y, 'minecraftday2night/y/%04d.jpg' % (i + 1))

        sys.stdout.write('\rGenerated %04d of %04d' % (i + 1, len(dataloader)))

    sys.stdout.write('\n')
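
A minimal driver for the test() routine above could look like the sketch below; enabling CUDA based on hardware availability is an assumption, not part of the original snippet.

# Hypothetical entry point; uses only names defined in the snippet above.
if __name__ == '__main__':
    test(cuda=torch.cuda.is_available())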
Example #3
    def _create_models(self) -> None:
        cfg = self._config
        device = self._device

        self._generator = models.Generator(style_code_dim=cfg.style_code_dim)

        self._generator_ema = models.Generator(
            style_code_dim=cfg.style_code_dim)

        self._generator_ema.load_state_dict(self._generator.state_dict())

        self._mapping = models.Mapping(
            latent_dim=cfg.mapper_latent_code_dim,
            hidden_dim=cfg.mapper_hidden_dim,
            out_dim=cfg.style_code_dim,
            num_shared_layers=cfg.mapper_shared_layers,
            num_heads=cfg.num_domains,
        )

        self._mapping_ema = models.Mapping(
            latent_dim=cfg.mapper_latent_code_dim,
            hidden_dim=cfg.mapper_hidden_dim,
            out_dim=cfg.style_code_dim,
            num_shared_layers=cfg.mapper_shared_layers,
            num_heads=cfg.num_domains,
        )

        self._mapping_ema.load_state_dict(self._mapping.state_dict())

        self._style_encoder = models.StyleEncoder(
            style_code_dim=cfg.style_code_dim,
            num_heads=cfg.num_domains,
        )

        self._style_encoder_ema = models.StyleEncoder(
            style_code_dim=cfg.style_code_dim,
            num_heads=cfg.num_domains,
        )

        self._style_encoder_ema.load_state_dict(
            self._style_encoder.state_dict())

        self._discriminator = models.Discriminator(num_heads=cfg.num_domains)

        self._generator.to(device)
        self._generator_ema.eval().to(device)
        self._mapping.to(device)
        self._mapping_ema.eval().to(device)
        self._style_encoder.to(device)
        self._style_encoder_ema.eval().to(device)
        self._discriminator.to(device)
Example #4
    def __init__(self, fine_tuning=True):
        super(C_GAN, self).__init__()

        self.G_A2B = models.Generator(3, 3)
        self.G_B2A = models.Generator(3, 3)
        self.D_A = models.Discriminator(3)
        self.D_B = models.Discriminator(3)

        self.G_A2B.apply(init_parameters)
        self.G_B2A.apply(init_parameters)
        self.D_A.apply(init_parameters)
        self.D_B.apply(init_parameters)

        self.fake_a_buffer = ReplayBuffer()
        self.fake_b_buffer = ReplayBuffer()
Example #5
def sampling(netG_path, out, size=10000):
    """
    sampling a series of tokens squentially for molecule generation
    Args:
        netG_path (str): The file path of generator.
        out (str): The file path of genrated molecules including SMILES, and its scores for each sample
        size (int): The number of molecules required to be generated.
        env (utils.Environment): The environment to provide the scores of all objectives for each sample

    Returns:
        smiles (List): A list of generated SMILES-based molecules
    """
    batch_size = 250
    samples = []
    voc = utils.Voc(init_from_file="data/voc.txt")
    netG = models.Generator(voc)
    netG.load_state_dict(torch.load(netG_path))
    batch = size // batch_size
    mod = size % batch_size
    for i in tqdm(range(batch + 1)):
        if i == 0:
            if mod == 0: continue
            tokens = netG.sample(mod)
        else:
            tokens = netG.sample(batch_size)
        smiles = [voc.decode(s) for s in tokens]
        samples.extend(smiles)
    return samples
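
A usage sketch for sampling(); the checkpoint path, output path, and sample count below are placeholders, not values taken from the original code.

# Hypothetical call; both paths are placeholders.
smiles = sampling('output/lstm_ligand.pkg', out='output/samples.txt', size=1000)
print('%d SMILES strings sampled' % len(smiles))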
Example #6
    def __init__(self, device, model, model_num_labels, image_nc, box_min,
                 box_max):
        output_nc = image_nc
        self.device = device
        self.model_num_labels = model_num_labels
        self.model = model
        self.input_nc = image_nc
        self.output_nc = output_nc
        self.box_min = box_min
        self.box_max = box_max

        self.gen_input_nc = image_nc
        self.netG = models.Generator(self.gen_input_nc, image_nc).to(device)
        self.netDisc = models.Discriminator(image_nc).to(device)

        # initialize all weights
        self.netG.apply(weights_init)
        self.netDisc.apply(weights_init)

        # initialize optimizers
        self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=0.001)
        self.optimizer_D = torch.optim.Adam(self.netDisc.parameters(),
                                            lr=0.001)

        if not os.path.exists(models_path):
            os.makedirs(models_path)
Example #7
def training(is_lstm=True):
    voc = utils.Voc(init_from_file="data/voc.txt")
    if is_lstm:
        netP_path = 'output/lstm_chembl'
        netE_path = 'output/lstm_ligand'
    else:
        netP_path = 'output/gru_chembl'
        netE_path = 'output/gru_ligand'

    prior = models.Generator(voc, is_lstm=is_lstm)
    if not os.path.exists(netP_path + '.pkg'):
        df = pd.read_table("data/chembl_corpus.txt")
        chembl = df.Token
        chembl = torch.LongTensor(voc.encode([seq.split(' ') for seq in chembl]))
        chembl = DataLoader(chembl, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
        prior.fit(chembl, out=netP_path, epochs=50)
    prior.load_state_dict(torch.load(netP_path + '.pkg'))

    # explore = model.Generator(voc)
    df = pd.read_table('data/ligand_corpus.txt').drop_duplicates('Smiles')
    valid = df.sample(len(df) // 10).Token
    train = df.drop(valid.index).Token
    # explore.load_state_dict(torch.load(netP_path + '.pkg'))

    train = torch.LongTensor(voc.encode([seq.split(' ') for seq in train]))
    train = DataLoader(train, batch_size=BATCH_SIZE, shuffle=True)

    valid = torch.LongTensor(voc.encode([seq.split(' ') for seq in valid]))
    valid = DataLoader(TensorDataset(valid), batch_size=BATCH_SIZE, shuffle=True)
    print('Fine-tuning started...')

    prior.fit(train, loader_valid=valid, out=netE_path, epochs=1000, lr=lr)
    print('Fine-tuning finished.')
Example #8
    def __init__(self, hyperparameters):
        super(FUNIT_Trainer, self).__init__()
        self.generator = models.Generator()
        self.discriminator = models.Discriminator(
            hyperparameters['source_classes'])

        self.gan_loss = nn.BCELoss(reduction='mean')
        # The paper is not clear about this loss: it references VAE papers that use BCE, but uses L1 itself
        self.content_reconstruction_loss = nn.L1Loss(reduction='mean')
        # Same as the content reconstruction loss: unclear
        self.feature_matching_loss = nn.L1Loss(reduction='mean')

        lr = hyperparameters['lr']

        # Setup the optimizers
        beta1 = hyperparameters['beta1']
        beta2 = hyperparameters['beta2']
        dis_params = list(self.discriminator.parameters())
        gen_params = list(self.generator.parameters())
        self.dis_opt = torch.optim.Adam(
            [p for p in dis_params if p.requires_grad],
            lr=lr,
            betas=(beta1, beta2),
            weight_decay=hyperparameters['weight_decay'])
        self.gen_opt = torch.optim.Adam(
            [p for p in gen_params if p.requires_grad],
            lr=lr,
            betas=(beta1, beta2),
            weight_decay=hyperparameters['weight_decay'])
Example #9
    def __init__(self, device, model, n_labels, n_channels, target, lr,
                 l_inf_bound, alpha, beta, gamma, kappa, c, n_steps_D,
                 n_steps_G, is_relativistic):
        self.device = device
        self.n_labels = n_labels        # number of labels of the target model
        self.model = model      # target model
        
        self.target = target    # target dataset name

        self.lr = lr    # learning rate

        self.l_inf_bound = l_inf_bound      # perturbation bound

        self.alpha = alpha  # weight for GAN loss
        self.beta = beta    # weight for hinge loss
        self.gamma = gamma  # weight for adv loss
        self.kappa = kappa  # used in the adversarial loss
        self.c = c  # used in the hinge loss

        self.n_steps_D = n_steps_D     # number of steps to train the discriminator per batch
        self.n_steps_G = n_steps_G      # number of steps to train the generator per batch

        self.is_relativistic = is_relativistic

        self.G = models.Generator(n_channels, n_channels, target).to(device)
        self.D = models.Discriminator(n_channels).to(device)

        # initialize all weights
        self.G.apply(init_weights)
        self.D.apply(init_weights)

        # initialize optimizers
        self.optimizer_G = torch.optim.Adam(self.G.parameters(), lr=self.lr)
        self.optimizer_D = torch.optim.Adam(self.D.parameters(), lr=self.lr)

        # create model save path if it doesn't exist
        if not os.path.exists(models_path):
            os.makedirs(models_path)

        # create loss plot save path if it doesn't exist
        if not os.path.exists('{}{}/'.format(losses_path, target)):
            os.makedirs('{}{}/'.format(losses_path, target))
Example #10
    def __init__(self, device, model, model_num_labels, image_nc, box_min,
                 box_max, eps, pgd_iter, models_path, out_path, model_name,
                 writer, E_lr, advG_lr, defG_lr):
        output_nc = image_nc
        self.device = device
        self.model_num_labels = model_num_labels
        self.model = model
        self.input_nc = image_nc
        self.output_nc = output_nc
        self.box_min = box_min
        self.box_max = box_max
        self.eps = eps
        self.pgd_iter = pgd_iter
        self.models_path = models_path
        self.out_path = out_path
        self.model_name = model_name
        self.writer = writer
        self.E_lr = E_lr
        self.advG_lr = advG_lr
        self.defG_lr = defG_lr

        self.en_input_nc = image_nc
        self.E = models.Encoder(image_nc).to(device)
        self.defG = models.Generator(adv=False).to(device)
        self.advG = models.Generator(y_dim=model_num_labels,
                                     adv=True).to(device)
        self.pgd = PGD(self.model,
                       self.E,
                       self.defG,
                       self.device,
                       self.eps,
                       step_size=self.eps / 4)

        # initialize all weights
        self.E.apply(weights_init)
        self.defG.apply(weights_init)
        self.advG.apply(weights_init)

        # initialize optimizers
        self.optimizer_E = torch.optim.Adam(self.E.parameters(), lr=self.E_lr)
        self.optimizer_defG = torch.optim.Adam(self.defG.parameters(),
                                               lr=self.defG_lr)
        self.optimizer_advG = torch.optim.Adam(self.advG.parameters(),
                                               lr=self.advG_lr)
Example #11
def main():
    parser = argparse.ArgumentParser(description='Train WSISR on compressed TMA dataset')
    parser.add_argument('--batch-size', default=32, type=int, help='Batch size')
    parser.add_argument('--patch-size', default=224, type=int, help='Patch size')
    parser.add_argument('--up-scale', default=5, type=float, help='Targeted upscale factor')
    parser.add_argument('--num-workers', default=1, type=int, help='Number of workers')
    parser.add_argument('--num-epochs', default=900, type=int, help='Number of epochs, more epochs are desired for GAN training')
    parser.add_argument('--g-lr', default=0.0001, type=float, help='Learning rate of the generator')
    parser.add_argument('--d-lr', default=0.00001, type=float, help='Learning rate of the discriminator')
    parser.add_argument('--percep-weight', default=0.01, type=float, help='GAN loss weight')
    parser.add_argument('--run-from', default=None, type=str, help='Load weights from a previous run, use folder name in [weights] folder')
    parser.add_argument('--start-epoch', default=1, type=int, help='Starting epoch for the curriculum, start at 1/2 of the epochs to skip the curriculum')
    parser.add_argument('--gan', default=1, type=int, help='Use GAN')
    parser.add_argument('--num-critic', default=1, type=int, help='Interval of training the discriminator')
    args = parser.parse_args()
    warnings.filterwarnings('ignore')
    device = torch.device('cuda:0')
    tensor = torch.cuda.FloatTensor
    data.generate_compress_csv()
    valid_dataset = new_compress_curriculum(args, args.up_scale, 'valid')
    generator = models.Generator()
    generator.to(device)
    discriminator = models.Discriminator()
    discriminator.to(device)
    criterionL = nn.L1Loss().cuda()
    criterionMSE = nn.MSELoss().cuda()
    patch = (1, args.patch_size // 2 ** 4, args.patch_size // 2 ** 4)
    if args.run_from is not None:
        generator.load_state_dict(torch.load(os.path.join('weights', args.run_from, 'generator.pth')))
        try:
            discriminator.load_state_dict(torch.load(os.path.join('weights', args.run_from, 'discriminator.pth')))
        except FileNotFoundError:
            print('Discriminator weights not found!')
    optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.g_lr)
    optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.d_lr)
    scheduler_G = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_G, args.num_epochs, args.g_lr*0.05)
    scheduler_D = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_D, args.num_epochs, args.d_lr*0.05)
    run = datetime.now().strftime("%Y-%m-%d--%H-%M-%S")
    cur_length = int(0.5*args.num_epochs)
    init_scale = 2**2
    step_size = (2**args.up_scale-init_scale) / cur_length
    for epoch in range(args.start_epoch, args.num_epochs):
        factor = min(log2(init_scale+(epoch-1)*step_size), args.up_scale)
        print('curriculum updated: {} '.format(factor))
        train_dataset = new_compress_curriculum(args, factor, 'train', stc=True)
        train(args, epoch, run, train_dataset, generator, discriminator, optimizer_G, optimizer_D, criterionL, criterionMSE, tensor, device, patch)
        scheduler_G.step()
        scheduler_D.step()
        # evaluate after every epoch
        fid, psnr = test(args, generator, data.compress_csv_path('valid'))
        print_output(generator, valid_dataset, device)
        print('\r>>>> PSNR: {}, FID: {}'.format(psnr, fid))
    test(args, generator, data.compress_csv_path('valid'), stitching=True)
Example #12
def init_models_res(opt):
    # generator initialization:
    netG = models.Generator(opt).to(opt.device)
    netG.apply(models.weights_init)

    # discriminator initialization:
    netD = models.WDiscriminator_mask(opt).to(opt.device)
    netD.apply(models.weights_init)

    return netD, netG
Example #13
    def __init__(self, args):
        self.gan_c = args.percep_weight
        self.gan = args.gan
        self.device = args.device
        self.patch = (1, args.patch_size // 2 ** 4, args.patch_size // 2 ** 4)
        self.dis_freq = args.dis_freq
        self.num_epochs = args.num_epochs

        self.patch_size = args.patch_size
        self.batch_size = args.batch_size
        self.dis_out_shape = (self.batch_size, 1, self.patch_size // 2 ** 4, self.patch_size // 2 ** 4)

        self.num_workers = args.num_workers
        self.up_scale = args.up_scale


        self.generator = models.Generator()
        self.generator.to(self.device)
        self.discriminator = models.Discriminator()
        self.discriminator.to(self.device)

        self.tester = Tester(args, 'output')

        self.optimizer_G = torch.optim.Adam(self.generator.parameters(), lr=args.g_lr)
        self.optimizer_D = torch.optim.Adam(self.discriminator.parameters(), lr=args.d_lr)
        self.criterionL = nn.L1Loss().to(self.device)
        self.criterionMSE = nn.MSELoss().to(self.device)
        self.scheduler_G = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer_G, self.num_epochs,
                                                                      args.g_lr * 0.05)
        self.scheduler_D = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer_D, self.num_epochs,
                                                                      args.d_lr * 0.05)

        self.weight_dir = os.path.join(args.dir, 'weights')
        os.makedirs(self.weight_dir, exist_ok=True)
        self.log_path = os.path.join(args.dir, 'logs')
        os.makedirs(self.log_path, exist_ok=True)

        if args.run_from is not None:
            gen_path = os.path.join(self.weight_dir, 'generator_{}.pth'.format(args.run_from))
            if os.path.exists(gen_path):
                self.generator.load_state_dict(torch.load(gen_path))
            else:
                raise FileNotFoundError('Generator weights not found!')
            dis_path = os.path.join(self.weight_dir, 'discriminator_{}.pth'.format(args.run_from))
            if os.path.exists(dis_path):
                self.discriminator.load_state_dict(torch.load(dis_path))
            else:
                print('Discriminator weights not found!')
            self.start_epoch = args.run_from + 1
        else:
            self.start_epoch = 0

        # writing log for training
        self.writer = SummaryWriter(self.log_path)
Example #14
    def init_model(self):

        # Construct Generator / Discriminator NN
        self.DiscA = models.Discriminator_5()
        self.DiscB = models.Discriminator_5()
        self.GenA = models.Generator(9)  # Generator with 9 ResNets
        self.GenB = models.Generator(9)  # Generator with 9 ResNets

        # Make CUDA tensors
        if cuda_flag:
            device = torch.device("cuda:0")
            num_device = torch.cuda.device_count()
            print("Number of devices: ", num_device)
            self.DiscA = nn.DataParallel(
                self.DiscA, device_ids=list(range(num_device)))
            self.DiscB = nn.DataParallel(
                self.DiscB, device_ids=list(range(num_device)))
            self.GenA = nn.DataParallel(
                self.GenA, device_ids=list(range(num_device)))
            self.GenB = nn.DataParallel(
                self.GenB, device_ids=list(range(num_device)))

            print("Sending models to device")
            self.DiscA = self.DiscA.to(device)
            self.DiscB = self.DiscB.to(device)
            self.GenA = self.GenA.to(device)
            self.GenB = self.GenB.to(device)

        # Optimizer
        print("Optimizer init")
        self.DiscA_opt = torch.optim.Adam(self.DiscA.parameters(),
                                          lr=learning_rate,
                                          betas=(beta1, beta2))
        self.DiscB_opt = torch.optim.Adam(self.DiscB.parameters(),
                                          lr=learning_rate,
                                          betas=(beta1, beta2))
        self.GenA_opt = torch.optim.Adam(self.GenA.parameters(),
                                         lr=learning_rate,
                                         betas=(beta1, beta2))
        self.GenB_opt = torch.optim.Adam(self.GenB.parameters(),
                                         lr=learning_rate,
                                         betas=(beta1, beta2))
Example #15
    def __init__(self, opt):
        self.opt = opt
        self.wh = int(np.sqrt(opt.num_classes))
        self.G = models.Generator(opt)
        self.G.cuda()
        self.latent = Get_Latent_Y(opt)
        self.latent.get_latent()
        filename = "%s_netG.pth" % args.e
        self.G.load_state_dict(torch.load(filename))
        if args.eval:
            self.G.eval()
Example #16
    def __init__(self, dataloader, args):

        self.dataloader = dataloader
        self.args = args
        self.kimgs = 0
        self.res = 2
        self.batchSize = {2: 128, 3: 128, 4: 128, 5: 64, 6: 32, 7: 16, 8: 8}
        self.kticks = {2: 100, 3: 100, 4: 80, 5: 60, 6: 60, 7: 40, 8: 20}
        self.device = torch.device("cuda:0")

        # Forming output folders
        self.out_imgs = os.path.join(args.outf, 'images')
        self.out_checkpoints = os.path.join(args.outf, 'checkpoints')
        utils.mkdirp(self.out_imgs)
        utils.mkdirp(self.out_checkpoints)

        # Defining networks and optimizers
        self.netG = models.Generator()
        self.netD = models.Discriminator()

        print('Generator')
        print(self.netG)

        print('Discriminator')
        print(self.netD)

        # Weight initialization
        self.netG.apply(utils.weights_init)
        self.netD.apply(utils.weights_init)

        # Defining loss criterions
        self.criterion = nn.BCELoss()

        self.netD.cuda()
        self.netG.cuda()
        self.criterion.cuda()

        # Defining optimizers
        self.optimizerD = optim.Adam(self.netD.parameters(),
                                     lr=args.lr,
                                     betas=(args.beta1, 0.999))
        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=args.lr,
                                     betas=(args.beta1, 0.999))

        # Other variables
        self.real_label = 1
        self.fake_label = 0
        self.fixed_noise = torch.randn(8,
                                       self.args.nz,
                                       1,
                                       1,
                                       device=self.device)
Example #17
    def __init__(
        self,
        device,
        model,
        n_labels,
        n_channels,
        target,
        lr,
        l_inf_bound,
        alpha,
        beta,
        gamma,
        kappa,
        c,
        n_steps_D,
        n_steps_G,
    ):
        self.device = device
        self.n_labels = n_labels
        self.model = model

        self.target = target

        self.lr = lr

        self.l_inf_bound = l_inf_bound

        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.kappa = kappa
        self.c = c

        self.n_steps_D = n_steps_D
        self.n_steps_G = n_steps_G

        self.G = models.Generator(n_channels, n_channels, target).to(device)
        self.D = models.Discriminator(n_channels).to(device)

        # initialize all weights
        self.G.apply(init_weights)
        self.D.apply(init_weights)

        # initialize optimizers
        self.optimizer_G = torch.optim.Adam(self.G.parameters(), lr=self.lr)
        self.optimizer_D = torch.optim.Adam(self.D.parameters(), lr=self.lr)

        if not os.path.exists(models_path):
            os.makedirs(models_path)

        if not os.path.exists('{}{}/'.format(losses_path, target)):
            os.makedirs('{}{}/'.format(losses_path, target))
Example #18
def make_inference(model_name='Generator', checkpoint_path=None):
    image_plotter = VisdomImagePlotter(env_name="test")
    ngpu = 1
    b_size = 128
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and ngpu > 0) else "cpu")
    model = models.Generator(checkpoint_path=checkpoint_path)
    model.to(device)
    noise = torch.randn(b_size, model.nz, 1, 1, device=device)
    with torch.no_grad():
        fake = model(noise).detach().cpu()
        image_plotter.plot(vutils.make_grid(fake, padding=2, normalize=True),
                           name="generator-output")
Example #19
def init_models_res(opt):
    # generator initialization:
    netG = models.Generator(opt).to(opt.device)
    netG.apply(models.weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))

    # discriminator initialization:
    netD = models.WDiscriminator(opt).to(opt.device)
    netD.apply(models.weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))

    return netD, netG
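
A sketch of wiring init_models_res() into optimizers; the learning rate and betas are assumptions (typical GAN defaults), not values from the original code.

# Hypothetical usage; opt is assumed to be the same options namespace used above.
netD, netG = init_models_res(opt)
optimizerG = torch.optim.Adam(netG.parameters(), lr=0.0005, betas=(0.5, 0.999))
optimizerD = torch.optim.Adam(netD.parameters(), lr=0.0005, betas=(0.5, 0.999))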
Example #20
    def __init__(self, opt):
        self.opt = opt
        self.G = models.Generator(opt)
        self.D = models.Discriminator()
        self.Lsloss = torch.nn.MSELoss()
        self.optimizerG = torch.optim.Adam(self.G.parameters(), 1e-4,
                                           (0, 0.999))
        self.optimizerD = torch.optim.Adam(self.D.parameters(), 4e-4,
                                           (0, 0.999))
        self.d_losses = []
        self.g_losses = []
        self.G.cuda()
        self.D.cuda()
        self.G.apply(self.weights_init)
        self.D.apply(self.weights_init)
        self.latent_ebdy_generator = Get_Latent_Y(opt)
Example #21
def main():
  args = parse_args()
  config = Config(args)

  # create the output directory
  os.makedirs(config.output_dir, exist_ok=True)

  # build the model
  model = models.generate_model(config.model)

  # resize the images
  img_orig = load_image(config.original_image, [config.width, config.height])
  img_style = load_image(config.style_image, [config.width, config.height] if not config.no_resize_style else None)

  # generate the image
  generator = models.Generator(model, img_orig, img_style, config)
  generator.generate(config)
Example #22
    def createModels(self):
        """
        This function will create models and their optimizers
        """
        self.gen = models.Generator().cuda()
        self.disc = models.Discriminator().cuda()
        self.gOptimizer = Adam(self.gen.parameters(),
                               lr=self.gLR,
                               betas=(0.0, 0.99))
        self.dOptimizer = Adam(self.disc.parameters(),
                               lr=self.dLR,
                               betas=(0.0, 0.99))

        print(
            'Models Instantiated. # of trainable parameters Disc:%e; Gen:%e' %
            (sum([np.prod([*p.size()]) for p in self.disc.parameters()]),
             sum([np.prod([*p.size()]) for p in self.gen.parameters()])))
Example #23
    def __init__(self, opt):
        self.opt = opt
        self.G = models.Generator(opt)
        self.D = models.Discriminator(opt)
        self.Lsloss = torch.nn.MSELoss()
        self.optimizerG = torch.optim.Adam(self.G.parameters(), opt.g_lr, opt.g_betas)
        self.optimizerD = torch.optim.Adam(self.D.parameters(), opt.d_lr, opt.d_betas)
        self.d_losses = []
        self.wd_losses = []
        self.g_losses = []
        self.wg_losses = []
        self.losses = {'d': [], 'g': [], 'wd': [], 'wg': []}
        self.G.cuda()
        self.D.cuda()
        self.G.apply(self.weights_init)
        self.D.apply(self.weights_init)
        self.latent_ebdy_generator = Get_Latent_Y(opt)
        self.generate_imgs_count = 0
        if False:  # disabled multi-process training experiment
            self.G.share_memory()
            self.processes = []
            self.main_to_thread1 = mp.Queue(maxsize=1)
            self.main_to_thread2 = mp.Queue(maxsize=1)
            self.main_to_thread3 = mp.Queue(maxsize=1)
            self.main_to_thread4 = mp.Queue(maxsize=1)
            self.thread1_to_main = mp.Queue(maxsize=1)
            self.thread2_to_main = mp.Queue(maxsize=1)
            self.thread3_to_main = mp.Queue(maxsize=1)
            self.thread4_to_main = mp.Queue(maxsize=1)
            self.thread1_grad = mp.Queue(maxsize=1)
            self.thread2_grad = mp.Queue(maxsize=1)
            self.thread3_grad = mp.Queue(maxsize=1)
            self.thread4_grad = mp.Queue(maxsize=1)
            p1 = mp.Process(target=func, args=(self.G, self.main_to_thread1, self.thread1_to_main, self.thread1_grad, self.optimizerG))
            p1.start()
            self.processes.append(p1)
            p2 = mp.Process(target=func, args=(self.G, self.main_to_thread2, self.thread2_to_main, self.thread2_grad, self.optimizerG))
            p2.start()
            self.processes.append(p2)
            p3 = mp.Process(target=func, args=(self.G, self.main_to_thread3, self.thread3_to_main, self.thread3_grad, self.optimizerG))
            p3.start()
            self.processes.append(p3)
            p4 = mp.Process(target=func, args=(self.G, self.main_to_thread4, self.thread4_to_main, self.thread4_grad, self.optimizerG))
            p4.start()
            self.processes.append(p4)
Example #24
def inpaint(args):
    dataset = datasets.RandomPatchDataset(args.test_data_dir,weighted_mask=True, window_size=args.window_size)
    dataloader = DataLoader(dataset, batch_size=args.batch_size)

    # Loading trained GAN model
    saved_gan = torch.load(args.gan_path)
    generator = models.Generator(args).cuda()
    discriminator = models.Discriminator(args).cuda()
    generator.load_state_dict(saved_gan["state_dict_G"])
    discriminator.load_state_dict(saved_gan["state_dict_D"])

    for i, (corrupted_images, original_images, masks, weighted_masks) in enumerate(dataloader):
        corrupted_images, masks, weighted_masks = corrupted_images.cuda(), masks.cuda(), weighted_masks.cuda()
        z_optimum = nn.Parameter(torch.FloatTensor(np.random.normal(0, 1, (corrupted_images.shape[0],args.latent_dim,))).cuda())
        optimizer_inpaint = optim.Adam([z_optimum])

        print("Starting backprop to input ...")
        for epoch in range(args.optim_steps):
            optimizer_inpaint.zero_grad()
            generated_images = generator(z_optimum)
            discriminator_opinion = discriminator(generated_images)
            c_loss = context_loss(corrupted_images, generated_images, weighted_masks)
            prior_loss = torch.sum(-torch.log(discriminator_opinion))
            inpaint_loss = c_loss + args.prior_weight*prior_loss
            inpaint_loss.backward()
            optimizer_inpaint.step()
            print("[Epoch: {}/{}] \t[Loss: \t[Context: {:.3f}] \t[Prior: {:.3f}] \t[Inpaint: {:.3f}]]  \r".format(1+epoch, args.optim_steps, c_loss, 
                                                                               prior_loss, inpaint_loss),end="")
        print("")

        blended_images = posisson_blending(masks, generated_images.detach(), corrupted_images)
    
        image_range = torch.min(corrupted_images), torch.max(corrupted_images)
        save_image(corrupted_images, "../outputs/corrupted_{}.png".format(i), normalize=True, range=image_range, nrow=5)
        save_image(generated_images, "../outputs/output_{}.png".format(i), normalize=True, range=image_range, nrow=5)
        save_image(blended_images, "../outputs/blended_{}.png".format(i), normalize=True, range=image_range, nrow=5)
        save_image(original_images, "../outputs/original_{}.png".format(i), normalize=True, range=image_range, nrow=5)

        del z_optimum, optimizer_inpaint
Example #25
    def __init__(self, opt, num_classes, source_train_ds, source_test_ds,
                 target_train_ds, mean, std):
        super().__init__(opt, num_classes, source_train_ds, source_test_ds)

        self.target_train_ds = target_train_ds
        self.mean = mean
        self.std = std

        # Defining networks and optimizers

        self.generator = models.Generator(opt, num_classes)
        self.discriminator = models.Discriminator(opt, num_classes)

        # Weight initialization
        self.generator.apply(utils.weights_init)
        self.discriminator.apply(utils.weights_init)

        # Defining loss criterions
        self.criterion_c = nn.CrossEntropyLoss()
        self.criterion_s = nn.BCELoss()

        if opt.gpu >= 0:
            self.discriminator.cuda()
            self.generator.cuda()
            self.criterion_c.cuda()
            self.criterion_s.cuda()

        # Defining optimizers
        self.optimizer_discriminator = optim.Adam(
            self.discriminator.parameters(),
            lr=opt.lr,
            betas=(opt.beta1, 0.999))
        self.optimizer_generator = optim.Adam(self.generator.parameters(),
                                              lr=opt.lr,
                                              betas=(opt.beta1, 0.999))

        # Other variables
        self.real_label_val = 1
        self.fake_label_val = 0
Example #26
    gen_input_nc = image_nc

    # Define what device we are using
    print("CUDA Available: ",torch.cuda.is_available())
    device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")

    # load the pretrained model
    pretrained_model = "./MNIST_target_model.pth"
    target_model = MNIST_target_net().to(device)
    target_model.load_state_dict(torch.load(pretrained_model))
    target_model.eval()

    # load the generator of adversarial examples
    pretrained_generator_path = './models/netG.pth.tar'
    pretrained_G = models.Generator(gen_input_nc, image_nc).to(device)
    pretrained_G.load_state_dict(torch.load(pretrained_generator_path))
    pretrained_G.eval()

    # test adversarial examples in MNIST training dataset
    mnist_dataset = torchvision.datasets.MNIST('./dataset', train=True, transform=transforms.ToTensor(), download=True)
    train_dataloader = DataLoader(mnist_dataset, batch_size=batch_size, shuffle=False, num_workers=1)
    num_correct = 0
    num_all = 0
    for i, data in enumerate(train_dataloader, 0):
        test_img, test_label = data
        test_img, test_label = test_img.to(device), test_label.to(device)
        perturbation = pretrained_G(test_img)
        perturbation = torch.clamp(perturbation, -0.3, 0.3)
        adv_img = perturbation + test_img
        adv_img = torch.clamp(adv_img, 0, 1)
Example #27
                        help='attributes to learn')
    parser.add_argument('--img_size', dest='img_size', type=int, default=128)
    parser.add_argument('--batch_size', dest='batch_size', type=int, default=1)
    parser.add_argument('--experiment_name',
                        dest='experiment_name',
                        default='stgan_128')

    args = parser.parse_args()
    # model
    atts = args.atts
    n_att = len(atts)
    img_size = args.img_size
    batch_size = args.batch_size
    experiment_name = args.experiment_name

    Gen = models.Generator()
    Dis = models.Discriminator(n_att)
    Enc = models.Encoder()
    Stu = models.Stu()

    x = tf.ones(shape=[2, 128, 128, 3], dtype=tf.float32)
    a = tf.ones(shape=[2, 13], dtype=tf.float32)

    z = Enc(x)
    z_stu = Stu(z, a)
    x_fake = Gen(z_stu, a - a)
    d, att = Dis(x)

    lr = tf.Variable(initial_value=0., trainable=False)
    g_opt = tf.optimizers.Adam(lr, beta_1=0., beta_2=0.99)
    d_opt = tf.optimizers.Adam(lr, beta_1=0., beta_2=0.99)
Example #28
        eps = utils.sample_z_motion(16, dim_z_motion)

        #store
        fc_path = os.path.join(fc_path, 'eps.pth')
        torch.save(eps, fc_path)
    else:
        fc_path = os.path.join(fc_path, 'eps.pth')

    return torch.load(fc_path), fc_path


fc_dir = './fc_dir/'

## Initialize Generator, RNN, and latent codes

generator = models.Generator(ngpu, z_dim, ngf, ndf, nc)
generator = generator.cuda()

gru = models.GRU(dim_z_motion, 500, gpu=True)
gru.initWeight()
gru = gru.cuda()

z_c = utils.sample_z_content(dim_z_content)

## Start training

for ep in range(num_epoch):

    #Random shuffle data_folder
    np.random.shuffle(data_folders)
Example #29
    os.makedirs(os.path.join(args.output_dir, 'checkpoints'))

if not os.path.isdir(os.path.join(args.output_dir, 'samples')):
    os.makedirs(os.path.join(args.output_dir, 'samples'))

# pickle to avoid encoding errors with json
with open(os.path.join(args.output_dir, 'charmap.pickle'), 'wb') as f:
    pickle.dump(charmap, f)

with open(os.path.join(args.output_dir, 'inv_charmap.pickle'), 'wb') as f:
    pickle.dump(inv_charmap, f)

real_inputs_discrete = tf.placeholder(tf.int32,
                                      shape=[args.batch_size, args.seq_length])
real_inputs = tf.one_hot(real_inputs_discrete, len(charmap))
fake_inputs = models.Generator(args.batch_size, args.seq_length,
                               args.layer_dim, len(charmap))
fake_inputs_discrete = tf.argmax(fake_inputs,
                                 fake_inputs.get_shape().ndims - 1)

disc_real = models.Discriminator(real_inputs, args.seq_length, args.layer_dim,
                                 len(charmap))
disc_fake = models.Discriminator(fake_inputs, args.seq_length, args.layer_dim,
                                 len(charmap))

disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
gen_cost = -tf.reduce_mean(disc_fake)

# WGAN lipschitz-penalty
alpha = tf.random_uniform(shape=[args.batch_size, 1, 1], minval=0., maxval=1.)

differences = fake_inputs - real_inputs
Example #30
print(opt)

# check whether a GPU is available
cuda = torch.cuda.is_available()
print("Is a GPU available? (True/False)  ----> ", cuda)

if cuda:
    gpu_id = input('Enter the index of the GPU to use: ')
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
    device = torch.device('cuda:' + gpu_id)
else:
    device = torch.device('cpu')

# create the network instances
# build the generator network
torch.manual_seed(opt.generator_seed)
generator = models.Generator(n_filter=opt.n_filter,
                             generator_kernel_size=opt.generator_kernel_size,
                             p=opt.p,
                             q=opt.q)
print(generator)

# build the discriminator network
torch.manual_seed(opt.discriminator_seed)
discriminator = models.Discriminator(
    q=opt.q, discriminator_hidden_unit=opt.discriminator_hidden_unit)
print(discriminator)

# create synthetic data from a nonlinear neural-network model
mseed = 0
n_unit1 = 16
n_unit2 = 16
sigma_ = 2
seed = 10