Example No. 1
def main(args):
    if torch.cuda.is_available():
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    dloader, data_num = dataloader.getDataLoader(args.root_dir,
                                                 args.sub_dir,
                                                 batch=args.batch_size,
                                                 shuffle=False)

    G_MF = model.Generator().type(dtype)
    G_FM = model.Generator().type(dtype)

    weight_loc = 'epoch_' + str(args.epoch)
    G_MF.load_state_dict(torch.load('pretrained/' + weight_loc + '/G_MF.pkl'))
    G_FM.load_state_dict(torch.load('pretrained/' + weight_loc + '/G_FM.pkl'))

    if data_num % args.batch_size == 0:
        total_num = data_num // args.batch_size
    else:
        total_num = data_num // args.batch_size + 1

    for idx, (img, label) in enumerate(dloader):
        print('Processing : [%d / %d]' % (idx + 1, total_num))
        img = img.type(dtype)
        label = label.type(dtype)

        img = Variable(img)
        save_result(args.result_path, G_MF, G_FM, img, label, args.nrow,
                    str(idx))
Example No. 2
def main(args):
    dloader, dlen = data_loader(root=args.root,
                                batch_size='all',
                                shuffle=False,
                                img_size=128,
                                mode='test')

    if torch.cuda.is_available():
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    if args.epoch is not None:
        weight_name = '{epoch}-G.pkl'.format(epoch=args.epoch)
    else:
        weight_name = 'G.pkl'

    weight_path = os.path.join(args.weight_dir, weight_name)
    weight_path2 = os.path.join(args.weight2_dir, weight_name)

    G = model.Generator(z_dim=8).type(dtype)
    G.load_state_dict(torch.load(weight_path))
    G.eval()

    G2 = model.Generator(z_dim=8).type(dtype)
    G2.load_state_dict(torch.load(weight_path2))
    G2.eval()

    if not os.path.exists(args.result_dir):
        os.makedirs(args.result_dir)

    # For example, img_name = random_55.png
    if args.epoch is None:
        args.epoch = 'latest'
    #img_name = '{type}_{epoch}.png'.format(type=args.sample_type, epoch=args.epoch)
    img_name = '11.png'
    img_name2 = '12.png'

    img_path = os.path.join(args.result_dir, img_name)
    img_path2 = os.path.join(args.result_dir, img_name2)

    # Make latent code and images
    z = make_z(n=dlen,
               img_num=args.img_num,
               z_dim=8,
               sample_type=args.sample_type)

    result_img = make_img(dloader, G, z, img_size=128)
    result_img2 = make_img(dloader, G2, z, img_size=128)

    torchvision.utils.save_image(result_img,
                                 img_path,
                                 nrow=args.img_num + 1,
                                 padding=4)
    torchvision.utils.save_image(result_img2,
                                 img_path2,
                                 nrow=args.img_num + 1,
                                 padding=4)
Example No. 3
def train(opt, images):

    gennet_g = model.Generator()
    gennet_f = model.Generator()
    vgg = model.VGG()

    discrinet_c = model.Discriminator(3)
    discrinet_t = model.Discriminator(1)

    g_optimizer = optimizer.Adam(params=gennet_g.parameters(), lr=opt.lr)
    f_optimizer = optimizer.Adam(params=gennet_f.parameters(), lr=opt.lr)
    c_optimizer = optimizer.Adam(params=discrinet_c.parameters(), lr=opt.lr)
    t_optimizer = optimizer.Adam(params=discrinet_t.parameters(), lr=opt.lr)

    optimizers = dict()
    optimizers['g'] = g_optimizer
    optimizers['f'] = f_optimizer
    optimizers['c'] = c_optimizer
    optimizers['t'] = t_optimizer

    content_criterion = nn.L1Loss()
    texture_criterion = model.GANLoss()
    color_criterion = model.GANLoss()
    tv_criterion = model.TVLoss(1.0)

    num_samples = len(images)  # assumed dataset size; the original hard-coded 0, which skips the loop below

    for i in range(opt.epoches):

        for j in range(num_samples // opt.batch_size):

            batch = generate_batches(images, opt.batch_size)
            loss = train_step(vgg=vgg,
                              gennet_g=gennet_g,
                              gennet_f=gennet_f,
                              discrinet_c=discrinet_c,
                              discrinet_t=discrinet_t,
                              content_criterion=content_criterion,
                              color_criterion=color_criterion,
                              texture_criterion=texture_criterion,
                              tv_criterion=tv_criterion,
                              x=batch['x'],
                              y=batch['y'],
                              optimizers=optimizers)

            print("\nEpoch: %s\n" % i)
            print("Total Loss: %s \n" % loss['total_loss'])
            print("Color Loss: %s \n" % loss['color_loss'])
            print("Content Loss: %s \n" % loss['content_loss'])
            print("TV Loss: %s \n" % loss['tv_loss'])
            print("Texture Loss: %s \n" % loss['texture_loss'])
Example No. 4
def test(args):
    utils.cuda_devices([args.gpu_id])
    transform = transforms.Compose(
        [transforms.Resize((args.img_height,args.img_width)),
         transforms.ToTensor(),
         transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])

    dataset_dirs = utils.get_testdata_link(args.dataset_dir)

    a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
    b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)


    a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size, shuffle=False, num_workers=4)
    b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size, shuffle=False, num_workers=4)

    Gab = model.Generator()
    Gba = model.Generator()

    utils.cuda([Gab, Gba])


    try:
        ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_dir))
        Gab.load_state_dict(ckpt['Gab'])
        Gba.load_state_dict(ckpt['Gba'])
    except:
        print(' [*] No checkpoint!')


    """ run """
    a_real_test = Variable(next(iter(a_test_loader))[0], requires_grad=True)
    b_real_test = Variable(next(iter(b_test_loader))[0], requires_grad=True)
    a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])
            

    Gab.eval()
    Gba.eval()

    with torch.no_grad():
        a_fake_test = Gab(b_real_test)
        b_fake_test = Gba(a_real_test)
        a_recon_test = Gab(b_fake_test)
        b_recon_test = Gba(a_fake_test)

    pic = (torch.cat([a_real_test, b_fake_test, a_recon_test, b_real_test, a_fake_test, b_recon_test], dim=0).data + 1) / 2.0

    if not os.path.isdir(args.results_dir):
        os.makedirs(args.results_dir)

    torchvision.utils.save_image(pic, args.results_dir+'/sample.jpg', nrow=3)
Example No. 5
def main():
    global Epsilon
    # Vocabulary containing all of the tokens for SMILES construction
    voc = util.Voc("data/voc.txt")
    # File path of predictor in the environment
    environ_path = 'output/RF_cls_ecfp6.pkg'
    # file path of hidden states in RNN for initialization
    initial_path = 'output/net_p'
    # file path of hidden states of optimal exploitation network
    agent_path = 'output/net_e_%.2f_%.1f_%dx%d' % (Epsilon, Baseline,
                                                   BATCH_SIZE, MC)
    # file path of hidden states of exploration network
    explore_path = 'output/net_p'

    # Environment (predictor)
    environ = util.Environment(environ_path)
    # Agent (generator, exploitation network)
    agent = model.Generator(voc)
    agent.load_state_dict(torch.load(initial_path + '.pkg'))

    # exploration network
    explore = model.Generator(voc)
    explore.load_state_dict(torch.load(explore_path + '.pkg'))

    best_score = 0
    log = open(agent_path + '.log', 'w')

    for epoch in range(1000):
        print('\n--------\nEPOCH %d\n--------' % (epoch + 1))
        print('\nForward Policy Gradient Training Generator : ')
        Policy_gradient(agent, environ, explore=explore)
        seqs = agent.sample(1000)
        ix = util.unique(seqs)
        smiles, valids = util.check_smiles(seqs[ix], agent.voc)
        scores = environ(smiles)
        scores[valids == False] = 0
        unique = (scores >= 0.5).sum() / 1000
        # The model with best percentage of unique desired SMILES will be persisted on the hard drive.
        if best_score < unique:
            torch.save(agent.state_dict(), agent_path + '.pkg')
            best_score = unique
        print("Epoch+: %d average: %.4f valid: %.4f unique: %.4f" %
              (epoch, scores.mean(), valids.mean(), unique),
              file=log)
        for i, smile in enumerate(smiles):
            print('%f\t%s' % (scores[i], smile), file=log)

        # Learning rate exponential decay
        for param_group in agent.optim.param_groups:
            param_group['lr'] *= (1 - 0.01)
    log.close()
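
The learning-rate update at the bottom of the loop multiplies the rate by 0.99 every epoch, so the decay compounds. A quick arithmetic check (plain Python, not taken from the source) of how much of the initial rate survives:

# lr_n = lr_0 * 0.99 ** n after n epochs of `param_group['lr'] *= (1 - 0.01)`
for n in (100, 500, 1000):
    print(n, round(0.99 ** n, 6))   # ~0.366, ~0.00657, ~4.3e-05 of the initial rate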
Example No. 6
    def __init__(self, args, network_config):
        self.start_epoch = 0
        self.num_epochs = network_config["num_epochs"]
        self.batch_size = network_config["batch_size"]
        self.noisy_dir = args.noisy_dir
        self.clean_dir = args.clean_dir
        self.wandb = args.wandb
        if self.wandb is None:
            from torch.utils.tensorboard import SummaryWriter
            self.writer = SummaryWriter(args.log)
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        print(self.device)

        self.generator_noisy2clean = model.Generator().to(self.device)
        self.generator_clean2noisy = model.Generator().to(self.device)
        self.discriminator_noisy = model.Discriminator().to(self.device)
        self.discriminator_clean = model.Discriminator().to(self.device)
        cudnn.benchmark = True
        cudnn.fastest = True

        mse_loss = torch.nn.MSELoss()
        g_params = list(self.generator_noisy2clean.parameters()) + list(
            self.generator_clean2noisy.parameters())
        d_params = list(self.discriminator_noisy.parameters()) + list(
            self.discriminator_clean.parameters())

        self.generator_lr = network_config["generator_lr"]
        self.discriminator_lr = network_config["discriminator_lr"]
        self.decay_iter = network_config["decay_iter"]

        self.generator_lr_decay = self.generator_lr / self.decay_iter
        self.discriminator_lr_decay = self.discriminator_lr / self.decay_iter
        self.start_decay = network_config["start_decay"]
        self.cycle_loss_lambda = network_config["cycle_loss_lambda"]
        self.identity_loss_lambda = network_config["identity_loss_lambda"]
        self.identity_loss_stop = network_config["identity_loss_stop"]

        self.generator_optim = torch.optim.Adam(g_params,
                                                lr=self.generator_lr,
                                                betas=(0.5, 0.999))
        self.discriminator_optim = torch.optim.Adam(d_params,
                                                    lr=self.discriminator_lr,
                                                    betas=(0.5, 0.999))

        self.model_dir = args.model_save
        self.output_dir = args.output_save
        self.generator_loss = []
        self.discriminator_loss = []
Example No. 7
def main(opt):
    dataset = data.Dataset(dataset=opt.dataset, pool_size=opt.pool_size, sample_size=opt.sample_size)
    dataset.show_inf()
    feature_size, att_size = dataset.feature_size, dataset.att_size
    discriminator = model.Discriminator(feature_size, att_size).cuda()
    generator = model.Generator(feature_size, att_size).cuda()

    for epoch in range(opt.epochs):
        # d_loss = train.train_disciminator(discriminator, generator, dataset, opt.lr, opt.batch_size, epoch)
        # g_loss = train.train_generator(discriminator, generator, dataset, opt.lr, opt.batch_size, epoch)
        d_loss, g_loss = train.train_together(discriminator, generator, dataset, opt.lr, opt.batch_size, epoch)
        D_zsl_acc = test.compute_acc(discriminator, dataset, opt1='zsl', opt2='test_unseen')
        D_seen_acc = test.compute_acc(discriminator, dataset, opt1='gzsl', opt2='test_seen')
        D_unseen_acc = test.compute_acc(discriminator, dataset, opt1='gzsl', opt2='test_unseen')
        D_harmonic_mean = (2 * D_seen_acc * D_unseen_acc) / (D_seen_acc + D_unseen_acc)
        print("Epoch {}/{}...".format(epoch + 1, opt.epochs))
        print("D_Loss: {:.4f}".format(d_loss),
              "zsl_acc: {:.4f}".format(D_zsl_acc),
              "seen_acc: {:.4f}".format(D_seen_acc),
              "unseen_acc: {:.4f}".format(D_unseen_acc),
              "harmonic_mean: {:.4f}".format(D_harmonic_mean)
              )
        G_zsl_acc = test.compute_acc(generator, dataset, opt1='zsl', opt2='test_unseen')
        G_seen_acc = test.compute_acc(generator, dataset, opt1='gzsl', opt2='test_seen')
        G_unseen_acc = test.compute_acc(generator, dataset, opt1='gzsl', opt2='test_unseen')
        G_harmonic_mean = (2 * G_seen_acc * G_unseen_acc) / (G_seen_acc + G_unseen_acc)
        print("G_Loss: {:.4f}".format(g_loss),
              "zsl_acc: {:.4f}".format(G_zsl_acc),
              "seen_acc: {:.4f}".format(G_seen_acc),
              "unseen_acc: {:.4f}".format(G_unseen_acc),
              "harmonic_mean: {:.4f}".format(G_harmonic_mean)
              )
Example No. 8
    def __init__(self, config, args):
        self.config = config
        for k, v in args.__dict__.items():
            setattr(self.config, k, v)
        setattr(self.config, 'save_dir', '{}_log'.format(self.config.dataset))

        disp_str = ''
        for attr in sorted(dir(self.config), key=lambda x: len(x)):
            if not attr.startswith('__'):
                disp_str += '{} : {}\n'.format(attr,
                                               getattr(self.config, attr))
        sys.stdout.write(disp_str)
        sys.stdout.flush()

        self.labeled_loader, self.unlabeled_loader, self.dev_loader, self.special_set = data.get_cifar_loaders(
            config)

        self.dis = model.Discriminative(config).cuda()
        self.ema_dis = model.Discriminative(config, ema=True).cuda()
        self.gen = model.Generator(image_size=config.image_size,
                                   noise_size=config.noise_size).cuda()
        self.enc = model.Encoder(config.image_size,
                                 noise_size=config.noise_size,
                                 output_params=True).cuda()

        # self.dis_optimizer = optim.Adam(self.dis.parameters(), lr=config.dis_lr, betas=(0.5, 0.999))
        self.dis_optimizer = optim.SGD(self.dis.parameters(),
                                       lr=config.dis_lr,
                                       momentum=config.momentum,
                                       weight_decay=config.weight_decay,
                                       nesterov=config.nesterov)
        self.gen_optimizer = optim.Adam(self.gen.parameters(),
                                        lr=config.gen_lr,
                                        betas=(0.0, 0.999))
        self.enc_optimizer = optim.Adam(self.enc.parameters(),
                                        lr=config.enc_lr,
                                        betas=(0.0, 0.999))

        self.d_criterion = nn.CrossEntropyLoss()
        if config.consistency_type == 'mse':
            self.consistency_criterion = losses.softmax_mse_loss  # nn.MSELoss()    # (size_average=False)
        elif config.consistency_type == 'kl':
            self.consistency_criterion = losses.softmax_kl_loss  # nn.KLDivLoss()  # (size_average=False)
        else:
            pass
        self.consistency_weight = 0

        if not os.path.exists(self.config.save_dir):
            os.makedirs(self.config.save_dir)

        if self.config.resume:
            pass

        log_path = os.path.join(
            self.config.save_dir,
            '{}.FM+VI.{}.txt'.format(self.config.dataset, self.config.suffix))
        self.logger = open(log_path, 'w')
        self.logger.write(disp_str)

        print(self.dis)
Example No. 9
def test_roll():
    generator_config = config.Config()
    data = np.random.uniform(10,
                             100,
                             size=(10000, generator_config.rnn_cell_units))
    dataset = tf.data.Dataset.from_tensor_slices(data)
    dataset = dataset.batch(generator_config.batch_size)
    iterator = dataset.make_one_shot_iterator()
    gen = model.Generator(iterator, generator_config, matrixs=None)
    # token = gen.random_token
    # state = gen.random_state
    #
    # act,state = gen.act(token,state)

    output, token, states = gen.dynamic_decode()

    roll = model.RollOut(gen, generator_config)

    output = roll.roll(token, states)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # for i in range(1000):
        #     print(sess.run(act))
        #     # t = sess.run(token)
        #     # print(np.max(t))
        #     print(sess.run(token))
        o = sess.run(output)
        print(o[-1])
Example No. 10
    def step(self, images, z):
        self.generator = model.Generator(FLAGS.batch_size, FLAGS.gc_dim)
        self.G = self.generator.inference(z)

        # discriminator inference using true images
        self.discriminator_list = []
        self.D1_list = []
        self.D2_list = []
        D1_logits_list = []
        D2_logits_list = []
        D1_inter_list = []
        D2_inter_list = []
        self.samples = self.generator.sampler(z, reuse=True)
        for i in range(self.d_num):
            discriminator = model.Descriminator(FLAGS.batch_size, FLAGS.dc_dim)
            D1, D1_logits, D1_inter = discriminator.inference(images, num=i)
            # discriminator inference using sampling with G
            D2, D2_logits, D2_inter = discriminator.inference(self.G, reuse=True, num=i)
            self.D1_list.append(D1)
            self.D2_list.append(D2)
            D1_logits_list.append(D1_logits)
            D2_logits_list.append(D2_logits)
            D1_inter_list.append(D1_inter)
            D2_inter_list.append(D2_inter)
        return images, D1_logits_list, D2_logits_list, D1_inter_list, D2_inter_list
Example No. 11
def generate(agent_path,
             out,
             num=10000,
             environ_path='output/RF_cls_ecfp6.pkg'):
    """ Generating novel molecules with SMILES representation and
    storing them into hard drive as a data frame.

    Arguments:
        agent_path (str): the neural states file paths for the RNN agent (generator).
        out (str): file path for the generated molecules (and scores given by environment).
        num (int, optional): the total No. of SMILES that need to be generated. (Default: 10000)
        environ_path (str): the file path of the predictor for environment construction.
    """
    batch_size = 500
    df = pd.DataFrame()
    voc = util.Voc("data/voc.txt")
    agent = model.Generator(voc)
    agent.load_state_dict(torch.load(agent_path))
    for i in range(num // batch_size + 1):
        if i == 0 and num % batch_size == 0: continue
        batch = pd.DataFrame()
        samples = agent.sample(batch_size if i != 0 else num % batch_size)
        smiles, valids = util.check_smiles(samples, agent.voc)
        if environ_path is not None:
            # calculating the reward of each SMILES based on the environment (predictor).
            environ = util.Environment(environ_path)
            scores = environ(smiles)
            scores[valids == 0] = 0
            valids = scores
            batch['SCORE'] = valids
        batch['CANONICAL_SMILES'] = smiles
        df = pd.concat([df, batch])  # DataFrame.append was removed in pandas 2.0
    df.to_csv(out, sep='\t', index=False)
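
A minimal sketch of calling this helper; the agent checkpoint and output paths below are hypothetical placeholders (only the environ_path default comes from the signature above), and it assumes the listed files exist on disk:

if __name__ == '__main__':
    # Hypothetical paths: point these at a trained agent checkpoint and an output location.
    generate(agent_path='output/net_e.pkg',
             out='output/generated_smiles.tsv',
             num=2000,
             environ_path='output/RF_cls_ecfp6.pkg')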
Example No. 12
    def __init__(self):
        self.config = utils.Config('./config.yml')
        self.device = None
        self.summary = {}
        self.dumpPath = None
        self.sysConfig()
        self.setSummary()
        self.pipeRaw = self.loadDataset()
        self.pipeLen = self.pipeRaw['train'].__len__()
        self.pipe = None
        self.pipeIter()
        self.gen = model.Generator(self.config)
        self.dis = model.Discriminator(self.config)

        if self.config.GPU == -1 and self.config.CUDA:
            print('Using MultiGPU')
            self.gen = nn.parallel.DataParallel(self.gen).to(self.device)
            self.dis = nn.parallel.DataParallel(self.dis).to(self.device)  # the discriminator is trained too, so move it as well
        else:
            self.gen = self.gen.to(self.device)
            self.dis = self.dis.to(self.device)

        self.optGen = torch.optim.Adam(self.gen.parameters(),
                                       lr=eval(self.config.LR),
                                       betas=self.config.BETA)
        self.optDis = torch.optim.Adam(self.dis.parameters(),
                                       lr=eval(self.config.LR),
                                       betas=self.config.BETA)
Example No. 13
def main(args):
    resize = (96, 96)

    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)

    generator = model.Generator()
    chainer.serializers.load_hdf5(args.generator_model_file, generator)
    vectorizer = model.Vectorizer()
    chainer.serializers.load_hdf5(args.vectorizer_model_file, vectorizer)

    vectors = vectorize(args.target_imgs, vectorizer, resize, args.margin)

    for ss in range(args.step * (len(args.target_imgs) - 1) * 2):
        if ss >= args.step * (len(args.target_imgs) - 1):
            s = args.step * (len(args.target_imgs) - 1) * 2 - ss
        else:
            s = ss

        i = int(math.floor(s / args.step))
        res = float(s % args.step) / (args.step - 1)
        j = i + 1 if not s == args.step * (len(args.target_imgs) - 1) else i
        print(i, j, res)
        generated = generator(vectors[i] * (1 - res) + vectors[j] * (res),
                              test=True)
        save(generated.data,
             os.path.join(args.out_dir, "morphing{:04d}.png".format(ss)))
Example No. 14
    def __init__(self, config, args):
        self.config = config
        for k, v in list(args.__dict__.items()):
            setattr(self.config, k, v)
        setattr(self.config, 'save_dir', '{}_log'.format(self.config.dataset))

        disp_str = ''
        for attr in sorted(dir(self.config), key=lambda x: len(x)):
            if not attr.startswith('__'):
                disp_str += '{} : {}\n'.format(attr, getattr(self.config, attr))
        sys.stdout.write(disp_str)
        sys.stdout.flush()

        self.labeled_loader, self.unlabeled_loader, self.unlabeled_loader2, self.dev_loader, self.special_set = data.get_cifar_loaders(config)

        self.dis = model.Discriminative(config).cuda()
        self.gen = model.Generator(image_size=config.image_size, noise_size=config.noise_size).cuda()
        self.enc = model.Encoder(config.image_size, noise_size=config.noise_size, output_params=True).cuda()

        self.dis_optimizer = optim.Adam(self.dis.parameters(), lr=config.dis_lr, betas=(0.5, 0.999))
        self.gen_optimizer = optim.Adam(self.gen.parameters(), lr=config.gen_lr, betas=(0.0, 0.999))
        self.enc_optimizer = optim.Adam(self.enc.parameters(), lr=config.enc_lr, betas=(0.0, 0.999))

        self.d_criterion = nn.CrossEntropyLoss()

        if not os.path.exists(self.config.save_dir):
            os.makedirs(self.config.save_dir)

        log_path = os.path.join(self.config.save_dir, '{}.FM+VI.{}.txt'.format(self.config.dataset, self.config.suffix))
        self.logger = open(log_path, 'w')
        self.logger.write(disp_str)

        print(self.dis)
Example No. 15
def generate(dev, modelpath, gennum, savedir):
    if dev != '' and dev != 'cpu':
        os.environ['CUDA_VISIBLE_DEVICES'] = dev
    cuda = True if torch.cuda.is_available() and dev != 'cpu' else False
    imgh, latent_dim, channels = 320, 100, 1

    generator = model.Generator(imgh, latent_dim, channels)  # h, dim, c
    print(generator)
    pth = torch.load(modelpath, map_location=GetDevice(cuda, dev))
    generator.load_state_dict(pth['generator_model_state_dict'])
    generator.eval()  # set to eval mode; when resuming training, switch back to train mode
    print('generator load parameters done')

    for i in range(gennum):
        if (i + 1) % 100 == 0:
            print('generated fake images num', i + 1)
        z = torch.FloatTensor(np.random.normal(
            0, 1, (1, latent_dim)))  # noise of shape (batch_size, latent_dim)
        gen_imgs = generator(z)  # [1, 1, 320, 320] (b, c, h, w)
        save_image(gen_imgs.data,
                   os.path.join(savedir, 'generate_{}.png'.format(i)),
                   nrow=1,
                   normalize=True)
        # save_image(gen_imgs.data, os.path.join(savedir, 'generate.png'), nrow=4, normalize=True)
    print('generate done')
Example No. 16
def main(args):
    dloader, dlen = data_loader(root=args.root,
                                batch_size=1,
                                shuffle=False,
                                img_size=128,
                                mode='test')

    if torch.cuda.is_available():
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    if args.epoch is not None:
        weight_name = '{epoch}-G.pkl'.format(epoch=args.epoch)
    else:
        weight_name = 'G.pkl'

    weight_path = os.path.join(args.weight_dir, weight_name)
    G = model.Generator(z_dim=8).type(dtype)
    G.load_state_dict(torch.load(weight_path))
    G.eval()

    if not os.path.exists(args.result_dir):
        os.makedirs(args.result_dir)

    # For example, img_name = random_55.png
    if args.epoch is None:
        args.epoch = 'latest'

    filenames = get_files_name()

    for iters, (img, ground_truth, mask) in enumerate(dloader):
        img = util.var(img.type(dtype))
        #mask = util.var(mask.type(dtype))
        one = torch.ones([1, 3, 128, 128])
        one = util.var(one.type(dtype))

        for i in range(0, dlen):
            # img_ = img.unsqueeze(dim=0)

            #mask_ = mask[i].unsqueeze(dim=0)
            #mask_ = one - mask_

            # Make latent code and images
            z = make_z(img_num=4, z_dim=8)
            for j in range(4):
                z_ = z[j, :].unsqueeze(dim=0)
                out_img = G(img, z_)
                outs_img = out_img / 2 + 0.5

                img_name = '{filenames}_{style}.png'.format(
                    filenames=filenames[i], style=j)
                print(img_name)
                #mask_name = '{filenames}_{style}.png'.format(filenames = filenames[i], style = j)

                img_path = os.path.join(args.result_dir, img_name)
                #mask_path = os.path.join(args.mask_dir, mask_name)

                torchvision.utils.save_image(outs_img, img_path)
Example No. 17
def main(opt):
    dataset = data.Dataset(dataset=opt.dataset, pool_size=opt.pool_size, sample_size=opt.sample_size)
    dataset.show_inf()
    feature_size, att_size = dataset.feature_size, dataset.att_size
    discriminator = model.Discriminator(feature_size, att_size, opt.t1).cuda()
    generator = model.Generator(feature_size, att_size, opt.t2).cuda()
    train2.train(discriminator, generator, dataset, d_lr=opt.d_lr, g_lr=opt.g_lr,\
                 batch_size=opt.batch_size, alpha=opt.alpha, epochs=opt.epochs)
Example No. 18
def main(num_block, epochs_list, batch_size, is_train, is_continue, is_save) :

    #####################################
    # Set configuration values here; to be replaced with an argument parser later
    #####################################
    
    #path_image = os.path.join(os.getcwd(), 'train_image/')
    path_image = os.path.join(os.getcwd(), '../datasets/DogData/')
    path_model = os.path.join(os.getcwd(), 'save_model/')
    print(f' Path of Image : {path_image}')

    model_name = 'model.pth'

    # Exit if there is one block or fewer
    if num_block <= 1:
        print('Not enough blocks, terminating')
        return

    # Considered building the Generator and Discriminator with the requested number of blocks, but they are always created with the maximum (9)
    generator = model.Generator(batch_size, 9)
    discriminator = model.Discriminator(9)

    if torch.cuda.is_available():
        generator = generator.cuda()
        discriminator = discriminator.cuda()
    
    if is_continue :  
        file_model = os.path.join(path_model, model_name)

        if os.path.exists(file_model) :  
            model_dict = load_model(file_model)
            generator.load_state_dict(model_dict['generator'])
            discriminator.load_state_dict(model_dict['discriminator'])
    
    # Start training
    if is_train :
        train(num_block, generator, discriminator,
                 batch_size, epochs_list, path_image)
        print(f'Train End')

    if is_save :
        if not os.path.exists(path_model):
            os.mkdir(path_model)
        file_model = os.path.join(path_model, model_name)
        save_model(file_model, generator, discriminator)

    for i in range(5):
        ################################
        # Temporary generation test
        z = torch.rand(100)
        if torch.cuda.is_available() :
            z = z.cuda()

        image = generator(z, num_block).cpu().detach().numpy()[0]
        image = image.transpose((1,2,0))
        img = Image.fromarray(np.uint8(image*255))
        img.save(os.path.join('save_image/',f'save{i}.png'), format='png')
Example No. 19
def load_model_to_test(model_path):
    g = model.Generator()
    g.load_state_dict(torch.load(model_path))
    item, attr = get_test_data()
    g.to(device)
    item = item.to(device)
    attr = attr.to(device)
    item_user = g(attr)
    to_valuate(item, item_user)
Example No. 20
def main():
    voc = util.Voc(init_from_file="data/voc_b.txt")
    netR_path = 'output/rf_dis.pkg'
    netG_path = 'output/net_p'
    netD_path = 'output/net_d'
    agent_path = 'output/net_gan_%d_%d_%dx%d' % (SIGMA * 10, BL * 10,
                                                 BATCH_SIZE, MC)

    netR = util.Environment(netR_path)

    agent = model.Generator(voc)
    agent.load_state_dict(T.load(netG_path + '.pkg'))

    df = pd.read_table('data/CHEMBL251.txt')
    df = df[df['PCHEMBL_VALUE'] >= 6.5]
    data = util.MolData(df, voc)
    loader = DataLoader(data,
                        batch_size=BATCH_SIZE,
                        shuffle=True,
                        drop_last=True,
                        collate_fn=data.collate_fn)

    netD = model.Discriminator(VOCAB_SIZE, EMBED_DIM, FILTER_SIZE, NUM_FILTER)
    if not os.path.exists(netD_path + '.pkg'):
        Train_dis_BCE(netD, agent, loader, epochs=100, out=netD_path)
    netD.load_state_dict(T.load(netD_path + '.pkg'))

    best_score = 0
    log = open(agent_path + '.log', 'w')
    for epoch in range(1000):
        print('\n--------\nEPOCH %d\n--------' % (epoch + 1))
        print('\nPolicy Gradient Training Generator : ')
        Train_GAN(agent, netD, netR)

        print('\nAdversarial Training Discriminator : ')
        Train_dis_BCE(netD, agent, loader, epochs=1)

        seqs = agent.sample(1000)
        ix = util.unique(seqs)
        smiles, valids = util.check_smiles(seqs[ix], agent.voc)
        scores = netR(smiles)
        scores[valids == False] = 0
        unique = (scores >= 0.5).sum() / 1000
        if best_score < unique:
            T.save(agent.state_dict(), agent_path + '.pkg')
            best_score = unique
        print("Epoch+: %d average: %.4f valid: %.4f unique: %.4f" %
              (epoch, scores.mean(), valids.mean(), unique),
              file=log)
        for i, smile in enumerate(smiles):
            print('%f\t%s' % (scores[i], smile), file=log)

        for param_group in agent.optim.param_groups:
            param_group['lr'] *= (1 - 0.01)

    log.close()
Example No. 21
def GetModelAndLoss(device, opt):
    generator = model.Generator(opt.img_size_h, opt.latent_dim, opt.channels)  # generator
    generator.apply(model.weights_init_normal)

    discriminator = model.Discriminator(opt.img_size_h, opt.channels)  # discriminator
    discriminator.apply(model.weights_init_normal)

    adversarial_loss = torch.nn.BCELoss()  # loss

    return generator.to(device), discriminator.to(device), adversarial_loss.to(device)
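
A short usage sketch for this factory, assuming an `opt` namespace exposing `img_size_h`, `latent_dim`, `channels`, plus a hypothetical `batch_size`, and assuming the generator maps a noise batch to images while the discriminator returns a sigmoid validity score (as the same models are used in Example No. 15); it is a smoke test under those assumptions, not the repository's training loop:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
generator, discriminator, adversarial_loss = GetModelAndLoss(device, opt)

# One forward pass with random noise; shapes follow the assumptions stated above.
z = torch.randn(opt.batch_size, opt.latent_dim, device=device)
fake = generator(z)
validity = discriminator(fake)
loss = adversarial_loss(validity, torch.ones_like(validity))
print(fake.shape, loss.item())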
Example No. 22
def main():
    # Construction of the vocabulary
    voc = util.Voc("data/voc.txt")
    netP_path = 'output/net_pr'
    netE_path = 'output/net_ex'

    # Pre-training the RNN model with ZINC set
    prior = model.Generator(voc)
    if not os.path.exists(netP_path + '.pkg'):
        print('Exploitation network begins to be trained...')
        zinc = util.MolData("data/zinc_corpus.txt", voc, token='SENT')
        zinc = DataLoader(zinc,
                          batch_size=BATCH_SIZE,
                          shuffle=True,
                          drop_last=True,
                          collate_fn=zinc.collate_fn)
        prior.fit(zinc, out=netP_path)
        print('Exploitation network training is finished!')
    prior.load_state_dict(T.load(netP_path + '.pkg'))

    # Fine-tuning the RNN model with the A2AR set as the exploration strategy
    explore = model.Generator(voc)
    df = pd.read_table('data/chembl_corpus.txt').drop_duplicates(
        'CANONICAL_SMILES')
    valid = df.sample(BATCH_SIZE)
    train = df.drop(valid.index)
    explore.load_state_dict(T.load(netP_path + '.pkg'))

    # Training set and its data loader
    train = util.MolData(train, voc, token='SENT')
    train = DataLoader(train,
                       batch_size=BATCH_SIZE,
                       collate_fn=train.collate_fn)

    # Validation set and its data loader
    valid = util.MolData(valid, voc, token='SENT')
    valid = DataLoader(valid,
                       batch_size=BATCH_SIZE,
                       collate_fn=valid.collate_fn)

    print('Exploration network begins to be trained...')
    explore.fit(train, loader_valid=valid, out=netE_path, epochs=1000)
    print('Exploration network training is finished!')
Example No. 23
def main():
    args = get_args()
    Atest, Btest = data.train_dataset(args.dir, args.batch_size,
                                      args.image_size, 1)
    B_test_iter = iter(Btest)
    A_test_iter = iter(Atest)
    B_test = Variable(next(B_test_iter)[0])
    A_test = Variable(next(A_test_iter)[0])

    G_12 = model.Generator(64)
    G_21 = model.Generator(64)

    checkpoint = torch.load(args.state_dict)
    G_12.load_state_dict(checkpoint['G_12_state_dict'])
    G_21.load_state_dict(checkpoint['G_21_state_dict'])

    if torch.cuda.is_available():
        # move the test batches and both generators to the GPU
        A_test = A_test.cuda()
        B_test = B_test.cuda()
        G_12 = G_12.cuda()
        G_21 = G_21.cuda()

    G_12.eval()
    G_21.eval()

    generate_A_image = G_21(B_test.float())
    grid = vutils.make_grid(generate_A_image, nrow=8, normalize=True)
    vutils.save_image(grid, "generate_A_image.png")

    generate_B_image = G_12(A_test.float())
    grid = vutils.make_grid(generate_B_image, nrow=8, normalize=True)
    vutils.save_image(grid, "generate_B_image.png")

    loss = PSNR.PSNR()

    estimate_loss_generate_A = loss(generate_A_image, A_test)
    estimate_loss_generate_B = loss(generate_B_image, B_test)

    print(estimate_loss_generate_A)
    print(estimate_loss_generate_B)
Example No. 24
    def step(self, z):
        z_sum = tf.summary.histogram("z", z)

        self.generator = model.Generator(FLAGS.batch_size_v, FLAGS.gc_dim_v)
        self.G = self.generator.inference(z)

        # discriminator inference using true images
        self.discriminator = model.Descriminator(FLAGS.batch_size_v,
                                                 FLAGS.dc_dim_v)
        #self.D1, D1_logits = self.discriminator.inference(images)

        # discriminator inference using sampling with G
        self.samples = self.generator.sampler(z, reuse=True, trainable=False)
Example No. 25
def main(args):
    resize = (96, 96)
    generator = model.Generator()
    chainer.serializers.load_hdf5(args.generator_model_file, generator)
    vectorizer = model.Vectorizer()
    chainer.serializers.load_hdf5(args.vectorizer_model_file, vectorizer)

    vectors = morphing.vectorize(args.target_imgs, vectorizer, resize,
                                 args.margin)

    SIZE = 7
    result_img = numpy.zeros((resize[0] * SIZE, resize[1] * SIZE, 3),
                             dtype=numpy.uint8)

    c = vectors[4]
    print(args.target_imgs)
    for ix, iy, reverse_x, reverse_y in [
        (1, 0, False, False),
        (1, 2, False, True),
        (3, 2, True, False),
        (3, 0, True, True),
    ]:
        a, b = vectors[ix], vectors[iy]
        ylen = SIZE // 2 + 1
        for y in range(ylen):
            xlen = SIZE - y * 2
            for x in range(xlen):
                rx = (float(x) / (xlen - 1)) if x > 0 else 0
                ry = float(y) / (ylen - 1)
                generated = generator(a * (1 - rx) * (1 - ry) + b * (rx) *
                                      (1 - ry) + c * ry,
                                      test=True)
                generated_img = morphing.to_img(generated.data)
                print(generated_img.dtype)

                coordinate_x = resize[0] * (x + y)
                coordinate_y = resize[1] * y
                if reverse_x:
                    coordinate_x = result_img.shape[0] - coordinate_x - resize[
                        0]
                    coordinate_y = result_img.shape[1] - coordinate_y - resize[
                        1]

                if reverse_y:
                    coordinate_x, coordinate_y = coordinate_y, coordinate_x

                result_img[coordinate_x:coordinate_x + resize[0],
                           coordinate_y:coordinate_y +
                           resize[1]] = generated_img
    cv2.imwrite("morphing5.png", result_img)
Example No. 26
def testMain(pic_localLa, Path, target):
    save_file = target     # 'testp/'+ str(num)+' trainingWB.png'
    total = 0
    pic_local = test_dir + pic_localLa
    # print('pic_local:', pic_local)
    img = tf.io.read_file(pic_local)
    img = tf.image.decode_image(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    img = tf.image.rgb_to_yuv(img)

    try:
        gray, uv = tf.split(img, [1, 2], axis=2)
        x, y = gray.shape[0:2]
        # print('Loading model')
        # print(gray.shape)
        gen = model.Generator(input_shape=gray.shape)
        # print('Loading weights')
        gen.load_weights(Path)
        # print('Predicting...')

        pred = gen(tf.expand_dims(gray, axis=0))[0]
        pred_img = tf.concat([gray, pred], axis=2)
        pred_img = tf.image.yuv_to_rgb(pred_img)
        pred_img = tf.clip_by_value(pred_img, 0., 1.)
        pred_img = tf.image.convert_image_dtype(pred_img, tf.uint8)
        pred_data = tf.image.encode_png(pred_img)

        '''
        if total != 0:
            # print('Prediction done, result written to ' + save_file, ', image resolution', x, '*', y, '. Due to GPU limits the image was downscaled', total, 'times')
        else:
            pass
            # print('Prediction done, result written to ' + save_file, ', image resolution', x, '*', y)
        '''
        tf.io.write_file(save_file, pred_data)
        return 0, pic_localLa, x, y
    except Exception as e:
        # print(str(type(e)))
        if str(type(e)) == "<class 'tensorflow.python.framework.errors_impl.ResourceExhaustedError'>":
            total = total + 1
            # print('Error 1: not enough memory to allocate')
            # print('Error message:', e)
            # print('Trying to reduce the image resolution')
            # print(type(gray))
            return 1, pic_localLa, x, y
        else:
            # print('Unknown error')
            # print('Error message:', e)
            return -1, None, None, None
Example No. 27
def test_generator():
    generator_config = config.Config()
    data = np.random.randint(10,
                             100,
                             size=(10000, generator_config.rnn_cell_units))

    dataset = tf.data.Dataset.from_tensor_slices(data)
    dataset = dataset.batch(generator_config.batch_size)
    iterator = dataset.make_one_shot_iterator()
    gen = model.Generator(iterator, generator_config, matrixs=None)
    output, token, states = gen.dynamic_decode()
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(token))
        print(sess.run(states))
Example No. 28
def load_model(imsize, nz, nc, ndf, ngf, n_extra_layers, savepath):
	# load net models
	print(savepath)
	assert os.path.exists(savepath)
	netD_name = 'netD.pth'
	netG_name = 'netG.pth'

	print(netD_name, netG_name)
	netD = dcgan.Discriminator(imsize = imsize, nz=nz, nc=nc, ndf=ndf, n_extra_layers=n_extra_layers)
	netG = dcgan.Generator(imsize = imsize, nz=nz, nc=nc, ngf=ngf, n_extra_layers=n_extra_layers)

	netD.load_state_dict(torch.load(savepath + netD_name))
	netG.load_state_dict(torch.load(savepath + netG_name))

	losses = np.load(savepath + "losses.npy")
	return netD, netG, list(losses)
Example No. 29
def test():

    nrow = 4  # output a 4 * 4 grid

    if args.dataset == 'mnist':
        args.in_dim = 1
        args.out_dim = 1

    generator = model.Generator(args)

    if args.gpu:
        # no need to use multi gpu in testing phase
        generator = generator.cuda()

    assert os.path.isfile(os.path.join(ckpt_path, args.run_name + '.ckpt'))

    print('found ckpt file ' + os.path.join(ckpt_path, args.run_name + '.ckpt'))
    ckpt = torch.load(os.path.join(ckpt_path, args.run_name + '.ckpt'))
    generator.load_state_dict(ckpt['generator'])

    #input_noise = torch.from_numpy( np.random.normal(0,1,[ nrow**2 , args.dim_embed]).astype(np.float32) )

    if args.sample_idx is None:
        input_label = torch.from_numpy(
            np.random.randint(0, args.num_class, [nrow**2]))
    else:
        input_label = torch.from_numpy(
            np.array([int(args.sample_idx) for i in range(nrow**2)]))

    input_noise = np.random.normal(0, 1, [nrow**2, args.dim_embed]).astype(
        np.float32)
    class_onehot = np.zeros((nrow**2, args.num_class))
    class_onehot[np.arange(nrow**2), input_label] = 1
    input_noise[np.arange(nrow**2), :args.num_class] = class_onehot[np.arange(
        nrow**2)]
    input_noise = torch.from_numpy(input_noise)

    if args.gpu:
        input_noise = input_noise.cuda()
        input_label = input_label.cuda()

    generator.zero_grad()
    #fake = generator(input_noise , input_label)

    test_generator(generator, input_noise, input_label, nrow)
Example No. 30
    def step(self, z):
        z_sum = tf.summary.histogram("z", z)

        # generator
        self.generator = model.Generator(FLAGS.batch_size, FLAGS.gc_dim)
        # self.G = self.generator.inference(z)

        # sampler using generator
        self.samples = self.generator.sampler(z, reuse=False, trainable=False)

        # reverser
        self.reverser = model.Encoder(FLAGS.batch_size, FLAGS.dc_dim,
                                      FLAGS.z_dim)
        self.R1, R1_logits, R1_inter = self.reverser.inference(self.samples)
        R_sum = tf.summary.histogram("R", self.R1)
        # return images, D1_logits, D2_logits, G_sum, z_sum, d1_sum, d2_sum
        # return D2_logits, G_sum, z_sum, d1_sum, d2_sum
        return R1_logits, R1_inter, R_sum, z_sum