def __init__(self, n_level):
    super(LPAGAN, self).__init__()
    self.n_level = n_level
    # one generator/discriminator pair per pyramid level
    self.Generator = []
    self.Discriminator = []
    for i in range(n_level):
        g = generator.Generator()
        g.cuda()
        d = discriminator.Discriminator()
        d.cuda()
        self.Generator.append(g)
        self.Discriminator.append(d)
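
Storing the submodules in plain Python lists hides them from nn.Module bookkeeping, so parameters(), state_dict(), and .cuda() on the parent module will not see them. A minimal alternative sketch, assuming torch.nn is imported as nn:

# registering the per-level networks in ModuleLists lets
# .parameters(), .cuda(), and .state_dict() traverse every level
self.Generator = nn.ModuleList(
    generator.Generator() for _ in range(n_level))
self.Discriminator = nn.ModuleList(
    discriminator.Discriminator() for _ in range(n_level))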
Example #2
import numpy as np
import torch


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# custom weights initialization called on netG and netD
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
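
Direct .data access bypasses autograd bookkeeping; a functionally equivalent sketch using the torch.nn.init helpers (assumes torch.nn is imported as nn):

def weights_init_v2(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight, 1.0, 0.02)
        nn.init.constant_(m.bias, 0)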
        

generator_ = generator.Generator(args["nz"], args["ngf"], args["nc"], args["ngpu"]).to(device)
discriminator_ = discriminator.Discriminator(args["nc"], args["ndf"]).to(device)

generator_.apply(weights_init)
discriminator_.apply(weights_init)

criterion = args['loss_criterion']

params_gen = list(generator_.parameters())
params_dis = list(discriminator_.parameters())

optimizer_gen = torch.optim.Adam(params_gen, lr=args['learning_rate_gen'], betas=(args['beta'], 0.999))
optimizer_dis = torch.optim.Adam(params_dis, lr=args['learning_rate_dis'], betas=(args['beta'], 0.999))


d_stats_manager, g_stats_manager = nt.StatsManager(), nt.StatsManager()
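
For context, a minimal single-batch training step in the standard DCGAN recipe (real_batch is a hypothetical data tensor; assumes criterion is nn.BCELoss):

real = real_batch.to(device)  # hypothetical batch from a dataloader
b_size = real.size(0)
noise = torch.randn(b_size, args["nz"], 1, 1, device=device)

# discriminator step: push real scores toward 1, fake scores toward 0
optimizer_dis.zero_grad()
err_real = criterion(discriminator_(real).view(-1),
                     torch.ones(b_size, device=device))
fake = generator_(noise)
err_fake = criterion(discriminator_(fake.detach()).view(-1),
                     torch.zeros(b_size, device=device))
(err_real + err_fake).backward()
optimizer_dis.step()

# generator step: push the discriminator's fake scores toward 1
optimizer_gen.zero_grad()
err_gen = criterion(discriminator_(fake).view(-1),
                    torch.ones(b_size, device=device))
err_gen.backward()
optimizer_gen.step()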
Example #3
# seed the RNGs for reproducibility
random.seed(cfg['seed'])
torch.manual_seed(cfg['seed'])
torch.cuda.manual_seed(cfg['seed'])

# Set dataset and dataloader
dataset, dataloader = None, None
if cfg['dataset'] == 'celeba':
    dataset, dataloader = celeba(cfg['batch_size'], cfg['num_workers'])
elif cfg['dataset'] == 'paintings':
    dataset, dataloader = paintings(cfg['batch_size'], cfg['num_workers'])
elif cfg['dataset'] == 'mnist':
    dataset, dataloader = mnist(cfg['batch_size'], cfg['num_workers'])
else:
    raise ValueError("Dataset specified in config/config.json is not implemented.")

netG = generator.Generator(cfg['ngpu'], cfg['nz'], cfg['ngf'], cfg['nc']).to(device)
netG.apply(weights_init)

netD = discriminator.Discriminator(cfg['ngpu'], cfg['nc'], cfg['ndf']).to(device)
netD.apply(weights_init)

# Example: x = (9 * 5) + (6 / 3) + (4 * 2) - (18 / 6 * 4)
# the four products/quotients are independent, so they can be computed in parallel;
# the additions and subtractions are then applied in sequence.

# define loss
# Binary Cross Entropy Loss
criterion = nn.BCELoss()

# set up the optimizers:
# Adam optimizers for the generator and the discriminator
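
The example ends before the optimizer code; a minimal completion sketch following the same Adam pattern as Example #2 (cfg['lr'] and cfg['beta1'] are assumed config keys, matching this example's cfg dict):

optimizerD = torch.optim.Adam(netD.parameters(),
                              lr=cfg['lr'], betas=(cfg['beta1'], 0.999))
optimizerG = torch.optim.Adam(netG.parameters(),
                              lr=cfg['lr'], betas=(cfg['beta1'], 0.999))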
Example #4
if not os.path.exists(noLocalD_dir):
    os.mkdir(noLocalD_dir)

compare_dir = os.path.join(args.output_dir, 'compare')
if not os.path.exists(compare_dir):
    os.mkdir(compare_dir)

### Initializing Networks ###
vgg = net.vgg
vgg.load_state_dict(torch.load(args.vgg))
vgg = nn.Sequential(*list(vgg.children())[:31])

decoder = net.decoder
decoder.load_state_dict(torch.load(args.decoder))

generator = G.Generator(vgg, decoder)
generator.eval()
generator.to(device)

mattingNet = M.MattingNetwork()
mattingNet.load_state_dict(torch.load(args.mask))
mattingNet.eval()
mattingNet.to(device)

print("-----Model Loaded!-----")

image_tf = test_transform()
prefix_names = glob.glob(os.path.join(args.input_dir, '*.jpg'))
# collect unique file-name prefixes (os.path.basename is portable across OSes)
prefix_names = set(
    os.path.basename(name).split('_')[0] for name in prefix_names)
Example #5
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Loss')
    plt.plot(durations_g.numpy(), 'r', label="g_loss")
    plt.plot(durations_d.numpy(), 'b', label="d_loss")
    plt.plot(durations_c.numpy(), 'g', label="c_loss")
    plt.legend()
    plt.pause(0.001)  # pause a bit so that plots are updated


use_cuda = torch.cuda.is_available()
# initialize the models
classifier = classifier.Classifier()
critic = discriminator.Discriminator(input_dims=params.d_input_dims,
                                     hidden_dims=params.d_hidden_dims,
                                     output_dims=params.d_output_dims)
generator = generator.Generator()

criterion = nn.CrossEntropyLoss()

optimizer_c = optim.Adam(classifier.parameters(),
                         lr=params.learning_rate,
                         betas=(params.beta1, params.beta2))
optimizer_d = optim.Adam(critic.parameters(),
                         lr=params.learning_rate,
                         betas=(params.beta1, params.beta2))
optimizer_g = optim.Adam(generator.parameters(),
                         lr=params.learning_rate,
                         betas=(params.beta1, params.beta2))
data_itr_src = get_data_iter("MNIST", train=True)
data_itr_tgt = get_data_iter("USPS", train=True)
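
For context, one critic update in the adversarial domain-adaptation loop this setup implies (assumes the iterators yield (images, labels) pairs and that generator embeds images from both domains into a shared feature space):

imgs_src, _ = next(data_itr_src)
imgs_tgt, _ = next(data_itr_tgt)

# label source features 0 and target features 1 for the critic
feat = torch.cat([generator(imgs_src), generator(imgs_tgt)], dim=0)
domain = torch.cat([torch.zeros(imgs_src.size(0)),
                    torch.ones(imgs_tgt.size(0))]).long()

optimizer_d.zero_grad()
loss_d = criterion(critic(feat.detach()), domain)
loss_d.backward()
optimizer_d.step()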
Example #6
device = torch.device("cuda:0" if (
    torch.cuda.is_available() and opt.ngpu > 0) else "cpu")
print("device", device)

### ### ### ### ### ### ### ###

### ! Setup Dataset ! ###
train_data, val_data, test_data = data_utils.load_dataset(opt)
train_generator = data_utils.data_generator(train_data, train=True, opt=opt)
# val_generator = data_utils.data_generator(val_data, train=False, opt=opt)
# test_dl_generator = data_utils.data_generator(test_data, train=False, dynamic_length=True, opt=opt)

### ### ### ### ### ### ### ###

### ! Setup Models ! ###
netG = generator.Generator(1, 64, (3, 3, 3), 2, device).to(device)

if (device.type == 'cuda' and (opt.ngpu > 1)):
    netG = nn.DataParallel(netG, list(range(opt.ngpu)))

netG.apply(weight_init.weight_init)

netD = discriminator.Discriminator().to(device)
if (device.type == 'cuda' and (opt.ngpu > 1)):
    netD = nn.DataParallel(netD, list(range(opt.ngpu)))
netD.apply(weight_init.weight_init)

### ### ### ### ### ### ### ###

### ! Setup Loss and Optimizer ! ###
# def loss_fn(outputs, )
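
The loss and optimizer definitions are not shown; a minimal sketch in the style of the earlier examples (opt.lr and opt.beta1 are assumed options; swap in the task's real loss if it is not BCE):

criterion = nn.BCELoss()
optimizerD = torch.optim.Adam(netD.parameters(),
                              lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = torch.optim.Adam(netG.parameters(),
                              lr=opt.lr, betas=(opt.beta1, 0.999))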
Example #7
    # restore model weights
    if restore is not None and os.path.exists(restore):
        net.load_state_dict(torch.load(restore))
        net.restored = True
        print("Restore model from: {}".format(os.path.abspath(restore)))

    # check if cuda is available
    if torch.cuda.is_available():
        cudnn.benchmark = True
        net.cuda()

    return net


# load dataset
src_data_loader = get_data_loader(params.src_dataset)
src_data_loader_test = get_data_loader(params.src_dataset, train=False)
tgt_data_loader = get_data_loader(params.tgt_dataset)
tgt_data_loader_test = get_data_loader(params.tgt_dataset, train=False)

# load models
classifier = read_model(classifier.Classifier(),
                        restore=params.c_model_restore)
generator = read_model(generator.Generator(), restore=params.g_model_restore)

# evaluate models
print("=== Evaluating models ===")
print(">>> on source domain <<<")
test(classifier, generator, src_data_loader, params.src_dataset)
print(">>> on target domain <<<")
test(classifier, generator, tgt_data_loader, params.tgt_dataset)
Example #8
        mod1 = utils.ClippedScore(lower_x=3, upper_x=6.5)
        mod2 = utils.ClippedScore(lower_x=10, upper_x=6.5)
        ths = [0.99] * 3
    mods = [mod1, mod1, mod2] if case == 'OBJ3' else [mod2, mod1, mod2]
    env = utils.Env(objs=objs, mods=mods, keys=keys, ths=ths)

root = 'output/%s_%s_%s_%s/' % (alg, case, scheme, time.strftime('%y%m%d_%H%M%S', time.localtime()))
    os.mkdir(root)
    copy2('models/rlearner.py', root)
    copy2('trainer.py', root)

    pr_path = 'output/lstm_chembl'
    ft_path = 'output/lstm_ligand'

    voc = utils.Voc(init_from_file="data/voc.txt")
    agent = generator.Generator(voc)
    agent.load_state_dict(torch.load(ft_path + '.pkg'))

    prior = generator.Generator(voc)
    prior.load_state_dict(torch.load(pr_path + '.pkg'))

    if alg == 'drugex':
        learner = rlearner.DrugEx(prior, env, agent)
    elif alg == 'organic':
        embed_dim = 128
        filter_size = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20]
        num_filters = [100, 200, 200, 200, 200, 100, 100, 100, 100, 100, 160, 160]
        prior = classifier.Discriminator(agent.voc.size, embed_dim, filter_size, num_filters)
        df = pd.read_table('data/LIGAND_%s_%s.tsv' % (z, case))
        df = df[df.DESIRE == 1]
        data = voc.encode([voc.tokenize(s) for s in df.Smiles])