Example #1
    def __init__(self, rb=9):
        # rb: number of residual blocks per generator (9 is the usual
        # CycleGAN setting); lr, betas and device are assumed to be
        # module-level globals in the original script
        self.gen_x = Generator(rb).to(device)
        self.gen_y = Generator(rb).to(device)
        self.dis_x = Discriminator().to(device)
        self.dis_y = Discriminator().to(device)
        self.fake_x_buffer = ImageBuffer()
        self.fake_y_buffer = ImageBuffer()
        self.crit = nn.MSELoss()  # least-squares GAN loss
        self.l1 = torch.nn.L1Loss()  # cycle-consistency / identity loss
        self.optimizer_gen = torch.optim.Adam(list(self.gen_x.parameters()) +
                                              list(self.gen_y.parameters()),
                                              lr=lr,
                                              betas=betas)
        self.optimizer_dis = torch.optim.Adam(list(self.dis_x.parameters()) +
                                              list(self.dis_y.parameters()),
                                              lr=lr,
                                              betas=betas)
        self.scaler_gen = torch.cuda.amp.GradScaler()
        self.scaler_dis = torch.cuda.amp.GradScaler()
        self.lr_dis = None
        self.lr_gen = None

        self.gen_y.apply(self.init_weights)
        self.gen_x.apply(self.init_weights)
        self.dis_x.apply(self.init_weights)
        self.dis_y.apply(self.init_weights)
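
Example #1 leaves ImageBuffer undefined. A minimal sketch of the usual CycleGAN-style image pool it suggests; the class body, the 50-image capacity, and the __call__ interface are all assumptions, not code from the original:

import random

class ImageBuffer:
    def __init__(self, capacity=50):
        self.capacity = capacity
        self.images = []

    def __call__(self, image):
        # With a full pool, return a stored image half the time and
        # keep the new one; this decorrelates the discriminator's
        # fake batches, as in the CycleGAN paper.
        if len(self.images) < self.capacity:
            self.images.append(image)
            return image
        if random.random() < 0.5:
            idx = random.randrange(self.capacity)
            old = self.images[idx]
            self.images[idx] = image
            return old
        return image
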
Example #2
    def __init__(self,
                 batch_size=64,
                 noise_vector_size=100,
                 num_epochs=1,
                 lr=0.0002,
                 beta1=0.5):
        self.device = torch.device("cuda:0" if (
            torch.cuda.is_available()) else "cpu")
        self.data_provider = Data_Provider(batch_size)
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.netG = Generator(noise_vector_size,
                              self.data_provider.num_ingredients).to(
                                  self.device)
        self.netD = Discriminator(self.data_provider.num_ingredients).to(
            self.device)

        self.criterion = nn.BCELoss()
        self.fixed_noise = torch.randn(batch_size,
                                       noise_vector_size,
                                       device=self.device)
        self.noise_vector_size = noise_vector_size
        self.real_label = 1
        self.fake_label = 0

        self.optimizerD = optim.Adam(self.netD.parameters(),
                                     lr=lr,
                                     betas=(beta1, 0.999))
        self.optimizerG = optim.Adam(self.netG.parameters(),
                                     lr=lr,
                                     betas=(beta1, 0.999))

        self.recipe_list = []
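
Example #2 stops after setup. A hedged sketch of one training step with these attributes, assuming netD returns one probability per sample and netG takes a flat noise vector (neither is shown in the original):

    def train_step(self, real_data):
        # --- Discriminator: real batch ---
        self.netD.zero_grad()
        b = real_data.size(0)
        label = torch.full((b,), self.real_label,
                           dtype=torch.float, device=self.device)
        err_real = self.criterion(self.netD(real_data).view(-1), label)
        err_real.backward()

        # --- Discriminator: fake batch ---
        noise = torch.randn(b, self.noise_vector_size, device=self.device)
        fake = self.netG(noise)
        label.fill_(self.fake_label)
        err_fake = self.criterion(self.netD(fake.detach()).view(-1), label)
        err_fake.backward()
        self.optimizerD.step()

        # --- Generator: try to fool the discriminator ---
        self.netG.zero_grad()
        label.fill_(self.real_label)
        err_g = self.criterion(self.netD(fake).view(-1), label)
        err_g.backward()
        self.optimizerG.step()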
Example #3
    def __init__(self):
        self.netG = Generator().to(device)
        self.netD = Discriminator().to(device)
        self.netG.apply(self.weights_init)
        self.netD.apply(self.weights_init)
        self.fixed_noise = torch.randn(16, nz, 1, 1, device=device)
        self.optimizerD = optim.Adam(self.netD.parameters(), lr=lr, betas=betas)
        self.optimizerG = optim.Adam(self.netG.parameters(), lr=lr, betas=betas)
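
Example #3 applies self.weights_init without defining it. A conventional DCGAN initializer, written as a staticmethod since it is reached through self (compare weights_init_normal in Example #7):

    @staticmethod
    def weights_init(m):
        # N(0, 0.02) for conv layers, N(1, 0.02) with zero bias for
        # batch-norm layers: the standard DCGAN scheme
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.normal_(m.weight.data, 0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)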
Example #4
    def __init__(self, args):
        super(Trainer, self).__init__()
        for k, v in vars(args).items():
            setattr(self, k, v)
        self.args = args
        self.data_path = './origin_data/' + self.dataset + '/'

        self.train_tasks = json.load(open(self.data_path + 'train_tasks.json'))
        self.rel2id = json.load(open(self.data_path + 'relation2ids'))

        # Generate the relation matrix according to word embeddings and TFIDF
        if self.generate_text_embedding:
            if self.dataset == "NELL":
                NELL_text_embedding(self.args)
            else:
                raise AttributeError("wrong dataset name!")

        rela_matrix = np.load(self.data_path + 'rela_matrix.npz')['relaM']
        print('##LOADING RELATION MATRIX##')
        self.rela_matrix = rela_matrix.astype('float32')

        self.ent2id = json.load(open(self.data_path + 'entity2id'))

        print('##LOADING CANDIDATES ENTITIES##')
        self.rel2candidates = json.load(
            open(self.data_path + 'rel2candidates_all.json'))

        # load answer dict
        self.e1rel_e2 = json.load(open(self.data_path + 'e1rel_e2_all.json'))

        noises = torch.randn(self.test_sample, self.noise_dim).cuda()
        self.test_noises = 0.1 * noises

        self.meta = not self.no_meta
        self.label_num = len(self.train_tasks.keys())

        self.rela2label = dict()
        rela_sorted = sorted(list(self.train_tasks.keys()))
        for i, rela in enumerate(rela_sorted):
            self.rela2label[rela] = int(i)

        print('##LOADING SYMBOL ID AND SYMBOL EMBEDDING')
        self.load_embed()
        self.num_symbols = len(self.symbol2id.keys()) - 1  # last id is reserved for padding
        self.pad_id = self.num_symbols

        print('##DEFINE FEATURE EXTRACTOR')
        self.Extractor = Extractor(self.embed_dim,
                                   self.num_symbols,
                                   embed=self.symbol2vec)
        self.Extractor.cuda()
        self.Extractor.apply(weights_init)
        self.E_parameters = filter(lambda p: p.requires_grad,
                                   self.Extractor.parameters())
        self.optim_E = optim.Adam(self.E_parameters, lr=self.lr_E)
        #self.scheduler = optim.lr_scheduler.MultiStepLR(self.optim_E, milestones=[50000], gamma=0.5)

        print('##DEFINE GENERATOR')
        self.Generator = Generator(self.args)
        self.Generator.cuda()
        self.Generator.apply(weights_init)
        self.G_parameters = filter(lambda p: p.requires_grad,
                                   self.Generator.parameters())
        self.optim_G = optim.Adam(self.G_parameters,
                                  lr=self.lr_G,
                                  betas=(0.5, 0.9))
        self.scheduler_G = optim.lr_scheduler.MultiStepLR(self.optim_G,
                                                          milestones=[4000],
                                                          gamma=0.2)

        print('##DEFINE DISCRIMINATOR')
        self.Discriminator = Discriminator()
        self.Discriminator.cuda()
        self.Discriminator.apply(weights_init)
        self.D_parameters = filter(lambda p: p.requires_grad,
                                   self.Discriminator.parameters())
        self.optim_D = optim.Adam(self.D_parameters,
                                  lr=self.lr_D,
                                  betas=(0.5, 0.9))
        self.scheduler_D = optim.lr_scheduler.MultiStepLR(self.optim_D,
                                                          milestones=[20000],
                                                          gamma=0.2)

        self.num_ents = len(self.ent2id.keys())

        print('##BUILDING CONNECTION MATRIX')
        degrees = self.build_connection(max_=self.max_neighbor)
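
The two MultiStepLR schedulers above cut the learning rate by gamma once the step count passes a milestone, and must be stepped after their optimizer. A self-contained toy check of that behavior (the starting lr is an assumption):

import torch
from torch import nn, optim

opt = optim.Adam([nn.Parameter(torch.zeros(1))], lr=1e-4, betas=(0.5, 0.9))
sched = optim.lr_scheduler.MultiStepLR(opt, milestones=[4000], gamma=0.2)
for _ in range(4001):
    opt.step()        # scheduler steps after the optimizer
    sched.step()
print(opt.param_groups[0]['lr'])  # 2e-05 once past milestone 4000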
Example #5
    shuffle=False,
    **kwargs)

train_loader_B = mnist_triplet_train_loader
test_loader_B = mnist_triplet_test_loader
train_loader_S = mnist_mini_triplet_train_loader
test_loader_S = mnist_mini_triplet_test_loader

margin = 1.
embedding_net_B = MLP_Embedding()  # define network for big datasets
triplet_net_B = TripletNet(embedding_net_B)
embedding_net_S = MLP_Embedding()  # define network for small datasets
triplet_net_S = TripletNet(embedding_net_S)

layer_size = (256, 16)
G = Generator(layer_size)
D = Discriminator(layer_size)
# define hooks
# h_B = embedding_net_B.fc2.register_backward_hook(hook_B)
# h_S = embedding_net_S.fc2.register_backward_hook(hook_S)
if cuda:
    triplet_net_S.cuda()
    triplet_net_B.cuda()
    G.cuda()
    D.cuda()

loss_fn_S = TripletLoss(margin)
loss_fn_B = TripletLoss(margin)
lr = 1e-3
optim_B = optim.Adam(triplet_net_B.parameters(), lr=lr)
optim_S = optim.Adam(triplet_net_S.parameters(), lr=lr)
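
TripletLoss is not shown in Example #5. A minimal margin-based version over Euclidean distances, matching margin = 1. above (the mean reduction is an assumption):

import torch.nn.functional as F
from torch import nn

class TripletLoss(nn.Module):
    def __init__(self, margin):
        super().__init__()
        self.margin = margin

    def forward(self, anchor, positive, negative):
        # hinge on the distance gap: max(0, d(a,p) - d(a,n) + margin)
        d_ap = F.pairwise_distance(anchor, positive)
        d_an = F.pairwise_distance(anchor, negative)
        return F.relu(d_ap - d_an + self.margin).mean()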
Example #6
import torch
from torch import nn
from torch.autograd import Variable

# Load data -- hardcoded
data_path = "NEED PATH"
data_name = "NEED NAME"
batch_size = 250
loader = Loader(data_path, batch_size)
data_loader = loader.get_loader(loader.get_dataset())
num_batches = len(data_loader)

# Create network instances
discriminator = Discriminator()
discriminator.apply(Trainer.init_weights)
generator = Generator()
generator.apply(Trainer.init_weights)

# Select device and move networks to the GPU if available
device = "cpu"
if torch.cuda.is_available():
    print("cuda available")
    device = "cuda:0"
    discriminator.cuda()
    generator.cuda()

# Optimizers and loss function
net_trainer = Trainer(nn.BCELoss(), device)
net_trainer.create_optimizers(discriminator.parameters(), generator.parameters())

# Number of epochs -- hardcoded
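
Example #6 cuts off at the epoch count, and the Trainer it uses is not shown. A hedged sketch of just the two members called above; the Adam hyperparameters are assumptions (DCGAN defaults):

from torch import nn, optim

class Trainer:
    def __init__(self, loss_fn, device):
        self.loss_fn = loss_fn
        self.device = device

    @staticmethod
    def init_weights(m):
        # same scheme as weights_init_normal in Example #7
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.normal_(m.weight.data, 0.0, 0.02)
        elif classname.find('BatchNorm') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    def create_optimizers(self, d_params, g_params, lr=2e-4, beta1=0.5):
        self.optim_d = optim.Adam(d_params, lr=lr, betas=(beta1, 0.999))
        self.optim_g = optim.Adam(g_params, lr=lr, betas=(beta1, 0.999))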
Example #7
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, )),
])

to_pil_image = transforms.ToPILImage()

train_data = datasets.MNIST(root='../input/data',
                            train=True,
                            download=False,
                            transform=transform)
train_loader = DataLoader(train_data, batch_size=opt.batch_size, shuffle=True)

from Networks import Generator
from Networks import Discriminator

generator = Generator(opt.latent_dim, opt.img_size, opt.channels).to(device)
discriminator = Discriminator(opt.img_size, opt.channels).to(device)

print(generator, discriminator)


def weights_init_normal(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find("BatchNorm2d") != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
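
Example #7 ends before the initializer is used; the customary next step would be:

generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)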

Example #8
])

to_pil_image = transforms.ToPILImage()

train_data = datasets.MNIST(
    root='../input/data',
    train=True,
    download=False,
    transform=transform
)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

from Networks import Generator
from Networks import Discriminator

generator = Generator(nz).to(device)
discriminator = Discriminator().to(device)

print(discriminator)
print(generator)

# optimizers
optim_g = optim.Adam(generator.parameters(), lr=0.0002)
optim_d = optim.Adam(discriminator.parameters(), lr=0.0002)

# loss function
criterion = nn.BCELoss()

losses_g = []  # to store generator loss after each epoch
losses_d = []  # to store discriminator loss after each epoch
images = []  # to store images generated by the generator
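
These lists are filled once per epoch. A hedged sketch of the end-of-epoch bookkeeping, where epoch_loss_g, epoch_loss_d, and the flat 784-dim generator output are assumptions not shown in the snippet:

from torchvision.utils import make_grid

# after each epoch's batch loop (epoch_loss_* accumulated inside it)
losses_g.append(epoch_loss_g / len(train_loader))
losses_d.append(epoch_loss_d / len(train_loader))
with torch.no_grad():
    fake = generator(torch.randn(64, nz, device=device)).cpu()
images.append(make_grid(fake.view(-1, 1, 28, 28), normalize=True))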