def __init__(self, vocab, args):
    """Build the DiffVAE encoder/decoder stack.

    Args:
        vocab: junction-tree cluster vocabulary (must provide ``size()``).
        args: config namespace with ``hidden_size``, ``rand_size``,
            ``depthT``, ``depthG``, ``share_embedding`` and ``use_molatt``.
    """
    super(DiffVAE, self).__init__()
    self.vocab = vocab
    self.hidden_size = hidden_size = args.hidden_size
    self.rand_size = rand_size = args.rand_size

    # Graph-level message passing networks (candidate scoring and molecule encoding).
    self.jtmpn = JTMPN(hidden_size, args.depthG)
    self.mpn = MPN(hidden_size, args.depthG)

    if args.share_embedding:
        # One embedding table shared between the tree encoder and the decoder.
        self.embedding = nn.Embedding(vocab.size(), hidden_size)
        self.jtnn = JTNNEncoder(hidden_size, args.depthT, self.embedding)
        self.decoder = JTNNDecoder(vocab, hidden_size, self.embedding, args.use_molatt)
    else:
        self.jtnn = JTNNEncoder(hidden_size, args.depthT, nn.Embedding(vocab.size(), hidden_size))
        self.decoder = JTNNDecoder(vocab, hidden_size, nn.Embedding(vocab.size(), hidden_size), args.use_molatt)

    self.A_assm = nn.Linear(hidden_size, hidden_size, bias=False)
    # reduction='sum' is the modern spelling of the deprecated size_average=False.
    self.assm_loss = nn.CrossEntropyLoss(reduction='sum')

    # rand_size // 2: integer division is required — `/` yields a float under
    # Python 3 and nn.Linear rejects non-int feature sizes. Half of the random
    # vector perturbs the tree branch (T_*), half the graph branch (G_*).
    self.T_mean = nn.Linear(hidden_size, rand_size // 2)
    self.T_var = nn.Linear(hidden_size, rand_size // 2)
    self.G_mean = nn.Linear(hidden_size, rand_size // 2)
    self.G_var = nn.Linear(hidden_size, rand_size // 2)

    # Fuse [hidden state ; sampled noise] back to hidden_size for each branch.
    self.B_t = nn.Sequential(nn.Linear(hidden_size + rand_size // 2, hidden_size), nn.ReLU())
    self.B_g = nn.Sequential(nn.Linear(hidden_size + rand_size // 2, hidden_size), nn.ReLU())
def __init__(self, vocab, hidden_size, latent_size, depth):
    """Build the property-predicting junction-tree VAE.

    Args:
        vocab: junction-tree cluster vocabulary (must provide ``size()``).
        hidden_size: width of encoder/decoder hidden states.
        latent_size: total latent width; split evenly between the tree
            and graph halves.
        depth: message-passing depth for both tree and graph networks.
    """
    super(JTPropVAE, self).__init__()
    self.vocab = vocab
    self.hidden_size = hidden_size
    self.latent_size = latent_size
    self.depth = depth

    # Embedding shared by the tree encoder and decoder.
    self.embedding = nn.Embedding(vocab.size(), hidden_size)
    self.jtnn = JTNNEncoder(vocab, hidden_size, self.embedding)
    self.jtmpn = JTMPN(hidden_size, depth)
    self.mpn = MPN(hidden_size, depth)
    # latent_size // 2: integer division — `/` yields a float under Python 3,
    # which nn.Linear (and the decoder's size argument) cannot accept.
    self.decoder = JTNNDecoder(vocab, hidden_size, latent_size // 2, self.embedding)

    # Half the latent encodes the tree, half the molecular graph.
    self.T_mean = nn.Linear(hidden_size, latent_size // 2)
    self.T_var = nn.Linear(hidden_size, latent_size // 2)
    self.G_mean = nn.Linear(hidden_size, latent_size // 2)
    self.G_var = nn.Linear(hidden_size, latent_size // 2)

    # Property regressor on top of the full latent vector.
    self.propNN = nn.Sequential(
        nn.Linear(self.latent_size, self.hidden_size),
        nn.Tanh(),
        nn.Linear(self.hidden_size, 1),
    )
    self.prop_loss = nn.MSELoss()
    # reduction='sum' is the modern spelling of the deprecated size_average=False.
    self.assm_loss = nn.CrossEntropyLoss(reduction='sum')
    self.stereo_loss = nn.CrossEntropyLoss(reduction='sum')
def __init__(self, vocab, hidden_size, latent_size, depthT, depthG):
    """Build the junction-tree VAE with a gene-expression MLP branch.

    Args:
        vocab: junction-tree cluster vocabulary (must provide ``size()``).
        hidden_size: width of encoder/decoder hidden states.
        latent_size: total latent width; internally halved because the
            tree and graph each get their own latent vector.
        depthT: tree message-passing depth.
        depthG: graph message-passing depth.
    """
    super(JTNNVAEMLP, self).__init__()
    self.vocab = vocab
    self.hidden_size = hidden_size
    # Tree and Mol each get half the latent. Integer division (`//`) is
    # required: `/` produces a float under Python 3 and nn.Linear rejects it.
    self.latent_size = latent_size = latent_size // 2

    self.jtnn = JTNNEncoder(hidden_size, depthT, nn.Embedding(vocab.size(), hidden_size))
    self.decoder = JTNNDecoder(vocab, hidden_size, latent_size, nn.Embedding(vocab.size(), hidden_size))

    self.jtmpn = JTMPN(hidden_size, depthG)
    self.mpn = MPN(hidden_size, depthG)

    self.A_assm = nn.Linear(latent_size, hidden_size, bias=False)
    # reduction='sum' is the modern spelling of the deprecated size_average=False.
    self.assm_loss = nn.CrossEntropyLoss(reduction='sum')

    self.T_mean = nn.Linear(hidden_size, latent_size)
    self.T_var = nn.Linear(hidden_size, latent_size)
    self.G_mean = nn.Linear(hidden_size, latent_size)
    self.G_var = nn.Linear(hidden_size, latent_size)
    self.T_hat_mean = nn.Linear(latent_size, latent_size)
    self.T_hat_var = nn.Linear(latent_size, latent_size)

    # New MLP: project the 978-dim gene-expression profile into latent space,
    # then fuse it with the tree latent.
    self.gene_exp_size = 978
    self.gene_mlp = nn.Linear(self.gene_exp_size, latent_size)
    self.tree_mlp = nn.Linear(latent_size + latent_size, latent_size)
def __init__(self, vocab, hidden_size, latent_size, depthT, depthG, loss_type='cos'):
    """Build the junction-tree model with a gene-expression matching head.

    Args:
        vocab: junction-tree cluster vocabulary (must provide ``size()``).
        hidden_size: width of encoder/decoder hidden states.
        latent_size: total latent width; internally halved because the
            tree and graph each get their own latent vector.
        depthT: tree message-passing depth.
        depthG: graph message-passing depth.
        loss_type: 'L1', 'L2' or 'cos' — loss used to match the molecule
            embedding against the projected gene-expression profile.
    """
    super(JTNNMJ, self).__init__()
    self.vocab = vocab
    self.hidden_size = hidden_size
    # Tree and Mol each get half the latent. Integer division (`//`) is
    # required: `/` produces a float under Python 3 and nn.Linear rejects it.
    self.latent_size = latent_size = latent_size // 2

    self.jtnn = JTNNEncoder(hidden_size, depthT, nn.Embedding(vocab.size(), hidden_size))
    self.decoder = JTNNDecoder(vocab, hidden_size, latent_size, nn.Embedding(vocab.size(), hidden_size))

    self.jtmpn = JTMPN(hidden_size, depthG)
    self.mpn = MPN(hidden_size, depthG)

    self.A_assm = nn.Linear(latent_size, hidden_size, bias=False)
    # reduction='sum' is the modern spelling of the deprecated size_average=False.
    self.assm_loss = nn.CrossEntropyLoss(reduction='sum')

    self.T_mean = nn.Linear(hidden_size, latent_size)
    self.T_var = nn.Linear(hidden_size, latent_size)
    self.G_mean = nn.Linear(hidden_size, latent_size)
    self.G_var = nn.Linear(hidden_size, latent_size)

    # For MJ: two-layer projection of the 978-dim gene-expression profile
    # into the (tree ++ graph) embedding space.
    self.gene_exp_size = 978
    self.gene_mlp = nn.Linear(self.gene_exp_size, 2 * hidden_size)
    self.gene_mlp2 = nn.Linear(2 * hidden_size, 2 * hidden_size)
    self.cos = nn.CosineSimilarity()
    self.loss_type = loss_type
    # 'mean' replaces the removed 'elementwise_mean' reduction string
    # (renamed in PyTorch 1.0; the old name now raises ValueError).
    if loss_type == 'L1':
        self.cos_loss = torch.nn.L1Loss(reduction='mean')
    elif loss_type == 'L2':
        self.cos_loss = torch.nn.MSELoss(reduction='mean')
    elif loss_type == 'cos':
        self.cos_loss = torch.nn.CosineEmbeddingLoss()
def __init__(self, vocab, hidden_size, latent_size, depth):
    """Build the baseline junction-tree VAE.

    Args:
        vocab: junction-tree cluster vocabulary (must provide ``size()``).
        hidden_size: width of encoder/decoder hidden states.
        latent_size: total latent width; split evenly between the tree
            and graph halves.
        depth: message-passing depth for both tree and graph networks.
    """
    super(JTNNVAE, self).__init__()
    self.vocab = vocab
    self.hidden_size = int(hidden_size)
    self.latent_size = int(latent_size)
    self.depth = depth

    # Embedding shared by the tree encoder and decoder.
    self.embedding = nn.Embedding(vocab.size(), hidden_size)
    self.jtnn = JTNNEncoder(vocab, hidden_size, self.embedding)
    self.jtmpn = JTMPN(hidden_size, depth)
    self.mpn = MPN(hidden_size, depth)
    # latent_size // 2 everywhere: the original mixed int(latent_size / 2)
    # for the Linear layers with a raw `latent_size / 2` (a float under
    # Python 3) for the decoder. `//` keeps every size an int consistently.
    self.decoder = JTNNDecoder(vocab, hidden_size, self.latent_size // 2, self.embedding)

    # Half the latent encodes the tree, half the molecular graph.
    self.T_mean = nn.Linear(hidden_size, self.latent_size // 2)
    self.T_var = nn.Linear(hidden_size, self.latent_size // 2)
    self.G_mean = nn.Linear(hidden_size, self.latent_size // 2)
    self.G_var = nn.Linear(hidden_size, self.latent_size // 2)

    # reduction='sum' is the modern spelling of the deprecated size_average=False.
    self.assm_loss = nn.CrossEntropyLoss(reduction='sum')
    self.stereo_loss = nn.CrossEntropyLoss(reduction='sum')