def __init__(self, X, y, mols, w=None, cuda=False, pad_to=-1, **kwargs):
    self.cuda = cuda
    self.adj = []
    self.x = []
    self.w = None
    self.mols = mols
    if pad_to is None:
        pad_to = -1
    # Feature dimension comes from the first sample's node-feature matrix;
    # guard against an empty dataset instead of crashing on X[0].
    feat_dim = X[0][-1].shape[-1] if len(X) > 0 else 0
    # Padding rows use an all-zero "fake atom" feature vector.
    fake_atom = to_tensor(np.zeros(feat_dim), dtype=torch.float32, gpu=cuda)
    self.pad_x = partial(pad_feats, no_atom_tensor=fake_atom, max_num_node=pad_to)
    self.pad = partial(pad_graph, max_num_node=pad_to)
    if len(X) > 0:
        # Each sample is an (adjacency, node_features) pair.
        self.adj, self.x = zip(*X)
        self.adj = list(self.adj)
        self.x = list(self.x)
    self.y = to_tensor(y, gpu=self.cuda, dtype=torch.float32)
    if w is not None:
        self.w = w.reshape(y.shape[0], -1)
        self.w = to_tensor(self.w, gpu=self.cuda, dtype=torch.float32)
    for k, v in kwargs.items():
        setattr(self, k, v)
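# Usage sketch (class and variable names here are hypothetical, not from the
# source): each element of X is expected to be an (adjacency, node_features)
# pair of numpy arrays, with y the targets and mols the matching molecules.
#
#   pairs = [(adj, feats) for adj, feats in featurized_graphs]
#   dataset = AdjGraphDataset(pairs, y=labels, mols=molecules, pad_to=50)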
def __init__(self, X, y, mols, w=None, cuda=False, pad_to=-1, **kwargs):
    self.cuda = cuda
    self.w = None
    self.G = []
    self.feat = []
    self.add_feat = []
    self.mols = mols
    self.pad = partial(pad_graph, max_num_node=pad_to)
    # Padding rows are encoded as the one-hot vector of the dummy atom '*'.
    fake_atom = to_tensor(one_of_k_encoding('*', const.ATOM_LIST),
                          dtype=torch.float32, gpu=cuda)
    self.pad_x = partial(pad_feats, no_atom_tensor=fake_atom, max_num_node=pad_to)
    if len(X) > 0:
        # Each sample is (graph, node_features, *optional_extra_features).
        self.G, self.feat, *self.add_feat = zip(*X)
        self.G = list(self.G)
        self.feat = list(self.feat)
    self.y = to_tensor(y, gpu=self.cuda, dtype=torch.float32)
    if self.add_feat:
        # Starred unpacking wraps the extra features in a one-element list; unwrap it.
        self.add_feat = self.add_feat[0]
    if w is not None:
        self.w = w.reshape(y.shape[0], -1)
        self.w = to_tensor(self.w, gpu=self.cuda, dtype=torch.float32)
    for k, v in kwargs.items():
        setattr(self, k, v)
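# Usage sketch (names hypothetical): samples may carry extra per-atom feature
# blocks after (graph, node_features); any trailing entries are collected into
# self.add_feat by the starred unpacking above.
#
#   X = [(G0, F0, AF0), (G1, F1, AF1)]
#   dataset = MolGraphDataset(X, y=labels, mols=molecules, pad_to=50)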
def X(self):
    """Return the full dataset as a list of padded (adjacency, features) tensor pairs."""
    G, F = self.adj, self.x
    G = [self.pad(to_tensor(g_i, gpu=self.cuda, dtype=torch.float32)) for g_i in G]
    F = [self.pad_x(to_tensor(f_i, gpu=self.cuda, dtype=torch.float32)) for f_i in F]
    return list(zip(G, F))
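# Sketch of eager use (memory permitting): the whole set can be materialized
# as padded tensor pairs at once instead of per item via __getitem__. If X is
# wrapped in @property, index it directly; otherwise call it:
#
#   adj_0, feats_0 = dataset.X[0]    # or dataset.X()[0] without @property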
def __getitem__(self, idx):
    g_i, f_i = self.adj[idx], self.x[idx]
    # Record the real node count before padding, for the mask below.
    true_nodes = g_i.shape[0]
    if not isinstance(g_i, torch.Tensor):
        # squeeze() drops the edge dimension if one exists.
        g_i = self.pad(to_tensor(g_i, gpu=self.cuda, dtype=torch.float32)).squeeze()
    if not isinstance(f_i, torch.Tensor):
        f_i = self.pad_x(to_tensor(f_i, gpu=self.cuda, dtype=torch.float32))
    y_i = self.y[idx, None]
    # Binary mask: 1 for real atoms, 0 for padding rows.
    m_i = torch.zeros(g_i.shape[-1])
    m_i[:true_nodes] = 1
    m_i = m_i.unsqueeze(-1)
    if self.w is not None:
        w_i = self.w[idx, None]
        return (g_i, f_i, m_i), self.mols[idx], y_i, w_i
    return (g_i, f_i, m_i), self.mols[idx], y_i
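# Downstream sketch (assumed consumer, not from the source): the mask m_i
# flags real atoms, so padded rows can be zeroed before message passing.
#
#   (g, f, m), mol, y = dataset[0]
#   f = f * m  # broadcast: silence the padded feature rows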
def __getitem__(self, idx):
    g_i, f_i = self.G[idx], self.feat[idx]
    if not isinstance(g_i, torch.Tensor):
        g_i = self.pad(to_tensor(g_i, gpu=self.cuda, dtype=torch.float32))
    if not isinstance(f_i, torch.Tensor):
        f_i = self.pad_x(to_tensor(f_i, gpu=self.cuda, dtype=torch.float32))
    X_i = (g_i, f_i)
    if self.add_feat:
        af_i = self.add_feat[idx]
        if not isinstance(af_i, torch.Tensor):
            # Extra features get no fake-atom row, only zero padding.
            af_i = self.pad_x(to_tensor(af_i, gpu=self.cuda, dtype=torch.float32),
                              no_atom_tensor=None)
        X_i += (af_i,)
    y_i = self.y[idx, None]
    if self.w is not None:
        w_i = self.w[idx, None]
        return (*X_i, self.mols[idx]), y_i, w_i
    return (*X_i, self.mols[idx]), y_i
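# Unpacking sketch: the tuple arity depends on whether weights were given.
#
#   if dataset.w is not None:
#       (*xs, mol), y, w = dataset[0]
#   else:
#       (*xs, mol), y = dataset[0]
#   # xs is (graph, feats) plus any extra feature tensors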
def __getitem__(self, idx):
    mol_i = to_mol(self.smiles[idx])
    # Featurize the molecule on the fly from its SMILES string.
    graph = self.transformer.transform([self.smiles[idx]])
    g_i, f_i = graph[0][0], graph[0][1]
    # Padding rows are one-hot on the last feature slot (the dummy atom).
    fake_atoms = torch.zeros(f_i.shape[-1])
    fake_atoms[-1] = 1
    true_nodes = g_i.shape[0]
    if not isinstance(g_i, torch.Tensor):
        # squeeze() drops the edge dimension if one exists.
        g_i = pad_graph(to_tensor(g_i, gpu=self.cuda, dtype=torch.float32),
                        max_num_node=self.pad_to).squeeze()
    if not isinstance(f_i, torch.Tensor):
        f_i = pad_feats(to_tensor(f_i, gpu=self.cuda, dtype=torch.float32),
                        no_atom_tensor=fake_atoms, max_num_node=self.pad_to)
    # Binary mask: 1 for real atoms, 0 for padding rows.
    m_i = torch.zeros(g_i.shape[0])
    m_i[:true_nodes] = 1
    m_i = m_i.unsqueeze(-1)
    # Ensure the adjacency always carries an explicit edge dimension.
    if g_i.dim() == 2:
        g_i = g_i.unsqueeze(-1)
    # No labels at inference time; return a dummy target.
    return (g_i, f_i, m_i), mol_i, torch.ones(1)
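# Inference-time sketch (the transformer API is assumed to return a list of
# (adjacency, features) pairs): only smiles, transformer, cuda and pad_to
# need to be set on the instance.
#
#   (g, f, m), mol, y_dummy = dataset[0]  # y_dummy is the placeholder ones(1)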