Example 1
def full_adj_nll(ei, z):
    A = to_dense_adj(ei, max_num_nodes=z.size(0))[0]
    A_tilde = z @ z.T

    temp_size = A.size(0)
    temp_sum = A.sum()
    posw = float(temp_size * temp_size - temp_sum) / temp_sum
    norm = temp_size * temp_size / float(
        (temp_size * temp_size - temp_sum) * 2)
    nll_loss_mat = F.binary_cross_entropy_with_logits(input=A_tilde,
                                                      target=A,
                                                      pos_weight=posw,
                                                      reduction='none')
    nll_loss = -1 * norm * torch.mean(nll_loss_mat, dim=[0, 1])
    return -nll_loss
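For context, a minimal sketch of how the function above might be called; the tiny graph, the embedding size, and the import list are illustrative assumptions rather than part of the original snippet.

import torch
import torch.nn.functional as F
from torch_geometric.utils import to_dense_adj

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])  # 2 x num_edges
z = torch.randn(3, 16)                     # num_nodes x embedding_dim
loss = full_adj_nll(edge_index, z)         # scalar reconstruction loss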
Example 2
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        z, _ = to_dense_batch(x, batch)
        adj = to_dense_adj(edge_index, batch)

        batch_size, num_nodes, _ = z.size()

        '''
        Reference for batched matrix multiplication with einsum:
        >>> As = torch.randn(3, 2, 5)
        >>> Bs = torch.randn(3, 5, 4)
        >>> torch.einsum('bij,bjk->bik', As, Bs)  # batch matrix multiplication
        The decoder below computes sigmoid(z @ z.transpose(1, 2)), the batched
        inner product of the node embeddings.
        '''
        return None, x, torch.sigmoid(torch.matmul(z, z.transpose(1, 2))), adj
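As a quick check on the decoder above, the batched matmul sigmoid(z @ z.transpose(1, 2)) matches an einsum with the second operand's node and feature axes swapped; the batch shape below is an arbitrary illustration.

import torch

z = torch.randn(4, 7, 16)                              # (batch, nodes, features)
a = torch.sigmoid(torch.matmul(z, z.transpose(1, 2)))  # (batch, nodes, nodes)
b = torch.sigmoid(torch.einsum('bij,bkj->bik', z, z))  # same result via einsum
assert torch.allclose(a, b, atol=1e-6)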
Example 3
    def eval(self, epoch):
        total_closs = 0.0
        total_laploss = 0.0
        total_nloss = 0.0
        total_eloss = 0.0
        total_loss = 0.0

        num_iters = int(
            math.ceil(self.params.val_data_size / self.params.batch_size))
        loss = 0.0
        self.model.eval()

        for i in range(num_iters):

            self.optimizer.zero_grad()

            gt_vertices, gt_normals, gt_edges, gt_image_feats, proj_gt = next(
                self.val_generator)

            gt_vertices = torch.Tensor(gt_vertices).type(
                dtypeF).requires_grad_(False)
            gt_normals = torch.Tensor(gt_normals).type(dtypeF).requires_grad_(
                False)
            gt_image_feats = torch.Tensor(gt_image_feats).type(
                dtypeF).requires_grad_(False)

            x, c = self.model.forward(gt_image_feats, gt_vertices, gt_normals)

            total_closs += self.model.closs / num_iters
            total_laploss += self.model.laploss / num_iters
            total_nloss += self.model.nloss / num_iters
            total_eloss += self.model.eloss / num_iters
            total_loss += self.model.loss / num_iters

        print(
            f'Validation Epoch: {epoch}, Val Epoch Loss: {total_loss}, Val Epoch CLoss: {total_closs}, Val Epoch NLoss: {total_nloss}, Val Epoch ELoss: {total_eloss}, Val Epoch LapLoss: {total_laploss}'
        )
        # proj_pred = utils.flatten_pred_batch(utils.scaleBack(c.x), A, self.params)
        utils.drawPolygons(utils.scaleBack(c.x),
                           utils.scaleBack(gt_vertices[0]),
                           gt_edges[0],
                           proj_pred=None,
                           proj_gt=None,
                           color='red',
                           out=self.params.expt_res_dir + '/../val_out.png',
                           A=to_dense_adj(c.edge_index).cpu().numpy()[0])

        self.model.train()
Example 4
 def __call__(self, data):
     adj_mat = np.array(to_dense_adj(data.edge_index)[0])
     graph = networkx.convert_matrix.from_numpy_matrix(adj_mat)
     if not nx.is_connected(graph):
         graph = to_connected(graph)
     self.model.fit([graph])
     hks = self.model.get_embedding().reshape(eval_points, -1).transpose()
     #hks=torch.index_select(torch.from_numpy(hks), 1, indice)
     num = self.num_node - len(hks)
     m = torch.nn.ConstantPad2d((0, 0, 0, num), 0)
     mask = torch.cat((torch.ones(
         (len(hks), eval_points)), torch.zeros((num, eval_points))), 0)
     hks = m(torch.from_numpy(hks)).float()[:, None, :]
     data.emb = hks
     data.mask = mask
     return data
Example 5
    def forward(self, h, g):
        if not self.ptb:
            g = to_dense_adj(g)
            g = torch.squeeze(g)
        # h = self.s_gcn(g, h)
        h = torch.squeeze(h)
        h = self.g_unet_forward(g, h)
        # classify
        # h = self.out_drop(h)
        # h = self.out_l_1(h)
        # h = self.c_act(h)
        # h = self.out_drop(h)
        # h = self.out_gcn(g, h)
        # h = self.gunet(h, g)

        return h
Example 6
    def forward(self, x, edge_index, edge_weight):
        adj_mat = to_dense_adj(edge_index, edge_attr=edge_weight)
        adj_mat = adj_mat.reshape(adj_mat.size(1), adj_mat.size(2))
        deg_out = torch.matmul(adj_mat, torch.ones(size=(adj_mat.size(0), 1)))
        deg_out = deg_out.flatten()
        deg_in = torch.matmul(torch.ones(size=(1, adj_mat.size(0))), adj_mat)
        deg_in = deg_in.flatten()

        deg_out_inv = torch.reciprocal(deg_out)
        deg_in_inv = torch.reciprocal(deg_in)
        row, col = edge_index
        norm_out = deg_out_inv[row]
        norm_in = deg_in_inv[row]  # row for W^T

        Tx_0 = x
        Tx_1 = x
        out = torch.matmul(Tx_0, (self.weight[0])[0]) + torch.matmul(
            Tx_0, (self.weight[1])[0])

        # propagate_type
        if self.weight.size(1) > 1:
            Tx_1_o = self.propagate(edge_index, x=x, norm=norm_out, size=None)
            Tx_1_i = self.propagate(edge_index, x=x, norm=norm_in, size=None)
            out = out + torch.matmul(Tx_1_o,
                                     (self.weight[0])[1]) + torch.matmul(
                                         Tx_1_i, (self.weight[1])[1])

        for k in range(2, self.weight.size(1)):
            Tx_2_o = self.propagate(edge_index,
                                    x=Tx_1_o,
                                    norm=norm_out,
                                    size=None)
            Tx_2_o = 2. * Tx_2_o - Tx_0
            Tx_2_i = self.propagate(edge_index,
                                    x=Tx_1_i,
                                    norm=norm_in,
                                    size=None)
            Tx_2_i = 2. * Tx_2_i - Tx_0
            out = out + torch.matmul(Tx_2_o,
                                     (self.weight[0])[k]) + torch.matmul(
                                         Tx_2_i, (self.weight[1])[k])
            Tx_0, Tx_1_o, Tx_1_i = Tx_1, Tx_2_o, Tx_2_i

        if self.bias is not None:
            out += self.bias

        return out
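For reference, the two matmuls with all-ones vectors near the top of the snippet are just row and column sums of the dense adjacency; a small self-contained check on an arbitrary toy matrix:

import torch

adj = torch.tensor([[0., 1., 0.],
                    [1., 0., 2.],
                    [0., 0., 0.]])
deg_out = torch.matmul(adj, torch.ones(adj.size(0), 1)).flatten()  # row sums
deg_in = torch.matmul(torch.ones(1, adj.size(0)), adj).flatten()   # column sums
assert torch.equal(deg_out, adj.sum(dim=1))
assert torch.equal(deg_in, adj.sum(dim=0))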
Example 7
    def forward(
            self,
            x: Union[Tensor, PairTensor],
            batch: Union[OptTensor, Optional[PairTensor]] = None) -> Tensor:
        """"""

        is_bipartite: bool = True
        if isinstance(x, Tensor):
            x: PairTensor = (x, x)
            is_bipartite = False

        if x[0].dim() != 2:
            raise ValueError("Static graphs not supported in 'GravNetConv'")

        b: PairOptTensor = (None, None)
        if isinstance(batch, Tensor):
            b = (batch, batch)
        elif isinstance(batch, tuple):
            assert batch is not None
            b = (batch[0], batch[1])

        # embed the inputs before message passing
        msg_activations = self.lin_p(x[0])

        # transform to the space dimension to build the graph
        s_l: Tensor = self.lin_s(x[0])
        s_r: Tensor = self.lin_s(x[1]) if is_bipartite else s_l

        edge_index = knn(s_l, s_r, self.k, b[0], b[1]).flip([0])
        # edge_index = knn_graph(s_l, self.k, b[0], b[1]).flip([0])

        edge_weight = (s_l[edge_index[0]] - s_r[edge_index[1]]).pow(2).sum(-1)
        edge_weight = torch.exp(-10.0 *
                                edge_weight)  # 10 gives a better spread

        # return the adjacency matrix of the graph for lrp purposes
        A = to_dense_adj(
            edge_index.to("cpu"),
            edge_attr=edge_weight.to("cpu"))[0]  # adjacency matrix

        # message passing
        out = self.propagate(edge_index,
                             x=(msg_activations, None),
                             edge_weight=edge_weight,
                             size=(s_l.size(0), s_r.size(0)))

        return self.lin_out(out), A, msg_activations
Example 8
 def __call__(self, data):
     adj_mat = np.array(to_dense_adj(data.edge_index)[0])
     graph = networkx.convert_matrix.from_numpy_matrix(adj_mat)
     if not nx.is_connected(graph):
         graph = to_connected(graph)
     data.x = create_node_feature_matrix(graph)
     data.x = torch.tensor(data.x)
     # print(data.x)
     self.model.fit(graph, data.x)
     data.emb = torch.from_numpy(self.model.get_embedding()).float()
     num = self.num_node - len(data.emb)
     m = nn.ConstantPad2d((0, 0, 0, num), 0)
     mask = torch.cat((torch.ones((len(data.emb), eval_points)),
                       torch.zeros((num, eval_points))), 0)
     data.emb = m(data.emb).float()
     data.mask = mask
     data.emb = data.emb.reshape(-1, 20, eval_points)
     return data
Example 9
def test(model, loader, loss_func):
    total_loss = 0
    model.eval()
    test_log = TestLog()

    for batch_id, rxn_batch in tqdm(enumerate(loader)):
        rxn_batch = rxn_batch.to(model.device)
        D_pred, mask, W = model(rxn_batch)
        D_gt = to_dense_adj(rxn_batch.edge_index, rxn_batch.batch, rxn_batch.y)

        batch_loss = loss_func(D_pred, D_gt) / mask.sum()
        total_loss += batch_loss.item()
        test_log.add_D(D_pred.detach().cpu().numpy())
        test_log.add_W(W.detach().cpu().numpy())

    RMSE = math.sqrt(total_loss / len(loader.dataset))
    return RMSE, test_log
Example 10
    def __init__(self,
                 dataset='Cora',
                 lr=0.01,
                 weight_decay=5e-4,
                 max_layer=10,
                 batch_size=128,
                 policy=""):
        device = 'cpu'
        path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data',
                        dataset)
        dataset = Planetoid(path, dataset, transform=T.NormalizeFeatures())
        data = dataset[0]

        adj = to_dense_adj(data.edge_index).numpy()[0]
        norm = np.array([np.sum(row) for row in adj])
        self.adj = (adj / norm).T
        self.init_k_hop(max_layer)

        self.model, self.data = Net(max_layer,
                                    dataset).to(device), data.to(device)
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr,
                                          weight_decay=weight_decay)
        train_mask = self.data.train_mask.to('cpu').numpy()
        self.train_indexes = np.where(train_mask == True)[0]
        self.batch_size = len(self.train_indexes) - 1
        self.i = 0
        self.val_acc = 0.0
        self._set_action_space(max_layer)
        obs = self.reset()
        self._set_observation_space(obs)
        self.policy = policy
        self.max_layer = max_layer

        # For Experiment #
        self.random = False
        self.gcn = False  # GCN Baseline
        self.enable_skh = True  # only when GCN is false will be useful
        self.enable_dlayer = True
        self.baseline_experience = 50

        # buffers for updating
        #self.buffers = {i: [] for i in range(max_layer)}
        self.buffers = defaultdict(list)
        self.past_performance = [0]
Example 11
def test(model, loader, loss, device, log_dir, epoch):
    model.eval()
    error = 0

    for i, data in tqdm(enumerate(loader)):
        data = data.to(device)
        out, mask = model(data)
        result = loss(out, to_dense_adj(data.edge_index, data.batch,
                                        data.y)) / mask.sum()
        error += result.item()

        if i == 0 and epoch < 5:
            check_ts(data, log_dir, epoch)

    # divides by number of molecules
    return math.sqrt(error / len(loader.dataset))  # rmse
Example 12
    def forward(self, data):
        seq_len = data['s']

        inputs = data['c'].reshape((len(seq_len), -1, 3))
        _, idx_sort = torch.sort(seq_len, dim=0, descending=True)
        _, idx_unsort = torch.sort(idx_sort, dim=0)
        input_x = inputs.index_select(0, Variable(idx_sort))
        length_list = list(seq_len[idx_sort])
        input_x = input_x.float()
        pack = nn.utils.rnn.pack_padded_sequence(input_x,
                                                 length_list,
                                                 batch_first=True)
        out, state = self.lstm(pack)
        del state
        un_padded = nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
        un_padded = un_padded[0].index_select(0, Variable(idx_unsort))
        out = self.dropout(un_padded)
        feature = self.fc(out)
        batch_feature = None
        del out, pack, un_padded
        for i in range(data.num_graphs):
            emptyfeature = torch.zeros((1, self.fea_dim)).to(device)
            fea = torch.cat((feature[i][:(seq_len[i])], emptyfeature))
            if batch_feature is None:
                batch_feature = fea
            else:
                batch_feature = torch.cat((batch_feature, fea))

        data['x'] = batch_feature
        x, edge_index = data.x, data.edge_index
        dense_x = utils.to_dense_batch(x, batch=data.batch)
        x = dense_x[0]
        adj = utils.to_dense_adj(data.edge_index, batch=data.batch)
        s = self.gnn1_pool(x, adj)
        x = self.gnn1_embed(x, adj)
        x, adj, l1, e1 = dense_diff_pool(x, adj, s)

        x = self.gnn3_embed(x, adj)

        x = x.mean(dim=1)
        x1 = self.lin1(x)
        x = F.relu(x1)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), x1, l1, e1
Example 13
def rwr_filter(data, c=0.1):
    adj = to_dense_adj(data.edge_index).squeeze()
    adj = 0.5 * (adj + adj.T)
    adj = adj + torch.eye(adj.shape[0])
    d = torch.diag(torch.sum(adj, 1))
    d_inv = d**(-0.5)
    d_inv[torch.isinf(d_inv)] = 0.
    w_tilda = torch.matmul(d_inv, adj)
    w_tilda = torch.matmul(w_tilda, d_inv)
    q = torch.eye(w_tilda.shape[0]) - c * w_tilda
    q_inv = torch.inverse(q)
    rwr = (1 - c) * q_inv
    rwr, _ = torch.sort(rwr, dim=1, descending=True)
    sparse_rwr = rwr.to_sparse()
    data.edge_index = sparse_rwr.indices()
    data.edge_attr = sparse_rwr.values().unsqueeze(1).float()
    return data
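A minimal sketch of applying rwr_filter to a toy torch_geometric Data object; the graph, the restart constant, and the import list here are illustrative assumptions.

import torch
from torch_geometric.data import Data

edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
toy = Data(edge_index=edge_index, num_nodes=3)
toy = rwr_filter(toy, c=0.1)
# edge_index/edge_attr now describe the random-walk-with-restart matrix
print(toy.edge_index.size(), toy.edge_attr.size())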
Example 14
def convert_to_dense(data, mask, start=0, end=None):
    end = data.T if not end else end

    adjs = []
    for t in range(start, end):
        ei = data.get_masked_edges(t, mask)
        ei = add_remaining_self_loops(ei, num_nodes=data.num_nodes)[0]
        
        a = to_dense_adj(ei, max_num_nodes=data.num_nodes)[0]
        d = a.sum(dim=1)
        d = 1/torch.sqrt(d) 
        d = torch.diag(d)
        ahat = d @ a @ d

        adjs.append(ahat)

    return adjs, [torch.eye(data.num_nodes) for _ in range(len(adjs))]
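For reference, the loop above builds the symmetrically normalized adjacency Â = D^-1/2 (A + I) D^-1/2 used by GCN-style models; a standalone sketch on an arbitrary toy graph (imports assumed):

import torch
from torch_geometric.utils import add_remaining_self_loops, to_dense_adj

ei = torch.tensor([[0, 1, 1, 2],
                   [1, 0, 2, 1]])
ei = add_remaining_self_loops(ei, num_nodes=3)[0]
a = to_dense_adj(ei, max_num_nodes=3)[0]
d = torch.diag(a.sum(dim=1).rsqrt())
ahat = d @ a @ d  # rows/columns rescaled by 1/sqrt(degree)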
Example 15
def test_to_dense_adj():
    edge_index = torch.tensor([
        [0, 0, 1, 2, 3, 4],
        [0, 1, 0, 3, 4, 2],
    ])
    batch = torch.tensor([0, 0, 1, 1, 1])

    adj = to_dense_adj(edge_index, batch)
    assert adj.size() == (2, 3, 3)
    assert adj[0].tolist() == [[1, 1, 0], [1, 0, 0], [0, 0, 0]]
    assert adj[1].tolist() == [[0, 1, 0], [0, 0, 1], [1, 0, 0]]

    adj = to_dense_adj(edge_index, batch, max_num_nodes=5)
    assert adj.size() == (2, 5, 5)
    assert adj[0][:3, :3].tolist() == [[1, 1, 0], [1, 0, 0], [0, 0, 0]]
    assert adj[1][:3, :3].tolist() == [[0, 1, 0], [0, 0, 1], [1, 0, 0]]

    edge_attr = torch.Tensor([1, 2, 3, 4, 5, 6])
    adj = to_dense_adj(edge_index, batch, edge_attr)
    assert adj.size() == (2, 3, 3)
    assert adj[0].tolist() == [[1, 2, 0], [3, 0, 0], [0, 0, 0]]
    assert adj[1].tolist() == [[0, 4, 0], [0, 0, 5], [6, 0, 0]]

    adj = to_dense_adj(edge_index, batch, edge_attr, max_num_nodes=5)
    assert adj.size() == (2, 5, 5)
    assert adj[0][:3, :3].tolist() == [[1, 2, 0], [3, 0, 0], [0, 0, 0]]
    assert adj[1][:3, :3].tolist() == [[0, 4, 0], [0, 0, 5], [6, 0, 0]]

    edge_attr = edge_attr.view(-1, 1)
    adj = to_dense_adj(edge_index, batch, edge_attr)
    assert adj.size() == (2, 3, 3, 1)

    edge_attr = edge_attr.view(-1, 1)
    adj = to_dense_adj(edge_index, batch, edge_attr, max_num_nodes=5)
    assert adj.size() == (2, 5, 5, 1)

    adj = to_dense_adj(edge_index)
    assert adj.size() == (1, 5, 5)
    assert adj[0].nonzero(as_tuple=False).t().tolist() == edge_index.tolist()

    adj = to_dense_adj(edge_index, max_num_nodes=10)
    assert adj.size() == (1, 10, 10)
    assert adj[0].nonzero(as_tuple=False).t().tolist() == edge_index.tolist()
Example 16
def get_edge_acc(pred_adj, target_data):
    # pred_adj.cpu()
    # pred_adj = torch.pow(pred_adj,2)
    # print(pred_adj.shape)

    pred_adj[pred_adj >= args.threshold] = 1
    pred_adj[pred_adj < args.threshold] = 0
    edge_index, _ = add_self_loops(target_data['edge_index'])
    target_adj = to_dense_adj(edge_index)[0]
    tn, fp, fn, tp = confusion_matrix(
        target_adj.view(-1).cpu().detach().numpy(),
        pred_adj.view(-1).cpu().detach().numpy()).ravel()
    # print(target_adj.shape)
    # check_adj = pred_adj + target_adj
    # check_adj[check_adj >= 1] = 1
    # check_adj[check_adj < 1] = 0

    return (tn + tp) / (tn + fp + fn + tp), tn, fp, fn, tp
Example 17
    def forward(self, x, edge_index, batch):
        x = self.conv1(x, edge_index).relu()

        x, mask = to_dense_batch(x, batch)
        adj = to_dense_adj(edge_index, batch)

        _, x, adj, sp1, o1, c1 = self.pool1(x, adj, mask)

        x = self.conv2(x, adj).relu()

        _, x, adj, sp2, o2, c2 = self.pool2(x, adj)

        x = self.conv3(x, adj)

        x = x.mean(dim=1)
        x = self.lin1(x).relu()
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), sp1 + sp2 + o1 + o2 + c1 + c2
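For shape intuition, the to_dense_batch / to_dense_adj pair used above converts a sparse mini-batch into padded dense tensors; a minimal sketch with a two-graph batch (all sizes are illustrative):

import torch
from torch_geometric.utils import to_dense_adj, to_dense_batch

x = torch.randn(5, 8)                     # 5 nodes in total, 8 features each
edge_index = torch.tensor([[0, 1, 2, 3, 4],
                           [1, 0, 3, 4, 2]])
batch = torch.tensor([0, 0, 1, 1, 1])     # graph 0: 2 nodes, graph 1: 3 nodes
x_dense, mask = to_dense_batch(x, batch)  # shapes (2, 3, 8) and (2, 3)
adj = to_dense_adj(edge_index, batch)     # shape (2, 3, 3)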
Example 18
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x, mask = to_dense_batch(x, batch=batch)
        adj = to_dense_adj(edge_index, batch=batch)
        # data = ToDense(data.num_nodes)(data)
        # TODO describe mask shape and how batching works

        # adj, mask, x = data.adj, data.mask, data.x
        x_all, l_total, e_total = [], 0, 0

        for i in range(self.num_diffpool_layers):
            if i != 0:
                mask = None

            x, adj, l, e = self.diffpool_layers[i](
                x, adj,
                mask)  # x has shape (batch, MAX_no_nodes, feature_size)
            x_all.append(torch.max(x, dim=1)[0])

            l_total += l
            e_total += e

        x = self.final_embed(x, adj)
        x_all.append(torch.max(x, dim=1)[0])

        x = torch.cat(x_all,
                      dim=1)  # shape (batch, feature_size x diffpool layers)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)

        # Don't apply sigmoid during training b/c using BCEWithLogitsLoss
        if self.classification and not self.training:
            x = self.sigmoid(x)
        if self.multiclass:
            x = x.reshape(
                (x.size(0), -1, self.multiclass_num_classes
                 ))  # batch size x num targets x num classes per target
            if not self.training:
                x = self.multiclass_softmax(
                    x
                )  # to get probabilities during evaluation, but not during training as we're using CrossEntropyLoss

        return x, l_total, e_total
Example 19
    def forward(self, x, adj, batch_num_nodes=None, **kwargs):
        # mask
        # Convert the edge index into a dense adjacency matrix
        adj = to_dense_adj(adj, max_num_nodes=x.shape[0])

        max_num_nodes = adj.size()[1]
        if batch_num_nodes is not None:
            embedding_mask = self.construct_mask(max_num_nodes,
                                                 batch_num_nodes)
        else:
            embedding_mask = None

        self.adj_atts = []
        self.embedding_tensor, adj_att = self.gcn_forward(
            x, adj, self.conv_first, self.conv_block, self.conv_last,
            embedding_mask)
        pred = self.pred_model(self.embedding_tensor)
        pred = pred.squeeze(0)

        return F.log_softmax(pred, dim=1)  # pred
Example 20
    def __call__(self, data):
        x = data.x if data.x.dim() == 1 else data.x[:, 0]

        adj = to_dense_adj(data.edge_index,
                           data.batch,
                           edge_attr=data.edge_attr,
                           max_num_nodes=self.max_size)
        x_node, x_node_sizes = to_dense_batch(x,
                                              data.batch,
                                              max_num_nodes=self.max_size)
        data.num_atoms = x_node_sizes

        data.dense_adj = _one_hotify_adj(adj).float()
        try:
            data.dense_x = torch.nn.functional.one_hot(
                x_node, self.num_atom_classes).float()
        except:  # noqa: E722  # Ignore bare except since the error is raised anyway.
            print(self.num_atom_classes, ", but saw: ", torch.max(x_node))
            raise
        return data
Example 21
def generate_negative_samples(data, num_samples):
    # Build inverse adj matrix to find non-edges
    a = to_dense_adj(data.edge_index[:, data.train_mask])[-1].bool()
    
    # Determine what nodes we'll have embeddings for
    known = data.edge_index[:, data.train_mask].flatten().unique()
    not_known = torch.full((a.size()[0],), 1, dtype=torch.bool)
    not_known[known] = 0
    
    # Mark all untrained nodes as having edges so they won't show
    # up in the inverted adj matrix 
    a[not_known, :] = 1
    a[:, not_known] = 1
    
    # That way, when we invert the matrix, the non-edges will only be
    # between nodes that we for sure have embeddings for
    non_edges = (~a).nonzero()    
    
    # Randomly sample from the allowed edge pool
    return non_edges[torch.randperm(non_edges.size()[0])[:num_samples], :]
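A toy invocation sketch for the sampler above; note that train_mask is assumed here to be an edge-level boolean mask (which is what the indexing data.edge_index[:, data.train_mask] implies), and the small graph is purely illustrative.

import torch
from torch_geometric.data import Data

data = Data(edge_index=torch.tensor([[0, 1, 2, 3],
                                     [1, 2, 3, 0]]))
data.train_mask = torch.tensor([True, True, True, False])
neg = generate_negative_samples(data, num_samples=5)  # rows of (src, dst) non-edges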
Example 22
    def forward(self, x, edge_index, batch):
        x = F.relu(self.conv1(x, edge_index))

        x, mask = to_dense_batch(x, batch)
        adj = to_dense_adj(edge_index, batch)

        s = self.pool1(x)
        x, adj, mc1, o1 = dense_mincut_pool(x, adj, s, mask)

        x = F.relu(self.conv2(x, adj))
        s = self.pool2(x)

        x, adj, mc2, o2 = dense_mincut_pool(x, adj, s)

        x = self.conv3(x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1), mc1 + mc2, o1 + o2
Example 23
    def test(self, loader):

        test_acc, _ = self.eval(loader)

        exact_match = 0
        validity = 0
        num_valid = 0
        num_invalid = 0

        for _, data in enumerate(tqdm(loader, desc='[Test]')):
            nodes = data.x.argmax(-1)
            a = to_dense_adj(data.edge_index, edge_attr=data.edge_attr)[0]
            mol = mol_from_graphs(nodes, a)

            if mol is None:
                num_invalid += 1
                continue
            else:
                num_valid += 1

            smiles = rdkit.Chem.MolToSmiles(mol)
            data = data.to(self.args.device)
            out = self.model(data)
            pred = torch.softmax(out, dim=-1).argmax(-1)
            pred_mol = mol_from_graphs(pred, a)

            if pred_mol is not None:
                pred_smiles = rdkit.Chem.MolToSmiles(pred_mol)
            else:
                pred_smiles = ''

            _exact_match = smiles == pred_smiles
            _validity = pred_smiles != ''

            exact_match += _exact_match
            validity += _validity

        exact_match = exact_match / float(num_valid) * 100
        validity = validity / float(num_valid) * 100

        return num_valid, num_invalid, exact_match, validity, test_acc
Example 24
    def do_trans(data):
        node_num, _ = data.x.size()
        sub_num = int(node_num * ratio)

        if add_self_loop:
            sl = torch.tensor([[n, n] for n in range(node_num)]).t()
            edge_index = torch.cat((data.edge_index, sl), dim=1)
        else:
            edge_index = data.edge_index.detach().clone()

        # edge_index = edge_index.numpy()
        idx_sub = [np.random.randint(node_num, size=1)[0]]
        # idx_neigh = set([n for n in edge_index[1][edge_index[0]==idx_sub[0]]])
        idx_neigh = set(
            [n.item() for n in edge_index[1][edge_index[0] == idx_sub[0]]])

        count = 0
        while len(idx_sub) <= sub_num:
            count = count + 1
            if count > node_num:
                break
            if len(idx_neigh) == 0:
                break
            sample_node = np.random.choice(list(idx_neigh))
            if sample_node in idx_sub:
                continue
            idx_sub.append(sample_node)
            # idx_neigh.union(set([n for n in edge_index[1][edge_index[0]==idx_sub[-1]]]))
            # set.union() returns a new set and leaves idx_neigh unchanged;
            # update() is needed to actually grow the neighbour frontier
            idx_neigh.update(
                n.item()
                for n in edge_index[1][edge_index[0] == idx_sub[-1]])

        idx_drop = [n for n in range(node_num) if n not in idx_sub]
        idx_sampled = idx_sub
        adj = to_dense_adj(edge_index)[0]
        adj = adj[idx_sampled, :][:, idx_sampled]

        return Data(x=data.x[idx_sampled], edge_index=dense_to_sparse(adj)[0])
Example 25
 def forward(self, obs, return_attention_weights=False, **kwargs):
     batch_size = obs.shape[0]
     x, edge_index = self.pre_graph_builder(obs)
     x, edge_attention = self.attentioner(x, edge_index, return_attention_weights=True)
     x = self.hidden_activations[0](x)
     edge_index, attention_weights = edge_attention
     attention_weights = attention_weights.squeeze(-1)
     batch_vector = torch.arange(batch_size)[:,None].repeat(1,self.pre_graph_builder.node_num).reshape(-1)
     adj = pyg_utils.to_dense_adj(edge_index, edge_attr=attention_weights, batch=batch_vector)
     adj = adj.squeeze(-1)
     x, _ = pyg_utils.to_dense_batch(x, batch=batch_vector)
     for l, conv in enumerate(self.convs):
         x = conv(x, adj)
         x = self.hidden_activations[l+1](x)
     # for l, conv in enumerate(self.convs):
     #     x = conv(x, edge_index, edge_weight = attention_weights)
     #     x = self.hidden_activations[l+1](x)
     x = x.reshape(batch_size,self.pre_graph_builder.node_num,self.node_dim)
     x = self.output_activation(x)
     if return_attention_weights:
         return x, edge_attention
     else:
         return x
Example 26
def test_dense_graph_conv(aggr):
    channels = 16
    sparse_conv = GraphConv(channels, channels, aggr=aggr)
    dense_conv = DenseGraphConv(channels, channels, aggr=aggr)
    assert dense_conv.__repr__() == 'DenseGraphConv(16, 16)'

    # Ensure same weights and bias.
    dense_conv.lin_rel = sparse_conv.lin_rel
    dense_conv.lin_root = sparse_conv.lin_root

    x = torch.randn((5, channels))
    edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 4],
                               [1, 2, 0, 2, 0, 1, 4, 3]])

    sparse_out = sparse_conv(x, edge_index)
    assert sparse_out.size() == (5, channels)

    adj = to_dense_adj(edge_index)
    mask = torch.ones(5, dtype=torch.bool)

    dense_out = dense_conv(x, adj, mask)[0]
    assert dense_out.size() == (5, channels)
    assert torch.allclose(sparse_out, dense_out, atol=1e-04)
Example 27
def train(model, loader, loss_func, opt):
    total_loss = 0
    model.train()

    for batch_id, rxn_batch in enumerate(tqdm(loader)):

        opt.zero_grad()
        rxn_batch = rxn_batch.to(model.device)
        D_pred, mask, _ = model(rxn_batch)
        D_gt = to_dense_adj(rxn_batch.edge_index, rxn_batch.batch, rxn_batch.y)

        batch_loss = loss_func(D_pred, D_gt) / mask.sum()
        batch_loss.backward()
        clip_grad_norm_(model.parameters(), MAX_CLIP_NORM)  # clip gradients
        #        if logger:
        #            pnorm = compute_parameters_norm(model)
        #            gnorm = compute_gradients_norm(model)
        #            logger.info(f' Batch {batch_id} Loss: {batch_loss.item()}\t Parameter Norm: {pnorm}\t Gradient Norm: {gnorm}')
        opt.step()
        total_loss += batch_loss.item()

    RMSE = math.sqrt(total_loss / len(loader.dataset))
    return RMSE
Example 28
    def pg_graph_to_nb201(pg_graph):
        # pg_graph.x holds the node (operation) attributes, pg_graph.edge_index the edge list
        ops = [OPS_by_IDX_201[i] for i in pg_graph.x.cpu().numpy()]
        matrix = np.array(to_dense_adj(pg_graph.edge_index)[0].cpu().numpy())
        try:
            if (matrix == ADJACENCY).all():
                steps_coding = ['0', '0', '1', '0', '1', '2']

                node_1 = '|' + ops[1] + '~' + steps_coding[0] + '|'
                node_2 = '|' + ops[2] + '~' + steps_coding[1] + '|' + ops[
                    3] + '~' + steps_coding[2] + '|'
                node_3 = '|' + ops[4] + '~' + steps_coding[3] + '|' + ops[
                    5] + '~' + steps_coding[4] + '|' + ops[
                        6] + '~' + steps_coding[5] + '|'
                nodes_nb201 = node_1 + '+' + node_2 + '+' + node_3
                index = nasbench.query_index_by_arch(nodes_nb201)
                acc = Dataset.map_item(index).acc
            else:
                acc = torch.zeros(1)
        except:
            acc = torch.zeros(1)

        return acc
Example 29
    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        if self.encode_edge:
            x = self.atom_encoder(x)
            x = F.relu(self.convs[0](x, edge_index, data.edge_attr))
        else:
            x = F.relu(self.convs[0](x, edge_index))

        x, mask = to_dense_batch(x, batch)
        adj = to_dense_adj(edge_index, batch)

        if self.pooling_type != 'mlp':
            s = self.rms[0][:x.size(1), :].unsqueeze(dim=0).expand(
                x.size(0), -1, -1).to(x.device)
        else:
            s = self.pools[0](x)

        x, adj, mc, o = dense_mincut_pool(x, adj, s, mask)

        for i in range(1, self.num_layers - 1):
            x = F.relu(self.convs[i](x, adj))
            if self.pooling_type != 'mlp':
                s = self.rms[i][:x.size(1), :].unsqueeze(dim=0).expand(
                    x.size(0), -1, -1).to(x.device)
            else:
                s = self.pools[i](x)
            x, adj, mc_aux, o_aux = dense_mincut_pool(x, adj, s)
            mc += mc_aux
            o += o_aux

        x = self.convs[self.num_layers - 1](x, adj)

        x = x.mean(dim=1)
        x = F.relu(self.lin1(x))
        x = self.lin2(x)
        return x, mc, o
Example 30
def dynamic_new_link_prediction(data,
                                partition_fn,
                                zs,
                                start=0,
                                end=None,
                                include_tr=True,
                                batched=False):
    if batched:
        raise NotImplementedError("Sorry, batching is a TODO")

    p, n = [], []
    b = None

    if partition_fn is None:
        partition_fn = lambda x: data.eis[x]

    end = end if end else data.T

    for i in range(start, end):
        # Use full adj matrix for new link pred
        ei = partition_fn(i)

        a = b
        b = to_dense_adj(ei, max_num_nodes=data.num_nodes)[0].bool()

        if a is None:
            continue

        # Generates new links in next time step
        new_links = (~a).logical_and(a.logical_or(b))
        new_links, _ = dense_to_sparse(new_links)

        p.append(new_links)
        n.append(fast_negative_sampling(ei, p[-1].size(1), data.num_nodes))

    return p, n, zs[:-1]
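As a quick sanity check of the new-link mask above, ¬a ∧ (a ∨ b) simplifies to ¬a ∧ b, i.e. edges that appear in the next snapshot but were absent in the current one; the toy boolean matrices below are purely illustrative.

import torch

a = torch.tensor([[False, True], [False, False]])  # adjacency at time t
b = torch.tensor([[True, True], [False, True]])    # adjacency at time t + 1
new_links = (~a).logical_and(a.logical_or(b))
assert torch.equal(new_links, (~a) & b)            # only edges new at t + 1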