def forward(self, batch_graph):
    # Featurize the batch of molecular graphs and fetch the regression targets.
    node_feat, edge_feat, labels = MOLLIB.PrepareFeatureLabel(batch_graph)
    if cmd_args.mode == 'gpu':
        node_feat = node_feat.cuda()
        edge_feat = edge_feat.cuda()
        labels = labels.cuda()
    # Embed the whole batch with structure2vec, then apply the regression head
    # to the embeddings and the labels.
    embed = self.s2v(batch_graph, node_feat, edge_feat)

    return self.mlp(embed, labels)
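# A self-contained toy mirroring the forward() pattern above (featurize,
# embed, then score the embedding against the labels). The linear layers are
# placeholders, not the actual structure2vec embedding or MLP head.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyRegressor(nn.Module):
    def __init__(self, in_dim=16, hidden_dim=32):
        super(ToyRegressor, self).__init__()
        self.s2v = nn.Linear(in_dim, hidden_dim)   # placeholder embedding module
        self.mlp = nn.Linear(hidden_dim, 1)        # placeholder regression head

    def forward(self, node_feat, labels):
        embed = F.relu(self.s2v(node_feat))
        pred = self.mlp(embed).squeeze(-1)
        return F.mse_loss(pred, labels)

toy = ToyRegressor()
toy_loss = toy(torch.randn(8, 16), torch.randn(8))
toy_loss.backward()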
Example #2
        val_list[3] = ctypes.c_void_p(subg_vals.numpy().ctypes.data)

        # The C++ backend fills the index/value buffers in place through the
        # raw pointers collected in idx_list / val_list above.
        self.lib.PrepareLoopyBP(self.batch_graph_handle,
                                ctypes.cast(idx_list, ctypes.c_void_p),
                                ctypes.cast(val_list, ctypes.c_void_p))

        # Sparse message-passing operators for loopy BP, over 2 * total_num_edges
        # directed edges: node->edge, edge->edge, edge->node, and node->graph
        # (per-graph pooling), respectively.
        n2e_sp = torch.sparse.FloatTensor(n2e_idxes, n2e_vals, torch.Size([total_num_edges * 2, total_num_nodes]))
        e2e_sp = torch.sparse.FloatTensor(e2e_idxes, e2e_vals, torch.Size([total_num_edges * 2, total_num_edges * 2]))
        e2n_sp = torch.sparse.FloatTensor(e2n_idxes, e2n_vals, torch.Size([total_num_nodes, total_num_edges * 2]))
        subg_sp = torch.sparse.FloatTensor(subg_idxes, subg_vals, torch.Size([len(graph_list), total_num_nodes]))

        return n2e_sp, e2e_sp, e2n_sp, subg_sp
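# For illustration, a self-contained toy showing how one of these sparse
# operators (edge-to-node, analogous to e2n_sp with shape [N, 2*E]) can be
# built and applied; the indices below are hand-written, not the ones filled
# in by the C++ backend.
import torch

N, E = 3, 2                                    # 3 nodes, 2 undirected edges
# directed edges: 0->1, 1->0, 1->2, 2->1; entry (dst_node, edge_id) = 1
idx = torch.LongTensor([[1, 0, 2, 1],          # row: destination node
                        [0, 1, 2, 3]])         # col: directed-edge index
val = torch.ones(4)
e2n = torch.sparse.FloatTensor(idx, val, torch.Size([N, 2 * E]))

edge_msg = torch.randn(2 * E, 8)               # one 8-dim message per directed edge
node_agg = torch.spmm(e2n, edge_msg)           # [N, 8]: sum of incoming edge messages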

import os
import sys

import torch
import torch.optim as optim

dll_path = '%s/build/dll/libs2v.so' % os.path.dirname(os.path.realpath(__file__))
if os.path.exists(dll_path):
    S2VLIB = _s2v_lib(sys.argv)
else:
    S2VLIB = None
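# _s2v_lib wraps the compiled libs2v.so through ctypes; a generic sketch of
# that loading pattern is below. 'libexample.so' and 'do_work' are
# illustrative placeholders, not the actual libs2v.so interface.
import ctypes

lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'libexample.so')
if os.path.exists(lib_path):
    lib = ctypes.CDLL(lib_path)                      # load the shared library
    lib.do_work.restype = ctypes.c_int               # declare the return type
    status = lib.do_work(ctypes.c_void_p(None))      # raw pointers go in as c_void_p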

if __name__ == '__main__':
    sys.path.append('%s/../harvard_cep' % os.path.dirname(os.path.realpath(__file__)))
    from util import resampling_idxes, load_raw_data
    from mol_lib import MOLLIB, MolGraph

    raw_data_dict = load_raw_data()
    test_data = MOLLIB.LoadMolGraph('test', raw_data_dict['test'])

    batch_graph = test_data[0:10]

    S2VLIB.PrepareLoopyBP(batch_graph)
    regressor = Regressor()
    if cmd_args.mode == 'gpu':
        regressor = regressor.cuda()
    if cmd_args.saved_model is not None and cmd_args.saved_model != '':
        if os.path.isfile(cmd_args.saved_model):
            print('loading model from %s' % cmd_args.saved_model)
            if cmd_args.mode == 'cpu':
                # map_location remaps GPU-trained weights onto CPU storage
                regressor.load_state_dict(
                    torch.load(cmd_args.saved_model,
                               map_location=lambda storage, loc: storage))
            else:
                regressor.load_state_dict(torch.load(cmd_args.saved_model))

    if cmd_args.phase == 'test':
        test_data = MOLLIB.LoadMolGraph('test', raw_data_dict['test'])
        test_loss = loop_dataset(test_data, regressor,
                                 list(range(len(test_data))))
        print('\033[93maverage test loss: mae %.5f rmse %.5f\033[0m' %
              (test_loss[0], test_loss[1]))
        sys.exit()

    train_idxes = resampling_idxes(raw_data_dict)
    cooked_data_dict = {}
    for d in raw_data_dict:
        cooked_data_dict[d] = MOLLIB.LoadMolGraph(d, raw_data_dict[d])

    optimizer = optim.Adam(regressor.parameters(), lr=cmd_args.learning_rate)
    # number of minibatches per epoch (ceiling division)
    iter_train = (len(train_idxes) +
                  (cmd_args.batch_size - 1)) // cmd_args.batch_size
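    # Small inline illustration of how these minibatch indices get sliced each
    # epoch; demo_* names are illustrative, the real loop lives in loop_dataset.
    demo_idxes = list(range(len(train_idxes)))
    for pos in range(iter_train):
        demo_selected = demo_idxes[pos * cmd_args.batch_size:
                                   (pos + 1) * cmd_args.batch_size]
        # each slice of indices would become one batch of molecular graphs
        assert 1 <= len(demo_selected) <= cmd_args.batch_size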