Exemplo n.º 1
0
    def _supply_laplacians_etc_to_feed_dict(self, feed_dict, pairs, tvt):
        """Fill feed_dict with per-pair graph inputs for a TF session run.

        For each (g1, g2) pair, supplies node features, their nonzero
        counts, edge indices, incidence matrices, and one Laplacian per
        coarsening level into the placeholder set selected by tvt.

        Args:
            feed_dict: placeholder -> value mapping; mutated in place.
            pairs: iterable of (g1, g2) graph objects.
            tvt: train/val/test tag forwarded to _get_plhdr.

        Returns:
            The same feed_dict object, for call-site convenience.
        """
        if logging_enabled:  # idiomatic truthiness test instead of '== True'
            print(
                "- Entered _supply_laplacians_etc_to_feed_dict Private Method")

        # Pair-invariant constant hoisted out of the loop: one Laplacian
        # is fed per coarsening level.
        num_laplacians = 1
        for i, (g1, g2) in enumerate(pairs):
            # Fail fast: the original asserted only AFTER the (possibly
            # None) incidence matrix had already been fed.
            assert g1.incidence_mat is not None
            feed_dict[self._get_plhdr('features_1',
                                      tvt)[i]] = g1.get_node_inputs()
            feed_dict[self._get_plhdr('features_2',
                                      tvt)[i]] = g2.get_node_inputs()
            feed_dict[self._get_plhdr(
                'num_nonzero_1', tvt)[i]] = g1.get_node_inputs_num_nonzero()
            feed_dict[self._get_plhdr(
                'num_nonzero_2', tvt)[i]] = g2.get_node_inputs_num_nonzero()
            feed_dict[self._get_plhdr('edge_index_1', tvt)[i]] = g1.edge_index
            feed_dict[self._get_plhdr('edge_index_2', tvt)[i]] = g2.edge_index
            feed_dict[self._get_plhdr('incidence_mat_1',
                                      tvt)[i]] = g1.incidence_mat
            feed_dict[self._get_plhdr('incidence_mat_2',
                                      tvt)[i]] = g2.incidence_mat
            for j in range(get_coarsen_level()):
                for k in range(num_laplacians):
                    feed_dict[
                        self._get_plhdr('laplacians_1', tvt)[i][j][k]] = \
                        g1.get_laplacians(j)[k]
                    feed_dict[
                        self._get_plhdr('laplacians_2', tvt)[i][j][k]] = \
                        g2.get_laplacians(j)[k]
        return feed_dict
Exemplo n.º 2
0
 def _supply_laplacians_etc_to_feed_dict(self, feed_dict, pairs, tvt):
     """Fill feed_dict with graph inputs for each (g1, g2) pair.

     In transductive mode only the graphs' global ids are fed (their
     embeddings are presumably resolved via a lookup table -- see
     _create_transductive_gembs_placeholders elsewhere; confirm).
     Otherwise node features, nonzero counts and one Laplacian per
     coarsening level are fed for both graphs of every pair.

     Args:
         feed_dict: placeholder -> value mapping; mutated in place.
         pairs: iterable of (g1, g2) graph objects.
         tvt: train/val/test tag forwarded to _get_plhdr.

     Returns:
         The same feed_dict object, for call-site convenience.
     """
     if is_transductive():
         gemb_lookup_ids_1 = []
         gemb_lookup_ids_2 = []
         # Single pass so generator-style `pairs` inputs are handled.
         for g1, g2 in pairs:
             gemb_lookup_ids_1.append(g1.global_id)
             gemb_lookup_ids_2.append(g2.global_id)
         feed_dict[self._get_plhdr('gemb_lookup_ids_1', tvt)] = \
             gemb_lookup_ids_1
         feed_dict[self._get_plhdr('gemb_lookup_ids_2', tvt)] = \
             gemb_lookup_ids_2
     else:
         # Pair-invariant constant hoisted out of the loop (the original
         # re-assigned it for every pair): one Laplacian per level.
         num_laplacians = 1
         for i, (g1, g2) in enumerate(pairs):
             feed_dict[self._get_plhdr('features_1', tvt)[i]] = \
                 g1.get_node_inputs()
             feed_dict[self._get_plhdr('features_2', tvt)[i]] = \
                 g2.get_node_inputs()
             feed_dict[self._get_plhdr('num_nonzero_1', tvt)[i]] = \
                 g1.get_node_inputs_num_nonzero()
             feed_dict[self._get_plhdr('num_nonzero_2', tvt)[i]] = \
                 g2.get_node_inputs_num_nonzero()
             for j in range(get_coarsen_level()):
                 for k in range(num_laplacians):
                     feed_dict[
                         self._get_plhdr('laplacians_1', tvt)[i][j][k]] = \
                         g1.get_laplacians(j)[k]
                     feed_dict[
                         self._get_plhdr('laplacians_2', tvt)[i][j][k]] = \
                         g2.get_laplacians(j)[k]
     return feed_dict
Exemplo n.º 3
0
 def _coarsen(self, dense_node_inputs, adj):
     """Coarsen the graph and cache permuted inputs plus Laplacians.

     Runs METIS-based coarsening on the adjacency matrix, permutes (and
     pads) the dense node inputs to match the coarsened node ordering,
     and stores one preprocessed Laplacian per produced graph level on
     self.coarsened_laplacians.

     Args:
         dense_node_inputs: node-feature matrix; it is transposed before
             and after perm_data, so perm_data presumably permutes along
             columns -- TODO confirm against its definition.
         adj: adjacency matrix accepted by sp.csr_matrix.
     """
     # Only METIS-family coarsening schemes are supported by this path.
     assert ('metis_' in FLAGS.coarsening)
     self.num_level = get_coarsen_level()
     assert (self.num_level >= 1)
     graphs, perm = coarsen(sp.csr_matrix(adj), levels=self.num_level,
                            self_connections=False)
     # Reorder (and pad) the node inputs so rows line up with the
     # coarsened graphs' node numbering.
     permuted_padded_dense_node_inputs = perm_data(
         dense_node_inputs.T, perm).T
     self.sparse_permuted_padded_dense_node_inputs = self._preprocess_inputs(
         sp.csr_matrix(permuted_padded_dense_node_inputs))
     self.coarsened_laplacians = []
     for g in graphs:
         # Each level contributes a single-element list of Laplacians.
         self.coarsened_laplacians.append([self._preprocess_adj(g.todense())])
     # NOTE(review): this encodes the invariant that coarsen() returns
     # num_laplacians * num_level + 1 graphs -- verify against coarsen().
     assert (len(self.coarsened_laplacians) == self.num_laplacians * self.num_level + 1)
Exemplo n.º 4
0
    def _supply_laplacians_etc_to_feed_dict(self, feed_dict, origin_gs, pn_gs,
                                            tvt):
        """Fill feed_dict with inputs for origin graphs and pos/neg graphs.

        Origin graphs feed the '*_1' placeholder family; the pos/neg
        graphs feed the '*_2' family. The two sides were byte-for-byte
        duplicated loops, so the common body is factored into
        _feed_one_side.

        Args:
            feed_dict: placeholder -> value mapping; mutated in place.
            origin_gs: iterable of origin graph objects.
            pn_gs: iterable of positive/negative graph objects.
            tvt: train/val/test tag forwarded to _get_plhdr.

        Returns:
            The same feed_dict object, for call-site convenience.
        """
        self._feed_one_side(feed_dict, origin_gs, '1', tvt)
        self._feed_one_side(feed_dict, pn_gs, '2', tvt)
        return feed_dict

    def _feed_one_side(self, feed_dict, graphs, side, tvt):
        """Feed features, nonzero counts, and Laplacians for one side.

        Args:
            feed_dict: placeholder -> value mapping; mutated in place.
            graphs: iterable of graph objects for this side.
            side: '1' or '2' -- suffix selecting the placeholder family.
            tvt: train/val/test tag forwarded to _get_plhdr.
        """
        # One Laplacian per coarsening level, invariant across graphs.
        num_laplacians = 1
        for i, g in enumerate(graphs):
            feed_dict[self._get_plhdr('features_' + side, tvt)[i]] = \
                g.get_node_inputs()
            feed_dict[self._get_plhdr('num_nonzero_' + side, tvt)[i]] = \
                g.get_node_inputs_num_nonzero()
            for j in range(get_coarsen_level()):
                for k in range(num_laplacians):
                    feed_dict[
                        self._get_plhdr('laplacians_' + side, tvt)[i][j][k]] = \
                        g.get_laplacians(j)[k]
Exemplo n.º 5
0
 def _supply_laplacians_etc_to_feed_dict(self, feed_dict, pairs, tvt):
     """Fill feed_dict with per-pair graph inputs for a TF session run.

     For each (g1, g2) pair, supplies node features, their nonzero
     counts, and one Laplacian per coarsening level into the placeholder
     set selected by tvt.

     Args:
         feed_dict: placeholder -> value mapping; mutated in place.
         pairs: iterable of (g1, g2) graph objects.
         tvt: train/val/test tag forwarded to _get_plhdr.

     Returns:
         The same feed_dict object, for call-site convenience.
     """
     # Pair-invariant constant hoisted out of the loop (the original
     # re-assigned it for every pair): one Laplacian per level.
     num_laplacians = 1
     for i, (g1, g2) in enumerate(pairs):
         feed_dict[self._get_plhdr('features_1', tvt)[i]] = \
             g1.get_node_inputs()
         feed_dict[self._get_plhdr('features_2', tvt)[i]] = \
             g2.get_node_inputs()
         feed_dict[self._get_plhdr('num_nonzero_1', tvt)[i]] = \
             g1.get_node_inputs_num_nonzero()
         feed_dict[self._get_plhdr('num_nonzero_2', tvt)[i]] = \
             g2.get_node_inputs_num_nonzero()
         for j in range(get_coarsen_level()):
             for k in range(num_laplacians):
                 feed_dict[self._get_plhdr('laplacians_1', tvt)[i][j][k]] = \
                     g1.get_laplacians(j)[k]
                 feed_dict[self._get_plhdr('laplacians_2', tvt)[i][j][k]] = \
                     g2.get_laplacians(j)[k]
     return feed_dict
Exemplo n.º 6
0
    def __init__(self, input_dim, data, dist_calculator):
        """Set up placeholders and training graphs for the ranking model.

        Placeholders must exist before super().__init__() runs, since the
        base class presumably builds the TF graph on top of them -- the
        statement order here is load-bearing.

        Args:
            input_dim: dimensionality of the node input features.
            data: dataset object handed to _load_pos_neg_train_pairs.
            dist_calculator: distance calculator handed to the same loader.
        """
        self.input_dim = input_dim
        # (1 + num_neg) graphs on the second side per anchor -- presumably
        # one positive plus num_neg negatives; TODO confirm at the loader.
        self.batch_size = FLAGS.batch_size * (1 + FLAGS.num_neg)
        # Create placeholders.
        self.laplacians_1, self.laplacians_2, self.features_1, self.features_2, \
        self.num_nonzero_1, self.num_nonzero_2, self.dropout, \
        self.val_test_laplacians_1, self.val_test_laplacians_2, \
        self.val_test_features_1, self.val_test_features_2, \
        self.val_test_num_nonzero_1, self.val_test_num_nonzero_2 = \
            self._create_basic_placeholders(FLAGS.batch_size, self.batch_size,
                                            level=get_coarsen_level())

        # self.pos_interact_score = []

        # Build the model.
        super(SiameseRankingModel, self).__init__()
        self.origin_gs, self.pos_gs, self.neg_gs = self._load_pos_neg_train_pairs(
            data, dist_calculator)
Exemplo n.º 7
0
 def __init__(self, input_dim, data, dist_calculator):
     """Set up placeholders, labels, and training triples for regression.

     Placeholders and label tensors are created before super().__init__()
     because the base class presumably builds the TF graph on top of
     them -- the statement order here is load-bearing.

     Args:
         input_dim: dimensionality of the node input features.
         data: dataset object handed to _load_train_triples.
         dist_calculator: distance calculator handed to the same loader.
     """
     self.input_dim = input_dim
     print('original_input_dim', self.input_dim)
     # Train and val/test placeholder families, unpacked in one shot.
     self.laplacians_1, self.laplacians_2, self.features_1, self.features_2, \
     self.num_nonzero_1, self.num_nonzero_2, self.dropout, \
     self.val_test_laplacians_1, self.val_test_laplacians_2, \
     self.val_test_features_1, self.val_test_features_2, \
     self.val_test_num_nonzero_1, self.val_test_num_nonzero_2 = \
         self._create_basic_placeholders(FLAGS.batch_size, FLAGS.batch_size,
                                         level=get_coarsen_level())
     # Ground-truth targets: one scalar per training pair; a single
     # scalar at val/test time (shape (1, 1)).
     self.train_y_true = tf.placeholder(tf.float32,
                                        shape=(FLAGS.batch_size, 1))
     self.val_test_y_true = tf.placeholder(tf.float32, shape=(1, 1))
     # Build the model.
     super(SiameseRegressionModel, self).__init__()
     # Similarity kernel parameterized by 'yeta'/'scale' flags --
     # semantics depend on create_sim_kernel; TODO confirm.
     self.sim_kernel = create_sim_kernel(FLAGS.sim_kernel,
                                         get_flags('yeta'),
                                         get_flags('scale'))
     self.train_triples = self._load_train_triples(data, dist_calculator)
Exemplo n.º 8
0
 def __init__(self, input_dim, data, dist_sim_calculator):
     """Set up placeholders, labels, and training triples for regression.

     Supports two placeholder regimes: transductive (graph-embedding
     lookups) and the basic per-graph-input one. Placeholders are created
     before super().__init__() because the base class presumably builds
     the TF graph on top of them -- statement order is load-bearing.

     Args:
         input_dim: dimensionality of the node input features.
         data: dataset object handed to _load_train_triples.
         dist_sim_calculator: distance/similarity calculator handed to
             the same loader.
     """
     self.input_dim = input_dim
     print('original_input_dim', self.input_dim)
     if is_transductive():
         # Transductive mode: placeholders for embedding-table lookups.
         self._create_transductive_gembs_placeholders(
             data, FLAGS.batch_size, FLAGS.batch_size)
     else:
         # Inductive mode: per-graph feature/Laplacian placeholders.
         self._create_basic_placeholders(FLAGS.batch_size,
                                         FLAGS.batch_size,
                                         level=get_coarsen_level())
     # Ground-truth targets: one scalar per training pair; a single
     # scalar at val/test time (shape (1, 1)).
     self.train_y_true = tf.placeholder(tf.float32,
                                        shape=(FLAGS.batch_size, 1))
     self.val_test_y_true = tf.placeholder(tf.float32, shape=(1, 1))
     # Build the model.
     super(SiameseRegressionModel, self).__init__()
     # Distance/similarity kernel parameterized by 'yeta'/'scale' flags --
     # semantics depend on create_ds_kernel; TODO confirm.
     self.ds_kernel = create_ds_kernel(FLAGS.ds_kernel, get_flags('yeta'),
                                       get_flags('scale'))
     self.train_triples = self._load_train_triples(data,
                                                   dist_sim_calculator)
Exemplo n.º 9
0
 def __init__(self, input_dim, data, dist_calculator):
     """Set up placeholders, labels, and training pairs for classification.

     Binary (2-class) setup. Placeholders and label tensors are created
     before super().__init__() because the base class presumably builds
     the TF graph on top of them -- statement order is load-bearing.

     Args:
         input_dim: dimensionality of the node input features.
         data: dataset object handed to _load_pos_neg_train_pairs.
         dist_calculator: distance calculator handed to the same loader.
     """
     self.input_dim = input_dim
     print('original_input_dim', self.input_dim)
     # Two classes: similar vs dissimilar pairs.
     self.num_class = 2
     # Train and val/test placeholder families, unpacked in one shot.
     self.laplacians_1, self.laplacians_2, self.features_1, self.features_2, \
     self.num_nonzero_1, self.num_nonzero_2, self.dropout, \
     self.val_test_laplacians_1, self.val_test_laplacians_2, \
     self.val_test_features_1, self.val_test_features_2, \
     self.val_test_num_nonzero_1, self.val_test_num_nonzero_2 = \
         self._create_basic_placeholders(FLAGS.batch_size, FLAGS.batch_size,
                                         level=get_coarsen_level())
     # One-hot (num_class-wide) targets per pair; a single row at val/test.
     self.train_y_true = tf.placeholder(
         tf.float32, shape=(FLAGS.batch_size, self.num_class))
     self.val_test_y_true = tf.placeholder(
         tf.float32, shape=(1, self.num_class))
     # Build the model.
     super(SiameseClassificationModel, self).__init__()
     self.pos_pairs, self.neg_pairs = self._load_pos_neg_train_pairs(
         data, dist_calculator)
     # Toggled during sampling to pick from pos_pairs or neg_pairs.
     self.cur_sample_class = 1  # 1 for pos, -1 for neg
Exemplo n.º 10
0
    def __init__(self, input_dim, data, dist_sim_calculator):
        """Set up placeholders, labels, and training triples for regression.

        Variant driven by the `ec` config object (not FLAGS) and TF2's
        compat.v1 placeholder API. Placeholders are created before
        super().__init__() because the base class presumably builds the
        TF graph on top of them -- statement order is load-bearing.

        Args:
            input_dim: dimensionality of the node input features.
            data: dataset object handed to _load_train_triples.
            dist_sim_calculator: distance/similarity calculator handed to
                the same loader.
        """
        if logging_enabled == True:
            print(
                "- Entered SiameseRegressionModel::__init__ Constructor Method"
            )
        self.input_dim = input_dim
        print('original_input_dim', self.input_dim)
        self._create_basic_placeholders(ec.batch_size,
                                        ec.batch_size,
                                        level=get_coarsen_level())

        # Ground-truth targets: one scalar per training pair; a single
        # scalar at val/test time (shape (1, 1)).
        self.train_y_true = tf.compat.v1.placeholder(tf.float32,
                                                     shape=(ec.batch_size, 1))
        self.val_test_y_true = tf.compat.v1.placeholder(tf.float32,
                                                        shape=(1, 1))

        # Build the model.
        super(SiameseRegressionModel, self).__init__()
        # Kernel parameters yeta/scale come straight from the ec config --
        # semantics depend on create_ds_kernel; TODO confirm.
        self.ds_kernel = create_ds_kernel(ec.ds_kernel, ec.yeta, ec.scale)
        self.train_triples = self._load_train_triples(data,
                                                      dist_sim_calculator)
        print("dist_sim_calculator-object_type", dist_sim_calculator)