Example #1
    def __init__(self,
                 num_units,
                 adj_mx,
                 max_diffusion_step,
                 num_nodes,
                 num_proj=None,
                 activation=tf.nn.tanh,
                 reuse=None,
                 filter_type="laplacian",
                 use_gc_for_ru=True,
                 **kwargs):
        super(DCLSTMCell, self).__init__(**kwargs)

        self._activation = activation
        self._num_nodes = num_nodes
        self._num_proj = num_proj
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        supports = []
        if filter_type == "laplacian":
            supports.append(
                utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))
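Every constructor in these examples builds its graph supports with two helpers from a utils module that the snippets do not include. As a rough sketch of what such helpers typically compute in DCRNN-style code (the exact implementations may differ per repository): calculate_random_walk_matrix returns the transition matrix D^{-1}W, the form the comments in Example #5 below confirm, and calculate_scaled_laplacian returns the rescaled Laplacian 2L/lambda_max - I used for Chebyshev-style filtering.

import numpy as np
import scipy.sparse as sp

def calculate_random_walk_matrix(adj_mx):
    # Transition matrix D^{-1} W of the (weighted) graph; callers transpose it.
    adj_mx = sp.coo_matrix(adj_mx)
    d = np.array(adj_mx.sum(1)).flatten()
    d_inv = np.zeros(d.shape, dtype=np.float64)
    d_inv[d > 0] = 1.0 / d[d > 0]                  # isolated nodes keep zero rows
    return sp.diags(d_inv).dot(adj_mx).tocoo()

def calculate_scaled_laplacian(adj_mx, lambda_max=2):
    # Rescaled symmetric normalized Laplacian 2 L / lambda_max - I.
    adj_mx = np.maximum(adj_mx, adj_mx.T)          # symmetrize
    d = adj_mx.sum(1)
    d_inv_sqrt = np.zeros(d.shape, dtype=np.float64)
    d_inv_sqrt[d > 0] = d[d > 0] ** -0.5
    lap = np.eye(adj_mx.shape[0]) - d_inv_sqrt[:, None] * adj_mx * d_inv_sqrt[None, :]
    if lambda_max is None:                         # estimate the largest eigenvalue
        lambda_max = np.linalg.eigvalsh(lap).max()
    return sp.coo_matrix((2.0 / lambda_max) * lap - np.eye(adj_mx.shape[0]))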
Example #2
    def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, num_proj=None,
                 activation=tf.nn.tanh, reuse=None, filter_type="laplacian", use_gc_for_ru=True):
        """

        :param num_units:
        :param adj_mx:
        :param max_diffusion_step:
        :param num_nodes:
        :param input_size:
        :param num_proj:
        :param activation:
        :param reuse:
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """
        super(DCGRUCell, self).__init__(_reuse=reuse)
        self._activation = activation
        self._num_nodes = num_nodes
        self._num_proj = num_proj
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        supports = []
        if filter_type == "laplacian":
            supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))
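The _build_sparse_matrix helper called in the final loop is also not shown. In the TensorFlow variants it has to turn a SciPy sparse matrix into a tf.SparseTensor whose indices are ordered row-major, since tf.sparse_tensor_dense_matmul expects canonical ordering; a minimal sketch under that assumption:

import numpy as np
import scipy.sparse as sp
import tensorflow as tf

def build_sparse_matrix(L):
    # SciPy sparse matrix -> tf.SparseTensor with row-major sorted indices.
    L = sp.coo_matrix(L)
    indices = np.column_stack((L.row, L.col)).astype(np.int64)
    order = np.lexsort((indices[:, 1], indices[:, 0]))   # sort by row, then column
    return tf.SparseTensor(indices[order], L.data[order], L.shape)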
Example #3
    def __init__(self,
                 input_dim,
                 num_units,
                 adj_mat,
                 max_diffusion_step,
                 num_nodes,
                 num_proj=None,
                 activation=torch.tanh,
                 use_gc_for_ru=True,
                 filter_type='laplacian'):
        """
        :param input_dim: dimension of the input features per node
        :param num_units: the hidden dim of rnn
        :param adj_mat: the (weighted) adjacency matrix of the graph, in numpy ndarray form
        :param max_diffusion_step: the max diffusion step
        :param num_nodes: number of nodes in the graph
        :param num_proj: num of output dims (e.g. 1 for speed); None disables the output projection
        :param activation: if None, don't do activation for cell state
        :param use_gc_for_ru: decide whether to use graph convolution inside rnn
        :param filter_type: "laplacian", "random_walk", "dual_random_walk"
        """
        super(DCGRUCell, self).__init__()
        self._activation = activation
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._num_proj = num_proj
        self._use_gc_for_ru = use_gc_for_ru
        self._supports = []
        supports = []
        if filter_type == "laplacian":
            supports.append(
                utils.calculate_scaled_laplacian(adj_mat, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mat).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mat))
            supports.append(utils.calculate_random_walk_matrix(adj_mat.T))
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mat))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(
                support).cuda())  # to PyTorch sparse tensor
        # supports = utils.calculate_scaled_laplacian(adj_mat, lambda_max=None)  # scipy coo matrix
        # self._supports = self._build_sparse_matrix(supports).cuda()  # to pytorch sparse tensor

        self.dconv_gate = DiffusionGraphConv(
            supports=self._supports,
            input_dim=input_dim,
            hid_dim=num_units,
            num_nodes=num_nodes,
            max_diffusion_step=max_diffusion_step,
            output_dim=num_units * 2)
        self.dconv_candidate = DiffusionGraphConv(
            supports=self._supports,
            input_dim=input_dim,
            hid_dim=num_units,
            num_nodes=num_nodes,
            max_diffusion_step=max_diffusion_step,
            output_dim=num_units)
        if num_proj is not None:
            self.project = nn.Linear(self._num_units, self._num_proj)
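Here the comment "to PyTorch sparse tensor" and the .cuda() call imply that _build_sparse_matrix returns a torch sparse tensor rather than a tf.SparseTensor. A minimal sketch of such a conversion, assuming a SciPy matrix comes in:

import numpy as np
import scipy.sparse as sp
import torch

def build_sparse_matrix(L):
    # SciPy sparse matrix -> torch sparse COO tensor (the caller moves it to GPU).
    L = sp.coo_matrix(L)
    indices = torch.from_numpy(np.vstack((L.row, L.col)).astype(np.int64))
    values = torch.from_numpy(L.data.astype(np.float32))
    return torch.sparse_coo_tensor(indices, values, torch.Size(L.shape))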
Example #4
    def __init__(self,
                 num_units,
                 adj_mx,
                 max_diffusion_step,
                 num_nodes,
                 batch_size,
                 num_proj=None,
                 activation=tf.nn.tanh,
                 reuse=None,
                 filter_type="laplacian",
                 use_gc_for_ru=True):
        """

        :param num_units:
        :param adj_mx:
        :param max_diffusion_step:
        :param num_nodes:
        :param batch_size: batch size, used to tile the adjacency matrix.
        :param num_proj:
        :param activation:
        :param reuse:
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """
        super(DCLSTMCellAtt, self).__init__(_reuse=reuse)
        self._activation = activation
        self._num_nodes = num_nodes
        self._num_proj = num_proj
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        supports = []
        if filter_type == "laplacian":
            supports.append(
                utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))

        self._bias_mt = tf.convert_to_tensor(
            utils.adj_to_bias(np.expand_dims(adj_mx, axis=0),
                              [self._num_nodes], nhood=1),
            dtype=tf.float32)
        _adj_mx = tf.convert_to_tensor(adj_mx)
        self._adj_mx_repeat = tf.tile(tf.expand_dims(_adj_mx, axis=0),
                                      [batch_size, 1, 1])
        self._supports_dense = []
        for support in self._supports:
            self._supports_dense.append(tf.sparse.to_dense(support))
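Example #4 additionally builds an attention bias with utils.adj_to_bias. In the GAT reference code that this name points to, the bias is zero for node pairs reachable within nhood hops (self-loops included) and a large negative constant elsewhere, so a subsequent softmax effectively ignores disconnected pairs. A rough sketch under that assumption:

import numpy as np

def adj_to_bias(adj, sizes, nhood=1):
    # adj: (batch, N, N) adjacency matrices; sizes: number of valid nodes per graph.
    # Returns an additive attention bias: 0 where j is reachable from i within
    # `nhood` hops, -1e9 elsewhere (and for padded nodes).
    nb_graphs, nb_nodes = adj.shape[0], adj.shape[1]
    mt = np.zeros(adj.shape, dtype=np.float64)
    for g in range(nb_graphs):
        reach = np.eye(nb_nodes)
        for _ in range(nhood):
            reach = reach @ (adj[g] + np.eye(nb_nodes))
        n = sizes[g]
        mt[g, :n, :n] = (reach[:n, :n] > 0.0).astype(np.float64)
    return -1e9 * (1.0 - mt)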
Example #5
 def __init__(self, num_units, adj_mx, max_diffusion_step, num_nodes, network_type, graphEmbedFile, num_proj=None,
              activation=tf.nn.tanh, reuse=None, filter_type="laplacian"):
     """
     :param num_units:
     :param adj_mx:
     :param max_diffusion_step:
     :param num_nodes:
     :param network_type: 'gconv' to use graph convolution for the reset and update gates.
     :param graphEmbedFile: graph embedding file (stored as self._graphEmbedFile).
     :param num_proj:
     :param activation:
     :param reuse:
     :param filter_type: "laplacian", "random_walk", "dual_random_walk".
     """
     super(DCGRUCell, self).__init__(_reuse=reuse)
     self._activation = activation
     self._num_nodes = num_nodes
     self._num_proj = num_proj
     self._num_units = num_units
     # print(num_nodes, num_proj, num_units)
     # 207 None 64: when creating cell
     # 207 1 64: when creating cell_with_projection
     self._max_diffusion_step = max_diffusion_step
     self._supports = []
     self._use_gc_for_ru = (network_type=='gconv')
     self._graphEmbedFile = graphEmbedFile
     supports = []
     if filter_type == "laplacian":
         supports.append(utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
     elif filter_type == "random_walk":
         supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
     elif filter_type == "dual_random_walk":
         # supports have now two matrices for the two directions
         # all of them are of form D^{-1}W
         supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
         supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
     else:
         supports.append(utils.calculate_scaled_laplacian(adj_mx))
     # print('This is the number of matrices: ', len(supports))
     # 2
     # There are 2 matrices for bi-directional random walk
     # Hence either one or two matrices will be in list of supports
     for support in supports:
         self._supports.append(self._build_sparse_matrix(support))
Example #6
    def __init__(self,
                 num_units,
                 adj_mx,
                 max_diffusion_step,
                 num_nodes,
                 nonlinearity='tanh',
                 filter_type="laplacian",
                 use_gc_for_ru=True):
        """

        :param num_units:
        :param adj_mx:
        :param max_diffusion_step:
        :param num_nodes:
        :param nonlinearity:
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """

        super().__init__()
        self._activation = torch.tanh if nonlinearity == 'tanh' else torch.relu
        # support other nonlinearities up here?
        self._num_nodes = num_nodes
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        self._mfd = PoincareManifold()
        supports = []
        if filter_type == "laplacian":
            supports.append(
                utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))

        self._fc_params = LayerParams(self, 'fc')
        self._gconv_params = LayerParams(self, 'gconv')
Example #7
    def evaluate(self, sess, **kwargs):

        y_preds_all = []
        half_length = int(len(self.clusters) / 2)
        sclusters = self.clusters[0:32]  # evaluates the first 32 clusters; half_length is unused here
        for cluster in sclusters:

            node_count, adj_mx = self.cluster_data(cluster)
            adj_mx = utils.calculate_random_walk_matrix(adj_mx).T
            adj_mx = self._build_sparse_matrix(adj_mx)
            global_step = sess.run(tf.train.get_or_create_global_step())
            scaler_path = self._kwargs['data'].get(
                'dataset_dir') + '/scaler.npy'
            scaler_data_ = np.load(scaler_path)
            mean, var = scaler_data_[0], scaler_data_[1]
            scaler = StandardScaler(mean=mean, std=var)

            # change val to test before run
            test_data_path = self._kwargs['data'].get(
                'dataset_dir') + '/test_' + str(cluster) + '.tfrecords'
            test_dataset = tf.data.TFRecordDataset([test_data_path])
            test_dataset = test_dataset.map(self._parse_record_fn)
            test_dataset = test_dataset.make_one_shot_iterator()
            test_next_element = test_dataset.get_next()

            test_results = self.run_epoch_generator(sess,
                                                    self._test_model,
                                                    test_next_element,
                                                    adj_mx,
                                                    return_output=True,
                                                    training=False)
            test_loss, y_preds = test_results['loss'], test_results['outputs']
            utils.add_simple_summary(self._writer, ['loss/test_loss'],
                                     [test_loss],
                                     global_step=global_step)

            y_preds = np.concatenate(y_preds, axis=0)
            y_preds = scaler.inverse_transform(y_preds[:, self.horizon - 1, :,
                                                       0])
            y_preds = y_preds[:, 0:node_count]

            y_preds_all.append(y_preds)

        y_preds_all = np.concatenate(y_preds_all, axis=1)
        return y_preds_all
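The StandardScaler reconstructed here from the saved (mean, std) pair in scaler.npy is not defined in the snippet; a minimal sketch of the interface this code relies on:

class StandardScaler:
    # Minimal sketch: z-score normalization with fixed statistics.
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def transform(self, data):
        return (data - self.mean) / self.std

    def inverse_transform(self, data):
        return data * self.std + self.mean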
Example #8
    def __init__(self,
                 num_units,
                 adj_mx,
                 max_diffusion_step,
                 num_nodes,
                 num_proj=None,
                 activation=tf.nn.tanh,
                 reuse=None,
                 filter_type="laplacian",
                 use_gc_for_ru=True,
                 output_activation=None,
                 proximity_threshold=None):
        """

        :param num_units:
        :param adj_mx:
        :param max_diffusion_step:
        :param num_nodes:
        :param num_proj:
        :param activation:
        :param reuse:
        :param filter_type: one of "laplacian", "laplacian_lambda_max_2", "laplacian_lambda_max_1",
            "random_walk", "dual_random_walk", "ignore_spatial_dependency".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        :param output_activation: optional activation applied to the projected output.
        :param proximity_threshold: adjacency entries below this value are zeroed out.
        """
        super(DCGRUCell, self).__init__(_reuse=reuse)
        self._activation = activation
        self._output_activation = output_activation
        self._num_nodes = num_nodes
        self._num_proj = num_proj
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._use_gc_for_ru = use_gc_for_ru

        self.id_mx = utils.calculate_identity(adj_mx)
        self._supports = []
        supports = []

        adj_mx[adj_mx < proximity_threshold] = 0
        # adj_mx[adj_mx < proximity_threshold['end_proximity']] = 0
        # self.proximity_threshold = proximity_threshold
        # self.pct_adj_mx = percentile_nd(adj_mx).astype(np.float32)

        if filter_type == "laplacian":
            supports.append(
                utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "laplacian_lambda_max_2":
            supports.append(
                utils.calculate_scaled_laplacian(adj_mx, lambda_max=2))
        elif filter_type == "laplacian_lambda_max_1":
            supports.append(
                utils.calculate_scaled_laplacian(adj_mx, lambda_max=1))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        elif filter_type == "ignore_spatial_dependency":
            supports.append(self.id_mx)
        else:
            raise ValueError("Invalid filter_type: {}".format(filter_type))
        for support in supports:

            # support = np.asarray(support.todense())
            # threshold = \
            #     linear_cosine_decay_start_end(start=self.proximity_threshold['start_proximity'],
            #                                   end=self.proximity_threshold['end_proximity'],
            #                                   global_step=tf.train.get_or_create_global_step(),
            #                                   decay_steps=self.proximity_threshold['proximity_decay_steps'],
            #                                   )
            # support = thresholded_dense_to_sparse(support, self.pct_adj_mx, threshold=threshold)
            #
            # self._supports.append(support)

            self._supports.append(self._build_sparse_matrix(support))
Example #9
    def _gconv(self, inputs, state, output_size, bias_start=0.0):
        """Graph convolution between input and the graph matrix.

        :param args: a 2D Tensor or a list of 2D, batch x n, Tensors.
        :param output_size:
        :param bias:
        :param bias_start:
        :param scope:
        :return:
        """
        # Reshape input and state to (batch_size, num_nodes, input_dim/state_dim)
        batch_size = inputs.get_shape()[0].value
        inputs = tf.reshape(inputs, (batch_size, self._num_nodes, -1))
        state = tf.reshape(state, (batch_size, self._num_nodes, -1))
        inputs = tf.cast(inputs, dtype=tf.float32)
        inputs_and_state = tf.concat([inputs, state], axis=2)
        input_size = inputs_and_state.get_shape()[2].value
        dtype = inputs.dtype

        x = inputs_and_state
        x0 = tf.transpose(x,
                          perm=[1, 2,
                                0])  # (num_nodes, total_arg_size, batch_size)
        x0 = tf.reshape(x0, shape=[self._num_nodes, input_size * batch_size])
        x = tf.expand_dims(x0, axis=0)

        seq_fts = tf.layers.conv1d(inputs, 8, 1, use_bias=False)
        f_1 = tf.layers.conv1d(seq_fts, 1, 1)
        f_2 = tf.layers.conv1d(seq_fts, 1, 1)
        logits = f_1 + tf.transpose(f_2, [0, 2, 1])
        coefs = tf.nn.softmax(logits)

        # Attention-based supports per sample (the 64 here is presumably the
        # hard-coded batch size); note they are built but not used below, so the
        # diffusion still runs on the precomputed self._supports.
        supports = []
        for i in range(64):
            supports.append(utils.calculate_random_walk_matrix(coefs[i]))

        scope = tf.get_variable_scope()
        with tf.variable_scope(scope):  # GCN
            if self._max_diffusion_step == 0:
                pass
            else:
                for support in self._supports:
                    support = tf.cast(support, dtype=tf.float32)
                    x1 = tf.sparse_tensor_dense_matmul(support, x0)
                    x = self._concat(x, x1)

                    for k in range(2, self._max_diffusion_step + 1):
                        x2 = 2 * tf.sparse_tensor_dense_matmul(support,
                                                               x1) - x0
                        x = self._concat(x, x2)
                        x1, x0 = x2, x1

            num_matrices = len(
                self._supports
            ) * self._max_diffusion_step + 1  # Adds for x itself.
            x = tf.reshape(
                x,
                shape=[num_matrices, self._num_nodes, input_size, batch_size])
            x = tf.transpose(
                x, perm=[3, 1, 2,
                         0])  # (batch_size, num_nodes, input_size, order)
            x = tf.reshape(x,
                           shape=[
                               batch_size * self._num_nodes,
                               input_size * num_matrices
                           ])
            weights = tf.get_variable(
                'weights', [input_size * num_matrices, output_size],
                dtype=dtype,
                initializer=tf.contrib.layers.xavier_initializer())
            x = tf.matmul(
                x, weights)  # (batch_size * self._num_nodes, output_size); apply the learned weights

            biases = tf.get_variable("biases", [output_size],
                                     dtype=dtype,
                                     initializer=tf.constant_initializer(
                                         bias_start, dtype=dtype))
            x = tf.nn.bias_add(x, biases)

        # Reshape res back to 2D: (batch_size, num_node, state_dim) -> (batch_size, num_node * state_dim)
        return tf.reshape(x, [batch_size, self._num_nodes * output_size])
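The inner loop implements the K-step diffusion with the Chebyshev-style recurrence x_k = 2*S*x_{k-1} - x_{k-2}, concatenating every step along a leading axis before the dense projection. A dense NumPy sketch of the same recurrence for a single support S and feature matrix x0 (names here are illustrative only):

import numpy as np

def diffusion_steps(support, x0, max_diffusion_step):
    # support: (num_nodes, num_nodes) graph matrix; x0: (num_nodes, feat).
    # Returns [x0, S x0, T_2(S) x0, ..., T_K(S) x0], mirroring the loop above.
    feats = [x0]
    if max_diffusion_step > 0:
        x1 = support @ x0
        feats.append(x1)
        for _ in range(2, max_diffusion_step + 1):
            x2 = 2 * support @ x1 - x0
            feats.append(x2)
            x1, x0 = x2, x1
    return feats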
Example #10
    def _train(self,
               sess,
               base_lr,
               epoch,
               steps,
               patience=50,
               epochs=100,
               min_learning_rate=2e-6,
               lr_decay_ratio=0.1,
               save_model=1,
               test_every_n_epochs=10,
               **train_kwargs):
        history = []
        min_val_loss = float('inf')
        wait = 0

        max_to_keep = train_kwargs.get('max_to_keep', 100)
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=max_to_keep)
        model_filename = train_kwargs.get('model_filename')
        if model_filename is not None:
            saver.restore(sess, model_filename)
            self._epoch = epoch + 1
            #print('validation loss', val_loss)
        else:
            sess.run(tf.global_variables_initializer())
        self._logger.info('Start training ...')

        #cluster_arr = [38,0,62, 56, 52, 20, 48, 28, 46, 30, 33, 39, 24, 16, 14, 58]
        #remaining_list = list(set(self.clusters) - set(cluster_arr))
        #setOfSix = []
        #while len(setOfSix) < 15:
        #    setOfSix.append(random.choice(remaining_list))
        #sclusters = cluster_arr + setOfSix

        while self._epoch <= epochs:

            # Learning rate schedule.
            new_lr = max(
                min_learning_rate,
                base_lr *
                (lr_decay_ratio**np.sum(self._epoch >= np.array(steps))))
            self.set_lr(sess=sess, lr=new_lr)

            start_time = time.time()

            self.node_count_seen = 0
            self.accumulated_training_loss = 0

            # Train on a shuffled half of the clusters this epoch.
            half_length = int(len(self.clusters) / 2)
            sclusters = self.clusters[0:half_length]

            random.shuffle(sclusters)
            for cluster in sclusters:

                node_count, adj_mx = self.cluster_data(cluster)
                train_data_path = self._kwargs['data'].get(
                    'dataset_dir') + '/train_' + str(cluster) + '.tfrecords'
                train_dataset = tf.data.TFRecordDataset([train_data_path])
                train_dataset = train_dataset.map(self._parse_record_fn)
                train_iterator = train_dataset.make_one_shot_iterator()
                train_next_element = train_iterator.get_next()

                val_data_path = self._kwargs['data'].get(
                    'dataset_dir') + '/val_' + str(cluster) + '.tfrecords'
                val_dataset = tf.data.TFRecordDataset([val_data_path])
                val_dataset = val_dataset.map(self._parse_record_fn)
                val_iterator = val_dataset.make_one_shot_iterator()
                val_next_element = val_iterator.get_next()

                adj_mx = utils.calculate_random_walk_matrix(adj_mx).T
                adj_mx = self._build_sparse_matrix(adj_mx)

                train_results = self.run_epoch_generator(sess,
                                                         self._train_model,
                                                         train_next_element,
                                                         adj_mx,
                                                         training=True,
                                                         writer=self._writer)
                train_loss, train_mae = train_results['loss'], train_results[
                    'mae']

                if train_loss > 1e5:
                    self._logger.warning(
                        'Gradient explosion detected. Ending...')
                    break
                val_results = self.run_epoch_generator(sess,
                                                       self._test_model,
                                                       val_next_element,
                                                       adj_mx,
                                                       training=False)
                val_loss, val_mae = np.asscalar(
                    val_results['loss']), np.asscalar(val_results['mae'])

            end_time = time.time()
            message = 'Epoch [{}/{}]  train_mae: {:.4f}, val_mae: {:.4f} lr:{:.6f} {:.1f}s'.format(
                self._epoch, epochs, train_loss, val_loss, new_lr,
                (end_time - start_time))
            self._logger.info(message)

            if val_loss <= min_val_loss:
                wait = 0
                if save_model > 0:
                    model_filename = self.save(sess, val_loss)
                self._logger.info(
                    'Val loss decrease from %.4f to %.4f, saving to %s' %
                    (min_val_loss, val_loss, model_filename))
                min_val_loss = val_loss
            else:
                wait += 1
                if wait > patience:
                    self._logger.warning('Early stopping at epoch: %d' %
                                         self._epoch)
                    break

            history.append(val_loss)
            # Increases epoch.
            self._epoch += 1
            sys.stdout.flush()

        return np.min(history)
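The learning-rate schedule in this loop is a step decay: every milestone in steps that the current epoch has passed multiplies base_lr by lr_decay_ratio, and the result is clipped from below at min_learning_rate. A small illustration with made-up milestones:

import numpy as np

base_lr, lr_decay_ratio, min_learning_rate = 0.01, 0.1, 2e-6
steps = np.array([20, 30, 40])   # hypothetical decay milestones

for epoch in (5, 25, 35, 50):
    new_lr = max(min_learning_rate,
                 base_lr * (lr_decay_ratio ** np.sum(epoch >= steps)))
    print(epoch, new_lr)         # 5 -> 0.01, 25 -> 0.001, 35 -> 0.0001, 50 -> 1e-05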
Example #11
    def __init__(self,
                 args,
                 num_units,
                 adj_mx,
                 squeeze_and_excitation,
                 se_activate,
                 excitation_rate,
                 r,
                 diffusion_with_graph_kernel,
                 graph_kernel_mode,
                 cell_forward_mode,
                 max_diffusion_step,
                 num_nodes,
                 num_proj=None,
                 activation=tf.nn.tanh,
                 reuse=None,
                 filter_type="laplacian",
                 use_gc_for_ru=True):
        """

        :param num_units:
        :param adj_mx:
        :param max_diffusion_step:
        :param num_nodes:
        :param num_proj:
        :param activation:
        :param reuse:
        :param filter_type: "laplacian", "random_walk", "dual_random_walk".
        :param use_gc_for_ru: whether to use Graph convolution to calculate the reset and update gates.
        """
        super(DCGRUCell, self).__init__(_reuse=reuse)
        self._activation = activation
        self._num_nodes = num_nodes
        #self._num_edges = np.sum(adj_mx!=0)
        self.mask_mx = adj_mx != 0
        self._num_proj = num_proj
        self._num_units = num_units
        self._max_diffusion_step = max_diffusion_step
        self._supports = []
        self._use_gc_for_ru = use_gc_for_ru
        self.squeeze_and_excitation = squeeze_and_excitation
        self.se_activate = se_activate
        self.excitation_rate = excitation_rate
        self.r = r
        self.cell_forward_mode = cell_forward_mode
        self.diffusion_channel_num = args.diffusion_channel_num
        self.diffusion_with_graph_kernel = diffusion_with_graph_kernel
        self.graph_kernel_mode = graph_kernel_mode
        supports = []
        print('adj_mx: ', adj_mx.shape)
        if filter_type == "laplacian":
            supports.append(
                utils.calculate_scaled_laplacian(adj_mx, lambda_max=None))
        elif filter_type == "random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
        elif filter_type == "dual_random_walk":
            supports.append(utils.calculate_random_walk_matrix(adj_mx).T)
            supports.append(utils.calculate_random_walk_matrix(adj_mx.T).T)
        else:
            supports.append(utils.calculate_scaled_laplacian(adj_mx))
        for support in supports:
            self._supports.append(self._build_sparse_matrix(support))
        self._kernel_inds = self.kernel_ind(supports)
        self.mask_mx_ind = self.mask_ind(supports)