Example #1
    def predict(self, A, X, test_indices):
        if self.transform_fn is not None:
            A = [self.transform_fn(a) for a in A]

        # Extract dimensions
        n_graphs = len(A)
        n_features = X[0].shape[1] + 1  # features plus a bias column, matching fit()
        max_nodes = max([a.shape[0] for a in A])

        # Compute the matrix power series
        Apow_seq = [util.A_power_series(a, self.n_hops) for a in A]
        Apow = np.zeros((n_graphs, self.n_hops + 1, max_nodes, max_nodes),
                        dtype='float32')
        for i, apow in enumerate(Apow_seq):
            n_nodes = apow.shape[1]
            Apow[i, :, :n_nodes, :n_nodes] = apow

        # zero-pad X and add the bias term, mirroring the layout used in fit()
        X_pad = np.zeros((n_graphs, max_nodes, n_features), dtype='float32')
        for i in range(n_graphs):
            n_nodes = X[i].shape[0]
            X_pad[i, :n_nodes, -1] = 1
            X_pad[i, :n_nodes, :-1] = X[i]
        X = X_pad

        # Create symbolic representation of predictions
        pred = layers.get_output(self.l_out)

        # Create a function that applies the model to data to predict a class
        pred_fn = theano.function([self.var_Apow, self.var_X],
                                  T.argmax(pred, axis=1),
                                  allow_input_downcast=True)

        # Return the predictions
        predictions = pred_fn(Apow[test_indices, :, :, :],
                              X[test_indices, :, :])

        return predictions
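
Every snippet on this page leans on `util.A_power_series`, whose source is not shown here. Judging from how its output is indexed (slice `i` for `i` in `range(n_hops + 1)`, square per-graph blocks), a minimal sketch of the assumed behavior is stacking the matrix powers A^0 through A^k:

import numpy as np

def A_power_series(A, k):
    # Assumed contract: return [I, A, A^2, ..., A^k] stacked
    # into an array of shape (k + 1, n, n).
    n = A.shape[0]
    powers = [np.eye(n, dtype='float32')]
    for _ in range(k):
        powers.append(powers[-1].dot(A))
    return np.asarray(powers, dtype='float32')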
Example #2
    def predict_proba(self, X, B, test_indices, A=None):
        if A is None:
            Apow = self.Apow
        else:
            if self.transform_fn is not None:
                A = np.asarray(
                    [self.transform_fn(A[i]) for i in range(A.shape[0])])
            # Compute the matrix power series
            Apow = np.asarray([
                util.A_power_series(A[i], self.n_hops)
                for i in range(A.shape[0])
            ])

        # add bias term to X
        X = np.hstack([X, np.ones((X.shape[0], 1))]).astype('float32')

        # Create symbolic representation of predictions
        pred = layers.get_output(self.l_out)

        # Create a function that applies the model to data to predict a class
        pred_fn = theano.function([self.var_Apow, self.var_B, self.var_X],
                                  T.exp(pred) /
                                  T.exp(pred).sum(axis=1, keepdims=True),
                                  allow_input_downcast=True)

        # Return the predictions
        predictions = pred_fn(Apow, B[test_indices, :], X)
        return predictions
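
The probability computation above, `T.exp(pred) / T.exp(pred).sum(axis=1, keepdims=True)`, is a plain softmax and can overflow when the logits are large. A numerically stable variant (a sketch, not part of the original code) shifts by the row maximum first, or defers to Theano's built-in:

# Subtracting the row max leaves the softmax output unchanged
shifted = pred - pred.max(axis=1, keepdims=True)
probs = T.exp(shifted) / T.exp(shifted).sum(axis=1, keepdims=True)
# Equivalently: probs = T.nnet.softmax(pred)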
Example #3
    def predict(self, X_N, X_E, B, test_indices, A=None):
        if A is None:
            Apow = self.Apow
        else:
            if self.transform_fn is not None:
                A = self.transform_fn(A)
            # Compute the matrix power series
            Apow = util.A_power_series(A, self.n_hops)

        # add bias term to X
        X_N = np.hstack([X_N, np.ones((X_N.shape[0], 1))]).astype('float32')

        # Create symbolic representation of predictions
        pred = layers.get_output(self.l_out)

        # Create a function that applies the model to data to predict a class
        pred_fn = theano.function([
            self.var_B_left, self.var_Apow, self.var_B, self.var_X_N,
            self.var_X_E
        ],
                                  T.argmax(pred, axis=1),
                                  allow_input_downcast=True)

        # Return the predictions
        predictions = pred_fn(B[test_indices, :], Apow, B, X_N, X_E)
        return predictions
Example #4
    def predict_proba(self, X, B, test_indices, A=None):
        if A is None:
            Apow = self.Apow
        else:
            A = self._convert_A(B, A)
            if self.transform_fn is not None:
                A = self.transform_fn(A)
            # Compute the matrix power series
            Apow = util.A_power_series(A, self.n_hops)

        n_nodes = B.shape[1]

        corrected_test_indices = test_indices + n_nodes

        # add bias term to X
        X = np.hstack([X, np.ones((X.shape[0], 1))]).astype('float32')

        X = self._convert_X(B, X)

        # Create symbolic representation of predictions
        pred = layers.get_output(self.l_out)

        # Create a function that applies the model to data to predict a class
        pred_fn = theano.function([self.var_Apow, self.var_X],
                                  T.exp(pred) /
                                  T.exp(pred).sum(axis=1, keepdims=True),
                                  allow_input_downcast=True)

        # Return the predictions
        predictions = pred_fn(Apow[:, corrected_test_indices, :], X)
        return predictions
Example #5
File: kernel.py Project: jcatw/scnn
    def fit(self, A, alpha=0.01, k=5):
        if self.lpow is None:
            print('computing lpow...')
            # Powers of the negative graph Laplacian: (-L)^0 ... (-L)^k
            self.lpow = util.A_power_series(-util.laplacian(A), k)
            print('done')
        print('computing K...')
        # Truncated exponential diffusion kernel: K = sum_i alpha^i (-L)^i / i!
        self.K = sum((alpha**i) * self.lpow[i] / factorial(i)
                     for i in range(k + 1))
        print('done')
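
The sum above is a degree-k truncation of the Laplacian exponential diffusion kernel,

K = \sum_{i=0}^{k} \frac{\alpha^i (-L)^i}{i!} \approx \exp(-\alpha L),

which is why term i carries the Taylor coefficient \alpha^i / i!.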
Example #6
File: graph_scnn.py Project: jcatw/scnn
    def _apow(self, A, n_graphs, max_nodes):
        Apow_seq = [util.A_power_series(a, self.n_hops) for a in A]
        Apow = np.zeros((n_graphs, self.n_hops + 1, max_nodes, max_nodes),
                        dtype='float32')
        for i, apow in enumerate(Apow_seq):
            n_nodes = apow.shape[1]
            Apow[i, :, :n_nodes, :n_nodes] = apow

        return Apow
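
A hypothetical usage sketch (the toy matrices and the `model` instance are illustrative, not from the source): graphs smaller than `max_nodes` occupy the top-left block of each padded slice, with zeros elsewhere.

import numpy as np

A1 = np.array([[0, 1], [1, 0]], dtype='float32')                    # 2-node graph
A2 = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]], dtype='float32')   # 3-node graph

# Apow = model._apow([A1, A2], n_graphs=2, max_nodes=3)
# Apow.shape == (2, n_hops + 1, 3, 3); Apow[0, :, 2:, :] and
# Apow[0, :, :, 2:] stay zero because A1 has only 2 nodes.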
Example #7
    def fit(self,
            A,
            X,
            Y,
            train_indices,
            valid_indices,
            learning_rate=0.05,
            batch_size=100,
            n_epochs=100,
            loss_fn=lasagne.objectives.multiclass_hinge_loss,
            update_fn=lasagne.updates.adagrad,
            stop_early=True,
            stop_window_size=5,
            output_weights=False,
            show_weights=False):
        assert len(A) == len(X)
        assert len(X) == Y.shape[0]
        assert len(Y.shape) > 1

        if self.transform_fn is not None:
            A = [self.transform_fn(a) for a in A]

        # Extract dimensions
        n_graphs = len(A)
        n_features = X[0].shape[1] + 1
        n_classes = Y.shape[1]
        max_nodes = max([a.shape[0] for a in A])

        n_batch = n_graphs // batch_size

        # Compute the matrix power series, zero-padding for graphs with fewer than max nodes
        Apow_seq = [util.A_power_series(a, self.n_hops) for a in A]
        Apow = np.zeros((n_graphs, self.n_hops + 1, max_nodes, max_nodes),
                        dtype='float32')
        for i, apow in enumerate(Apow_seq):
            n_nodes = apow.shape[1]
            Apow[i, :, :n_nodes, :n_nodes] = apow

        # zero-pad X and add bias term
        X_pad = np.zeros((n_graphs, max_nodes, n_features), dtype='float32')
        for i in range(n_graphs):
            n_nodes = X[i].shape[0]
            X_pad[i, :n_nodes, -1] = 1
            X_pad[i, :n_nodes, :-1] = X[i]

        X = X_pad

        # Create Lasagne layers
        self.l_in_apow = lasagne.layers.InputLayer(
            (batch_size, self.n_hops + 1, max_nodes, max_nodes),
            input_var=self.var_Apow)
        self.l_in_x = lasagne.layers.InputLayer(
            (n_graphs, max_nodes, n_features), input_var=self.var_X)
        self.l_sc = GraphSearchConvolution([self.l_in_apow, self.l_in_x],
                                           self.n_hops + 1, n_features)
        self.l_out = layers.DenseLayer(
            self.l_sc,
            num_units=n_classes,
            nonlinearity=lasagne.nonlinearities.tanh)

        # Create symbolic representations of predictions, loss, parameters, and updates.
        prediction = layers.get_output(self.l_out)
        loss = lasagne.objectives.aggregate(loss_fn(prediction, self.var_Y),
                                            mode='mean')
        params = lasagne.layers.get_all_params(self.l_out)
        updates = update_fn(loss, params, learning_rate=learning_rate)

        # Create functions that apply the model to data and return loss
        apply_loss = theano.function([self.var_Apow, self.var_X, self.var_Y],
                                     loss,
                                     updates=updates)
        # Evaluate loss without updating parameters (used for validation,
        # so validation data never trains the model)
        compute_loss = theano.function([self.var_Apow, self.var_X, self.var_Y],
                                       loss)

        # Train the model
        print('Training model...')
        validation_losses = []
        validation_loss_window = np.zeros(stop_window_size)
        validation_loss_window[:] = float('+inf')

        for epoch in range(n_epochs):
            train_loss = 0.0

            np.random.shuffle(train_indices)

            for batch in range(n_batch):
                start = batch * batch_size
                end = min((batch + 1) * batch_size, train_indices.shape[0])

                if start < end:
                    train_loss += apply_loss(
                        Apow[train_indices[start:end], :, :, :],
                        X[train_indices[start:end], :, :],
                        Y[train_indices[start:end], :])

            valid_loss = compute_loss(Apow[valid_indices, :, :, :],
                                      X[valid_indices, :, :],
                                      Y[valid_indices, :])

            print "Epoch %d training error: %.6f" % (epoch, train_loss)
            print "Epoch %d validation error: %.6f" % (epoch, valid_loss)

            validation_losses.append(valid_loss)

            if output_weights:
                W = layers.get_all_param_values(self.l_sc)[0]
                np.savetxt('W_%d.csv' % (epoch, ), W, delimiter=',')

            if show_weights:
                W = layers.get_all_param_values(self.l_sc)[0]
                plt.imshow(W, aspect='auto', interpolation='none')
                plt.show()

            if stop_early:
                if valid_loss >= validation_loss_window.mean():
                    print('Validation loss did not decrease. Stopping early.')
                    break
            validation_loss_window[epoch % stop_window_size] = valid_loss
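
The early-stopping rule from the loop above, in isolation: training stops the first time the validation loss fails to beat the mean of the last `stop_window_size` recorded losses. Because the window is seeded with +inf, at least `stop_window_size` epochs always run before a stop can trigger.

import numpy as np

window = np.full(5, np.inf)          # stop_window_size = 5
for epoch, loss in enumerate([1.0, 0.9, 0.85, 0.86, 0.87, 0.88, 0.9]):
    if loss >= window.mean():        # mean stays inf until the window fills
        print('stop at epoch %d' % epoch)
        break
    window[epoch % 5] = loss         # overwrite the oldest entry
# -> stops at epoch 6: 0.9 >= mean([0.88, 0.9, 0.85, 0.86, 0.87]) = 0.872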
Example #8
    def fit(self, A, X, Y, train_indices, valid_indices,
            learning_rate=0.05, batch_size=100, n_epochs=100,
            loss_fn=lasagne.objectives.multiclass_hinge_loss,
            update_fn=lasagne.updates.adagrad,
            stop_early=True,
            stop_window_size=5,
            output_weights=False,
            show_weights=False):

        # Ensure that data have the correct dimensions
        assert A.shape[0] == X.shape[0]
        assert X.shape[0] == Y.shape[0]
        assert len(Y.shape) > 1

        if self.transform_fn is not None:
            A = self.transform_fn(A)

        # Extract dimensions
        n_nodes = A.shape[0]
        n_features = X.shape[1] + 1
        n_classes = Y.shape[1]

        n_batch = n_nodes // batch_size

        # Compute the matrix power series
        Apow = util.A_power_series(A, self.n_hops)

        # Add bias term to X
        X = np.hstack([X, np.ones((X.shape[0], 1))]).astype('float32')

        # Create Lasagne layers
        # (the power series has n_hops + 1 slices, A^0 through A^n_hops)
        self.l_in_apow = lasagne.layers.InputLayer(
            (self.n_hops + 1, batch_size, n_nodes), input_var=self.var_Apow)
        self.l_in_x = lasagne.layers.InputLayer(
            (n_nodes, n_features), input_var=self.var_X)
        self.l_sc = SearchConvolution([self.l_in_apow, self.l_in_x],
                                      self.n_hops + 1, n_features)
        self.l_out = layers.DenseLayer(self.l_sc,
                                       num_units=n_classes,
                                       nonlinearity=lasagne.nonlinearities.tanh)

        # Create symbolic representations of predictions, loss, parameters, and updates.
        prediction = layers.get_output(self.l_out)
        loss = lasagne.objectives.aggregate(loss_fn(prediction, self.var_Y), mode='mean')
        params = lasagne.layers.get_all_params(self.l_out)
        updates = update_fn(loss, params, learning_rate=learning_rate)

        # Create functions that apply the model to data and return loss
        apply_loss = theano.function([self.var_Apow, self.var_X, self.var_Y],
                                     loss, updates=updates)
        # Evaluate loss without updating parameters (used for validation)
        compute_loss = theano.function([self.var_Apow, self.var_X, self.var_Y],
                                       loss)

        # Train the model
        print('Training model...')
        validation_losses = []
        validation_loss_window = np.zeros(stop_window_size)
        validation_loss_window[:] = float('+inf')

        for epoch in range(n_epochs):
            train_loss = 0.0

            np.random.shuffle(train_indices)

            for batch in range(n_batch):
                start = batch * batch_size
                end = min((batch + 1) * batch_size, train_indices.shape[0])

                if start < end:
                    train_loss += apply_loss(Apow[:, train_indices[start:end], :],
                                             X,
                                             Y[train_indices[start:end], :])

            valid_loss = compute_loss(Apow[:, valid_indices, :],
                                      X,
                                      Y[valid_indices, :])

            print "Epoch %d training error: %.6f" % (epoch, train_loss)
            print "Epoch %d validation error: %.6f" % (epoch, valid_loss)

            validation_losses.append(valid_loss)

            if output_weights:
                W = layers.get_all_param_values(self.l_sc)[0]
                np.savetxt('W_%d.csv' % (epoch,), W, delimiter=',')

            if show_weights:
                W = layers.get_all_param_values(self.l_sc)[0]
                plt.imshow(W, aspect='auto', interpolation='none')
                plt.show()

            if stop_early:
                if valid_loss >= validation_loss_window.mean():
                    print('Validation loss did not decrease. Stopping early.')
                    break
            validation_loss_window[epoch % stop_window_size] = valid_loss
Example #9
File: kernel.py Project: jcatw/scnn
    def fit(self, A, alpha=0.01, k=5):
        if self.apow is None:
            # Powers of the adjacency matrix: A^0 ... A^k
            self.apow = util.A_power_series(A, k)
        # Truncated exponential diffusion kernel: K = sum_i alpha^i A^i / i!
        self.K = sum((alpha**i) * self.apow[i] / factorial(i)
                     for i in range(k + 1))