Example #1
    def transform(self, graphs):
        # Preprocessing: convert the graphs, build the GCN filters, pad to max_n_nodes
        adj, x, _ = utilities.from_nx_to_adj(graphs)
        fltr = localpooling_filter(adj)
        fltr, x = self.add_padding(fltr, x)

        y_pred = self.embedder.predict([x, fltr])
        return y_pred
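The helper utilities.from_nx_to_adj is used throughout these examples but never shown. Below is a minimal sketch of what it plausibly does, assuming graphs is a list of networkx graphs whose nodes carry a 'features' attribute; the attribute name, the label lookup, and the third return value are all assumptions, not the original implementation:

import numpy as np
import networkx as nx

def from_nx_to_adj(graphs, feature_key='features'):
    # Dense adjacency matrix for each graph (hypothetical reconstruction)
    adj = [nx.to_numpy_array(g) for g in graphs]
    # Stack the per-node attributes into an (n_nodes, n_features) array per graph
    x = [np.array([g.nodes[n][feature_key] for n in g.nodes]) for g in graphs]
    # Graph-level labels when present; transform() discards this value
    labels = [g.graph.get('label') for g in graphs]
    return adj, x, labels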
Example #2
    def transform(self, graphs):
        # Preprocessing
        adj, x, _ = utilities.from_nx_to_adj(graphs)
        fltr = localpooling_filter(adj)
        fltr, x = self.add_padding(fltr, x)

        y_pred = self.new_models[self.selected_model].predict([x, fltr])
        return y_pred
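For context, these fit/transform methods follow the scikit-learn estimator convention (fit returns self, transform returns the embeddings), so the class composes with standard pipelines. A hypothetical usage sketch, in which the class name GraphEmbedder and its constructor arguments are assumptions:

# Hypothetical usage; class name and constructor arguments are assumed.
embedder = GraphEmbedder(n_components=16, epochs=200)
embedder.fit(train_graphs, train_labels)      # trains the classifier, then syncs the embedder
embeddings = embedder.transform(test_graphs)  # one embedding vector per input graph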
Example #3
    def fit(self, graphs, y):
        # Preprocessing
        adj, x, _ = utilities.from_nx_to_adj(graphs)
        fltr = localpooling_filter(adj)
        y_one_hot = utilities.from_np_to_one_hot(y)

        # The model's input is fixed at max_n_nodes, so pad the
        # filters and features if needed.
        fltr, x = self.add_padding(fltr, x)

        history = self.classificator.fit([x, fltr], y_one_hot,
                                         epochs=self.epochs,
                                         validation_split=self.validation_split,
                                         callbacks=self.callbacks,
                                         verbose=self.verbose)
        print("Stopped epoch:", self.es.stopped_epoch)

        # Copy the trained weights into the embedding model
        self.embedder.set_weights(self.classificator.get_weights())

        return self
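The direct set_weights(get_weights()) call only works if the embedder and the classifier expose identical weight lists. A minimal sketch of one way to build such a pair and sync them, copying the shared trunk layer by layer so the softmax head can differ; the architecture, layer sizes, and batch-mode inputs are assumptions:

from tensorflow.keras.layers import Input, Flatten, Dense
from tensorflow.keras.models import Model
from spektral.layers import GraphConv

def build_pair(max_n_nodes, n_features, n_components, n_classes):
    # Two structurally identical trunks; only the classifier gets a softmax head.
    def trunk():
        x_in = Input(shape=(max_n_nodes, n_features))
        fltr_in = Input(shape=(max_n_nodes, max_n_nodes))
        h = GraphConv(32, activation='relu')([x_in, fltr_in])
        emb = Dense(n_components, activation='relu')(Flatten()(h))
        return x_in, fltr_in, emb

    x_c, f_c, emb_c = trunk()
    out = Dense(n_classes, activation='softmax')(emb_c)
    classificator = Model([x_c, f_c], out)

    x_e, f_e, emb_e = trunk()
    embedder = Model([x_e, f_e], emb_e)
    return classificator, embedder

def sync_weights(classificator, embedder):
    # Copy the shared trunk weights; zip() stops at the embedder's last
    # layer, so the classifier's extra softmax head is skipped.
    for src, dst in zip(classificator.layers, embedder.layers):
        dst.set_weights(src.get_weights())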
Example #4
    def fit(self, graphs, y):
        # Select the model matching the requested embedding size
        self.selected_model = self.dim.index(self.n_components)

        # Preprocessing
        adj, x, _ = utilities.from_nx_to_adj(graphs)
        fltr = localpooling_filter(adj)
        y_one_hot = utilities.from_np_to_one_hot(y)

        # Callback
        es_callback = EarlyStopping(monitor='val_loss', patience=self.patience)

        # The model's input is fixed at max_n_nodes, so pad the
        # filters and features if needed.
        fltr, x = self.add_padding(fltr, x)

        history = self.original_models[self.selected_model].fit([x, fltr], y_one_hot,
                                                                batch_size=self.batch_size,
                                                                validation_split=self.validation_split,
                                                                epochs=self.epochs,
                                                                callbacks=[es_callback],
                                                                verbose=self.verbose)
        if self.plot:
            tmp_print(history)

        print("Stopped epoch:", es_callback.stopped_epoch)

        # Copy the trained weights into the matching inference model
        current_weights = self.original_models[self.selected_model].get_weights()
        self.new_models[self.selected_model].set_weights(current_weights)

        return self
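add_padding is also referenced throughout but not shown. A minimal sketch of the zero-padding it plausibly performs, assuming self.max_n_nodes holds the fixed input size and that fltr and x arrive as lists of per-graph arrays; all names here are assumptions:

import numpy as np

def add_padding(self, fltr, x):
    # Zero-pad every graph's filter and feature matrix up to max_n_nodes
    # so each sample matches the model's fixed input shape.
    n = self.max_n_nodes
    fltr_pad = np.zeros((len(fltr), n, n), dtype=np.float32)
    x_pad = np.zeros((len(x), n, x[0].shape[1]), dtype=np.float32)
    for i, (f, feats) in enumerate(zip(fltr, x)):
        k = f.shape[0]
        fltr_pad[i, :k, :k] = f
        x_pad[i, :k, :] = feats
    return fltr_pad, x_pad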
Example #5
    d_tr = train_d.to_numpy().astype(np.float32)
    d_tst = test_d.to_numpy().astype(np.float32)
    l_tr = label_train.to_numpy().astype(np.float32)
    l_ts = label_test.to_numpy().astype(np.float32)

    return [d_tr, l_tr, d_tst, l_ts, start_o, end_o, start_d]


[train_np, label_train_np, test_np, label_test_np, start_o, end_o,
 start_d] = create_data(lyft_data, 0.8001, 200, aug_frac=3)

# Save the trip-data feature vector size
trip_feature = train_np.shape[1]

# Preprocessing: build the normalized GCN propagation filter and pre-compute
# multi-hop propagation (adapted from the Spektral GCN example)
fltr = localpooling_filter(A).astype('f4')
for i in range(K - 1):
    fltr = fltr.dot(fltr)
fltr.sort_indices()
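# Note: each pass through the loop squares the filter, so the GCN layer is
# fed a multi-hop propagation matrix; sort_indices() restores the canonical
# CSR ordering expected when passing the sparse matrix to the model.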

# Model definition

# GCN input
area_in = Input(shape=(F, ))  # area features
fltr_in = Input((N, ), sparse=True)  # adjacency filter

bn0 = BN(
    axis=-1,
    momentum=0.99,
    epsilon=0.001,
    center=True,
    scale=True)
Example #6
# Load data
dataset = 'cora'
adj, node_features, y_train, y_val, y_test, train_mask, val_mask, test_mask = citation.load_data(dataset)

# Parameters
N = node_features.shape[0]  # Number of nodes in the graph
F = node_features.shape[1]  # Original feature dimensionality
n_classes = y_train.shape[1]  # Number of classes
dropout_rate = 0.5  # Dropout rate applied to the input of GCN layers
l2_reg = 25e-5  # Regularization rate for l2
learning_rate = 1e-2  # Learning rate for SGD
epochs = 2000  # Number of training epochs
es_patience = 10  # Patience for early stopping
log_dir = init_logging()  # Create log directory and file

# Preprocessing operations
node_features = citation.preprocess_features(node_features)
fltr = localpooling_filter(adj)

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)

dropout_1 = Dropout(dropout_rate)(X_in)
graph_conv_1 = GraphConv(16,
                         activation='relu',
                         kernel_regularizer=l2(l2_reg),
                         use_bias=False)([dropout_1, fltr_in])
dropout_2 = Dropout(dropout_rate)(graph_conv_1)
graph_conv_2 = GraphConv(n_classes, activation='softmax',
                         use_bias=False)([dropout_2, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=graph_conv_2)
Example #7
# Load data
dataset = 'cora'
A, X, y, train_mask, val_mask, test_mask = citation.load_data(dataset)

# Parameters
K = 2  # Degree of propagation
N = X.shape[0]  # Number of nodes in the graph
F = X.shape[1]  # Original feature dimensionality
n_classes = y.shape[1]  # Number of classes
l2_reg = 5e-6  # Regularization rate for l2
learning_rate = 0.2  # Learning rate for SGD
epochs = 20000  # Number of training epochs
es_patience = 200  # Patience for early stopping

# Preprocessing operations
fltr = localpooling_filter(A)

# Pre-compute propagation
for i in range(K - 1):
    fltr = fltr.dot(fltr)

# Model definition
X_in = Input(shape=(F, ))
fltr_in = Input((N, ), sparse=True)
output = GraphConv(n_classes,
                   activation='softmax',
                   kernel_regularizer=l2(l2_reg),
                   use_bias=False)([X_in, fltr_in])

# Build model
model = Model(inputs=[X_in, fltr_in], outputs=output)
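Example #7 stops at the model build. A sketch of how training typically proceeds in the Spektral citation examples this snippet resembles: single-graph, full-batch training where boolean masks select the nodes that count toward the loss. The SGD choice follows the learning_rate comment above; treat the exact calls as an assumption, not the original script:

from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import EarlyStopping

model.compile(optimizer=SGD(learning_rate=learning_rate),
              loss='categorical_crossentropy',
              weighted_metrics=['acc'])

# Transductive setup: one batch holds the entire graph, and the masks
# weight the loss/metrics so only train (or validation) nodes contribute.
model.fit([X, fltr], y,
          sample_weight=train_mask,
          validation_data=([X, fltr], y, val_mask),
          batch_size=N,
          epochs=epochs,
          shuffle=False,
          callbacks=[EarlyStopping(monitor='val_loss', patience=es_patience)])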