Example #1
def compile_model():
    # Define model architecture
    # NOTE: We pass arguments for graph convolutional layers as a list of tensors.
    # This is somewhat hacky, more elegant options would require rewriting the Layer base class.
    if MODEL == "gcn":
        H = Dropout(0.5)(X_in)
        H = GraphConvolution(16,
                             support,
                             activation='relu',
                             kernel_regularizer=l2(5e-4))([H] + G)
        H = Dropout(0.5)(H)
        Y = GraphConvolution(number_classes, support,
                             activation='softmax')([H] + G)
    elif MODEL == "grcn":
        H = Dropout(0.5)(X_in)
        H = GraphResolutionConvolution(16,
                                       support,
                                       activation='relu',
                                       kernel_regularizer=l2(5e-4))([H] + G)
        H = Dropout(0.5)(H)
        Y = GraphResolutionConvolution(number_classes,
                                       support,
                                       activation='softmax')([H] + G)

    # Compile model
    model = Model(inputs=[X_in] + G, outputs=Y)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01))
    return model
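The NOTE comment in this and several later examples refers to the trick of appending the graph tensors G to the layer's input list. As a rough illustration of that convention, here is a minimal sketch of what such a layer might look like, assuming the common keras-gcn pattern of one weight matrix per support term (this is illustrative only, not the actual GraphConvolution implementation used above):

from keras import activations
from keras import backend as K
from keras.layers import Layer

class GraphConvolutionSketch(Layer):
    """Illustrative only: consumes inputs as [X, A_0, ..., A_{support-1}]."""

    def __init__(self, units, support=1, activation=None, **kwargs):
        super(GraphConvolutionSketch, self).__init__(**kwargs)
        self.units = units
        self.support = support
        self.activation = activations.get(activation)

    def build(self, input_shapes):
        feature_dim = input_shapes[0][-1]
        # one weight matrix per support (basis) matrix
        self.kernels = [
            self.add_weight(name='kernel_%d' % i,
                            shape=(feature_dim, self.units),
                            initializer='glorot_uniform')
            for i in range(self.support)
        ]
        super(GraphConvolutionSketch, self).build(input_shapes)

    def call(self, inputs):
        # inputs arrives as [X] + G -- the "list of tensors" the NOTE mentions
        features, basis = inputs[0], inputs[1:]
        output = K.dot(basis[0], K.dot(features, self.kernels[0]))
        for i in range(1, self.support):
            output = output + K.dot(basis[i], K.dot(features, self.kernels[i]))
        return self.activation(output)

    def compute_output_shape(self, input_shapes):
        return (input_shapes[0][0], self.units)

Keras layers accept a list of input tensors, so the adjacency (or Chebyshev basis) matrices can ride along with the features instead of being stored on the layer, at the cost of the slightly awkward ([H] + G) call syntax seen throughout these examples.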
Example #2
def gcnn(input_shape,
         dropout,
         n_l1,
         graph_matrix,
         support,
         l2_reg,
         for_sex=False,
         n_measure=None):
    """GCNN model

    Args:
        input_shape (int): dimension of input x
        dropout (float): dropout rate
        n_l1 (int): number of units in GCNN layer 1
        graph_matrix (ndarray): graph matrix for GCNN calculation
        support (int): support for filter
        l2_reg (float): l2 regularizer rate
        for_sex (bool, optional): whether the network is used for sex
            prediction
        n_measure (int, optional): number of measures (HCP dataset only)

    Returns:
        keras.models.Model: GCNN model
    """
    model_in = Input(shape=(input_shape, ))
    H = Dropout(dropout)(model_in)
    H = GraphConvolution(n_l1,
                         graph_matrix=graph_matrix,
                         support=support,
                         activation='relu',
                         W_regularizer=l2(l2_reg))(H)
    H = BatchNormalization()(H)
    H = Dropout(dropout)(H)
    if n_measure is not None:
        model_out = GraphConvolution(n_measure,
                                     graph_matrix=graph_matrix,
                                     support=support)(H)
    elif for_sex:
        model_out = GraphConvolution(2,
                                     graph_matrix=graph_matrix,
                                     support=support,
                                     activation='softmax')(H)
    else:
        model_out = GraphConvolution(1,
                                     graph_matrix=graph_matrix,
                                     support=support)(H)

    return Model(model_in, model_out)
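A hypothetical call to this factory, for orientation (every value below is a placeholder, not taken from the source; graph_matrix is assumed to come from the dataset's adjacency preprocessing):

model = gcnn(input_shape=68,
             dropout=0.3,
             n_l1=64,
             graph_matrix=graph_matrix,
             support=3,
             l2_reg=5e-4,
             for_sex=True)
model.compile(loss='categorical_crossentropy', optimizer='adam')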
Example #3
def get_model(X, vector_dim):
    # GCN
    # Parameters
    dropout_rate = 0.5
    support = 1

    X_in = Input(shape=(X.shape[0], ))
    G = [Input(shape=(None, None), batch_shape=(None, None), sparse=True)]
    dropout1 = Dropout(dropout_rate)(X_in)
    graph_convolution_1 = GraphConvolution(
        vector_dim, support, activation='elu',
        kernel_regularizer=l2(5e-4))([dropout1] + G)
    dropout2 = Dropout(dropout_rate)(graph_convolution_1)
    graph_convolution_2 = GraphConvolution(
        vector_dim, support, activation='elu',
        kernel_regularizer=l2(5e-4))([dropout2] + G)
    model = Model(inputs=[X_in] + G, outputs=graph_convolution_2)
    return model
Example #4
def build_model(self, adj_input):
    fea_input = Input(shape=(self.maxLen,), name='fea_input')
    net = Dropout(0.5)(fea_input)
    net = GraphConvolution(512, self.support, activation='relu', name="cov1")([net] + adj_input)
    net = Dropout(0.4)(net)
    net = GraphConvolution(256, self.support, activation='relu', kernel_regularizer=l2(5e-4), name="cov2")([net] + adj_input)
    net = Dropout(0.3)(net)
    net = GraphConvolution(128, self.support, activation='relu', kernel_regularizer=l2(5e-4), name="cov3")([net] + adj_input)
    net = Dropout(0.2)(net)
    net = GraphConvolution(64, self.support, activation='relu', kernel_regularizer=l2(5e-4), name="cov4")([net] + adj_input)
    net = Dropout(0.1)(net)
    net = Flatten()(net)
    # output = Dense(y.shape[1], activation='softmax')(net)
    output = GraphConvolution(self.classNum, self.support, activation='softmax')([net] + adj_input)

    model = Model(inputs=[fea_input] + adj_input, outputs=output)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=RMSprop(learning_rate=self.learning_rate))

    return model
Example #5
# Get data
# X:features  A:graph  y:labels
X, A, y = load_data(dataset='cora', use_feature=True)
y_train, y_val, y_test, train_mask, val_mask, test_mask = get_splits(y)

# Normalize X
X /= X.sum(1).reshape(-1, 1)
print('Feature dimension:', X.shape[1])

A_ = preprocess_adj(A, power=2)
graph = [X, A_]
G = [Input(shape=(None, None), batch_shape=(None, None), sparse=True)]
X_in = Input(shape=(X.shape[1], ))  # number of features

Y = GraphConvolution(y_train.shape[1],
                     kernel_regularizer=l2(5e-6),
                     activation='softmax')([X_in] + G)
# Compile model
model = Model(inputs=[X_in] + G, outputs=Y)  # inputs=graph=[X,A_]

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.2),
              weighted_metrics=['acc'])
model.summary()

# Callbacks for EarlyStopping
es_callback = EarlyStopping(monitor='val_weighted_acc', patience=PATIENCE)

# Train
validation_data = (graph, y_val, val_mask)
model.fit(graph,
          y_train,
          sample_weight=train_mask,
          batch_size=A.shape[0],  # full-batch training
          epochs=200,             # placeholder: epoch count not shown in the original snippet
          shuffle=False,
          validation_data=validation_data,
          callbacks=[es_callback])
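For reference, preprocess_adj above typically performs the renormalization from Kipf & Welling (2017): add self-loops, then symmetrically normalize. A minimal sketch of that step, assuming a scipy-compatible adjacency (the power argument is specific to this example's helper and is not modeled here):

import numpy as np
import scipy.sparse as sp

def normalize_adj_sketch(A):
    """D^-1/2 (A + I) D^-1/2: self-loops plus symmetric normalization."""
    A_hat = sp.csr_matrix(A) + sp.eye(A.shape[0])
    d = np.asarray(A_hat.sum(axis=1), dtype=np.float64).flatten()  # degree vector
    d_inv_sqrt = np.zeros_like(d)
    nonzero = d > 0
    d_inv_sqrt[nonzero] = d[nonzero] ** -0.5                       # guard isolated nodes
    D_inv_sqrt = sp.diags(d_inv_sqrt)
    return D_inv_sqrt @ A_hat @ D_inv_sqrt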
Example #6
if FILTER == 'localpool':
    support = 1
    G = [Input(shape=(None, None), batch_shape=(None, None), sparse=True)]

elif FILTER == 'chebyshev':
    support = MAX_DEGREE + 1
    G = [
        Input(shape=(None, None), batch_shape=(None, None), sparse=True)
        for _ in range(support)
    ]

else:
    raise Exception('Invalid filter type.')

X_in = Input(shape=(X.shape[1], ))

# Define model architecture
# NOTE: We pass arguments for graph convolutional layers as a list of tensors.
# This is somewhat hacky, more elegant options would require rewriting the Layer base class.
H = Dropout(0.5)(X_in)
H = GraphConvolution(64,
                     support,
                     activation='relu',
                     kernel_regularizer=l2(5e-4))([H] + G)
H = Dropout(0.5)(H)
Y = GraphConvolution(y.shape[1], support, activation='softmax')([H] + G)

# Compile model
model = Model(inputs=[X_in] + G, outputs=Y)
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.0001))

# Helper variables for main training loop
wait = 0
preds = None
best_val_loss = 99999

# Fit
for epoch in range(1, NB_EPOCH + 1):
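This snippet is cut off at the top of its training loop. In the keras-gcn reference script this pattern trains full-batch, one gradient step per epoch, with manual early stopping; a sketch under that assumption (graph, y_train, train_mask, and A are defined earlier in the full script, not in this excerpt):

for epoch in range(1, NB_EPOCH + 1):
    # single full-batch update; shuffle=False keeps node order aligned with G
    model.fit(graph, y_train, sample_weight=train_mask,
              batch_size=A.shape[0], epochs=1, shuffle=False, verbose=0)
    preds = model.predict(graph, batch_size=A.shape[0])
    # ...compute train/val loss from preds, track best_val_loss, and stop
    # once the patience counter `wait` is exhausted...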
Example #7
res = []
att = []

X_in = Input(shape=(X.shape[1], ))  # number of features
G = [Input(shape=(None, None), batch_shape=(None, None), sparse=False)]

drop = 0.6
H = Dropout(rate=drop)(X_in)
unit1 = 24
unit2 = 7
regulation = 5e-4
activation = 'elu'

H = GraphConvolution(unit1,
                     activation=activation,
                     kernel_regularizer=l2(regulation))([H] + G)  # shared weights
H = Dropout(rate=drop)(H)
Y = Dense(y_train.shape[1],
          activation='softmax',
          kernel_regularizer=l2(regulation))(H)  # fully-connected softmax layer

# Compile model
model = Model(inputs=[X_in] + G, outputs=Y)

model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=0.01),
              weighted_metrics=['acc'])
# model.summary()
Example #8
for i in range(2):
    data_train.append(graph_all[i])

for i in range(2, 4):
    data_test.append(graph_all[i])

X_in = Input(batch_shape=(N, F))  # fixed batch of N nodes, F features each

# Define model architecture
# NOTE: We pass arguments for graph convolutional layers as a list of tensors.
# This is somewhat hacky, more elegant options would require rewriting the Layer base class.
# H = Dropout(0.5)(X_in)

H = GraphConvolution(F_,
                     support,
                     activation='relu',
                     kernel_regularizer=l2(5e-4))([X_in] + G)
H = Reshape((-1, F))(H)
gcn_lstm = LSTM(units=50, input_shape=[N, F], return_sequences=True)(H)
gcn_lstm = attention.attention_3d_block(gcn_lstm)
print('gcn_lstm:', gcn_lstm)
# ------------ if using gcn_lstm -------------------------

gcn_lstm = Dense(1)(gcn_lstm)

# ------------------------ if using gcn_lstm_gat ---------------------------------

# A_in = Input(shape=(N,), batch_shape=(N, N))
# gcn_lstm_gat = GraphAttention(N=N,
#                               F_=F_,
Example #9
# `shape` is a shape tuple that excludes batch_size;
# e.g. shape=(32, ) means the expected input is a batch of 32-dimensional vectors
X_in = Input(shape=(X.shape[1], ))  # each input feature vector is 1433-dimensional (the number of feature columns)
print("X_in.shape:", X_in.shape)  # X_in's layer name is input_2

# Define model architecture
# NOTE: We pass arguments for graph convolutional layers as a list of tensors.
# This is somewhat hacky, more elegant options would require rewriting the Layer base class.

H = Dropout(0.5)(X_in)  # layer name: dropout_1
H = GraphConvolution(32,
                     support,
                     activation='relu',
                     kernel_regularizer=l2(5e-4))([H] + G)
# layer name: graph_convolution_1
H = Dropout(0.5)(H)  # layer name: dropout_2
H = GraphConvolution(256, support, activation=None)([H] + G)  # y.shape[1]
# layer name: graph_convolution_2
Y = GraphConvolution(64, support,
                     activation=None)([H] + G)  # layer name: graph_convolution_3

# Compile model

model = Model(inputs=[X_in] + G, outputs=Y)

# model.load_weights("model_weights.h5")
# model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01))
Example #10
# print("graph类型:", type(graph))  # 类型:list

# `shape` is a shape tuple that excludes batch_size;
# e.g. shape=(32, ) means the expected input is a batch of 32-dimensional vectors
X_in = Input(shape=(X.shape[1], ))  # each input feature vector is 1433-dimensional (the number of feature columns)

# Define model architecture
# NOTE: We pass arguments for graph convolutional layers as a list of tensors.
# This is somewhat hacky, more elegant options would require rewriting the Layer base class.

H = Dropout(0.5)(X_in)
H = GraphConvolution(32,
                     support,
                     activation='relu',
                     kernel_regularizer=l2(5e-4))([H] + G)
H = Dropout(0.5)(H)
H = GraphConvolution(256, support,
                     activation='relu')([H] + G)  # modify this line to match your requirements
# y.shape[1]
# H = Dropout(0.5)(H)
Y = GraphConvolution(2, support, activation='softmax')([H] + G)

# model = keras.Sequential()
# model.add(Dropout(0.5))
# model.add(GraphConvolution(16, support, activation='relu', kernel_regularizer=l2(5e-4)))
# model.add(Dropout(0.5))
# model.add(GraphConvolution(2, support, activation='softmax'))

# Compile model
Example #11
X_in = Input(shape=(FEAT_DIM,))

if FILTER == 'localpool':
    support = 1
    G = [Input(shape=(None, None), batch_shape=(None, None), sparse=True)]

elif FILTER == 'chebyshev':
    support = MAX_DEGREE + 1
    G = [Input(shape=(None, None), batch_shape=(None, None), sparse=True) for _ in range(support)]

else:
    raise Exception('Invalid filter type.')

H = Dropout(0.5)(X_in)
H = GraphConvolution(32, support, activation='relu', W_regularizer=l2(5e-4))([H]+G)
H = Dropout(0.5)(H)
H = GraphConvolution(16, support, activation='relu', W_regularizer=l2(5e-4))([H]+G)
H = Dropout(0.5)(H)
Y = GraphConvolution(LABELS_DIM, support, activation='softmax')([H]+G)

# Compile model
model = Model(inputs=[X_in]+G, outputs=Y)
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01))

# Helper variables for main training loop
wait = 0
best_val_loss = 99999

# Create list of files and associated labels
edgelists = []
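For the 'chebyshev' filter branch shown above, the support list G is typically fed with Chebyshev polynomials of the rescaled graph Laplacian. A sketch of how that basis is usually built (following the keras-gcn utilities; the helper below is illustrative and assumes L_rescaled = 2L / lambda_max - I has already been computed):

import scipy.sparse as sp

def chebyshev_basis_sketch(L_rescaled, max_degree):
    """Return [T_0, ..., T_k] of the rescaled Laplacian, k = max_degree."""
    T = [sp.eye(L_rescaled.shape[0], format='csr'), sp.csr_matrix(L_rescaled)]
    for _ in range(2, max_degree + 1):
        # Chebyshev recurrence: T_k = 2 * L * T_{k-1} - T_{k-2}
        T.append(2 * L_rescaled @ T[-1] - T[-2])
    return T[:max_degree + 1]

Each element of the returned list then becomes one of the sparse Input placeholders in G, which is why support = MAX_DEGREE + 1 graph inputs are created in the snippet above.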