Example 1
    def _init_spektral_layers(self, params):
        # One EdgeConditionedConv per message-passing step
        self._convolutions = []

        for _ in range(self._num_message_passing_layers):
            self._convolutions.append(
                EdgeConditionedConv(
                    channels=self.num_units,
                    kernel_network=params.get("EdgeFcLayerParams", [256]),
                    activation=params.get("MPLayerActivation", "relu")))
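
The method above only builds the layer list; applying the layers is not shown. A plausible companion method, sketched here as an assumption (Spektral's EdgeConditionedConv takes [node_features, adjacency, edge_features] as input):

    def _apply_convolutions(self, node_features, adjacency, edge_features):
        # Run each message-passing layer in sequence, keeping the graph
        # structure (adjacency and edge features) fixed across layers
        features = node_features
        for conv in self._convolutions:
            features = conv([features, adjacency, edge_features])
        return features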
Example 2
n_out = y.shape[-1]   # Dimension of the target

# Train/test split
A_train, A_test, \
X_train, X_test, \
E_train, E_test, \
y_train, y_test = train_test_split(A, X, E, y, test_size=0.1, random_state=0)

################################################################################
# BUILD MODEL
################################################################################
X_in = Input(shape=(N, F))
A_in = Input(shape=(N, N))
E_in = Input(shape=(N, N, S))

X_1 = EdgeConditionedConv(32, activation='relu')([X_in, A_in, E_in])
X_2 = EdgeConditionedConv(32, activation='relu')([X_1, A_in, E_in])
X_3 = GlobalSumPool()(X_2)
output = Dense(n_out)(X_3)

# Build model
model = Model(inputs=[X_in, A_in, E_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='mse')
model.summary()

################################################################################
# FIT MODEL
################################################################################
model.fit([X_train, A_train, E_train],
          y_train)  # remaining fit arguments are truncated in the source; call closed minimally
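
Not part of the original snippet: once training finishes, the held-out split from above can be scored. A minimal sketch:

test_loss = model.evaluate([X_test, A_test, E_test], y_test)  # MSE, per the compile() above
print('Test MSE: {}'.format(test_loss))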
Example 3
batch_size = 64  # Batch size
es_patience = 5  # Patience for early stopping
log_dir = init_logging()  # Create log directory and file

# Train/test split
adj_train, adj_test, \
nf_train, nf_test,   \
ef_train, ef_test,   \
y_train, y_test = train_test_split(adj, nf, ef, y, test_size=0.1)

# Model definition
nf_in = Input(shape=(N, F))
adj_in = Input(shape=(N, N))
ef_in = Input(shape=(N, N, S))

gc1 = EdgeConditionedConv(32, activation='relu')([nf_in, adj_in, ef_in])
gc2 = EdgeConditionedConv(64, activation='relu')([gc1, adj_in, ef_in])
pool = GlobalAttentionPool(128)(gc2)
dense1 = Dense(128, activation='relu')(pool)

output = Dense(n_out)(dense1)

# Build model
model = Model(inputs=[nf_in, adj_in, ef_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='mse')
model.summary()

# Callbacks
es_callback = EarlyStopping(monitor='val_loss', patience=es_patience)
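
The snippet stops after defining the callback. A sketch of how it would be passed to training, assuming an epoch count (illustrative, not from the source):

model.fit([nf_train, adj_train, ef_train],
          y_train,
          batch_size=batch_size,
          validation_split=0.1,  # early stopping needs a validation signal
          epochs=100,            # illustrative value
          callbacks=[es_callback])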
Example 4
# Train/test split
A_train, A_test, \
X_train, X_test, \
E_train, E_test, \
y_train, y_test = train_test_split(A, X, E, y, test_size=0.1)

# Model definition
X_in = Input(batch_shape=(None, F))
A_in = Input(batch_shape=(None, None))
E_in = Input(batch_shape=(None, None, S))
I_in = Input(batch_shape=(None, ), dtype='int64')
target = Input(
    tensor=tf.placeholder(tf.float32, shape=(None, n_out), name='target'))

gc1 = EdgeConditionedConv(32, activation='relu')([X_in, A_in, E_in])
gc2 = EdgeConditionedConv(32, activation='relu')([gc1, A_in, E_in])
pool = GlobalAvgPool()([gc2, I_in])
output = Dense(n_out)(pool)

# Build model
model = Model(inputs=[X_in, A_in, E_in, I_in], outputs=output)
optimizer = Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss='mse', target_tensors=target)
model.summary()

# Training setup
sess = K.get_session()
loss = model.total_loss
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_step = opt.minimize(loss)
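
The snippet ends after creating train_step. A minimal sketch of the TF1-style session loop that would drive it; the batch iterator and the graph-membership vector I_batch (required by Spektral's disjoint mode) are assumptions, not part of the source:

sess.run(tf.global_variables_initializer())
for epoch in range(epochs):  # `epochs` assumed defined alongside `learning_rate`
    for X_batch, A_batch, E_batch, I_batch, y_batch in batches:  # hypothetical iterator
        _, batch_loss = sess.run([train_step, loss],
                                 feed_dict={X_in: X_batch,
                                            A_in: A_batch,
                                            E_in: E_batch,
                                            I_in: I_batch,
                                            target: y_batch})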
Example 5
    def _model_builder_ecc(self):
        gc1_channels = 32
        gc2_channels = 64
        full_latent_space = len(self._radius) * self.latent_space

        # Inputs
        adj_in = Input(shape=(self.N, self.N), name='adj_in')
        nf_in = Input(shape=(self.N, self.F), name='nf_in')
        ef_in = Input(shape=(self.N, self.N, self.S), name='ef_in')
        z_in = Input(shape=(full_latent_space, ), name='z_in')

        # Encoder
        gc1 = EdgeConditionedConv(gc1_channels,
                                  kernel_regularizer=l2(self.l2_reg),
                                  name='ecc1')([nf_in, adj_in, ef_in])
        bn1 = BatchNormalization()(gc1)
        relu1 = Activation('relu')(bn1)
        do1 = Dropout(self.dropout_rate)(relu1)

        gc2 = EdgeConditionedConv(gc2_channels,
                                  kernel_regularizer=l2(self.l2_reg),
                                  name='ecc2')([do1, adj_in, ef_in])
        bn2 = BatchNormalization()(gc2)
        relu2 = Activation('relu')(bn2)
        do2 = Dropout(self.dropout_rate)(relu2)

        pool = GlobalAttentionPool(128, name='attn_pool')(do2)

        z_enc_list = []
        z_clip_list = []
        for _r in self._radius:
            z_1 = Dense(128, activation='relu')(pool)
            z_2 = Dense(self.latent_space, activation='linear')(z_1)
            z_3 = CCMProjection(_r)(z_2)
            z_enc_list.append(z_2)
            z_clip_list.append(z_3)

        if len(self._radius) > 1:
            z_enc = Concatenate(name='z_enc')(z_enc_list)
            z_clip = Concatenate(name='z_clip')(z_clip_list)
        else:
            z_enc = z_enc_list[0]
            z_clip = z_clip_list[0]

        # Decoder
        dense3 = Dense(128)(z_in)
        bn3 = BatchNormalization()(dense3)
        relu3 = Activation('relu')(bn3)

        dense4 = Dense(256)(relu3)
        bn4 = BatchNormalization()(dense4)
        relu4 = Activation('relu')(bn4)

        dense5 = Dense(512)(relu4)
        bn5 = BatchNormalization()(dense5)
        relu5 = Activation('relu')(bn5)

        # Output
        adj_out_pre = Dense(self.N * self.N, activation='sigmoid')(relu5)
        adj_out = Reshape((self.N, self.N), name='adj_out')(adj_out_pre)

        nf_out_pre = Dense(self.N * self.F, activation='linear')(relu5)
        nf_out = Reshape((self.N, self.F), name='nf_out')(nf_out_pre)

        ef_out_pre = Dense(self.N * self.N * self.S,
                           activation='linear')(relu5)
        ef_out = Reshape((self.N, self.N, self.S), name='ef_out')(ef_out_pre)

        # Build models
        encoder = Model(inputs=[adj_in, nf_in, ef_in], outputs=z_enc)
        clipper = Model(inputs=[adj_in, nf_in, ef_in], outputs=z_clip)
        decoder = Model(inputs=z_in, outputs=[adj_out, nf_out, ef_out])
        model = Model(inputs=[adj_in, nf_in, ef_in],
                      outputs=decoder(clipper.output))
        model.output_names = ['adj', 'nf', 'ef']

        return model, encoder, decoder, clipper
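
Not part of the original method: a sketch of how the four returned models compose, assuming preprocessed arrays adj, nf, ef with the shapes declared above:

model, encoder, decoder, clipper = self._model_builder_ecc()
z = encoder.predict([adj, nf, ef])                 # unconstrained embeddings
z_clip = clipper.predict([adj, nf, ef])            # embeddings projected by CCMProjection
adj_rec, nf_rec, ef_rec = decoder.predict(z_clip)  # reconstructed graph tensors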
Example 6
def build_ecn_model(trainset, testset, nb_nodes, nb_node_features, nb_edge_features, nb_classes=1,
                    batch_size=32, nb_epochs=100, lr=0.001, save_path=None):
    """Build, train, and evaluate an edge-conditioned graph network for binary classification."""

    # Create model architecture
    X_in = Input(shape=(nb_nodes, nb_node_features))
    A_in = Input(shape=(nb_nodes, nb_nodes))
    E_in = Input(shape=(nb_nodes, nb_nodes, nb_edge_features))
    gc1 = EdgeConditionedConv(32, activation='relu')([X_in, A_in, E_in])
    gc2 = EdgeConditionedConv(64, activation='relu')([gc1, A_in, E_in])
    pool = GlobalAttentionPool(128)(gc2)
    dense1 = Dense(128, activation='relu')(pool)
    output = Dense(nb_classes, activation='sigmoid')(dense1)
    
    # Build model
    model = Model(inputs=[X_in, A_in, E_in], outputs=output)
    optimizer = Adam(lr=lr)
    model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()
    
    # Callback list
    callback_list = []
    # monitor val_loss and terminate training if no improvement
    early_stop = callbacks.EarlyStopping(monitor='val_loss', min_delta=0.00001,
                                         patience=20, verbose=2, mode='auto',
                                         restore_best_weights=True)
    callback_list.append(early_stop)
    
    if save_path is not None:
        # save best model based on val_acc during training
        checkpoint = callbacks.ModelCheckpoint(save_path, monitor='val_acc',
                                               verbose=0, save_best_only=True,
                                               save_weights_only=False, mode='auto')
        callback_list.append(checkpoint)
        
    # Get train and test data
    [X_train, A_train, E_train, y_train] = trainset
    [X_test, A_test, E_test, y_test] = testset
    
    # Compute class weights
    classes = np.unique(y_train)
    weight_list = class_weight.compute_class_weight('balanced', classes, y_train)
    weight_dict = dict(zip(classes, weight_list))
    
    # Train model
    model.fit([X_train, A_train, E_train],
              y_train,
              batch_size=batch_size,
              validation_data=([X_test, A_test, E_test], y_test),
              epochs=nb_epochs,
              verbose=2,
              class_weight=weight_dict,
              callbacks=callback_list)
    
    # Evaluate model on the held-out test set
    prediction = model.predict([X_test, A_test, E_test])
    y_test_predict = (prediction > 0.5).astype('uint8')
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')  # suppress f1-score warnings when some labels are absent
        scores = get_prediction_score(y_test, y_test_predict)
        
    return model, scores
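
A hypothetical call, assuming graphs padded to a common node count N with F node features and S edge features (names are illustrative, not from the original):

trainset = [X_train, A_train, E_train, y_train]
testset = [X_test, A_test, E_test, y_test]
model, scores = build_ecn_model(trainset, testset,
                                nb_nodes=N,
                                nb_node_features=F,
                                nb_edge_features=S,
                                save_path='best_ecn_model.h5')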