def __init__(self, n_out):  # n_out was undefined in the original; made a parameter
    super().__init__()
    self.masking = GraphMasking()  # strips zero-padding when using batch mode
    self.conv1 = ECCConv(32, activation="relu")
    self.conv2 = ECCConv(32, activation="relu")
    self.global_pool = GlobalSumPool()
    self.dense = Dense(n_out)
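# A minimal call() sketch for the layers above, assuming the model is fed by a
# BatchLoader (GraphMasking only makes sense on zero-padded batch-mode inputs).
# The wiring is an illustration, not this snippet's original forward pass.
def call(self, inputs):
    x, a, e = inputs
    x = self.masking(x)
    x = self.conv1([x, a, e])
    x = self.conv2([x, a, e])
    output = self.global_pool(x)
    return self.dense(output)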
def __init__(self, n_out=4, hidden_states=64, gat_layers=2, gat_activation='relu',
             decode_layers=3, decode_activation='relu', regularization=None,
             dropout=0.2, batch_norm=True, forward=True, edgeconv=True):
    super().__init__()
    self.n_out = n_out
    self.hidden_states = hidden_states
    self.gat_activation = gat_activation  # original assigned undefined `conv_activation`
    self.forward = forward
    self.dropout = dropout
    self.gat_layers = gat_layers
    self.edgeconv = edgeconv  # NOTE: assumed flag; the original read self.edgeconv without ever setting it
    self.regularize = regularization
    if type(decode_activation) == str:
        self.decode_activation = tf.keras.activations.get(decode_activation)
    else:
        self.decode_activation = decode_activation
    self.batch_norm = batch_norm

    # Define layers of the model
    if self.edgeconv:
        self.ECC1 = ECCConv(hidden_states,
                            [hidden_states, hidden_states, hidden_states],
                            n_out=hidden_states, activation="relu",
                            kernel_regularizer=self.regularize)
    self.GCNs = [GCNConv(hidden_states * int(i), activation=self.gat_activation,
                         kernel_regularizer=self.regularize)
                 for i in 2**np.arange(self.gat_layers)]
    self.Pool1 = GlobalMaxPool()
    self.Pool2 = GlobalAvgPool()
    self.Pool3 = GlobalSumPool()
    self.decode = [Dense(i * hidden_states, activation=self.decode_activation)
                   for i in 2**np.arange(decode_layers)]
    self.dropout_layers = [Dropout(dropout) for _ in range(len(self.decode))]
    if self.batch_norm:
        self.norm_layers = [BatchNormalization() for _ in range(len(self.decode))]
    else:
        self.norm_layers = [no_norm for _ in range(len(self.decode))]  # no_norm: identity helper defined elsewhere in the repo
    self.final = Dense(n_out)
def buildmodel(dataset):
    F = dataset.n_node_features  # Dimension of node features
    S = dataset.n_edge_features  # Dimension of edge features
    n_out = dataset.n_labels     # Dimension of the target

    # Model
    X_in = Input(shape=(F,), name="X_in")
    A_in = Input(shape=(None,), sparse=True, name="A_in")
    E_in = Input(shape=(S,), name="E_in")
    I_in = Input(shape=(), name="segment_ids_in", dtype=tf.int32)

    X_1 = ECCConv(32, activation="relu")([X_in, A_in, E_in])
    X_2 = ECCConv(32, activation="relu")([X_1, A_in, E_in])
    X_3 = GlobalSumPool()([X_2, I_in])
    output = Dense(n_out)(X_3)

    # Build model
    model = Model(inputs=[X_in, A_in, E_in, I_in], outputs=output)
    model.summary()
    return model
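# Usage sketch for buildmodel(); `QM9` is a hypothetical stand-in for whatever
# Spektral dataset the surrounding script uses. With a DisjointLoader, the
# loader's batches match the four inputs (X, A, E, I) the model declares.
dataset = QM9(amount=1000)  # assumed dataset
model = buildmodel(dataset)
loader = DisjointLoader(dataset, batch_size=32, epochs=10)
model.compile(optimizer=Adam(1e-3), loss="mse")
model.fit(loader.load(), steps_per_epoch=loader.steps_per_epoch, epochs=10)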
def __init__(self, n_out=7, hidden_states=64):  # hidden_states was undefined in the original; made a parameter
    super().__init__()
    # Define layers of the model
    self.ECC1 = ECCConv(hidden_states,
                        [hidden_states, hidden_states, hidden_states],
                        n_out=hidden_states, activation="relu")
    self.GCN1 = GCNConv(hidden_states, activation="relu")
    self.GCN2 = GCNConv(hidden_states * 2, activation="relu")
    self.GCN3 = GCNConv(hidden_states * 4, activation="relu")
    self.GCN4 = GCNConv(hidden_states * 8, activation="relu")
    self.Pool1 = GlobalMaxPool()
    self.Pool2 = GlobalAvgPool()
    self.Pool3 = GlobalSumPool()
    self.decode = [Dense(size * hidden_states) for size in [16, 8, 4, 2, 2]]
    self.norm_layers = [BatchNormalization() for _ in range(len(self.decode))]
    self.d2 = Dense(n_out)
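# Forward-pass sketch for the layers above (disjoint mode assumed, so each pool
# takes the segment indices I). Concatenating the max/avg/sum pools and applying
# ReLU in the decoder are guesses at the intent, not the snippet's real call().
def call(self, inputs):
    x, a, e, i = inputs
    x = self.ECC1([x, a, e])
    for gcn in (self.GCN1, self.GCN2, self.GCN3, self.GCN4):
        x = gcn([x, a])
    x = tf.concat([self.Pool1([x, i]), self.Pool2([x, i]), self.Pool3([x, i])], axis=-1)
    for dense, norm in zip(self.decode, self.norm_layers):
        x = norm(tf.nn.relu(dense(x)))
    return self.d2(x)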
def __init__(self, edgeconv, edgenorm, hidden_states=64, edgetype=0, forward=True,
             K=(1, 2), agg_method='min', regularization=None, dropout=0.025):
    super().__init__()
    self.n_out = 3
    self.n_sigs = 2
    self.hidden_states = hidden_states
    self.conv_activation = 'relu'
    self.forward = forward
    self.dropout = dropout
    self.Ks = K
    self.agg_method = agg_method
    self.conv_layers = 2
    self.decode_layers = 2
    self.edgeconv = edgeconv
    self.edgenorm = edgenorm
    self.edgetype = edgetype
    self.regularize = regularization
    self.decode_activation = d_act  # d_act: default activation defined elsewhere in the repo
    self.batch_norm = True

    # Define layers of the model
    if self.edgenorm:
        self.norm_edge = BatchNormalization()
    self.MPs = [SGConv(self.hidden_states, self.hidden_states, K=K,
                       agg_method=self.agg_method, dropout=self.dropout)
                for K in self.Ks]
    if self.edgeconv:
        self.ECC1 = ECCConv(self.hidden_states,
                            [self.hidden_states, self.hidden_states, self.hidden_states],
                            n_out=self.hidden_states, activation="relu",
                            kernel_regularizer=self.regularize)
    self.GCNs = [GraphSageConv(self.hidden_states * int(i), activation=self.conv_activation,
                               kernel_regularizer=self.regularize)
                 for i in 4 * 2**np.arange(self.conv_layers)]
    self.Pool1 = GlobalMaxPool()
    self.Pool2 = GlobalAvgPool()
    self.Pool3 = GlobalSumPool()
    self.decode = [Dense(i * self.hidden_states)
                   for i in 2 * 2**np.arange(self.decode_layers + 1, 1, -1)]
    self.dropout_layers = [Dropout(self.dropout) for _ in range(len(self.decode))]
    if self.batch_norm:
        self.norm_layers = [BatchNormalization() for _ in range(len(self.decode))]
    else:
        self.norm_layers = [no_norm for _ in range(len(self.decode))]
    self.loge = [Dense(self.hidden_states) for _ in range(2)]
    self.loge_out = Dense(1)
    self.angles = [Dense(self.hidden_states) for _ in range(2)]
    self.angles_out = Dense(2)
    self.angle_scale = Dense(2)
    if self.n_sigs > 0:
        self.sigs = [Dense(self.hidden_states) for _ in range(2)]
        self.sigs_out = Dense(self.n_sigs)
def __init__(self, n_out=4, hidden_states=64, n_GCN=2,
             GCN_activation=LeakyReLU(alpha=0.2),
             decode_activation=LeakyReLU(alpha=0.2),
             regularize=None, dropout=0.2, forward=True, ECC=True):
    super().__init__()
    self.n_out = n_out
    self.hidden_states = hidden_states
    self.conv_activation = GCN_activation
    self.forward = forward
    self.dropout = dropout
    self.n_GCN = n_GCN
    self.ECC = ECC  # original assigned this twice; duplicate removed
    self.regularize = regularize
    self.decode_activation = decode_activation

    # Define layers of the model
    if self.ECC:
        self.ECC1 = ECCConv(hidden_states,
                            [hidden_states, hidden_states, hidden_states],
                            n_out=hidden_states, activation="relu",
                            kernel_regularizer=self.regularize)
    self.GCNs = [GCNConv(hidden_states * int(i), activation=GCN_activation,
                         kernel_regularizer=self.regularize)
                 for i in 2**np.arange(n_GCN)]
    self.Pool1 = GlobalMaxPool()
    self.Pool2 = GlobalAvgPool()
    self.Pool3 = GlobalSumPool()
    self.decode = [Dense(i * hidden_states) for i in 2**np.arange(n_GCN)]
    self.dropout_layers = [Dropout(dropout) for _ in range(len(self.decode))]
    self.norm_layers = [BatchNormalization() for _ in range(len(self.decode))]
    self.final = Dense(n_out)
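# Hypothetical instantiation; the class name `GCNModel` is an assumption, since
# only __init__ is shown. A string activation (e.g. 'relu') also works here.
model = GCNModel(n_out=4, hidden_states=32, n_GCN=3, dropout=0.1, ECC=True)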
def __init__(self, n_out=6, hidden_states=64, forward=False, dropout=0.5):
    super().__init__()
    self.forward = forward
    # Define layers of the model
    self.ECC1 = ECCConv(hidden_states, [hidden_states, hidden_states],
                        n_out=hidden_states, activation="relu")
    self.GCN1 = GCNConv(hidden_states, activation="relu")
    self.GCN2 = GCNConv(hidden_states * 2, activation="relu")
    self.GCN3 = GCNConv(hidden_states * 4, activation="relu")
    self.GCN4 = GCNConv(hidden_states * 8, activation="relu")
    self.Pool1 = GlobalMaxPool()
    self.Pool2 = GlobalAvgPool()
    # self.Pool3 = GlobalSumPool()
    self.decode = [Dense(size * hidden_states) for size in [16, 16, 8]]
    self.drop_w = [Dropout(dropout) for _ in range(len(self.decode))]
    self.norm_layers = [BatchNormalization() for _ in range(len(self.decode))]
    self.angles = [Dense(hidden_states) for _ in range(2)]
    self.angles_out = Dense(2)
    self.sigs = [Dense(hidden_states) for _ in range(2)]
    self.sigs_out = Dense(2)
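# Head-wiring sketch for the angle/uncertainty branches above: both branches
# read from the shared decoded embedding and their outputs are concatenated.
# This layout is an assumption; the snippet's real call() is not shown.
def predict_heads(self, x):
    angles, sigs = x, x
    for layer in self.angles:
        angles = tf.nn.relu(layer(angles))
    for layer in self.sigs:
        sigs = tf.nn.relu(layer(sigs))
    return tf.concat([self.angles_out(angles), self.sigs_out(sigs)], axis=-1)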
dataset_tr = dataset[idx_tr]
dataset_va = dataset[idx_va]
dataset_te = dataset[idx_te]
loader_tr = DisjointLoader(dataset_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(dataset_te, batch_size=batch_size, epochs=1)

################################################################################
# Build model
################################################################################
X_in = Input(shape=(F,))
A_in = Input(shape=(None,), sparse=True)
E_in = Input(shape=(S,))
I_in = Input(shape=(), dtype=tf.int64)

X_1 = ECCConv(32, activation="relu")([X_in, A_in, E_in])
X_2 = ECCConv(32, activation="relu")([X_1, A_in, E_in])
X_3 = GlobalSumPool()([X_2, I_in])
output = Dense(n_out, activation="sigmoid")(X_3)

model = Model(inputs=[X_in, A_in, E_in, I_in], outputs=output)
optimizer = Adam(learning_rate)
loss_fn = BinaryCrossentropy()

################################################################################
# Fit model
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    with tf.GradientTape() as tape:
        # Body completed to be runnable (the original snippet was truncated
        # here), following the standard Spektral training-step pattern.
        predictions = model(inputs, training=True)
        loss = loss_fn(target, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
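# Minimal training-loop sketch consuming loader_tr; an assumed continuation
# that mirrors the standard Spektral disjoint-loader loop.
step = 0
results = []
for batch in loader_tr:
    step += 1
    results.append(train_step(*batch))
    if step == loader_tr.steps_per_epoch:
        step = 0
        print("Avg. loss: {}".format(np.mean(results)))
        results = []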
def __init__(self, n_out=3, n_sigs=2, hidden_states=64, conv_layers=2,
             decode_layers=3, conv_activation='relu', decode_activation='relu',
             regularization=None, dropout=0.03):
    # decode_activation defaulted to the non-callable `1` in the original and the
    # else-branch assigned an undefined `d_act`; both fixed to the evident intent.
    super().__init__()
    self.n_out = n_out
    self.n_sigs = n_sigs
    self.hidden_states = hidden_states
    self.conv_activation = conv_activation
    self.dropout = dropout
    self.conv_layers = conv_layers
    self.regularize = regularization
    if type(decode_activation) == str:
        self.decode_activation = tf.keras.activations.get(decode_activation)
    else:
        self.decode_activation = decode_activation

    # Define layers of the model
    # self.hop2mean = SGConv(hidden_states, hidden_states, K=2, agg_method='mean', dropout=dropout)
    self.hop12min1 = SGConv(hidden_states, hidden_states, K=1, agg_method='min', dropout=dropout)
    self.hop12min2 = SGConv(hidden_states, hidden_states, K=2, agg_method='min', dropout=dropout)
    # self.hop12max1 = SGConv(hidden_states, hidden_states, K=1, agg_method='max', dropout=dropout)
    # self.hop12max2 = SGConv(hidden_states, hidden_states, K=2, agg_method='max', dropout=dropout)

    # Edges
    a = 2
    self.edgeback = ECCConv(self.hidden_states // 2,
                            [self.hidden_states // a, self.hidden_states // a, self.hidden_states // a],
                            n_out=self.hidden_states // 2, activation="relu",
                            kernel_regularizer=self.regularize)
    # self.edgeforward = ECCConv(self.hidden_states//2, [self.hidden_states//2, self.hidden_states//2, self.hidden_states//2], n_out=self.hidden_states//2, activation="relu", kernel_regularizer=self.regularize)
    self.norm_edge = BatchNormalization()
    # self.norm_edge_f = BatchNormalization()
    self.GCNs = [GraphSageConv(hidden_states * int(i), activation=self.conv_activation,
                               kernel_regularizer=self.regularize)
                 for i in 2 * 2**np.arange(self.conv_layers)]
    self.Pool1 = GlobalMaxPool()
    self.Pool2 = GlobalAvgPool()
    self.Pool3 = GlobalSumPool()
    self.decode = [Dense(int(i) * hidden_states)
                   for i in 1.5 * 2**np.arange(decode_layers + 1, 1, -1)]
    self.dropout_layers = [Dropout(dropout) for _ in range(len(self.decode))]
    self.norm_layers = [BatchNormalization() for _ in range(len(self.decode))]
    self.loge = [Dense(hidden_states) for _ in range(2)]
    self.loge_out = Dense(1)
    self.angles = [Dense(hidden_states) for _ in range(2)]
    self.angles_out = Dense(2)
    self.angle_scale = Dense(2)
    self.sigs = [Dense(hidden_states) for _ in range(2)]
    self.sigs_out = Dense(n_sigs)
# d_act below is a module-level default activation defined elsewhere in the repo.
def __init__(self, n_out=3, n_sigs=2, hidden_states=64, conv_layers=2, glob=True,
             conv_activation='relu', decode_layers=3, decode_activation=d_act,
             regularization=None, dropout=0.2, batch_norm=True, forward=True,
             edgeconv=True, edgenorm=True):
    super().__init__()
    self.n_out = n_out
    self.n_sigs = n_sigs
    self.hidden_states = hidden_states
    self.conv_activation = conv_activation
    self.forward = forward
    self.dropout = dropout
    self.glob = glob
    self.conv_layers = conv_layers
    self.edgeconv = edgeconv
    self.edgenorm = edgenorm
    self.regularize = regularization
    if type(decode_activation) == str:
        self.decode_activation = tf.keras.activations.get(decode_activation)
    else:
        self.decode_activation = decode_activation  # original re-assigned d_act here, discarding the argument
    self.batch_norm = batch_norm

    # Define layers of the model
    if self.edgenorm:
        self.norm_edge = BatchNormalization()
    self.MP = MP(hidden_states, hidden_states, dropout=dropout)
    if self.edgeconv:
        self.ECC1 = ECCConv(hidden_states,
                            [hidden_states, hidden_states, hidden_states],
                            n_out=hidden_states, activation="relu",
                            kernel_regularizer=self.regularize)
    self.GCNs = [GraphSageConv(hidden_states * int(i), activation=self.conv_activation,
                               kernel_regularizer=self.regularize)
                 for i in 2 * 2**np.arange(self.conv_layers)]
    self.Pool1 = GlobalMaxPool()
    self.Pool2 = GlobalAvgPool()
    self.Pool3 = GlobalSumPool()
    self.decode = [Dense(i * hidden_states)
                   for i in 2 * 2**np.arange(decode_layers + 1, 1, -1)]
    self.dropout_layers = [Dropout(dropout) for _ in range(len(self.decode))]
    if self.batch_norm:
        self.norm_layers = [BatchNormalization() for _ in range(len(self.decode))]
    else:
        self.norm_layers = [no_norm for _ in range(len(self.decode))]
    self.loge = [Dense(hidden_states) for _ in range(2)]
    self.loge_out = Dense(1)
    self.angles = [Dense(hidden_states) for _ in range(2)]
    self.angles_out = Dense(2)
    self.angle_scale = Dense(2)
    if n_sigs > 0:
        self.sigs = [Dense(hidden_states) for _ in range(2)]
        self.sigs_out = Dense(n_sigs)
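# Sketch of how the loge/angles/sigs heads could be combined on the pooled
# graph embedding; an assumption based on the layer names (the real call() is
# not shown, and angle_scale is omitted here).
def apply_heads(self, x):
    loge, angles, sigs = x, x, x
    for layer in self.loge:
        loge = self.decode_activation(layer(loge))
    for layer in self.angles:
        angles = self.decode_activation(layer(angles))
    for layer in self.sigs:
        sigs = self.decode_activation(layer(sigs))
    return tf.concat([self.loge_out(loge), self.angles_out(angles), self.sigs_out(sigs)], axis=-1)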
loader_tr = DisjointLoader(dataset_tr, batch_size=batch_size, epochs=epochs, node_level=False)
loader_te = DisjointLoader(dataset_te, batch_size=batch_size, epochs=1, node_level=False)
# load() output: X, A, E, I

################################################################################
# BUILD MODEL
################################################################################
X_in = Input(shape=(F,), name='X_in')
A_in = Input(shape=(None,), sparse=True, name='A_in')
E_in = Input(shape=(S,), name='E_in')
I_in = Input(shape=(), name='segment_ids_in', dtype=tf.int32)

X_1 = ECCConv(32, activation='relu')([X_in, A_in, E_in])
X_2 = ECCConv(32, activation='relu')([X_1, A_in, E_in])
X_3 = GlobalSumPool()([X_2, I_in])
output = Dense(n_out)(X_3)

# Build model
model = Model(inputs=[X_in, A_in, E_in, I_in], outputs=output)
opt = Adam(learning_rate=learning_rate)  # `lr=` is deprecated in recent Keras
loss_fn = MeanSquaredError()

################################################################################
# FIT MODEL
################################################################################
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
    # Completed to be runnable: the snippet was truncated after the decorator.
    # This follows the standard Spektral training-step pattern.
    with tf.GradientTape() as tape:
        predictions = model(inputs, training=True)
        loss = loss_fn(target, predictions) + sum(model.losses)
    gradients = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
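# Evaluation sketch over loader_te; an assumed continuation mirroring the usual
# Spektral pattern for computing the test loss after training finishes.
print('Testing model')
model_loss = 0
for batch in loader_te:
    inputs, target = batch
    predictions = model(inputs, training=False)
    model_loss += loss_fn(target, predictions)
model_loss /= loader_te.steps_per_epoch
print('Done. Test loss: {}'.format(model_loss))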