Example 1
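 # LSTM encoder plus a GNN encoder (define_ns_gnn_encoder), with a separate
 # linear head (lstm_out) from the LSTM's last-timestep features to out_dim.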
 def __init__(self, config):
     super().__init__()
     self.lstm_pooling = config['lstm_pooling']
     self.lstm_encoder = define_lstm_encoder()(config)
     self.gnn_name = config['gnn_name']
     self.gnn_encoder = define_ns_gnn_encoder(config['gnn_name'])(config)
     self.last_act = get_act_fn(config['final_act_fn'])
     self.lstm_out = nn.Linear(config['lstm_last_ts_dim'],
                               config['out_dim'])
     self._initialize_weights()
Example 2
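 # LSTM-only model (DynamicLSTM); if flat_after is set, flat features are
 # projected and concatenated with the LSTM output before the output layer.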
 def __init__(self, config):
     super().__init__()
     self.lstm_encoder = DynamicLSTM(config)
     self.flat_after = config['flat_after']
     fc_in_dim = config['lstm_outdim']
     if self.flat_after:
         flat_dim = (config['flat_nhid'] if config['flat_nhid'] is not None
                     else config['num_flat_feats'])
         self.flat_fc = nn.Linear(config['num_flat_feats'], flat_dim)
         fc_in_dim += flat_dim
     self.out_layer = nn.Linear(fc_in_dim, config['out_dim'])
     self.drop = nn.Dropout(config['main_dropout'])
     self.last_act = get_act_fn(config['final_act_fn'])
     self._initialize_weights()
Example 3
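 # GNN-only model; if flat_after is set, flat features are projected and
 # concatenated with the GNN output before the output layer.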
 def __init__(self, config):
     super().__init__()
     self.gnn_encoder = define_gnn_encoder(config['gnn_name'])(config)
     self.last_act = get_act_fn(config['final_act_fn'])
     # where to put the flat features
     # self.flat_before = config['add_flat'] and config['flat_first'] (done in GraphDataset)
     self.flat_after = config['flat_after']
     if self.flat_after:
         flat_dim = (config['flat_nhid'] if config['flat_nhid'] is not None
                     else config['num_flat_feats'])
         self.flat_fc = nn.Linear(config['num_flat_feats'], flat_dim)
         fc_in_dim = config['gnn_outdim'] + flat_dim
         self.out_layer = nn.Linear(fc_in_dim, config['out_dim'])
     self.drop = nn.Dropout(config['main_dropout'])
     self._initialize_weights()
Example 4
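 # Combined LSTM + GNN model over a dynamic graph with k neighbours (dg_k);
 # the output layer's input width depends on the flat and LSTM branches used.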
 def __init__(self, config):
     super().__init__()
     self.lstm_encoder = define_lstm_encoder()(config)
     self.gnn_encoder = define_gnn_encoder(config['gnn_name'])(config)
     self.k = config['dg_k']
     self.flat_after, self.add_lstm, fc_in_dim, flat_dim = determine_fc_in_dim(
         config)
     if self.flat_after:
         self.flat_fc = nn.Linear(config['num_flat_feats'], flat_dim)
     if self.flat_after or self.add_lstm:
         self.out_layer = nn.Linear(fc_in_dim, config['num_cls'])
     self.last_act = get_act_fn(config['final_act_fn'])
     self.drop = nn.Dropout(config['main_dropout'])
     self.lstm_out = nn.Linear(config['lstm_last_ts_dim'],
                               config['out_dim'])
     self._initialize_weights()
Example 5
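 # Minimal variant: a GNN encoder (define_ns_gnn_encoder) followed only by
 # the final activation function.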
 def __init__(self, config):
     super().__init__()
     self.gnn_encoder = define_ns_gnn_encoder(config['gnn_name'])(config)
     self.last_act = get_act_fn(config['final_act_fn'])
     self._initialize_weights()