Example #1
    def __init__(self, config, log = True):
        super(nwp_transformer_2lin, self).__init__()
        embed = config['embed']
        tf = config['tf']

        self.is_cuda = config['cuda']
        self.max_len = tf['max_len']

        # create the word embedding layer
        self.embed = nn.Embedding(num_embeddings = embed['n_embeddings'],
                                  embedding_dim = embed['embedding_dim'],
                                  sparse = embed['sparse'],
                                  padding_idx = embed['padding_idx'])
        # prepares the positional embeddings
        self.pos_emb = self.pos_embedding(tf['max_len'], embed['embedding_dim'])

        self.TF_enc = transformer_encoder(in_size = tf['in_size'], 
                                          fc_size = tf['fc_size'], 
                                          n_layers = tf['n_layers'], 
                                          h = tf['h'])

        # two linear layers with a ReLU in between map the transformer
        # output to the vocabulary size
        self.linear = nn.Sequential(nn.Linear(tf['in_size'], tf['in_size']),
                                    nn.ReLU(),
                                    nn.Linear(tf['in_size'], embed['n_embeddings'])
                                    )
        if log:
            self.log(embed, tf)
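
The pos_embedding helper called in the constructor above is not part of the snippet. Below is a minimal sketch of what it could compute, assuming standard sinusoidal positional encodings of shape (max_len, embedding_dim); the standalone form and the internals are illustrative, since the real helper is defined elsewhere in the class.

import math
import torch

def pos_embedding(max_len, embedding_dim):
    # sinusoidal positional encodings: one row per position,
    # one column per embedding dimension (assumes an even embedding_dim)
    pe = torch.zeros(max_len, embedding_dim)
    position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
    div_term = torch.exp(torch.arange(0, embedding_dim, 2).float()
                         * (-math.log(10000.0) / embedding_dim))
    pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions: sine
    pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions: cosine
    return pe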
Example #2
    def __init__(self, config, log = True):
        super(nwp_transformer, self).__init__()
        
        embed = config['embed']
        tf = config['tf']

        self.is_cuda = config['cuda']
        self.max_len = tf['max_len']

        # create the word embedding layer
        self.embed = nn.Embedding(num_embeddings = embed['n_embeddings'],
                                  embedding_dim = embed['embedding_dim'],
                                  sparse = embed['sparse'],
                                  padding_idx = embed['padding_idx'])
        # prepares the positional embeddings
        self.pos_emb = self.pos_embedding(tf['max_len'], embed['embedding_dim'])

        self.TF_enc = transformer_encoder(in_size = tf['in_size'], 
                                          fc_size = tf['fc_size'], 
                                          n_layers = tf['n_layers'], 
                                          h = tf['h'])
        # linear layer has no extra configurations, it just maps directly
        # from the transformer output to the number of embeddings
        self.linear = nn.Linear(tf['in_size'], embed['n_embeddings'])
        
        if log:
            self.log(embed, tf)
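
Neither example shows the config dict these constructors expect; the sketch below reconstructs its shape from the keys read above. All values are illustrative, and actually instantiating the model also requires the transformer_encoder class and the pos_embedding/log helpers, which are not included in the snippets.

config = {
    'cuda': False,
    'embed': {'n_embeddings': 10000,   # vocabulary size
              'embedding_dim': 512,
              'sparse': False,
              'padding_idx': 0},
    'tf': {'max_len': 64,
           'in_size': 512,             # should match embedding_dim
           'fc_size': 2048,
           'n_layers': 6,
           'h': 8},                    # number of attention heads
}
# model = nwp_transformer(config)      # assumes the class and its helpers are importable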
Example #3
    def __init__(self, config):
        super(translator_transformer, self).__init__()
        embed = config['embed']
        tf = config['tf']
        self.is_cuda = config['cuda']
        self.max_len = tf['max_len']
        # create the embedding layer
        self.embed = nn.Embedding(num_embeddings = embed['num_chars'],
                                  embedding_dim = embed['embedding_dim'],
                                  sparse = embed['sparse'],
                                  padding_idx = embed['padding_idx'])
        # create the positional embeddings
        self.pos_emb = self.pos_embedding(tf['max_len'], embed['embedding_dim'])
        # create the (stacked) transformer encoder and decoder
        self.TF_enc = transformer_encoder(in_size = tf['input_size'], fc_size = tf['fc_size'],
                                          n_layers = tf['n_layers'], h = tf['h'])
        self.TF_dec = transformer_decoder(in_size = tf['input_size'], fc_size = tf['fc_size'],
                                          n_layers = tf['n_layers'], h = tf['h'])
        # map the decoder output back to the character vocabulary
        self.linear = nn.Linear(embed['embedding_dim'], embed['num_chars'])
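
The forward pass of translator_transformer is not shown; the sketch below only illustrates how the layers defined in __init__ could be wired together. The call signatures of TF_enc and TF_dec (and any masking they require) are assumptions, since those classes are not part of the snippet.

    def forward(self, enc_input, dec_input):
        # add positional information to the embedded source and target sequences
        enc = self.embed(enc_input) + self.pos_emb[:enc_input.size(1), :]
        dec = self.embed(dec_input) + self.pos_emb[:dec_input.size(1), :]
        # encode the source, then decode conditioned on the encoder output
        enc_out = self.TF_enc(enc)
        dec_out = self.TF_dec(dec, enc_out)
        # project the decoder states onto the character vocabulary
        return self.linear(dec_out)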