Code example #1
File: decoder.py  Project: shrutijpalaskar/xnmt
 def __init__(self, vocab_size, layers=1, input_dim=None, lstm_dim=None, mlp_hidden_dim=None, trg_embed_dim=None, dropout=None,
              rnn_spec="lstm", residual_to_output=False):
   lstm_dim = lstm_dim or model_globals.get("default_layer_dim")
   mlp_hidden_dim = mlp_hidden_dim or model_globals.get("default_layer_dim")
   trg_embed_dim = trg_embed_dim or model_globals.get("default_layer_dim")
   input_dim = input_dim or model_globals.get("default_layer_dim")
   self.fwd_lstm = RnnDecoder.rnn_from_spec(rnn_spec, layers, trg_embed_dim, lstm_dim, model_globals.dynet_param_collection.param_col, residual_to_output)
   self.mlp = MLP(input_dim + lstm_dim, mlp_hidden_dim, vocab_size, model_globals.dynet_param_collection.param_col)
   self.dropout = dropout or model_globals.get("dropout")
   self.state = None
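All of the examples on this page resolve unspecified dimensions through model_globals.get("default_layer_dim") and register their parameters on the shared model_globals.dynet_param_collection.param_col. The following is a minimal, hypothetical stand-in for that pattern (the dict-backed store and the resolve helper are assumptions for illustration, not xnmt's actual model_globals); it only demonstrates how the "x = x or default" fallback idiom used above behaves.

# Hypothetical stand-in, for illustration only; xnmt's real model_globals
# also holds the DyNet parameter collection and other run-wide state.
_settings = {"default_layer_dim": 512, "dropout": 0.0}

def get(key):
    return _settings[key]

def resolve(dim=None):
    # Mirrors "input_dim = input_dim or model_globals.get('default_layer_dim')":
    # any falsy value (None or 0) falls back to the configured default.
    return dim or get("default_layer_dim")

assert resolve() == 512
assert resolve(256) == 256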
Code example #2
 def __init__(self, input_dim=None, state_dim=None, hidden_dim=None):
   input_dim = input_dim or model_globals.get("default_layer_dim")
   state_dim = state_dim or model_globals.get("default_layer_dim")
   hidden_dim = hidden_dim or model_globals.get("default_layer_dim")
   self.input_dim = input_dim
   self.state_dim = state_dim
   self.hidden_dim = hidden_dim
   param_collection = model_globals.dynet_param_collection.param_col
   self.pW = param_collection.add_parameters((hidden_dim, input_dim))
   self.pV = param_collection.add_parameters((hidden_dim, state_dim))
   self.pb = param_collection.add_parameters(hidden_dim)
   self.pU = param_collection.add_parameters((1, hidden_dim))
   self.curr_sent = None
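The parameter shapes above (pW: hidden_dim x input_dim, pV: hidden_dim x state_dim, pb: hidden_dim, pU: 1 x hidden_dim) match a Bahdanau-style MLP scorer that maps an encoder vector and a decoder state to a scalar score. Below is a minimal sketch, assuming DyNet is installed, of how such parameters can be combined; it is illustrative and not xnmt's actual attention code.

import dynet as dy

pc = dy.ParameterCollection()
input_dim, state_dim, hidden_dim = 4, 3, 5
pW = pc.add_parameters((hidden_dim, input_dim))
pV = pc.add_parameters((hidden_dim, state_dim))
pb = pc.add_parameters(hidden_dim)
pU = pc.add_parameters((1, hidden_dim))

dy.renew_cg()
h = dy.inputVector([0.1] * input_dim)  # one encoder vector
s = dy.inputVector([0.2] * state_dim)  # current decoder state
# score = U * tanh(W h + V s + b), one scalar per encoder position
score = dy.parameter(pU) * dy.tanh(
    dy.parameter(pW) * h + dy.parameter(pV) * s + dy.parameter(pb))
print(score.npvalue())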
Code example #3
 def __init__(self,
              input_dim=512,
              layers=1,
              hidden_dim=None,
              downsampling_method="skip",
              reduce_factor=2,
              dropout=None):
     hidden_dim = hidden_dim or model_globals.get("default_layer_dim")
     dropout = dropout or model_globals.get("dropout")
     self.dropout = dropout
     self.builder = pyramidal.PyramidalRNNBuilder(
         layers, input_dim, hidden_dim,
         model_globals.dynet_param_collection.param_col,
         dy.VanillaLSTMBuilder, downsampling_method, reduce_factor)
Code example #4
 def init_builder(self,
                  input_dim,
                  layers,
                  hidden_dim=None,
                  chn_dim=3,
                  num_filters=32,
                  filter_size_time=3,
                  filter_size_freq=3,
                  stride=(2, 2),
                  dropout=None):
     model = model_globals.dynet_param_collection.param_col
     hidden_dim = hidden_dim or model_globals.get("default_layer_dim")
     dropout = dropout or model_globals.get("dropout")
     self.dropout = dropout
     self.builder = conv_encoder.ConvBiRNNBuilder(
         layers, input_dim, hidden_dim, model, dy.VanillaLSTMBuilder,
         chn_dim, num_filters, filter_size_time, filter_size_freq, stride)
Code example #5
 def __init__(self,
              input_dim=512,
              layers=1,
              hidden_dim=None,
              residual_to_output=False,
              dropout=None,
              bidirectional=True):
     model = model_globals.dynet_param_collection.param_col
     hidden_dim = hidden_dim or model_globals.get("default_layer_dim")
     dropout = dropout or model_globals.get("dropout")
     self.dropout = dropout
     if bidirectional:
         self.builder = residual.ResidualBiRNNBuilder(
             layers, input_dim, hidden_dim, model, dy.VanillaLSTMBuilder,
             residual_to_output)
     else:
         self.builder = residual.ResidualRNNBuilder(layers, input_dim,
                                                    hidden_dim, model,
                                                    dy.VanillaLSTMBuilder,
                                                    residual_to_output)
Code example #6
 def __init__(self,
              input_dim=None,
              layers=1,
              hidden_dim=None,
              dropout=None,
              bidirectional=True):
     model = model_globals.dynet_param_collection.param_col
     input_dim = input_dim or model_globals.get("default_layer_dim")
     hidden_dim = hidden_dim or model_globals.get("default_layer_dim")
     dropout = dropout or model_globals.get("dropout")
     self.input_dim = input_dim
     self.layers = layers
     self.hidden_dim = hidden_dim
     self.dropout = dropout
     if bidirectional:
         self.builder = dy.BiRNNBuilder(layers, input_dim, hidden_dim,
                                        model, dy.VanillaLSTMBuilder)
     else:
         self.builder = dy.VanillaLSTMBuilder(layers, input_dim, hidden_dim,
                                              model)
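Unlike the preceding examples, this one builds directly on DyNet's stock builders. A minimal usage sketch, assuming DyNet is installed (dimensions and inputs are illustrative): dy.BiRNNBuilder splits hidden_dim evenly between the forward and backward directions, and transduce runs both over a sequence of input expressions.

import dynet as dy

pc = dy.ParameterCollection()
layers, input_dim, hidden_dim = 1, 10, 20  # hidden_dim must be even for the BiRNN
builder = dy.BiRNNBuilder(layers, input_dim, hidden_dim, pc, dy.VanillaLSTMBuilder)

dy.renew_cg()
seq = [dy.inputVector([0.0] * input_dim) for _ in range(5)]  # toy 5-step sequence
outputs = builder.transduce(seq)  # one hidden_dim-sized expression per step
print(len(outputs), outputs[0].dim())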
Code example #7
File: embedder.py  Project: shrutijpalaskar/xnmt
 def __init__(self, vocab_size, emb_dim=None):
     self.vocab_size = vocab_size
     if emb_dim is None: emb_dim = model_globals.get("default_layer_dim")
     self.emb_dim = emb_dim
     self.embeddings = model_globals.dynet_param_collection.param_col.add_lookup_parameters(
         (vocab_size, emb_dim))
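The lookup table registered above is read back one token id at a time during embedding. Below is a minimal usage sketch, assuming DyNet is installed; the parameter collection, vocabulary size, and token ids are illustrative, not taken from xnmt.

import dynet as dy

pc = dy.ParameterCollection()
vocab_size, emb_dim = 1000, 64
embeddings = pc.add_lookup_parameters((vocab_size, emb_dim))

dy.renew_cg()
vec = embeddings[42]                              # one embedding, as an expression
batch = dy.lookup_batch(embeddings, [3, 17, 42])  # batched lookup of several ids
print(vec.dim(), batch.dim())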