Example #1
def default_params():
    # Start from the shared ModelBase defaults, then add/override the
    # sequence, embedding, and beam-search settings for this model.
    params = ModelBase.default_params()
    params.update({
        "source.max_seq_len": 50,
        "source.reverse": True,
        "target.max_seq_len": 50,
        "embedding.dim": 100,
        "embedding.share": False,
        "inference.beam_search.beam_width": 0,
        "inference.beam_search.length_penalty_weight": 0.0,
        "inference.beam_search.choose_successors_fn": "choose_top_k",
        "vocab_source": "",
        "vocab_target": "",
    })
    return params
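
The snippet assumes a ModelBase class supplying shared defaults. A minimal self-contained sketch of the pattern, with a hypothetical ModelBase and illustrative values (not the real seq2seq base class):

class ModelBase:
    @staticmethod
    def default_params():
        # Shared defaults every model starts from (illustrative values only).
        return {
            "optimizer.name": "Adam",
            "optimizer.learning_rate": 0.0001,
        }

def default_params():
    # Copy the base defaults, then layer model-specific settings on top.
    params = ModelBase.default_params()
    params.update({"embedding.dim": 100, "source.max_seq_len": 50})
    return params

print(default_params())
# {'optimizer.name': 'Adam', 'optimizer.learning_rate': 0.0001,
#  'embedding.dim': 100, 'source.max_seq_len': 50}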
Example #2
def default_params():
    # Same pattern as Example #1, plus an embedding init scale and
    # gradient clipping for the embedding variables.
    params = ModelBase.default_params()
    params.update({
        "source.max_seq_len": 50,
        "source.reverse": True,
        "target.max_seq_len": 50,
        "embedding.dim": 100,
        "embedding.init_scale": 0.04,
        "embedding.share": False,
        "inference.beam_search.beam_width": 0,
        "inference.beam_search.length_penalty_weight": 0.0,
        "inference.beam_search.choose_successors_fn": "choose_top_k",
        "optimizer.clip_embed_gradients": 0.1,
        "vocab_source": "",
        "vocab_target": "",
    })
    return params
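
Relative to Example #1, this variant adds "embedding.init_scale" (the scale of the embedding initializer) and "optimizer.clip_embed_gradients" (a norm cap on embedding gradients). A minimal NumPy sketch of what clipping to norm 0.1 means; the real implementation lives in the library's optimizer code:

import numpy as np

def clip_by_norm(grad, clip_norm):
    # Rescale the gradient so its L2 norm never exceeds clip_norm.
    norm = np.linalg.norm(grad)
    return grad * (clip_norm / norm) if norm > clip_norm else grad

embed_grad = np.array([0.3, -0.4])     # L2 norm 0.5
print(clip_by_norm(embed_grad, 0.1))   # [ 0.06 -0.08], norm 0.1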
Example #3
def default_params():
    # Extends Example #2 with optional source/target embedding
    # parameters, both defaulting to None.
    params = ModelBase.default_params()
    params.update({
        "source.max_seq_len": 50,
        "source.reverse": True,
        "target.max_seq_len": 50,
        "embedding.dim": 100,
        "embedding.init_scale": 0.04,
        "embedding.share": False,
        "embedding.source_embedding": None,
        "embedding.target_embedding": None,
        "inference.beam_search.beam_width": 0,
        "inference.beam_search.length_penalty_weight": 0.0,
        "inference.beam_search.choose_successors_fn": "choose_top_k",
        "optimizer.clip_embed_gradients": 0.1,
        "vocab_source": "",
        "vocab_target": "",
    })
    return params
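
In practice such defaults are usually overridden from a configuration file rather than edited in code. A hypothetical sketch using PyYAML; the YAML snippet, the flat dotted keys, and the yaml dependency are illustrative assumptions, not the library's own config loader:

import yaml

# The defaults from the example above (abbreviated to two keys).
params = {"embedding.dim": 100, "source.max_seq_len": 50}

# Hypothetical user config overriding both values.
config_text = """
embedding.dim: 256
source.max_seq_len: 100
"""
params.update(yaml.safe_load(config_text))
print(params)   # {'embedding.dim': 256, 'source.max_seq_len': 100}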
Example #4
def default_params():
    # Attention-based model with an InceptionV3 image encoder, so there
    # are no source-sequence or source-vocab settings; the encoder,
    # decoder, and bridge are selected by fully qualified class names.
    params = ModelBase.default_params()
    params.update({
        "attention.class": "AttentionLayerBahdanau",
        "attention.params": {
            "num_units": 128
        },
        "bridge.class": "seq2seq.models.bridges.ZeroBridge",
        "bridge.params": {},
        "encoder.class": "seq2seq.encoders.InceptionV3Encoder",
        "encoder.params": {},  # Arbitrary parameters for the encoder
        "decoder.class": "seq2seq.decoders.AttentionDecoder",
        "decoder.params": {},  # Arbitrary parameters for the decoder
        "target.max_seq_len": 50,
        "embedding.dim": 100,
        "inference.beam_search.beam_width": 0,
        "inference.beam_search.length_penalty_weight": 0.0,
        "inference.beam_search.choose_successors_fn": "choose_top_k",
        "vocab_target": "",
    })
    return params
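
Unlike the earlier examples, this one wires the model together by class-path strings ("encoder.class", "decoder.class", "bridge.class"). A common way to resolve such dotted paths at runtime is pydoc.locate; a small sketch using a standard-library class as a stand-in (resolving e.g. "seq2seq.models.bridges.ZeroBridge" would require the library to be installed):

from pydoc import locate

# Resolve a fully qualified class name to a class object, then instantiate it.
cls = locate("collections.OrderedDict")   # stand-in for a configured class path
instance = cls()
print(cls.__name__, instance)             # OrderedDict OrderedDict()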