Example #1
# assumed imports: these snippets appear to come from pytorch_translate,
# where the vocab_reduction and word_dropout helpers expose set_arg_defaults(args)
from pytorch_translate import vocab_reduction, word_dropout


def base_architecture(args):
    # default architecture: getattr(args, name, default) fills in each
    # hyperparameter only if the user did not already set it
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_layers = getattr(args, "encoder_layers", 1)
    args.encoder_hidden_dim = getattr(args, "encoder_hidden_dim", 512)
    args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
    args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
    args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_layers = getattr(args, "decoder_layers", 1)
    args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512)
    args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
    args.attention_type = getattr(args, "attention_type", "dot")
    args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
    args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout)
    args.averaging_encoder = getattr(args, "averaging_encoder", False)
    args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False)
    args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False)
    args.ngram_decoder = getattr(args, "ngram_decoder", None)
    args.multi_encoder = getattr(args, "multi_encoder", None)
    args.multi_decoder = getattr(args, "multi_decoder", None)
    args.multi_decoder_is_lm = getattr(args, "multi_decoder_is_lm", None)
    args.multiling_encoder_lang = getattr(args, "multiling_encoder_lang", None)
    args.multi_model_training_schedule = getattr(
        args, "multi_model_training_schedule", "complete"
    )
    args.multi_model_fixed_weights = getattr(args, "multi_model_fixed_weights", None)
    args.cell_type = getattr(args, "cell_type", "lstm")
    args.ngram_activation_type = getattr(args, "ngram_activation_type", "relu")
    vocab_reduction.set_arg_defaults(args)
    word_dropout.set_arg_defaults(args)
    args.sequence_lstm = getattr(args, "sequence_lstm", False)
    args.decoder_tie_embeddings = getattr(args, "decoder_tie_embeddings", False)
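A minimal sketch of the idiom this function relies on, using only the standard library (the SimpleNamespace stands in for fairseq's parsed argparse.Namespace; the attribute values are made up for illustration). Note that args.dropout must already be set before the call, because the dropout defaults reference args.dropout rather than a literal:

from types import SimpleNamespace

# hypothetical user settings from the command line
args = SimpleNamespace(dropout=0.2, encoder_layers=4)

# the core idiom: getattr(obj, name, default) returns the user's value when
# the attribute exists and the default otherwise, so explicit flags win
args.encoder_layers = getattr(args, "encoder_layers", 1)   # stays 4
args.decoder_layers = getattr(args, "decoder_layers", 1)   # becomes 1
args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)

print(args.encoder_layers, args.decoder_layers, args.encoder_dropout_in)
# -> 4 1 0.2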
Example #2
# (assumes the same pytorch_translate imports as Example #1)
def base_architecture(args):
    # default architecture
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_layers = getattr(args, "encoder_layers", 1)
    args.encoder_hidden_dim = getattr(args, "encoder_hidden_dim", 512)
    args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False)
    args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout)
    args.encoder_dropout_out = getattr(args, "encoder_dropout_out",
                                       args.dropout)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512)
    args.decoder_layers = getattr(args, "decoder_layers", 1)
    args.decoder_hidden_dim = getattr(args, "decoder_hidden_dim", 512)
    args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512)
    args.attention_type = getattr(args, "attention_type", "dot")
    args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout)
    args.decoder_dropout_out = getattr(args, "decoder_dropout_out",
                                       args.dropout)
    args.averaging_encoder = getattr(args, "averaging_encoder", False)
    args.encoder_freeze_embed = getattr(args, "encoder_freeze_embed", False)
    args.decoder_freeze_embed = getattr(args, "decoder_freeze_embed", False)
    args.ngram_decoder = getattr(args, "ngram_decoder", None)
    args.cell_type = getattr(args, "cell_type", "lstm")
    args.ngram_activation_type = getattr(args, "ngram_activation_type", "relu")
    vocab_reduction.set_arg_defaults(args)
    word_dropout.set_arg_defaults(args)
    args.sequence_lstm = getattr(args, "sequence_lstm", False)
    args.add_encoder_output_as_decoder_input = getattr(
        args, "add_encoder_output_as_decoder_input", False)
Example #3
# (assumes the same pytorch_translate imports as Example #1)
def base_architecture(args):
    # default architecture
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.encoder_layers = getattr(args, 'encoder_layers', 1)
    args.encoder_hidden_dim = getattr(args, 'encoder_hidden_dim', 512)
    args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', False)
    args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', args.dropout)
    args.encoder_dropout_out = getattr(args, 'encoder_dropout_out',
                                       args.dropout)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
    args.decoder_layers = getattr(args, 'decoder_layers', 1)
    args.decoder_hidden_dim = getattr(args, 'decoder_hidden_dim', 512)
    args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
    args.attention_type = getattr(args, 'attention_type', 'dot')
    args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout)
    args.decoder_dropout_out = getattr(args, 'decoder_dropout_out',
                                       args.dropout)
    args.averaging_encoder = getattr(args, 'averaging_encoder', False)
    args.encoder_freeze_embed = getattr(args, 'encoder_freeze_embed', False)
    args.decoder_freeze_embed = getattr(args, 'decoder_freeze_embed', False)
    args.cell_type = getattr(args, 'cell_type', 'lstm')
    vocab_reduction.set_arg_defaults(args)
    word_dropout.set_arg_defaults(args)
    args.sequence_lstm = getattr(args, 'sequence_lstm', False)
    args.add_encoder_output_as_decoder_input = getattr(
        args,
        'add_encoder_output_as_decoder_input',
        False,
    )
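Variant architectures usually override a handful of defaults and then delegate to base_architecture for everything else, so user-specified flags still take precedence at every step. A sketch with hypothetical sizes (the name big_architecture and the 1024-dim values are illustrative, not from the source):

def big_architecture(args):
    # widen encoder/decoder unless the user already chose sizes, then let
    # base_architecture fill in all remaining defaults
    args.encoder_hidden_dim = getattr(args, 'encoder_hidden_dim', 1024)
    args.decoder_hidden_dim = getattr(args, 'decoder_hidden_dim', 1024)
    args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', True)
    base_architecture(args)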