def from_params(cls, vocab: Vocabulary, params: Params) -> 'ESIM':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    projection_feedforward = FeedForward.from_params(params.pop('projection_feedforward'))
    inference_encoder = Seq2SeqEncoder.from_params(params.pop("inference_encoder"))
    output_feedforward = FeedForward.from_params(params.pop('output_feedforward'))
    output_logit = FeedForward.from_params(params.pop('output_logit'))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    dropout = params.pop("dropout", 0)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               similarity_function=similarity_function,
               projection_feedforward=projection_feedforward,
               inference_encoder=inference_encoder,
               output_feedforward=output_feedforward,
               output_logit=output_logit,
               initializer=initializer,
               dropout=dropout,
               regularizer=regularizer)
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2SeqCrf':
    source_embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "target_tags")
    # If no attention function is specified, we should not use attention at all,
    # rather than attention with a default similarity function.
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'AnswerSynthesis':
    embedder_params = params.pop("text_field_embedder")
    embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    question_encoder = Seq2SeqEncoder.from_params(params.pop("question_encoder"))
    passage_encoder = Seq2SeqEncoder.from_params(params.pop("passage_encoder"))
    feed_forward = FeedForward.from_params(params.pop("feed_forward"))
    dropout = params.pop_float('dropout', 0.1)
    num_decoding_steps = params.pop_int("num_decoding_steps", 40)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab=vocab,
               embedder=embedder,
               question_encoder=question_encoder,
               passage_encoder=passage_encoder,
               feed_forward=feed_forward,
               dropout=dropout,
               num_decoding_steps=num_decoding_steps,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    span_extractor = SpanExtractor.from_params(params.pop("span_extractor"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    feed_forward_params = params.pop("feedforward", None)
    if feed_forward_params is not None:
        feedforward_layer = FeedForward.from_params(feed_forward_params)
    else:
        feedforward_layer = None
    pos_tag_embedding_params = params.pop("pos_tag_embedding", None)
    if pos_tag_embedding_params is not None:
        pos_tag_embedding = Embedding.from_params(vocab, pos_tag_embedding_params)
    else:
        pos_tag_embedding = None
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    evalb_directory_path = params.pop("evalb_directory_path", None)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               span_extractor=span_extractor,
               encoder=encoder,
               feedforward_layer=feedforward_layer,
               pos_tag_embedding=pos_tag_embedding,
               initializer=initializer,
               regularizer=regularizer,
               evalb_directory_path=evalb_directory_path)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DialogueContextCoherenceAttentionClassifier':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    context_encoder_params = params.pop("context_encoder", None)
    if context_encoder_params is not None:
        context_encoder = Seq2SeqEncoder.from_params(context_encoder_params)
    else:
        context_encoder = None
    response_encoder_params = params.pop("response_encoder", None)
    if response_encoder_params is not None:
        response_encoder = Seq2SeqEncoder.from_params(response_encoder_params)
    else:
        response_encoder = None
    attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    initializer = InitializerApplicator.from_params(params.pop("initializer", []))
    regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               attend_feedforward=attend_feedforward,
               similarity_function=similarity_function,
               compare_feedforward=compare_feedforward,
               classifier_feedforward=classifier_feedforward,
               context_encoder=context_encoder,
               response_encoder=response_encoder,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    span_extractor = SpanExtractor.from_params(params.pop("span_extractor"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    feed_forward_params = params.pop("feedforward", None)
    if feed_forward_params is not None:
        feedforward_layer = FeedForward.from_params(feed_forward_params)
    else:
        feedforward_layer = None
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    evalb_directory_path = params.pop("evalb_directory_path", None)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               span_extractor=span_extractor,
               encoder=encoder,
               feedforward_layer=feedforward_layer,
               initializer=initializer,
               regularizer=regularizer,
               evalb_directory_path=evalb_directory_path)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DialogueContextHierarchicalCoherenceClassifier':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    utterance_encoder = Seq2VecEncoder.from_params(params.pop("utterance_encoder"))
    context_encoder = Seq2VecEncoder.from_params(params.pop("context_encoder"))
    response_encoder = Seq2VecEncoder.from_params(params.pop("response_encoder"))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    initializer = InitializerApplicator.from_params(params.pop("initializer", []))
    regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               utterance_encoder=utterance_encoder,
               context_encoder=context_encoder,
               response_encoder=response_encoder,
               classifier_feedforward=classifier_feedforward,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    premise_encoder_params = params.pop("premise_encoder", None)
    if premise_encoder_params is not None:
        premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
    else:
        premise_encoder = None
    hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
    if hypothesis_encoder_params is not None:
        hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
    else:
        hypothesis_encoder = None
    attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
    aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               attend_feedforward=attend_feedforward,
               similarity_function=similarity_function,
               compare_feedforward=compare_feedforward,
               aggregate_feedforward=aggregate_feedforward,
               premise_encoder=premise_encoder,
               hypothesis_encoder=hypothesis_encoder,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTaggerPretrain':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    dropout = params.pop("dropout", None)
    include_start_end_transitions = params.pop("include_start_end_transitions", True)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    initial_model_file = params.pop("initial_model_file", None)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               dropout=dropout,
               include_start_end_transitions=include_start_end_transitions,
               initializer=initializer,
               regularizer=regularizer,
               initial_model_file=initial_model_file)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'NLIEncoder':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder_params = params.pop("encoder", None)
    if encoder_params is not None:
        encoder = Seq2SeqEncoder.from_params(encoder_params)
    else:
        encoder = None
    fc_dim = params.pop('fc_dim', 512)
    nonlinear_fc = params.pop('nonlinear_fc', True)
    dropout_fc = params.pop('dropout_fc', 0.0)
    init_params = params.pop('initializer', None)
    reg_params = params.pop('regularizer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None
                   else InitializerApplicator())
    regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               fc_dim=fc_dim,
               nonlinear_fc=nonlinear_fc,
               dropout_fc=dropout_fc,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BidirectionalAttentionFlow':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    num_highway_layers = params.pop("num_highway_layers")
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    span_end_encoder = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
    dropout = params.pop('dropout', 0.2)
    init_params = params.pop('initializer', None)
    reg_params = params.pop('regularizer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None
                   else InitializerApplicator())
    regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None
    mask_lstms = params.pop('mask_lstms', True)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               num_highway_layers=num_highway_layers,
               phrase_layer=phrase_layer,
               attention_similarity_function=similarity_function,
               modeling_layer=modeling_layer,
               span_end_encoder=span_end_encoder,
               dropout=dropout,
               mask_lstms=mask_lstms,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'MAMLCrfTagger':
    embedder_params = params.pop("text_field_embedder")
    encoder_params = params.pop("encoder")
    text_field_embedder = []
    encoder = []
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    initializer_params = params.pop('initializer', [])
    reg_params = params.pop('regularizer', [])
    for i in range(20 + 1):
        print(i)
        # duplicate() gives each copy its own Params object, since from_params
        # pops keys destructively and would otherwise exhaust the shared config.
        encoder.append(Seq2SeqEncoder.from_params(encoder_params.duplicate()))
        # device = [w for w in encoder[-1].parameters()][0].get_device()
        text_field_embedder.append(TextFieldEmbedder.from_params(vocab, embedder_params.duplicate()))
    initializer = InitializerApplicator.from_params(initializer_params)
    regularizer = RegularizerApplicator.from_params(reg_params)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab, params: Params) -> 'PointerGenerator':
    source_embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "tokens")
    # If no attention function is specified, we should not use attention at all,
    # rather than attention with a default similarity function.
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.25)
    pointer_gen = params.pop_bool("pointer_gen", False)
    language_model = params.pop_bool("language_model", False)
    max_oovs = params.pop("max_oovs", None)
    params.assert_empty(cls.__name__)
    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio,
               pointer_gen=pointer_gen,
               language_model=language_model,
               max_oovs=max_oovs)
def from_params(cls, vocab, params: Params) -> 'NlvrDirectSemanticParser':
    sentence_embedder_params = params.pop("sentence_embedder")
    sentence_embedder = TextFieldEmbedder.from_params(vocab, sentence_embedder_params)
    action_embedding_dim = params.pop_int('action_embedding_dim')
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    dropout = params.pop_float('dropout', 0.0)
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    decoder_beam_search = BeamSearch.from_params(params.pop("decoder_beam_search"))
    max_decoding_steps = params.pop_int("max_decoding_steps")
    params.assert_empty(cls.__name__)
    return cls(vocab,
               sentence_embedder=sentence_embedder,
               action_embedding_dim=action_embedding_dim,
               encoder=encoder,
               attention_function=attention_function,
               decoder_beam_search=decoder_beam_search,
               max_decoding_steps=max_decoding_steps,
               dropout=dropout)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BidirectionalAttentionFlow':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    num_highway_layers = params.pop("num_highway_layers")
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    span_end_encoder = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
    initializer = InitializerApplicator.from_params(params.pop("initializer", []))
    dropout = params.pop('dropout', 0.2)

    # TODO: Remove the following when fully deprecated
    evaluation_json_file = params.pop('evaluation_json_file', None)
    if evaluation_json_file is not None:
        logger.warning("the 'evaluation_json_file' model parameter is deprecated, please remove")

    mask_lstms = params.pop('mask_lstms', True)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               num_highway_layers=num_highway_layers,
               phrase_layer=phrase_layer,
               attention_similarity_function=similarity_function,
               modeling_layer=modeling_layer,
               span_end_encoder=span_end_encoder,
               initializer=initializer,
               dropout=dropout,
               mask_lstms=mask_lstms)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EvidenceExtraction':
    embedder_params = params.pop("text_field_embedder")
    embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    question_encoder = Seq2SeqEncoder.from_params(params.pop("question_encoder"))
    passage_encoder = Seq2SeqEncoder.from_params(params.pop("passage_encoder"))
    dropout = params.pop_float('dropout', 0.1)
    r = params.pop_float('r', 0.8)
    # cuda = params.pop_int('cuda', 0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab=vocab,
               embedder=embedder,
               question_encoder=question_encoder,
               passage_encoder=passage_encoder,
               r=r,
               dropout=dropout,
               # cuda=cuda,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab, params: Params) -> 'NlvrCoverageSemanticParser':
    sentence_embedder_params = params.pop("sentence_embedder")
    sentence_embedder = TextFieldEmbedder.from_params(vocab, sentence_embedder_params)
    action_embedding_dim = params.pop_int('action_embedding_dim')
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    beam_size = params.pop_int('beam_size')
    normalize_beam_score_by_length = params.pop_bool('normalize_beam_score_by_length', False)
    max_decoding_steps = params.pop_int("max_decoding_steps")
    checklist_cost_weight = params.pop_float("checklist_cost_weight", 0.8)
    dynamic_cost_weight = params.pop("dynamic_cost_weight", None)
    penalize_non_agenda_actions = params.pop_bool("penalize_non_agenda_actions", False)
    initial_mml_model_file = params.pop("initial_mml_model_file", None)
    params.assert_empty(cls.__name__)
    return cls(vocab,
               sentence_embedder=sentence_embedder,
               action_embedding_dim=action_embedding_dim,
               encoder=encoder,
               attention_function=attention_function,
               beam_size=beam_size,
               max_decoding_steps=max_decoding_steps,
               normalize_beam_score_by_length=normalize_beam_score_by_length,
               checklist_cost_weight=checklist_cost_weight,
               dynamic_cost_weight=dynamic_cost_weight,
               penalize_non_agenda_actions=penalize_non_agenda_actions,
               initial_mml_model_file=initial_mml_model_file)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               stacked_encoder=stacked_encoder)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTagger':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               initializer=initializer,
               regularizer=regularizer)
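# Illustrative only: a minimal sketch of the kind of config the CrfTagger
# from_params above consumes, assuming an AllenNLP-style Params/Vocabulary
# setup. The keys mirror what the method pops; the concrete values
# (embedding_dim, hidden_size, "BIO", etc.) are hypothetical placeholders.
from allennlp.common import Params

example_crf_params = Params({
    "text_field_embedder": {"tokens": {"type": "embedding", "embedding_dim": 50}},
    "encoder": {"type": "lstm", "input_size": 50, "hidden_size": 100, "bidirectional": True},
    "label_namespace": "labels",
    "constraint_type": "BIO",
})
# With a vocabulary built from a dataset, the model would be constructed as:
# model = CrfTagger.from_params(vocab, example_crf_params)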
def from_params(cls, vocab, params):
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    num_highway_layers = params.pop("num_highway_layers")
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    dropout = params.pop('dropout', 0.2)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    mask_lstms = params.pop('mask_lstms', True)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               num_highway_layers=num_highway_layers,
               phrase_layer=phrase_layer,
               attention_similarity_function=similarity_function,
               modeling_layer=modeling_layer,
               dropout=dropout,
               mask_lstms=mask_lstms,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':  # type: ignore
    # pylint: disable=arguments-differ
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
    embedding_dropout = params.pop("embedding_dropout")
    pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
    integrator_dropout = params.pop("integrator_dropout")
    output_layer_params = params.pop("output_layer")
    if "activations" in output_layer_params:
        output_layer = FeedForward.from_params(output_layer_params)
    else:
        output_layer = Maxout.from_params(output_layer_params)
    elmo = params.pop("elmo", None)
    if elmo is not None:
        elmo = Elmo.from_params(elmo)
    use_input_elmo = params.pop_bool("use_input_elmo", False)
    use_integrator_output_elmo = params.pop_bool("use_integrator_output_elmo", False)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               embedding_dropout=embedding_dropout,
               pre_encode_feedforward=pre_encode_feedforward,
               encoder=encoder,
               integrator=integrator,
               integrator_dropout=integrator_dropout,
               output_layer=output_layer,
               elmo=elmo,
               use_input_elmo=use_input_elmo,
               use_integrator_output_elmo=use_integrator_output_elmo,
               initializer=initializer,
               regularizer=regularizer)
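# Hedged sketch of the output_layer dispatch above: a config that includes an
# "activations" key is parsed as a FeedForward, anything else falls through to
# Maxout. Dimensions, pool sizes, and dropout values are illustrative
# placeholders only, not settings taken from the original model.
from allennlp.common import Params

feedforward_style_output = Params({
    "input_dim": 1200, "num_layers": 1, "hidden_dims": [3], "activations": ["linear"],
})
maxout_style_output = Params({
    "input_dim": 1200, "num_layers": 1, "output_dims": [3], "pool_sizes": [4], "dropout": [0.0],
})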
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2Seq':
    source_embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "tokens")
    # If no attention function is specified, we should not use attention at all,
    # rather than attention with a default similarity function.
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    params.assert_empty(cls.__name__)
    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio)
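# Minimal sketch of the optional attention handled above, with placeholder
# embedding and encoder settings. Omitting "attention_function" leaves
# attention_function as None (no attention); supplying one builds a
# SimilarityFunction such as a dot product.
from allennlp.common import Params

seq2seq_with_attention = Params({
    "source_embedder": {"tokens": {"type": "embedding", "embedding_dim": 25}},
    "encoder": {"type": "lstm", "input_size": 25, "hidden_size": 50},
    "max_decoding_steps": 20,
    "attention_function": {"type": "dot_product"},
})
seq2seq_without_attention = Params({
    "source_embedder": {"tokens": {"type": "embedding", "embedding_dim": 25}},
    "encoder": {"type": "lstm", "input_size": 25, "hidden_size": 50},
    "max_decoding_steps": 20,
})
# model = SimpleSeq2Seq.from_params(vocab, seq2seq_with_attention)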