def from_params(cls, vocab: Vocabulary, params: Params) -> 'DialogueContextCoherenceAttentionClassifier':
    """Build a DialogueContextCoherenceAttentionClassifier from config ``Params``.

    The "context_encoder" and "response_encoder" sub-configs are optional;
    when absent, the corresponding encoder is ``None``.
    """
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

    context_encoder_params = params.pop("context_encoder", None)
    context_encoder = (Seq2SeqEncoder.from_params(context_encoder_params)
                       if context_encoder_params is not None else None)
    response_encoder_params = params.pop("response_encoder", None)
    response_encoder = (Seq2SeqEncoder.from_params(response_encoder_params)
                        if response_encoder_params is not None else None)

    attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    initializer = InitializerApplicator.from_params(params.pop("initializer", []))
    regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))
    # Fail loudly on unrecognized (e.g. misspelled) config keys instead of
    # silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               attend_feedforward=attend_feedforward,
               similarity_function=similarity_function,
               compare_feedforward=compare_feedforward,
               classifier_feedforward=classifier_feedforward,
               context_encoder=context_encoder,
               response_encoder=response_encoder,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdHMCN':
    """Build an EtdHMCN model from config ``Params``.

    The attention encoder is dispatched on its "type" sub-key:
    'linear_attention' selects AttentionEncoder, anything else selects
    MultiHeadAttentionEncoder.
    """
    sh_hierarchy_dir = params.pop("sh_hierarchy_dir")
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
    abstract_text_encoder = Seq2SeqEncoder.from_params(params.pop("abstract_text_encoder"))
    attention_encoder_params = params.pop("attention_encoder")
    # "type" is consumed here so the encoder's own from_params never sees it.
    if attention_encoder_params.pop('type') == 'linear_attention':
        attention_encoder = AttentionEncoder.from_params(attention_encoder_params)
    else:
        attention_encoder = MultiHeadAttentionEncoder.from_params(attention_encoder_params)
    # sic: "local_globel_tradeoff" spelling kept for config compatibility.
    local_globel_tradeoff = params.pop_float("local_globel_tradeoff", 0.5)
    bce_pos_weight = params.pop_int("bce_pos_weight", 10)
    use_positional_encoding = params.pop("use_positional_encoding", False)
    child_parent_index_pair_dir = params.pop("child_parent_index_pair_dir", None)
    hv_penalty_lambda = params.pop_float("hv_penalty_lambda", 0.1)
    hidden_states_dropout = params.pop_float("hidden_states_dropout", 0.1)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Reject unrecognized config keys (typos) rather than ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               sh_hierarchy_dir=sh_hierarchy_dir,
               text_field_embedder=text_field_embedder,
               abstract_text_encoder=abstract_text_encoder,
               attention_encoder=attention_encoder,
               local_globel_tradeoff=local_globel_tradeoff,
               bce_pos_weight=bce_pos_weight,
               use_positional_encoding=use_positional_encoding,
               child_parent_index_pair_dir=child_parent_index_pair_dir,
               hv_penalty_lambda=hv_penalty_lambda,
               hidden_states_dropout=hidden_states_dropout,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':
    """Build a BiattentiveClassificationNetwork from config ``Params``.

    The output layer is a FeedForward when its config carries an
    "activations" key, otherwise a Maxout layer.
    """
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    embedding_dropout = params.pop("embedding_dropout")
    pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
    integrator_dropout = params.pop("integrator_dropout")
    output_layer_params = params.pop("output_layer")
    # Presence of "activations" distinguishes a FeedForward config from a
    # Maxout config.
    if "activations" in output_layer_params:
        output_layer = FeedForward.from_params(output_layer_params)
    else:
        output_layer = Maxout.from_params(output_layer_params)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               embedding_dropout=embedding_dropout,
               pre_encode_feedforward=pre_encode_feedforward,
               encoder=encoder,
               integrator=integrator,
               integrator_dropout=integrator_dropout,
               output_layer=output_layer,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params, label_indexer: LabelIndexer) -> 'MultiClassifer':
    """Build a MultiClassifer from config ``Params``.

    One word-level attention module is created per tag (num_tags - 1 of
    them), dispatched by the "type" key on the `Attns` module.
    """
    method = params.pop("method", "binary")
    num_tags = label_indexer.get_num_tags()
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder_word = Seq2SeqEncoder.from_params(params.pop("encoder_word"))
    attn_word_params = params.pop("attention_word")
    attn_type = attn_word_params.pop("type")
    # from_params clears out the Params dict it is given, so each attention
    # module must be built from a deep copy of the shared config.
    attn_word = [
        getattr(Attns, attn_type).from_params(deepcopy(attn_word_params))
        for _ in range(num_tags - 1)
    ]
    threshold = params.pop("threshold", 0.5)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               method=method,
               text_field_embedder=text_field_embedder,
               encoder_word=encoder_word,
               attn_word=attn_word,
               thresh=threshold,
               initializer=initializer,
               regularizer=regularizer,
               label_indexer=label_indexer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'OntoEmmaNN':
    """Build an OntoEmmaNN from config ``Params``.

    A missing "initializer" falls back to a default InitializerApplicator;
    a missing "regularizer" yields ``None``.
    """
    name_embedder = TextFieldEmbedder.from_params(vocab, params.pop("name_embedder"))
    definition_embedder = TextFieldEmbedder.from_params(vocab, params.pop("definition_embedder"))
    name_encoder = Seq2VecEncoder.from_params(params.pop("name_encoder"))
    definition_encoder = Seq2VecEncoder.from_params(params.pop("definition_encoder"))
    siamese_feedforward = FeedForward.from_params(params.pop("siamese_feedforward"))
    decision_feedforward = FeedForward.from_params(params.pop("decision_feedforward"))
    init_params = params.pop('initializer', None)
    reg_params = params.pop('regularizer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None else InitializerApplicator())
    regularizer = (RegularizerApplicator.from_params(reg_params)
                   if reg_params is not None else None)
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               name_embedder=name_embedder,
               definition_embedder=definition_embedder,
               name_encoder=name_encoder,
               definition_encoder=definition_encoder,
               siamese_feedforward=siamese_feedforward,
               decision_feedforward=decision_feedforward,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'NLIEncoder':
    """Build an NLIEncoder from config ``Params``.

    The "encoder" sub-config is optional; when absent the encoder is None.
    """
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder_params = params.pop("encoder", None)
    encoder = (Seq2SeqEncoder.from_params(encoder_params)
               if encoder_params is not None else None)
    fc_dim = params.pop('fc_dim', 512)
    nonlinear_fc = params.pop('nonlinear_fc', True)
    dropout_fc = params.pop('dropout_fc', 0.0)
    init_params = params.pop('initializer', None)
    reg_params = params.pop('regularizer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None else InitializerApplicator())
    regularizer = (RegularizerApplicator.from_params(reg_params)
                   if reg_params is not None else None)
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               fc_dim=fc_dim,
               nonlinear_fc=nonlinear_fc,
               dropout_fc=dropout_fc,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'MAMLCrfTagger':
    """Build a MAMLCrfTagger with multiple copies of the embedder/encoder.

    Pops "num_models" (default 21, preserving the previously hard-coded
    ``range(20 + 1)`` count) to control how many embedder/encoder copies
    are created. Removes a leftover debug ``print`` from the build loop.
    """
    embedder_params = params.pop("text_field_embedder")
    encoder_params = params.pop("encoder")
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    initializer_params = params.pop('initializer', [])
    reg_params = params.pop('regularizer', [])
    num_models = params.pop_int("num_models", 21)
    text_field_embedder = []
    encoder = []
    for _ in range(num_models):
        # from_params consumes its Params, so each copy is built from a
        # fresh duplicate of the shared config.
        encoder.append(Seq2SeqEncoder.from_params(encoder_params.duplicate()))
        text_field_embedder.append(
            TextFieldEmbedder.from_params(vocab, embedder_params.duplicate()))
    initializer = InitializerApplicator.from_params(initializer_params)
    regularizer = RegularizerApplicator.from_params(reg_params)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, params: Params, vocab: Vocabulary) -> 'CMVPredictor':
    """Construct a CMVPredictor from its configuration block.

    The response-side components are required; every op-side component is
    optional and defaults to ``None`` when its key is absent.
    """
    def _optional(key, build):
        # Optional sub-config: None when the key is missing from the config.
        sub_params = params.pop(key, None)
        return None if sub_params is None else build(sub_params)

    response_embedder = TextFieldEmbedder.from_params(
        vocab=vocab, params=params.pop("response_embedder"))
    response_word_attention = IntraAttention.from_params(
        params=params.pop("response_word_attention"))
    response_encoder = Seq2SeqEncoder.from_params(
        params=params.pop("response_encoder"))
    response_sentence_attention = InterAttention.from_params(
        params=params.pop("response_sentence_attention"))

    op_embedder = _optional(
        "op_embedder", lambda p: TextFieldEmbedder.from_params(vocab=vocab, params=p))
    op_word_attention = _optional(
        "op_word_attention", lambda p: IntraAttention.from_params(params=p))
    op_encoder = _optional(
        "op_encoder", lambda p: Seq2SeqEncoder.from_params(params=p))
    # NOTE(review): built with IntraAttention, unlike the response side's
    # InterAttention — presumably intentional, verify against the model.
    op_sentence_attention = _optional(
        "op_sentence_attention", lambda p: IntraAttention.from_params(params=p))

    output_feedforward = FeedForward.from_params(params=params.pop('output_feedforward'))
    dropout = params.pop("dropout", 0)
    initializer = InitializerApplicator.from_params(params=params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params=params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab, response_embedder, response_word_attention, response_encoder,
               response_sentence_attention, output_feedforward, op_embedder,
               op_word_attention, op_encoder, op_sentence_attention, dropout,
               initializer, regularizer)
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2Seq':
    """Build a SimpleSeq2Seq model from config ``Params``.

    If no "attention_function" is specified, attention is disabled rather
    than falling back to a default similarity function.
    """
    source_embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "target_tags")
    attention_function_type = params.pop("attention_function", None)
    attention_function = (SimilarityFunction.from_params(attention_function_type)
                          if attention_function_type is not None else None)
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SelectiveRegressor':
    """Deserialize a SelectiveRegressor from its configuration block."""
    token_representation_dim = params.pop_int("token_representation_dim")

    raw_encoder = params.pop("encoder", None)
    encoder = None if raw_encoder is None else Seq2SeqEncoder.from_params(raw_encoder)

    decoder = params.pop("decoder", None)
    # A string decoder spec is passed through untouched; only a nested
    # config is instantiated here.
    if not (decoder is None or isinstance(decoder, str)):
        decoder = FeedForward.from_params(decoder)

    contextualizer = params.pop('contextualizer', None)
    # Falsy (absent/empty) config means no contextualizer.
    if contextualizer:
        contextualizer = Contextualizer.from_params(contextualizer)

    pretrained_file = params.pop("pretrained_file", None)
    transfer_contextualizer_from_pretrained_file = params.pop_bool(
        "transfer_contextualizer_from_pretrained_file", False)
    transfer_encoder_from_pretrained_file = params.pop_bool(
        "transfer_encoder_from_pretrained_file", False)
    freeze_encoder = params.pop_bool("freeze_encoder", False)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               token_representation_dim=token_representation_dim,
               encoder=encoder,
               decoder=decoder,
               contextualizer=contextualizer,
               pretrained_file=pretrained_file,
               transfer_contextualizer_from_pretrained_file=transfer_contextualizer_from_pretrained_file,
               transfer_encoder_from_pretrained_file=transfer_encoder_from_pretrained_file,
               freeze_encoder=freeze_encoder,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTagger':
    """Instantiate a CrfTagger from its configuration block."""
    token_embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    seq_encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    namespace = params.pop("label_namespace", "labels")
    constraints = params.pop("constraint_type", None)
    dropout_prob = params.pop("dropout", None)
    with_start_end = params.pop("include_start_end_transitions", True)
    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Any keys still left in the config are typos — raise.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=token_embedder,
               encoder=seq_encoder,
               label_namespace=namespace,
               constraint_type=constraints,
               dropout=dropout_prob,
               include_start_end_transitions=with_start_end,
               initializer=init_applicator,
               regularizer=reg_applicator)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SarcasmClassifier':
    """Build a SarcasmClassifier from config ``Params``."""
    bert_model_name = params.pop("bert_model_name")
    quote_response_encoder = Seq2VecEncoder.from_params(
        params.pop("quote_response_encoder"))
    classifier_feedforward = FeedForward.from_params(
        params.pop("classifier_feedforward"))
    classifier_feedforward_2 = FeedForward.from_params(
        params.pop("classifier_feedforward_2"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    report_auxiliary_metrics = params.pop_bool("report_auxiliary_metrics", False)
    # "predict_mode" is a legacy key: consumed for config compatibility but
    # currently unused by the model.
    params.pop_bool("predict_mode", False)
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               bert_model_name=bert_model_name,
               quote_response_encoder=quote_response_encoder,
               classifier_feedforward=classifier_feedforward,
               classifier_feedforward_2=classifier_feedforward_2,
               initializer=initializer,
               regularizer=regularizer,
               report_auxiliary_metrics=report_auxiliary_metrics)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
    """Build a DecomposableAttention model from config ``Params``.

    The "premise_encoder" and "hypothesis_encoder" sub-configs are
    optional; a missing key means no encoder on that side.
    """
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

    premise_encoder_params = params.pop("premise_encoder", None)
    premise_encoder = (Seq2SeqEncoder.from_params(premise_encoder_params)
                       if premise_encoder_params is not None else None)
    hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
    hypothesis_encoder = (Seq2SeqEncoder.from_params(hypothesis_encoder_params)
                          if hypothesis_encoder_params is not None else None)

    attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
    aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               attend_feedforward=attend_feedforward,
               similarity_function=similarity_function,
               compare_feedforward=compare_feedforward,
               aggregate_feedforward=aggregate_feedforward,
               premise_encoder=premise_encoder,
               hypothesis_encoder=hypothesis_encoder,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EvidenceExtraction':
    """Build an EvidenceExtraction model from config ``Params``."""
    embedder_params = params.pop("text_field_embedder")
    embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    question_encoder = Seq2SeqEncoder.from_params(params.pop("question_encoder"))
    passage_encoder = Seq2SeqEncoder.from_params(params.pop("passage_encoder"))
    dropout = params.pop_float('dropout', 0.1)
    r = params.pop_float('r', 0.8)
    # "cuda" is a legacy key: consumed for config compatibility but no
    # longer used by the model.
    params.pop('cuda', None)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               embedder=embedder,
               question_encoder=question_encoder,
               passage_encoder=passage_encoder,
               r=r,
               dropout=dropout,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'MultiTagger':
    """Build a MultiTagger from config ``Params``."""
    tasks = params.pop("tasks")
    domains = params.pop("domains")
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
    source_namespace = params.pop("source_namespace", "tokens")
    label_suffix_namespace = params.pop("label_suffix_namespace", "labels")
    is_crf = params.pop("is_crf", False)
    # "device" is a legacy key: consumed for config compatibility but no
    # longer used by the model.
    params.pop("device", None)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               tasks=tasks,
               domains=domains,
               text_field_embedder=text_field_embedder,
               stacked_encoder=stacked_encoder,
               source_namespace=source_namespace,
               label_suffix_namespace=label_suffix_namespace,
               is_crf=is_crf,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> "CoreferenceResolver":
    """Instantiate a CoreferenceResolver from its configuration block.

    A missing "initializer" falls back to a default InitializerApplicator;
    a missing "regularizer" yields ``None``.
    """
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    context_layer = Seq2SeqEncoder.from_params(params.pop("context_layer"))
    mention_ff = FeedForward.from_params(params.pop("mention_feedforward"))
    antecedent_ff = FeedForward.from_params(params.pop("antecedent_feedforward"))
    feature_size = params.pop_int("feature_size")
    max_span_width = params.pop_int("max_span_width")
    spans_per_word = params.pop_float("spans_per_word")
    max_antecedents = params.pop_int("max_antecedents")
    lexical_dropout = params.pop_float("lexical_dropout", 0.2)

    init_config = params.pop("initializer", None)
    reg_config = params.pop("regularizer", None)
    initializer = (InitializerApplicator() if init_config is None
                   else InitializerApplicator.from_params(init_config))
    regularizer = (None if reg_config is None
                   else RegularizerApplicator.from_params(reg_config))

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               context_layer=context_layer,
               mention_feedforward=mention_ff,
               antecedent_feedforward=antecedent_ff,
               feature_size=feature_size,
               max_span_width=max_span_width,
               spans_per_word=spans_per_word,
               max_antecedents=max_antecedents,
               lexical_dropout=lexical_dropout,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
    """Instantiate a SpanConstituencyParser from its configuration block."""
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    extractor = SpanExtractor.from_params(params.pop("span_extractor"))
    seq_encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))

    # The feedforward layer is optional.
    ff_config = params.pop("feedforward", None)
    ff_layer = None if ff_config is None else FeedForward.from_params(ff_config)

    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))
    evalb_dir = params.pop("evalb_directory_path", None)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               span_extractor=extractor,
               encoder=seq_encoder,
               feedforward_layer=ff_layer,
               initializer=init_applicator,
               regularizer=reg_applicator,
               evalb_directory_path=evalb_dir)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BidirectionalAttentionFlow':
    """Instantiate a BiDAF model from its configuration block."""
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    highway_layers = params.pop_int("num_highway_layers")
    phrase_encoder = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    attention_similarity = SimilarityFunction.from_params(params.pop("similarity_function"))
    modeling_encoder = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    span_end_layer = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
    dropout_prob = params.pop_float('dropout', 0.2)
    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))
    mask_lstms = params.pop_bool('mask_lstms', True)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               num_highway_layers=highway_layers,
               phrase_layer=phrase_encoder,
               attention_similarity_function=attention_similarity,
               modeling_layer=modeling_encoder,
               span_end_encoder=span_end_layer,
               dropout=dropout_prob,
               mask_lstms=mask_lstms,
               initializer=init_applicator,
               regularizer=reg_applicator)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ESIM':
    """Instantiate an ESIM model from its configuration block."""
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    input_encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    similarity = SimilarityFunction.from_params(params.pop("similarity_function"))
    projection_ff = FeedForward.from_params(params.pop('projection_feedforward'))
    inference_enc = Seq2SeqEncoder.from_params(params.pop("inference_encoder"))
    output_ff = FeedForward.from_params(params.pop('output_feedforward'))
    logit_layer = FeedForward.from_params(params.pop('output_logit'))
    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))
    dropout_prob = params.pop("dropout", 0)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               encoder=input_encoder,
               similarity_function=similarity,
               projection_feedforward=projection_ff,
               inference_encoder=inference_enc,
               output_feedforward=output_ff,
               output_logit=logit_layer,
               initializer=init_applicator,
               dropout=dropout_prob,
               regularizer=reg_applicator)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
    """Instantiate a DecomposableAttention model from its configuration.

    Premise and hypothesis encoders are optional; a missing key means no
    encoder on that side.
    """
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))

    premise_config = params.pop("premise_encoder", None)
    premise_encoder = (None if premise_config is None
                       else Seq2SeqEncoder.from_params(premise_config))
    hypothesis_config = params.pop("hypothesis_encoder", None)
    hypothesis_encoder = (None if hypothesis_config is None
                          else Seq2SeqEncoder.from_params(hypothesis_config))

    attend_ff = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_ff = FeedForward.from_params(params.pop('compare_feedforward'))
    aggregate_ff = FeedForward.from_params(params.pop('aggregate_feedforward'))
    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               attend_feedforward=attend_ff,
               similarity_function=similarity,
               compare_feedforward=compare_ff,
               aggregate_feedforward=aggregate_ff,
               premise_encoder=premise_encoder,
               hypothesis_encoder=hypothesis_encoder,
               initializer=init_applicator,
               regularizer=reg_applicator)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanDetector':
    """Instantiate a SpanDetector from its configuration block.

    Requires "config_path", "vocab_path" and "model_path" entries pointing
    at an external model to load alongside the usual components.
    """
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
    predicate_feature_dim = params.pop("predicate_feature_dim")
    hidden_size = params.pop("hidden_dim", 100)
    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))

    # Paths for the externally-loaded model.
    config_path = params.pop("config_path")
    vocab_path = params.pop("vocab_path")
    model_path = params.pop("model_path")

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               stacked_encoder=encoder,
               config_path=config_path,
               vocab_path=vocab_path,
               model_path=model_path,
               predicate_feature_dim=predicate_feature_dim,
               dim_hidden=hidden_size,
               initializer=init_applicator,
               regularizer=reg_applicator)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'WordLM':
    """Instantiate a WordLM language model from its configuration block."""
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    seq_encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    use_proj = params.pop("proj", False)
    use_relu = params.pop("relu", False)
    dropout_prob = params.pop("dropout", None)
    softmax_layer = SoftmaxWithNLL.from_params(vocab, params.pop('softmax'))
    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               encoder=seq_encoder,
               softmax=softmax_layer,
               proj=use_proj,
               relu=use_relu,
               dropout=dropout_prob,
               initializer=init_applicator,
               regularizer=reg_applicator)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SemiCrfSemanticRoleLabeler':
    """Build a SemiCrfSemanticRoleLabeler from config ``Params``."""
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
    span_feedforward = FeedForward.from_params(params.pop("span_feedforward"))
    binary_feature_dim = params.pop("binary_feature_dim")
    max_span_width = params.pop("max_span_width")
    binary_feature_size = params.pop("feature_size")
    distance_feature_size = params.pop("distance_feature_size", 5)
    fast_mode = params.pop("fast_mode", True)
    loss_type = params.pop("loss_type", "logloss")
    label_namespace = params.pop("label_namespace", "labels")
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               stacked_encoder=stacked_encoder,
               binary_feature_dim=binary_feature_dim,
               span_feedforward=span_feedforward,
               max_span_width=max_span_width,
               binary_feature_size=binary_feature_size,
               distance_feature_size=distance_feature_size,
               label_namespace=label_namespace,
               loss_type=loss_type,
               fast_mode=fast_mode,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'HBMP':
    """Instantiate an HBMP model from its configuration block.

    Three encoder copies are built from duplicates of the same "encoder"
    sub-config, since from_params consumes the Params it is given.
    """
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    shared_encoder_config = params.pop("encoder")
    rnn1, rnn2, rnn3 = (
        Seq2SeqEncoder.from_params(shared_encoder_config.duplicate())
        for _ in range(3))

    # The aggregation feedforward is optional (falsy config -> None).
    aggregate_config = params.pop('aggregate_feedforward', None)
    aggregate_ff = FeedForward.from_params(aggregate_config) if aggregate_config else None

    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               rnn1=rnn1,
               rnn2=rnn2,
               rnn3=rnn3,
               aggregate_feedforward=aggregate_ff,
               initializer=init_applicator,
               regularizer=reg_applicator)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
    """Instantiate a SpanConstituencyParser from its configuration block.

    Both the feedforward layer and the POS tag embedding are optional.
    """
    embedder = TextFieldEmbedder.from_params(vocab, params.pop("text_field_embedder"))
    extractor = SpanExtractor.from_params(params.pop("span_extractor"))
    seq_encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))

    ff_config = params.pop("feedforward", None)
    ff_layer = None if ff_config is None else FeedForward.from_params(ff_config)

    pos_config = params.pop("pos_tag_embedding", None)
    pos_embedding = (None if pos_config is None
                     else Embedding.from_params(vocab, pos_config))

    init_applicator = InitializerApplicator.from_params(params.pop('initializer', []))
    reg_applicator = RegularizerApplicator.from_params(params.pop('regularizer', []))
    evalb_dir = params.pop("evalb_directory_path", None)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=embedder,
               span_extractor=extractor,
               encoder=seq_encoder,
               feedforward_layer=ff_layer,
               pos_tag_embedding=pos_embedding,
               initializer=init_applicator,
               regularizer=reg_applicator,
               evalb_directory_path=evalb_dir)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'AnswerSynthesis':
    """Build an AnswerSynthesis model from config ``Params``."""
    embedder_params = params.pop("text_field_embedder")
    embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    question_encoder = Seq2SeqEncoder.from_params(params.pop("question_encoder"))
    passage_encoder = Seq2SeqEncoder.from_params(params.pop("passage_encoder"))
    feed_forward = FeedForward.from_params(params.pop("feed_forward"))
    dropout = params.pop_float('dropout', 0.1)
    num_decoding_steps = params.pop_int("num_decoding_steps", 40)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               embedder=embedder,
               question_encoder=question_encoder,
               passage_encoder=passage_encoder,
               feed_forward=feed_forward,
               dropout=dropout,
               num_decoding_steps=num_decoding_steps,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdHANLinAtt':
    """Build an EtdHANLinAtt model from config ``Params``.

    The classifier head is dispatched on its "type" sub-key:
    'feedforward' selects FeedForward, anything else selects Maxout.
    """
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
    word_encoder = Seq2SeqEncoder.from_params(params.pop("word_encoder"))
    sentence_encoder = Seq2SeqEncoder.from_params(params.pop("sentence_encoder"))
    classifier_params = params.pop("classifier_feedforward")
    # "type" is consumed here so the layer's own from_params never sees it.
    if classifier_params.pop('type') == 'feedforward':
        classifier_feedforward = FeedForward.from_params(classifier_params)
    else:
        classifier_feedforward = Maxout.from_params(classifier_params)
    use_positional_encoding = params.pop("use_positional_encoding", False)
    bce_pos_weight = params.pop_int("bce_pos_weight", 10)
    attended_text_dropout = params.pop_float("attended_text_dropout", 0.0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail on unrecognized config keys instead of silently ignoring them.
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               word_encoder=word_encoder,
               sentence_encoder=sentence_encoder,
               classifier_feedforward=classifier_feedforward,
               attended_text_dropout=attended_text_dropout,
               bce_pos_weight=bce_pos_weight,
               use_positional_encoding=use_positional_encoding,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdTransformer':
    """Build an ``EtdTransformer`` model from a ``Params`` configuration."""
    text_field_embedder = TextFieldEmbedder.from_params(
        vocab=vocab, params=params.pop("text_field_embedder"))
    abstract_text_encoder = Seq2SeqEncoder.from_params(params.pop("abstract_text_encoder"))
    attention_encoder = AttentionEncoder.from_params(params.pop("attention_encoder"))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    use_positional_encoding = params.pop("use_positional_encoding", False)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               abstract_text_encoder=abstract_text_encoder,
               attention_encoder=attention_encoder,
               classifier_feedforward=classifier_feedforward,
               use_positional_encoding=use_positional_encoding,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DeIsTe':
    """Build a ``DeIsTe`` model from a ``Params`` configuration.

    ``output_feedforward`` is optional: when the key is absent the model
    is constructed without an output feed-forward layer.  Any unconsumed
    config keys raise via ``assert_empty``.
    """
    text_field_embedder = TextFieldEmbedder.from_params(
        vocab, params.pop("text_field_embedder"))
    inter_attention = MatrixAttention.from_params(params.pop("inter_attention"))
    param_dyn_encoder = Seq2VecEncoder.from_params(params.pop("param_dyn_encoder"))
    # The POS embedder is vocabulary-independent, hence vocab=None.
    pos_embedder = TokenEmbedder.from_params(vocab=None, params=params.pop("pos_embedder"))
    pos_attn_encoder = Seq2VecEncoder.from_params(params.pop("pos_attn_encoder"))
    output_feedforward_params = params.pop('output_feedforward', None)
    if output_feedforward_params:
        output_feedforward = FeedForward.from_params(output_feedforward_params)
    else:
        output_feedforward = None
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               inter_attention=inter_attention,
               param_dyn_encoder=param_dyn_encoder,
               pos_embedder=pos_embedder,
               pos_attn_encoder=pos_attn_encoder,
               output_feedforward=output_feedforward,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BIOLabeler':
    """Build a ``BIOLabeler`` from a ``Params`` configuration.

    Note the config key ``hidden_dim`` maps onto the constructor
    argument ``dim_hidden``.  Unconsumed keys raise via ``assert_empty``.
    """
    text_field_embedder = TextFieldEmbedder.from_params(
        vocab, params.pop("text_field_embedder"))
    stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
    predicate_feature_dim = params.pop("predicate_feature_dim", 100)
    dim_hidden = params.pop("hidden_dim", 100)
    question_generator = QuestionGenerator.from_params(
        vocab, params.pop("question_generator"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               stacked_encoder=stacked_encoder,
               question_generator=question_generator,
               predicate_feature_dim=predicate_feature_dim,
               dim_hidden=dim_hidden,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SarcasmClassifier':
    """Build a ``SarcasmClassifier`` from a ``Params`` configuration.

    Two classifier heads are built (``classifier_feedforward`` and
    ``classifier_feedforward_2``); ``report_auxiliary_metrics`` and
    ``predict_mode`` are optional booleans defaulting to ``False``.
    """
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(embedder_params, vocab=vocab)
    quote_response_encoder = Seq2VecEncoder.from_params(params.pop("quote_response_encoder"))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    classifier_feedforward_2 = FeedForward.from_params(params.pop("classifier_feedforward_2"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    report_auxiliary_metrics = params.pop_bool("report_auxiliary_metrics", False)
    predict_mode = params.pop_bool("predict_mode", False)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               quote_response_encoder=quote_response_encoder,
               classifier_feedforward=classifier_feedforward,
               classifier_feedforward_2=classifier_feedforward_2,
               initializer=initializer,
               regularizer=regularizer,
               report_auxiliary_metrics=report_auxiliary_metrics,
               predict_mode=predict_mode)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdRNN':
    """Build an ``EtdRNN`` model from a ``Params`` configuration.

    The attention module is selected by the ``type`` key of the
    ``attention_encoder`` sub-config (``linear_attention``,
    ``self_attention``, ``multi_head``); any other value falls back to
    ``Pooling``.  The classifier head is a ``FeedForward`` when its
    ``type`` is ``feedforward``, otherwise a ``Maxout``.
    """
    text_field_embedder = TextFieldEmbedder.from_params(
        vocab=vocab, params=params.pop("text_field_embedder"))
    abstract_text_encoder = Seq2SeqEncoder.from_params(params.pop("abstract_text_encoder"))
    attention_params = params.pop("attention_encoder")
    attention_dispatch = {
        'linear_attention': AttentionEncoder,
        'self_attention': SelfAttentionEncoder,
        'multi_head': MultiHeadAttentionEncoder,
    }
    attention_cls = attention_dispatch.get(attention_params.pop('type'), Pooling)
    attention_encoder = attention_cls.from_params(attention_params)
    head_params = params.pop("classifier_feedforward")
    if head_params.pop('type') == 'feedforward':
        classifier_feedforward = FeedForward.from_params(head_params)
    else:
        classifier_feedforward = Maxout.from_params(head_params)
    use_positional_encoding = params.pop("use_positional_encoding", False)
    bce_pos_weight = params.pop_int("bce_pos_weight", 10)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               abstract_text_encoder=abstract_text_encoder,
               attention_encoder=attention_encoder,
               classifier_feedforward=classifier_feedforward,
               bce_pos_weight=bce_pos_weight,
               use_positional_encoding=use_positional_encoding,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
    """Build a ``SimpleTagger`` from a ``Params`` configuration.

    Unconsumed config keys raise via ``assert_empty``.
    """
    text_field_embedder = TextFieldEmbedder.from_params(
        vocab, params.pop("text_field_embedder"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTagger':
    """Build a ``CrfTagger`` from a ``Params`` configuration.

    ``label_namespace`` defaults to ``"labels"`` and ``constraint_type``
    to ``None`` (no CRF transition constraints).  Unconsumed config keys
    raise via ``assert_empty``.
    """
    text_field_embedder = TextFieldEmbedder.from_params(
        vocab, params.pop("text_field_embedder"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'Model':
    """Build a BiDAF-style model from a ``Params`` configuration.

    Fix: this was the only ``from_params`` in the file with no parameter
    or return annotations; annotations are added to match the sibling
    factories (backward-compatible — annotations do not affect callers).

    Note the config key ``similarity_function`` maps onto the
    constructor argument ``attention_similarity_function``.  Unconsumed
    config keys raise via ``assert_empty``.
    """
    text_field_embedder = TextFieldEmbedder.from_params(
        vocab, params.pop("text_field_embedder"))
    num_highway_layers = params.pop("num_highway_layers")
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    dropout = params.pop('dropout', 0.2)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    mask_lstms = params.pop('mask_lstms', True)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               num_highway_layers=num_highway_layers,
               phrase_layer=phrase_layer,
               attention_similarity_function=similarity_function,
               modeling_layer=modeling_layer,
               dropout=dropout,
               mask_lstms=mask_lstms,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':  # type: ignore
    # pylint: disable=arguments-differ
    """Build a ``BiattentiveClassificationNetwork`` from a ``Params`` config.

    The output layer is a ``FeedForward`` when its sub-config carries an
    ``activations`` key, otherwise a ``Maxout``.  ELMo is optional and
    only constructed when an ``elmo`` sub-config is present.  Unconsumed
    config keys raise via ``assert_empty``.
    """
    text_field_embedder = TextFieldEmbedder.from_params(
        vocab=vocab, params=params.pop("text_field_embedder"))
    embedding_dropout = params.pop("embedding_dropout")
    pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
    integrator_dropout = params.pop("integrator_dropout")
    output_layer_params = params.pop("output_layer")
    if "activations" in output_layer_params:
        output_layer = FeedForward.from_params(output_layer_params)
    else:
        output_layer = Maxout.from_params(output_layer_params)
    elmo_params = params.pop("elmo", None)
    elmo = Elmo.from_params(elmo_params) if elmo_params is not None else None
    use_input_elmo = params.pop_bool("use_input_elmo", False)
    use_integrator_output_elmo = params.pop_bool("use_integrator_output_elmo", False)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               embedding_dropout=embedding_dropout,
               pre_encode_feedforward=pre_encode_feedforward,
               encoder=encoder,
               integrator=integrator,
               integrator_dropout=integrator_dropout,
               output_layer=output_layer,
               elmo=elmo,
               use_input_elmo=use_input_elmo,
               use_integrator_output_elmo=use_integrator_output_elmo,
               initializer=initializer,
               regularizer=regularizer)