def test_regex_matches_are_initialized_correctly(self):
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.linear_1_with_funky_name = torch.nn.Linear(5, 10)
            self.linear_2 = torch.nn.Linear(10, 5)
            self.conv = torch.nn.Conv1d(5, 5, 5)

        def forward(self, inputs):  # pylint: disable=arguments-differ
            pass

    # pyhocon does funny things if there's a . in a key. This test makes sure that we
    # handle these kinds of regexes correctly.
    json_params = """{"initializer": [
        ["conv", {"type": "constant", "val": 5}],
        ["funky_na.*bi", {"type": "constant", "val": 7}]
        ]}
        """
    params = Params(pyhocon.ConfigFactory.parse_string(json_params))
    initializers = InitializerApplicator.from_params(params['initializer'])
    model = Net()
    initializers(model)

    for parameter in model.conv.parameters():
        assert torch.equal(parameter.data, torch.ones(parameter.size()) * 5)

    parameter = model.linear_1_with_funky_name.bias
    assert torch.equal(parameter.data, torch.ones(parameter.size()) * 7)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DialogueContextCoherenceAttentionClassifier':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

    context_encoder_params = params.pop("context_encoder", None)
    if context_encoder_params is not None:
        context_encoder = Seq2SeqEncoder.from_params(context_encoder_params)
    else:
        context_encoder = None

    response_encoder_params = params.pop("response_encoder", None)
    if response_encoder_params is not None:
        response_encoder = Seq2SeqEncoder.from_params(response_encoder_params)
    else:
        response_encoder = None

    attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    initializer = InitializerApplicator.from_params(params.pop("initializer", []))
    regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               attend_feedforward=attend_feedforward,
               similarity_function=similarity_function,
               compare_feedforward=compare_feedforward,
               classifier_feedforward=classifier_feedforward,
               context_encoder=context_encoder,
               response_encoder=response_encoder,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ESIM':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    projection_feedforward = FeedForward.from_params(params.pop('projection_feedforward'))
    inference_encoder = Seq2SeqEncoder.from_params(params.pop("inference_encoder"))
    output_feedforward = FeedForward.from_params(params.pop('output_feedforward'))
    output_logit = FeedForward.from_params(params.pop('output_logit'))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    dropout = params.pop("dropout", 0)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               similarity_function=similarity_function,
               projection_feedforward=projection_feedforward,
               inference_encoder=inference_encoder,
               output_feedforward=output_feedforward,
               output_logit=output_logit,
               initializer=initializer,
               dropout=dropout,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'ModelSQUAD':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    # num_highway_layers = params.pop_int("num_highway_layers")
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    residual_encoder = Seq2SeqEncoder.from_params(params.pop("residual_encoder"))
    span_start_encoder = Seq2SeqEncoder.from_params(params.pop("span_start_encoder"))
    span_end_encoder = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
    # feed_forward = FeedForward.from_params(params.pop("feed_forward"))
    dropout = params.pop_float('dropout', 0.2)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    mask_lstms = params.pop_bool('mask_lstms', True)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               # num_highway_layers=num_highway_layers,
               phrase_layer=phrase_layer,
               attention_similarity_function=similarity_function,
               residual_encoder=residual_encoder,
               span_start_encoder=span_start_encoder,
               span_end_encoder=span_end_encoder,
               dropout=dropout,
               mask_lstms=mask_lstms,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SpanConstituencyParser':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    span_extractor = SpanExtractor.from_params(params.pop("span_extractor"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))

    feed_forward_params = params.pop("feedforward", None)
    if feed_forward_params is not None:
        feedforward_layer = FeedForward.from_params(feed_forward_params)
    else:
        feedforward_layer = None

    pos_tag_embedding_params = params.pop("pos_tag_embedding", None)
    if pos_tag_embedding_params is not None:
        pos_tag_embedding = Embedding.from_params(vocab, pos_tag_embedding_params)
    else:
        pos_tag_embedding = None

    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    evalb_directory_path = params.pop("evalb_directory_path", None)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               span_extractor=span_extractor,
               encoder=encoder,
               feedforward_layer=feedforward_layer,
               pos_tag_embedding=pos_tag_embedding,
               initializer=initializer,
               regularizer=regularizer,
               evalb_directory_path=evalb_directory_path)
def from_params(cls, params: Params, vocab: Vocabulary) -> 'CMVPredictor':
    response_embedder_params = params.pop("response_embedder")
    response_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=response_embedder_params)

    response_word_attention_params = params.pop("response_word_attention")
    response_word_attention = IntraAttention.from_params(params=response_word_attention_params)

    response_encoder_params = params.pop("response_encoder")
    response_encoder = Seq2SeqEncoder.from_params(params=response_encoder_params)

    response_sentence_attention_params = params.pop("response_sentence_attention")
    response_sentence_attention = InterAttention.from_params(params=response_sentence_attention_params)

    op_embedder_params = params.pop("op_embedder", None)
    op_embedder = None
    if op_embedder_params is not None:
        op_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=op_embedder_params)

    op_word_attention_params = params.pop("op_word_attention", None)
    op_word_attention = None
    if op_word_attention_params is not None:
        op_word_attention = IntraAttention.from_params(params=op_word_attention_params)

    op_encoder_params = params.pop("op_encoder", None)
    op_encoder = None
    if op_encoder_params is not None:
        op_encoder = Seq2SeqEncoder.from_params(params=op_encoder_params)

    op_sentence_attention_params = params.pop("op_sentence_attention", None)
    op_sentence_attention = None
    if op_sentence_attention_params is not None:
        op_sentence_attention = IntraAttention.from_params(params=op_sentence_attention_params)

    output_feedforward = FeedForward.from_params(params=params.pop('output_feedforward'))
    dropout = params.pop("dropout", 0)
    initializer = InitializerApplicator.from_params(params=params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params=params.pop('regularizer', []))

    params.assert_empty(cls.__name__)
    return cls(vocab, response_embedder, response_word_attention, response_encoder,
               response_sentence_attention, output_feedforward, op_embedder,
               op_word_attention, op_encoder, op_sentence_attention,
               dropout, initializer, regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'TaskOnlyEmbeddingTagger':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
    tasks = params.pop("tasks")

    task_embedder_params = params.pop("task_field_embedder")
    task_field_embedder = TextFieldEmbedder.from_params(vocab, task_embedder_params)
    domain_embedder_params = params.pop("domain_field_embedder")
    domain_field_embedder = TextFieldEmbedder.from_params(vocab, domain_embedder_params)

    source_namespace = params.pop("source_namespace", "tokens")
    label_namespace = params.pop("label_namespace", "labels")
    is_crf = params.pop("is_crf", False)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               stacked_encoder=stacked_encoder,
               tasks=tasks,
               task_field_embedder=task_field_embedder,
               domain_field_embedder=domain_field_embedder,
               source_namespace=source_namespace,
               label_namespace=label_namespace,
               is_crf=is_crf,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params, label_indexer: LabelIndexer) -> 'MultiClassifer':
    method = params.pop("method", "binary")
    num_tags = label_indexer.get_num_tags()
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder_word = Seq2SeqEncoder.from_params(params.pop("encoder_word"))

    attn_word_params = params.pop("attention_word")
    attn_type = attn_word_params.pop("type")
    attn_word = []
    for ix in range(num_tags - 1):
        # Since from_params clears out the dictionaries,
        # this deepcopy is necessary
        tmp_attn_params = deepcopy(attn_word_params)
        attn_word.append(getattr(Attns, attn_type).from_params(tmp_attn_params))

    threshold = params.pop("threshold", 0.5)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab=vocab,
               method=method,
               text_field_embedder=text_field_embedder,
               encoder_word=encoder_word,
               attn_word=attn_word,
               thresh=threshold,
               initializer=initializer,
               regularizer=regularizer,
               label_indexer=label_indexer)
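# Illustrative sketch (not from the original source): the loop comment above
# notes that from_params "clears out the dictionaries". A minimal demonstration
# of that consumption behavior, assuming Params mirrors dict.pop -- the first
# consumer gets the key and later consumers see it gone, which is exactly why
# each loop iteration deepcopies attn_word_params before use.
def demo_params_are_consumed() -> None:
    shared = Params({"type": "dot_product"})
    assert shared.pop("type") == "dot_product"  # first use consumes the key
    assert shared.pop("type", None) is None     # a reused Params is now empty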
def from_params(cls, vocab, params):
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    num_highway_layers = params.pop("num_highway_layers")
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    dropout = params.pop('dropout', 0.2)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    mask_lstms = params.pop('mask_lstms', True)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               num_highway_layers=num_highway_layers,
               phrase_layer=phrase_layer,
               attention_similarity_function=similarity_function,
               modeling_layer=modeling_layer,
               dropout=dropout,
               mask_lstms=mask_lstms,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab, params):
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    residual_encoder = Seq2SeqEncoder.from_params(params.pop("residual_encoder"))
    span_start_encoder = Seq2SeqEncoder.from_params(params.pop("span_start_encoder"))
    span_end_encoder = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
    initializer = InitializerApplicator.from_params(params.pop("initializer", []))
    dropout = params.pop('dropout', 0.2)

    evaluation_json_file = params.pop('evaluation_json_file', None)
    if evaluation_json_file is not None:
        logger.warning("the 'evaluation_json_file' model parameter is deprecated, please remove")

    mask_lstms = params.pop('mask_lstms', True)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               phrase_layer=phrase_layer,
               residual_encoder=residual_encoder,
               span_start_encoder=span_start_encoder,
               span_end_encoder=span_end_encoder,
               initializer=initializer,
               dropout=dropout,
               mask_lstms=mask_lstms)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdRnnSAP':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
    abstract_text_encoder = Seq2SeqEncoder.from_params(params.pop("abstract_text_encoder"))

    attention_encoder = params.pop("attention_encoder")
    attention_type = attention_encoder.pop('type')
    if attention_type == 'linear_attention':
        attention_encoder = AttentionEncoder.from_params(attention_encoder)
    elif attention_type == 'self_attention':
        attention_encoder = SelfAttentionEncoder.from_params(attention_encoder)
    else:
        attention_encoder = MultiHeadAttentionEncoder.from_params(attention_encoder)

    classifier_feedforward = params.pop("classifier_feedforward")
    if classifier_feedforward.pop('type') == 'feedforward':
        classifier_feedforward = FeedForward.from_params(classifier_feedforward)
    else:
        classifier_feedforward = Maxout.from_params(classifier_feedforward)

    use_positional_encoding = params.pop("use_positional_encoding", False)
    bce_pos_weight = params.pop_int("bce_pos_weight", 10)
    penalty_coef = params.pop_float("penalty_coef", 1.0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               abstract_text_encoder=abstract_text_encoder,
               attention_encoder=attention_encoder,
               classifier_feedforward=classifier_feedforward,
               bce_pos_weight=bce_pos_weight,
               use_positional_encoding=use_positional_encoding,
               penalty_coef=penalty_coef,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTaggerPretrain':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    dropout = params.pop("dropout", None)
    include_start_end_transitions = params.pop("include_start_end_transitions", True)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    initial_model_file = params.pop("initial_model_file", None)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               dropout=dropout,
               include_start_end_transitions=include_start_end_transitions,
               initializer=initializer,
               regularizer=regularizer,
               initial_model_file=initial_model_file)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'OntoEmmaNN':
    name_embedder = TextFieldEmbedder.from_params(vocab, params.pop("name_embedder"))
    definition_embedder = TextFieldEmbedder.from_params(vocab, params.pop("definition_embedder"))
    name_encoder = Seq2VecEncoder.from_params(params.pop("name_encoder"))
    definition_encoder = Seq2VecEncoder.from_params(params.pop("definition_encoder"))
    siamese_feedforward = FeedForward.from_params(params.pop("siamese_feedforward"))
    decision_feedforward = FeedForward.from_params(params.pop("decision_feedforward"))

    init_params = params.pop('initializer', None)
    reg_params = params.pop('regularizer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None else InitializerApplicator())
    regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None

    return cls(vocab=vocab,
               name_embedder=name_embedder,
               definition_embedder=definition_embedder,
               name_encoder=name_encoder,
               definition_encoder=definition_encoder,
               siamese_feedforward=siamese_feedforward,
               decision_feedforward=decision_feedforward,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'NLIEncoder':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

    encoder_params = params.pop("encoder", None)
    if encoder_params is not None:
        encoder = Seq2SeqEncoder.from_params(encoder_params)
    else:
        encoder = None

    fc_dim = params.pop('fc_dim', 512)
    nonlinear_fc = params.pop('nonlinear_fc', True)
    dropout_fc = params.pop('dropout_fc', 0.0)

    init_params = params.pop('initializer', None)
    reg_params = params.pop('regularizer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None else InitializerApplicator())
    regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               fc_dim=fc_dim,
               nonlinear_fc=nonlinear_fc,
               dropout_fc=dropout_fc,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BidirectionalAttentionFlow':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    num_highway_layers = params.pop("num_highway_layers")
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    span_end_encoder = Seq2SeqEncoder.from_params(params.pop("span_end_encoder"))
    dropout = params.pop('dropout', 0.2)

    init_params = params.pop('initializer', None)
    reg_params = params.pop('regularizer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None else InitializerApplicator())
    regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None

    mask_lstms = params.pop('mask_lstms', True)
    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               num_highway_layers=num_highway_layers,
               phrase_layer=phrase_layer,
               attention_similarity_function=similarity_function,
               modeling_layer=modeling_layer,
               span_end_encoder=span_end_encoder,
               dropout=dropout,
               mask_lstms=mask_lstms,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'MAMLCrfTagger':
    embedder_params = params.pop("text_field_embedder")
    encoder_params = params.pop("encoder")
    text_field_embedder = []
    encoder = []
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    initializer_params = params.pop('initializer', [])
    reg_params = params.pop('regularizer', [])

    for i in range(20 + 1):
        print(i)
        encoder.append(Seq2SeqEncoder.from_params(encoder_params.duplicate()))
        # device = [w for w in encoder[-1].parameters()][0].get_device()
        text_field_embedder.append(
            TextFieldEmbedder.from_params(vocab, embedder_params.duplicate()))

    initializer = InitializerApplicator.from_params(initializer_params)
    regularizer = RegularizerApplicator.from_params(reg_params)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'AnswerSynthesis':
    embedder_params = params.pop("text_field_embedder")
    embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    question_encoder = Seq2SeqEncoder.from_params(params.pop("question_encoder"))
    passage_encoder = Seq2SeqEncoder.from_params(params.pop("passage_encoder"))
    feed_forward = FeedForward.from_params(params.pop("feed_forward"))
    dropout = params.pop_float('dropout', 0.1)
    num_decoding_steps = params.pop_int("num_decoding_steps", 40)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab=vocab,
               embedder=embedder,
               question_encoder=question_encoder,
               passage_encoder=passage_encoder,
               feed_forward=feed_forward,
               dropout=dropout,
               num_decoding_steps=num_decoding_steps,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EvidenceExtraction':
    embedder_params = params.pop("text_field_embedder")
    embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    question_encoder = Seq2SeqEncoder.from_params(params.pop("question_encoder"))
    passage_encoder = Seq2SeqEncoder.from_params(params.pop("passage_encoder"))
    dropout = params.pop_float('dropout', 0.1)
    r = params.pop_float('r', 0.8)
    # cuda = params.pop_int('cuda', 0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab=vocab,
               embedder=embedder,
               question_encoder=question_encoder,
               passage_encoder=passage_encoder,
               r=r,
               dropout=dropout,
               # cuda=cuda,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    embedding_dropout = params.pop("embedding_dropout")
    pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
    integrator_dropout = params.pop("integrator_dropout")

    output_layer_params = params.pop("output_layer")
    if "activations" in output_layer_params:
        output_layer = FeedForward.from_params(output_layer_params)
    else:
        output_layer = Maxout.from_params(output_layer_params)

    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               embedding_dropout=embedding_dropout,
               pre_encode_feedforward=pre_encode_feedforward,
               encoder=encoder,
               integrator=integrator,
               integrator_dropout=integrator_dropout,
               output_layer=output_layer,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'TreeAttention':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

    # "premise_encoder" is popped with a default of None, so only build the
    # encoder when the key is actually present; from_params(None) would fail.
    premise_encoder_params = params.pop("premise_encoder", None)
    if premise_encoder_params is not None:
        premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
    else:
        premise_encoder = None

    attention_similarity = SimilarityFunction.from_params(params.pop('attention_similarity'))
    phrase_probability = FeedForward.from_params(params.pop('phrase_probability'))
    edge_probability = FeedForward.from_params(params.pop('edge_probability'))
    edge_embedding = Embedding.from_params(vocab, params.pop('edge_embedding'))
    use_encoding_for_node = params.pop('use_encoding_for_node')
    ignore_edges = params.pop('ignore_edges', False)

    init_params = params.pop('initializer', None)
    initializer = (InitializerApplicator.from_params(init_params)
                   if init_params is not None else InitializerApplicator())

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               phrase_probability=phrase_probability,
               edge_probability=edge_probability,
               premise_encoder=premise_encoder,
               edge_embedding=edge_embedding,
               use_encoding_for_node=use_encoding_for_node,
               attention_similarity=attention_similarity,
               ignore_edges=ignore_edges,
               initializer=initializer)
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2SeqCrf':
    source_embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "target_tags")

    # If no attention function is specified, we should not use attention, not attention with
    # default similarity function.
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None

    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'HBMP':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

    encoder_params = params.pop("encoder")
    rnn1 = Seq2SeqEncoder.from_params(encoder_params.duplicate())
    rnn2 = Seq2SeqEncoder.from_params(encoder_params.duplicate())
    rnn3 = Seq2SeqEncoder.from_params(encoder_params.duplicate())

    aggregated_params = params.pop('aggregate_feedforward', None)
    aggregate_feedforward = FeedForward.from_params(aggregated_params) if aggregated_params else None

    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               rnn1=rnn1,
               rnn2=rnn2,
               rnn3=rnn3,
               aggregate_feedforward=aggregate_feedforward,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BIOLabeler':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
    predicate_feature_dim = params.pop("predicate_feature_dim", 100)
    dim_hidden = params.pop("hidden_dim", 100)
    question_generator = QuestionGenerator.from_params(vocab, params.pop("question_generator"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               stacked_encoder=stacked_encoder,
               question_generator=question_generator,
               predicate_feature_dim=predicate_feature_dim,
               dim_hidden=dim_hidden,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> "CoreferenceResolverV2": embedder_params = params.pop("text_field_embedder") text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params) context_layer = Seq2SeqEncoder.from_params(params.pop("context_layer")) mention_feedforward = FeedForward.from_params(params.pop("mention_feedforward")) antecedent_feedforward = FeedForward.from_params(params.pop("antecedent_feedforward")) feature_size = params.pop_int("feature_size") max_span_width = params.pop_int("max_span_width") spans_per_word = params.pop_float("spans_per_word") max_antecedents = params.pop_int("max_antecedents") lexical_dropout = params.pop_float("lexical_dropout", 0.2) init_params = params.pop("initializer", None) reg_params = params.pop("regularizer", None) initializer = (InitializerApplicator.from_params(init_params) if init_params is not None else InitializerApplicator()) regularizer = RegularizerApplicator.from_params(reg_params) if reg_params is not None else None params.assert_empty(cls.__name__) return cls(vocab=vocab, text_field_embedder=text_field_embedder, context_layer=context_layer, mention_feedforward=mention_feedforward, antecedent_feedforward=antecedent_feedforward, feature_size=feature_size, max_span_width=max_span_width, spans_per_word=spans_per_word, max_antecedents=max_antecedents, lexical_dropout=lexical_dropout, initializer=initializer, regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'DecomposableAttention':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

    premise_encoder_params = params.pop("premise_encoder", None)
    if premise_encoder_params is not None:
        premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)
    else:
        premise_encoder = None

    hypothesis_encoder_params = params.pop("hypothesis_encoder", None)
    if hypothesis_encoder_params is not None:
        hypothesis_encoder = Seq2SeqEncoder.from_params(hypothesis_encoder_params)
    else:
        hypothesis_encoder = None

    attend_feedforward = FeedForward.from_params(params.pop('attend_feedforward'))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    compare_feedforward = FeedForward.from_params(params.pop('compare_feedforward'))
    aggregate_feedforward = FeedForward.from_params(params.pop('aggregate_feedforward'))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               attend_feedforward=attend_feedforward,
               similarity_function=similarity_function,
               compare_feedforward=compare_feedforward,
               aggregate_feedforward=aggregate_feedforward,
               premise_encoder=premise_encoder,
               hypothesis_encoder=hypothesis_encoder,
               initializer=initializer,
               regularizer=regularizer)
def test_regex_matches_are_initialized_correctly(self):
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.linear_1_with_funky_name = torch.nn.Linear(5, 10)
            self.linear_2 = torch.nn.Linear(10, 5)
            self.conv = torch.nn.Conv1d(5, 5, 5)

        def forward(self, inputs):  # pylint: disable=arguments-differ
            pass

    # Make sure we handle regexes properly
    json_params = """{"initializer": [
        ["conv", {"type": "constant", "val": 5}],
        ["funky_na.*bi", {"type": "constant", "val": 7}]
        ]}
        """
    params = Params(json.loads(_jsonnet.evaluate_snippet("", json_params)))
    initializers = InitializerApplicator.from_params(params['initializer'])
    model = Net()
    initializers(model)

    for parameter in model.conv.parameters():
        assert torch.equal(parameter.data, torch.ones(parameter.size()) * 5)

    parameter = model.linear_1_with_funky_name.bias
    assert torch.equal(parameter.data, torch.ones(parameter.size()) * 7)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SarcasmClassifier':
    embedder_params1 = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(embedder_params1, vocab=vocab)
    quote_response_encoder = Seq2VecEncoder.from_params(params.pop("quote_response_encoder"))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    predict_mode = params.pop_bool("predict_mode", False)
    # print(f"pred mode: {predict_mode}")

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               quote_response_encoder=quote_response_encoder,
               classifier_feedforward=classifier_feedforward,
               initializer=initializer,
               regularizer=regularizer,
               predict_mode=predict_mode)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'EtdDebugModel':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
    abstract_text_encoder = Seq2SeqEncoder.from_params(params.pop("abstract_text_encoder"))
    attention_encoder = AttentionEncoder.from_params(params.pop("attention_encoder"))

    classifier_feedforward = params.pop("classifier_feedforward")
    if classifier_feedforward.pop('type') == 'feedforward':
        classifier_feedforward = FeedForward.from_params(classifier_feedforward)
    else:
        classifier_feedforward = Maxout.from_params(classifier_feedforward)

    use_positional_encoding = params.pop("use_positional_encoding", False)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               abstract_text_encoder=abstract_text_encoder,
               attention_encoder=attention_encoder,
               classifier_feedforward=classifier_feedforward,
               use_positional_encoding=use_positional_encoding,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SemiCrfSemanticRoleLabeler':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    stacked_encoder = Seq2SeqEncoder.from_params(params.pop("stacked_encoder"))
    span_feedforward = FeedForward.from_params(params.pop("span_feedforward"))
    binary_feature_dim = params.pop("binary_feature_dim")
    max_span_width = params.pop("max_span_width")
    binary_feature_size = params.pop("feature_size")
    distance_feature_size = params.pop("distance_feature_size", 5)
    fast_mode = params.pop("fast_mode", True)
    loss_type = params.pop("loss_type", "logloss")
    label_namespace = params.pop("label_namespace", "labels")
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               stacked_encoder=stacked_encoder,
               binary_feature_dim=binary_feature_dim,
               span_feedforward=span_feedforward,
               max_span_width=max_span_width,
               binary_feature_size=binary_feature_size,
               distance_feature_size=distance_feature_size,
               label_namespace=label_namespace,
               loss_type=loss_type,
               fast_mode=fast_mode,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SarcasmClassifier':
    bert_model_name = params.pop("bert_model_name")
    quote_response_encoder = Seq2VecEncoder.from_params(params.pop("quote_response_encoder"))
    classifier_feedforward = FeedForward.from_params(params.pop("classifier_feedforward"))
    classifier_feedforward_2 = FeedForward.from_params(params.pop("classifier_feedforward_2"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    report_auxiliary_metrics = params.pop_bool("report_auxiliary_metrics", False)
    # predict_mode = params.pop_bool("predict_mode", False)
    # print(f"pred mode: {predict_mode}")

    return cls(vocab=vocab,
               bert_model_name=bert_model_name,
               quote_response_encoder=quote_response_encoder,
               classifier_feedforward=classifier_feedforward,
               classifier_feedforward_2=classifier_feedforward_2,
               initializer=initializer,
               regularizer=regularizer,
               report_auxiliary_metrics=report_auxiliary_metrics)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'SimpleTagger':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               initializer=initializer,
               regularizer=regularizer)
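# Illustrative sketch (assumption, not from the original source): the shape of
# config that SimpleTagger.from_params above consumes, with one entry per
# params.pop(...) call, so assert_empty passes. The registry names
# ("embedding", "lstm"), the regexes, and the sizes are hypothetical
# placeholders for whatever the surrounding library actually registers.
def example_simple_tagger_params() -> Params:
    return Params({
        "text_field_embedder": {
            "tokens": {"type": "embedding", "embedding_dim": 50}
        },
        "encoder": {"type": "lstm", "input_size": 50, "hidden_size": 100},
        "initializer": [[".*weight", {"type": "xavier_uniform"}]],
        "regularizer": [[".*weight", {"type": "l2", "alpha": 0.01}]]
    })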
def _get_applicator(self,
                    regex: str,
                    weights_file_path: str,
                    parameter_name_overrides: Optional[Dict[str, str]] = None) -> InitializerApplicator:
    parameter_name_overrides = parameter_name_overrides or {}
    initializer_params = Params({
        "type": "pretrained",
        "weights_file_path": weights_file_path,
        "parameter_name_overrides": parameter_name_overrides
    })
    params = Params({
        "initializer": [(regex, initializer_params)]
    })
    return InitializerApplicator.from_params(params["initializer"])
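# Illustrative usage sketch (assumption, not from the original source): how a
# test might call the _get_applicator helper above. The regex and weights path
# are hypothetical placeholders; applying the returned applicator by calling it
# on a model follows the same pattern as the regex tests above (applicator(model)).
def example_apply_pretrained(self, model: torch.nn.Module) -> None:
    applicator = self._get_applicator(
        regex="linear.*",                                # which parameters to initialize
        weights_file_path="/tmp/pretrained_weights.th")  # hypothetical path
    applicator(model)  # copies matching weights from the file into the model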
def from_params(cls, vocab: Vocabulary, params: Params) -> 'CrfTagger':
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    label_namespace = params.pop("label_namespace", "labels")
    constraint_type = params.pop("constraint_type", None)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               encoder=encoder,
               label_namespace=label_namespace,
               constraint_type=constraint_type,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab, params):
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    num_highway_layers = params.pop("num_highway_layers")
    phrase_layer = Seq2SeqEncoder.from_params(params.pop("phrase_layer"))
    similarity_function = SimilarityFunction.from_params(params.pop("similarity_function"))
    modeling_layer = Seq2SeqEncoder.from_params(params.pop("modeling_layer"))
    dropout = params.pop('dropout', 0.2)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    mask_lstms = params.pop('mask_lstms', True)

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               num_highway_layers=num_highway_layers,
               phrase_layer=phrase_layer,
               attention_similarity_function=similarity_function,
               modeling_layer=modeling_layer,
               dropout=dropout,
               mask_lstms=mask_lstms,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':  # type: ignore
    # pylint: disable=arguments-differ
    embedder_params = params.pop("text_field_embedder")
    text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
    embedding_dropout = params.pop("embedding_dropout")
    pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
    integrator_dropout = params.pop("integrator_dropout")

    output_layer_params = params.pop("output_layer")
    if "activations" in output_layer_params:
        output_layer = FeedForward.from_params(output_layer_params)
    else:
        output_layer = Maxout.from_params(output_layer_params)

    elmo = params.pop("elmo", None)
    if elmo is not None:
        elmo = Elmo.from_params(elmo)
    use_input_elmo = params.pop_bool("use_input_elmo", False)
    use_integrator_output_elmo = params.pop_bool("use_integrator_output_elmo", False)

    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

    params.assert_empty(cls.__name__)
    return cls(vocab=vocab,
               text_field_embedder=text_field_embedder,
               embedding_dropout=embedding_dropout,
               pre_encode_feedforward=pre_encode_feedforward,
               encoder=encoder,
               integrator=integrator,
               integrator_dropout=integrator_dropout,
               output_layer=output_layer,
               elmo=elmo,
               use_input_elmo=use_input_elmo,
               use_integrator_output_elmo=use_integrator_output_elmo,
               initializer=initializer,
               regularizer=regularizer)