def test_registry_has_builtin_similarity_functions(self):
    """Every built-in similarity function should be registered under its expected key."""
    expected_registrations = {
        "dot_product": "DotProductSimilarity",
        "bilinear": "BilinearSimilarity",
        "linear": "LinearSimilarity",
        "cosine": "CosineSimilarity",
    }
    for registry_key, class_name in expected_registrations.items():
        assert SimilarityFunction.by_name(registry_key).__name__ == class_name
def from_params(cls, vocab, params: Params) -> 'NlvrCoverageSemanticParser':
    """Build an ``NlvrCoverageSemanticParser`` from a ``Params`` configuration.

    All configuration keys are consumed; ``assert_empty`` raises on leftovers.
    """
    embedder_params = params.pop("sentence_embedder")
    sentence_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    action_embedding_dim = params.pop_int('action_embedding_dim')
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    dropout = params.pop_float('dropout', 0.0)
    # Attention is only enabled when a similarity function is explicitly configured.
    attention_params = params.pop("attention_function", None)
    attention_function = (SimilarityFunction.from_params(attention_params)
                          if attention_params is not None else None)
    beam_size = params.pop_int('beam_size')
    max_num_finished_states = params.pop_int('max_num_finished_states', None)
    normalize_beam_score_by_length = params.pop_bool('normalize_beam_score_by_length', False)
    max_decoding_steps = params.pop_int("max_decoding_steps")
    checklist_cost_weight = params.pop_float("checklist_cost_weight", 0.6)
    dynamic_cost_weight = params.pop("dynamic_cost_weight", None)
    penalize_non_agenda_actions = params.pop_bool("penalize_non_agenda_actions", False)
    initial_mml_model_file = params.pop("initial_mml_model_file", None)
    params.assert_empty(cls.__name__)
    return cls(vocab,
               sentence_embedder=sentence_embedder,
               action_embedding_dim=action_embedding_dim,
               encoder=encoder,
               attention_function=attention_function,
               beam_size=beam_size,
               max_num_finished_states=max_num_finished_states,
               dropout=dropout,
               max_decoding_steps=max_decoding_steps,
               normalize_beam_score_by_length=normalize_beam_score_by_length,
               checklist_cost_weight=checklist_cost_weight,
               dynamic_cost_weight=dynamic_cost_weight,
               penalize_non_agenda_actions=penalize_non_agenda_actions,
               initial_mml_model_file=initial_mml_model_file)
def from_params(cls, params: Params) -> 'Attention':
    """Construct an ``Attention`` module from configuration.

    ``similarity_function`` defaults to an empty config; ``normalize`` defaults to True.
    """
    similarity_params = params.pop('similarity_function', {})
    similarity_function = SimilarityFunction.from_params(similarity_params)
    normalize = params.pop_bool('normalize', True)
    params.assert_empty(cls.__name__)
    return cls(similarity_function=similarity_function, normalize=normalize)
def from_params(cls, vocab, params: Params) -> 'NlvrDirectSemanticParser':
    """Build an ``NlvrDirectSemanticParser`` from a ``Params`` configuration."""
    embedder_params = params.pop("sentence_embedder")
    sentence_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    action_embedding_dim = params.pop_int('action_embedding_dim')
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    dropout = params.pop_float('dropout', 0.0)
    # Attention is only enabled when a similarity function is explicitly configured.
    attention_params = params.pop("attention_function", None)
    attention_function = (SimilarityFunction.from_params(attention_params)
                          if attention_params is not None else None)
    decoder_beam_search = BeamSearch.from_params(params.pop("decoder_beam_search"))
    max_decoding_steps = params.pop_int("max_decoding_steps")
    params.assert_empty(cls.__name__)
    return cls(vocab,
               sentence_embedder=sentence_embedder,
               action_embedding_dim=action_embedding_dim,
               encoder=encoder,
               attention_function=attention_function,
               decoder_beam_search=decoder_beam_search,
               max_decoding_steps=max_decoding_steps,
               dropout=dropout)
def from_params(cls, vocab, params: Params) -> 'WikiTablesSemanticParser':
    """Build a ``WikiTablesSemanticParser`` from a ``Params`` configuration."""
    question_embedder = TextFieldEmbedder.from_params(vocab, params.pop("question_embedder"))
    action_embedding_dim = params.pop_int("action_embedding_dim")
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    entity_encoder = Seq2VecEncoder.from_params(params.pop('entity_encoder'))
    max_decoding_steps = params.pop_int("max_decoding_steps")
    feedforward_params = params.pop('mixture_feedforward', None)
    mixture_feedforward = (FeedForward.from_params(feedforward_params)
                           if feedforward_params is not None else None)
    decoder_beam_search = BeamSearch.from_params(params.pop("decoder_beam_search"))
    # If no attention function is specified, we should not use attention — not
    # attention with a default similarity function.
    attention_params = params.pop("attention_function", None)
    attention_function = (SimilarityFunction.from_params(attention_params)
                          if attention_params is not None else None)
    dropout = params.pop_float('dropout', 0.0)
    num_linking_features = params.pop_int('num_linking_features', 8)
    rule_namespace = params.pop('rule_namespace', 'rule_labels')
    params.assert_empty(cls.__name__)
    return cls(vocab,
               question_embedder=question_embedder,
               action_embedding_dim=action_embedding_dim,
               encoder=encoder,
               entity_encoder=entity_encoder,
               mixture_feedforward=mixture_feedforward,
               decoder_beam_search=decoder_beam_search,
               max_decoding_steps=max_decoding_steps,
               attention_function=attention_function,
               dropout=dropout,
               num_linking_features=num_linking_features,
               rule_namespace=rule_namespace)
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2SeqCrf':
    """Build a ``SimpleSeq2SeqCrf`` from a ``Params`` configuration.

    FIX: added ``params.assert_empty(cls.__name__)`` before returning, matching
    every other ``from_params`` in this file — without it, misspelled or
    unexpected configuration keys were silently ignored.
    """
    source_embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "target_tags")
    # If no attention function is specified, we should not use attention — not
    # attention with a default similarity function.
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail loudly on any leftover (likely typo'd) configuration keys.
    params.assert_empty(cls.__name__)
    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab, params: Params) -> 'WikiTablesErmSemanticParser':
    """Build a ``WikiTablesErmSemanticParser`` from a ``Params`` configuration.

    FIX: ``normalize_beam_score_by_length`` is now popped with ``pop_bool``
    (previously plain ``pop``), so string values such as ``"false"`` in a
    config file are coerced to a real boolean — consistent with how the other
    parsers in this file read the same key.
    """
    question_embedder = TextFieldEmbedder.from_params(vocab, params.pop("question_embedder"))
    action_embedding_dim = params.pop_int("action_embedding_dim")
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    entity_encoder = Seq2VecEncoder.from_params(params.pop('entity_encoder'))
    mixture_feedforward_type = params.pop('mixture_feedforward', None)
    if mixture_feedforward_type is not None:
        mixture_feedforward = FeedForward.from_params(mixture_feedforward_type)
    else:
        mixture_feedforward = None
    # If no attention function is specified, we should not use attention — not
    # attention with a default similarity function.
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    decoder_beam_size = params.pop_int("decoder_beam_size")
    decoder_num_finished_states = params.pop_int("decoder_num_finished_states", None)
    max_decoding_steps = params.pop_int("max_decoding_steps")
    normalize_beam_score_by_length = params.pop_bool("normalize_beam_score_by_length", False)
    use_neighbor_similarity_for_linking = params.pop_bool("use_neighbor_similarity_for_linking",
                                                          False)
    dropout = params.pop_float('dropout', 0.0)
    num_linking_features = params.pop_int('num_linking_features', 10)
    tables_directory = params.pop('tables_directory', '/wikitables/')
    rule_namespace = params.pop('rule_namespace', 'rule_labels')
    checklist_cost_weight = params.pop_float("checklist_cost_weight", 0.6)
    mml_model_file = params.pop('mml_model_file', None)
    params.assert_empty(cls.__name__)
    return cls(vocab,
               question_embedder=question_embedder,
               action_embedding_dim=action_embedding_dim,
               encoder=encoder,
               entity_encoder=entity_encoder,
               mixture_feedforward=mixture_feedforward,
               attention_function=attention_function,
               decoder_beam_size=decoder_beam_size,
               decoder_num_finished_states=decoder_num_finished_states,
               max_decoding_steps=max_decoding_steps,
               normalize_beam_score_by_length=normalize_beam_score_by_length,
               checklist_cost_weight=checklist_cost_weight,
               use_neighbor_similarity_for_linking=use_neighbor_similarity_for_linking,
               dropout=dropout,
               num_linking_features=num_linking_features,
               tables_directory=tables_directory,
               rule_namespace=rule_namespace,
               initial_mml_model_file=mml_model_file)
def from_params(cls, params: Params) -> 'IntraSentenceAttentionEncoder':
    """Build an ``IntraSentenceAttentionEncoder`` from a ``Params`` configuration.

    FIX: integer-valued keys (``input_dim``, ``projection_dim``,
    ``num_attention_heads``) are now popped with ``pop_int`` instead of plain
    ``pop``, so string values from a config file are coerced to ``int`` — the
    sibling version of this constructor in this file already does so.
    """
    input_dim = params.pop_int('input_dim')
    projection_dim = params.pop_int('projection_dim', None)
    similarity_function = SimilarityFunction.from_params(params.pop('similarity_function', {}))
    num_attention_heads = params.pop_int('num_attention_heads', 1)
    combination = params.pop('combination', '1,2')
    params.assert_empty(cls.__name__)
    return cls(input_dim=input_dim,
               projection_dim=projection_dim,
               similarity_function=similarity_function,
               num_attention_heads=num_attention_heads,
               combination=combination)
def from_params(cls, params: Params) -> 'IntraSentenceAttentionEncoder':
    """Construct an ``IntraSentenceAttentionEncoder`` (with ``output_dim``) from configuration."""
    input_dim = params.pop_int('input_dim')
    projection_dim = params.pop_int('projection_dim', None)
    similarity_params = params.pop('similarity_function', {})
    similarity_function = SimilarityFunction.from_params(similarity_params)
    num_attention_heads = params.pop_int('num_attention_heads', 1)
    combination = params.pop('combination', '1,2')
    output_dim = params.pop_int('output_dim', None)
    params.assert_empty(cls.__name__)
    return cls(input_dim=input_dim,
               projection_dim=projection_dim,
               similarity_function=similarity_function,
               num_attention_heads=num_attention_heads,
               combination=combination,
               output_dim=output_dim)
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2MultiSeq':
    """Build a ``SimpleSeq2MultiSeq`` from a ``Params`` configuration.

    FIX: added ``params.assert_empty(cls.__name__)`` before returning, matching
    every other ``from_params`` in this file, so misspelled configuration keys
    raise instead of being silently ignored. Also removed commented-out dead
    code (old required-namespace pops superseded by the defaulted versions).
    """
    tasks = params.pop("tasks")
    domains = params.pop("domains")
    source_embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, source_embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    upos_namespace = params.pop("upos_namespace", "upos_tags")
    ner_namespace = params.pop("ner_namespace", "ner_tags")
    chunk_namespace = params.pop("chunk_namespace", "chunk_tags")
    # If no attention function is specified, we should not use attention — not
    # attention with a default similarity function.
    attention_function_type = params.pop("attention_function", None)
    if attention_function_type is not None:
        attention_function = SimilarityFunction.from_params(attention_function_type)
    else:
        attention_function = None
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    initializer = InitializerApplicator.from_params(params.pop('initializer', []))
    regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
    # Fail loudly on any leftover (likely typo'd) configuration keys.
    params.assert_empty(cls.__name__)
    return cls(vocab,
               tasks=tasks,
               domains=domains,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               upos_namespace=upos_namespace,
               ner_namespace=ner_namespace,
               chunk_namespace=chunk_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio,
               initializer=initializer,
               regularizer=regularizer)
def from_params(cls, vocab, params: Params) -> 'SimpleSeq2Seq':
    """Build a ``SimpleSeq2Seq`` from a ``Params`` configuration."""
    embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "tokens")
    # If no attention function is specified, we should not use attention — not
    # attention with a default similarity function.
    attention_params = params.pop("attention_function", None)
    attention_function = (SimilarityFunction.from_params(attention_params)
                          if attention_params is not None else None)
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    params.assert_empty(cls.__name__)
    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio)
def from_params(cls, vocab, params: Params) -> 'SpanAe':
    """Build a ``SpanAe`` from a ``Params`` configuration.

    The span extractor and span-scorer feedforward are both optional and
    remain ``None`` when their config sections are absent.
    """
    embedder_params = params.pop("source_embedder")
    source_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
    encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
    max_decoding_steps = params.pop("max_decoding_steps")
    target_namespace = params.pop("target_namespace", "tokens")
    # If no attention function is specified, we should not use attention — not
    # attention with a default similarity function.
    attention_params = params.pop("attention_function", None)
    attention_function = (SimilarityFunction.from_params(attention_params)
                          if attention_params is not None else None)
    scheduled_sampling_ratio = params.pop_float("scheduled_sampling_ratio", 0.0)
    extractor_params = params.pop("span_extractor", None)
    spans_extractor = (SpanExtractor.from_params(extractor_params)
                       if extractor_params is not None else None)
    scorer_params = params.pop("span_scorer_feedforward", None)
    spans_scorer_feedforward = (FeedForward.from_params(scorer_params)
                                if scorer_params is not None else None)
    spans_per_word = params.pop_float("spans_per_word")
    params.assert_empty(cls.__name__)
    return cls(vocab,
               source_embedder=source_embedder,
               encoder=encoder,
               max_decoding_steps=max_decoding_steps,
               spans_per_word=spans_per_word,
               target_namespace=target_namespace,
               attention_function=attention_function,
               scheduled_sampling_ratio=scheduled_sampling_ratio,
               spans_extractor=spans_extractor,
               spans_scorer_feedforward=spans_scorer_feedforward)
def test_registry_has_builtin_similarity_functions(self):
    """The four built-in similarity functions are registered under their canonical names."""
    for key, expected_class_name in [("dot_product", "DotProductSimilarity"),
                                     ("bilinear", "BilinearSimilarity"),
                                     ("linear", "LinearSimilarity"),
                                     ("cosine", "CosineSimilarity")]:
        assert SimilarityFunction.by_name(key).__name__ == expected_class_name
def from_params(cls, params: Params) -> 'Attention':
    """Build an ``Attention`` module from configuration, defaulting ``normalize`` to True."""
    function_params = params.pop('similarity_function', {})
    similarity_function = SimilarityFunction.from_params(function_params)
    normalize = params.pop_bool('normalize', True)
    params.assert_empty(cls.__name__)
    return cls(similarity_function=similarity_function, normalize=normalize)
def from_params(cls, params: Params) -> 'MatrixAttention':
    """Build a ``MatrixAttention`` module from configuration."""
    function_params = params.pop('similarity_function', {})
    similarity_function = SimilarityFunction.from_params(function_params)
    params.assert_empty(cls.__name__)
    return cls(similarity_function=similarity_function)
def from_params(cls, params: Params) -> 'MatrixAttention':
    """Build a ``MatrixAttention`` module from configuration.

    FIX: added ``params.assert_empty(cls.__name__)`` before returning — the
    twin implementation of this constructor in this file calls it, and without
    it unexpected configuration keys were silently ignored.
    """
    similarity_function = SimilarityFunction.from_params(params.pop('similarity_function', {}))
    # Fail loudly on any leftover (likely typo'd) configuration keys.
    params.assert_empty(cls.__name__)
    return cls(similarity_function=similarity_function)