Code Example #1
 def from_params(cls,
                 params: Params,
                 instances: Iterable['adi.Instance'] = None):
     choice = params.pop_choice('type',
                                cls.list_available(),
                                default_to_first_choice=True)
     return cls.by_name(choice).from_params(params, instances)
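This by-name dispatch recurs throughout the examples below: pop_choice consumes the "type" key, by_name resolves the registered subclass, and the remaining parameters are forwarded to that subclass's own from_params. A minimal usage sketch, assuming the stock allennlp registry (the "lstm" type name and the sizes are hypothetical):

from allennlp.common import Params
from allennlp.modules import Seq2SeqEncoder

# "type" picks the registered subclass; everything else is handed to that
# subclass's own from_params().
params = Params({"type": "lstm", "input_size": 100, "hidden_size": 200})
encoder = Seq2SeqEncoder.from_params(params)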
Code Example #2
File: optimizers.py Project: zxsted/allennlp
 def from_params(cls, model_parameters: List[torch.nn.Parameter], params: Params):
     if isinstance(params, str):
         optimizer = params
         params = Params({})
     else:
         optimizer = params.pop_choice("type", Optimizer.list_available())
     return Optimizer.by_name(optimizer)(model_parameters, **params.as_dict()) # type: ignore
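A hedged usage sketch for this factory, assuming the standard allennlp registry where "sgd" maps to torch.optim.SGD; the module and hyperparameters are illustrative only:

import torch
from allennlp.common import Params
from allennlp.training.optimizers import Optimizer

model = torch.nn.Linear(10, 2)  # any torch module works for illustration
params = Params({"type": "sgd", "lr": 0.01, "momentum": 0.9})
# "type" is popped to select the optimizer class; the remaining keys
# become its constructor kwargs via params.as_dict().
optimizer = Optimizer.from_params(list(model.parameters()), params)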
Code Example #3
 def from_params(cls, params: Params) -> 'WordSplitter':
     choice = params.pop_choice('type',
                                cls.list_available(),
                                default_to_first_choice=True)
     # None of the word splitters take parameters, so we just make sure the parameters are empty
     # here.
     params.assert_empty('WordSplitter')
     return cls.by_name(choice)()
Code Example #4
File: data_iterator.py Project: panyang/allennlp
    def from_params(cls, params: Params):
        from allennlp.experiments.registry import Registry
        # TODO(Mark): The adaptive iterator will need a bit of work here,
        # to retrieve the scaling function etc.

        iterator_type = params.pop_choice("type",
                                          Registry.list_data_iterators())
        return Registry.get_data_iterator(iterator_type)(
            **params.as_dict())  # type: ignore
Code Example #5
 def from_params(
     cls, model: Model, task_list: List[Task], serialization_dir: str, params: Params
 ) -> "MultiTaskTrainer":
     """
     Static method that constructs the multi task trainer described by ``params``.
     """
     choice = params.pop_choice("type", cls.list_available())
     return cls.by_name(choice).from_params(
         model=model, task_list=task_list, serialization_dir=serialization_dir, params=params
     )
Code Example #6
 def from_params(cls, params: Params) -> 'StanfordSentimentTreeBankDatasetReader':
     token_indexers = TokenIndexer.dict_from_params(params.pop('token_indexers', {}))
     use_subtrees = params.pop('use_subtrees', False)
     granularity = params.pop_choice('granularity', ["5-class", "3-class", "2-class"], True)
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return StanfordSentimentTreeBankDatasetReader(
             token_indexers=token_indexers,
             use_subtrees=use_subtrees,
             granularity=granularity,
             lazy=lazy)
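For reference, a hypothetical Params blob that exercises every key this reader pops ("single_id" is the stock single-token-id indexer):

params = Params({
    "token_indexers": {"tokens": {"type": "single_id"}},
    "use_subtrees": True,
    "granularity": "2-class",
    "lazy": False,
})
reader = StanfordSentimentTreeBankDatasetReader.from_params(params)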
Code Example #7
File: tagger.py Project: danielhers/streusle_tagger
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'Tagger':
        token_representation_dim = params.pop_int("token_representation_dim")

        encoder = params.pop("encoder", None)
        if encoder is not None:
            encoder = Seq2SeqEncoder.from_params(encoder)
        decoder = params.pop("decoder", None)
        if decoder is not None and not isinstance(decoder, str):
            decoder = FeedForward.from_params(decoder)

        use_crf = params.pop_bool("use_crf", False)
        constrain_crf_decoding = params.pop_bool("constrain_crf_decoding", False)
        include_start_end_transitions = params.pop_bool("include_start_end_transitions", True)

        contextualizer = params.pop('contextualizer', None)
        if contextualizer:
            contextualizer = Contextualizer.from_params(contextualizer)
        calculate_per_label_f1 = params.pop_bool("calculate_per_label_f1", False)
        calculate_span_f1 = params.pop_bool("calculate_span_f1", False)
        calculate_perplexity = params.pop_bool("calculate_perplexity", False)
        loss_average = params.pop("loss_average", "batch")
        label_encoding = params.pop_choice("label_encoding", [None, "BIO", "BIOUL", "IOB1"],
                                           default_to_first_choice=True)

        pretrained_file = params.pop("pretrained_file", None)
        transfer_contextualizer_from_pretrained_file = params.pop_bool(
            "transfer_contextualizer_from_pretrained_file", False)
        transfer_encoder_from_pretrained_file = params.pop_bool(
            "transfer_encoder_from_pretrained_file", False)
        freeze_encoder = params.pop_bool("freeze_encoder", False)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   token_representation_dim=token_representation_dim,
                   encoder=encoder,
                   decoder=decoder,
                   use_crf=use_crf,
                   constrain_crf_decoding=constrain_crf_decoding,
                   include_start_end_transitions=include_start_end_transitions,
                   label_encoding=label_encoding,
                   contextualizer=contextualizer,
                   calculate_per_label_f1=calculate_per_label_f1,
                   calculate_span_f1=calculate_span_f1,
                   calculate_perplexity=calculate_perplexity,
                   loss_average=loss_average,
                   pretrained_file=pretrained_file,
                   transfer_contextualizer_from_pretrained_file=transfer_contextualizer_from_pretrained_file,
                   transfer_encoder_from_pretrained_file=transfer_encoder_from_pretrained_file,
                   freeze_encoder=freeze_encoder,
                   initializer=initializer,
                   regularizer=regularizer)
Code Example #8
 def from_params(cls, params: Params) -> 'UniversalDatasetReader':
     token_indexers = TokenIndexer.dict_from_params(
         params.pop('token_indexers', {}))
     use_subtrees = params.pop('use_subtrees', False)
     granularity = params.pop_choice('granularity',
                                     ["5-class", "3-class", "2-class"],
                                     True)
     lazy = params.pop('lazy', False)
     params.assert_empty(cls.__name__)
     return UniversalDatasetReader(token_indexers=token_indexers,
                                   use_subtrees=use_subtrees,
                                   granularity=granularity,
                                   lazy=lazy)
Code Example #9
 def from_params(cls, params: Params) -> 'MultiCorpusReader':
     token_indexers_params = params.pop('token_indexers', {})
     token_indexers = TokenIndexer.dict_from_params(token_indexers_params)
     corpus_langmap = params.pop('corpus_langmap', None)
     logger.info('corpus langmap %s', corpus_langmap)
     shuffle_corpus = params.pop('shuffle_corpus', True)
     corpus_readers_params: Dict = params.pop('corpus_readers', {})
     corpus_readers = defaultdict()
     for name, reader_params in corpus_readers_params.items():
         # renamed from `params` so the enclosing Params object is not shadowed
         reader_params['token_indexers'] = token_indexers_params
         choice = reader_params.pop_choice('type', DatasetReader.list_available())
         corpus_readers[name] = DatasetReader.by_name(choice).from_params(
             reader_params)
     lazy = params.pop('lazy', True)
     params.assert_empty(cls.__name__)
     return MultiCorpusReader(token_indexers=token_indexers,
                              corpus_readers=corpus_readers,
                              corpus_langmap=corpus_langmap,
                              shuffle_corpus=shuffle_corpus,
                              lazy=lazy)
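A hypothetical configuration for this reader; the corpus names, language map, and inner reader types are stand-ins for whatever DatasetReaders the surrounding project registers. Note that the shared token_indexers block is copied into each inner reader's params before dispatch:

params = Params({
    "token_indexers": {"tokens": {"type": "single_id"}},
    "corpus_readers": {
        "english": {"type": "conll2003"},
        "german": {"type": "universal_dependencies"},
    },
    "corpus_langmap": {"english": "en", "german": "de"},
    "shuffle_corpus": True,
    "lazy": True,
})
reader = MultiCorpusReader.from_params(params)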
Code Example #10
 def from_params(params: Params) -> 'WordSplitter':
     choice = params.pop_choice('type', list(word_splitters.keys()), default_to_first_choice=True)
     params.assert_empty('WordSplitter')
     return word_splitters[choice]()
Code Example #11
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'QuestionGenerator':
     choice = params.pop_choice('type', cls.list_available())
     return cls.by_name(choice).from_params(vocab, params)
Code Example #12
File: attention.py Project: zhyq/allennlp
 def from_params(cls, params: Params) -> 'Attention':
     clazz = cls.by_name(params.pop_choice("type", cls.list_available()))
     return clazz.from_params(params)
Code Example #13
 def from_params(cls, params: Params) -> 'Seq2VecEncoder':
     choice = params.pop_choice('type', cls.list_available())
     return cls.by_name(choice).from_params(params)
Code Example #14
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'TextFieldEmbedder':
     choice = params.pop_choice('type', cls.list_available(), default_to_first_choice=True)
     return cls.by_name(choice).from_params(vocab, params)
Code Example #15
File: optimizers.py Project: apmoore1/allennlp
    def from_params(cls, model_parameters: List, params: Params):  # type: ignore
        # pylint: disable=arguments-differ
        if isinstance(params, str):
            optimizer = params
            params = Params({})
        else:
            optimizer = params.pop_choice("type", Optimizer.list_available())

        # make the parameter groups, if needed
        groups = params.pop("parameter_groups", None)
        if groups:
            # The input to the optimizer is list of dict.
            # Each dict contains a "parameter group" and groups specific options,
            # e.g., {'params': [list of parameters], 'lr': 1e-3, ...}
            # Any config option not specified in the additional options (e.g.
            # for the default group) is inherited from the top level config.
            # see: http://pytorch.org/docs/0.3.0/optim.html?#per-parameter-options
            #
            # groups contains something like:
            #"parameter_groups": [
            #       [["regex1", "regex2"], {"lr": 1e-3}],
            #       [["regex3"], {"lr": 1e-4}]
            #]
            #(note that the allennlp config files require double quotes ", and will
            # fail (sometimes silently) with single quotes ').

            # This is typed as Any since the dict values other than
            # the params key are passed to the Optimizer constructor and
            # can be any type it accepts.
            # In addition to any parameters that match group specific regex,
            # we also need a group for the remaining "default" group.
            # Those will be included in the last entry of parameter_groups.
            parameter_groups: Any = [{'params': []} for _ in range(len(groups) + 1)]
            # add the group specific kwargs
            for k in range(len(groups)): # pylint: disable=consider-using-enumerate
                parameter_groups[k].update(groups[k][1].as_dict())

            regex_use_counts: Dict[str, int] = {}
            parameter_group_names: List[set] = [set() for _ in range(len(groups) + 1)]
            for name, param in model_parameters:
                # Determine the group for this parameter.
                group_index = None
                for k, group_regexes in enumerate(groups):
                    for regex in group_regexes[0]:
                        if regex not in regex_use_counts:
                            regex_use_counts[regex] = 0
                        if re.search(regex, name):
                            if group_index is not None and group_index != k:
                                raise ValueError("{} was specified in two separate parameter groups".format(name))
                            group_index = k
                            regex_use_counts[regex] += 1

                if group_index is not None:
                    parameter_groups[group_index]['params'].append(param)
                    parameter_group_names[group_index].add(name)
                else:
                    # the default group
                    parameter_groups[-1]['params'].append(param)
                    parameter_group_names[-1].add(name)

            # log the parameter groups
            logger.info("Done constructing parameter groups.")
            for k in range(len(groups) + 1):
                group_options = {key: val for key, val in parameter_groups[k].items()
                                 if key != 'params'}
                logger.info("Group %s: %s, %s", k,
                            list(parameter_group_names[k]),
                            group_options)
            # check for unused regex
            for regex, count in regex_use_counts.items():
                if count == 0:
                    logger.warning("When constructing parameter groups, "
                                   " %s not match any parameter name", regex)

        else:
            parameter_groups = [param for name, param in model_parameters]

        # Log the number of parameters to optimize
        num_parameters = 0
        for parameter_group in parameter_groups:
            if isinstance(parameter_group, dict):
                num_parameters += sum(parameter.numel() for parameter in parameter_group["params"])
            else:
                num_parameters += parameter_group.numel()
        logger.info("Number of trainable parameters: %s", num_parameters)

        # By default we cast things that e.g. look like floats to floats before handing them
        # to the Optimizer constructor, but if you want to disable that behavior you could add a
        #       "infer_type_and_cast": false
        # key to your "trainer.optimizer" config.
        infer_type_and_cast = params.pop_bool("infer_type_and_cast", True)
        params_as_dict = params.as_dict(infer_type_and_cast=infer_type_and_cast)
        return Optimizer.by_name(optimizer)(parameter_groups, **params_as_dict) # type: ignore
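Despite the bare List annotation, the grouping loop iterates (name, parameter) pairs, so the natural call site passes model.named_parameters(). A hedged sketch with a hypothetical two-module model whose embedding gets its own learning rate:

import torch
from allennlp.common import Params
from allennlp.training.optimizers import Optimizer

class TinyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.embedding = torch.nn.Embedding(1000, 50)
        self.projection = torch.nn.Linear(50, 2)

model = TinyModel()
params = Params({
    "type": "adam",
    "lr": 1e-3,
    # "embedding" matches the parameter name "embedding.weight"; all other
    # parameters fall into the trailing default group at lr 1e-3.
    "parameter_groups": [[["embedding"], {"lr": 1e-4}]],
})
optimizer = Optimizer.from_params(list(model.named_parameters()), params)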
Code Example #16
File: entities.py Project: kougou/Allen_HCN
 def from_params(cls, params: Params) -> 'HCNEntityTracker':
     """
     Static method that constructs the dataset reader described by ``params``.
     """
     choice = params.pop_choice('type', cls.list_available())
     return cls.by_name(choice).from_params(params)
Code Example #17
File: optimizers.py Project: ryan-leung/ml_monorepo
    def from_params(cls, model_parameters, params):  # type: ignore
        # pylint: disable=arguments-differ
        if isinstance(params, unicode):
            optimizer = params
            params = Params({})
        else:
            optimizer = params.pop_choice(u"type", Optimizer.list_available())

        # make the parameter groups, if needed
        groups = params.pop(u"parameter_groups", None)
        if groups:
            # The input to the optimizer is list of dict.
            # Each dict contains a "parameter group" and groups specific options,
            # e.g., {'params': [list of parameters], 'lr': 1e-3, ...}
            # Any config option not specified in the additional options (e.g.
            # for the default group) is inherited from the top level config.
            # see: http://pytorch.org/docs/0.3.0/optim.html?#per-parameter-options
            #
            # groups contains something like:
            #"parameter_groups": [
            #       [["regex1", "regex2"], {"lr": 1e-3},
            #        ["regex3"], {"lr": 1e-4}]
            #]
            #(note that the allennlp config files require double quotes ", and will
            # fail (sometimes silently) with single quotes ').

            # This is typed as Any since the dict values other than
            # the params key are passed to the Optimizer constructor and
            # can be any type it accepts.
            # In addition to any parameters that match group specific regex,
            # we also need a group for the remaining "default" group.
            # Those will be included in the last entry of parameter_groups.
            parameter_groups = [{u'params': []} for _ in range(len(groups) + 1)]
            # add the group specific kwargs
            for k in range(len(groups)): # pylint: disable=consider-using-enumerate
                parameter_groups[k].update(groups[k][1].as_dict())

            regex_use_counts = {}
            parameter_group_names = [set() for _ in range(len(groups) + 1)]
            for name, param in model_parameters:
                # Determine the group for this parameter.
                group_index = None
                for k, group_regexes in enumerate(groups):
                    for regex in group_regexes[0]:
                        if regex not in regex_use_counts:
                            regex_use_counts[regex] = 0
                        if re.search(regex, name):
                            if group_index is not None and group_index != k:
                                raise ValueError(u"{} was specified in two separate parameter groups".format(name))
                            group_index = k
                            regex_use_counts[regex] += 1

                if group_index is not None:
                    parameter_groups[group_index][u'params'].append(param)
                    parameter_group_names[group_index].add(name)
                else:
                    # the default group
                    parameter_groups[-1][u'params'].append(param)
                    parameter_group_names[-1].add(name)

            # log the parameter groups
            logger.info(u"Done constructing parameter groups.")
            for k in range(len(groups) + 1):
                group_options = dict((key, val) for key, val in list(parameter_groups[k].items())
                                     if key != u'params')
                logger.info(u"Group %s: %s, %s", k,
                            list(parameter_group_names[k]),
                            group_options)
            # check for unused regex
            for regex, count in list(regex_use_counts.items()):
                if count == 0:
                    logger.warning(u"When constructing parameter groups, "
                                   u" %s not match any parameter name", regex)

        else:
            parameter_groups = [param for name, param in model_parameters]

        # Log the number of parameters to optimize
        num_parameters = 0
        for parameter_group in parameter_groups:
            if isinstance(parameter_group, dict):
                num_parameters += sum(parameter.numel() for parameter in parameter_group[u"params"])
            else:
                num_parameters += parameter_group.numel()
        logger.info(u"Number of trainable parameters: %s", num_parameters)
        return Optimizer.by_name(optimizer)(parameter_groups, **params.as_dict()) # type: ignore
Code Example #18
 def from_params(cls, params: Params) -> 'DatasetReader':
     """
     Static method that constructs the dataset reader described by ``params``.
     """
     choice = params.pop_choice('type', cls.list_available())
     return cls.by_name(choice).from_params(params)
Code Example #19
 def from_params(cls, params: Params) -> 'WordFilter':
     choice = params.pop_choice('type', cls.list_available(), default_to_first_choice=True)
     params.assert_empty('WordFilter')
     return cls.by_name(choice)()
Code Example #20
 def from_params(cls, params: Params) -> 'SimilarityFunction':
     choice = params.pop_choice('type', cls.list_available(), default_to_first_choice=True)
     return cls.by_name(choice).from_params(params)
Code Example #21
 def from_params(cls, params: Params) -> 'TokenIndexer':  # type: ignore
     choice = params.pop_choice('type', cls.list_available(), default_to_first_choice=True)
     return cls.by_name(choice).from_params(params)
Code Example #22
File: data_iterator.py Project: deepmipt/ner-meta
    def from_params(cls, params: Params) -> 'DataIterator':
        # TODO(Mark): The adaptive iterator will need a bit of work here,
        # to retrieve the scaling function etc.

        iterator_type = params.pop_choice("type", cls.list_available())
        return cls.by_name(iterator_type).from_params(params)
Code Example #23
File: step.py Project: himkt/allennlp
    def from_params(
        cls: Type["Step"],
        params: Params,
        constructor_to_call: Callable[..., "Step"] = None,
        constructor_to_inspect: Union[Callable[..., "Step"],
                                      Callable[["Step"], None]] = None,
        existing_steps: Optional[Dict[str, "Step"]] = None,
        step_name: Optional[str] = None,
        **extras,
    ) -> "Step":
        # Why do we need a custom from_params? Step classes have a run() method that takes all the
        # parameters necessary to perform the step. The __init__() method of the step takes those
        # same parameters, but each of them could be wrapped in another Step instead of being
        # supplied directly. from_params() doesn't know anything about these shenanigans, so
        # we have to supply the necessary logic here.

        if constructor_to_call is not None:
            raise ConfigurationError(
                f"{cls.__name__}.from_params cannot be called with a constructor_to_call."
            )
        if constructor_to_inspect is not None:
            raise ConfigurationError(
                f"{cls.__name__}.from_params cannot be called with a constructor_to_inspect."
            )

        if existing_steps is None:
            existing_steps = {}

        if isinstance(params, str):
            params = Params({"type": params})

        if not isinstance(params, Params):
            raise ConfigurationError(
                "from_params was passed a `params` object that was not a `Params`. This probably "
                "indicates malformed parameters in a configuration file, where something that "
                "should have been a dictionary was actually a list, or something else. "
                f"This happened when constructing an object of type {cls}.")

        as_registrable = cast(Type[Registrable], cls)
        choice = params.pop_choice("type",
                                   choices=as_registrable.list_available(),
                                   default_to_first_choice=True)
        subclass, constructor_name = as_registrable.resolve_class_name(choice)
        if not issubclass(subclass, Step):
            # This can happen if `choice` is a fully qualified name.
            raise ConfigurationError(
                f"Tried to make a Step of type {choice}, but ended up with a {subclass}."
            )

        parameters = infer_method_params(subclass, subclass.run)
        del parameters["self"]
        init_parameters = infer_constructor_params(subclass)
        del init_parameters["self"]
        del init_parameters["kwargs"]
        parameter_overlap = parameters.keys() & init_parameters.keys()
        assert len(parameter_overlap) <= 0, (
            f"If this assert fails it means that you wrote a Step with a run() method that takes one of the "
            f"reserved parameters ({', '.join(init_parameters.keys())})")
        parameters.update(init_parameters)

        kwargs: Dict[str, Any] = {}
        accepts_kwargs = False
        for param_name, param in parameters.items():
            if param.kind == param.VAR_KEYWORD:
                # When a class takes **kwargs we store the fact that the method allows extra keys; if
                # we get extra parameters, instead of crashing, we'll just pass them as-is to the
                # constructor, and hope that you know what you're doing.
                accepts_kwargs = True
                continue

            explicitly_set = param_name in params
            constructed_arg = pop_and_construct_arg(
                subclass.__name__,
                param_name,
                param.annotation,
                param.default,
                params,
                existing_steps=existing_steps,
                **extras,
            )

            # If the param wasn't explicitly set in `params` and we just ended up constructing
            # the default value for the parameter, we can just omit it.
            # Leaving it in can cause issues with **kwargs in some corner cases, where you might end up
            # with multiple values for a single parameter (e.g., the default value gives you lazy=False
            # for a dataset reader inside **kwargs, but a particular dataset reader actually hard-codes
            # lazy=True - the superclass sees both lazy=True and lazy=False in its constructor).
            if explicitly_set or constructed_arg is not param.default:
                kwargs[param_name] = constructed_arg

        if accepts_kwargs:
            kwargs.update(params)
        else:
            params.assert_empty(subclass.__name__)

        return subclass(step_name=step_name, **kwargs)
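To make the run()/__init__ split concrete, here is a hypothetical Step subclass: the from_params above reads a config such as {"type": "count_lines", "path": "data.txt"} and routes "path" to run() through the constructed kwargs (or wires it to another step's output via existing_steps):

@Step.register("count_lines")
class CountLines(Step):
    # Hypothetical step: run()'s parameters are exactly what from_params()
    # pulls out of the config for this step.
    def run(self, path: str) -> int:  # type: ignore
        with open(path) as lines:
            return sum(1 for _ in lines)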
Code Example #24
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'TextFieldEmbedder':
     choice = params.pop_choice('type', cls.list_available(), default_to_first_choice=True)
     return cls.by_name(choice).from_params(vocab, params)
Code Example #25
 def from_params(cls, params: Params):
     from allennlp.experiments.registry import Registry
     choice = params.pop_choice('type', Registry.list_dataset_readers())
     return Registry.get_dataset_reader(choice).from_params(params)
Code Example #26
 def from_params(cls, params: Params) -> 'DataIterator':
     iterator_type = params.pop_choice("type", cls.list_available())
     return cls.by_name(iterator_type).from_params(params)
Code Example #27
File: optimizers.py Project: wjn922/allennlp
    def from_params(cls, model_parameters: List, params: Params):  # type: ignore
        # pylint: disable=arguments-differ
        if isinstance(params, str):
            optimizer = params
            params = Params({})
        else:
            optimizer = params.pop_choice("type", Optimizer.list_available())

        # make the parameter groups, if needed
        groups = params.pop("parameter_groups", None)
        if groups:
            # The input to the optimizer is list of dict.
            # Each dict contains a "parameter group" and groups specific options,
            # e.g., {'params': [list of parameters], 'lr': 1e-3, ...}
            # Any config option not specified in the additional options (e.g.
            # for the default group) is inherited from the top level config.
            # see: https://pytorch.org/docs/0.3.0/optim.html?#per-parameter-options
            #
            # groups contains something like:
            #"parameter_groups": [
            #       [["regex1", "regex2"], {"lr": 1e-3}],
            #       [["regex3"], {"lr": 1e-4}]
            #]
            #(note that the allennlp config files require double quotes ", and will
            # fail (sometimes silently) with single quotes ').

            # This is typed as Any since the dict values other than
            # the params key are passed to the Optimizer constructor and
            # can be any type it accepts.
            # In addition to any parameters that match group specific regex,
            # we also need a group for the remaining "default" group.
            # Those will be included in the last entry of parameter_groups.
            parameter_groups: Any = [{'params': []} for _ in range(len(groups) + 1)]
            # add the group specific kwargs
            for k in range(len(groups)): # pylint: disable=consider-using-enumerate
                parameter_groups[k].update(groups[k][1].as_dict())

            regex_use_counts: Dict[str, int] = {}
            parameter_group_names: List[set] = [set() for _ in range(len(groups) + 1)]
            for name, param in model_parameters:
                # Determine the group for this parameter.
                group_index = None
                for k, group_regexes in enumerate(groups):
                    for regex in group_regexes[0]:
                        if regex not in regex_use_counts:
                            regex_use_counts[regex] = 0
                        if re.search(regex, name):
                            if group_index is not None and group_index != k:
                                raise ValueError("{} was specified in two separate parameter groups".format(name))
                            group_index = k
                            regex_use_counts[regex] += 1

                if group_index is not None:
                    parameter_groups[group_index]['params'].append(param)
                    parameter_group_names[group_index].add(name)
                else:
                    # the default group
                    parameter_groups[-1]['params'].append(param)
                    parameter_group_names[-1].add(name)

            # log the parameter groups
            logger.info("Done constructing parameter groups.")
            for k in range(len(groups) + 1):
                group_options = {key: val for key, val in parameter_groups[k].items()
                                 if key != 'params'}
                logger.info("Group %s: %s, %s", k,
                            list(parameter_group_names[k]),
                            group_options)
            # check for unused regex
            for regex, count in regex_use_counts.items():
                if count == 0:
                    logger.warning("When constructing parameter groups, "
                                   " %s not match any parameter name", regex)

        else:
            parameter_groups = [param for name, param in model_parameters]

        # Log the number of parameters to optimize
        num_parameters = 0
        for parameter_group in parameter_groups:
            if isinstance(parameter_group, dict):
                num_parameters += sum(parameter.numel() for parameter in parameter_group["params"])
            else:
                num_parameters += parameter_group.numel()
        logger.info("Number of trainable parameters: %s", num_parameters)

        # By default we cast things that e.g. look like floats to floats before handing them
        # to the Optimizer constructor, but if you want to disable that behavior you could add a
        #       "infer_type_and_cast": false
        # key to your "trainer.optimizer" config.
        infer_type_and_cast = params.pop_bool("infer_type_and_cast", True)
        params_as_dict = params.as_dict(infer_type_and_cast=infer_type_and_cast)
        subclass = Optimizer.by_name(optimizer)

        # If the optimizer subclass has a from_params, use it.
        if hasattr(subclass, 'from_params'):
            return subclass.from_params(parameter_groups, params=params)
        else:
            return subclass(parameter_groups, **params_as_dict) # type: ignore
Code Example #28
 def from_params(cls, params: Params) -> 'WordStemmer':
     choice = params.pop_choice('type', cls.list_available(), default_to_first_choice=True)
     params.assert_empty('WordStemmer')
     return cls.by_name(choice)()
Code Example #29
File: token_embedder.py Project: panyang/allennlp
 def from_params(cls, vocab: Vocabulary, params: Params):
     from allennlp.experiments.registry import Registry
     choice = params.pop_choice('type', Registry.list_token_embedders())
     return Registry.get_token_embedder(choice).from_params(vocab, params)
Code Example #30
File: token_indexer.py Project: panyang/allennlp
 def from_params(cls, params: Params):  # type: ignore
     from allennlp.experiments.registry import Registry
     choice = params.pop_choice('type',
                                Registry.list_token_indexers(),
                                default_to_first_choice=True)
     return Registry.get_token_indexer(choice).from_params(params)
Code Example #31
 def from_params(cls, vocab: Vocabulary, params: Params):
     from allennlp.experiments.registry import Registry
     choice = params.pop_choice('type', Registry.list_text_field_embedders(), default_to_first_choice=True)
     return Registry.get_text_field_embedder(choice).from_params(vocab, params)
Code Example #32
 def from_params(cls, params: Params) -> 'Tokenizer':
     choice = params.pop_choice('type',
                                cls.list_available(),
                                default_to_first_choice=True)
     return cls.by_name(choice).from_params(params)
Code Example #33
 def from_params(cls, params: Params) -> 'SimilarityFunction':
     choice = params.pop_choice('type', cls.list_available())
     return cls.by_name(choice).from_params(params)
Code Example #34
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'TokenEmbedder':
     choice = params.pop_choice('type', cls.list_available())
     return cls.by_name(choice).from_params(vocab, params)
Code Example #35
File: optimizers.py Project: uganyasavur/allennlp
    def from_params(cls, model_parameters: List, params: Params):
        if isinstance(params, str):
            optimizer = params
            params = Params({})
        else:
            optimizer = params.pop_choice("type", Optimizer.list_available())

        # make the parameter groups, if needed
        groups = params.pop("parameter_groups", None)
        if groups:
            # input to optimizer is list of dict
            # each dict contains {'params': [list of parameters], 'lr': 1e-3, ...}
            # Any config option not specified in the additional options (e.g.
            # for the default group) is inherited from the top level config.
            # see: http://pytorch.org/docs/0.3.0/optim.html?#per-parameter-options
            #
            # groups contains something like:
            #"parameter_groups": [
            #       [['regex1', 'regex2'], {'lr': 1e-3}],
            #       [['regex3'], {'lr': 1e-4}]
            #]
            #
            # The last entry of this list is for the parameters not in any regex.
            #
            # This is typed as Any since the dict values other than
            # the params key are passed to the Optimizer constructor and
            # can be any type it accepts.
            parameter_groups: Any = [{
                'params': []
            } for _ in range(len(groups) + 1)]
            # add the group specific kwargs
            for k in range(len(groups)):  # pylint: disable=consider-using-enumerate
                parameter_groups[k].update(groups[k][1].as_dict())

            regex_use_counts: Dict[str, int] = {}
            parameter_group_names: List[set] = [
                set() for _ in range(len(groups) + 1)
            ]
            for name, param in model_parameters:
                # Determine the group for this parameter.
                group_index = None
                for k, group_regexes in enumerate(groups):
                    for regex in group_regexes[0]:
                        if regex not in regex_use_counts:
                            regex_use_counts[regex] = 0
                        if re.search(regex, name):
                            if group_index is not None and group_index != k:
                                raise ValueError(
                                    "{} was specified in two separate parameter groups"
                                    .format(name))
                            group_index = k
                            regex_use_counts[regex] += 1

                if group_index is not None:
                    parameter_groups[group_index]['params'].append(param)
                    parameter_group_names[group_index].add(name)
                else:
                    # the default group
                    parameter_groups[-1]['params'].append(param)
                    parameter_group_names[-1].add(name)

            # log the parameter groups
            logger.info("Done constructing parameter groups.")
            for k in range(len(groups) + 1):
                group_options = {
                    key: val
                    for key, val in parameter_groups[k].items()
                    if key != 'params'
                }
                print("Group {0}: {1}, {2}".format(
                    k, list(parameter_group_names[k]), group_options))
            # check for unused regex
            for regex, count in regex_use_counts.items():
                if count == 0:
                    logger.warning(
                        "When constructing parameter groups, "
                        "%s did not match any parameter name", regex)

        else:
            parameter_groups = [param for name, param in model_parameters]

        return Optimizer.by_name(optimizer)(parameter_groups,
                                            **params.as_dict())  # type: ignore
Code Example #36
 def from_params(cls, params: Params) -> 'WordSplitter':
     choice = params.pop_choice('type', cls.list_available(), default_to_first_choice=True)
     return cls.by_name(choice).from_params(params)
Code Example #37
File: seq2seq_encoder.py Project: zxsted/allennlp
 def from_params(cls, params: Params) -> 'Seq2SeqEncoder':
     choice = params.pop_choice('type', cls.list_available())
     return cls.by_name(choice).from_params(params)