#!/usr/bin/env python """ Ensures models are automatically found by allennlp. """ import logging from allennlp.common.plugins import import_plugins from allennlp.models import Model logging.basicConfig(level=logging.INFO) import_plugins() Model.by_name("copynet_seq2seq")
def __init__(
    self,
    id: str,
    registered_model_name: Optional[str] = None,
    model_class: Optional[type] = None,
    display_name: Optional[str] = None,
    archive_file: Optional[str] = None,
    overrides: Optional[Dict] = None,
    model_details: Optional[Union[str, ModelDetails]] = None,
    intended_use: Optional[Union[str, IntendedUse]] = None,
    factors: Optional[Union[str, Factors]] = None,
    metrics: Optional[Union[str, Metrics]] = None,
    evaluation_data: Optional[Union[str, EvaluationData]] = None,
    training_data: Optional[Union[str, TrainingData]] = None,
    quantitative_analyses: Optional[Union[str, QuantitativeAnalyses]] = None,
    ethical_considerations: Optional[Union[str, EthicalConsiderations]] = None,
    caveats_and_recommendations: Optional[Union[str, CaveatsAndRecommendations]] = None,
):
    """
    Build a model card.

    Each card section may be given either as a plain string (coerced into the
    corresponding section object's primary field) or as the structured section
    object itself.

    # Parameters

    id : `str`
        Unique identifier for the card; must be non-empty.
    registered_model_name : `Optional[str]`
        Name the model is registered under; used to look up `model_class`
        via `Model.by_name` when `model_class` is not supplied. An unknown
        name only logs a warning.
    model_class : `Optional[type]`
        The model's class; when available it supplies a default
        `display_name` (`model_class.__name__`) and default `model_details`
        (`get_description(model_class)`).
    display_name : `Optional[str]`
        Human-readable model name.
    archive_file : `Optional[str]`
        Model archive location. Values not starting with `"https:"` are
        resolved relative to `self._storage_location`.
    overrides : `Optional[Dict]`
        NOTE(review): accepted but neither used nor stored in this method —
        confirm whether callers rely on it being ignored.
    model_details, intended_use, factors, metrics, evaluation_data,
    training_data, quantitative_analyses, ethical_considerations,
    caveats_and_recommendations :
        Card sections; each accepts a string shortcut or the structured
        section object.
    """
    # Deliberate hard failure on a missing/empty id. Kept as an assert (not a
    # raise) so callers that catch AssertionError are unaffected; note asserts
    # are stripped under `python -O`.
    assert id

    if not model_class and registered_model_name:
        try:
            model_class = Model.by_name(registered_model_name)
        except ConfigurationError:
            # Lazy %-style args: the message is only formatted if the record
            # is actually emitted (was an eager str.format before).
            logger.warning("%s is not a registered model.", registered_model_name)

    if model_class:
        display_name = display_name or model_class.__name__
        model_details = model_details or get_description(model_class)

    # Anything that is not an https URL is treated as a path inside local storage.
    if archive_file and not archive_file.startswith("https:"):
        archive_file = os.path.join(self._storage_location, archive_file)

    # Coerce plain-string shortcuts into their structured section objects.
    if isinstance(model_details, str):
        model_details = ModelDetails(description=model_details)
    if isinstance(intended_use, str):
        intended_use = IntendedUse(primary_uses=intended_use)
    if isinstance(factors, str):
        factors = Factors(relevant_factors=factors)
    if isinstance(metrics, str):
        metrics = Metrics(model_performance_measures=metrics)
    if isinstance(evaluation_data, str):
        evaluation_data = EvaluationData(dataset=evaluation_data)
    if isinstance(training_data, str):
        training_data = TrainingData(dataset=training_data)
    if isinstance(quantitative_analyses, str):
        quantitative_analyses = QuantitativeAnalyses(unitary_results=quantitative_analyses)
    if isinstance(ethical_considerations, str):
        ethical_considerations = EthicalConsiderations(ethical_considerations)
    if isinstance(caveats_and_recommendations, str):
        caveats_and_recommendations = CaveatsAndRecommendations(caveats_and_recommendations)

    self.id = id
    self.registered_model_name = registered_model_name
    self.display_name = display_name
    self.archive_file = archive_file
    self.model_details = model_details
    self.intended_use = intended_use
    self.factors = factors
    self.metrics = metrics
    self.evaluation_data = evaluation_data
    self.training_data = training_data
    self.quantitative_analyses = quantitative_analyses
    self.ethical_considerations = ethical_considerations
    self.caveats_and_recommendations = caveats_and_recommendations
def __init__(
    self,
    id: str,
    registered_model_name: Optional[str] = None,
    model_class: Optional[Callable[..., Model]] = None,
    registered_predictor_name: Optional[str] = None,
    display_name: Optional[str] = None,
    task_id: Optional[str] = None,
    model_usage: Optional[Union[str, ModelUsage]] = None,
    model_details: Optional[Union[str, ModelDetails]] = None,
    intended_use: Optional[Union[str, IntendedUse]] = None,
    factors: Optional[Union[str, Factors]] = None,
    metrics: Optional[Union[str, Metrics]] = None,
    evaluation_data: Optional[Union[str, EvaluationData]] = None,
    training_data: Optional[Union[str, TrainingData]] = None,
    quantitative_analyses: Optional[Union[str, QuantitativeAnalyses]] = None,
    model_ethical_considerations: Optional[Union[str, ModelEthicalConsiderations]] = None,
    model_caveats_and_recommendations: Optional[
        Union[str, ModelCaveatsAndRecommendations]
    ] = None,
):
    """
    Build a model card.

    Each card section may be given either as a plain string (coerced into the
    corresponding section object's primary field) or as the structured section
    object itself.

    # Parameters

    id : `str`
        Unique identifier for the card; must be non-empty.
    registered_model_name : `Optional[str]`
        Name the model is registered under; used to look up `model_class`
        via `Model.by_name` when `model_class` is not supplied. An unknown
        name only logs a warning.
    model_class : `Optional[Callable[..., Model]]`
        The model's class/constructor; when available it supplies a default
        `display_name`, default `model_details`, and a default
        `registered_predictor_name` from `model_class.default_predictor`.
    registered_predictor_name : `Optional[str]`
        Registered predictor to pair with the model.
    display_name : `Optional[str]`
        Human-readable model name.
    task_id : `Optional[str]`
        Identifier of the task this model addresses.
    model_usage : `Optional[Union[str, ModelUsage]]`
        A string is treated as the archive file (`ModelUsage(archive_file=...)`).
    model_details, intended_use, factors, metrics, evaluation_data,
    training_data, quantitative_analyses, model_ethical_considerations,
    model_caveats_and_recommendations :
        Card sections; each accepts a string shortcut or the structured
        section object.
    """
    # Deliberate hard failure on a missing/empty id. Kept as an assert (not a
    # raise) so callers that catch AssertionError are unaffected; note asserts
    # are stripped under `python -O`.
    assert id

    if not model_class and registered_model_name:
        try:
            model_class = Model.by_name(registered_model_name)
        except ConfigurationError:
            # Lazy %-style args: the message is only formatted if the record
            # is actually emitted (was an eager str.format before).
            logger.warning("%s is not a registered model.", registered_model_name)

    if model_class:
        display_name = display_name or model_class.__name__
        model_details = model_details or get_description(model_class)
        if not registered_predictor_name:
            registered_predictor_name = model_class.default_predictor  # type: ignore

    # Coerce plain-string shortcuts into their structured section objects.
    if isinstance(model_usage, str):
        model_usage = ModelUsage(archive_file=model_usage)
    if isinstance(model_details, str):
        model_details = ModelDetails(description=model_details)
    if isinstance(intended_use, str):
        intended_use = IntendedUse(primary_uses=intended_use)
    if isinstance(factors, str):
        factors = Factors(relevant_factors=factors)
    if isinstance(metrics, str):
        metrics = Metrics(model_performance_measures=metrics)
    if isinstance(evaluation_data, str):
        evaluation_data = EvaluationData(dataset=evaluation_data)
    if isinstance(training_data, str):
        training_data = TrainingData(dataset=training_data)
    if isinstance(quantitative_analyses, str):
        quantitative_analyses = QuantitativeAnalyses(unitary_results=quantitative_analyses)
    if isinstance(model_ethical_considerations, str):
        model_ethical_considerations = ModelEthicalConsiderations(model_ethical_considerations)
    if isinstance(model_caveats_and_recommendations, str):
        model_caveats_and_recommendations = ModelCaveatsAndRecommendations(
            model_caveats_and_recommendations
        )

    self.id = id
    self.registered_model_name = registered_model_name
    self.registered_predictor_name = registered_predictor_name
    self.display_name = display_name
    self.task_id = task_id
    self.model_usage = model_usage
    self.model_details = model_details
    self.intended_use = intended_use
    self.factors = factors
    self.metrics = metrics
    self.evaluation_data = evaluation_data
    self.training_data = training_data
    self.quantitative_analyses = quantitative_analyses
    self.model_ethical_considerations = model_ethical_considerations
    self.model_caveats_and_recommendations = model_caveats_and_recommendations