Code Example #1
def train(models, train_set, eval_set=None, silent=False):
    """Train all model for production and save them

    Args:
        models (list of str): Model names. Pass this if you want to train
            just a particular set of models.
        train_set (dg.enums.Dataset): Dataset to train on
        eval_set (dg.enums.Dataset): Dataset to use for evaluation during
            training.
        silent (bool): Don't print details to standard out.
    """
    config = Config()
    model_dir = config.get_model_dir()
    if not silent:
        print('Model dir: ', model_dir)

    bar(silent=silent)
    for model_id in models:
        model = config.models[model_id].set_params(
            **config.get_params(model_id))
        datasets = config.get_datasets(model.id)
        # Resolve Dataset enum members to this model's configured datasets.
        # Local names keep the loop from overwriting the train_set/eval_set
        # arguments and silently reusing the first model's datasets for
        # every subsequent model.
        model_train_set = (datasets[train_set.value]
                           if isinstance(train_set, Dataset) else train_set)
        model_eval_set = (datasets[eval_set.value]
                          if isinstance(eval_set, Dataset) else eval_set)
        train_model(model,
                    train_set=model_train_set,
                    eval_set=model_eval_set,
                    model_dir=model_dir,
                    save=True,
                    silent=silent)
        bar(silent=silent)
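
For reference, here is a minimal usage sketch of train(). The model names and the Dataset members (TRAIN, EVAL) are illustrative assumptions, and the import follows the dg.enums.Dataset path mentioned in the docstring.

from dg.enums import Dataset

# Hypothetical model names; use the keys defined in your Config.
train(models=['baseline', 'boosted_trees'],
      train_set=Dataset.TRAIN,   # assumed enum member
      eval_set=Dataset.EVAL,     # assumed enum member
      silent=False)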
Code Example #2
def evaluate_model(model, datasets, silent=False):
    """Evaluate a single model

    Args:
        model (dg.Model): Model to evaluate
        datasets (list of dg.enums.Dataset): List of datasets used for
            evaluation.
        silent (bool): Don't print details to standard out.
    Returns:
        dict: Evaluation metrics
    """
    config = Config()
    metrics = config.get('metrics.all', None)
    if not silent:
        print('Evaluating:', model.id)
    db = persistence.Database()
    # Reuse previously stored metrics so datasets are not re-scored.
    old_metrics = db.get(model)
    new_metrics = deepcopy(old_metrics)
    model_datasets = config.get_datasets(model.id)
    for ds in datasets:
        # Score only datasets that are configured for this model and that
        # have not been evaluated yet.
        if (new_metrics.get(ds.value, None) is None
                and model_datasets[ds.value] is not None):
            score = model.score_dataset(model_datasets[ds.value],
                                        metrics=metrics)
            new_metrics[ds.value] = (score if isinstance(score, dict)
                                     else {'score': score})
    # Persist the metrics only if they changed.
    if old_metrics != new_metrics:
        db.add(model, new_metrics)
    if not silent:
        print_metrics(new_metrics)
    return metrics_to_dict(model, new_metrics)
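
And a similarly hedged sketch of calling evaluate_model(), assuming Config and Dataset are importable from the dg package (the exact module paths are not shown in these excerpts). The 'baseline' id and the enum members are assumptions; the model instantiation mirrors the loop in Code Example #1.

from dg.enums import Dataset

config = Config()
# Hypothetical model id; set_params mirrors the train() loop above.
model = config.models['baseline'].set_params(**config.get_params('baseline'))
results = evaluate_model(model, datasets=[Dataset.TRAIN, Dataset.EVAL])
# results is the dict produced by metrics_to_dict(model, new_metrics)
print(results)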