Example #1
def test_compare_performance_vis_api(experiment_to_use):
    """Ensure pdf and png figures can be saved via visualization API call.

    :param experiment_to_use: Object containing a trained model and results
        used to test visualization
    :return: None
    """
    experiment = experiment_to_use
    # extract test stats only
    test_stats = experiment.test_stats_full
    viz_outputs = ('pdf', 'png')
    with TemporaryDirectory() as tmpvizdir:
        for viz_output in viz_outputs:
            vis_output_pattern = f'{tmpvizdir}/*.{viz_output}'
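            # pass the same stats twice, labeled as two models, to exercise the comparison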
            visualize.compare_performance(
                [test_stats, test_stats],
                output_feature_name=None,
                model_names=['Model1', 'Model2'],
                output_directory=tmpvizdir,
                file_format=viz_output
            )
            figures = glob.glob(vis_output_pattern)
            assert 1 == len(figures)
Example #2
def compare_perf():
    """Compare the performance of two trained models on the test set."""
    test_file = SCRIPT_DIR / 'rotten_tomatoes_test.csv'
    output_dir = get_ludwig_output_dir()

    model_name = "run"

    experiment_name1 = "rt"
    experiment_dir = experiment_name1 + '_' + model_name
    model_dir1 = output_dir / 'results' / experiment_dir / 'model'

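    # load the first trained model ("rt" experiment) and evaluate it on the test file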
    model1 = LudwigModel.load(model_dir1, backend='local')
    eval_stats1, predictions1, output_dir1 = model1.evaluate(
        dataset=str(test_file))

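    # repeat for the second experiment ("rt_zscore"): load its model and evaluate on the same test file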
    experiment_name2 = "rt_zscore"
    experiment_dir = experiment_name2 + '_' + model_name
    model_dir2 = output_dir / 'results' / experiment_dir / 'model'

    model2 = LudwigModel.load(model_dir2, backend='local')
    eval_stats2, predictions2, output_dir2 = model2.evaluate(
        dataset=str(test_file))

    list_of_eval_stats = [eval_stats1, eval_stats2]
    model_names = [experiment_name1, experiment_name2]
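    # plot both models' metrics for the "recommended" output feature and save PNG output to output_dir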
    compare_performance(
        list_of_eval_stats,
        "recommended",
        model_names=model_names,
        output_directory=output_dir,
        file_format="png",
    )
    print(f'{output_dir=}')
Example #3
def test_compare_performance_vis_api(csv_filename):
    """Ensure pdf and png figures can be saved via visualization API call.

    :param csv_filename: csv fixture from tests.fixtures.filenames.csv_filename
    :return: None
    """
    experiment = Experiment(csv_filename)
    test_stats = experiment.test_stats_full[1]
    viz_outputs = ('pdf', 'png')
    for viz_output in viz_outputs:
        vis_output_pattern = f'{experiment.model.exp_dir_name}/*.{viz_output}'
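        # write the comparison figures for this file format into the experiment directory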
        visualize.compare_performance(
            [test_stats, test_stats],
            output_feature_name=None,
            model_names=['Model1', 'Model2'],
            output_directory=experiment.model.exp_dir_name,
            file_format=viz_output)
        figures = glob.glob(vis_output_pattern)
        assert 2 == len(figures)
    shutil.rmtree(experiment.model.exp_dir_name, ignore_errors=True)
Example #4
    # Define the Ludwig model object that drives model training
    model = LudwigModel(config=model_id + "_config.yaml",
                        logging_level=logging.WARN)

    # initiate model training
    train_stats, _, _ = model.train(
        training_set=training_set,
        validation_set=val_set,
        test_set=test_set,
        experiment_name="balance_example",
        model_name=model_id,
        skip_save_model=True,
    )

    # evaluate model on test_set
    eval_stats, _, _ = model.evaluate(test_set)

    # save eval stats for later use
    list_of_eval_stats.append(eval_stats)

    print(">>>>>>> completed: ", model_id, "\n")

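# after all models have been trained and evaluated, plot their metrics side by side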
compare_performance(
    list_of_eval_stats,
    "Response",
    model_names=list_of_model_ids,
    output_directory="./visualizations",
    file_format="png",
)
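Each snippet above assumes the relevant imports are already in scope. As a rough sketch, the imports these examples rely on are listed below; module paths reflect current Ludwig releases and may differ in older versions, and SCRIPT_DIR / get_ludwig_output_dir in Example #2 are project-local helpers rather than part of Ludwig.

import glob
import logging
import shutil
from tempfile import TemporaryDirectory

from ludwig import visualize                      # visualize.compare_performance(...) in Examples #1 and #3
from ludwig.api import LudwigModel                # LudwigModel(...), .train(), .evaluate(), .load()
from ludwig.visualize import compare_performance  # direct calls in Examples #2 and #4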