def test_confusion_matrix_vis_api(csv_filename):
    """Ensure pdf and png figures can be saved via visualization API call.

    :param csv_filename: csv fixture from tests.fixtures.filenames.csv_filename
    :return: None
    """
    experiment = Experiment(csv_filename)
    # test_stats_full appears to be a sequence whose second element holds the
    # evaluation statistics — TODO confirm against the Experiment fixture.
    test_stats = experiment.test_stats_full[1]
    viz_outputs = ('pdf', 'png')
    try:
        for viz_output in viz_outputs:
            # Glob pattern matching every saved figure of the current format
            # (not only pdf — the old name *_pdf was misleading).
            vis_output_pattern = experiment.model.exp_dir_name + '/*.{}'.format(
                viz_output
            )
            visualize.confusion_matrix(
                [test_stats, test_stats],
                experiment.ground_truth_metadata,
                experiment.output_feature_name,
                top_n_classes=[0],
                normalize=False,
                model_names=['Model1', 'Model2'],
                output_directory=experiment.model.exp_dir_name,
                file_format=viz_output
            )
            figures = glob.glob(vis_output_pattern)
            # Two models are passed, and each produces two figures per format.
            assert 4 == len(figures)
    finally:
        # Run cleanup even when an assertion above fails, so a failed run does
        # not leak the experiment output directory into later tests.
        shutil.rmtree(experiment.model.exp_dir_name, ignore_errors=True)
def test_confusion_matrix_vis_api(experiment_to_use):
    """Ensure pdf and png figures can be saved via visualization API call.

    :param experiment_to_use: Object containing trained model and results to
        test visualization
    :return: None
    """
    experiment = experiment_to_use
    # Only the test statistics are needed for the confusion matrix.
    test_stats = experiment.test_stats_full
    with TemporaryDirectory() as tmpvizdir:
        # Exercise both supported figure formats against a scratch directory
        # that is removed automatically when the context exits.
        for file_format in ('pdf', 'png'):
            figure_pattern = f'{tmpvizdir}/*.{file_format}'
            visualize.confusion_matrix(
                [test_stats, test_stats],
                experiment.ground_truth_metadata,
                experiment.output_feature_name,
                top_n_classes=[0],
                normalize=False,
                model_names=['Model1', 'Model2'],
                output_directory=tmpvizdir,
                file_format=file_format
            )
            # Two models, two figures each per format.
            assert len(glob.glob(figure_pattern)) == 4
"./profile_images") with open("./config.yaml") as f: config = yaml.safe_load(f.read()) model = LudwigModel(config, logging_level=logging.INFO) train_stats, preprocessed_data, output_directory = model.train( dataset=training_set) # Generates predictions and performance statistics for the test set. test_stats, predictions, output_directory = model.evaluate( test_set, collect_predictions=True, collect_overall_stats=True) confusion_matrix( [test_stats], model.training_set_metadata, "account_type", top_n_classes=[2], model_names=[""], normalize=True, output_directory="./visualizations", file_format="png", ) # Visualizes learning curves, which show how performance metrics changed over time during training. learning_curves(train_stats, output_feature_name="account_type", output_directory="./visualizations", file_format="png")