def test_check_replicate_summary_method():
    """Valid summary methods are accepted; an unknown method raises ValueError.

    Covers both the happy path (every advertised method passes the check
    without raising) and the error path (an unsupported method name raises
    with an informative message).
    """
    # Every method the library advertises must be accepted without raising.
    for metric in get_available_summary_methods():
        check_replicate_summary_method(metric)

    # An unsupported method must raise ValueError with a helpful message.
    # Fix: the return value was previously bound to an unused local `output`;
    # the call's result is irrelevant here, only the raised exception matters.
    with pytest.raises(ValueError) as ve:
        check_replicate_summary_method("fail")
    assert "fail method not supported. Select one of:" in str(ve.value)
def test_compare_distributions():
    """Check compare_distributions against hardcoded z-scores and the grit pipeline.

    Uses one example compound's replicate similarities as the target
    distribution and DMSO similarities as the control distribution, then
    verifies each available summary method reproduces a known-good z-score
    and agrees exactly with the full ``grit`` computation.
    """
    # Example compound and its profile; these names are referenced via `@`
    # inside the pandas query strings below.
    compound = "BRD-K07857022-002-01-1"
    profile_id = "Metadata_profile_378"

    # All similarity pairs where the example profile is member "a".
    pairs_for_profile = similarity_melted_full_df.query(
        "Metadata_profile_id_pair_a == @profile_id"
    )

    # Column vectors of similarity scores: compound replicates vs. DMSO controls.
    replicate_scores = (
        pairs_for_profile.query("Metadata_broad_sample_pair_b == @compound")
        .similarity_metric.values.reshape(-1, 1)
    )
    dmso_scores = (
        pairs_for_profile.query("Metadata_broad_sample_pair_b == 'DMSO'")
        .similarity_metric.values.reshape(-1, 1)
    )

    # Profile ids of the DMSO control perturbations, for the grit call.
    control_perts = df.query(
        "Metadata_broad_sample == 'DMSO'"
    ).Metadata_profile_id.tolist()

    # Regression guard: known-good z-scores per summary method.
    hardcoded_values_should_not_change = {
        "zscore": {"mean": 5.639379456018854, "median": 5.648269672347573}
    }

    for method in get_available_summary_methods():
        expected = hardcoded_values_should_not_change["zscore"][method]

        score = compare_distributions(
            target_distrib=replicate_scores,
            control_distrib=dmso_scores,
            method="zscore",
            replicate_summary_method=method,
        )
        assert np.round(score, 5) == np.round(expected, 5)

        # compare_distributions must agree exactly with the grit pipeline.
        grit_frame = grit(
            similarity_melted_full_df,
            control_perts=control_perts,
            profile_col="Metadata_profile_id",
            replicate_group_col="Metadata_broad_sample",
            replicate_summary_method=method,
        )
        grit_score = (
            grit_frame.query("perturbation == @profile_id").grit.values[0]
        )
        assert score == grit_score
def test_get_available_summary_methods():
    """The supported summary methods are exactly mean and median, in order."""
    assert get_available_summary_methods() == ["mean", "median"]