def test_load_distributions():
    obj, attributes = general.get_instance_of_ModelContainer()
    tstConfig = TstConfig()
    temp_folder = tstConfig.get_temperory_file_folder()
    # <TO-DO> On hold for now: we will need to create a sample .joblib file
    # and load the 'feature_uniques' and 'feature_summaries' from it.
    pass
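
# A hedged sketch of how this test might look once unblocked. It assumes a
# load_distributions(path) method that mirrors load() and restores the two
# distribution attributes; the method name and signature are inferred from
# the test's name and are not confirmed elsewhere in this file.
#
# def test_load_distributions():
#     fil_nam = create_distributions_file()
#     obj, attributes = general.get_instance_of_ModelContainer()
#     obj.load_distributions(fil_nam)
#     # The loaded file should populate both distribution attributes.
#     assert obj.feature_uniques
#     assert obj.feature_summaries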

def test_publish():
    pass

def test_dump_reference():
    pass

def test_create_merge_request():
    pass

def test_bind_model():
    pass

def test_dump_model():
    pass

def test_load_model():
    pass

def test_get_local_path():
    pass

def test_get_bucket_path():
    pass

def test_dump_distributions():
    # <TO-DO> The method writes a file; we still need to work out where the
    # test file would be written and how it would be cleaned up afterwards.
    obj, attributes = general.get_instance_of_ModelContainer()
    pass
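
# A hedged sketch for when dump_distributions becomes testable. It assumes
# the method accepts a target path (the signature is not confirmed by this
# file); pytest's built-in tmp_path fixture would handle the cleanup the
# TO-DO above is worried about.
#
# def test_dump_distributions(tmp_path):
#     obj, attributes = general.get_instance_of_ModelContainer()
#     out_file = tmp_path / "distributions.joblib"
#     obj.dump_distributions(str(out_file))
#     assert out_file.exists()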

def test_get_model():
    hmlapp, hmlapp_attributes = general.get_instance_of_HmlApp()
    # Just instantiated, so no models are registered yet.
    assert len(hmlapp.models) == 0
    model, model_attr = general.get_instance_of_ModelContainer()
    hmlapp.models["test"] = model
    assert model == hmlapp.get_model("test")

def test_register_model():
    hmlapp, hmlapp_attributes = general.get_instance_of_HmlApp()
    model, model_attr = general.get_instance_of_ModelContainer()
    hmlapp.register_model("test", model)
    assert hmlapp.models["test"] == model
    assert "test" in hmlapp.inference.models
    assert model in hmlapp.inference.models.values()

def test_load():
    fil_nam = create_distributions_file()
    obj, attributes = general.get_instance_of_ModelContainer()
    obj.load(fil_nam)
    # If no error is thrown, mark the test as a success.
    # <TO-DO> Check the content of the load in a refactoring exercise.
    assert True
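
# A hedged sketch of the content check flagged in the TO-DO above. It assumes
# load() populates feature_uniques and feature_summaries (attribute names
# taken from test_analyze_distributions; the behaviour is not confirmed).
#
# def test_load():
#     fil_nam = create_distributions_file()
#     obj, attributes = general.get_instance_of_ModelContainer()
#     obj.load(fil_nam)
#     assert obj.feature_uniques
#     assert obj.feature_summaries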

def test_analyze_distributions():
    obj, attributes = general.get_instance_of_ModelContainer()
    df, numDict, catDict, tarDict = data_frame_utility.get_numerical_categorical_dataframe(
        row_count=50)
    obj.analyze_distributions(df)
    actual_cat_feat_value_dict = obj.feature_uniques
    actual_feature_desc = obj.feature_summaries
    # Each numerical feature is expected to map to its describe() statistics.
    expected_feature_desc = {
        ky: df[ky].describe().to_dict()
        for ky in numDict
    }
    # Comparing categorical features.
    assert general.compare_dictionaries_equal(actual_cat_feat_value_dict,
                                              catDict)
    # Comparing numerical features.
    assert general.compare_dictionaries_equal(actual_feature_desc,
                                              expected_feature_desc)

def test_build_training_matrix():
    obj, attributes = general.get_instance_of_ModelContainer()
    df, numDict, catDict, tarDict = data_frame_utility.get_numerical_categorical_dataframe(
        row_count=50)
    list_of_categorical_columns = attributes["features_categorical"]
    expected_unique_value_dict = catDict
    # Each categorical column is expanded into one "{col}:{val}" column per
    # unique value, while the numerical columns pass through unchanged.
    col_names_removed = list(expected_unique_value_dict.keys())
    new_added_categorical_columns = [
        f"{col}:{val}" for col in col_names_removed
        for val in expected_unique_value_dict[col]
    ]
    numerical_columns = list(numDict.keys())
    expected_column_count = len(numerical_columns) + len(
        new_added_categorical_columns)
    matrix = obj.build_training_matrix(df)
    actual_column_count = matrix.shape[1]  # shape[1] is the column count

    # Checking if the column count matches.
    assert actual_column_count == expected_column_count

def test_init():
    obj, expected_values = general.get_instance_of_ModelContainer()
    retBool, retLst = general.check_attributes_in_object(obj, expected_values)
    assert retBool