def test_get_db_filenames():
    """Tests that the list of table names are returned accurately"""
    test_db = 'test_database.db'
    test_filename1 = 'file1.csv'
    decoded_dataframe1 = pd.DataFrame({
        'Cycle_Index': [1, 1, 2],
        'Data_Point': [0, 1, 2],
        'Voltage(V)': [0.3, 0.4, 0.5],
        'Current(A)': [1.5, 1.4, 1.3],
        'Discharge_Capacity(Ah)': [0, 0, 0],
        'Charge_Capacity(Ah)': [10, 20, 30],
        'Step_Index': [1, 1, 1]
    })
    test_datatype1 = 'ARBIN'

    test_filename2 = 'file2.csv'
    decoded_dataframe2 = pd.DataFrame({
        'Cycle_Index': [1, 1, 1],
        'Data_Point': [10, 11, 12],
        'Voltage(V)': [10.3, 10.4, 10.5],
        'Current(A)': [11.5, 11.4, 11.3],
        'Discharge_Capacity(Ah)': [0, 10, 10],
        'Charge_Capacity(Ah)': [110, 120, 130],
        'Step_Index': [0, 0, 0]
    })
    test_datatype2 = 'ARBIN'

    process_data(test_filename1, test_db, decoded_dataframe1, test_datatype1)
    process_data(test_filename2, test_db, decoded_dataframe2, test_datatype2)

    names_list = get_db_filenames(test_db)
    assert names_list == ['file1', 'file2']

    os.remove("test_database.db")
    return
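# NOTE: the remaining tests reference module-level fixtures (test_db,
# test_filename, decoded_dataframe, test_datatype, and their MACCOR
# counterparts) plus imports such as os, numpy as np, pandas as pd, and the
# package functions under test, all defined elsewhere in the test module.
# A minimal sketch of the assumed ARBIN fixtures, mirroring the schema used
# above (values are illustrative, not the project's actual fixture data):
#
# test_db = 'test_database.db'
# test_filename = 'test_data.csv'
# test_datatype = 'ARBIN'
# decoded_dataframe = pd.DataFrame({
#     'Cycle_Index': [1, 1, 2],
#     'Data_Point': [0, 1, 2],
#     'Voltage(V)': [0.3, 0.4, 0.5],
#     'Current(A)': [1.5, 1.4, 1.3],
#     'Discharge_Capacity(Ah)': [0, 0, 0],
#     'Charge_Capacity(Ah)': [10, 20, 30],
#     'Step_Index': [1, 1, 1]})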
def test_parse_update_master():
    """Tests the parse update master function"""
    process_data(test_filename, test_db, decoded_dataframe, test_datatype)
    core_test_filename = get_filename_pref(test_filename)
    ans = parse_update_master(core_test_filename, test_db, test_datatype,
                              decoded_dataframe)
    assert ans is None
    master_table = get_file_from_database('master_table', test_db)
    name = get_filename_pref(test_filename)
    assert name + 'Raw' in list(master_table['Raw_Data_Prefix'])
    assert name + 'CleanSet' in list(master_table['Cleaned_Data_Prefix'])
    assert name + '-CleanCycle' in list(master_table['Cleaned_Cycles_Prefix'])
    assert name + '-descriptors' in list(master_table['Descriptors_Prefix'])
    os.remove(test_db)
    return
def test_if_file_exists_in_db():
    """Tests the if_file_exists_in_db function gives the 
    correct result when there is a file that does exist in 
    the test database, when ther is a file that does not 
    exist in the database, and when there is a database name 
    given for a database that does not exist."""
    process_data(test_filename, test_db, decoded_dataframe, test_datatype)
    answer = if_file_exists_in_db(test_db, test_filename)
    assert answer is True
    answer2 = if_file_exists_in_db(test_db, 'ThisFileDoesNotExist.csv')
    assert answer2 is False
    assert if_file_exists_in_db('NotaRealDB.db', test_filename) is False
    assert if_file_exists_in_db('NotaRealDB.db',
                                'ThisFileDoesNotExist.csv') is False
    os.remove(test_db)
    return
def test_pop_with_db():
    """test that the clean and raw dataframes are 
    returned from this function from a file'"""

    process_data(test_filename, test_db, decoded_dataframe, test_datatype)

    df_clean, df_raw = pop_with_db(test_filename, test_db)

    assert df_clean is not None
    assert type(df_clean) == pd.DataFrame

    assert df_raw is not None
    assert type(df_raw) == pd.DataFrame

    assert 'Smoothed_dQ/dV' in df_clean.columns
    os.remove(test_db)
    return
def test_get_model_dfs():
    """This tests that the model results returned make sense 
    with respect to each other. First tests types of returned 
    variables, and also checks that the number of peaks found 
    in either charge or discharge sections correspond to the
    number of unique prefixes in the model vals dictionary."""

    process_data(test_filename, test_db, decoded_dataframe,
                 test_datatype)

    df_clean, df_raw = pop_with_db(test_filename, test_db)
    cyc = 1
    lenmax = len(df_clean)
    peak_thresh = 0.7
    new_df_mody, model_c_vals, model_d_vals, \
        peak_heights_c, peak_heights_d = get_model_dfs(df_clean,
                                                       test_datatype,
                                                       cyc, lenmax,
                                                       peak_thresh)

    assert type(new_df_mody) == pd.DataFrame
    assert type(model_c_vals) == dict and type(model_d_vals) == dict
    assert type(peak_heights_c) == list
    assert type(peak_heights_d) == list

    # Every cycle's fit should include at least base_amplitude, base_center,
    # base_fwhm, base_height, and base_sigma.
    for key in ['base_amplitude', 'base_center', 'base_fwhm',
                'base_height', 'base_sigma']:
        assert key in model_c_vals.keys()
        assert key in model_d_vals.keys()
        assert type(model_c_vals[key]) in (np.float64, np.float32, float, int)
        assert type(model_d_vals[key]) in (np.float64, np.float32, float, int)

    # There should be one entry in each peak_heights list for every
    # unique non-'base' prefix in the corresponding model vals dictionary.
    for item in [[model_d_vals, peak_heights_d], [model_c_vals, peak_heights_c]]:
        pref_list = []
        for key in item[0].keys():
            pref = key.split('_')[0]
            pref_list.append(pref)
        pref_set = set(pref_list)
        # minus one because of the 'base' prefix
        assert len(pref_set) - 1 == len(item[1])
    os.remove(test_db)
    return
def test_get_table_names():
    """Tests that the correct table names are returned"""
    # first make sure all data is processed
    process_data(test_filename, test_db, decoded_dataframe, test_datatype)
    new_peak_thresh = 0.7
    core_filename = get_filename_pref(test_filename)
    df_clean = get_file_from_database(core_filename + 'CleanSet', test_db)
    feedback = generate_model(df_clean, core_filename, new_peak_thresh,
                              test_db)
    assert core_filename + 'ModParams' in get_table_names(test_db)
    param_dicts_to_df(core_filename + 'ModParams', test_db)

    names_list = get_table_names(test_db)
    expected_list = [
        'master_table', 'test_data-CleanCycle1', 'test_data-Cycle1',
        'test_data-ModPoints', 'test_data-descriptors', 'test_dataCleanSet',
        'test_dataModParams', 'test_dataRaw', 'test_dataUnalteredRaw'
    ]
    assert set(names_list) == set(expected_list)
    os.remove(test_db)
    return
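# NOTE: the MACCOR test below assumes analogous module-level fixtures
# (test_filename_mac, decoded_dataframe_mac, test_datatype_mac), defined
# elsewhere in the test module. A hypothetical sketch, with assumed names:
#
# test_filename_mac = 'test_data_mac.csv'
# test_datatype_mac = 'MACCOR'
# decoded_dataframe_mac = decoded_to_dataframe(
#     raw_maccor_contents, test_datatype_mac, test_filename_mac)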
def test_generate_model_for_maccor():
    """Tests that three new tables are generated in the database 
    in the process of generating the model. Acutal model generation
    functions are tested further outside of this wrapper."""
    peak_thresh = 0.7

    process_data(test_filename_mac, test_db, decoded_dataframe_mac,
                 test_datatype_mac)
    filename_pref = get_filename_pref(test_filename_mac)
    df_clean, df_raw = pop_with_db(test_filename_mac, test_db)

    feedback = generate_model(df_clean, filename_pref,
                              peak_thresh, test_db)
    assert type(feedback) == str
    names_list = get_table_names(test_db)

    list_new_tables = ['-ModPoints', 'ModParams', '-descriptors']
    for i in list_new_tables:
        assert filename_pref + i in names_list
    os.remove(test_db)
    return
def parse_contents(decoded,
                   filename,
                   datatype,
                   database,
                   windowlength=9,
                   polyorder=3):
    """Checks if the uploaded file exists in the database yet. Will
    process and add that file to the database if it doesn't appear in
    the master table yet. Otherwise will return html.Div that the
    file already exists in the database. """

    cleanset_name = get_filename_pref(filename) + 'CleanSet'
    # Strip any filepath from the filename, leaving the clean set name as
    # it appears in the database. Then check whether the database exists
    # and, if it does, whether the file is already in it.
    ans_p = if_file_exists_in_db(database, filename)
    if ans_p:
        df_clean = get_file_from_database(cleanset_name, database)
        new_peak_thresh = 0.7
        feedback = generate_model(df_clean, filename, new_peak_thresh,
                                  database)
        return 'That file exists in the database: ' + \
            str(get_filename_pref(filename))
    else:
        try:
            decoded_dataframe = decoded_to_dataframe(decoded, datatype,
                                                     filename)
            process_data(filename, database, decoded_dataframe, datatype,
                         windowlength, polyorder)
            df_clean = get_file_from_database(cleanset_name, database)
            new_peak_thresh = 0.7
            feedback = generate_model(df_clean, filename, new_peak_thresh,
                                      database)
            return 'New file has been processed: ' + \
                str(get_filename_pref(filename))
        except Exception as e:
            return 'There was a problem uploading that file. ' + \
                'Check that the format of the uploaded file is as expected. ' + \
                str(e)
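# Usage sketch (illustrative, not part of this module): in a Dash upload
# callback the file contents arrive base64-encoded, so a caller would
# typically decode them before handing them to parse_contents. The filename
# and database name below are assumptions.
#
#     content_type, content_string = contents.split(',')
#     decoded = base64.b64decode(content_string)
#     message = parse_contents(decoded, 'test_data.csv', 'ARBIN', 'dQdV.db')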
def test_process_data():
    """Tests the process data function adds the correct
    datatables to the database."""
    ans = process_data(test_filename, test_db, decoded_dataframe,
                       test_datatype)
    # shouldn't return anything:
    assert ans is None
    names_list = get_table_names(test_db)
    assert 'master_table' in names_list
    list_new_tables = ['Raw', '-CleanCycle1', '-Cycle1', 'CleanSet']
    for i in list_new_tables:
        assert get_filename_pref(test_filename) + i in names_list
    os.remove(test_db)
    return
def test_param_dicts_to_df():
    """Tests the parameter dictionaries generated by the model
    functions are parsed nicely and added to the database in the 
    modparams table"""

    process_data(test_filename, test_db, decoded_dataframe, test_datatype)
    core_test_filename = get_filename_pref(test_filename)
    new_peak_thresh = 0.7
    df_clean = get_file_from_database(core_test_filename + 'CleanSet', test_db)
    feedback = generate_model(
        df_clean, core_test_filename, new_peak_thresh, test_db)
    assert core_test_filename + 'ModParams' in get_table_names(test_db)
    param_dicts_to_df(core_test_filename + 'ModParams', test_db)
    assert core_test_filename + '-descriptors' in get_table_names(test_db)
    desc_df = get_file_from_database(
        core_test_filename + '-descriptors', test_db)
    expected_cols = ['d_gauss_sigma', 'd_gauss_center', 'd_gauss_amplitude',
                     'd_gauss_fwhm', 'd_gauss_height', 'd_cycle_number',
                     'c_gauss_sigma', 'c_gauss_center', 'c_gauss_amplitude',
                     'c_gauss_fwhm', 'c_gauss_height', 'c_cycle_number']
    for col in expected_cols:
        assert col in desc_df.columns
    os.remove(test_db)
    return