コード例 #1
0
def test_get_filename_pref():
    """Check that get_filename_pref strips any directory path and the
    file extension, leaving only the bare filename to be used as the
    database table-name prefix."""
    # path -> expected prefix
    cases = {
        'Directory1/Directory2/anotherDirectory/real_filename.csv':
            'real_filename',
        'dir100/myfilefoo.someextension': 'myfilefoo',
        'myfilename.csv': 'myfilename',
        'foo': 'foo',
    }
    for path, expected in cases.items():
        assert get_filename_pref(path) == expected
    return
コード例 #2
0
def test_parse_update_master():
    """Tests that parse_update_master registers all the expected
    table-name prefixes for a processed file in the master table."""
    process_data(test_filename, test_db, decoded_dataframe, test_datatype)
    core_test_filename = get_filename_pref(test_filename)
    ans = parse_update_master(core_test_filename, test_db, test_datatype,
                              decoded_dataframe)
    # parse_update_master works purely by side effect on the database.
    assert ans is None
    master_table = get_file_from_database('master_table', test_db)
    # Reuse the prefix computed above instead of re-parsing the filename.
    name = core_test_filename
    assert name + 'Raw' in list(master_table['Raw_Data_Prefix'])
    assert name + 'CleanSet' in list(master_table['Cleaned_Data_Prefix'])
    assert name + '-CleanCycle' in list(master_table['Cleaned_Cycles_Prefix'])
    assert name + '-descriptors' in list(master_table['Descriptors_Prefix'])
    os.remove(test_db)
    return
コード例 #3
0
def test_get_clean_sets():
    """Tests that get_clean_sets merges per-cycle clean data back into a
    single DataFrame covering every cycle in the raw data."""
    test_db = 'my_great_db.db'
    test_filename = 'my_great_data.xlsx'
    # Minimal two-cycle ARBIN-shaped raw dataset.
    test_decoded_df = pd.DataFrame({
        'Cycle_Index': [1, 1, 2, 2, 2],
        'Data_Point': [0, 1, 2, 3, 4],
        'Voltage(V)': [4, 8, 16, 8, 4],
        'Current(A)': [2, 4, 6, 8, 12],
        'Discharge_Capacity(Ah)': [10, 0, 30, 0, 10],
        'Charge_Capacity(Ah)': [0, 20, 0, 10, 0],
        'Step_Index': [1, 0, 1, 0, 1]
    })
    test_datatype = 'ARBIN'
    # initialize database:
    init_master_table(test_db)
    core_test_filename = get_filename_pref(test_filename)
    parse_update_master(core_test_filename, test_db, test_datatype,
                        test_decoded_df)
    # set up by adding raw data frame to database
    cycle_dict = load_sep_cycles(core_test_filename, test_db, test_datatype)
    clean_cycle_dict = get_clean_cycles(cycle_dict,
                                        core_test_filename,
                                        test_db,
                                        test_datatype,
                                        windowlength=9,
                                        polyorder=3)
    result = get_clean_sets(clean_cycle_dict, core_test_filename, test_db)
    # isinstance is the idiomatic type check (PEP 8).
    assert isinstance(result, pd.DataFrame)
    assert list(result['Cycle_Index'].unique()) == [1, 2]
    os.remove(test_db)
コード例 #4
0
def pop_with_db(filename, database):
    """Returns dataframes that can be used to populate the app graphs.

    Finds the already existing file in the database and returns the
    cleaned version and the raw version, both as dataframes. If the file
    is not present in the database, returns (None, None).
    """
    cleanset_name = get_filename_pref(filename) + 'CleanSet'
    rawset_name = get_filename_pref(filename) + 'Raw'
    if if_file_exists_in_db(database, filename):
        # then the file exists in the database and we can just read it
        df_clean = get_file_from_database(cleanset_name, database)
        df_raw = get_file_from_database(rawset_name, database)
        # The column names are not needed here; the call is kept so that
        # an unrecognized datatype still raises as it did before.
        datatype = df_clean['datatype'].iloc[0]
        col_variables(datatype)
    else:
        df_clean = None
        df_raw = None

    return df_clean, df_raw
コード例 #5
0
def parse_contents(decoded,
                   filename,
                   datatype,
                   database,
                   windowlength=9,
                   polyorder=3):
    """Checks if the uploaded file exists in the database yet. Will
    process and add that file to the database if it doesn't appear in
    the master table yet. Returns a user-facing status string in all
    cases (already present / newly processed / failed)."""

    # This gets rid of any filepath in the filename and just leaves the
    # clean set name as it appears in the database.
    cleanset_name = get_filename_pref(filename) + 'CleanSet'
    if if_file_exists_in_db(database, filename):
        df_clean = get_file_from_database(cleanset_name, database)
        new_peak_thresh = 0.7
        # generate_model is called for its database side effects; its
        # feedback string is not surfaced to the caller here.
        generate_model(df_clean, filename, new_peak_thresh, database)
        return 'That file exists in the database: ' + \
            str(get_filename_pref(filename))
    else:
        try:
            decoded_dataframe = decoded_to_dataframe(decoded, datatype,
                                                     filename)
            process_data(filename, database, decoded_dataframe, datatype,
                         windowlength, polyorder)
            df_clean = get_file_from_database(cleanset_name, database)
            new_peak_thresh = 0.7
            generate_model(df_clean, filename, new_peak_thresh, database)
            return 'New file has been processed: ' + \
                str(get_filename_pref(filename))
        except Exception as e:
            # Broad catch is deliberate: any processing failure is
            # reported back to the user as text instead of crashing.
            # Note the trailing space before str(e) so the message and
            # the exception text are not run together.
            return 'There was a problem uploading that file. ' + \
                'Check the format of the upload file is as expected. ' + \
                str(e)
コード例 #6
0
def test_process_data():
    """Tests the process data function adds the correct
    datatables to the database."""
    ans = process_data(test_filename, test_db, decoded_dataframe,
                       test_datatype)
    # process_data works by side effect and shouldn't return anything;
    # identity comparison with None is the idiomatic check (PEP 8).
    assert ans is None
    names_list = get_table_names(test_db)
    assert 'master_table' in names_list
    # Every processed file should produce these four suffixed tables.
    list_new_tables = ['Raw', '-CleanCycle1', '-Cycle1', 'CleanSet']
    for suffix in list_new_tables:
        assert get_filename_pref(test_filename) + suffix in names_list
    os.remove(test_db)
    return
コード例 #7
0
def test_get_table_names():
    """Verify that after a full process/model/descriptor pipeline the
    database contains exactly the expected set of table names."""
    # First make sure all data is processed.
    process_data(test_filename, test_db, decoded_dataframe, test_datatype)
    core_filename = get_filename_pref(test_filename)
    df_clean = get_file_from_database(core_filename + 'CleanSet', test_db)
    generate_model(df_clean, core_filename, 0.7, test_db)
    assert core_filename + 'ModParams' in get_table_names(test_db)
    param_dicts_to_df(core_filename + 'ModParams', test_db)

    expected_list = [
        'master_table', 'test_data-CleanCycle1', 'test_data-Cycle1',
        'test_data-ModPoints', 'test_data-descriptors', 'test_dataCleanSet',
        'test_dataModParams', 'test_dataRaw', 'test_dataUnalteredRaw'
    ]
    assert set(get_table_names(test_db)) == set(expected_list)
    os.remove(test_db)
    return
コード例 #8
0
def test_generate_model_for_maccor():
    """Tests that three new tables are generated in the database
    in the process of generating the model. Actual model generation
    functions are tested further outside of this wrapper."""
    peak_thresh = 0.7

    process_data(test_filename_mac, test_db, decoded_dataframe_mac,
                 test_datatype_mac)
    filename_pref = get_filename_pref(test_filename_mac)
    df_clean, df_raw = pop_with_db(test_filename_mac, test_db)

    feedback = generate_model(df_clean, filename_pref,
                              peak_thresh, test_db)
    # isinstance is the idiomatic type check (PEP 8).
    assert isinstance(feedback, str)
    names_list = get_table_names(test_db)

    # Model generation should add these three suffixed tables.
    list_new_tables = ['-ModPoints', 'ModParams', '-descriptors']
    for suffix in list_new_tables:
        assert filename_pref + suffix in names_list
    os.remove(test_db)
    return
コード例 #9
0
def test_param_dicts_to_df():
    """Check that the parameter dictionaries produced by the model
    functions are parsed into the descriptors table with all expected
    gaussian-fit columns for both charge and discharge."""
    process_data(test_filename, test_db, decoded_dataframe, test_datatype)
    core_test_filename = get_filename_pref(test_filename)
    df_clean = get_file_from_database(core_test_filename + 'CleanSet', test_db)
    feedback = generate_model(
        df_clean, core_test_filename, 0.7, test_db)
    assert core_test_filename + 'ModParams' in get_table_names(test_db)
    param_dicts_to_df(core_test_filename + 'ModParams', test_db)
    assert core_test_filename + '-descriptors' in get_table_names(test_db)
    desc_df = get_file_from_database(
        core_test_filename + '-descriptors', test_db)
    # One set of gaussian descriptors per direction (d_ = discharge,
    # c_ = charge).
    fit_fields = ['gauss_sigma', 'gauss_center', 'gauss_amplitude',
                  'gauss_fwhm', 'gauss_height', 'cycle_number']
    for prefix in ('d_', 'c_'):
        for field in fit_fields:
            assert prefix + field in desc_df.columns
    os.remove(test_db)
    return
コード例 #10
0
def test_load_sep_cycles():
    """Check that load_sep_cycles splits the raw data into one entry
    per cycle, keyed by cycle index."""
    db_name = 'my_great_db.db'
    data_name = 'my_great_data.xlsx'
    # Minimal two-cycle ARBIN-shaped raw dataset.
    raw_df = pd.DataFrame({
        'Cycle_Index': [1, 1, 2, 2, 2],
        'Data_Point': [0, 1, 2, 3, 4],
        'Voltage(V)': [4, 8, 16, 8, 4],
        'Current(A)': [2, 4, 6, 8, 12],
        'Discharge_Capacity(Ah)': [10, 0, 30, 0, 10],
        'Charge_Capacity(Ah)': [0, 20, 0, 10, 0],
        'Step_Index': [1, 0, 1, 0, 1]
    })
    datatype = 'ARBIN'
    # Initialize the database and register the raw dataframe.
    init_master_table(db_name)
    prefix = get_filename_pref(data_name)
    parse_update_master(prefix, db_name, datatype, raw_df)
    result = load_sep_cycles(prefix, db_name, datatype)
    # There are two cycles in this test data.
    assert list(result.keys()) == [1, 2]
    os.remove(db_name)
    return