Ejemplo n.º 1
0
def test_get_clean_sets():
    """get_clean_sets should recombine cleaned cycles into a single
    DataFrame covering every cycle present in the raw data."""
    test_db = 'my_great_db.db'
    test_filename = 'my_great_data.xlsx'
    test_decoded_df = pd.DataFrame({
        'Cycle_Index': [1, 1, 2, 2, 2],
        'Data_Point': [0, 1, 2, 3, 4],
        'Voltage(V)': [4, 8, 16, 8, 4],
        'Current(A)': [2, 4, 6, 8, 12],
        'Discharge_Capacity(Ah)': [10, 0, 30, 0, 10],
        'Charge_Capacity(Ah)': [0, 20, 0, 10, 0],
        'Step_Index': [1, 0, 1, 0, 1]
    })
    test_datatype = 'ARBIN'
    # initialize database:
    init_master_table(test_db)
    try:
        core_test_filename = get_filename_pref(test_filename)
        parse_update_master(core_test_filename, test_db, test_datatype,
                            test_decoded_df)
        # set up by adding raw data frame to database
        cycle_dict = load_sep_cycles(core_test_filename, test_db,
                                     test_datatype)
        clean_cycle_dict = get_clean_cycles(cycle_dict,
                                            core_test_filename,
                                            test_db,
                                            test_datatype,
                                            windowlength=9,
                                            polyorder=3)
        result = get_clean_sets(clean_cycle_dict, core_test_filename, test_db)
        # isinstance is the idiomatic type check (was type(...) ==)
        assert isinstance(result, pd.DataFrame)
        # the fixture data contains exactly two cycles
        assert list(result['Cycle_Index'].unique()) == [1, 2]
    finally:
        # remove the database even if an assertion fails, so later
        # tests never see a stale file
        os.remove(test_db)
Ejemplo n.º 2
0
def test_update_master_table():
    """update_master_table should append the given row to the master
    table, and return [{}] when called with a None update dict."""
    test_db = 'amazing_database2.db'
    init_master_table(test_db)
    try:
        update_dict = {
            'Dataset_Name': 'my_dataset',
            'Raw_Data_Prefix': 'raw',
            'Cleaned_Data_Prefix': 'clean',
            'Cleaned_Cycles_Prefix': 'cycles',
            'Descriptors_Prefix': 'desc'
        }
        update_master_table(update_dict, test_db)
        test_df = get_file_from_database('master_table', test_db)
        expected = pd.DataFrame({
            'Dataset_Name': ['my_dataset'],
            'Raw_Data_Prefix': ['raw'],
            'Cleaned_Data_Prefix': ['clean'],
            'Cleaned_Cycles_Prefix': ['cycles'],
            'Descriptors_Prefix': ['desc']
        })
        assert pd.DataFrame.equals(test_df, expected)

        # negative path: a None update dict is rejected with [{}]
        neg_result = update_master_table(None, test_db)
        assert neg_result == [{}]
    finally:
        # clean up the database file even when an assertion fails
        os.remove(test_db)
    return
Ejemplo n.º 3
0
def process_data(file_name,
                 database_name,
                 decoded_dataframe,
                 datatype,
                 windowlength=9,
                 polyorder=3):
    """Run the full cleaning pipeline for one raw data file.

    Separates the raw data into cycles, cleans each cycle, stores the
    per-cycle results, then recombines the cleaned cycles into one
    clean set. All results are persisted to ``database_name``; nothing
    is returned.

    Parameters
    ----------
    file_name : str
        Name of the raw data file; reduced to its core prefix.
    database_name : str
        Path to the SQLite database; created if it does not exist.
    decoded_dataframe : pandas.DataFrame
        Decoded raw data for the file.
    datatype : str
        Instrument data format (e.g. 'ARBIN').
    windowlength, polyorder : int
        Smoothing parameters forwarded to get_clean_cycles.
    """
    if not os.path.exists(database_name):
        init_master_table(database_name)
    core_file_name = get_filename_pref(file_name)
    # skip files whose clean set has already been generated
    if core_file_name + 'CleanSet' in get_table_names(database_name):
        return
    parse_update_master(core_file_name, database_name, datatype,
                        decoded_dataframe)
    cycle_dict = load_sep_cycles(core_file_name, database_name, datatype)
    clean_cycle_dict = get_clean_cycles(cycle_dict, core_file_name,
                                        database_name, datatype,
                                        windowlength, polyorder)
    # called for its side effect of persisting the combined clean set;
    # the return value was previously bound to an unused local
    get_clean_sets(clean_cycle_dict, core_file_name, database_name)
    return
Ejemplo n.º 4
0
def test_init_master_table():
    """init_master_table should create the database file containing an
    empty master table with the expected column set."""
    test_db = 'new_database.db'
    init_master_table(test_db)
    try:
        assert os.path.exists(test_db)
        init_table = get_file_from_database('master_table', test_db)
        expected_cols = [
            'Dataset_Name', 'Raw_Data_Prefix', 'Cleaned_Data_Prefix',
            'Cleaned_Cycles_Prefix', 'Descriptors_Prefix'
        ]
        # freshly initialized table has the schema but no rows
        assert init_table.empty
        assert set(expected_cols) == set(init_table.columns)
    finally:
        # remove the database even if an assertion fails
        os.remove(test_db)
    return
Ejemplo n.º 5
0
def test_load_sep_cycles():
    """load_sep_cycles should split the raw data into one entry per
    cycle, keyed by cycle index."""
    test_db = 'my_great_db.db'
    test_filename = 'my_great_data.xlsx'
    test_decoded_df = pd.DataFrame({
        'Cycle_Index': [1, 1, 2, 2, 2],
        'Data_Point': [0, 1, 2, 3, 4],
        'Voltage(V)': [4, 8, 16, 8, 4],
        'Current(A)': [2, 4, 6, 8, 12],
        'Discharge_Capacity(Ah)': [10, 0, 30, 0, 10],
        'Charge_Capacity(Ah)': [0, 20, 0, 10, 0],
        'Step_Index': [1, 0, 1, 0, 1]
    })
    test_datatype = 'ARBIN'
    # initialize database:
    init_master_table(test_db)
    try:
        core_test_filename = get_filename_pref(test_filename)
        parse_update_master(core_test_filename, test_db, test_datatype,
                            test_decoded_df)
        # set up by adding raw data frame to database
        result = load_sep_cycles(core_test_filename, test_db, test_datatype)
        # there are two cycles in this test data:
        assert list(result.keys()) == [1, 2]
    finally:
        # remove the database even if an assertion fails, so later
        # tests never see a stale file
        os.remove(test_db)
    return
Ejemplo n.º 6
0
from diffcapanalyzer.app_helper_functions import parse_contents
from diffcapanalyzer.app_helper_functions import pop_with_db
from diffcapanalyzer.chachifuncs import col_variables
from diffcapanalyzer.databasewrappers import get_db_filenames, get_filename_pref
from diffcapanalyzer.databasefuncs import get_file_from_database
from diffcapanalyzer.databasefuncs import init_master_table
from diffcapanalyzer.descriptors import generate_model
from diffcapanalyzer.descriptors import get_model_dfs

# Paths to the working database and the pristine seed database.
database = "data/databases/dQdV.db"
init_db = "data/databases/init_database.db"

# NOTE(review): `assert` is stripped under `python -O`; raising an
# explicit error would be a safer guard for a required file — confirm.
assert os.path.exists(init_db)
if not os.path.exists(database):
    init_master_table(database)

# NOTE(review): the same codepen stylesheet URL appears twice below —
# once as a bare string and once inside the dict form. Dash accepts
# both forms, but the duplication looks unintentional; verify.
app = dash.Dash(__name__,
                external_stylesheets=[
                    "https://codepen.io/chriddyp/pen/bWLwgP.css", {
                        'href': "https://codepen.io/chriddyp/pen/bWLwgP.css",
                        'rel': 'stylesheet'
                    }
                ])

##########################################
# App Layout
##########################################

Introduction = dcc.Markdown('''
		# dQ/dV