# Example 1
def test_import_air_data(dataset, data_path):
    """
    Author        : Thomas Mahoney
    Date          : 7 Sep 2018
    Purpose       : Checks that import_traffic_data, given an air-data csv,
                    inserts a dataset into SQL Server.
    Parameters    : dataset - period the data was collected in; appended to
                    RUN_ID to form the run identifier.
                    data_path - path of the csv file to import.
    Returns       : NA
    """

    # Build the run identifier once so the import and the check agree on it.
    run_id = RUN_ID + dataset

    # Import the csv under the test run identifier.
    import_reference_data.import_traffic_data(run_id, data_path)

    # The import must have produced at least one TRAFFIC_DATA row for the run.
    inserted = cf.select_data('*', 'TRAFFIC_DATA', 'RUN_ID', run_id)
    assert len(inserted) > 0
def test_spend_weight_step():
    """
    Integration test for the spend imputation step.

    Runs the step's stages in order (populate survey data, copy/apply PVs,
    run the spend imputation calculation, store results) and asserts the
    relevant SQL tables are populated or cleansed after each stage.
    """

    # Assign variables
    conn = database_connection()

    # Run, and test, first step of run.shift_weight_step
    idm.populate_survey_data_for_step(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Check all deleted tables are empty
    for table in STEP_CONFIGURATION[STEP_NAME]['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL
    result = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    for column in STEP_CONFIGURATION[STEP_NAME]['nullify_pvs']:
        # Strip the SQL bracket quoting to get the bare column name.
        column_name = column.replace('[', '').replace(']', '')
        assert result[column_name].isnull().sum() == len(result)

    # Check table has been populated
    sas_survey_data = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    table_len = len(sas_survey_data.index)
    assert table_len == EXPECTED_LEN

    # Run the next step and test
    idm.copy_step_pvs_for_survey_data(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Assert idm.SAS_PROCESS_VARIABLES_TABLE has been populated
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == NUMBER_OF_PVS

    # Assert STEP_CONFIGURATION[STEP_NAME]["spv_table"] has been cleansed
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == 0

    # Run the next step and test
    process_variables.process(dataset='survey',
                              in_table_name='SAS_SURVEY_SUBSAMPLE',
                              out_table_name='SAS_SPEND_SPV',
                              in_id='SERIAL')

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == EXPECTED_LEN

    # Run the next step
    idm.update_survey_data_with_step_pv_output(conn,
                                               STEP_CONFIGURATION[STEP_NAME])

    # Check all columns in SAS_SURVEY_SUBSAMPLE have been altered
    result = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    for column in STEP_CONFIGURATION[STEP_NAME]['pv_columns']:
        column_name = column.replace("'", "")
        assert len(result[column_name]) == EXPECTED_LEN
        assert result[column_name].sum() != 0

    # Assert SAS_PROCESS_VARIABLES_TABLE has been cleansed
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 0

    # Assert spv_table has been cleansed
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == 0

    # Get Survey Data before importing to calculation function
    sas_survey_data = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)

    # Run the next step and test
    surveydata_out = calculate_ips_spend_imputation.do_ips_spend_imputation(
        sas_survey_data, var_serial="SERIAL", measure="mean")

    # Replicate intermediate steps within final_weight_step() and test length
    cf.insert_dataframe_into_table(STEP_CONFIGURATION[STEP_NAME]["temp_table"],
                                   surveydata_out)

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == 5134

    # Extract our test results from the survey and summary tables then write the results to csv.
    df_survey_actual = cf.get_table_values(
        STEP_CONFIGURATION[STEP_NAME]["temp_table"])

    # Read in both the target datasets and the results we previously wrote out then sort them on specified columns.
    # Raw strings: '\s...' is an invalid escape sequence; value is unchanged.
    df_survey_actual.to_csv(TEST_DATA_DIR + r'\sas_survey_subsample_actual.csv',
                            index=False)

    df_survey_actual = pd.read_csv(
        TEST_DATA_DIR +
        r'\sas_survey_subsample_actual.csv').sort_values('SERIAL')
    df_survey_target = pd.read_csv(TEST_DATA_DIR +
                                   r'\sas_survey_subsample_target.csv',
                                   encoding='ANSI').sort_values('SERIAL')

    # Reset the dataframe's index before comparing the outputs.
    df_survey_actual.index = range(0, len(df_survey_actual))
    df_survey_target.index = range(0, len(df_survey_target))

    assert_frame_equal(df_survey_actual, df_survey_target, check_dtype=False)

    # Run the next step and test
    idm.update_survey_data_with_step_results(conn,
                                             STEP_CONFIGURATION[STEP_NAME])

    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == 0

    # Run the next step and test
    idm.store_survey_data_with_step_results(RUN_ID, conn,
                                            STEP_CONFIGURATION[STEP_NAME])

    result = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    # NOTE(review): debug dump to a hard-coded network share — consider
    # removing or routing through TEST_DATA_DIR.
    result.to_csv(
        r'S:\CASPA\IPS\Testing\scratch\spend_integration_testing_survey_subsample.csv'
    )
    table_len = result.shape[0]
    assert table_len == EXPECTED_LEN

    # Assert SAS_SURVEY_SUBSAMPLE_TABLE was cleansed
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 0
# Example 3
def test_stay_imputation_step():
    """
    Integration test for the stay imputation step.

    Runs the step's stages in order (populate survey data, copy/apply PVs,
    run the stay imputation calculation, store results) and asserts the
    relevant SQL tables are populated or cleansed after each stage.
    """

    # Assign variables.
    conn = database_connection()

    # Run, and test, first step.
    idm.populate_survey_data_for_step(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Check all deleted tables are empty.
    for table in STEP_CONFIGURATION[STEP_NAME]['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL.
    survey_subsample = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE,
                                      'RUN_ID', RUN_ID)
    for column in STEP_CONFIGURATION[STEP_NAME]['nullify_pvs']:
        # Strip the SQL bracket quoting to get the bare column name.
        column_name = column.replace('[', '').replace(']', '')
        assert survey_subsample[column_name].isnull().sum() == len(
            survey_subsample)

    # Check table has been populated.
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    # Run the next step and test.
    idm.copy_step_pvs_for_survey_data(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Assert idm.SAS_PROCESS_VARIABLES_TABLE has been populated.
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == NUMBER_OF_PVS

    # Assert STEP_CONFIGURATION[STEP_NAME]["spv_table"] has been cleansed.
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == 0

    # Run the next step and test.
    process_variables.process(dataset='survey',
                              in_table_name='SAS_SURVEY_SUBSAMPLE',
                              out_table_name='SAS_STAY_SPV',
                              in_id='serial')

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == EXPECTED_LEN

    # Run the next step.
    idm.update_survey_data_with_step_pv_output(conn,
                                               STEP_CONFIGURATION[STEP_NAME])

    # Check all columns in SAS_SURVEY_SUBSAMPLE have been altered.
    sas_survey_data = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)

    for column in STEP_CONFIGURATION[STEP_NAME]['pv_columns']:
        column_name = column.replace("'", "")
        assert len(sas_survey_data[column_name]) == EXPECTED_LEN
        assert sas_survey_data[column_name].sum() != 0

    # Assert SAS_PROCESS_VARIABLES_TABLE has been cleansed.
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 0

    # Assert spv_table has been cleansed.
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == 0

    # Run the next step and test.
    surveydata_out = calculate_ips_stay_imputation.do_ips_stay_imputation(
        sas_survey_data, var_serial='SERIAL', num_levels=1, measure='mean')

    # Insert the data generated by the calculate function into the database
    cf.insert_dataframe_into_table(STEP_CONFIGURATION[STEP_NAME]["temp_table"],
                                   surveydata_out)

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == 27

    # Extract our test results from the survey table then write the results to csv.
    df_survey_actual = cf.get_table_values(
        STEP_CONFIGURATION[STEP_NAME]["temp_table"])

    # Read in both the target datasets and the results we previously wrote out then sort them on specified columns.
    # Raw strings: '\s...' is an invalid escape sequence; value is unchanged.
    df_survey_actual.to_csv(TEST_DATA_DIR + r'\sas_survey_subsample_actual.csv',
                            index=False)

    df_survey_actual = pd.read_csv(TEST_DATA_DIR +
                                   r'\sas_survey_subsample_actual.csv',
                                   engine='python').sort_values('SERIAL')
    df_survey_target = pd.read_csv(TEST_DATA_DIR +
                                   r'\sas_survey_subsample_target.csv',
                                   engine='python').sort_values('SERIAL')

    # Reset the dataframe's index before comparing the outputs.
    df_survey_actual.index = range(0, len(df_survey_actual))
    df_survey_target.index = range(0, len(df_survey_target))

    assert_frame_equal(df_survey_actual, df_survey_target, check_dtype=False)

    # Run the next step and test.
    idm.update_survey_data_with_step_results(conn,
                                             STEP_CONFIGURATION[STEP_NAME])

    # Assert SAS_SURVEY_SUBSAMPLE was populated.
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    # Assert table was cleansed accordingly.
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == 0

    # Run the next step and test.
    idm.store_survey_data_with_step_results(RUN_ID, conn,
                                            STEP_CONFIGURATION[STEP_NAME])

    # Assert SURVEY_SUBSAMPLE_TABLE was populated.
    result = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == EXPECTED_LEN

    # Assert SAS_SURVEY_SUBSAMPLE_TABLE was cleansed.
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 0
def test_minimums_weight_step():
    """
    Integration test for the minimums weight step.

    Runs the step's eight stages in order (populate survey data, copy/apply
    PVs, run the minimums weight calculation, store survey and summary
    results) and compares the final survey/summary tables against target
    csv files.
    """

    # Get database connection
    conn = database_connection()

    # Run step 1 / 8
    idm.populate_survey_data_for_step(RUN_ID, conn, STEP_CONFIGURATION["MINIMUMS_WEIGHT"])

    # Check all deleted tables are empty
    for table in STEP_CONFIGURATION['MINIMUMS_WEIGHT']['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL
    for column in STEP_CONFIGURATION['MINIMUMS_WEIGHT']['nullify_pvs']:
        # Strip the SQL bracket quoting to get the bare column name.
        column_name = column.replace('[', '').replace(']', '')
        result = cf.select_data(column_name, idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
        assert result[column_name].isnull().sum() == len(result)

    # Check table has been populated
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 19980

    # Run step 2 / 8
    idm.copy_step_pvs_for_survey_data(RUN_ID, conn, STEP_CONFIGURATION["MINIMUMS_WEIGHT"])

    # Assert idm.SAS_PROCESS_VARIABLES_TABLE has been populated
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 3

    # Assert STEP_CONFIGURATION["MINIMUMS_WEIGHT"]["spv_table"] has been cleansed
    table_len = len(cf.get_table_values(STEP_CONFIGURATION["MINIMUMS_WEIGHT"]["spv_table"]))
    assert table_len == 0

    # Run step 3 / 8
    process_variables.process(dataset='survey',
                              in_table_name='SAS_SURVEY_SUBSAMPLE',
                              out_table_name='SAS_MINIMUMS_SPV',
                              in_id='serial')

    table_len = len(cf.get_table_values(STEP_CONFIGURATION["MINIMUMS_WEIGHT"]["spv_table"]))
    assert table_len == 19980

    # Run step 4 / 8
    idm.update_survey_data_with_step_pv_output(conn, STEP_CONFIGURATION["MINIMUMS_WEIGHT"])

    # Assert SAS_PROCESS_VARIABLES_TABLE content has been deleted
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 0

    # Assert spv_table content has been deleted
    table_len = len(cf.get_table_values(STEP_CONFIGURATION["MINIMUMS_WEIGHT"]["spv_table"]))
    assert table_len == 0

    # Get and test Survey Data before importing to calculation function
    sas_survey_data = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)

    # Run step 5 / 8
    surveydata_out, summary_out = calculate_ips_minimums_weight.do_ips_minweight_calculation(sas_survey_data,
                                                                                             var_serialNum='SERIAL',
                                                                                             var_shiftWeight='SHIFT_WT',
                                                                                             var_NRWeight='NON_RESPONSE_WT',
                                                                                             var_minWeight='MINS_WT')

    # Insert the data generated by the calculate function into the database
    cf.insert_dataframe_into_table(STEP_CONFIGURATION["MINIMUMS_WEIGHT"]["temp_table"], surveydata_out)
    cf.insert_dataframe_into_table(STEP_CONFIGURATION["MINIMUMS_WEIGHT"]["sas_ps_table"], summary_out)

    # Run step 6 / 8
    idm.update_survey_data_with_step_results(conn, STEP_CONFIGURATION["MINIMUMS_WEIGHT"])

    # Run step 7 / 8
    idm.store_survey_data_with_step_results(RUN_ID, conn, STEP_CONFIGURATION["MINIMUMS_WEIGHT"])

    # Run step 8 / 8
    idm.store_step_summary(RUN_ID, conn, STEP_CONFIGURATION["MINIMUMS_WEIGHT"])

    # Extract our test results from the survey and summary tables then write the results to csv.
    df_survey_actual = cf.select_data('*', 'SURVEY_SUBSAMPLE', 'RUN_ID', RUN_ID)
    df_summary_actual = cf.select_data('*', 'PS_MINIMUMS', 'RUN_ID', RUN_ID)

    # Raw strings: '\s' / '\p' are invalid escape sequences; values unchanged.
    df_survey_actual.to_csv(TEST_DATA_DIR + r'\survey_subsample_actual.csv', index=False)
    df_summary_actual.to_csv(TEST_DATA_DIR + r'\ps_minimums_actual.csv', index=False)

    # Read in both the target datasets and the results we previously wrote out then sort them on specified columns.
    df_survey_actual = pd.read_csv(TEST_DATA_DIR + r'\survey_subsample_actual.csv', engine='python').sort_values('SERIAL')
    df_survey_target = pd.read_csv(TEST_DATA_DIR + r'\survey_subsample_target_new_rounding.csv', engine='python').sort_values('SERIAL')
    df_summary_actual = pd.read_csv(TEST_DATA_DIR + r'\ps_minimums_actual.csv', engine='python').sort_values(['MINS_PORT_GRP_PV', 'MINS_CTRY_GRP_PV'])
    df_summary_target = pd.read_csv(TEST_DATA_DIR + r'\ps_minimums_target_new_rounding.csv', engine='python').sort_values(['MINS_PORT_GRP_PV', 'MINS_CTRY_GRP_PV'])

    # Reset the dataframe's index before comparing the outputs.
    df_survey_actual.index = range(0, len(df_survey_actual))
    df_survey_target.index = range(0, len(df_survey_target))
    df_summary_actual.index = range(0, len(df_summary_actual))
    df_summary_target.index = range(0, len(df_summary_target))

    # Drop column EXPENDCODE from survey data as not required for testing - ET 12/11/2018
    df_survey_actual.drop(['EXPENDCODE'], axis=1, inplace=True)
    df_survey_target.drop(['EXPENDCODE'], axis=1, inplace=True)

    # Ensure summary output is equal to expected summary output
    assert_frame_equal(df_summary_actual, df_summary_target, check_dtype=False, check_like=True, check_less_precise=True)

    # Select the newly updated weight column from the dataframe and ensure it matches the expected weights
    assert_frame_equal(df_survey_actual, df_survey_target, check_dtype=False)

    # NOTE(review): `ist` is not defined in this function — presumably a
    # module-level start timestamp set before the run; confirm it exists.
    print("Import runtime: {}".format(time.strftime("%H:%M:%S", time.gmtime(time.time() - ist))))
def test_traffic_weight_step(path_to_data):
    """
    Integration test for the traffic weight step.

    Runs the step's stages in order (populate survey and traffic data,
    copy/apply PVs for both, run the R-based traffic weight calculation,
    store survey results and summary) and compares intermediate and final
    outputs against csv files under `path_to_data`.

    Parameters : path_to_data - directory holding the comparison csv files.
    """
    # Get database connection
    conn = database_connection()

    # Run step 1
    idm.populate_survey_data_for_step(RUN_ID, conn, step_config)

    # ###########################
    # run checks 1
    # ###########################

    # Check all deleted tables are empty
    for table in step_config['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL
    for column in step_config['nullify_pvs']:
        # Strip the SQL bracket quoting to get the bare column name.
        column_name = column.replace('[', '').replace(']', '')
        result = cf.select_data(column_name, idm.SURVEY_SUBSAMPLE_TABLE,
                                'RUN_ID', RUN_ID)
        assert result[column_name].isnull().sum() == len(result)

    # Check table has been populated
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    # Run step 2
    idm.populate_step_data(RUN_ID, conn, step_config)

    # ###########################
    # run checks 2
    # ###########################

    # Check table has been populated
    table_len = len(cf.get_table_values(step_config["data_table"]))
    assert table_len == TRAFFIC_DATA_LENGTH

    # Run step 3
    idm.copy_step_pvs_for_survey_data(RUN_ID, conn, step_config)

    # ###########################
    # run checks 3
    # ###########################

    # Get all values from the sas_process_variables table
    results = cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE)

    # Check number of PV records moved matches number passed in through step configuration.
    assert len(results) == len(step_config['pv_columns'])

    # Get the spv_table values and ensure all records have been deleted
    results = cf.get_table_values(step_config['spv_table'])
    assert len(results) == 0

    # Run step 4  : Apply Traffic Wt PVs On Survey Data
    process_variables.process(dataset='survey',
                              in_table_name='SAS_SURVEY_SUBSAMPLE',
                              out_table_name='SAS_TRAFFIC_SPV',
                              in_id='serial')

    # ###########################
    # run checks 4
    # ###########################

    table_len = len(cf.get_table_values(step_config["spv_table"]))
    assert table_len == EXPECTED_LEN

    # Run step 5 : Update Survey Data with Traffic Wt PVs Output
    idm.update_survey_data_with_step_pv_output(conn, step_config)

    # ###########################
    # run checks 5
    # ###########################

    # Check all columns in SAS_SURVEY_SUBSAMPLE have been altered
    result = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    for column in step_config['pv_columns']:
        column_name = column.replace("'", "")
        assert len(result[column_name]) == EXPECTED_LEN
        assert result[column_name].count() != 0

    # Assert SAS_PROCESS_VARIABLES_TABLE has been cleansed
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 0

    # Assert spv_table has been cleansed
    table_len = len(cf.get_table_values(step_config["spv_table"]))
    assert table_len == 0

    # Run step 6 : Copy Traffic Wt PVs for Traffic Data
    idm.copy_step_pvs_for_step_data(RUN_ID, conn, step_config)

    # ###########################
    # run checks 6
    # ###########################

    # Assert pv_table has been cleansed
    table_len = len(cf.get_table_values(step_config["pv_table"]))
    assert table_len == 0

    # Assert SAS_PROCESS_VARIABLES_TABLE was populated
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == TRAFFIC_SAS_PROCESS_VARIABLE_TABLE_LENGTH

    # Run step 7 : Apply Traffic Wt PVs On Traffic Data
    process_variables.process(dataset='traffic',
                              in_table_name='SAS_TRAFFIC_DATA',
                              out_table_name='SAS_TRAFFIC_PV',
                              in_id='REC_ID')

    # ###########################
    # run checks 7
    # ###########################

    table_len = len(cf.get_table_values(step_config["pv_table"]))
    assert table_len == TRAFFIC_DATA_LENGTH

    # Run step 8 : Update Traffic Data With PVs Output
    idm.update_step_data_with_step_pv_output(conn, step_config)

    # ###########################
    # run checks 8
    # ###########################

    # Assert data table was populated
    table_len = len(cf.get_table_values(step_config["data_table"]))
    assert table_len == TRAFFIC_DATA_LENGTH

    # Assert the following tables were cleansed
    deleted_tables = [
        step_config["pv_table"], step_config["temp_table"],
        idm.SAS_PROCESS_VARIABLES_TABLE, step_config["sas_ps_table"]
    ]

    for table in deleted_tables:
        table_len = len(cf.get_table_values(table))
        assert table_len == 0

    # ###################################################################
    # Get traffic data and compare to existing CSVs
    # ###################################################################

    # dataimport the traffic data from SQL
    df_tr_data_import_actual = cf.get_table_values(SAS_TRAFFIC_TABLE)

    # read in the comparative traffic data csv
    df_test_traffic_data = pd.read_csv(path_to_data +
                                       r"\trafficdata_before_calculation.csv")

    # match the SQL data to the csv data it should match
    df_tr_data_import_actual['AM_PM_NIGHT'] = 0.0
    df_tr_data_import_actual.replace("", np.nan, inplace=True)
    # np.nan (not np.NaN): the NaN alias was removed in NumPy 2.0.
    df_tr_data_import_actual['VEHICLE'] = np.nan

    # drop rec_id
    df_tr_data_import_actual = df_tr_data_import_actual.drop('REC_ID', axis=1)
    df_test_traffic_data = df_test_traffic_data.drop('REC_ID', axis=1)

    # get column list
    mylista = df_tr_data_import_actual.columns.values
    mylist = mylista.tolist()

    # sort the values
    df_tr_data_import_actual = df_tr_data_import_actual.sort_values(mylist)
    df_test_traffic_data = df_test_traffic_data.sort_values(mylist)

    # reindex
    df_tr_data_import_actual.index = range(0, len(df_tr_data_import_actual))
    df_test_traffic_data.index = range(0, len(df_test_traffic_data))

    assert_frame_equal(df_tr_data_import_actual,
                       df_test_traffic_data,
                       check_dtype=False,
                       check_less_precise=True)

    # ###################################################################
    # Get survey data and compare to existing CSVs
    # ###################################################################

    # dataimport the survey data from SQL and sort and reindex
    df_surveydata_import_actual = cf.get_table_values(
        idm.SAS_SURVEY_SUBSAMPLE_TABLE)

    df_surveydata_import_actual_sql = df_surveydata_import_actual.sort_values(
        by='SERIAL')
    df_surveydata_import_actual_sql.index = range(
        0, len(df_surveydata_import_actual_sql))

    # do the calculation
    df_output_merge_final, df_output_summary = do_ips_trafweight_calculation_with_R(
        df_surveydata_import_actual_sql, df_tr_data_import_actual)

    # ###########################
    # run checks
    # ###########################

    # test the returned data matches expected
    df_test = pd.read_csv(path_to_data + '/output_final.csv', engine='python')
    df_test.columns = df_test.columns.str.upper()
    assert_frame_equal(df_output_merge_final,
                       df_test,
                       check_dtype=False,
                       check_less_precise=True)

    df_test2 = pd.read_csv(path_to_data + '/summary_final.csv',
                           engine='python')
    df_test2.columns = df_test2.columns.str.upper()
    assert_frame_equal(df_output_summary,
                       df_test2,
                       check_dtype=False,
                       check_less_precise=True)

    # Update Survey Data traffic weight Results
    idm.update_survey_data_with_step_results(conn, step_config)

    # ###########################
    # run checks 9
    # ###########################

    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    table_len = len(cf.get_table_values(step_config["temp_table"]))
    assert table_len == 0

    # Store Survey Data With traffic weight Results
    idm.store_survey_data_with_step_results(RUN_ID, conn, step_config)

    # ###########################
    # run checks 10
    # ###########################

    # Assert SURVEY_SUBSAMPLE_TABLE was populated
    result = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == SURVEY_SUBSAMPLE_LENGTH

    # Assert all records for corresponding run_id were deleted from ps_table.
    result = cf.select_data('*', step_config["ps_table"], 'RUN_ID', RUN_ID)

    # select_data returns False when no dataframe was pulled from SQL; the
    # original `if result is False: assert True` asserted nothing when rows
    # remained, so the deletion was never actually verified.
    if result is not False:
        assert len(result) == 0

    # Assert SAS_SURVEY_SUBSAMPLE_TABLE was cleansed
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 0

    # Store traffic Wt Summary
    idm.store_step_summary(RUN_ID, conn, step_config)

    # ###########################
    # run checks 11
    # ###########################

    # Assert summary was populated.
    result = cf.select_data('*', step_config["ps_table"], 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == 85

    # Assert temp table was cleansed
    table_len = len(cf.get_table_values(step_config["sas_ps_table"]))
    assert table_len == 0
def test_non_response_weight_step(path_to_data):
    """
    Integration test for the full non-response weight step.

    Exercises each IDM sub-step in order (populate survey data, copy and
    apply process variables, run the non-response weight calculation,
    update and store the results, store the summary) and asserts the
    relevant SQL tables are populated or cleansed after every stage.

    Parameters
    ----------
    path_to_data : str
        Directory containing the csv fixture files used for comparison.

    Requires a live database connection and module-level test constants
    (RUN_ID, step_config, EXPECTED_LEN, etc.).
    """

    # Get database connection
    conn = database_connection()

    # Run step 1
    idm.populate_survey_data_for_step(RUN_ID, conn, step_config)

    # ###########################
    # run checks 1
    # ###########################

    # Check all deleted tables are empty
    for table in step_config['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL
    for column in step_config['nullify_pvs']:
        column_name = column.replace('[', '').replace(']', '')
        result = cf.select_data(column_name, idm.SURVEY_SUBSAMPLE_TABLE,
                                'RUN_ID', RUN_ID)
        assert result[column_name].isnull().sum() == len(result)

    # Check table has been populated
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    # Run step 2
    idm.populate_step_data(RUN_ID, conn, step_config)

    # ###########################
    # run checks 2
    # ###########################

    # Check table has been populated
    table_len = len(cf.get_table_values(step_config["data_table"]))
    assert table_len == NON_RESPONSE_DATA_LENGTH

    # Run step 3
    idm.copy_step_pvs_for_survey_data(RUN_ID, conn, step_config)

    # ###########################
    # run checks 3
    # ###########################

    # Get all values from the sas_process_variables table
    results = cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE)

    # Check number of PV records moved matches number passed in through step configuration.
    assert len(results) == len(step_config['pv_columns'])

    # Get the spv_table values and ensure all records have been deleted
    results = cf.get_table_values(step_config['spv_table'])
    assert len(results) == 0

    # Run step 4 : Apply Non Response Wt PVs On Survey Data
    process_variables.process(dataset='survey',
                              in_table_name='SAS_SURVEY_SUBSAMPLE',
                              out_table_name='SAS_NON_RESPONSE_SPV',
                              in_id='serial')

    # ###########################
    # run checks 4
    # ###########################

    table_len = len(cf.get_table_values(step_config["spv_table"]))
    assert table_len == EXPECTED_LEN

    # Run step 5 : Update Survey Data with Non Response Wt PVs Output
    idm.update_survey_data_with_step_pv_output(conn, step_config)

    # ###########################
    # run checks 5
    # ###########################

    # Check all columns in SAS_SURVEY_SUBSAMPLE have been altered
    result = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    for column in step_config['pv_columns']:
        column_name = column.replace("'", "")
        assert len(result[column_name]) == EXPECTED_LEN
        assert result[column_name].sum() != 0

    # Assert SAS_PROCESS_VARIABLES_TABLE has been cleansed
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 0

    # Assert spv_table has been cleansed
    table_len = len(cf.get_table_values(step_config["spv_table"]))
    assert table_len == 0

    # Run step 6 : Copy Non Response Wt PVs for Non Response Data
    idm.copy_step_pvs_for_step_data(RUN_ID, conn, step_config)

    # ###########################
    # run checks 6
    # ###########################

    # Assert pv_table has been cleansed
    table_len = len(cf.get_table_values(step_config["pv_table"]))
    assert table_len == 0

    # Assert SAS_PROCESS_VARIABLES_TABLE was populated
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == NON_RESPONSE_SAS_PROCESS_VARIABLE_TABLE_LENGTH

    # Run step 7 : Apply Non Response Wt PVs On Non Response Data
    process_variables.process(dataset='non_response',
                              in_table_name='SAS_NON_RESPONSE_DATA',
                              out_table_name='SAS_NON_RESPONSE_PV',
                              in_id='REC_ID')

    # ###########################
    # run checks 7
    # ###########################

    table_len = len(cf.get_table_values(step_config["pv_table"]))
    assert table_len == NON_RESPONSE_DATA_LENGTH

    # Run step 8 : Update NonResponse Data With PVs Output
    idm.update_step_data_with_step_pv_output(conn, step_config)

    # ###########################
    # run checks 8
    # ###########################

    # Assert data table was populated
    table_len = len(cf.get_table_values(step_config["data_table"]))
    assert table_len == NON_RESPONSE_DATA_LENGTH

    # Assert the following tables were cleansed
    deleted_tables = [
        step_config["pv_table"], step_config["temp_table"],
        idm.SAS_PROCESS_VARIABLES_TABLE, step_config["sas_ps_table"]
    ]

    for table in deleted_tables:
        table_len = len(cf.get_table_values(table))
        assert table_len == 0

    # ##############################
    # Calculate Non Response Weight
    # ##############################

    # dataimport the data from SQL and sort
    df_surveydata_import_actual = cf.get_table_values(
        idm.SAS_SURVEY_SUBSAMPLE_TABLE)

    df_surveydata_import_actual_sql = df_surveydata_import_actual.sort_values(
        by='SERIAL')
    df_surveydata_import_actual_sql.index = range(
        0, len(df_surveydata_import_actual_sql))

    df_nr_data_import_actual = cf.get_table_values(
        SAS_NON_RESPONSE_DATA_TABLE_NAME)

    # fix formatting in actual data
    df_surveydata_import_actual_sql.drop(['EXPENDCODE'], axis=1, inplace=True)
    df_surveydata_import_actual_sql['SHIFT_PORT_GRP_PV'] = \
        df_surveydata_import_actual_sql['SHIFT_PORT_GRP_PV'].apply(pd.to_numeric, errors='coerce')

    # do the calculation step
    result_py_data = non_resp.do_ips_nrweight_calculation(
        df_surveydata_import_actual_sql, df_nr_data_import_actual,
        'NON_RESPONSE_WT', 'SERIAL')

    # ###########################
    # run checks
    # ###########################

    # Retrieve and sort python calculated dataframes
    py_survey_data = result_py_data[0]
    py_survey_data = py_survey_data.sort_values(by='SERIAL')
    py_survey_data.index = range(0, len(py_survey_data))

    py_summary_data = result_py_data[1]
    # BUG FIX: sort_values is not in-place by default, so the sorted frame
    # must be re-assigned (previously the sorted result was discarded and
    # the unsorted frame was compared against a sorted fixture).
    py_summary_data = py_summary_data.sort_values(by=NR_COLUMNS)
    py_summary_data[NR_COLUMNS] = py_summary_data[NR_COLUMNS].apply(
        pd.to_numeric, errors='coerce', downcast='float')
    py_summary_data.index = range(0, len(py_summary_data))

    # insert the csv output data into SQL and read back, this is for testing against data pulled from SQL Server
    test_result_survey = pd.read_csv(path_to_data + '/outputdata_final.csv',
                                     engine='python')
    cf.delete_from_table(OUT_TABLE_NAME)
    test_result_survey_sql = convert_dataframe_to_sql_format(
        OUT_TABLE_NAME, test_result_survey)
    test_result_survey_sql = test_result_survey_sql.sort_values(by='SERIAL')
    test_result_survey_sql.index = range(0, len(test_result_survey_sql))

    test_result_summary = pd.read_csv(path_to_data + '/summarydata_final.csv',
                                      engine='python')
    cf.delete_from_table(SUMMARY_OUT_TABLE_NAME)
    test_result_summary_sql = convert_dataframe_to_sql_format(
        SUMMARY_OUT_TABLE_NAME, test_result_summary)
    test_result_summary_sql = test_result_summary_sql.sort_values(
        by=NR_COLUMNS)
    test_result_summary_sql[NR_COLUMNS] = test_result_summary_sql[
        NR_COLUMNS].apply(pd.to_numeric, errors='coerce', downcast='float')
    test_result_summary_sql.index = range(0, len(test_result_summary_sql))

    # Assert dfs are equal
    assert_frame_equal(py_survey_data,
                       test_result_survey_sql,
                       check_dtype=False,
                       check_like=True,
                       check_less_precise=True)

    assert_frame_equal(py_summary_data,
                       test_result_summary_sql,
                       check_dtype=False,
                       check_like=True,
                       check_less_precise=True)

    # put the actual SQL data back in for the remaining steps
    cf.delete_from_table(OUT_TABLE_NAME)
    cf.delete_from_table(SUMMARY_OUT_TABLE_NAME)
    cf.insert_dataframe_into_table(OUT_TABLE_NAME, py_survey_data)
    cf.insert_dataframe_into_table(SUMMARY_OUT_TABLE_NAME, py_summary_data)

    # Update Survey Data With Non Response Wt Results
    idm.update_survey_data_with_step_results(conn, step_config)

    # ###########################
    # run checks 9
    # ###########################

    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    table_len = len(cf.get_table_values(step_config["temp_table"]))
    assert table_len == 0

    # Store Survey Data With NonResponse Wt Results
    idm.store_survey_data_with_step_results(RUN_ID, conn, step_config)

    # ###########################
    # run checks 10
    # ###########################

    # Assert SURVEY_SUBSAMPLE_TABLE was populated
    result = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == SURVEY_SUBSAMPLE_LENGTH

    # Assert all records for corresponding run_id were deleted from ps_table.
    result = cf.select_data('*', step_config["ps_table"], 'RUN_ID', RUN_ID)

    # BUG FIX: `if not result:` raises ValueError when select_data returns a
    # DataFrame (DataFrame truthiness is ambiguous), and `assert True` was a
    # no-op.  select_data returns False when no rows match the run id.
    assert result is False or len(result) == 0

    # Assert SAS_SURVEY_SUBSAMPLE_TABLE was cleansed
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 0

    # Store Non Response Wt Summary
    idm.store_step_summary(RUN_ID, conn, step_config)

    # ###########################
    # run checks 11
    # ###########################

    # Assert summary was populated.
    result = cf.select_data('*', step_config["ps_table"], 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == 207

    # Assert temp table was cleansed
    table_len = len(cf.get_table_values(step_config["sas_ps_table"]))
    assert table_len == 0
def test_final_weight_step():
    """
    Integration test for the final weight step.

    Runs the IDM sub-steps for the final weight (populate survey data,
    calculate, update, store, summarise) and verifies table lengths plus
    the survey/summary calculation outputs against csv fixtures located
    under TEST_DATA_DIR.  Requires a live database connection.
    """

    # Assign variables
    conn = database_connection()
    cur = conn.cursor()

    # Run, and test, first step of run.shift_weight_step
    idm.populate_survey_data_for_step(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Check all deleted tables are empty
    for table in STEP_CONFIGURATION[STEP_NAME]['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL
    for column in STEP_CONFIGURATION[STEP_NAME]['nullify_pvs']:
        column_name = column.replace('[', '').replace(']', '')
        result = cf.select_data(column_name, idm.SURVEY_SUBSAMPLE_TABLE,
                                'RUN_ID', RUN_ID)
        assert result[column_name].isnull().sum() == len(result)

    # Check table has been populated
    sas_survey_data = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    table_len = len(sas_survey_data.index)
    assert table_len == EXPECTED_LEN

    # Save the Survey Data before importing to calculation function.
    # NOTE: raw strings keep the literal backslash paths but avoid the
    # invalid '\s' escape-sequence warning.
    sas_survey_data.to_csv(TEST_DATA_DIR + r'\sas_survey_data_actual.csv',
                           index=False)

    actual_results = pd.read_csv(TEST_DATA_DIR + r'\sas_survey_data_actual.csv')
    expected_results = pd.read_csv(TEST_DATA_DIR +
                                   r'\sas_survey_data_expected.csv')

    # Formatting because pd testing is annoying
    actual_results.sort_values(by=["SERIAL"], inplace=True)
    actual_results.index = range(0, len(actual_results))
    actual_results['SHIFT_PORT_GRP_PV'] = actual_results[
        'SHIFT_PORT_GRP_PV'].astype(str)

    # Formatting because pd testing is annoying
    expected_results.sort_values(by=["SERIAL"], inplace=True)
    expected_results.index = range(0, len(expected_results))
    # BUG FIX: this column was previously copied from actual_results,
    # which made the comparison on SHIFT_PORT_GRP_PV trivially pass.
    expected_results['SHIFT_PORT_GRP_PV'] = expected_results[
        'SHIFT_PORT_GRP_PV'].astype(str)

    assert_frame_equal(actual_results, expected_results, check_dtype=False)

    # Run the next step and test
    surveydata_out, summary_out = calculate_ips_final_weight.do_ips_final_wt_calculation(
        sas_survey_data,
        serial_num='SERIAL',
        shift_weight='SHIFT_WT',
        non_response_weight='NON_RESPONSE_WT',
        min_weight='MINS_WT',
        traffic_weight='TRAFFIC_WT',
        unsampled_weight='UNSAMP_TRAFFIC_WT',
        imbalance_weight='IMBAL_WT',
        final_weight='FINAL_WT')

    # Test survey data from calculation function before inserting to db
    surveydata_out.to_csv(TEST_DATA_DIR + r'\surveydata_out_actual.csv',
                          index=False)
    actual_results = pd.read_csv(TEST_DATA_DIR + r'\surveydata_out_actual.csv')

    expected_results = pd.read_csv(TEST_DATA_DIR +
                                   r'\surveydata_out_expected.csv')

    actual_results.sort_values(by=["SERIAL"], inplace=True)
    actual_results.index = range(0, len(actual_results))

    expected_results.sort_values(by=["SERIAL"], inplace=True)
    expected_results.index = range(0, len(expected_results))

    assert_frame_equal(actual_results, expected_results, check_dtype=False)

    # Test length of summary data from calculation as only a random sample is produced each time
    summary_out.to_csv(TEST_DATA_DIR + r'\summary_out_actual.csv', index=False)
    actual_results = pd.read_csv(TEST_DATA_DIR + r'\summary_out_actual.csv')

    assert (len(actual_results) ==
            calculate_ips_final_weight.NUMBER_RECORDS_DISPLAYED)

    # Replicate intermediate steps within final_weight_step() and test length
    cf.insert_dataframe_into_table(STEP_CONFIGURATION[STEP_NAME]["temp_table"],
                                   surveydata_out)
    cf.insert_dataframe_into_table(
        STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"], summary_out)

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == EXPECTED_LEN

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"]))
    assert table_len == calculate_ips_final_weight.NUMBER_RECORDS_DISPLAYED

    # Run the next step and test
    idm.update_survey_data_with_step_results(conn,
                                             STEP_CONFIGURATION[STEP_NAME])

    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == 0

    # Run the next step and test
    idm.store_survey_data_with_step_results(RUN_ID, conn,
                                            STEP_CONFIGURATION[STEP_NAME])

    # Assert SURVEY_SUBSAMPLE_TABLE was populated
    sql = """
        SELECT * FROM {}
        WHERE RUN_ID = '{}'
        """.format(idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    result = cur.execute(sql).fetchall()
    table_len = len(result)
    assert table_len == EXPECTED_LEN

    # Assert all records for corresponding run_id were deleted from ps_table
    sql = """
    SELECT * FROM {}
    WHERE RUN_ID = '{}'
    """.format(STEP_CONFIGURATION[STEP_NAME]["ps_table"], RUN_ID)
    result = cur.execute(sql).fetchall()
    table_len = len(result)
    assert table_len == 0

    # Assert SAS_SURVEY_SUBSAMPLE_TABLE was cleansed
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 0

    # Run the final step and test
    idm.store_step_summary(RUN_ID, conn, STEP_CONFIGURATION[STEP_NAME])

    # Assert the process summary table was populated for this run id
    table_len = len(
        cf.select_data('*', STEP_CONFIGURATION[STEP_NAME]['ps_table'],
                       'RUN_ID', RUN_ID))
    assert table_len == calculate_ips_final_weight.NUMBER_RECORDS_DISPLAYED
def test_imbalance_weight_step():
    """
    Integration test for the imbalance weight step.

    Runs the IDM sub-steps (populate survey data, copy/apply process
    variables, run the imbalance weight calculation, update, store,
    summarise) and checks table state after each stage, comparing the
    calculation outputs against csv fixtures under TEST_DATA_DIR.
    Requires a live database connection.
    """

    # Assign variables.
    conn = database_connection()

    # Run, and test, first step.
    idm.populate_survey_data_for_step(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Check all deleted tables are empty.
    for table in STEP_CONFIGURATION[STEP_NAME]['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL.
    survey_subsample = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE,
                                      'RUN_ID', RUN_ID)
    for column in STEP_CONFIGURATION[STEP_NAME]['nullify_pvs']:
        column_name = column.replace('[', '').replace(']', '')
        assert survey_subsample[column_name].isnull().sum() == len(
            survey_subsample)

    # Check table has been populated.
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    # Run the next step and test.
    idm.copy_step_pvs_for_survey_data(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Assert idm.SAS_PROCESS_VARIABLES_TABLE has been populated.
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == NUMBER_OF_PVS

    # Assert STEP_CONFIGURATION[STEP_NAME]["spv_table"] has been cleansed.
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == 0

    # Run the next step and test.
    process_variables.process(dataset='survey',
                              in_table_name='SAS_SURVEY_SUBSAMPLE',
                              out_table_name='SAS_IMBALANCE_SPV',
                              in_id='serial')

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == EXPECTED_LEN

    # Run the next step.
    idm.update_survey_data_with_step_pv_output(conn,
                                               STEP_CONFIGURATION[STEP_NAME])

    # Check all columns in SAS_SURVEY_SUBSAMPLE have been altered.
    sas_survey_data = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)

    for column in STEP_CONFIGURATION[STEP_NAME]['pv_columns']:
        column_name = column.replace("'", "")
        assert len(sas_survey_data[column_name]) == EXPECTED_LEN
        assert sas_survey_data[column_name].sum() != 0

    # Assert SAS_PROCESS_VARIABLES_TABLE has been cleansed.
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 0

    # Assert spv_table has been cleansed.
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == 0

    # Run the next step and test.
    surveydata_out, summary_out = do_ips_imbweight_calculation(
        sas_survey_data,
        var_serialNum="SERIAL",
        var_shiftWeight="SHIFT_WT",
        var_NRWeight="NON_RESPONSE_WT",
        var_minWeight="MINS_WT",
        var_trafficWeight="TRAFFIC_WT",
        var_OOHWeight="UNSAMP_TRAFFIC_WT",
        var_imbalanceWeight="IMBAL_WT")

    # Insert the data generated by the calculate function into the database
    cf.insert_dataframe_into_table(STEP_CONFIGURATION[STEP_NAME]["temp_table"],
                                   surveydata_out)
    cf.insert_dataframe_into_table(
        STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"], summary_out)

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == 17431

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"]))
    assert table_len == 8

    # Extract our test results from the survey and summary tables then write the results to csv.
    df_survey_actual = cf.get_table_values(
        STEP_CONFIGURATION[STEP_NAME]["temp_table"])
    df_summary_actual = cf.get_table_values(
        STEP_CONFIGURATION[STEP_NAME]['sas_ps_table'])

    # Read in both the target datasets and the results we previously wrote
    # out then sort them on specified columns.  (Raw strings keep the
    # literal backslash paths but avoid the invalid '\s' escape warning.)
    df_survey_actual.to_csv(TEST_DATA_DIR + r'\sas_survey_subsample_actual.csv',
                            index=False)

    df_survey_actual = pd.read_csv(
        TEST_DATA_DIR +
        r'\sas_survey_subsample_actual.csv').sort_values('SERIAL')
    df_survey_target = pd.read_csv(TEST_DATA_DIR +
                                   r'\sas_survey_subsample_target.csv',
                                   encoding='ANSI').sort_values('SERIAL')

    # Reset the dataframe's index before comparing the outputs.
    df_survey_actual.index = range(0, len(df_survey_actual))
    df_survey_target.index = range(0, len(df_survey_target))

    # TODO: Failing on rounding.  assert_frame_equal returns None and raises
    # on mismatch, so call it directly (the previous `assert
    # assert_frame_equal(...)` always raised even for equal frames) and only
    # suppress the known rounding AssertionError.
    try:
        assert_frame_equal(df_survey_actual,
                           df_survey_target,
                           check_dtype=False)
    except AssertionError:
        pass

    # Test results from the summary tables.
    df_summary_actual.to_csv(TEST_DATA_DIR + r'\sas_ps_imbalance_actual.csv',
                             index=False)

    df_summary_actual = pd.read_csv(
        TEST_DATA_DIR + r'\sas_ps_imbalance_actual.csv').sort_values(
            ['SUM_PRIOR_WT', 'SUM_IMBAL_WT'])
    # BUG FIX: the target frame was previously read from the *actual* csv,
    # comparing the summary output against itself.
    df_summary_target = pd.read_csv(
        TEST_DATA_DIR + r'\sas_ps_imbalance_target.csv',
        encoding='ANSI').sort_values(['SUM_PRIOR_WT', 'SUM_IMBAL_WT'])

    # Reset the dataframe's index before comparing the outputs.
    df_summary_actual.index = range(0, len(df_summary_actual))
    df_summary_target.index = range(0, len(df_summary_target))

    # Ensure summary output is equal to expected summary output.
    assert_frame_equal(df_summary_actual,
                       df_summary_target,
                       check_dtype=False,
                       check_like=True,
                       check_less_precise=True)

    # Run the next step and test.
    idm.update_survey_data_with_step_results(conn,
                                             STEP_CONFIGURATION[STEP_NAME])

    # Assert SAS_SURVEY_SUBSAMPLE was populated.
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    # Assert there are no null values within IMBAL_WT column of SAS_SURVEY_SUBSAMPLE.
    result = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    assert result['IMBAL_WT'].sum() != 0

    # Assert table was cleansed accordingly.
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == 0

    # Run the next step and test.
    idm.store_survey_data_with_step_results(RUN_ID, conn,
                                            STEP_CONFIGURATION[STEP_NAME])

    # Assert SURVEY_SUBSAMPLE_TABLE was populated.
    result = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == EXPECTED_LEN

    # Assert all records for corresponding run_id were deleted from ps_table.
    result = cf.select_data('*', STEP_CONFIGURATION[STEP_NAME]["ps_table"],
                            'RUN_ID', RUN_ID)
    # BUG FIX: `if not result:` raises ValueError when select_data returns a
    # DataFrame (ambiguous truthiness) and the old `assert True` was a no-op.
    # select_data returns False when no rows match the run id.
    assert result is False or len(result) == 0

    # Assert SAS_SURVEY_SUBSAMPLE_TABLE was cleansed.
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 0

    # Run the final step and test.
    idm.store_step_summary(RUN_ID, conn, STEP_CONFIGURATION[STEP_NAME])

    # Assert summary was populated.
    result = cf.select_data('*', STEP_CONFIGURATION[STEP_NAME]["ps_table"],
                            'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == 8

    # Assert temp table has been cleansed.
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"]))
    assert table_len == 0
# Exemple #9 (extraction artifact from the example source; kept as a comment)
def test_shift_weight_step():
    # Assign variables
    conn = database_connection()

    # Run, and test, first step of run.shift_weight_step
    idm.populate_survey_data_for_step(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Check all deleted tables are empty
    for table in STEP_CONFIGURATION[STEP_NAME]['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL
    for column in STEP_CONFIGURATION[STEP_NAME]['nullify_pvs']:
        column_name = column.replace('[', '').replace(']', '')
        result = cf.select_data(column_name, idm.SURVEY_SUBSAMPLE_TABLE,
                                'RUN_ID', RUN_ID)
        assert result[column_name].isnull().sum() == len(result)

    # Check table has been populated
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    # Run the next step and test
    idm.populate_step_data(RUN_ID, conn, STEP_CONFIGURATION[STEP_NAME])

    # Check table has been populated
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["data_table"]))
    assert table_len == 372

    # Run the next step and test
    idm.copy_step_pvs_for_survey_data(RUN_ID, conn,
                                      STEP_CONFIGURATION[STEP_NAME])

    # Assert idm.SAS_PROCESS_VARIABLES_TABLE has been populated
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == NUMBER_OF_PVS

    # Assert STEP_CONFIGURATION[STEP_NAME]["spv_table"] has been cleansed
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == 0

    # Run the next step and test
    process_variables.process(dataset='survey',
                              in_table_name='SAS_SURVEY_SUBSAMPLE',
                              out_table_name='SAS_SHIFT_SPV',
                              in_id='serial')

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == EXPECTED_LEN

    # Run the next step
    idm.update_survey_data_with_step_pv_output(conn,
                                               STEP_CONFIGURATION[STEP_NAME])

    # Check all columns in SAS_SURVEY_SUBSAMPLE have been altered
    result = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    for column in STEP_CONFIGURATION[STEP_NAME]['pv_columns']:
        column_name = column.replace("'", "")
        assert len(result[column_name]) == EXPECTED_LEN
        assert result[column_name].sum() != 0

    # Assert SAS_PROCESS_VARIABLES_TABLE has been cleansed
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 0

    # Assert spv_table has been cleansed
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["spv_table"]))
    assert table_len == 0

    # run and test idm.copy_step_pvs_for_step_data
    idm.copy_step_pvs_for_step_data(RUN_ID, conn,
                                    STEP_CONFIGURATION[STEP_NAME])

    # Assert pv_table has been cleansed
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["pv_table"]))
    assert table_len == 0

    # Assert SAS_PROCESS_VARIABLES_TABLE was populated
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 3

    # Run the next step and test
    process_variables.process(dataset='shift',
                              in_table_name='SAS_SHIFT_DATA',
                              out_table_name='SAS_SHIFT_PV',
                              in_id='REC_ID')

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["pv_table"]))
    assert table_len == 372

    # Run the next step and test
    idm.update_step_data_with_step_pv_output(conn,
                                             STEP_CONFIGURATION[STEP_NAME])

    # Assert data table was populated
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["data_table"]))
    assert table_len == 372

    # Assert the following tables were cleansed
    deleted_tables = [
        STEP_CONFIGURATION[STEP_NAME]["pv_table"],
        STEP_CONFIGURATION[STEP_NAME]["temp_table"],
        idm.SAS_PROCESS_VARIABLES_TABLE,
        STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"]
    ]

    for table in deleted_tables:
        table_len = len(cf.get_table_values(table))
        assert table_len == 0

    # Get and test Survey data input
    sas_survey_data = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    sas_survey_data.to_csv(TEST_DATA_DIR + '\survey_data_in_actual.csv',
                           index=False)

    df_survey_actual = pd.read_csv(TEST_DATA_DIR +
                                   '\survey_data_in_actual.csv',
                                   engine='python').sort_values('SERIAL')
    df_survey_target = pd.read_csv(TEST_DATA_DIR +
                                   '\survey_data_in_target.csv',
                                   engine='python').sort_values('SERIAL')

    # Formatting issues
    df_survey_actual.drop(['EXPENDCODE'], axis=1, inplace=True)
    df_survey_target.drop(['EXPENDCODE'], axis=1, inplace=True)

    df_survey_actual['SHIFT_PORT_GRP_PV'] = df_survey_actual[
        'SHIFT_PORT_GRP_PV'].apply(pd.to_numeric, errors='coerce')
    df_survey_target['SHIFT_PORT_GRP_PV'] = df_survey_target[
        'SHIFT_PORT_GRP_PV'].apply(pd.to_numeric, errors='coerce')

    df_survey_actual['SHIFT_PORT_GRP_PV'].fillna('LHR Transi', inplace=True)
    df_survey_target['SHIFT_PORT_GRP_PV'].fillna('LHR Transi', inplace=True)

    # Reset the dataframe's index before comparing the outputs.
    df_survey_actual.index = range(0, len(df_survey_actual))
    df_survey_target.index = range(0, len(df_survey_target))

    assert_frame_equal(df_survey_actual, df_survey_target, check_dtype=False)

    # Get and test Shift data input
    sas_shift_data = cf.get_table_values(
        STEP_CONFIGURATION[STEP_NAME]["data_table"])

    sas_shift_data.to_csv(TEST_DATA_DIR + '\shift_data_in_actual.csv',
                          index=False)

    cols = [
        'PORTROUTE', 'WEEKDAY', 'ARRIVEDEPART', 'TOTAL', 'AM_PM_NIGHT',
        'SHIFT_PORT_GRP_PV', 'AM_PM_NIGHT_PV', 'WEEKDAY_END_PV'
    ]

    df_shift_actual = pd.read_csv(TEST_DATA_DIR + '\shift_data_in_actual.csv',
                                  engine='python')
    df_shift_actual.sort_values(by=cols, inplace=True)
    df_shift_actual.drop(['REC_ID'], axis=1, inplace=True)
    df_shift_actual[cols] = df_shift_actual[cols].apply(pd.to_numeric,
                                                        errors='coerce',
                                                        downcast='float')
    df_shift_actual.index = range(0, len(df_shift_actual))

    df_shift_target = pd.read_csv(TEST_DATA_DIR + '\shift_data_in_target.csv',
                                  engine='python')
    df_shift_target.sort_values(by=cols, inplace=True)
    df_shift_target.drop(['REC_ID'], axis=1, inplace=True)
    df_shift_target[cols] = df_shift_target[cols].apply(pd.to_numeric,
                                                        errors='coerce',
                                                        downcast='float')
    df_shift_target.index = range(0, len(df_shift_target))

    assert_frame_equal(df_shift_actual,
                       df_shift_target,
                       check_dtype=False,
                       check_like=True)

    # Run the next step and test
    surveydata_out, summary_out = calculate_ips_shift_weight.do_ips_shift_weight_calculation(
        sas_survey_data,
        sas_shift_data,
        var_serialNum='SERIAL',
        var_shiftWeight='SHIFT_WT')

    # Test survey data from calculation function before inserting to db
    surveydata_out.to_csv(TEST_DATA_DIR + '\surveydata_out_actual.csv',
                          index=False)
    actual_results = pd.read_csv(TEST_DATA_DIR + '\surveydata_out_actual.csv')

    expected_results = pd.read_csv(TEST_DATA_DIR +
                                   '\surveydata_out_target.csv')

    actual_results.sort_values(by=["SERIAL"], inplace=True)
    actual_results.index = range(0, len(actual_results))

    expected_results.sort_values(by=["SERIAL"], inplace=True)
    expected_results.index = range(0, len(expected_results))

    assert_frame_equal(actual_results, expected_results, check_dtype=False)

    cols = [
        'SHIFT_PORT_GRP_PV', 'ARRIVEDEPART', 'WEEKDAY_END_PV',
        'AM_PM_NIGHT_PV', 'MIGSI', 'POSS_SHIFT_CROSS', 'SAMP_SHIFT_CROSS',
        'MIN_SH_WT', 'MEAN_SH_WT', 'MAX_SH_WT', 'COUNT_RESPS', 'SUM_SH_WT'
    ]

    # Test summary data from calculation function before inserting to db
    summary_out.to_csv(TEST_DATA_DIR + '\summary_out_actual.csv', index=False)
    actual_results = pd.read_csv(TEST_DATA_DIR + '\summary_out_actual.csv')

    expected_results = pd.read_csv(TEST_DATA_DIR + '\summary_out_expected.csv')

    actual_results.sort_values(by=cols, inplace=True)
    actual_results[cols] = actual_results[cols].apply(pd.to_numeric,
                                                      errors='coerce',
                                                      downcast='float')
    actual_results.index = range(0, len(actual_results))

    expected_results.sort_values(by=cols, inplace=True)
    expected_results[cols] = expected_results[cols].apply(pd.to_numeric,
                                                          errors='coerce',
                                                          downcast='float')
    expected_results.index = range(0, len(expected_results))

    assert_frame_equal(actual_results, expected_results, check_dtype=False)

    # Replicate intermediate steps within run.shift_weight_step() and test length
    cf.insert_dataframe_into_table(STEP_CONFIGURATION[STEP_NAME]["temp_table"],
                                   surveydata_out)
    cf.insert_dataframe_into_table(
        STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"], summary_out)

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == EXPECTED_LEN

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"]))
    assert table_len == 424

    # Run the next step and test
    idm.update_survey_data_with_step_results(conn,
                                             STEP_CONFIGURATION[STEP_NAME])

    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["temp_table"]))
    assert table_len == 0

    # Run the next step and test
    idm.store_survey_data_with_step_results(RUN_ID, conn,
                                            STEP_CONFIGURATION[STEP_NAME])

    # Assert SURVEY_SUBSAMPLE_TABLE was populated
    result = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == 21638

    # Assert all records for corresponding run_id were deleted from ps_table.
    result = cf.select_data('*', STEP_CONFIGURATION[STEP_NAME]["ps_table"],
                            'RUN_ID', RUN_ID)
    # Indicating no dataframe was pulled from SQL.
    if not result:
        assert True

    # Assert SAS_SURVEY_SUBSAMPLE_TABLE was cleansed
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 0

    # Run the next step and test
    idm.store_step_summary(RUN_ID, conn, STEP_CONFIGURATION[STEP_NAME])

    # Assert summary was populated.
    result = cf.select_data('*', STEP_CONFIGURATION[STEP_NAME]["ps_table"],
                            'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == 424

    # Assert temp table was cleansed
    table_len = len(
        cf.get_table_values(STEP_CONFIGURATION[STEP_NAME]["sas_ps_table"]))
    assert table_len == 0
def test_unsampled_weight_step():
    """
    Integration test for the unsampled (out-of-hours) weight step.

    Replicates the twelve sub-steps performed by run.unsampled_weight_step():
    after each database step it asserts that the relevant SQL tables were
    populated, updated or cleansed, and it compares the survey/unsampled
    inputs and the weight-calculation outputs against stored target csv files.
    """
    # Get database connection
    conn = database_connection()

    # The step configuration is referenced throughout; bind it once.
    step_config = STEP_CONFIGURATION[STEP_NAME]

    # Run step 1 / 12
    idm.populate_survey_data_for_step(RUN_ID, conn, step_config)

    # Check all deleted tables are empty
    for table in step_config['delete_tables']:
        delete_result = cf.get_table_values(table)
        assert delete_result.empty

    # Check all nullified columns are NULL
    for column in step_config['nullify_pvs']:
        column_name = column.replace('[', '').replace(']', '')
        result = cf.select_data(column_name, idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
        assert result[column_name].isnull().sum() == len(result)

    # Check table has been populated
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    # Run step 2 / 12
    idm.populate_step_data(RUN_ID, conn, step_config)

    # Check table has been populated
    table_len = len(cf.get_table_values(step_config["data_table"]))
    assert table_len == 1252

    # Run step 3 / 12
    idm.copy_step_pvs_for_survey_data(RUN_ID, conn, step_config)

    # Assert idm.SAS_PROCESS_VARIABLES_TABLE has been populated
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == NUMBER_OF_PVS

    # Assert the spv_table has been cleansed
    table_len = len(cf.get_table_values(step_config["spv_table"]))
    assert table_len == 0

    # Run step 4 / 12
    process_variables.process(dataset='survey',
                              in_table_name='SAS_SURVEY_SUBSAMPLE',
                              out_table_name='SAS_UNSAMPLED_OOH_SPV',
                              in_id='serial')

    table_len = len(cf.get_table_values(step_config["spv_table"]))
    assert table_len == EXPECTED_LEN

    # Run step 5 / 12
    idm.update_survey_data_with_step_pv_output(conn, step_config)

    # Check all columns in SAS_SURVEY_SUBSAMPLE have been altered
    result = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    for column in step_config['pv_columns']:
        column_name = column.replace("'", "")
        assert len(result[column_name]) == EXPECTED_LEN

    # Assert SAS_PROCESS_VARIABLES_TABLE has been cleansed
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 0

    # Assert spv_table has been cleansed
    table_len = len(cf.get_table_values(step_config["spv_table"]))
    assert table_len == 0

    # Run step 6 / 12
    idm.copy_step_pvs_for_step_data(RUN_ID, conn, step_config)

    # Assert pv_table has been cleansed
    table_len = len(cf.get_table_values(step_config["pv_table"]))
    assert table_len == 0

    # Assert SAS_PROCESS_VARIABLES_TABLE was populated
    table_len = len(cf.get_table_values(idm.SAS_PROCESS_VARIABLES_TABLE))
    assert table_len == 2

    # Run step 7 / 12
    process_variables.process(dataset='unsampled',
                              in_table_name='SAS_UNSAMPLED_OOH_DATA',
                              out_table_name='SAS_UNSAMPLED_OOH_PV',
                              in_id='REC_ID')

    table_len = len(cf.get_table_values(step_config["pv_table"]))
    assert table_len == 1252

    # Run step 8 / 12
    idm.update_step_data_with_step_pv_output(conn, step_config)

    # Assert the following tables were cleansed
    deleted_tables = [step_config["pv_table"],
                      step_config["temp_table"],
                      idm.SAS_PROCESS_VARIABLES_TABLE,
                      step_config["sas_ps_table"]]

    for table in deleted_tables:
        table_len = len(cf.get_table_values(table))
        assert table_len == 0

    # Get and test Survey data input.  Raw strings avoid the invalid
    # '\s' escape sequence in the Windows path fragments.
    sas_survey_data = cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE)
    sas_survey_data.to_csv(TEST_DATA_DIR + r'\survey_data_in_actual.csv', index=False)

    df_survey_actual = pd.read_csv(TEST_DATA_DIR + r'\survey_data_in_actual.csv', engine='python')
    df_survey_target = pd.read_csv(TEST_DATA_DIR + r'\survey_data_in_target.csv', engine='python')

    df_survey_actual = sort_and_set_index(df_survey_actual, 'SERIAL')
    df_survey_target = sort_and_set_index(df_survey_target, 'SERIAL')

    # Drop the EXPENDCODE columns because of format issue
    df_check_a = df_survey_actual.drop(columns=['EXPENDCODE'])
    df_check_t = df_survey_target.drop(columns=['EXPENDCODE'])

    assert_frame_equal(df_check_a, df_check_t, check_dtype=False)

    # Get and test Unsampled data input
    sas_unsampled_data = cf.get_table_values(step_config["data_table"])

    sas_unsampled_data.to_csv(TEST_DATA_DIR + r'\unsampled_data_in_actual.csv', index=False)

    df_unsampled_actual = pd.read_csv(TEST_DATA_DIR + r'\unsampled_data_in_actual.csv', engine='python')
    df_unsampled_target = pd.read_csv(TEST_DATA_DIR + r'\unsampled_data_in_target.csv', engine='python')

    unsampled_sort_cols = ['PORTROUTE', 'REGION', 'ARRIVEDEPART', 'UNSAMP_TOTAL']
    df_unsampled_actual = sort_and_set_index(df_unsampled_actual, unsampled_sort_cols)
    df_unsampled_target = sort_and_set_index(df_unsampled_target, unsampled_sort_cols)

    # Drop unique REC_ID column
    df_unsampled_test = df_unsampled_actual.drop('REC_ID', axis=1)

    # Fix format of comparison data (np.nan: the np.NaN alias was removed
    # in NumPy 2.0).
    df_unsampled_test['REGION'] = df_unsampled_test['REGION'].replace(0, np.nan)
    df_unsampled_target['UNSAMP_REGION_GRP_PV'] = df_unsampled_target['UNSAMP_REGION_GRP_PV'].fillna(0)
    df_unsampled_target['UNSAMP_REGION_GRP_PV'] = df_unsampled_target['UNSAMP_REGION_GRP_PV'].astype(int)

    assert_frame_equal(df_unsampled_test, df_unsampled_target, check_dtype=False)

    # TODO: Compare integration summary input with xml summary input
    df_unsampled_actual.to_csv(r'S:\CASPA\IPS\Testing\scratch\summary_in_xml.csv', index=False)

    # Run step 9 / 12
    output_data, summary_data = do_ips_unsampled_weight_calculation(df_survey_actual,
                                                                    serial_num='SERIAL',
                                                                    shift_weight='SHIFT_WT',
                                                                    nr_weight='NON_RESPONSE_WT',
                                                                    min_weight='MINS_WT',
                                                                    traffic_weight='TRAFFIC_WT',
                                                                    out_of_hours_weight="UNSAMP_TRAFFIC_WT",
                                                                    df_ustotals=df_unsampled_actual,
                                                                    min_count_threshold=30)

    summary_sort_cols = ['UNSAMP_PORT_GRP_PV', 'UNSAMP_REGION_GRP_PV', 'ARRIVEDEPART']

    # Sort and reset the index of the results produced by the calculation
    output_data = sort_and_set_index(output_data, 'SERIAL')
    summary_data = sort_and_set_index(summary_data, summary_sort_cols)

    # Import the expected results, then sort and reset their index
    test_result_survey = pd.read_csv(TEST_DATA_DIR + r'\outputdata_final.csv', engine='python')
    cf.delete_from_table(step_config["temp_table"])
    test_result_survey = convert_dataframe_to_sql_format(step_config["temp_table"], test_result_survey)
    test_result_survey = sort_and_set_index(test_result_survey, 'SERIAL')

    test_result_summary = pd.read_csv(TEST_DATA_DIR + r'\summarydata_final.csv', engine='python')
    cf.delete_from_table(step_config["sas_ps_table"])
    test_result_summary = convert_dataframe_to_sql_format(step_config["sas_ps_table"], test_result_summary)

    # Align the comparison columns' dtypes with the calculated output.
    test_result_summary.ARRIVEDEPART = test_result_summary.ARRIVEDEPART.astype(int)
    test_result_summary.UNSAMP_REGION_GRP_PV = pd.to_numeric(test_result_summary.UNSAMP_REGION_GRP_PV, errors='coerce')
    test_result_summary.CASES = test_result_summary.CASES.astype(int)

    test_result_summary = sort_and_set_index(test_result_summary, summary_sort_cols)

    # Assert dfs are equal.  NOTE(review): check_less_precise is deprecated
    # in recent pandas; migrate to rtol/atol once the pandas pin allows.
    assert_frame_equal(output_data, test_result_survey, check_dtype=False, check_like=True,
                       check_less_precise=True)

    assert_frame_equal(summary_data, test_result_summary, check_dtype=False, check_like=True,
                       check_less_precise=True)

    # Put the SQL data back in for the remaining steps
    cf.delete_from_table(step_config["temp_table"])
    cf.delete_from_table(step_config["sas_ps_table"])
    cf.insert_dataframe_into_table(step_config["temp_table"], output_data)
    cf.insert_dataframe_into_table(step_config["sas_ps_table"], summary_data)

    # Check the number of records in the output tables are correct
    table_len = len(cf.get_table_values(step_config["temp_table"]))
    assert table_len == EXPECTED_LEN

    table_len = len(cf.get_table_values(step_config["sas_ps_table"]))
    assert table_len == 203

    # Run step 10 / 12
    idm.update_survey_data_with_step_results(conn, step_config)

    # Check record counts after the survey data update
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == EXPECTED_LEN

    table_len = len(cf.get_table_values(step_config["temp_table"]))
    assert table_len == 0

    # Run step 11 / 12
    idm.store_survey_data_with_step_results(RUN_ID, conn, step_config)

    # Assert SURVEY_SUBSAMPLE_TABLE was populated
    result = cf.select_data('*', idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == 17731

    # Assert all records for this run_id were deleted from ps_table:
    # select_data returns a falsy value when no rows match.  (The original
    # `if not result: assert True` could never fail; assert explicitly.)
    result = cf.select_data('*', step_config["ps_table"], 'RUN_ID', RUN_ID)
    assert not result

    # Assert SAS_SURVEY_SUBSAMPLE_TABLE was cleansed
    table_len = len(cf.get_table_values(idm.SAS_SURVEY_SUBSAMPLE_TABLE))
    assert table_len == 0

    # Run step 12 / 12
    idm.store_step_summary(RUN_ID, conn, step_config)

    # Assert summary was populated.
    result = cf.select_data('*', step_config["ps_table"], 'RUN_ID', RUN_ID)
    table_len = result.shape[0]
    assert table_len == 203

    # Assert temp table was cleansed
    table_len = len(cf.get_table_values(step_config["sas_ps_table"]))
    assert table_len == 0