Code Example #1
    def _generated_form_test_output(self, parent_trace, form_request,
                                    fr_response, fr_log_txt, fr_rep,
                                    generated_form_worksheet):
        '''
        Helper method that returns 4 strings with information on the generated form, so that it can be
        validated against regression output.

        It returns (in this order):

        * A string with layout information (e.g., # of columns, rows, widths, etc)
        * A string with Excel formatting information for the posting label worksheet of the generated form
        * A string with Excel formatting information for the main worksheet of the generated form
        * A string with the contents of the Posting Label that was generated
        '''
        # Check that the layout for the generated form is right
        layout_output_nice = ""
        for key in fr_response.getManifestIdentifiers(parent_trace):
            layout_output_dict = {}

            layout_output_dict['layout span'] = fr_rep.span_dict[key]
            layout_output_dict['hidden columns'] = fr_rep.hidden_cols_dict[key]
            widths_dict = fr_rep.widths_dict_dict[key]
            layout_output_dict['column widths'] = DictionaryFormatter(
            ).dict_2_nice(parent_trace=parent_trace, a_dict=widths_dict)
            layout_output_dict['total width'] = sum(
                [widths_dict[k]['width'] for k in widths_dict.keys()])

            layout_output_nice += "************** Layout information for Manifest '" + key + "' **********\n\n"

            layout_output_nice += DictionaryFormatter().dict_2_nice(
                parent_trace=parent_trace, a_dict=layout_output_dict)

        # Extract Excel formatting for the generated posting label
        posting_label_ws_info = fr_rep.worksheet_info_dict[
            ManifestRepresenter.POSTING_LABEL_SHEET]
        pl_ws_info_nice = self._nice_ws_info(parent_trace,
                                             posting_label_ws_info)

        # Extract Excel formatting for the main worksheet containing the manifests' information
        worksheet_info = fr_rep.worksheet_info_dict[generated_form_worksheet]
        ws_info_nice = self._nice_ws_info(parent_trace, worksheet_info)

        # Extract Posting Label content
        label_ctx = fr_rep.label_ctx

        label_ctx_nice = "************** Content of Posting Label **********\n\n"

        label_ctx_nice += DictionaryFormatter().dict_2_nice(
            parent_trace=parent_trace, a_dict=label_ctx)

        return layout_output_nice, pl_ws_info_nice, ws_info_nice, label_ctx_nice
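
A caller would typically feed the four returned strings into the regression-comparison helpers seen in the later examples. The following is a hypothetical usage sketch, not code from the repository: the test name and the form_request/fr_* objects are assumed to come from whatever generated the form under test.

    # Hypothetical sketch: validate each of the four returned strings against
    # previously saved regression output, using the _compare_to_expected_txt
    # helper that appears in Code Examples #2, #6, #11 and #15.
    layout_txt, pl_fmt_txt, ws_fmt_txt, label_txt = self._generated_form_test_output(
        root_trace, form_request, fr_response, fr_log_txt, fr_rep,
        generated_form_worksheet)

    for suffix, txt in [("layout", layout_txt), ("posting_label_fmt", pl_fmt_txt),
                        ("worksheet_fmt", ws_fmt_txt), ("label_ctx", label_txt)]:
        self._compare_to_expected_txt(parent_trace=root_trace,
                                      output_txt=txt,
                                      test_output_name="my_form_test_" + suffix,
                                      save_output_txt=True)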
Code Example #2
    def test_a6i_config(self):
        try:
            root_trace = FunctionalTrace(
                parent_trace=None, path_mask=self._path_mask).doing(
                    "Testing loading for Apodeixi Config")
            config = ApodeixiConfig(root_trace)

            # To ensure deterministic output, mask the parent part of any path mentioned in the configuration
            # before displaying it in regression output
            #
            clean_dict = DictionaryUtils().apply_lambda(
                parent_trace=root_trace,
                root_dict=config.config_dict,
                root_dict_name="Apodeixi config",
                lambda_function=self._path_mask)

            config_txt = DictionaryFormatter().dict_2_nice(
                parent_trace=root_trace, a_dict=clean_dict, flatten=True)

            self._compare_to_expected_txt(parent_trace=root_trace,
                                          output_txt=config_txt,
                                          test_output_name='test_a6i_config',
                                          save_output_txt=True)
        except ApodeixiError as ex:
            print(ex.trace_message())
            self.assertTrue(1 == 2)
Code Example #3
    def as_string(self, parent_trace):
        introspection_nice = DictionaryFormatter().dict_2_nice(
            parent_trace=parent_trace,
            a_dict=self.introspection_dict,
            flatten=True,
            delimeter="::")
        return introspection_nice
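
Every example in this section funnels its dictionary through DictionaryFormatter().dict_2_nice, whose implementation is not shown here. Based on the call sites above, a minimal self-contained sketch of the assumed flatten-with-delimiter behavior follows; the flatten_dict helper and the sample dictionary are illustrative, not the library's actual code (delimeter is the parameter's spelling at the call sites, so it is kept here).

    # Sketch of the flattening that dict_2_nice(flatten=True, delimeter="::")
    # appears to perform, judging from the call sites in this section.
    def flatten_dict(a_dict, delimeter="::", prefix=""):
        '''Recursively flattens nested dicts into {"outer::inner": value} entries.'''
        flat = {}
        for key, val in a_dict.items():
            full_key = prefix + delimeter + str(key) if prefix else str(key)
            if isinstance(val, dict):
                flat.update(flatten_dict(val, delimeter, full_key))
            else:
                flat[full_key] = val
        return flat

    sample = {'introspection': {'class': 'MyClass', 'fields': {'count': 3}}}
    for key, val in flatten_dict(sample).items():
        print(key + ": " + str(val))
    # introspection::class: MyClass
    # introspection::fields::count: 3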
Code Example #4
    def _nice_ws_info(self, parent_trace, worksheet_info):
        nice_format = ''
        nice_format += "\n======================== Column information ==========================\n"
        nice_format += DictionaryFormatter().dict_2_nice(
            parent_trace=parent_trace, a_dict=worksheet_info.colinfo)

        fmt_dict = worksheet_info.format_dict
        for row_nb in fmt_dict.keys():
            row_dict = fmt_dict[row_nb]
            for col_nb in row_dict.keys():
                nice_format += "\n\n================ Formats row = " + str(
                    row_nb) + ", col = " + str(col_nb) + " ============"
                cell_fmt_dict = row_dict[col_nb]
                nice = DictionaryFormatter().dict_2_nice(
                    parent_trace=parent_trace, a_dict=cell_fmt_dict)
                nice_format += "\n" + nice
        return nice_format
Code Example #5
def nice_ws_info(parent_trace, worksheet_info):
    '''
    Helper method to create a "nice string" that can be output in a readable form (for example, in test outputs)

    @param worksheet_info An instance of apodeixi.representers.as_excel.XL_WorksheetInfo that should be formatted as a string
    '''
    nice_format = ''
    nice_format += "\n======================== Column information =========================="
    nice_format += DictionaryFormatter().dict_2_nice(
        parent_trace=parent_trace, a_dict=worksheet_info.colinfo)

    fmt_dict = worksheet_info.format_dict
    for row_nb in fmt_dict.keys():
        row_dict = fmt_dict[row_nb]
        for col_nb in row_dict.keys():
            nice_format += "\n\n================ Formats row = " + str(
                row_nb) + ", col = " + str(col_nb) + " ============"
            cell_fmt_dict = row_dict[col_nb]
            nice = DictionaryFormatter().dict_2_nice(parent_trace=parent_trace,
                                                     a_dict=cell_fmt_dict)
            nice_format += "\n" + nice
    return nice_format
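
Both nice_ws_info variants assume the same nested shape for worksheet_info.format_dict: row numbers mapping to column numbers mapping to a dict of cell format attributes. Below is a self-contained sketch of that assumed structure and the traversal it supports; the attribute names ('bold', 'bg_color') are illustrative guesses, not the actual XL_WorksheetInfo fields.

    # Illustrative stand-in for worksheet_info.format_dict: {row: {col: {attr: value}}}
    format_dict = {
        0: {0: {'bold': True, 'bg_color': '#0070C0'},
            1: {'bold': False}},
        1: {0: {'bg_color': '#FFFFFF'}},
    }

    nice_format = ''
    for row_nb in format_dict.keys():
        for col_nb in format_dict[row_nb].keys():
            nice_format += "\n\n================ Formats row = " + str(row_nb) \
                           + ", col = " + str(col_nb) + " ============"
            for attr, val in format_dict[row_nb][col_nb].items():
                nice_format += "\n" + str(attr) + ": " + str(val)
    print(nice_format)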
Code Example #6
    def _locate_workstream_postings(self, posting_api, test_name):

        coords_txt = ''
        root_trace = FunctionalTrace(
            parent_trace=None, path_mask=self._path_mask).doing(
                "Testing File Knowledge Base::locate postings")
        try:

            def _coords_filter(coords):
                return coords.scoringCycle == "FY 22"  # and coords.scenario == "MTP"

            scanned_handles = self.stack().store().searchPostings(
                parent_trace=root_trace,
                posting_api=posting_api,
                filing_coordinates_filter=_coords_filter)

            # To ensure that regression test output is deterministic across Windows and Linux/Containers, sort the scanned
            # handles before going further
            scanned_handles = sorted(
                scanned_handles,
                key=lambda handle: format(handle.filing_coords
                                          ) + handle.excel_filename)

            # Keys in coords_dict are objects; turn them into strings to print test output
            stringified_coords_dict = {}

            idx = 1
            for handle in scanned_handles:
                stringified_coords_dict[str(idx) + "." + format(
                    handle.filing_coords, '')] = handle.excel_filename
                idx += 1

            coords_txt = "--------------------- Workstreams with a filing structure in the KnowledgeBase\n\n"
            coords_txt += DictionaryFormatter().dict_2_nice(
                parent_trace=root_trace,
                a_dict=stringified_coords_dict,
                flatten=True)

        except ApodeixiError as ex:
            print(ex.trace_message())
            self.assertTrue(1 == 2)

        self._compare_to_expected_txt(parent_trace=root_trace,
                                      output_txt=coords_txt,
                                      test_output_name=test_name,
                                      save_output_txt=True)
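
The sort on format(handle.filing_coords) + handle.excel_filename exists because directory scans come back in platform-dependent order, and regression output must be byte-identical across Windows and Linux. A self-contained illustration of the trick; the Handle stand-in is hypothetical, since the real handle objects carry more state.

    # Deterministic ordering: sort scan results on a composite string key so
    # regression output is stable regardless of filesystem enumeration order.
    from collections import namedtuple

    Handle = namedtuple('Handle', ['filing_coords', 'excel_filename'])  # stand-in

    scanned = [Handle('FY 22/MTP', 'b.xlsx'),
               Handle('FY 22/MTP', 'a.xlsx'),
               Handle('FY 22/Baseline', 'z.xlsx')]

    scanned = sorted(scanned, key=lambda h: format(h.filing_coords) + h.excel_filename)
    print([h.excel_filename for h in scanned])  # ['z.xlsx', 'a.xlsx', 'b.xlsx']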
Code Example #7
def config(kb_session, environment):
    '''
    Displays the Apodeixi configuration.
    '''
    T0 = _datetime.datetime.now()
    func_trace = FunctionalTrace(parent_trace=None, path_mask=None)
    root_trace = func_trace.doing("CLI call to get config",
                                  origination={'signaled_from': __file__})
    try:
        if environment is not None:
            kb_session.store.activate(parent_trace=root_trace,
                                      environment_name=environment)
            click.echo(CLI_Utils().sandox_announcement(environment))

        client_url = kb_session.store.getClientURL(root_trace)
        postings_url = kb_session.store.getPostingsURL(root_trace)
        click.echo("\n----- Current environment -----")
        click.echo("\nclient URL:\t\t" + str(client_url))
        click.echo("postings URL:\t\t" + str(postings_url))
        config_txt = DictionaryFormatter().dict_2_nice(
            root_trace, kb_session.a6i_config.config_dict, flatten=True)
        click.echo("\n\n----- Config Settings -----")
        click.echo("\n" + config_txt)
        output = "Success"
        click.echo(output)
    except ApodeixiError as ex:
        error_msg = CLI_ErrorReporting(kb_session).report_a6i_error(
            parent_trace=root_trace, a6i_error=ex)
        # GOTCHA
        #       Use print, not click.echo or click exception because they don't correctly display styling
        #       (colors, underlines, etc.). So use vanilla Python print and then exit
        print(error_msg)
        _sys.exit()
    except Exception as ex:
        click.echo("Unrecoverable error: " + str(ex))
        _sys.exit()
    T1 = _datetime.datetime.now()
    duration = T1 - T0

    # total_seconds() avoids stitching seconds and microseconds together by hand,
    # which dropped the zero-padding on the microseconds part
    duration_msg = "%.6f sec" % duration.total_seconds()
    click.echo(duration_msg)
Code Example #8
    def _compare_to_expected_df(self, parent_trace, output_df, test_output_name, 
                                output_data_dir, expected_data_dir, columns_to_ignore=[], id_column=None):
        '''
        Utility method for derived classes that creates DataFrames (saved as CSV files) and checks they match an expected output
        previously saved as a CSV file as well.

        It also saves the output as a CSV file, which can be copied to become the expected output when a test case is first created.

        @param output_data_dir Directory to which to save any output.
        @param expected_data_dir Directory from which to retrieve any previously saved expected output.
        @param columns_to_ignore List of column names (possibly empty), for columns that should be excluded from the comparison
        @param id_column A string representing the column that should be used to identify rows in comparison text produced. 
                         If set to None, then the row index is used.
        '''
        OUTPUT_FOLDER               = output_data_dir
        EXPECTED_FOLDER             = expected_data_dir
        OUTPUT_FILE                 = test_output_name + '_OUTPUT.csv'
        EXPECTED_FILE               = test_output_name + '_EXPECTED.csv'
        OUTPUT_COMPARISON_FILE      = test_output_name + '_comparison_OUTPUT.txt'
        EXPECTED_COMPARISON_FILE    = test_output_name + '_comparison_EXPECTED.txt'

        # Check not null, or else the rest of the actions will "gracefully do nothing" and give the false impression that the test passes
        # (at least it would erroneously pass when the expected output is set to an empty file)
        self.assertIsNotNone(output_df)

        PathUtils().create_path_if_needed(parent_trace = parent_trace, path = output_data_dir)

        OUTPUT_COLUMNS              = [col for col in output_df.columns if col not in columns_to_ignore]
        output_df[OUTPUT_COLUMNS].to_csv(OUTPUT_FOLDER + '/' + OUTPUT_FILE)

        if type(output_df.columns) == _pd.MultiIndex: # Will need headers to load properly
            nb_levels               = len(output_df.columns.levels)
            header                  = list(range(nb_levels))
        else:
            header                  = 0

        # Load the output we just saved, which we'll use for regression comparison since in Pandas the act of loading will
        # slightly change formats (e.g., strings for numbers become Numpy numbers) 
        # and we want to apply the same such changes as were applied to the expected output,
        # to avoid frivolous differences that don't deserve to cause this test to fail
        loaded_output_df            = self.load_csv(parent_trace, 
                                            path        = OUTPUT_FOLDER + '/' + OUTPUT_FILE,
                                            header      = header)

        # Retrieve expected output
        expected_df                 = self.load_csv(parent_trace, 
                                            path        = EXPECTED_FOLDER + '/' + EXPECTED_FILE,
                                            header      = header)

        EXPECTED_COLUMNS            = [col for col in expected_df.columns if col not in columns_to_ignore]

        # GOTCHA: 
        # 
        #   OUTPUT_COLUMNS may differ from LOADED_OUTPUT_COLUMNS in the case of MultiLevel indices because
        # of padding introduced in the load. For example, a column like ('Comment', '') in OUTPUT_COLUMNS
        # will become ('Comment', 'Unnamed: 1_level_1'). So to compare, we use the LOADED columns
        LOADED_OUTPUT_COLUMNS       = [col for col in loaded_output_df.columns if col not in columns_to_ignore]

        my_trace                    = parent_trace.doing("Invoking the DataFrameComparator")
        comparator                  = DataFrameComparator(  df1         = loaded_output_df[LOADED_OUTPUT_COLUMNS], 
                                                            df1_name    = "output",
                                                            df2         = expected_df[EXPECTED_COLUMNS], 
                                                            df2_name    = "expected",
                                                            id_column   = id_column)

        check, comparison_dict      = comparator.compare(my_trace)

        df_comparison_nice          = DictionaryFormatter().dict_2_nice(    parent_trace    = parent_trace,
                                                                            a_dict          = comparison_dict, 
                                                                            flatten         = True)
        with open(OUTPUT_FOLDER + '/'  + OUTPUT_COMPARISON_FILE, 'w', encoding="utf8") as file:
            file.write(df_comparison_nice)
        try:
            with open(EXPECTED_FOLDER + '/'  + EXPECTED_COMPARISON_FILE, 'r', encoding="utf8") as file:
                expected_df_comparison  = file.read()    
        except FileNotFoundError as ex:
            raise ApodeixiError(parent_trace, "Can't load comparison file because it doesn't exist",
                                    data = {'file':             EXPECTED_COMPARISON_FILE,
                                            'path':             EXPECTED_FOLDER + '/'  + EXPECTED_COMPARISON_FILE,
                                            'error':            str(ex)})
        self.assertEqual(df_comparison_nice,       expected_df_comparison)
        self.assertTrue(check)
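
The GOTCHA above is standard pandas behavior: when MultiIndex columns with an empty second-level label make a round trip through CSV, read_csv pads the blank header cell with an 'Unnamed: ...' placeholder. A self-contained demonstration (the file name is arbitrary, and the exact placeholder text may vary by pandas version):

    import pandas as pd

    df = pd.DataFrame([[1, 'note']],
                      columns=pd.MultiIndex.from_tuples([('Effort', 'FY 22'),
                                                         ('Comment', '')]))
    df.to_csv('roundtrip.csv')
    loaded = pd.read_csv('roundtrip.csv', header=[0, 1], index_col=0)

    print(df.columns.tolist())      # [('Effort', 'FY 22'), ('Comment', '')]
    print(loaded.columns.tolist())  # ('Comment', '') comes back padded,
                                    # e.g. ('Comment', 'Unnamed: 2_level_1')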
Code Example #9
    def test_simple_burnout(self):
        '''
        Tests the "internal logic" of a controller: the _buildAllManifests method, using a simple mock KnowledgeBaseStore
        suitable for unit tests.
        '''

        EXCEL_FILE = 'simple_burnout_INPUT.xlsx'
        SHEET = 'simple burnout'

        MANIFEST_FILE_PREFIX = 'simple_burnout'

        STORE_IMPL = UnitTest_KnowledgeBaseStore(
            test_case_name=MANIFEST_FILE_PREFIX,
            input_manifests_dir=self.input_data,
            input_postings_dir=self.input_data,
            output_manifests_dir=self.output_data,
            output_postings_dir=self.output_data)
        root_trace = FunctionalTrace(parent_trace=None,
                                     path_mask=self._path_mask).doing(
                                         "Discovering URL",
                                         data={
                                             'path': EXCEL_FILE,
                                             'sheet': SHEET
                                         })

        STORE = KnowledgeBaseStore(root_trace, STORE_IMPL)
        posting_handle = STORE.buildPostingHandle(root_trace,
                                                  EXCEL_FILE,
                                                  sheet=SHEET,
                                                  excel_range="B2:C100")

        MANIFESTS_OUTPUT_DIR = self.output_data
        MANIFESTS_EXPECTED_DIR = self.expected_data
        EXPLANATIONS_OUTPUT = 'simple_burnout_explanations_OUTPUT.yaml'
        EXPLANATIONS_EXPECTED = 'simple_burnout_explanations_EXPECTED.yaml'
        all_manifests_dict = {}

        PL = big_rocks.BigRocksEstimate_Controller._MyPostingLabel  # Abbreviation for readability purposes

        try:
            root_trace = FunctionalTrace(
                parent_trace=None, path_mask=self._path_mask).doing(
                    "Generating Big Rocks (simple burnout)")

            controller = big_rocks.BigRocksEstimate_Controller(
                root_trace, STORE, a6i_config=self.a6i_config)
            all_manifests_dict, label, = controller._buildAllManifests(
                root_trace, posting_handle)

            NB_MANIFESTS_EXPECTED = 3
            if len(all_manifests_dict.keys()) != NB_MANIFESTS_EXPECTED:
                raise ApodeixiError(
                    root_trace, 'Expected ' + str(NB_MANIFESTS_EXPECTED) +
                    ' manifests, but found ' + str(len(all_manifests_dict.keys())))

            for manifest_nb in all_manifests_dict.keys():
                manifest_dict = all_manifests_dict[manifest_nb]

                STORE.persistManifest(root_trace, manifest_dict)

            # Make explanations readable by creating a pretty-printed string
            explanations_dict = controller.show_your_work.as_dict(
                root_trace) | controller.link_table.as_dict(root_trace)
            explanations_nice = DictionaryFormatter().dict_2_nice(
                parent_trace=root_trace,
                a_dict=explanations_dict,
                flatten=True,
                delimeter="::")
            with open(MANIFESTS_OUTPUT_DIR + '/' + EXPLANATIONS_OUTPUT,
                      'w') as file:
                file.write(explanations_nice)

        except ApodeixiError as ex:
            print(ex.trace_message())
            self.assertTrue(1 == 2)

        self.assertTrue(len(all_manifests_dict) == NB_MANIFESTS_EXPECTED)
        for manifest_nb in all_manifests_dict.keys():
            manifest_dict = all_manifests_dict[manifest_nb]
            kind = manifest_dict['kind']
            self._compare_to_expected_yaml(root_trace, manifest_dict,
                                           MANIFEST_FILE_PREFIX + "." + kind)
        with open(MANIFESTS_EXPECTED_DIR + '/' + EXPLANATIONS_EXPECTED,
                  'r') as file:
            expected_explain = file.read()
        self.assertEqual(explanations_nice, expected_explain)
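
One portability note on this example: merging the two as_dict results with the | operator relies on PEP 584 dict union, which is available only on Python 3.9 and later. On older interpreters an equivalent spelling (right-hand keys win on collision in both forms) would be:

    # Equivalent to `a | b` on Python < 3.9; later keys override earlier ones.
    explanations_dict = {**controller.show_your_work.as_dict(root_trace),
                         **controller.link_table.as_dict(root_trace)}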
Code Example #10
    def _shell_test_case(self, name, viewport_width, viewport_height,
                         max_word_length):

        INPUT_FOLDER = self.input_data
        INPUT_FILE = name + '_INPUT.csv'
        OUTPUT_FOLDER = self.output_data
        OUTPUT_FILE = name + '_OUTPUT.csv'
        EXPECTED_FOLDER = self.expected_data
        EXPECTED_FILE = name + '_EXPECTED.csv'

        OUTPUT_COMPARISON_FILE = name + '_comparison_OUTPUT.txt'
        EXPECTED_COMPARISON_FILE = name + '_comparison_EXPECTED.txt'

        OUTPUT_EXPLAIN_FILE = name + '_explain_OUTPUT.txt'
        EXPECTED_EXPLAIN_FILE = name + '_explain_EXPECTED.txt'

        OUTPUT_RESULTS_FILE = name + '_results_OUTPUT.txt'
        EXPECTED_RESULTS_FILE = name + '_results_EXPECTED.txt'

        try:
            root_trace = FunctionalTrace(
                parent_trace=None, path_mask=self._path_mask).doing(
                    "Testing computation of column widths")

            data_df = self.load_csv(root_trace,
                                    INPUT_FOLDER + '/' + INPUT_FILE)

            calc = ColumnWidthCalculator(data_df=data_df,
                                         viewport_width=viewport_width,
                                         viewport_height=viewport_height,
                                         max_word_length=max_word_length)
            result_dict = calc.calc(root_trace)
            output_df = calc.analysis_df
            output_explain = '\n'.join(calc.explanations)
            # Save DataFrame, explain and results in case the assertion below fails, so that we can do
            # a visual comparison of OUTPUT vs EXPECTED csv files
            output_df.to_csv(OUTPUT_FOLDER + '/' + OUTPUT_FILE)
            with open(OUTPUT_FOLDER + '/' + OUTPUT_EXPLAIN_FILE, 'w') as file:
                file.write(output_explain)
            # Make results readable by creating a pretty-printed string
            result_nice = DictionaryFormatter().dict_2_nice(
                parent_trace=root_trace, a_dict=result_dict)
            with open(OUTPUT_FOLDER + '/' + OUTPUT_RESULTS_FILE, 'w') as file:
                file.write(result_nice)

            # Load the output we just saved, which we'll use for regression comparison since in Pandas the act of loading will
            # slightly change formats and we want to apply the same such changes as were applied to the expected output,
            # to avoid frivolous differences that don't deserve to cause this test to fail
            loaded_output_df = self.load_csv(root_trace,
                                             OUTPUT_FOLDER + '/' + OUTPUT_FILE)

            # Now load the expected output.
            expected_df = self.load_csv(root_trace,
                                        EXPECTED_FOLDER + '/' + EXPECTED_FILE)

            check, comparison_dict = self._compare_dataframes(
                df1=loaded_output_df,
                df1_name="output",
                df2=expected_df,
                df2_name="expected")

            df_comparison_nice = DictionaryFormatter().dict_2_nice(
                parent_trace=root_trace, a_dict=comparison_dict, flatten=True)
            with open(OUTPUT_FOLDER + '/' + OUTPUT_COMPARISON_FILE,
                      'w') as file:
                file.write(df_comparison_nice)

            with open(EXPECTED_FOLDER + '/' + EXPECTED_COMPARISON_FILE,
                      'r') as file:
                expected_df_comparison = file.read()
            with open(EXPECTED_FOLDER + '/' + EXPECTED_EXPLAIN_FILE,
                      'r') as file:
                expected_explain = file.read()
            with open(EXPECTED_FOLDER + '/' + EXPECTED_RESULTS_FILE,
                      'r') as file:
                expected_result = file.read()

        except ApodeixiError as ex:
            print(ex.trace_message())
            self.assertTrue(1 == 2)

        self.assertEqual(df_comparison_nice, expected_df_comparison)
        self.assertTrue(check)
        self.assertEqual(output_explain, expected_explain)
        self.assertEqual(result_nice, expected_result)
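
The save-then-reload step in this example (and in Code Example #8) matters because a CSV round trip retypes cells: numbers that were held as strings come back as numpy numbers, so comparing a freshly built DataFrame against a loaded one would flag spurious differences. A quick self-contained demonstration:

    import pandas as pd

    df = pd.DataFrame({'width': ['12', '40']})   # numbers held as strings
    df.to_csv('tmp_OUTPUT.csv')
    loaded = pd.read_csv('tmp_OUTPUT.csv', index_col=0)

    print(df['width'].dtype)      # object
    print(loaded['width'].dtype)  # int64 -- the round trip retyped the column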
Code Example #11
    def test_compare_dataframes(self):
        root_trace = FunctionalTrace(
            parent_trace=None,
            path_mask=self._path_mask).doing("Testing DataFrame Comparison")
        try:
            INPUT_FOLDER = self.input_data
            OUTPUT_FOLDER = self.output_data
            TEST_SCENARIO = 'test_compare_dataframes'

            my_trace = root_trace.doing("Loading and comparing the dataframes")

            df1 = self.load_csv(
                my_trace,
                INPUT_FOLDER + '/' + TEST_SCENARIO + '_df1_INPUT.csv')
            df2 = self.load_csv(
                my_trace,
                INPUT_FOLDER + '/' + TEST_SCENARIO + '_df2_INPUT.csv')

            comparator = DataFrameComparator(df1=df1,
                                             df2=df2,
                                             df1_name="df1",
                                             df2_name="df2",
                                             id_column='Diario')

            check, comparison_dict = comparator.compare(my_trace)

            my_trace = root_trace.doing("Formatting comparison text")

            output_txt = ''
            for key in comparison_dict:
                output_txt += "\n\n-------------------- " + str(
                    key) + ' ----------------------\n'
                val = comparison_dict[key]
                if isinstance(val, dict):
                    for sub_key in val.keys():
                        output_txt += str(sub_key) + ":\n"
                        subval = val[sub_key]
                        if isinstance(subval, dict):
                            subval_txt = DictionaryFormatter().dict_2_nice(
                                parent_trace=my_trace,
                                a_dict=subval,
                                flatten=True)
                        else:
                            subval_txt = str(subval)
                        output_txt += "" + subval_txt + "\n"

                else:
                    output_txt += str(val)

            self._compare_to_expected_txt(parent_trace=my_trace,
                                          output_txt=output_txt,
                                          test_output_name=TEST_SCENARIO +
                                          '_comparison',
                                          save_output_txt=True)

            # The comparison should find the two DataFrames to be different
            self.assertFalse(check)

        except ApodeixiError as ex:
            print(ex.trace_message())
            self.assertTrue(1 == 2)
Code Example #12
    def test_feature_injection(self):

        EXCEL_FILE = 'feature_injection_INPUT.xlsx'
        SHEET = 'Feature Injection'

        MANIFEST_FILE_PREFIX = 'feature_injection'

        STORE_IMPL = UnitTest_KnowledgeBaseStore(
            test_case_name=MANIFEST_FILE_PREFIX,
            input_manifests_dir=self.input_data,
            input_postings_dir=self.input_data,
            output_manifests_dir=self.output_data,
            output_postings_dir=self.output_data)

        root_trace = FunctionalTrace(parent_trace=None,
                                     path_mask=self._path_mask).doing(
                                         "Discovering URL",
                                         data={
                                             'path': EXCEL_FILE,
                                             'sheet': SHEET
                                         })

        STORE = KnowledgeBaseStore(root_trace, STORE_IMPL)
        posting_handle = STORE.buildPostingHandle(root_trace,
                                                  EXCEL_FILE,
                                                  sheet=SHEET,
                                                  excel_range="B2:C100")

        MANIFESTS_OUTPUT_DIR = self.output_data
        MANIFESTS_EXPECTED_DIR = self.expected_data
        EXPLANATIONS_OUTPUT = 'feature_injection_explanations_OUTPUT.yaml'
        EXPLANATIONS_EXPECTED = 'feature_injection_explanations_EXPECTED.yaml'
        manifest_dict = {}

        try:
            root_trace = FunctionalTrace(
                parent_trace=None,
                path_mask=self._path_mask).doing("Generating BDD scaffolding")

            controller = ctrl.CapabilityHierarchy_Controller(
                root_trace, STORE, a6i_config=self.a6i_config)
            all_manifests_dict, label, = controller._buildAllManifests(
                parent_trace=root_trace, posting_label_handle=posting_handle)

            if len(all_manifests_dict) != 1:
                raise ApodeixiError(
                    root_trace, 'Expected one manifest, but found ' +
                    str(len(all_manifests_dict)))

            manifest_dict = all_manifests_dict[0]

            STORE.persistManifest(root_trace, manifest_dict)

            # Make explanations readable by creating a pretty-printed string
            explanations_dict = controller.show_your_work.as_dict(
                root_trace) | controller.link_table.as_dict(root_trace)
            explanations_nice = DictionaryFormatter().dict_2_nice(
                parent_trace=root_trace,
                a_dict=explanations_dict,
                flatten=True,
                delimeter="::")
            with open(MANIFESTS_OUTPUT_DIR + '/' + EXPLANATIONS_OUTPUT,
                      'w') as file:
                file.write(explanations_nice)

        except ApodeixiError as ex:
            print(ex.trace_message())
            self.assertTrue(1 == 2)

        with open(MANIFESTS_EXPECTED_DIR + '/' + EXPLANATIONS_EXPECTED,
                  'r') as file:
            expected_explain = file.read()
        self._compare_to_expected_yaml(root_trace,
                                       manifest_dict,
                                       'feature_injection',
                                       save_output_dict=True)
        self.assertEqual(explanations_nice, expected_explain)
Code Example #13
    def _attempt_to_run(self, test_case_name, expect_error):
        STORE_IMPL = UnitTest_KnowledgeBaseStore(
            test_case_name=test_case_name,
            input_manifests_dir=self.input_data,
            input_postings_dir=self.input_data,
            output_manifests_dir=self.output_data,
            output_postings_dir=self.output_data)

        EXCEL_FILE = test_case_name + "_INPUT.xlsx"

        root_trace = FunctionalTrace(parent_trace=None,
                                     path_mask=self._path_mask).doing(
                                         "Discovering URL",
                                         data={
                                             'path': EXCEL_FILE,
                                         })
        STORE = KnowledgeBaseStore(root_trace, STORE_IMPL)
        posting_handle = STORE.buildPostingHandle(root_trace,
                                                  EXCEL_FILE,
                                                  sheet="Posting Label",
                                                  excel_range="B2:C100")

        MANIFESTS_OUTPUT_DIR = self.output_data
        MANIFESTS_EXPECTED_DIR = self.expected_data
        EXPLANATIONS_OUTPUT = test_case_name + '_explanations_OUTPUT.yaml'
        EXPLANATIONS_EXPECTED = test_case_name + '_explanations_EXPECTED.yaml'
        all_manifests_dict = {}

        PL = Mock_Controller._MyPostingLabel  # Abbreviation for readability purposes

        try:
            root_trace = FunctionalTrace(
                parent_trace=None,
                path_mask=self._path_mask).doing("Running Mock_Controller")

            controller = Mock_Controller(root_trace,
                                         STORE,
                                         a6i_config=self.a6i_config)
            all_manifests_dict, label = controller._buildAllManifests(
                root_trace, posting_handle)

            NB_MANIFESTS_EXPECTED = 3
            if len(all_manifests_dict.keys()) != NB_MANIFESTS_EXPECTED:
                raise ApodeixiError(
                    root_trace, 'Expected ' + str(NB_MANIFESTS_EXPECTED) +
                    ' manifests, but found ' + str(len(all_manifests_dict.keys())))

            for manifest_nb in all_manifests_dict.keys():
                manifest_dict = all_manifests_dict[manifest_nb]

                STORE.persistManifest(root_trace, manifest_dict)

            # Make explanations readable by creating a pretty-printed string
            explanations_dict = controller.show_your_work.as_dict(
                root_trace) | controller.link_table.as_dict(root_trace)
            explanations_nice = DictionaryFormatter().dict_2_nice(
                parent_trace=root_trace,
                a_dict=explanations_dict,
                flatten=True,
                delimeter="::")
            with open(MANIFESTS_OUTPUT_DIR + '/' + EXPLANATIONS_OUTPUT,
                      'w') as file:
                file.write(explanations_nice)

        except ApodeixiError as ex:
            if expect_error:
                raise
            else:
                print(ex.trace_message())
                self.assertTrue(1 == 2)

        self.assertTrue(len(all_manifests_dict) == NB_MANIFESTS_EXPECTED)
        for manifest_nb in all_manifests_dict.keys():
            manifest_dict = all_manifests_dict[manifest_nb]
            kind = manifest_dict['kind']
            self._compare_to_expected_yaml(root_trace, manifest_dict,
                                           test_case_name + "." + kind)
        with open(MANIFESTS_EXPECTED_DIR + '/' + EXPLANATIONS_EXPECTED,
                  'r') as file:
            expected_explain = file.read()
        self.assertEqual(explanations_nice, expected_explain)
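
The expect_error branch in the except clause re-raises so that a negative test can assert the failure from outside. A hypothetical caller, not taken from the repository (the test name and input file are illustrative):

    # Hypothetical negative test: the posting input is deliberately malformed,
    # so _attempt_to_run(expect_error=True) re-raises the ApodeixiError and the
    # test asserts that it surfaced rather than treating it as a test failure.
    def test_malformed_posting(self):
        with self.assertRaises(ApodeixiError):
            self._attempt_to_run(test_case_name='malformed_posting', expect_error=True)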
Code Example #14
    def impl_testcase(self, parent_trace, test_name, controller_class, nb_manifests_expected=1, excel_sheet='Posting Label', ctx_range='b2:c1000'):

        t0                      = time.time()

        MANIFEST_FILE_PREFIX    = test_name

        EXCEL_FILE              = MANIFEST_FILE_PREFIX + '_INPUT.xlsx' 
        SHEET                   = excel_sheet
        CTX_RANGE               = ctx_range
        NB_MANIFESTS_EXPECTED   = nb_manifests_expected

        

        STORE_IMPL              = UnitTest_KnowledgeBaseStore(  test_case_name          = MANIFEST_FILE_PREFIX,
                                                                input_manifests_dir     = self.input_data, 
                                                                input_postings_dir      = self.input_data, 
                                                                output_manifests_dir    = self.output_data, 
                                                                output_postings_dir     = self.output_data)
        

        root_trace              = FunctionalTrace(parent_trace=None, path_mask=self._path_mask).doing("Discovering URL", data={'path'  : EXCEL_FILE,
                                                                                                  'sheet' : SHEET})
        STORE                   = KnowledgeBaseStore(root_trace, STORE_IMPL)
        posting_handle          = STORE.buildPostingHandle(root_trace, EXCEL_FILE, sheet=SHEET, excel_range=CTX_RANGE)

        MANIFESTS_OUTPUT_DIR    = self.output_data
        MANIFESTS_EXPECTED_DIR  = self.expected_data
        EXPLANATIONS_OUTPUT     = MANIFEST_FILE_PREFIX + '_explanations_OUTPUT.yaml'
        EXPLANATIONS_EXPECTED   = MANIFEST_FILE_PREFIX + '_explanations_EXPECTED.yaml'
        all_manifests_dict      = {}

        t100                    = time.time()
        try:
            my_trace          = parent_trace.doing("Generating manifest(s)")

            controller                  = controller_class(my_trace, STORE, a6i_config = self.a6i_config)
            t120                         = time.time()
            all_manifests_dict, label,   = controller._buildAllManifests(my_trace, posting_handle)

            if len(all_manifests_dict.keys()) != NB_MANIFESTS_EXPECTED:
                raise ApodeixiError(my_trace, 'Expected ' + str(NB_MANIFESTS_EXPECTED) + ' manifests, but found ' 
                                    + str(len(all_manifests_dict.keys())))

            
            t180                         = time.time()
            for manifest_nb in all_manifests_dict.keys():
                manifest_dict     = all_manifests_dict[manifest_nb]

                STORE.persistManifest(my_trace, manifest_dict)

            t200                      = time.time()
            # Make explanations readable by creating a pretty-printed string
            explanations_dict   = controller.show_your_work.as_dict(root_trace) | controller.link_table.as_dict(root_trace)
            explanations_nice   = DictionaryFormatter().dict_2_nice(    parent_trace    = my_trace, 
                                                                        a_dict          = explanations_dict, 
                                                                        flatten         = True, 
                                                                        delimeter       = "::")
            with open(MANIFESTS_OUTPUT_DIR + '/' + EXPLANATIONS_OUTPUT, 'w') as file:
                file.write(explanations_nice)

        except ApodeixiError as ex:
            print(ex.trace_message())
            self.assertTrue(1==2)    

        t300                      = time.time()                                                                                    

        self.assertTrue(len(all_manifests_dict) == NB_MANIFESTS_EXPECTED)
        for manifest_nb in all_manifests_dict.keys():
            manifest_dict   = all_manifests_dict[manifest_nb]
            kind            = manifest_dict['kind']
            self._compare_to_expected_yaml(parent_trace, manifest_dict, MANIFEST_FILE_PREFIX + "." + kind)
        with open(MANIFESTS_EXPECTED_DIR + '/' + EXPLANATIONS_EXPECTED, 'r') as file:
            expected_explain        = file.read()
        self.assertEqual(explanations_nice,    expected_explain)

        t400                      = time.time()
        #print("************* Timing at 100: " + str(t100-t0))
        #print("************* Timing at 120: " + str(t120-t0))
        #print("************* Timing at 180: " + str(t180-t0))
        #print("************* Timing at 200: " + str(t200-t0))
        #print("************* Timing at 300: " + str(t300-t0))
        #print("************* Timing at 400: " + str(t400-t0))
Code Example #15
    def _locate_product_postings(self, posting_api, test_name):
        coords_txt = ''

        root_trace = FunctionalTrace(
            parent_trace=None, path_mask=self._path_mask).doing(
                "Testing File Knowledge Base::locate product postings")

        try:

            def _coords_filter(coords):
                return coords.scoringCycle == "FY 22" and coords.scenario == "MTP"

            scanned_handles = self.stack().store().searchPostings(
                parent_trace=root_trace,
                posting_api=posting_api,
                filing_coordinates_filter=_coords_filter)

            # To ensure that regression test output is deterministic across Windows and Linux/Containers, sort the scanned
            # handles before going further
            scanned_handles = sorted(
                scanned_handles,
                key=lambda handle: format(handle.filing_coords
                                          ) + handle.excel_filename)

            # Keys in coords_dict are objects; turn them into strings to print test output
            stringified_coords_dict = {}
            idx = 1
            for handle in scanned_handles:
                stringified_coords_dict[str(idx) + "." + format(
                    handle.filing_coords, '')] = handle.excel_filename
                idx += 1
            coords_txt = "--------------------- Products with a posting in the KnowledgeBase filing structure\n\n"
            coords_txt += DictionaryFormatter().dict_2_nice(
                parent_trace=root_trace,
                a_dict=stringified_coords_dict,
                flatten=True)

            products_in_kb = [
                handle.filing_coords.product for handle in scanned_handles
            ]

            # These are legitimate products in the Apodeixi config but with no filing structure in the Knowledge Base
            missing = [
                prod for prod in self.products if prod not in products_in_kb
            ]

            # This is the dual gap: products with filing structure in the Knowledge Base but not appearing as legitimate in the Apodeixi config
            illegitimate = [
                prod for prod in products_in_kb if prod not in self.products
            ]

            coords_txt += "\n\n--------------------- Products lacking postings in the KnowledgeBase filing structure\n\n"
            coords_txt += "\n".join(missing)

            coords_txt += "\n\n--------------------- 'Illegitimate' products: \n" \
                          + "\tthey are missing in the Apodeixi config but appear in the KnowledgeBase\n\n"
            coords_txt += "\n".join(illegitimate)

        except ApodeixiError as ex:
            print(ex.trace_message())
            self.assertTrue(1 == 2)

        self._compare_to_expected_txt(parent_trace=root_trace,
                                      output_txt=coords_txt,
                                      test_output_name=test_name,
                                      save_output_txt=True)