Example #1
    def test_extract_xml_log(self):
        """ Compare contents of XML log when using stream and normal logfile. """
        stream = TestIO("")
        fmu_s = load_fmu(self.example_fmu, log_file_name=stream, log_level=4)
        xml_file1 = fmu_s.get_log_filename() + '.xml'
        if os.path.isfile(xml_file1):
            os.remove(xml_file1)
        res_s = fmu_s.simulate()
        xml_log_s = fmu_s.extract_xml_log()

        log_file_name = 'test_cmp_xml_files.txt'
        if os.path.isfile(log_file_name):
            os.remove(log_file_name)
        fmu = load_fmu(self.example_fmu,
                       log_file_name=log_file_name,
                       log_level=4)
        xml_file2 = 'test_cmp_xml_files.xml'
        if os.path.isfile(xml_file2):
            os.remove(xml_file2)
        res = fmu.simulate()
        xml_log = fmu.extract_xml_log()

        err_msg = "Unequal xml files, please compare the contents of:\n{}\nand\n{}".format(
            xml_log_s, xml_log)
        nose.tools.assert_true(compare_files(xml_log_s, xml_log), err_msg)
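The compare_files helper used throughout these examples is imported from the project under test and is not shown in the listing. A minimal sketch of what such a helper might look like, assuming a plain content comparison built on the standard-library filecmp module (the actual implementations may differ):

import filecmp

def compare_files(file1, file2):
    # Hypothetical stand-in for the project's helper: True if both files have
    # identical contents; shallow=False forces a byte-for-byte comparison
    # instead of relying on os.stat() metadata.
    return filecmp.cmp(file1, file2, shallow=False)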
Example #2
    def test_extract_xml_log_into_stream(self):
        """ Compare contents of XML log when extract XML into a stream. """
        stream = TestIO("")
        extracted_xml_stream = StringIO("")
        fmu_s = load_fmu(self.example_fmu, log_file_name=stream, log_level=4)
        res_s = fmu_s.simulate()
        fmu_s.extract_xml_log(extracted_xml_stream)

        # write the contents of extracted_xml_stream to a file for the test
        xml_file1 = "my_new_file.xml"
        if os.path.isfile(xml_file1):
            os.remove(xml_file1)
        write_stream_to_file(extracted_xml_stream, xml_file1)

        log_file_name = 'test_cmp_xml_files.txt'
        if os.path.isfile(log_file_name):
            os.remove(log_file_name)
        fmu = load_fmu(self.example_fmu,
                       log_file_name=log_file_name,
                       log_level=4)
        xml_file2 = 'test_cmp_xml_files.xml'
        if os.path.isfile(xml_file2):
            os.remove(xml_file2)
        res = fmu.simulate()
        xml_log = fmu.extract_xml_log()

        err_msg = "Unequal xml files, please compare the contents of:\n{}\nand\n{}".format(
            xml_file1, xml_log)
        nose.tools.assert_true(compare_files(xml_file1, xml_log), err_msg)
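write_stream_to_file is another helper from the test module that is not shown. A minimal sketch, assuming it simply rewinds an in-memory stream and writes its contents to disk:

def write_stream_to_file(stream, filename):
    # Hypothetical helper: rewind the stream and dump its contents to a file.
    stream.seek(0)
    with open(filename, 'w') as f:
        f.write(stream.read())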
Example #3
def verify_file_matches_repo_root(result, *file):
    """
    Assert that a generated file matches the one with the identical name in
    the project repository root.
    """
    mother_file = REPO_ROOT_PATH.join(*file).strpath
    generated_file = result.project.join(*file).strpath
    assert compare_files(mother_file, generated_file), \
        "Mother project '{}' not matching template.\n {} != {}".format(
            pathsep.join(file), mother_file, generated_file)
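A call site passes the path components of the file to check, relative to both the repository root and the generated project; a hypothetical usage from a cookiecutter test:

# check that the generated tests/README.rst matches the repository's own copy
verify_file_matches_repo_root(result, 'tests', 'README.rst')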
Example #4
    def test_output_stats(self):
        out = TwitchStatisticsOutput(game_name='Test',
                                     game_shorthand='TEST',
                                     db_mid_directory='data',
                                     db_name_format='{}_stats_complete.db',
                                     verbose=True)
        out.run()
        self.assertTrue(os.path.isfile(os.path.join(os.getcwd(), 'data', 'TEST_Twitch_Stats.txt')))
        self.assertTrue(compare_files(
            f1=os.path.join(os.getcwd(), 'data', 'TEST_Twitch_Stats.txt'),
            f2=os.path.join(os.getcwd(), 'data', 'TEST_Twitch_Stats_complete.txt')))
Example #5
    def test_ci_setup(self, cookies, project_slug,
                      vcs_account, vcs_platform, vcs_remote,
                      ci_service, ci_testcommand, tests):
        """
        Generate a CI setup with specific settings and verify it is complete.
        """
        result = cookies.bake(extra_context={
            'project_slug': project_slug,
            'vcs_platform': vcs_platform,
            'vcs_account': vcs_account,
            'ci_service': ci_service,
            'tests': tests,
        })

        assert result.exit_code == 0
        assert result.exception is None

        assert result.project.basename == project_slug
        assert result.project.isdir()
        assert result.project.join('README.rst').isfile()
        assert result.project.join('tests', 'requirements.txt').isfile()

        assert result.project.join('.git').isdir()
        assert result.project.join('.gitignore').isfile()
        git_config = result.project.join('.git', 'config').readlines(cr=False)
        assert '[remote "origin"]' in git_config
        assert '\turl = {}'.format(vcs_remote) in git_config

        tox_ini = result.project.join('tox.ini').readlines(cr=False)
        assert '[tox]' in tox_ini
        assert 'envlist = {}'.format(tests) in tox_ini
        assert '[testenv]' in tox_ini
        assert '[testenv:flake8]' in tox_ini
        assert '[testenv:pylint]' in tox_ini

        ci_service_conf = result.project.join(ci_service).readlines(cr=False)
        assert ci_testcommand in ci_service_conf

        codeship_services = result.project.join('codeship-services.yml')
        assert (ci_service == 'codeship-steps.yml' and
                codeship_services.isfile()) or not codeship_services.exists()

        # ensure this project itself stays up-to-date with the template
        file_list = ['.gitignore', ci_service, 'tests/README.rst']
        for filename in file_list:
            mother_file = REPO_ROOT_PATH.join(filename).strpath
            generated_file = result.project.join(filename).strpath
            assert compare_files(mother_file, generated_file), \
                "Mother project '{}' not matching template.\n {} != {}".format(
                    filename, mother_file, generated_file)
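The trailing comparison loop repeats the logic that Example #3 wraps in verify_file_matches_repo_root; under that assumption the loop could be replaced by direct calls to the helper, for example:

# hypothetical refactor using the helper from Example #3
for filename in ['.gitignore', ci_service, 'tests/README.rst']:
    verify_file_matches_repo_root(result, *filename.split('/'))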
Example #6
    def equal_files_for_expanded_path(self, user, file_path):
        """
        Compares the corresponding file in home and real root.
        Returns ``True`` if the files are equal, ``False`` otherwise.
        """
        src_file_path, dst_file_path = self.get_src_and_dst_path(
            user, file_path)

        debug("Comparing '{0}' and '{1}'".format(src_file_path, dst_file_path))

        if not isfile(dst_file_path):
            return False

        return compare_files(src_file_path, dst_file_path)
Example #7
File: Path.py Project: idin/disk
    def is_the_same_file(self, other):
        """
		checks if self is the same file as path
		:type other: Path
		:rtype: bool
		"""
        if not self.exists():
            raise FileNotFoundError(self)

        if not other.exists():
            raise FileNotFoundError(other)

        if not self.is_file():
            raise NotAFileError(self)

        if not other.is_file():
            raise NotAFileError(other)

        return compare_files(self.absolute_path, other.absolute_path)
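NotAFileError is not a Python built-in exception. A minimal sketch, assuming the disk package defines it as a plain Exception subclass:

class NotAFileError(Exception):
    # Hypothetical definition: raised when a path exists but is not a regular file.
    pass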
Example #8
    def run_fm_test(self, test_case_dir, run_dir):
        """
        Runs an FM test case and validates generated
        losses against expected losses.
        """

        net_ri = True
        il_alloc_rule = KTOOLS_ALLOC_IL_DEFAULT
        ri_alloc_rule = KTOOLS_ALLOC_RI_DEFAULT
        output_level = 'loc'

        loss_factor_fp = os.path.join(test_case_dir, 'loss_factors.csv')
        loss_factor = []
        include_loss_factor = False
        if os.path.exists(loss_factor_fp):
            loss_factor = []
            include_loss_factor = True
            try:
                with open(loss_factor_fp, 'r') as csvfile:
                    reader = csv.DictReader(csvfile)
                    for row in reader:
                        loss_factor.append(
                            float(row['loss_factor']))
            except Exception as e:
                raise OasisException(f"Failed to read {loss_factor_fp}", e)
        else:
            loss_factor.append(1.0)

        output_file = os.path.join(run_dir, 'loc_summary.csv')
        (il, ril) = self.run_exposure(
            test_case_dir, run_dir, loss_factor, net_ri,
            il_alloc_rule, ri_alloc_rule, output_level, output_file,
            include_loss_factor)

        expected_data_dir = os.path.join(test_case_dir, 'expected')
        if not os.path.exists(expected_data_dir):
            raise OasisException(
                'No subfolder named `expected` found in the input directory - '
                'this subfolder should contain the expected set of GUL + IL '
                'input files, optionally the RI input files, and the expected '
                'set of GUL, IL and optionally the RI loss files'
            )

        files = ['keys.csv', 'loc_summary.csv']
        files += [
            '{}.csv'.format(fn)
            for ft, fn in chain(OASIS_FILES_PREFIXES['gul'].items(), OASIS_FILES_PREFIXES['il'].items())
        ]
        files += ['gul_summary_map.csv', 'guls.csv']
        if il:
            files += ['fm_summary_map.csv', 'ils.csv']
        if ril:
            files += ['rils.csv']

        test_result = True
        for f in files:
            generated = os.path.join(run_dir, f)
            expected = os.path.join(expected_data_dir, f)

            if not os.path.exists(expected):
                continue

            file_test_result = compare_files(generated, expected)
            if not file_test_result:
                self.logger.debug(
                    f'\n FAIL: generated {generated} vs expected {expected}')
            test_result = test_result and file_test_result
        return test_result
Example #9
    def __eq__(self, other: Any) -> bool:
        if isinstance(other, AbsoluteFile):
            return compare_files(self.path, other.path)
        return False
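Under this definition, two AbsoluteFile instances compare equal whenever their underlying files have identical contents, regardless of where they live. A hedged usage sketch (the AbsoluteFile constructor signature is assumed):

# hypothetical usage: equality is decided by file contents, not by path
original = AbsoluteFile('reports/run1.txt')
backup = AbsoluteFile('backups/run1.txt')
if original == backup:
    print('backup matches the original')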
Example #10
    def action(self, args):
        """
        Generates deterministic losses using the installed ktools framework given
        direct Oasis files (GUL + optionally IL and RI input files).

        :param args: The arguments from the command line
        :type args: Namespace
        """
        inputs = InputValues(args)

        test_case_name = inputs.get('test_case_name')

        self.logger.info('\nProcessing arguments for {}'.format(test_case_name)
                         if test_case_name else '\nProcessing arguments')

        call_dir = os.getcwd()

        src_dir = as_path(inputs.get('src_dir', default=call_dir,
                                     is_path=True),
                          'Source files directory',
                          is_dir=True,
                          preexists=True)

        run_dir = as_path(inputs.get('run_dir',
                                     default=os.path.join(src_dir, 'run'),
                                     is_path=True),
                          'Run directory',
                          is_dir=True,
                          preexists=False)
        if not os.path.exists(run_dir):
            Path(run_dir).mkdir(parents=True, exist_ok=True)

        loss_factor = inputs.get('loss_factor',
                                 default=1.0,
                                 type=float,
                                 required=False)

        net_ri = True

        il_alloc_rule = inputs.get('alloc_rule_il',
                                   default=KTOOLS_ALLOC_IL_DEFAULT,
                                   required=False)
        ri_alloc_rule = inputs.get('alloc_rule_ri',
                                   default=KTOOLS_ALLOC_RI_DEFAULT,
                                   required=False)

        validate = inputs.get('validate', default=False, required=False)

        # item, loc, pol, acc, port
        output_level = inputs.get('output_level',
                                  default="item",
                                  required=False)
        if output_level not in ['port', 'acc', 'loc', 'pol', 'item']:
            raise OasisException(
                'Invalid output level. Must be one of port, acc, loc, pol or item.'
            )

        output_file = as_path(inputs.get('output_file',
                                         required=False,
                                         is_path=True),
                              'Output file path',
                              preexists=False)

        src_contents = [fn.lower() for fn in os.listdir(src_dir)]

        if 'location.csv' not in src_contents:
            raise OasisException(
                'No location/exposure file found in source directory - '
                'a file named `location.csv` is expected')

        il = ril = False
        il = ('account.csv' in src_contents)
        ril = il and ('ri_info.csv' in src_contents) and ('ri_scope.csv'
                                                          in src_contents)

        self.logger.info(
            '\nRunning deterministic losses (GUL=True, IL={}, RIL={})\n'.
            format(il, ril))
        guls_df, ils_df, rils_df = om().run_deterministic(
            src_dir,
            run_dir=run_dir,
            loss_percentage_of_tiv=loss_factor,
            net_ri=net_ri,
            il_alloc_rule=il_alloc_rule,
            ri_alloc_rule=ri_alloc_rule)

        # Read in the summary map
        summaries_df = get_dataframe(
            src_fp=os.path.join(run_dir, 'fm_summary_map.csv'))

        guls_df.to_csv(path_or_buf=os.path.join(run_dir, 'guls.csv'),
                       index=False,
                       encoding='utf-8')
        guls_df.rename(columns={'loss': 'loss_gul'}, inplace=True)

        total_gul = guls_df.loss_gul.sum()

        guls_df = guls_df.merge(right=summaries_df,
                                left_on=["item_id"],
                                right_on=["agg_id"])

        if il:
            ils_df.to_csv(path_or_buf=os.path.join(run_dir, 'ils.csv'),
                          index=False,
                          encoding='utf-8')
            ils_df.rename(columns={'loss': 'loss_il'}, inplace=True)
            all_losses_df = guls_df.merge(how='left',
                                          right=ils_df,
                                          on=["event_id", "output_id"],
                                          suffixes=["_gul", "_il"])
        if ril:
            rils_df.to_csv(path_or_buf=os.path.join(run_dir, 'rils.csv'),
                           index=False,
                           encoding='utf-8')
            rils_df.rename(columns={'loss': 'loss_ri'}, inplace=True)
            all_losses_df = all_losses_df.merge(how='left',
                                                right=rils_df,
                                                on=["event_id", "output_id"])

        oed_hierarchy = get_oed_hierarchy()
        portfolio_num = oed_hierarchy['portnum']['ProfileElementName'].lower()
        acc_num = oed_hierarchy['accnum']['ProfileElementName'].lower()
        loc_num = oed_hierarchy['locnum']['ProfileElementName'].lower()
        policy_num = oed_hierarchy['polnum']['ProfileElementName'].lower()

        if output_level == 'port':
            summary_cols = [portfolio_num]
        elif output_level == 'acc':
            summary_cols = [portfolio_num, acc_num]
        elif output_level == 'pol':
            summary_cols = [portfolio_num, acc_num, policy_num]
        elif output_level == 'loc':
            summary_cols = [portfolio_num, acc_num, loc_num]
        elif output_level == 'item':
            summary_cols = [
                'output_id', portfolio_num, acc_num, loc_num, policy_num,
                'coverage_type_id'
            ]

        guls_df = guls_df.loc[:, summary_cols + ['loss_gul']]

        if not il and not ril:
            all_losses_df = guls_df.loc[:, summary_cols + ['loss_gul']]
            all_losses_df.drop_duplicates(keep=False, inplace=True)
            header = 'Losses (loss factor={}; total gul={:,.00f})'.format(
                loss_factor, total_gul)
        elif not ril:
            total_il = ils_df.loss_il.sum()
            all_losses_df = all_losses_df.loc[:, summary_cols +
                                              ['loss_gul', 'loss_il']]
            summary_gul_df = pd.DataFrame({
                'loss_gul':
                guls_df.groupby(summary_cols)['loss_gul'].sum()
            }).reset_index()
            summary_il_df = pd.DataFrame({
                'loss_il':
                all_losses_df.groupby(summary_cols)['loss_il'].sum()
            }).reset_index()
            all_losses_df = summary_gul_df.merge(how='left',
                                                 right=summary_il_df,
                                                 on=summary_cols)
            header = 'Losses (loss factor={}; total gul={:,.00f}; total il={:,.00f})'.format(
                loss_factor, total_gul, total_il)
        else:
            total_il = ils_df.loss_il.sum()
            total_ri_net = rils_df.loss_ri.sum()
            total_ri_ceded = total_il - total_ri_net
            all_losses_df = all_losses_df.loc[:, summary_cols + [
                'loss_gul', 'loss_il', 'loss_ri'
            ]]
            summary_gul_df = pd.DataFrame({
                'loss_gul':
                guls_df.groupby(summary_cols)['loss_gul'].sum()
            }).reset_index()
            summary_il_df = pd.DataFrame({
                'loss_il':
                all_losses_df.groupby(summary_cols)['loss_il'].sum()
            }).reset_index()
            summary_ri_df = pd.DataFrame({
                'loss_ri':
                all_losses_df.groupby(summary_cols)['loss_ri'].sum()
            }).reset_index()
            all_losses_df = summary_gul_df.merge(how='left',
                                                 right=summary_il_df,
                                                 on=summary_cols)
            all_losses_df = all_losses_df.merge(how='left',
                                                right=summary_ri_df,
                                                on=summary_cols)
            header = 'Losses (loss factor={}; total gul={:,.00f}; total il={:,.00f}; total ri ceded={:,.00f})'.format(
                loss_factor, total_gul, total_il, total_ri_ceded)

        print_dataframe(all_losses_df,
                        frame_header=header,
                        string_cols=all_losses_df.columns)
        if output_file:
            all_losses_df.to_csv(output_file, index=False, encoding='utf-8')

        # Do not validate if the loss factor < 1 - this is because the
        # expected data files for validation are based on a loss factor
        # of 1.0
        if loss_factor < 1:
            validate = False

        if validate:
            expected_data_dir = os.path.join(src_dir, 'expected')
            if not os.path.exists(expected_data_dir):
                raise OasisException(
                    'No subfolder named `expected` found in the input directory - '
                    'this subfolder should contain the expected set of GUL + IL '
                    'input files, optionally the RI input files, and the expected '
                    'set of GUL, IL and optionally the RI loss files')

            files = ['keys.csv', 'loc_summary.csv']
            files += [
                '{}.csv'.format(fn)
                for ft, fn in chain(OASIS_FILES_PREFIXES['gul'].items(),
                                    OASIS_FILES_PREFIXES['il'].items())
            ]
            files += ['gul_summary_map.csv', 'guls.csv']
            if il:
                files += ['fm_summary_map.csv', 'ils.csv']
            if ril:
                files += ['rils.csv']

            status = 'PASS'
            for f in files:
                generated = os.path.join(run_dir, f)
                expected = os.path.join(expected_data_dir, f)

                if not os.path.exists(expected):
                    continue

                self.logger.info(
                    '\nComparing generated {} vs expected {}'.format(
                        generated, expected))
                try:
                    assert (compare_files(generated, expected) is True)
                except AssertionError:
                    status = 'FAIL'
                    self.logger.info('\n{}'.format(
                        column_diff(generated, expected)))
                    self.logger.info('\tFAIL')
                else:
                    self.logger.info('\n\tPASS')

            self.logger.info('\n{} validation complete: {}'.format(
                test_case_name, status
            ) if test_case_name else 'Validation complete: {}'.format(status))

            sys.exit(0 if status == 'PASS' else -1)