Code example #1
class OasisLookupFactoryWriteJsonFiles(TestCase):
    @settings(deadline=None, suppress_health_check=HealthCheck.all())
    @given(
        successes=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
                       size=5),
        nonsuccesses=keys(from_statuses=sampled_from([
            OASIS_KEYS_STATUS['fail']['id'], OASIS_KEYS_STATUS['nomatch']['id']
        ]),
                          size=5))
    def test_records_are_given___records_are_written_to_json_keys_files_correctly(
            self, successes, nonsuccesses):
        with TemporaryDirectory() as d:
            keys_file_path = os.path.join(d, 'keys.json')
            keys_errors_file_path = os.path.join(d, 'keys-errors.json')
            result = pd.DataFrame(successes + nonsuccesses)

            key_server = BasicKeyServer(
                {'builtin_lookup_type': 'deterministic'})
            _, successes_count, _, nonsuccesses_count = key_server.write_keys_file(
                [result],
                successes_fp=keys_file_path,
                errors_fp=keys_errors_file_path,
                output_format='json',
                keys_success_msg=False)

            with io.open(keys_file_path, 'r', encoding='utf-8') as f1, io.open(
                    keys_errors_file_path, 'r', encoding='utf-8') as f2:
                written_successes = json.load(f1)
                written_nonsuccesses = json.load(f2)

            self.assertEqual(successes_count, len(successes))
            self.assertEqual(written_successes, successes)

            self.assertEqual(nonsuccesses_count, len(nonsuccesses))
            self.assertEqual(written_nonsuccesses, nonsuccesses)
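The keys(...) strategy used throughout these examples comes from the
project's test-data helpers and is not shown in the excerpts. A minimal
sketch of such a Hypothesis composite strategy, with field names inferred
from how the records are consumed in the tests (a hypothetical helper, not
OasisLMF's actual one):

# Sketch of a keys() strategy in the style used above; the field names
# are assumptions inferred from the tests, not the project's helper.
from hypothesis import strategies as st

@st.composite
def keys(draw, from_statuses=st.just('success'), size=5):
    return [{
        'id': i + 1,
        'peril_id': draw(st.sampled_from(['WTC', 'WSS'])),
        'coverage_type': draw(st.integers(1, 4)),
        'area_peril_id': draw(st.integers(1, 10)),
        'vulnerability_id': draw(st.integers(1, 10)),
        'status': draw(from_statuses),
        'message': draw(st.text(max_size=20)),
    } for i in range(size)]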
Code example #2
class OasisLookupFactoryWriteJsonFiles(TestCase):
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(successes=keys(from_statuses=just(KEYS_STATUS_SUCCESS), size=5),
           nonsuccesses=keys(from_statuses=sampled_from(
               [KEYS_STATUS_FAIL, KEYS_STATUS_NOMATCH]),
                             size=5))
    def test_records_are_given___records_are_written_to_json_keys_files_correctly(
            self, successes, nonsuccesses):

        with TemporaryDirectory() as d:
            keys_file_path = os.path.join(d, 'keys.json')
            keys_errors_file_path = os.path.join(d, 'keys-errors.json')

            _, successes_count = olf.write_json_keys_file(
                successes, keys_file_path)
            _, nonsuccesses_count = olf.write_json_keys_file(
                nonsuccesses, keys_errors_file_path)

            with io.open(keys_file_path, 'r', encoding='utf-8') as f1, io.open(
                    keys_errors_file_path, 'r', encoding='utf-8') as f2:
                written_successes = json.load(f1)
                written_nonsuccesses = json.load(f2)

            self.assertEqual(successes_count, len(successes))
            self.assertEqual(written_successes, successes)

            self.assertEqual(nonsuccesses_count, len(nonsuccesses))
            self.assertEqual(written_nonsuccesses, nonsuccesses)
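The test above relies only on olf.write_json_keys_file returning a pair
whose second element is the record count. A minimal stand-in consistent
with that contract (an illustrative assumption, not the OasisLMF
implementation):

# Hypothetical stand-in matching the (path, count) pair the test
# unpacks; the real OasisLookupFactory method may differ.
import io
import json

def write_json_keys_file(records, file_path):
    with io.open(file_path, 'w', encoding='utf-8') as f:
        json.dump(records, f, ensure_ascii=False)
    return file_path, len(records)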
Code example #3
class OasisLookupFactoryWriteOasisKeysFiles(TestCase):
    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(successes=keys(from_statuses=just(KEYS_STATUS_SUCCESS), size=5),
           nonsuccesses=keys(from_statuses=sampled_from(
               [KEYS_STATUS_FAIL, KEYS_STATUS_NOMATCH]),
                             size=5))
    def test_records_are_given___records_are_written_to_oasis_keys_files_correctly(
            self, successes, nonsuccesses):

        oasis_keys_file_to_record_metadict = {
            'LocID': 'id',
            'PerilID': 'peril_id',
            'CoverageTypeID': 'coverage_type',
            'AreaPerilID': 'area_peril_id',
            'VulnerabilityID': 'vulnerability_id'
        }
        oasis_keys_errors_file_to_record_metadict = {
            'LocID': 'id',
            'PerilID': 'peril_id',
            'CoverageTypeID': 'coverage_type',
            'Message': 'message'
        }

        with TemporaryDirectory() as d:
            keys_file_path = os.path.join(d, 'keys.csv')
            keys_errors_file_path = os.path.join(d, 'keys-errors.csv')

            _, successes_count = olf.write_oasis_keys_file(
                successes, keys_file_path)
            _, nonsuccesses_count = olf.write_oasis_keys_errors_file(
                nonsuccesses, keys_errors_file_path)

            with io.open(keys_file_path, 'r', encoding='utf-8') as f1, io.open(
                    keys_errors_file_path, 'r', encoding='utf-8') as f2:
                written_successes = [
                    dict((oasis_keys_file_to_record_metadict[k], r[k])
                         for k in r)
                    for r in pd.read_csv(f1).T.to_dict().values()
                ]
                written_nonsuccesses = [
                    dict((oasis_keys_errors_file_to_record_metadict[k], r[k])
                         for k in r)
                    for r in pd.read_csv(f2).T.to_dict().values()
                ]

            # NB: the original lambda returned a generator expression, which
            # is always truthy, so filter() kept every record; any() applies
            # the intended id-membership test.
            success_matches = list(
                filter(
                    lambda r: any(r['id'] == ws['id']
                                  for ws in written_successes), successes))
            nonsuccess_matches = list(
                filter(
                    lambda r: any(r['id'] == ws['id']
                                  for ws in written_nonsuccesses),
                    nonsuccesses))

            self.assertEqual(successes_count, len(successes))
            self.assertEqual(success_matches, successes)

            self.assertEqual(nonsuccesses_count, len(nonsuccesses))
            self.assertEqual(nonsuccess_matches, nonsuccesses)
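Reading rows back through pd.read_csv(f).T.to_dict().values(), as above,
works but is roundabout; pandas' records orientation yields the same list
of per-row dicts directly:

# Behaviour-equivalent row extraction: to_dict(orient='records')
# returns one dict per CSV row, replacing the .T.to_dict().values()
# idiom used in the test above.
import io

import pandas as pd

csv_text = io.StringIO("LocID,PerilID\n1,WTC\n2,WSS\n")
rows = pd.read_csv(csv_text).to_dict(orient='records')
# -> [{'LocID': 1, 'PerilID': 'WTC'}, {'LocID': 2, 'PerilID': 'WSS'}]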
Code example #4
class OasisLookupFactoryWriteKeys(TestCase):
    def create_fake_lookup(self):
        self.lookup_instance = Mock()
        return self.lookup_instance

    def test_no_model_exposure_are_provided___oasis_exception_is_raised(self):
        with self.assertRaises(OasisException):
            list(olf.get_keys(self.create_fake_lookup()))

    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(data=keys(from_statuses=just(KEYS_STATUS_SUCCESS), size=10))
    def test_produced_keys_are_passed_to_write_oasis_keys_file(self, data):
        with TemporaryDirectory() as d,\
             patch('oasislmf.model_preparation.lookup.OasisLookupFactory.get_keys', Mock(return_value=(r for r in data))) as get_keys_mock,\
             patch('oasislmf.model_preparation.lookup.OasisLookupFactory.write_oasis_keys_file') as write_oasis_keys_file_mock:

            keys_file_path = os.path.join(d, 'piwind-keys.csv')
            olf.save_keys(lookup=self.create_fake_lookup(),
                          keys_file_path=keys_file_path,
                          source_exposure=json.dumps(data))

            get_keys_mock.assert_called_once_with(
                lookup=self.lookup_instance,
                source_exposure=json.dumps(data),
                source_exposure_fp=None,
                success_only=True)
            write_oasis_keys_file_mock.assert_called_once_with(
                data, keys_file_path, id_col='locnumber')
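One subtlety in the mock above: Mock(return_value=(r for r in data))
stores a single generator object, so only the first call to the patched
get_keys yields records. A self-contained illustration:

# A generator passed as return_value is shared across calls and is
# exhausted after the first full iteration.
from unittest.mock import Mock

m = Mock(return_value=(r for r in [1, 2, 3]))
assert list(m()) == [1, 2, 3]
assert list(m()) == []  # same generator object, already consumed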
Code example #5
    def test_summary_file_written(self, data):
        loc_size = data.draw(integers(10, 20))

        # Create Mock keys_data
        keys_data = data.draw(
            keys(size=loc_size,
                 from_area_peril_ids=just(1),
                 from_vulnerability_ids=just(1),
                 from_messages=just('str')))

        # Create Mock location_data
        loc_data = data.draw(
            min_source_exposure(
                size=loc_size,
                from_building_tivs=st.one_of(st.floats(1.0, 1000.0),
                                             st.integers(1, 1000)),
                from_other_tivs=st.one_of(st.floats(1.0, 1000.0),
                                          st.integers(1, 1000)),
                from_contents_tivs=st.one_of(st.floats(1.0, 1000.0),
                                             st.integers(1, 1000)),
                from_bi_tivs=st.one_of(st.floats(1.0, 1000.0),
                                       st.integers(1, 1000))))

        # Prepare arguments for write_exposure_summary
        with TemporaryDirectory() as tmp_dir:

            keys_fp = os.path.join(tmp_dir, 'keys.csv')
            successes = [k for k in keys_data if k['status'] in ['success']]
            keys_errors_fp = os.path.join(tmp_dir, 'keys_errors.csv')
            nonsuccesses = [
                k for k in keys_data if k['status'] not in ['success']
            ]

            write_keys_files(keys=successes,
                             keys_file_path=keys_fp,
                             keys_errors=nonsuccesses,
                             keys_errors_file_path=keys_errors_fp)

            location_fp = os.path.join(tmp_dir, 'location.csv')
            write_source_files(exposure=loc_data, exposure_fp=location_fp)
            location_df = get_location_df(location_fp)

            exposure_summary_fp = write_exposure_summary(
                tmp_dir, location_df, keys_fp, keys_errors_fp,
                get_default_exposure_profile())
            self.assertTrue(os.path.isfile(exposure_summary_fp))

            with open(exposure_summary_fp) as f:
                data = json.load(f)
                loc_df = pd.DataFrame.from_dict(loc_data)
                loc_df['loc_id'] = get_ids(
                    loc_df, ['portnumber', 'accnumber', 'locnumber'])

                keys_df = pd.DataFrame.from_dict(keys_data)
                exp_summary = get_exposure_summary(loc_df, keys_df)
                self.assertDictAlmostEqual(data, exp_summary)
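This example and the two that follow draw values interactively with
data.draw(...), which requires Hypothesis' st.data() strategy; the
decorators are cut off in these excerpts, but a setup along these lines is
implied (inferred, not copied from the source):

# Inferred decorators for the interactive-draw tests above.
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st

@settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
@given(data=st.data())
def test_summary_file_written(self, data):
    ...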
Code example #6
File: test_summaries.py  Project: stacyliu16/OasisLMF
    def test_single_peril__totals_correct(self, data):

        # Values shared between the location and keys data
        loc_size = data.draw(integers(10, 20))
        supported_cov = data.draw(st.lists(integers(1,4), unique=True, min_size=1, max_size=4))
        perils = 'WTC'

        # Create Mock keys_df
        keys_data = list()
        for i in supported_cov:
            keys_data += data.draw(keys(
                size=loc_size,
                from_peril_ids=just(perils),
                from_coverage_type_ids=just(i),
                from_area_peril_ids=just(1),
                from_vulnerability_ids=just(1),
                from_messages=just('str')))
        keys_df = pd.DataFrame.from_dict(keys_data)

        # Create Mock location_df
        loc_df = pd.DataFrame.from_dict(data.draw(min_source_exposure(
            size=loc_size,
            from_location_perils_covered=just(perils),
            from_location_perils=just(perils),
            from_building_tivs=integers(1000, 1000000),
            from_other_tivs=integers(100, 100000),
            from_contents_tivs=integers(50, 50000),
            from_bi_tivs=integers(20, 20000))))
        loc_df['loc_id'] = get_ids(loc_df, ['portnumber', 'accnumber', 'locnumber'])

        # Run exposure_summary
        exp_summary = get_exposure_summary(
            exposure_df=loc_df,
            keys_df=keys_df,
        )

        # Run GUL processing
        gul_inputs = get_gul_input_items(loc_df, keys_df)
        gul_inputs = gul_inputs[gul_inputs['status'].isin(OASIS_KEYS_STATUS_MODELLED)]

        # Fetch expected TIVS
        tiv_portfolio = loc_df[['buildingtiv', 'othertiv', 'bitiv', 'contentstiv']].sum(1).sum(0)
        tiv_modelled = gul_inputs['tiv'].sum()
        tiv_not_modelled = tiv_portfolio - tiv_modelled

        # Check TIV values
        self.assertEqual(tiv_portfolio, exp_summary['total']['portfolio']['tiv'])
        self.assertEqual(tiv_modelled, exp_summary['total']['modelled']['tiv'])
        self.assertEqual(tiv_not_modelled, exp_summary['total']['not-modelled']['tiv'])

        # Check number of locs
        self.assertEqual(len(loc_df), exp_summary['total']['portfolio']['number_of_locations'])
        self.assertEqual(len(gul_inputs.loc_id.unique()), exp_summary['total']['modelled']['number_of_locations'])
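The sum(1).sum(0) chain first totals the four TIV columns across each row
(axis=1), then sums the resulting series (axis=0). A self-contained check
of the equivalence:

# Portfolio TIV: per-row totals summed, equal to collapsing both axes.
import pandas as pd

loc_df = pd.DataFrame({'buildingtiv': [1000, 2000], 'othertiv': [100, 200],
                       'bitiv': [20, 40], 'contentstiv': [50, 100]})
tiv_cols = ['buildingtiv', 'othertiv', 'bitiv', 'contentstiv']
assert loc_df[tiv_cols].sum(1).sum(0) == loc_df[tiv_cols].to_numpy().sum() == 3510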
Code example #7
    def test_multi_perils__multi_coverage(self, data):
        loc_size = data.draw(integers(10, 20))
        supported_cov = data.draw(
            st.lists(integers(1, 4), unique=True, min_size=1, max_size=4))
        perils = data.draw(
            st.lists(st.text(alphabet=(string.ascii_letters + string.digits),
                             min_size=2,
                             max_size=6),
                     min_size=2,
                     max_size=6,
                     unique=True))

        # Create Mock keys_df
        keys_data = list()
        for c in supported_cov:
            for p in perils:
                keys_data += data.draw(
                    keys(size=loc_size,
                         from_peril_ids=just(p),
                         from_coverage_type_ids=just(c),
                         from_area_peril_ids=just(1),
                         from_vulnerability_ids=just(1),
                         from_messages=just('str')))

        keys_df = pd.DataFrame.from_dict(keys_data)
        perils_returned = keys_df.peril_id.unique().tolist()

        # Create Mock location_df
        loc_df = pd.DataFrame.from_dict(
            data.draw(
                min_source_exposure(
                    size=loc_size,
                    from_location_perils_covered=st.sampled_from(perils),
                    from_location_perils=st.sampled_from(perils),
                    from_building_tivs=st.one_of(st.floats(1.0, 1000.0),
                                                 st.integers(1, 1000)),
                    from_other_tivs=st.one_of(st.floats(1.0, 1000.0),
                                              st.integers(1, 1000)),
                    from_contents_tivs=st.one_of(st.floats(1.0, 1000.0),
                                                 st.integers(1, 1000)),
                    from_bi_tivs=st.one_of(st.floats(1.0, 1000.0),
                                           st.integers(1, 1000)))))
        loc_df['loc_id'] = get_ids(loc_df,
                                   ['portnumber', 'accnumber', 'locnumber'])

        # Run Summary output check
        exp_summary = get_exposure_summary(exposure_df=loc_df, keys_df=keys_df)
        gul_inputs = get_gul_input_items(loc_df, keys_df)
        self.assertSummaryIsValid(loc_df, gul_inputs, exp_summary,
                                  perils_returned)
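The nested loops over supported_cov and perils build one block of loc_size
key records per (coverage type, peril) pair; itertools.product expresses
the same cross-product in a single loop (a drop-in rewrite of the loop
above, using the same test helpers):

# Equivalent cross-product over coverage types and perils.
from itertools import product

keys_data = []
for c, p in product(supported_cov, perils):
    keys_data += data.draw(
        keys(size=loc_size,
             from_peril_ids=just(p),
             from_coverage_type_ids=just(c),
             from_area_peril_ids=just(1),
             from_vulnerability_ids=just(1),
             from_messages=just('str')))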
Code example #8
class OasisLookupFactoryWriteKeys(TestCase):

    def create_fake_lookup(self):
        self.lookup_instance = Mock()
        return self.lookup_instance

    @settings(suppress_health_check=[HealthCheck.too_slow])
    @given(
        data=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']), size=10)
    )
    def test_produced_keys_are_passed_to_write_oasis_keys_file(self, data):
        write_oasis_keys_file_path = 'oasislmf.model_preparation.lookup.OasisLookupFactory.write_oasis_keys_file'
        with TemporaryDirectory() as d, patch(write_oasis_keys_file_path) as write_oasis_keys_file_mock:
            keys_file_path = os.path.join(d, 'piwind-keys.csv')

            olf.save_keys(
                keys_data=data,
                keys_file_path=keys_file_path,
            )
            write_oasis_keys_file_mock.assert_called_once_with(data, keys_file_path, False)
Code example #9
class TestSummaries(TestCase):
    @settings(deadline=None, suppress_health_check=[HealthCheck.too_slow])
    @given(exposure=source_exposure(
        from_account_ids=just('1'),
        from_portfolio_ids=just('1'),
        from_location_perils=just(';'.join(LOC_PERIL_IDS)),
        from_location_perils_covered=just(';'.join(LOC_PERIL_IDS)),
        from_country_codes=just('US'),
        from_area_codes=just('CA'),
        from_building_tivs=integers(1000, 1000000),
        from_building_deductibles=just(0),
        from_building_min_deductibles=just(0),
        from_building_max_deductibles=just(0),
        from_building_limits=just(0),
        from_other_tivs=integers(100, 100000),
        from_other_deductibles=just(0),
        from_other_min_deductibles=just(0),
        from_other_max_deductibles=just(0),
        from_other_limits=just(0),
        from_contents_tivs=integers(50, 50000),
        from_contents_deductibles=just(0),
        from_contents_min_deductibles=just(0),
        from_contents_max_deductibles=just(0),
        from_contents_limits=just(0),
        from_bi_tivs=integers(20, 20000),
        from_bi_deductibles=just(0),
        from_bi_min_deductibles=just(0),
        from_bi_max_deductibles=just(0),
        from_bi_limits=just(0),
        from_sitepd_deductibles=just(0),
        from_sitepd_min_deductibles=just(0),
        from_sitepd_max_deductibles=just(0),
        from_sitepd_limits=just(0),
        from_siteall_deductibles=just(0),
        from_siteall_min_deductibles=just(0),
        from_siteall_max_deductibles=just(0),
        from_siteall_limits=just(0),
        size=MAX_NLOCATIONS),
           keys=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
                     size=MAX_NKEYS))
    def test_write_exposure_summary(self, exposure, keys):
        """
        Test the write_exposure_summary method. Keys and keys-errors files
        are created with random perils and coverage types; at least one key
        is given success status, and the remaining keys are given either
        fail or nomatch statuses.

        Arithmetic within the output file is tested.
        """

        # Use current system time to set random seed
        random.seed(None)

        # Create keys and keys errors files with random perils and coverage
        # types. At least one key given success status.
        model_perils = random.sample(MODEL_PERIL_LS,
                                     random.randint(1, len(MODEL_PERIL_LS)))
        model_peril_ids = [PERILS[peril]['id'] for peril in model_perils]
        model_coverage_types = random.sample(
            COVERAGE_TYPE_IDS.keys(), random.randint(1,
                                                     len(COVERAGE_TYPE_IDS)))
        nlocations = {}
        nlocations[OASIS_KEYS_STATUS['success']['id']] = random.randint(
            1, MAX_NLOCATIONS)
        # Remaining keys given either fail or nomatch statuses
        if nlocations[OASIS_KEYS_STATUS['success']['id']] != MAX_NLOCATIONS:
            nlocations[OASIS_KEYS_STATUS['fail']['id']] = random.randint(
                0, MAX_NLOCATIONS -
                nlocations[OASIS_KEYS_STATUS['success']['id']])
            nlocations[OASIS_KEYS_STATUS['nomatch']
                       ['id']] = MAX_NLOCATIONS - nlocations[
                           OASIS_KEYS_STATUS['success']['id']] - nlocations[
                               OASIS_KEYS_STATUS['fail']['id']]
        else:
            nlocations[OASIS_KEYS_STATUS['fail']['id']] = nlocations[
                OASIS_KEYS_STATUS['nomatch']['id']] = 0
        location_ids_range = {}
        location_ids_range[OASIS_KEYS_STATUS['success']['id']] = [
            0, nlocations[OASIS_KEYS_STATUS['success']['id']]
        ]
        location_ids_range[OASIS_KEYS_STATUS['fail']['id']] = [
            nlocations[OASIS_KEYS_STATUS['success']['id']],
            nlocations[OASIS_KEYS_STATUS['success']['id']] +
            nlocations[OASIS_KEYS_STATUS['fail']['id']]
        ]
        location_ids_range[OASIS_KEYS_STATUS['nomatch']['id']] = [
            nlocations[OASIS_KEYS_STATUS['success']['id']] +
            nlocations[OASIS_KEYS_STATUS['fail']['id']], MAX_NLOCATIONS
        ]
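        # Worked example (assuming the status ids are 'success', 'fail'
        # and 'nomatch'): with MAX_NLOCATIONS = 10, 4 successes and
        # 3 fails,
        #   nlocations         -> {'success': 4, 'fail': 3, 'nomatch': 3}
        #   location_ids_range -> {'success': [0, 4], 'fail': [4, 7],
        #                          'nomatch': [7, 10]}
        # Each range is used below as a half-open (lower, upper] bound
        # on locnumber for the given status.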

        keys_per_loc = len(model_peril_ids) * len(model_coverage_types)
        successes = keys[:nlocations[OASIS_KEYS_STATUS['success']['id']] *
                         keys_per_loc]
        nonsuccesses = keys[nlocations[OASIS_KEYS_STATUS['success']['id']] *
                            keys_per_loc:MAX_NLOCATIONS * keys_per_loc]

        for row, key in enumerate(successes):
            key['locnumber'] = row // keys_per_loc + 1
            key['peril_id'] = model_peril_ids[(row //
                                               len(model_coverage_types)) %
                                              len(model_peril_ids)]
            key['coverage_type'] = model_coverage_types[
                row % len(model_coverage_types)]
        if len(nonsuccesses) != 0:
            for row, key in enumerate(nonsuccesses):
                key['locnumber'] = row // keys_per_loc + 1 + nlocations[
                    OASIS_KEYS_STATUS['success']['id']]
                key['peril_id'] = model_peril_ids[(row //
                                                   len(model_coverage_types)) %
                                                  len(model_peril_ids)]
                key['coverage_type'] = model_coverage_types[
                    row % len(model_coverage_types)]
                if key['locnumber'] <= (
                        nlocations[OASIS_KEYS_STATUS['success']['id']] +
                        nlocations[OASIS_KEYS_STATUS['fail']['id']]):
                    key['status'] = OASIS_KEYS_STATUS['fail']['id']
                else:
                    key['status'] = OASIS_KEYS_STATUS['nomatch']['id']
        else:  # If all keys have success status
            nonsuccesses = [{}]

        with TemporaryDirectory() as d:

            # Prepare arguments for write_exposure_summary
            target_dir = os.path.join(d, 'inputs')
            os.mkdir(target_dir)

            keys_fp = os.path.join(d, 'keys.csv')
            keys_errors_fp = os.path.join(d, 'keys_errors.csv')
            write_keys_files(keys=successes,
                             keys_file_path=keys_fp,
                             keys_errors=nonsuccesses,
                             keys_errors_file_path=keys_errors_fp)

            # If the keys errors file is empty, drop the empty row and
            # preserve the headings
            if not any(nonsuccesses):
                nonsuccesses_df = pd.read_csv(keys_errors_fp)
                nonsuccesses_df.drop([0], axis=0).to_csv(keys_errors_fp,
                                                         index=False,
                                                         encoding='utf-8')

            exposure_fp = os.path.join(d, 'exposure.csv')
            write_source_files(exposure=exposure, exposure_fp=exposure_fp)

            self.manager = om()
            exposure_profile = get_default_exposure_profile()

            exposure_df = get_location_df(exposure_fp, exposure_profile)
            gul_inputs_df = get_gul_input_items(exposure_df, keys_fp,
                                                exposure_profile)

            oed_hierarchy = get_oed_hierarchy(
                exposure_profile=exposure_profile)

            # Execute method
            write_exposure_summary(target_dir, gul_inputs_df, exposure_df,
                                   keys_errors_fp, exposure_profile,
                                   oed_hierarchy)

            model_coverage_tivs = {
                v['CoverageTypeID']: k.lower()
                for k, v in exposure_profile.items()
                if v.get('FMTermType') == 'TIV'
                and v.get('CoverageTypeID') in model_coverage_types
            }
            model_coverage_tivs = {
                COVERAGE_TYPE_IDS[k]: model_coverage_tivs[k]
                for k in set(COVERAGE_TYPE_IDS.keys())
                & set(model_coverage_tivs.keys())
            }

            # Workaround:
            # The test expects 'locnumber' to be an int, but its OED type is
            # a string, so the comparison would match strings against ints
            # and fail; convert locnumber before the checks below.
            exposure_df["locnumber"] = pd.to_numeric(exposure_df["locnumber"])

            # Get output file for testing
            output_filename = os.path.join(target_dir,
                                           'exposure_summary_report.json')
            with open(output_filename) as f:
                data = json.load(f)

            # Test integrity of output file
            # Loop over all modelled perils
            for peril in model_perils:
                # Test modelled peril is in output file
                self.assertIn(peril, data.keys())

                tiv_per_peril = 0
                tiv_per_coverage = {}
                total_nlocations = 0

                # Loop over all keys statuses
                for status in OASIS_KEYS_STATUS.values():
                    status_id = status['id']
                    tiv_per_status = 0

                    # Loop over all supported coverage types
                    for coverage_type in SUPPORTED_COVERAGE_TYPES.keys():
                        coverage_tiv = data[peril][status_id][
                            'tiv_by_coverage'][coverage_type]
                        tiv_per_status += coverage_tiv
                        if coverage_type in tiv_per_coverage.keys():
                            tiv_per_coverage[coverage_type] += coverage_tiv
                        else:
                            tiv_per_coverage[coverage_type] = coverage_tiv

                        # Test TIV by coverage values correctly summed
                        if (peril in LOC_PERIL_LS
                                and coverage_type in model_coverage_tivs):
                            self.assertEqual(
                                coverage_tiv, exposure_df.loc[
                                    (exposure_df['locnumber'] >
                                     location_ids_range[status_id][0]) &
                                    (exposure_df['locnumber'] <=
                                     location_ids_range[status_id][1]),
                                    model_coverage_tivs[coverage_type]].sum())
                        else:  # Modelled perils not in exposure dataframe
                            self.assertEqual(coverage_tiv, 0)

                    # Test sum of TIV by coverage per status
                    self.assertEqual(tiv_per_status,
                                     data[peril][status_id]['tiv'])

                    tiv_per_peril += tiv_per_status
                    total_nlocations += data[peril][status_id][
                        'number_of_locations']

                    # Test number of locations by peril per status
                    if peril in LOC_PERIL_LS:
                        self.assertEqual(
                            nlocations[status_id],
                            data[peril][status_id]['number_of_locations'])
                    else:  # Modelled perils not in exposure dataframe
                        self.assertEqual(
                            0, data[peril][status_id]['number_of_locations'])

                # Test sum of TIV by status per peril
                self.assertEqual(tiv_per_peril, data[peril]['all']['tiv'])

                # Loop over all supported coverage types
                for coverage_type in SUPPORTED_COVERAGE_TYPES.keys():

                    # Test sum of TIV by coverage and status per peril
                    self.assertEqual(
                        tiv_per_coverage[coverage_type],
                        data[peril]['all']['tiv_by_coverage'][coverage_type])

                # Test sum of number of locations per status
                self.assertEqual(total_nlocations,
                                 data[peril]['all']['number_of_locations'])
Code example #10
class OasisLookupFactoryWriteOasisKeysFiles(TestCase):
    @settings(deadline=None, suppress_health_check=HealthCheck.all())
    @given(
        successes=keys(from_statuses=just(OASIS_KEYS_STATUS['success']['id']),
                       size=5),
        nonsuccesses=keys(from_statuses=sampled_from([
            OASIS_KEYS_STATUS['fail']['id'], OASIS_KEYS_STATUS['nomatch']['id']
        ]),
                          size=5))
    def test_records_are_given___records_are_written_to_oasis_keys_files_correctly(
            self, successes, nonsuccesses):
        oasis_keys_file_to_record_metadict = {
            'LocID': 'id',
            'PerilID': 'peril_id',
            'CoverageTypeID': 'coverage_type',
            'AreaPerilID': 'area_peril_id',
            'VulnerabilityID': 'vulnerability_id'
        }
        oasis_keys_errors_file_to_record_metadict = {
            'LocID': 'id',
            'PerilID': 'peril_id',
            'CoverageTypeID': 'coverage_type',
            'Status': 'status',
            'Message': 'message'
        }

        with TemporaryDirectory() as d:
            keys_file_path = os.path.join(d, 'keys.csv')
            keys_errors_file_path = os.path.join(d, 'keys-errors.csv')

            result = pd.DataFrame(successes + nonsuccesses)
            result.rename(columns={'coverage_type_id': 'coverage_type'},
                          inplace=True)
            key_server = BasicKeyServer(
                {'builtin_lookup_type': 'deterministic'})
            _, successes_count, _, nonsuccesses_count = key_server.write_keys_file(
                [result],
                successes_fp=keys_file_path,
                errors_fp=keys_errors_file_path,
                output_format='oasis',
                keys_success_msg=False)

            with io.open(keys_file_path, 'r', encoding='utf-8') as f1, io.open(
                    keys_errors_file_path, 'r', encoding='utf-8') as f2:
                written_successes = [
                    dict((oasis_keys_file_to_record_metadict[k], r[k])
                         for k in r)
                    for r in pd.read_csv(f1).T.to_dict().values()
                ]
                written_nonsuccesses = [
                    dict((oasis_keys_errors_file_to_record_metadict[k], r[k])
                         for k in r)
                    for r in pd.read_csv(f2).T.to_dict().values()
                ]

            # NB: as in example #3, the original lambda returned a generator
            # expression, which is always truthy, so filter() kept every
            # record; any() applies the intended id-membership test.
            success_matches = list(
                filter(
                    lambda r: any(r['id'] == ws['id']
                                  for ws in written_successes), successes))
            nonsuccess_matches = list(
                filter(
                    lambda r: any(r['id'] == ws['id']
                                  for ws in written_nonsuccesses),
                    nonsuccesses))

            self.assertEqual(successes_count, len(successes))
            self.assertEqual(success_matches, successes)

            self.assertEqual(nonsuccesses_count, len(nonsuccesses))
            self.assertEqual(nonsuccess_matches, nonsuccesses)