Example #1
 def setUp(self):
     self._model_1 = FairModel('model1', n_simulations=5)
     self._model_1.input_data('Loss Event Frequency', mean=100, stdev=5)
     self._model_1.input_data('Loss Magnitude', mean=1000, stdev=50)
     self._model_1.calculate_all()
     # The model-or-iterable input check happens at instantiation
     self._fdc1 = FairDistributionCurve(self._model_1)
     self._fdc2 = FairDistributionCurve([self._model_1, self._model_1])
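These snippets come from pyfair's test suite and omit the test modules' imports. A minimal import block that would support most of the examples below; the report and utility module paths are assumptions based on pyfair's package layout, not verified against a specific release:

import json
import unittest
import warnings

import matplotlib.pyplot
import numpy as np
import pandas as pd

from pyfair import FairModel, FairMetaModel, FairSimpleReport
from pyfair.report.base_report import FairBaseReport            # path assumed
from pyfair.report.distribution import FairDistributionCurve    # path assumed
from pyfair.report.exceedence import FairExceedenceCurves       # path assumed
from pyfair.report.tree_graph import FairTreeGraph              # path assumed
from pyfair.report.violin import FairViolinPlot                 # path assumed
from pyfair.utility.fair_exception import FairException         # path assumed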
Example #2
 def setUp(self):
     self._fbr = FairBaseReport()
     self._model_1 = FairModel('model1', n_simulations=5)
     self._model_1.input_data('Risk', mean=100, stdev=5)
     self._model_2 = FairModel('model2', n_simulations=5)
     self._model_2.input_data('Risk', mean=1000, stdev=50)
     self._metamodel = FairMetaModel(
         name='meta', 
         models=[self._model_1, self._model_2],
     )
Example #3
 def test_exports(self):
     """Test outputs post calculation"""
     # Create model and calculate
     model = FairModel('Test', self.N_SAMPLES)
     model.bulk_import_data({
         'Loss Magnitude': {
             'constant': 100
         },
         'Loss Event Frequency': {
             'low': 10,
             'mode': 15,
             'high': 20
         }
     })
     model.calculate_all()
     # Export results
     results = model.export_results()
     self.assertIsInstance(results, pd.DataFrame)
     self.assertTrue(len(results) == self.N_SAMPLES)
     # Export json and ensure parse-able
     json_data = model.to_json()
     self.assertIsInstance(json_data, str)
     _ = json.loads(json_data)
     # Export params
     params = model.export_params()
     self.assertIsInstance(params, dict)
     self.assertTrue(params)
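Example #3 exercises the three export paths together. A minimal standalone sketch using only calls shown in these examples: the string returned by to_json() can be fed back through FairModel.read_json(), the static constructor exercised in Example #7.

model = FairModel('ExportSketch', 100)
model.bulk_import_data({
    'Loss Magnitude': {'constant': 100},
    'Loss Event Frequency': {'low': 10, 'mode': 15, 'high': 20},
})
model.calculate_all()

results = model.export_results()                 # pandas DataFrame, one row per simulation
params = model.export_params()                   # dict of the inputs supplied above
restored = FairModel.read_json(model.to_json())  # JSON round-trip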
Example #4
 def test_calculation(self):
     """Run a calulate all."""
     # Test regular instantiation
     m1 = FairModel.read_json(self._MODEL_JSON)
     m2 = FairModel.read_json(self._MODEL_JSON)
     self._meta = FairMetaModel('New Model', [m1, m2])
     # Test before
     self.assertFalse(self._meta.calculation_completed())
     # Calculate
     self._meta.calculate_all()
     # Test after
     self.assertTrue(self._meta.calculation_completed())
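A condensed sketch of the metamodel lifecycle that Example #4 verifies, built from the constructor calls shown in Example #2 rather than the JSON fixture: calculation_completed() flips from False to True once calculate_all() has run.

m1 = FairModel('m1', n_simulations=100)
m1.input_data('Risk', mean=100, stdev=5)
m2 = FairModel('m2', n_simulations=100)
m2.input_data('Risk', mean=1000, stdev=50)

meta = FairMetaModel(name='meta', models=[m1, m2])
assert not meta.calculation_completed()
meta.calculate_all()
assert meta.calculation_completed()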
Example #5
 def test_creation(self):
     """Test basic instantiation"""
     # Ensure existence of appropriate attributes
     self.assertTrue(self._meta._model_uuid)
     self.assertTrue(self._meta._creation_date)
     # Check that the risk table has the expected column count
     self.assertEqual(len(self._meta._risk_table.columns),
                      self._RISK_TABLE_COLUMN_COUNT)
     # Test regular instantiation
     m1 = FairModel.read_json(self._MODEL_JSON)
     m2 = FairModel.read_json(self._MODEL_JSON)
     self._meta = FairMetaModel('New Model', [m1, m2])
     # Throw garbage in metamodel
     self.assertRaises(FairException, FairMetaModel, 'Garbage Name',
                       ['Garbage Input'])
Example #6
 def test_good_inputs(self):
     """Test base_curve for good inputs"""
     model = FairModel('model')
     meta = FairMetaModel('meta', models=[model, model])
     good_list = [model, meta, model]
     for input_item in [model, meta, good_list]:
         self._fbc._input_check(input_item)
Example #7
 def test_read_json(self):
     """Test static method for reading JSON"""
     # Instantiate model
     model = FairModel.read_json(self.MODEL_JSON)
     self.assertTrue(model)
     # Ensure metamodel fails
     self.assertRaises(FairException, FairModel.read_json,
                       self.META_MODEL_JSON)
Example #8
 def test_calculation(self):
     """Run a calulate all."""
     # Create model and import data
     model = FairModel('Test', self.N_SAMPLES)
     model.input_data('Loss Magnitude', constant=100)
     # Calculate based on incomplete data
     self.assertRaises(FairException, model.calculate_all)
     # Complete calculation and run
     model.input_data('Loss Event Frequency', constant=10)
     model.calculate_all()
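The failure path in Example #8 can be written in plain exception-handling form. A sketch assuming FairException's import path from the block after Example #1: calculate_all() refuses to run while the FAIR tree is still incomplete.

model = FairModel('Incomplete', n_simulations=100)
model.input_data('Loss Magnitude', constant=100)
try:
    model.calculate_all()                   # Loss Event Frequency is missing
except FairException:
    model.input_data('Loss Event Frequency', constant=10)
    model.calculate_all()                   # tree is now complete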
Example #9
 def test_tree_graph_creation(self):
     """Test tree greaph creation"""
     # There is little to test here other than simple creation
     # Whether it comes out OK or not ... ¯\_(ツ)_/¯
     model = FairModel(name='Test')
     model.input_data('Loss Magnitude', mean=50, stdev=5)
     model.input_data('Loss Event Frequency', low=10, mode=20, high=30)
     model.calculate_all()
     with warnings.catch_warnings(record=False):
         warnings.simplefilter("ignore")
         ftg = FairTreeGraph(model, self._FORMAT_STRINGS)
         _, _ = ftg.generate_image()
Example #10
class TestFairDistributionCurve(unittest.TestCase):
    def setUp(self):
        self._model_1 = FairModel('model1', n_simulations=5)
        self._model_1.input_data('Loss Event Frequency', mean=100, stdev=5)
        self._model_1.input_data('Loss Magnitude', mean=1000, stdev=50)
        self._model_1.calculate_all()
        # The model-or-iterable input check happens at instantiation
        self._fdc1 = FairDistributionCurve(self._model_1)
        self._fdc2 = FairDistributionCurve([self._model_1, self._model_1])

    def tearDown(self):
        self._model_1 = None
        self._fdc1 = None
        self._fdc2 = None

    def test_generate_icon(self):
        "Test distribution icon generation" ""
        for fdc in [self._fdc1, self._fdc2]:
            fdc.generate_icon('model1', 'Loss Event Frequency')
            self.assertRaises(KeyError, fdc.generate_icon, 'model5',
                              'Vulnerability')

    def test_generate_image(self):
        """Test main distribution image generation"""
        for fdc in [self._fdc1, self._fdc2]:
            fdc.generate_image()
Example #11
 def test_bad_inputs(self):
     """Test base_curve for bad inputs."""
     model = FairModel('model')
     bad_input_1 = []
     bad_input_2 = [model, 'a', 1]
     bad_input_3 = 'abc'
     bad_list = [bad_input_1, bad_input_2, bad_input_3]
     for input_item in bad_list:
         self.assertRaises(
             FairException,
             self._fbc._input_check,
             input_item,
         )
Example #12
 def test_generate_image(self):
     """Check HTML content can be generated"""
     model_1 = FairModel(name='Model', n_simulations=10)
     model_1.input_data('Loss Event Frequency', mean=10, stdev=1)
     model_1.input_data('Loss Magnitude', low=0, mode=10, high=100)
     model_1.calculate_all()
     meta_model_1 = FairMetaModel(
         name='Meta', 
         models=[model_1, model_1]
     ).calculate_all()
     # Suppress warnings for number of figures generated
     with warnings.catch_warnings(record=False):
         warnings.simplefilter("ignore")
         fsr = FairSimpleReport([model_1, meta_model_1])
         _ = fsr._construct_output()
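Example #19 notes that to_html() and _construct_output() are exercised through subclasses such as FairSimpleReport. A sketch of the public path, assuming to_html() takes an output file path:

model = FairModel(name='Model', n_simulations=10)
model.input_data('Loss Event Frequency', mean=10, stdev=1)
model.input_data('Loss Magnitude', low=0, mode=10, high=100)
model.calculate_all()

report = FairSimpleReport([model])
report.to_html('report.html')   # signature assumed; _construct_output() builds the HTML body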
Example #13
 def test_violin_plot_creation(self):
     """Test violin plot creation"""
     # There is little to test here other than simple creation
     # Whether it comes out OK or not ... ¯\_(ツ)_/¯
     model = FairModel(name='Test')
     model.input_data('Loss Magnitude', mean=50, stdev=5)
     model.input_data('Loss Event Frequency', low=10, mode=20, high=30)
     metamodel = FairMetaModel(name='Test Meta', models=[model, model])
     with warnings.catch_warnings(record=False):
         warnings.simplefilter("ignore")
         fvp = FairViolinPlot(metamodel)
         _, _ = fvp.generate_image()
Example #14
    def test_creation(self):
        """Test basic instantiation."""
        # Create FairModel
        model = FairModel('Test', self.N_SAMPLES, random_seed=42)
        # Ensure existence of appropriate attributes
        attributes = [
            model._tree,
            model._data_input,
            model._calculation,
            model._model_uuid,
            model._creation_date,
        ]
        for attribute in attributes:
            self.assertTrue(attribute)

        # Check that the model table has the expected column count
        self.assertEqual(len(model._model_table.columns),
                         self.MODEL_TABLE_COLUMN_COUNT)
Example #15
 def test_input_check(self):
     """Test the validity of the input check"""
     # Create inputs
     bad_model = 'Not a model'
     model = FairModel(name='model')
     bad_meta = 0
     meta = FairMetaModel(name='metamodel', models=[model, model])
     model_list = [model, meta]
     bad_model_list_1 = []
     bad_model_list_2 = [model, bad_model]
     # Test good items
     for good_item in [model, meta, model_list]:
         self._fbr._input_check(good_item)
     # Test bad items
     for bad_item in [bad_model, bad_meta, bad_model_list_1, bad_model_list_2]: 
         self.assertRaises(
             FairException,
             self._fbr._input_check,
             bad_item
         )
Example #16
class TestFairExceedenceCurves(unittest.TestCase):

    _RISK = pd.Series([10_000, 20_000, 30_000])
    _SPACE = pd.Series(np.linspace(0, 30_000, 100))
    _MEAN_QUANTILE = 34
    _MEAN_PERCENT = 66

    def setUp(self):
        self._model_1 = FairModel('model1', n_simulations=5)
        self._model_1.input_data('Loss Event Frequency', mean=100, stdev=5)
        self._model_1.input_data('Loss Magnitude', mean=1000, stdev=50)
        self._model_1.calculate_all()
        # The model-or-iterable input check happens at instantiation
        self._fec_1 = FairExceedenceCurves(self._model_1)
        self._fec_2 = FairExceedenceCurves([
            self._model_1, 
            self._model_1
        ])

    def tearDown(self):
        self._model_1 = None
        self._fec_1 = None
        self._fec_2 = None

    def test_generate_image(self):
        """Ensure generate_image() output"""
        for fec in [self._fec_1, self._fec_2]:
            fec.generate_image()

    def test_prob_data(self):
        """Test quantile generation"""
        quantiles, space = self._fec_1._get_prob_data(
            self._SPACE,
            self._RISK
        )
        self.assertAlmostEqual(self._MEAN_QUANTILE, quantiles.mean())

    def test_loss_data(self):
        """Test loss percentage gneration data"""
        space, percent = self._fec_1._get_loss_data(
            self._SPACE,
            self._RISK,
        )
        self.assertAlmostEqual(self._MEAN_PERCENT, percent.mean())
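The class constants in Example #16 can be sanity-checked by hand. A numpy sketch of one plausible interpretation (not pyfair's actual implementation): averaged over the sampled space, the probability that a simulated loss falls at or below a point comes to roughly 34 percent, and the complementary exceedence probability to roughly 66 percent, matching _MEAN_QUANTILE and _MEAN_PERCENT.

import numpy as np

risk = np.array([10_000, 20_000, 30_000])
space = np.linspace(0, 30_000, 100)

# Percentage of simulated losses at or below each point in the space
at_or_below = np.array([(risk <= s).mean() * 100 for s in space])
# Complementary exceedence percentage at each point
exceedence = np.array([(risk > s).mean() * 100 for s in space])

print(at_or_below.mean())   # ~34, cf. _MEAN_QUANTILE
print(exceedence.mean())    # ~66, cf. _MEAN_PERCENT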
Example #17
 def test_inspection(self):
     """Check the inspection methods"""
     # Build model
     model = FairModel('Test', self.N_SAMPLES)
     model.input_data('Loss Magnitude', mean=20, stdev=10)
     model.input_data('Loss Event Frequency', constant=10)
     model.input_data('Loss Magnitude', constant=10)
     model.calculate_all()
     # Check inspection methods
     model.get_node_statuses()
     model.get_name()
     model.calculation_completed()
Example #18
 def test_inputs(self):
     """Check the input methods (leave validation to FairDataInput)"""
     # Test basic input
     model = FairModel('Test', self.N_SAMPLES)
     model.input_data('Loss Magnitude', constant=100)
     # Test duplicate inputs passed
     model.input_data('Loss Magnitude', constant=10)
     # Test bulk_import_data
     model.bulk_import_data({
         'Loss Magnitude': {
             'constant': 100
         },
         'Loss Event Frequency': {
             'low': 10,
             'mode': 15,
             'high': 20
         }
     })
     # Test import_multi_data
     model.input_multi_data(
         'Secondary Loss', {
             'Reputational': {
                 'Secondary Loss Event Frequency': {
                     'constant': 4000
                 },
                 'Secondary Loss Event Magnitude': {
                     'low': 10,
                     'mode': 20,
                     'high': 100
                 },
             },
             'Legal': {
                 'Secondary Loss Event Frequency': {
                     'constant': 2000
                 },
                 'Secondary Loss Event Magnitude': {
                     'low': 10,
                     'mode': 20,
                     'high': 100
                 },
             }
         })
     # Test input_raw_data
     model.input_raw_data('Vulnerability', [1] * self.N_SAMPLES)
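     # Vulnerability is a probability, so the out-of-range values ([2] * N)
     # and non-numeric data ('abc') below presumably trip the input checks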
     self.assertRaises(FairException, model.input_raw_data, 'Vulnerability',
                       [2] * self.N_SAMPLES)
     self.assertRaises(FairException, model.input_raw_data, 'Vulnerability',
                       'abc')
     model.calculate_all()
Example #19
class TestFairBaseReport(unittest.TestCase):

    _CALLER_SOURCE_DOCSTRING = "\"\"\"Script to create and run a test suite.\"\"\""
    _BASE64_BYTES = bytes([100, 100, 100, 100, 100])
    _BASE64_BYTES_TAG = '<img  src="data:image/png;base64, ZGRkZGQ=" alt=""/>'
    _BASE64_FIG_TAG_FIRST_50 = '<img  src="data:image/png;base64, iVBORw0KGgoAAAAN'

    def setUp(self):
        self._fbr = FairBaseReport()
        self._model_1 = FairModel('model1', n_simulations=5)
        self._model_1.input_data('Risk', mean=100, stdev=5)
        self._model_1.calculate_all()
        self._model_2 = FairModel('model2', n_simulations=5)
        self._model_2.input_data('Risk', mean=1000, stdev=50)
        self._model_2.calculate_all()
        self._metamodel = FairMetaModel(
            name='meta',
            models=[self._model_1, self._model_2],
        )
        self._metamodel.calculate_all()

    def tearDown(self):
        self._fbr = None
        self._model_1 = None
        self._model_2 = None
        self._metamodel = None

    def test_input_check(self):
        """Test the validity of the input check"""
        # Create inputs
        bad_model = 'Not a model'
        model = self._model_1
        bad_meta = 0
        meta = self._metamodel
        model_list = [model, meta]
        bad_model_list_1 = []
        bad_model_list_2 = [model, bad_model]
        # Test good items
        for good_item in [model, meta]:
            self._fbr._input_check([good_item])
        for good_item in [model_list]:
            self._fbr._input_check(model_list)
        # Test bad items
        for bad_item in [
                bad_model, bad_meta, bad_model_list_1, bad_model_list_2
        ]:
            self.assertRaises(FairException, self._fbr._input_check, bad_item)

    def test_base64ify(self):
        """Test base64ify"""
        tag = self._fbr.base64ify(self._BASE64_BYTES)
        self.assertEqual(tag, self._BASE64_BYTES_TAG)

    # DO NOT TEST to_html or _construct_output. Those are done by subclass.

    def test_fig_to_img_tag(self):
        """Convert fig to image tag"""
        fig = matplotlib.pyplot.figure()
        tag_first_50 = self._fbr._fig_to_img_tag(fig)[:50]
        self.assertEqual(tag_first_50, self._BASE64_FIG_TAG_FIRST_50)

    def test_get_tree(self):
        """Test tree creation creation"""
        self._fbr._get_tree(self._model_1)

    def test_get_distribution(self):
        """Test distribution creation"""
        self._fbr._get_distribution(self._model_1, currency_prefix='$')
        self._fbr._get_distribution([self._model_1, self._model_2],
                                    currency_prefix='$')

    def test_get_distribution_icon(self):
        """Test distribution icon creation"""
        self._fbr._get_distribution_icon(self._model_1, 'Risk')

    def test_get_exceedence_curves(self):
        """Test exceedence curve creation"""
        self._fbr._get_exceedence_curves(self._model_1, currency_prefix='$')
        self._fbr._get_exceedence_curves([self._model_1, self._model_2],
                                         currency_prefix='$')

    def test_get_violins(self):
        """Test violin creation"""
        self._fbr._get_violins(self._metamodel)

    def test_get_overview_table(self):
        """Test overvieww table"""
        self._fbr._get_overview_table({
            'name_1': self._model_1,
            'name_2': self._model_2,
        })

    def test_get_model_parameter_table(self):
        """Get paramter table"""
        self._fbr._get_model_parameter_table(self._model_1)

    def test_get_metamodel_parameter_table(self):
        """Get metamodel paramter table"""
        self._fbr._get_metamodel_parameter_table(self._metamodel)
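The base64 constants in this last example can be reproduced independently. A rough equivalent of what base64ify appears to do, reconstructed from _BASE64_BYTES and _BASE64_BYTES_TAG rather than from pyfair's source:

import base64

def base64ify_sketch(data: bytes, options: str = '') -> str:
    """Wrap base64-encoded bytes in an HTML img tag (interpretation only)."""
    encoded = base64.b64encode(data).decode('utf-8')
    return '<img ' + options + ' src="data:image/png;base64, ' + encoded + '" alt=""/>'

# bytes([100] * 5) is b'ddddd', which base64-encodes to 'ZGRkZGQ='
assert base64ify_sketch(bytes([100] * 5)) == (
    '<img  src="data:image/png;base64, ZGRkZGQ=" alt=""/>'
)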