class TestFairDistributionCurve(unittest.TestCase):

    def setUp(self):
        self._model_1 = FairModel('model1', n_simulations=5)
        self._model_1.input_data('Loss Event Frequency', mean=100, stdev=5)
        self._model_1.input_data('Loss Magnitude', mean=1000, stdev=50)
        self._model_1.calculate_all()
        # Node model or iterable test will be done prior to instantiation
        self._fdc1 = FairDistributionCurve(self._model_1)
        self._fdc2 = FairDistributionCurve([self._model_1, self._model_1])

    def tearDown(self):
        self._model_1 = None
        self._fdc1 = None
        self._fdc2 = None

    def test_generate_icon(self):
        """Test distribution icon generation"""
        for fdc in [self._fdc1, self._fdc2]:
            fdc.generate_icon('model1', 'Loss Event Frequency')
            self.assertRaises(KeyError, fdc.generate_icon, 'model5', 'Vulnerability')

    def test_generate_image(self):
        """Test main distribution image generation"""
        for fdc in [self._fdc1, self._fdc2]:
            fdc.generate_image()
def test_calculation(self):
    """Run a full calculate_all()."""
    # Create model and import data
    model = FairModel('Test', self.N_SAMPLES)
    model.input_data('Loss Magnitude', constant=100)
    # Calculate based on incomplete data
    self.assertRaises(FairException, model.calculate_all)
    # Complete calculation and run
    model.input_data('Loss Event Frequency', constant=10)
    model.calculate_all()
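# A minimal alternative sketch (not part of the original suite): the
# incomplete-data check above can also be written with assertRaises as a
# context manager from the standard unittest API, which reads more clearly
# when the call under test takes arguments. Uses only names already present
# in this suite (FairModel, FairException, self.N_SAMPLES).
def test_calculation_incomplete_data(self):
    """Same incomplete-data check, using assertRaises as a context manager."""
    model = FairModel('Test', self.N_SAMPLES)
    model.input_data('Loss Magnitude', constant=100)
    with self.assertRaises(FairException):
        model.calculate_all()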
def test_tree_graph_creation(self):
    """Test tree graph creation"""
    # There is little to test here other than simple creation
    # Whether it comes out OK or not ... ¯\_(ツ)_/¯
    model = FairModel(name='Test')
    model.input_data('Loss Magnitude', mean=50, stdev=5)
    model.input_data('Loss Event Frequency', low=10, mode=20, high=30)
    model.calculate_all()
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        ftg = FairTreeGraph(model, self._FORMAT_STRINGS)
        _, _ = ftg.generate_image()
def test_violin_plot_creation(self):
    """Test violin plot creation"""
    # There is little to test here other than simple creation
    # Whether it comes out OK or not ... ¯\_(ツ)_/¯
    model = FairModel(name='Test')
    model.input_data('Loss Magnitude', mean=50, stdev=5)
    model.input_data('Loss Event Frequency', low=10, mode=20, high=30)
    metamodel = FairMetaModel(name='Test Meta', models=[model, model])
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        fvp = FairViolinPlot(metamodel)
        _, _ = fvp.generate_image()
def test_inspection(self):
    """Check the inspection methods"""
    # Build model
    model = FairModel('Test', self.N_SAMPLES)
    model.input_data('Loss Magnitude', mean=20, stdev=10)
    model.input_data('Loss Event Frequency', constant=10)
    model.input_data('Loss Magnitude', constant=10)
    model.calculate_all()
    # Check inspection methods
    model.get_node_statuses()
    model.get_name()
    model.calculation_completed()
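# Hedged follow-up sketch (not in the original suite): the inspection calls
# above discard their return values. The assertions below check only weak
# properties (the name round-trips, the completion flag is truthy after
# calculate_all(), and the node-status collection is non-empty); the exact
# return types of these methods are an assumption, so nothing stronger is
# asserted.
def test_inspection_values(self):
    """Assert on values returned by the inspection methods."""
    model = FairModel('Test', self.N_SAMPLES)
    model.input_data('Loss Event Frequency', constant=10)
    model.input_data('Loss Magnitude', constant=10)
    model.calculate_all()
    self.assertEqual(model.get_name(), 'Test')
    self.assertTrue(model.calculation_completed())
    self.assertTrue(len(model.get_node_statuses()) > 0)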
def test_inputs(self):
    """Check the input methods (leave validation to FairDataInput)"""
    # Test basic input
    model = FairModel('Test', self.N_SAMPLES)
    model.input_data('Loss Magnitude', constant=100)
    # Test duplicate inputs passed
    model.input_data('Loss Magnitude', constant=10)
    # Test bulk_import_data
    model.bulk_import_data({
        'Loss Magnitude': {'constant': 100},
        'Loss Event Frequency': {'low': 10, 'mode': 15, 'high': 20}
    })
    # Test input_multi_data
    model.input_multi_data('Secondary Loss', {
        'Reputational': {
            'Secondary Loss Event Frequency': {'constant': 4000},
            'Secondary Loss Event Magnitude': {'low': 10, 'mode': 20, 'high': 100},
        },
        'Legal': {
            'Secondary Loss Event Frequency': {'constant': 2000},
            'Secondary Loss Event Magnitude': {'low': 10, 'mode': 20, 'high': 100},
        }
    })
    # Test input_raw_data
    model.input_raw_data('Vulnerability', [1] * self.N_SAMPLES)
    self.assertRaises(
        FairException,
        model.input_raw_data,
        'Vulnerability',
        [2] * self.N_SAMPLES
    )
    self.assertRaises(
        FairException,
        model.input_raw_data,
        'Vulnerability',
        'abc'
    )
    model.calculate_all()
def test_html_generation(self):
    """Check HTML content can be generated"""
    model_1 = FairModel(name='Model', n_simulations=10)
    model_1.input_data('Loss Event Frequency', mean=10, stdev=1)
    model_1.input_data('Loss Magnitude', low=0, mode=10, high=100)
    model_1.calculate_all()
    meta_model_1 = FairMetaModel(
        name='Meta',
        models=[model_1, model_1]
    ).calculate_all()
    # Suppress warnings for number of figures generated
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        fsr = FairSimpleReport([model_1, meta_model_1])
        _ = fsr._construct_output()
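# Hedged usage sketch (not in the original suite): outside of unit tests a
# FairSimpleReport is normally written to disk rather than inspected through
# _construct_output(). `to_html` is referenced later in this suite as the
# subclass-level output method; its exact signature (a single output path) is
# an assumption here, and the temporary-file handling is purely illustrative.
def test_to_html_smoke(self):
    """Write a simple report to a temporary HTML file."""
    import pathlib
    import tempfile
    model = FairModel(name='Model', n_simulations=10)
    model.input_data('Loss Event Frequency', mean=10, stdev=1)
    model.input_data('Loss Magnitude', low=0, mode=10, high=100)
    model.calculate_all()
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        fsr = FairSimpleReport([model])
        with tempfile.TemporaryDirectory() as tmp_dir:
            fsr.to_html(str(pathlib.Path(tmp_dir) / 'report.html'))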
class TestFairExceedenceCurves(unittest.TestCase):

    _RISK = pd.Series([10_000, 20_000, 30_000])
    _SPACE = pd.Series(np.linspace(0, 30_000, 100))
    _MEAN_QUANTILE = 34
    _MEAN_PERCENT = 66

    def setUp(self):
        self._model_1 = FairModel('model1', n_simulations=5)
        self._model_1.input_data('Loss Event Frequency', mean=100, stdev=5)
        self._model_1.input_data('Loss Magnitude', mean=1000, stdev=50)
        self._model_1.calculate_all()
        # Node model or iterable test will be done prior to instantiation
        self._fec_1 = FairExceedenceCurves(self._model_1)
        self._fec_2 = FairExceedenceCurves([self._model_1, self._model_1])

    def tearDown(self):
        self._model_1 = None
        self._fec_1 = None
        self._fec_2 = None

    def test_generate_image(self):
        """Ensure generate_image() output"""
        for fec in [self._fec_1, self._fec_2]:
            fec.generate_image()

    def test_prob_data(self):
        """Test quantile generation"""
        quantiles, space = self._fec_1._get_prob_data(self._SPACE, self._RISK)
        self.assertAlmostEqual(self._MEAN_QUANTILE, quantiles.mean())

    def test_loss_data(self):
        """Test loss percentage data generation"""
        space, percent = self._fec_1._get_loss_data(self._SPACE, self._RISK)
        self.assertAlmostEqual(self._MEAN_PERCENT, percent.mean())
class TestFairBaseReport(unittest.TestCase):

    _CALLER_SOURCE_DOCSTRING = "\"\"\"Script to create and run a test suite.\"\"\""
    _BASE64_BYTES = bytes([100, 100, 100, 100, 100])
    _BASE64_BYTES_TAG = '<img src="data:image/png;base64, ZGRkZGQ=" alt=""/>'
    _BASE64_FIG_TAG_FIRST_50 = '<img src="data:image/png;base64, iVBORw0KGgoAAAANS'

    def setUp(self):
        self._fbr = FairBaseReport()
        self._model_1 = FairModel('model1', n_simulations=5)
        self._model_1.input_data('Risk', mean=100, stdev=5)
        self._model_1.calculate_all()
        self._model_2 = FairModel('model2', n_simulations=5)
        self._model_2.input_data('Risk', mean=1000, stdev=50)
        self._model_2.calculate_all()
        self._metamodel = FairMetaModel(
            name='meta',
            models=[self._model_1, self._model_2],
        )
        self._metamodel.calculate_all()

    def tearDown(self):
        self._fbr = None
        self._model_1 = None
        self._model_2 = None
        self._metamodel = None

    def test_input_check(self):
        """Test the validity of the input check"""
        # Create inputs
        bad_model = 'Not a model'
        model = self._model_1
        bad_meta = 0
        meta = self._metamodel
        model_list = [model, meta]
        bad_model_list_1 = []
        bad_model_list_2 = [model, bad_model]
        # Test good items
        for good_item in [model, meta]:
            self._fbr._input_check([good_item])
        self._fbr._input_check(model_list)
        # Test bad items
        for bad_item in [bad_model, bad_meta, bad_model_list_1, bad_model_list_2]:
            self.assertRaises(FairException, self._fbr._input_check, bad_item)

    def test_base64ify(self):
        """Test base64ify"""
        tag = self._fbr.base64ify(self._BASE64_BYTES)
        self.assertEqual(tag, self._BASE64_BYTES_TAG)

    # DO NOT TEST to_html or _construct_output. Those are done by subclass.

    def test_fig_to_img_tag(self):
        """Convert fig to image tag"""
        fig = matplotlib.pyplot.figure()
        tag_first_50 = self._fbr._fig_to_img_tag(fig)[:50]
        self.assertEqual(tag_first_50, self._BASE64_FIG_TAG_FIRST_50)

    def test_get_tree(self):
        """Test tree creation"""
        self._fbr._get_tree(self._model_1)

    def test_get_distribution(self):
        """Test distribution creation"""
        self._fbr._get_distribution(self._model_1, currency_prefix='$')
        self._fbr._get_distribution(
            [self._model_1, self._model_2],
            currency_prefix='$'
        )

    def test_get_distribution_icon(self):
        """Test distribution icon creation"""
        self._fbr._get_distribution_icon(self._model_1, 'Risk')

    def test_get_exceedence_curves(self):
        """Test exceedence curve creation"""
        self._fbr._get_exceedence_curves(self._model_1, currency_prefix='$')
        self._fbr._get_exceedence_curves(
            [self._model_1, self._model_2],
            currency_prefix='$'
        )

    def test_get_violins(self):
        """Test violin creation"""
        self._fbr._get_violins(self._metamodel)

    def test_get_overview_table(self):
        """Test overview table"""
        self._fbr._get_overview_table({
            'name_1': self._model_1,
            'name_2': self._model_2,
        })

    def test_get_model_parameter_table(self):
        """Get parameter table"""
        self._fbr._get_model_parameter_table(self._model_1)

    def test_get_metamodel_parameter_table(self):
        """Get metamodel parameter table"""
        self._fbr._get_metamodel_parameter_table(self._metamodel)
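# Conventional unittest entry point so the module can be run directly with
# `python <file>.py`. This guard is an addition to the excerpt (the original
# project appears to use a separate runner script, per _CALLER_SOURCE_DOCSTRING).
if __name__ == '__main__':
    unittest.main()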