def test_calculation(self):
    """Run calculate_all() and verify the completion flag flips."""
    # Build a fresh metamodel from two identical serialized models.
    first = FairModel.read_json(self._MODEL_JSON)
    second = FairModel.read_json(self._MODEL_JSON)
    self._meta = FairMetaModel('New Model', [first, second])
    # Before calculation the metamodel reports incomplete.
    self.assertFalse(self._meta.calculation_completed())
    self._meta.calculate_all()
    # After calculation it reports complete.
    self.assertTrue(self._meta.calculation_completed())
def setUp(self):
    """Create a report object, two calculated models, and a metamodel."""
    self._fbr = FairBaseReport()
    # First model: small magnitude.
    model_a = FairModel('model1', n_simulations=5)
    model_a.input_data('Risk', mean=100, stdev=5)
    model_a.calculate_all()
    self._model_1 = model_a
    # Second model: larger magnitude.
    model_b = FairModel('model2', n_simulations=5)
    model_b.input_data('Risk', mean=1000, stdev=50)
    model_b.calculate_all()
    self._model_2 = model_b
    # Aggregate both into a calculated metamodel.
    self._metamodel = FairMetaModel(name='meta', models=[model_a, model_b])
    self._metamodel.calculate_all()
def test_creation(self):
    """Test basic instantiation."""
    # Attributes assigned at construction time should be populated.
    self.assertTrue(self._meta._model_uuid)
    self.assertTrue(self._meta._creation_date)
    # Risk table should have the expected column count.
    column_total = len(self._meta._risk_table.columns)
    self.assertEqual(column_total, self._RISK_TABLE_COLUMN_COUNT)
    # Regular instantiation from two deserialized models should succeed.
    component_1 = FairModel.read_json(self._MODEL_JSON)
    component_2 = FairModel.read_json(self._MODEL_JSON)
    self._meta = FairMetaModel('New Model', [component_1, component_2])
    # Garbage model input must raise.
    self.assertRaises(FairException, FairMetaModel, 'Garnage Name', ['Garbage Input'])
def test_good_inputs(self):
    """Test base_curve input check accepts all valid input shapes."""
    model = FairModel('model')
    meta = FairMetaModel('meta', models=[model, model])
    # A lone model, a lone metamodel, and a mixed list are all acceptable.
    for candidate in (model, meta, [model, meta, model]):
        self._fbc._input_check(candidate)
def test_tree_graph_creation(self):
    """Test violin plot creation (smoke test — only checks it runs)."""
    # NOTE(review): method name says "tree graph" but the body exercises
    # FairViolinPlot — confirm which was intended.
    model = FairModel(name='Test')
    model.input_data('Loss Magnitude', mean=50, stdev=5)
    model.input_data('Loss Event Frequency', low=10, mode=20, high=30)
    metamodel = FairMetaModel(name='Test Meta', models=[model, model])
    # Matplotlib may warn about open figures; silence for the test.
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        plot = FairViolinPlot(metamodel)
        _, _ = plot.generate_image()
def test_generate_image(self):
    """Check HTML content can be generated."""
    base = FairModel(name='Model', n_simulations=10)
    base.input_data('Loss Event Frequency', mean=10, stdev=1)
    base.input_data('Loss Magnitude', low=0, mode=10, high=100)
    base.calculate_all()
    # assumes FairMetaModel.calculate_all() returns the instance for
    # chaining — verify against the metamodel implementation.
    meta = FairMetaModel(name='Meta', models=[base, base]).calculate_all()
    # Suppress warnings for number of figures generated.
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        report = FairSimpleReport([base, meta])
        _ = report._construct_output()
def test_input_check(self):
    """Test the validity of the input check."""
    model = FairModel(name='model')
    meta = FairMetaModel(name='metamodel', models=[model, model])
    # Acceptable inputs: a model, a metamodel, or a list of them.
    for item in (model, meta, [model, meta]):
        self._fbr._input_check(item)
    # Unacceptable inputs: wrong types, empty lists, lists with bad members.
    for item in ('Not a model', 0, [], [model, 'Not a model']):
        self.assertRaises(
            FairException,
            self._fbr._input_check,
            item
        )
def setUp(self):
    """Deserialize the metamodel fixture before each test."""
    # Static-method instantiation from the class-level JSON fixture.
    serialized = self._META_MODEL_JSON
    self._meta = FairMetaModel.read_json(serialized)
class TestFairMetaModel(unittest.TestCase):
    """Unit tests for FairMetaModel construction, calculation, and export."""

    # Expected number of columns in the metamodel's internal risk table.
    _RISK_TABLE_COLUMN_COUNT = 2
    _N_SAMPLES = 100
    # Serialized single-model fixture (a plain FairModel).
    _MODEL_JSON = '{ "Loss Event Frequency": { "low": 20, "mode": 100, "high": 900 }, "Loss Magnitude": { "low": 3000000, "mode": 3500000, "high": 5000000 }, "name": "Regular Model 1", "n_simulations": 10000, "random_seed": 42, "model_uuid": "b6c6c968-a03c-11e9-a5db-f26e0bbd6dbc", "type": "FairModel", "creation_date": "2019-07-06 17:23:43.647370" }'
    # Serialized metamodel fixture containing two component models.
    _META_MODEL_JSON = '{ "Regular Model 1": { "Loss Event Frequency": { "low": 20, "mode": 100, "high": 900, "gamma":4 }, "Loss Magnitude": { "low": 3000000, "mode": 3500000, "high": 5000000, "gamma":4 }, "name": "Regular Model 1", "n_simulations": 10000, "random_seed": 42, "model_uuid": "b6c6c968-a03c-11e9-a5db-f26e0bbd6dbc", "type": "FairModel", "creation_date": "2019-07-06 17:23:43.647370" }, "Regular Model 2": { "Loss Event Frequency": { "mean": 0.3, "stdev": 0.1 }, "Loss Magnitude": { "low": 2000000000, "mode": 3000000000, "high": 5000000000, "gamma":4 }, "name": "Regular Model 2", "n_simulations": 10000, "random_seed": 42, "model_uuid": "b6ca98a4-a03c-11e9-8ce0-f26e0bbd6dbc", "type": "FairModel", "creation_date": "2019-07-06 17:23:43.672336" }, "name": "My Meta Model!", "model_uuid": "b6cce298-a03c-11e9-b79f-f26e0bbd6dbc", "creation_date": "2019-07-06 17:23:43.687336", "type": "FairMetaModel" }'

    def setUp(self):
        """Instantiate a metamodel from the JSON fixture via static method."""
        self._meta = FairMetaModel.read_json(self._META_MODEL_JSON)

    def tearDown(self):
        self._meta = None

    def test_creation(self):
        """Test basic instantiation."""
        # Ensure existence of appropriate attributes.
        self.assertTrue(self._meta._model_uuid)
        self.assertTrue(self._meta._creation_date)
        # Check that the risk table has the expected column count.
        self.assertEqual(
            len(self._meta._risk_table.columns),
            self._RISK_TABLE_COLUMN_COUNT,
        )
        # Test regular instantiation from deserialized component models.
        m1 = FairModel.read_json(self._MODEL_JSON)
        m2 = FairModel.read_json(self._MODEL_JSON)
        self._meta = FairMetaModel('New Model', [m1, m2])
        # Throwing garbage into the metamodel must raise.
        self.assertRaises(FairException, FairMetaModel, 'Garnage Name', ['Garbage Input'])

    def test_read_json(self):
        """setUp covers most, so just test that plain-model JSON fails."""
        # A FairModel JSON blob is not a valid metamodel serialization.
        self.assertRaises(FairException, FairMetaModel.read_json, self._MODEL_JSON)

    def test_inspection(self):
        """Check the inspection methods."""
        # Accessors should not raise.
        self._meta.get_name()
        self._meta.get_uuid()
        # Results export as a DataFrame.
        self.assertIsInstance(self._meta.export_results(), pd.DataFrame)
        # Params export as a dict.
        self.assertIsInstance(self._meta.export_params(), dict)

    def test_calculation(self):
        """Run a calculate_all pass and verify the completion flag."""
        # Build a fresh, uncalculated metamodel.
        m1 = FairModel.read_json(self._MODEL_JSON)
        m2 = FairModel.read_json(self._MODEL_JSON)
        self._meta = FairMetaModel('New Model', [m1, m2])
        # Incomplete before ...
        self.assertFalse(self._meta.calculation_completed())
        self._meta.calculate_all()
        # ... complete after.
        self.assertTrue(self._meta.calculation_completed())

    def test_exports(self):
        """Test parameter and result exports are OK."""
        # Allow long diffs for the JSON comparison.
        self.maxDiff = 5_000
        # Compare round-tripped JSON with formatting stripped.
        # (assertEquals is deprecated and removed in Python 3.12; use
        # assertEqual.)
        self.assertEqual(
            self._meta.to_json().replace('\n', '').replace(' ', ''),
            self._META_MODEL_JSON.replace('\n', '').replace(' ', ''))
        # export_params() omits metadata keys, so strip them before comparing.
        param_dict = json.loads(self._META_MODEL_JSON)
        del param_dict['creation_date']
        del param_dict['model_uuid']
        del param_dict['name']
        del param_dict['type']
        self.assertEqual(self._meta.export_params(), param_dict)
class TestFairBaseReport(unittest.TestCase):
    """Unit tests for the FairBaseReport helper methods."""

    _CALLER_SOURCE_DOCSTRING = "\"\"\"Script to create and run a test suite.\"\"\""
    # Five 0x64 ('d') bytes; base64-encodes to 'ZGRkZGQ='.
    _BASE64_BYTES = bytes([100, 100, 100, 100, 100])
    _BASE64_BYTES_TAG = '<img src="data:image/png;base64, ZGRkZGQ=" alt=""/>'
    # Leading characters of an img tag wrapping a base64-encoded PNG.
    _BASE64_FIG_TAG_FIRST_50 = '<img src="data:image/png;base64, iVBORw0KGgoAAAAN'

    def setUp(self):
        """Create a report object, two calculated models, and a metamodel."""
        self._fbr = FairBaseReport()
        self._model_1 = FairModel('model1', n_simulations=5)
        self._model_1.input_data('Risk', mean=100, stdev=5)
        self._model_1.calculate_all()
        self._model_2 = FairModel('model2', n_simulations=5)
        self._model_2.input_data('Risk', mean=1000, stdev=50)
        self._model_2.calculate_all()
        self._metamodel = FairMetaModel(
            name='meta',
            models=[self._model_1, self._model_2],
        )
        self._metamodel.calculate_all()

    def tearDown(self):
        self._fbr = None
        self._model_1 = None
        self._model_2 = None
        self._metamodel = None

    def test_input_check(self):
        """Test the validity of the input check."""
        model = self._model_1
        meta = self._metamodel
        model_list = [model, meta]
        # Single models/metamodels (wrapped in a list) are acceptable.
        for good_item in [model, meta]:
            self._fbr._input_check([good_item])
        # A mixed list is acceptable too.
        # (Was a one-element loop whose variable went unused; a direct
        # call is equivalent.)
        self._fbr._input_check(model_list)
        # Wrong types, empty lists, and lists with bad members must raise.
        bad_items = ['Not a model', 0, [], [model, 'Not a model']]
        for bad_item in bad_items:
            self.assertRaises(FairException, self._fbr._input_check, bad_item)

    def test_base64ify(self):
        """Test base64ify."""
        tag = self._fbr.base64ify(self._BASE64_BYTES)
        # assertEquals is deprecated and removed in Python 3.12; use
        # assertEqual.
        self.assertEqual(tag, self._BASE64_BYTES_TAG)

    # DO NOT TEST to_html or _construct_output. Those are done by subclass.

    def test_fig_to_img_tag(self):
        """Convert fig to image tag."""
        fig = matplotlib.pyplot.figure()
        tag_first_50 = self._fbr._fig_to_img_tag(fig)[:50]
        self.assertEqual(tag_first_50, self._BASE64_FIG_TAG_FIRST_50)

    def test_get_tree(self):
        """Test tree creation."""
        self._fbr._get_tree(self._model_1)

    def test_get_distribution(self):
        """Test distribution creation for a single model and a list."""
        self._fbr._get_distribution(self._model_1, currency_prefix='$')
        self._fbr._get_distribution(
            [self._model_1, self._model_2],
            currency_prefix='$',
        )

    def test_get_distribution_icon(self):
        """Test distribution icon creation."""
        self._fbr._get_distribution_icon(self._model_1, 'Risk')

    def test_get_exceedence_curves(self):
        """Test exceedence curve creation for a single model and a list."""
        self._fbr._get_exceedence_curves(self._model_1, currency_prefix='$')
        self._fbr._get_exceedence_curves(
            [self._model_1, self._model_2],
            currency_prefix='$',
        )

    def test_get_violins(self):
        """Test violin creation."""
        self._fbr._get_violins(self._metamodel)

    def test_get_overview_table(self):
        """Test overview table."""
        self._fbr._get_overview_table({
            'name_1': self._model_1,
            'name_2': self._model_2,
        })

    def test_get_model_parameter_table(self):
        """Get parameter table."""
        self._fbr._get_model_parameter_table(self._model_1)

    def test_get_metamodel_parameter_table(self):
        """Get metamodel parameter table."""
        self._fbr._get_metamodel_parameter_table(self._metamodel)