def setUp(self):
    """Build a base report, two 'Risk'-only models, and a wrapping metamodel."""
    self._fbr = FairBaseReport()
    # The two models differ by an order of magnitude in their inputs.
    built = []
    for name, mean, stdev in (('model1', 100, 5), ('model2', 1000, 50)):
        model = FairModel(name, n_simulations=5)
        model.input_data('Risk', mean=mean, stdev=stdev)
        built.append(model)
    self._model_1, self._model_2 = built
    self._metamodel = FairMetaModel(
        name='meta',
        models=[self._model_1, self._model_2],
    )
def test_good_inputs(self):
    """Test base_curve for good inputs"""
    model = FairModel('model')
    meta = FairMetaModel('meta', models=[model, model])
    # A single model, a single metamodel, and a mixed list are all valid.
    for candidate in (model, meta, [model, meta, model]):
        self._fbc._input_check(candidate)
def test_exports(self):
    """Test outputs post calculation"""
    # Build and run a model with a constant magnitude and a PERT frequency.
    model = FairModel('Test', self.N_SAMPLES)
    model.bulk_import_data({
        'Loss Magnitude': {'constant': 100},
        'Loss Event Frequency': {'low': 10, 'mode': 15, 'high': 20},
    })
    model.calculate_all()
    # DataFrame export: one row per simulation.
    results = model.export_results()
    self.assertIsInstance(results, pd.DataFrame)
    self.assertTrue(len(results) == self.N_SAMPLES)
    # JSON export: must be a string the stdlib parser accepts.
    json_data = model.to_json()
    self.assertIsInstance(json_data, str)
    _ = json.loads(json_data)
    # Parameter export: a populated dict.
    params = model.export_params()
    self.assertIsInstance(params, dict)
    self.assertTrue(params)
def setUp(self):
    """Create one calculated model and distribution curves built from it."""
    model = FairModel('model1', n_simulations=5)
    for target, mean, stdev in (
        ('Loss Event Frequency', 100, 5),
        ('Loss Magnitude', 1000, 50),
    ):
        model.input_data(target, mean=mean, stdev=stdev)
    model.calculate_all()
    self._model_1 = model
    # Node model or iterable test will be done prior to instantiation
    self._fdc1 = FairDistributionCurve(self._model_1)
    self._fdc2 = FairDistributionCurve([self._model_1, self._model_1])
def test_calculation(self):
    """Run a full calculate-all pass."""
    model = FairModel('Test', self.N_SAMPLES)
    model.input_data('Loss Magnitude', constant=100)
    # With only one node supplied, calculation must raise.
    self.assertRaises(FairException, model.calculate_all)
    # Supplying the remaining node allows the calculation to succeed.
    model.input_data('Loss Event Frequency', constant=10)
    model.calculate_all()
def test_tree_graph_creation(self):
    """Test tree graph creation"""
    # Creation is the only thing worth checking here; whether the image
    # comes out OK or not ... ¯\_(ツ)_/¯
    model = FairModel(name='Test')
    model.input_data('Loss Magnitude', mean=50, stdev=5)
    model.input_data('Loss Event Frequency', low=10, mode=20, high=30)
    model.calculate_all()
    # Suppress warnings emitted during image generation.
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        tree_graph = FairTreeGraph(model, self._FORMAT_STRINGS)
        _, _ = tree_graph.generate_image()
def test_tree_graph_creation(self):
    """Test violin plot creation"""
    # NOTE(review): the method name says tree graph but the body exercises
    # the violin plot — presumably a copy/paste leftover; confirm before
    # renaming, since unittest discovers tests by method name.
    # Creation is the only thing worth checking; whether the image
    # comes out OK or not ... ¯\_(ツ)_/¯
    model = FairModel(name='Test')
    model.input_data('Loss Magnitude', mean=50, stdev=5)
    model.input_data('Loss Event Frequency', low=10, mode=20, high=30)
    metamodel = FairMetaModel(name='Test Meta', models=[model, model])
    # Suppress warnings emitted during image generation.
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        violin = FairViolinPlot(metamodel)
        _, _ = violin.generate_image()
def test_inspection(self):
    """Check the inspection methods"""
    # Build and calculate a model; 'Loss Magnitude' is supplied twice,
    # presumably to exercise the overwrite path — confirm intent.
    model = FairModel('Test', self.N_SAMPLES)
    model.input_data('Loss Magnitude', mean=20, stdev=10)
    model.input_data('Loss Event Frequency', constant=10)
    model.input_data('Loss Magnitude', constant=10)
    model.calculate_all()
    # Each inspection call should run cleanly post-calculation.
    model.get_node_statuses()
    model.get_name()
    model.calculation_completed()
def test_bad_inputs(self):
    """Test base_curve for bad inputs."""
    model = FairModel('model')
    # Each candidate should be rejected by the input check.
    rejects = (
        [],                 # empty list
        [model, 'a', 1],    # list containing non-model members
        'abc',              # not a model at all
    )
    for candidate in rejects:
        self.assertRaises(
            FairException,
            self._fbc._input_check,
            candidate,
        )
def test_inputs(self):
    """Check the input methods (leave validation to FairDataInput)"""
    model = FairModel('Test', self.N_SAMPLES)
    # Basic input, then a duplicate for the same node (overwrite path).
    model.input_data('Loss Magnitude', constant=100)
    model.input_data('Loss Magnitude', constant=10)
    # Bulk import of multiple nodes at once.
    model.bulk_import_data({
        'Loss Magnitude': {'constant': 100},
        'Loss Event Frequency': {'low': 10, 'mode': 15, 'high': 20},
    })
    # Multi-item input: two secondary-loss categories.
    secondary = {
        'Reputational': {
            'Secondary Loss Event Frequency': {'constant': 4000},
            'Secondary Loss Event Magnitude': {
                'low': 10, 'mode': 20, 'high': 100,
            },
        },
        'Legal': {
            'Secondary Loss Event Frequency': {'constant': 2000},
            'Secondary Loss Event Magnitude': {
                'low': 10, 'mode': 20, 'high': 100,
            },
        },
    }
    model.input_multi_data('Secondary Loss', secondary)
    # Raw data: first supply is accepted, re-supply and bad payloads raise.
    model.input_raw_data('Vulnerability', [1] * self.N_SAMPLES)
    self.assertRaises(
        FairException,
        model.input_raw_data,
        'Vulnerability',
        [2] * self.N_SAMPLES,
    )
    self.assertRaises(
        FairException,
        model.input_raw_data,
        'Vulnerability',
        'abc',
    )
    model.calculate_all()
def test_generate_image(self):
    """Check HTML content can be generated"""
    model_1 = FairModel(name='Model', n_simulations=10)
    model_1.input_data('Loss Event Frequency', mean=10, stdev=1)
    model_1.input_data('Loss Magnitude', low=0, mode=10, high=100)
    model_1.calculate_all()
    # NOTE(review): this relies on calculate_all() returning the metamodel
    # (fluent style); if it returns None the report receives None — confirm.
    meta_model_1 = FairMetaModel(
        name='Meta',
        models=[model_1, model_1],
    ).calculate_all()
    # Suppress warnings for number of figures generated
    with warnings.catch_warnings(record=False):
        warnings.simplefilter("ignore")
        report = FairSimpleReport([model_1, meta_model_1])
        _ = report._construct_output()
def test_creation(self):
    """Test basic instantiation."""
    model = FairModel('Test', self.N_SAMPLES, random_seed=42)
    # Every core attribute should exist (and be truthy) right after init.
    for attribute in (
        model._tree,
        model._data_input,
        model._calculation,
        model._model_uuid,
        model._creation_date,
    ):
        self.assertTrue(attribute)
    # The model table should carry the expected number of columns.
    self.assertEqual(
        len(model._model_table.columns),
        self.MODEL_TABLE_COLUMN_COUNT,
    )
def test_input_check(self):
    """Test the validity of the input check"""
    model = FairModel(name='model')
    meta = FairMetaModel(name='metamodel', models=[model, model])
    # Valid: a single model, a single metamodel, or a list of them.
    for valid in (model, meta, [model, meta]):
        self._fbr._input_check(valid)
    # Invalid: wrong types, an empty list, or a list with a bad member.
    for invalid in ('Not a model', 0, [], [model, 'Not a model']):
        self.assertRaises(
            FairException,
            self._fbr._input_check,
            invalid,
        )