def test_convert_to_score(self):
    """convert_to_score maps raw KPI values per each KPI's objective."""
    kpi_specs = [
        KPISpecification(objective="MINIMISE"),
        KPISpecification(objective="MAXIMISE"),
        KPISpecification(objective="TARGET", target_value=10),
    ]
    raw_values = [10.0, 20.0, 15.0]

    scores = convert_to_score(raw_values, kpi_specs)

    # MINIMISE passes through, MAXIMISE is negated, TARGET(10) on 15.0
    # yields 5.0 (presumably the distance to target — verify in impl).
    self.assertListEqual(list(scores), [10.0, -20.0, 5.0])
class TestKPISpecification(TestCase):
    """Verification checks for KPISpecification."""

    def setUp(self):
        # Fresh, named KPI for every test.
        self.kpi = KPISpecification(name="Test")

    def test_verify_name(self):
        # A named KPI verifies cleanly.
        self.assertEqual(0, len(self.kpi.verify()))

        # Blanking the name produces exactly one naming error.
        self.kpi.name = ""
        messages = [err.local_error for err in self.kpi.verify()]
        self.assertEqual(1, len(messages))
        self.assertIn("KPI is not named", messages)
def from_json(cls, factory, json_data):
    """ Instantiate a BaseMCOModel object from a `json_data`
    dictionary and the generating `factory` object.

    Parameters
    ----------
    factory: BaseMCOFactory
        Generating factory object
    json_data: dict
        Dictionary with an MCOModel serialized data

    Returns
    -------
    mco_model: BaseMCOModel
        BaseMCOModel instance with attribute values from the
        `json_data` dict
    """
    # Work on a copy so the caller's dictionary is left untouched.
    data = deepcopy(json_data)

    # Rebuild each parameter model via the factory registered for its id.
    parameters = []
    for parameter_data in data["parameters"]:
        parameter_id = parameter_data["id"]  # renamed: avoid shadowing builtin `id`
        parameter_factory = factory.parameter_factory_by_id(parameter_id)
        parameter = parameter_factory.model_class.from_json(
            parameter_factory, parameter_data["model_data"])
        parameters.append(parameter)
    data["parameters"] = parameters

    # KPI entries are plain dicts of KPISpecification keyword arguments.
    data["kpis"] = [KPISpecification(**d) for d in data["kpis"]]

    mco_model = factory.create_model(data)
    return mco_model
class TestKPISpecification(TestCase):
    """Verification behaviour of KPISpecification: naming, bounds, target."""

    def setUp(self):
        # Fresh, named KPI for every test.
        self.kpi = KPISpecification(name="Test")

    def _errors(self):
        # Collect the local error strings reported by verify().
        return [error.local_error for error in self.kpi.verify()]

    def test_verify_name(self):
        self.assertEqual(0, len(self.kpi.verify()))

        self.kpi.name = ""
        errors = self._errors()
        self.assertEqual(1, len(errors))
        self.assertIn("KPI is not named", errors)

    def test_verify_bounds(self):
        self.kpi.use_bounds = True
        self.assertEqual(0, len(self.kpi.verify()))

        # Lower bound raised above the (default) upper bound.
        self.kpi.lower_bound = 2
        errors = self._errors()
        self.assertEqual(1, len(errors))
        self.assertIn(
            "Upper bound value of the KPI must be greater "
            "than the lower bound value.",
            errors)

    def test_verify_target(self):
        # A TARGET objective requires a non-zero target value.
        self.kpi.objective = "TARGET"
        errors = self._errors()
        self.assertEqual(1, len(errors))
        self.assertIn("Target value must be non-zero", errors)

        self.kpi.target_value = 0.5
        self.assertEqual(0, len(self.kpi.verify()))

        self.kpi.use_bounds = True
        self.assertEqual(0, len(self.kpi.verify()))

        # Target (0.5) now falls below the lower bound.
        self.kpi.lower_bound = 0.75
        errors = self._errors()
        self.assertEqual(1, len(errors))
        self.assertIn(
            "Target value of the KPI must be within the "
            "lower and the upper bounds.",
            errors)

        # Target 1.25 — presumably above the default upper bound.
        self.kpi.target_value = 1.25
        errors = self._errors()
        self.assertEqual(1, len(errors))
        self.assertIn(
            "Target value of the KPI must be within the "
            "lower and the upper bounds.",
            errors)
def test_empty_kpi_options(self):
    """An unnamed KPI on the MCO model is reported by verify_workflow."""
    workflow = self.workflow
    mco_factory = self.plugin.mco_factories[0]
    workflow.mco_model = mco_factory.create_model()
    workflow.mco_model.kpis.append(KPISpecification(name=''))

    errors = verify_workflow(workflow)

    # Three errors overall; the second one targets the unnamed KPI.
    self.assertEqual(len(errors), 3)
    self.assertEqual(errors[1].subject, workflow.mco_model.kpis[0])
    self.assertIn("KPI is not named", errors[1].local_error)
def test_data_sources(self):
    # Build a minimal workflow: one named parameter, one named KPI,
    # and one execution layer holding a single data source model.
    wf = self.workflow
    mco_factory = self.plugin.mco_factories[0]
    wf.mco_model = mco_factory.create_model()
    parameter_factory = mco_factory.parameter_factories[0]
    wf.mco_model.parameters.append(parameter_factory.create_model())
    wf.mco_model.parameters[0].name = "name"
    wf.mco_model.parameters[0].type = "type"
    wf.mco_model.kpis.append(KPISpecification(name='name'))
    layer = ExecutionLayer()
    wf.execution_layers.append(layer)
    ds_factory = self.plugin.data_source_factories[0]
    ds_model = ds_factory.create_model()
    layer.data_sources.append(ds_model)

    # With no input slot info defined, the input-slot count mismatch
    # is reported against the data source model.
    errors = verify_workflow(wf)
    self.assertEqual(errors[0].subject, ds_model)
    self.assertIn(
        "The number of input slots (1 values) returned by "
        "'Dummy data source' does not match the number "
        "of user-defined names specified (0 values). This "
        "is either a plugin error or a file error.",
        errors[0].local_error)

    # Supplying the input slot shifts the first error to the still
    # missing output slot info.
    ds_model.input_slot_info.append(InputSlotInfo(name="name"))
    errors = verify_workflow(wf)
    self.assertEqual(errors[0].subject, ds_model)
    self.assertIn(
        "The number of output slots (1 values) returned by "
        "'Dummy data source' does not match the number "
        "of user-defined names specified (0 values). This "
        "is either a plugin error or a file error.",
        errors[0].local_error)

    # With both slots named the workflow verifies cleanly.
    ds_model.output_slot_info.append(OutputSlotInfo(name="name"))
    errors = verify_workflow(wf)
    self.assertEqual(len(errors), 0)

    # Blanking the input slot name yields a single naming error.
    ds_model.input_slot_info[0].name = ''
    errors = verify_workflow(wf)
    self.assertEqual(len(errors), 1)
    self.assertIn("Input slot is not named", errors[0].local_error)

    # Blanking the output slot name as well adds output-naming errors.
    ds_model.output_slot_info[0].name = ''
    errors = verify_workflow(wf)
    self.assertEqual(len(errors), 3)
    self.assertIn("All output variables have undefined names",
                  errors[1].local_error)
    self.assertIn("An output variable has an undefined name",
                  errors[2].local_error)
def sample_workflow(self):
    """Assemble a small two-layer workflow fixture for tests."""
    workflow = Workflow()
    workflow.mco_model = self.mco_factory.create_model()
    workflow.mco_model.parameters = [
        self.mco_parameter_factory.create_model()
    ]
    workflow.mco_model.kpis = [KPISpecification()]

    # Two layers: the first holds two data sources, the second one.
    first_layer = ExecutionLayer(data_sources=[
        self.data_source_factory.create_model(),
        self.data_source_factory.create_model(),
    ])
    second_layer = ExecutionLayer(data_sources=[
        self.data_source_factory.create_model(),
    ])
    workflow.execution_layers = [first_layer, second_layer]
    return workflow
def test_empty_execution_layer(self):
    """A layer with no data sources is the only verification error."""
    workflow = self.workflow
    mco_factory = self.plugin.mco_factories[0]
    workflow.mco_model = mco_factory.create_model()

    # One fully-named parameter and one named KPI keep the rest of
    # the workflow valid, isolating the empty-layer error.
    parameter_factory = mco_factory.parameter_factories[0]
    workflow.mco_model.parameters.append(parameter_factory.create_model())
    workflow.mco_model.parameters[0].name = "name"
    workflow.mco_model.parameters[0].type = "type"
    workflow.mco_model.kpis.append(KPISpecification(name='name'))
    workflow.execution_layers.append(ExecutionLayer())

    errors = verify_workflow(workflow)

    self.assertEqual(len(errors), 1)
    self.assertEqual(errors[0].subject, workflow.execution_layers[0])
def setUp(self):
    # Fresh, named KPI specification for each test.
    self.kpi = KPISpecification(name="Test")
def test_multilayer_execution(self):
    # The multilayer performs the following execution
    # layer 0: in1 + in2 | in3 + in4
    #             res1         res2
    # layer 1: res1 + res2
    #             res3
    # layer 2: res3 * res1
    #             res4
    # layer 3: res4 * res2
    #             out1
    # Final result should be
    # out1 = (in1 + in2 + in3 + in4) * (in1 + in2) * (in3 + in4)
    data_values = [
        DataValue(value=10, name="in1"),
        DataValue(value=15, name="in2"),
        DataValue(value=3, name="in3"),
        DataValue(value=7, name="in4"),
    ]

    # Data source run function: sum of its two input values.
    def adder(model, parameters):
        first = parameters[0].value
        second = parameters[1].value
        return [DataValue(value=(first + second))]

    adder_factory = ProbeDataSourceFactory(
        self.plugin,
        input_slots_size=2,
        output_slots_size=1,
        run_function=adder,
    )

    # Data source run function: product of its two input values.
    def multiplier(model, parameters):
        first = parameters[0].value
        second = parameters[1].value
        return [DataValue(value=(first * second))]

    multiplier_factory = ProbeDataSourceFactory(
        self.plugin,
        input_slots_size=2,
        output_slots_size=1,
        run_function=multiplier,
    )

    mco_factory = ProbeMCOFactory(self.plugin)
    mco_model = mco_factory.create_model()
    parameter_factory = mco_factory.parameter_factories[0]

    # Four MCO parameters feed layer 0; out1 is the single KPI.
    mco_model.parameters = [
        parameter_factory.create_model({"name": "in1"}),
        parameter_factory.create_model({"name": "in2"}),
        parameter_factory.create_model({"name": "in3"}),
        parameter_factory.create_model({"name": "in4"}),
    ]
    mco_model.kpis = [KPISpecification(name="out1")]

    wf = Workflow(
        mco_model=mco_model,
        execution_layers=[
            ExecutionLayer(),
            ExecutionLayer(),
            ExecutionLayer(),
            ExecutionLayer(),
        ],
    )

    # Layer 0: res1 = in1 + in2, res2 = in3 + in4
    model = adder_factory.create_model()
    model.input_slot_info = [
        InputSlotInfo(name="in1"),
        InputSlotInfo(name="in2"),
    ]
    model.output_slot_info = [OutputSlotInfo(name="res1")]
    wf.execution_layers[0].data_sources.append(model)

    model = adder_factory.create_model()
    model.input_slot_info = [
        InputSlotInfo(name="in3"),
        InputSlotInfo(name="in4"),
    ]
    model.output_slot_info = [OutputSlotInfo(name="res2")]
    wf.execution_layers[0].data_sources.append(model)

    # layer 1: res3 = res1 + res2
    model = adder_factory.create_model()
    model.input_slot_info = [
        InputSlotInfo(name="res1"),
        InputSlotInfo(name="res2"),
    ]
    model.output_slot_info = [OutputSlotInfo(name="res3")]
    wf.execution_layers[1].data_sources.append(model)

    # layer 2: res4 = res3 * res1
    model = multiplier_factory.create_model()
    model.input_slot_info = [
        InputSlotInfo(name="res3"),
        InputSlotInfo(name="res1"),
    ]
    model.output_slot_info = [OutputSlotInfo(name="res4")]
    wf.execution_layers[2].data_sources.append(model)

    # layer 3: out1 = res4 * res2
    model = multiplier_factory.create_model()
    model.input_slot_info = [
        InputSlotInfo(name="res4"),
        InputSlotInfo(name="res2"),
    ]
    model.output_slot_info = [OutputSlotInfo(name="out1")]
    wf.execution_layers[3].data_sources.append(model)

    # (10 + 15 + 3 + 7) * (10 + 15) * (3 + 7) = 35 * 25 * 10 = 8750
    kpi_results = wf.execute(data_values)
    self.assertEqual(1, len(kpi_results))
    self.assertEqual(8750, kpi_results[0].value)
def test_kpi_specification_adherence(self):
    # Often the user may only wish to treat a subset of DataSource
    # output slots as KPIs. This test makes sure they get what they
    # ask for!

    # keep input DataValues constant
    data_values = [
        DataValue(value=99, name="in1"),
        DataValue(value=1, name="in2"),
    ]

    # dummy addition DataSource(a, b) that also returns its inputs
    # [a, b, a+b]
    def adder(model, parameters):
        first = parameters[0].value
        second = parameters[1].value
        return [
            DataValue(value=first),
            DataValue(value=second),
            DataValue(value=(first + second)),
        ]

    adder_factory = ProbeDataSourceFactory(
        self.plugin,
        input_slots_size=2,
        output_slots_size=3,
        run_function=adder,
    )

    mco_factory = ProbeMCOFactory(self.plugin)
    parameter_factory = mco_factory.parameter_factories[0]
    mco_model = mco_factory.create_model()

    # DataSourceModel stays constant throughout
    model = adder_factory.create_model()
    model.input_slot_info = [
        InputSlotInfo(name="in1"),
        InputSlotInfo(name="in2"),
    ]
    model.output_slot_info = [
        OutputSlotInfo(name="out1"),
        OutputSlotInfo(name="out2"),
        OutputSlotInfo(name="out3"),
    ]

    # test Parameter and KPI spec that follows DataSource slots
    # exactly
    mco_model.parameters = [
        parameter_factory.create_model({"name": "in1"}),
        parameter_factory.create_model({"name": "in2"}),
    ]
    mco_model.kpis = [
        KPISpecification(name="out1"),
        KPISpecification(name="out2"),
        KPISpecification(name="out3"),
    ]

    # need to make a new workflow for each KPISpecification
    wf = Workflow(mco_model=mco_model,
                  execution_layers=[ExecutionLayer()])
    wf.execution_layers[0].data_sources.append(model)
    kpi_results = wf.execute(data_values)

    self.assertEqual(len(kpi_results), 3)
    self.assertEqual(kpi_results[0].value, 99)
    self.assertEqual(kpi_results[1].value, 1)
    self.assertEqual(kpi_results[2].value, 100)
    self.assertEqual(kpi_results[0].name, "out1")
    self.assertEqual(kpi_results[1].name, "out2")
    self.assertEqual(kpi_results[2].name, "out3")

    # now test every ordered, non-empty subset of the output slots.
    # NOTE(review): itertools.permutations never repeats an element and
    # the range stops at 1, so repeated-KPI and empty-KPI cases are NOT
    # exercised, despite what the original comment claimed.
    import itertools

    out_options = [("out1", 99), ("out2", 1), ("out3", 100)]
    # Fixed: the range previously started at len(out_options) + 2, but
    # permutations() with r > n yields nothing, so those first two
    # iterations were dead code.
    for num_outputs in range(len(out_options), 0, -1):
        for spec in itertools.permutations(out_options, r=num_outputs):
            mco_model.kpis = [
                KPISpecification(name=name) for name, _ in spec
            ]
            wf = Workflow(
                mco_model=mco_model,
                execution_layers=[ExecutionLayer()],
            )
            wf.execution_layers[0].data_sources.append(model)
            kpi_results = wf.execute(data_values)

            # One result per requested KPI, in the requested order.
            self.assertEqual(len(kpi_results), num_outputs)
            for i in range(num_outputs):
                self.assertEqual(kpi_results[i].name, spec[i][0])
                self.assertEqual(kpi_results[i].value, spec[i][1])