Example #1
def determine_target_value(metric: Metric, measurement: dict, scale: Scale,
                           target: TargetType):
    """Determine the target, near target or debt target value."""
    if scale == metric.scale():
        target_value = metric.get_target(target)
    else:
        target_value = measurement.get(scale, {}).get(target)
    if target == "debt_target" and metric.accept_debt_expired():
        return None
    return target_value
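A minimal self-contained sketch of the lookup order above, using plain dicts in place of the real Metric object (all values illustrative): when the requested scale is not the metric's native scale, the target is read from the measurement's per-scale data.

metric_targets = {"target": "20"}               # stands in for metric.get_target()
measurement = {"percentage": {"target": "10"}}  # per-scale data on the measurement
metric_scale, requested_scale = "count", "percentage"
target_value = (metric_targets.get("target") if requested_scale == metric_scale
                else measurement.get(requested_scale, {}).get("target"))
print(target_value)  # -> "10", read from the measurement, not the metric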
Example #2
def insert_new_measurement(database: Database, data_model, metric_data: dict,
                           measurement: dict, previous_measurement: dict) -> dict:
    """Insert a new measurement."""
    if "_id" in measurement:
        del measurement["_id"]
    metric = Metric(data_model, metric_data)
    metric_type = data_model["metrics"][metric.type()]
    measurement["start"] = measurement["end"] = now = iso_timestamp()
    for scale in metric_type["scales"]:
        value = calculate_measurement_value(data_model, metric, measurement["sources"], scale)
        status = metric.status(value)
        measurement[scale] = dict(value=value, status=status, direction=metric.direction())
        # We can't cover determine_status_start() returning False in the feature tests
        # because all new measurements have a status start timestamp, hence the
        # pragma: no cover-behave:
        if status_start := determine_status_start(status, previous_measurement, scale, now):  # pragma: no cover-behave
            measurement[scale]["status_start"] = status_start
        for target in ("target", "near_target", "debt_target"):
            target_type = cast(TargetType, target)
            measurement[scale][target] = determine_target_value(metric, measurement, scale, target_type)
    # The listing truncates the function here; persisting the measurement and
    # returning it, as the unused `database` argument and the `-> dict` annotation
    # imply, is an assumption:
    database.measurements.insert_one(measurement)
    return measurement
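For reference, an illustrative shape of the per-scale entry the loop above builds (field values are made up, not taken from the real data model):

measurement_count_scale = dict(
    value="42",
    status="near_target_met",
    direction="<",
    status_start="2021-01-01T00:00:00+00:00",  # only set when determine_status_start() returns a value
    target="40",
    near_target="45",
    debt_target=None,
)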
Example #3
 def test_green_with_debt(self):
     """Test a measurement with debt, better than the target."""
     metric = Metric(
         self.data_model,
         dict(type="metric_type",
              target="20",
              near_target="25",
              debt_target="30",
              accept_debt=True))
     self.assertEqual("target_met", metric.status("15"))
Example #4
 def test_debt_not_met(self):
     """Test a measurement worse than the accepted debt."""
     metric = Metric(
         self.data_model,
         dict(type="metric_type",
              target="20",
              near_target="25",
              debt_target="30",
              accept_debt=True))
     self.assertEqual("target_not_met", metric.status("35"))
Example #5
 def test_debt_end_date_removed(self):
     """Test a measurement with the technical end date reset."""
     metric = Metric(
         self.data_model,
         dict(type="metric_type",
              target="20",
              near_target="25",
              debt_target="30",
              accept_debt=True,
              debt_end_date=""),
     )
     self.assertEqual("debt_target_met", metric.status("29"))
Example #6
 def test_debt_past_end_date(self):
     """Test a measurement with expired debt."""
     metric = Metric(
         self.data_model,
         dict(
             type="metric_type",
             target="20",
             near_target="25",
             debt_target="30",
             accept_debt=True,
             debt_end_date="2019-06-10",
         ),
     )
     self.assertEqual("target_not_met", metric.status("29"))
Example #7
 def __init__(self, data, is_train, model_config):
     self.model_config = model_config
     self.data = data
     self.is_train = is_train
     self.model_fn = None
     self.rand_unif_init = tf.random_uniform_initializer(-0.08, 0.08)
     self.metric = Metric(self.model_config, self.data)
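The -0.08 to 0.08 interval is a common uniform-initialization range for recurrent models. A hedged TF1-style usage sketch; the variable name and shape are made up, and running the constructor above under the TF1 graph API is an assumption:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # the constructor above appears to target the TF1 API
rand_unif_init = tf.random_uniform_initializer(-0.08, 0.08)
w = tf.get_variable("w", shape=[128, 128], initializer=rand_unif_init)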
Example #8
def calculate_measurement_value(data_model, metric: Metric, sources,
                                scale: Scale) -> Optional[str]:
    """Calculate the measurement value from the source measurements."""
    if not sources or any(source["parse_error"] or source["connection_error"]
                          for source in sources):
        return None
    values = [
        int(source["value"]) -
        value_of_entities_to_ignore(data_model, metric, source)
        for source in sources
    ]
    add = metric.addition()
    if scale == "percentage":
        direction = metric.direction()
        totals = [int(source["total"]) for source in sources]
        if add is sum:
            values, totals = [sum(values)], [sum(totals)]
        values = [
            percentage(value, total, direction)
            for value, total in zip(values, totals)
        ]
    return str(add(values))
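A worked sketch of the percentage branch above (numbers illustrative): with addition sum, the per-source values and totals are collapsed before the percentage is taken, so the result is one overall ratio rather than the sum of per-source percentages.

values, totals = [3, 7], [10, 40]              # two sources: 3 of 10, 7 of 40
values, totals = [sum(values)], [sum(totals)]  # collapsed to 10 of 50
# The real percentage() helper also receives the direction (its rounding rules are
# not shown here); plain integer division is used just for illustration:
print(100 * values[0] // totals[0])  # -> 20, not 30% + 17.5% from the two sources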
Example #9
 def _handle_metrics_for_alternative(self, alternative, environment, metrics):
     for metric_name in metrics:
         self._log.info(self.__class__.__name__, 'Loading metric %s.', metric_name)
         # Look up the extractor and collector adapter names. They are strings because
         # the concrete extractor and collector depend on both the metric and the environment in use.
         extractor_adapter = self._system_parser.get_extractor_adapter(metric_name)
         # Load the actual adapter, based on this metric and the environment for the selected alternative
         extractor_class = self._factory_extractor.create_extractor(extractor_adapter, environment)
         self._log.info(self.__class__.__name__, 'Extractor %s has been created.', extractor_adapter)
         collector_adapter = self._system_parser.get_collector_adapter(metric_name)
         if collector_adapter is not None:
             collector_class = self._factory_collector.create_collector(collector_adapter, environment)
             self._log.info(self.__class__.__name__, 'Collector %s has been created.', collector_adapter)
         else:
             collector_class = None
             self._log.info(self.__class__.__name__, 'Extractor %s does not need any collectors.', extractor_adapter)
         # Create the metric
         metric = Metric(metric_name, collector_class, extractor_class)
         self._log.info(self.__class__.__name__, 'Metric %s has been created.', metric)
         alternative.add_metric(metric)
         self._log.info(self.__class__.__name__,
                        'Metric %s has been associated to the alternative %s.',
                        metric.get_name(), alternative.get_name())
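A compact, self-contained sketch of the look-up-by-name pattern the loop above relies on; the class and names below are stand-ins, not the project's API:

class FakeExtractorFactory:
    def create_extractor(self, adapter_name, environment):
        # The real factory resolves the string adapter name to a concrete
        # extractor class suited to the given environment.
        return f"{adapter_name}-extractor-for-{environment}"

print(FakeExtractorFactory().create_extractor("cpu_usage_adapter", "staging"))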
Example #10
def value_of_entities_to_ignore(data_model, metric: Metric, source) -> int:
    """Return the value of ignored entities, i.e. entities marked as fixed, false positive or won't fix.

    If the entities have a measured attribute, return the sum of the measured attributes of the ignored
    entities, otherwise return the number of ignored entities. For example, if the metric is the amount of ready
    user story points, the source entities are user stories and the measured attribute is the amount of story
    points of each user story.
    """
    entities = source.get("entity_user_data", {}).items()
    ignored_entities = [
        entity[0] for entity in entities
        if entity[1].get("status") in ("fixed", "false_positive", "wont_fix")
    ]
    source_type = metric.sources()[source["source_uuid"]]["type"]
    if attribute := get_measured_attribute(data_model, metric.type(), source_type):
        entity = data_model["sources"][source_type]["entities"].get(metric.type(), {})
        attribute_type = get_attribute_type(entity, attribute)
        convert = dict(float=float, integer=int, minutes=int)[attribute_type]
        value = sum(
            convert(entity[attribute]) for entity in source["entities"]
            if entity["key"] in ignored_entities)
    else:
        # The listing truncates the function here; counting the ignored entities when
        # there is no measured attribute, as the docstring describes, is an assumption:
        value = len(ignored_entities)
    return int(value)
Example #11
 def setUp(self):
     """Override to set up a metric fixture."""
     self.data_model = dict(
         metrics=dict(metric_type=dict(direction="<")),
         sources=dict(source_type=dict(entities=dict(metric_type=dict(
             attributes=[dict(key="story_points", type="integer")])))),
     )
     self.metric_data = dict(
         addition="sum",
         direction="<",
         type="metric_type",
         sources={
             SOURCE_ID: dict(type="source_type"),
             SOURCE_ID2: dict(type="source_type")
         },
     )
     self.metric = Metric(self.data_model, self.metric_data)
Example #12
 def test_near_target_worse_than_target(self):
     """Test that the measurement is red when the near target is worse than the target."""
     metric = Metric(
         self.data_model,
         dict(type="metric_type", target="20", near_target="15"))
     self.assertEqual("target_met", metric.status("17"))
Example #13
 def test_red(self):
     """Test a red measurement."""
     metric = Metric(
         self.data_model,
         dict(type="metric_type", target="20", near_target="25"))
     self.assertEqual("target_not_met", metric.status("30"))
Example #14
 def test_yellow(self):
     """Test a yellow measurement."""
     metric = Metric(
         self.data_model,
         dict(type="metric_type", target="20", near_target="25"))
     self.assertEqual("near_target_met", metric.status("22"))
Example #15
 def test_green(self):
     """Test a green measurement."""
     metric = Metric(
         self.data_model,
         dict(type="metric_type", target="20", near_target="15"))
     self.assertEqual("target_met", metric.status("10"))