Code example #1
    def test_evaluate(self):
        """Tests that the evaluation is generated correctly."""
        e = OdlcEvaluator(self.submitted_odlcs, self.real_odlcs)
        d = e.evaluate()
        td = {t.real_odlc: t for t in d.odlcs}

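        # submit1 matches real1 perfectly, so every component score ratio is maximal.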
        self.assertEqual(self.submit1.pk, td[self.real1.pk].submitted_odlc)
        self.assertEqual(True, td[self.real1.pk].image_approved)
        self.assertEqual(1.0, td[self.real1.pk].classifications_ratio)
        self.assertEqual(0.0, td[self.real1.pk].geolocation_accuracy_ft)
        self.assertEqual(True, td[self.real1.pk].actionable_submission)
        self.assertEqual(1.0, td[self.real1.pk].classifications_score_ratio)
        self.assertEqual(1.0, td[self.real1.pk].geolocation_score_ratio)
        self.assertEqual(1.0, td[self.real1.pk].actionable_score_ratio)
        self.assertEqual(1.0, td[self.real1.pk].autonomous_score_ratio)
        self.assertAlmostEqual(1.0, td[self.real1.pk].score_ratio)

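        # submit2 is only a partial match: weaker classification and geolocation lower its score.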
        self.assertEqual(self.submit2.pk, td[self.real2.pk].submitted_odlc)
        self.assertEqual(True, td[self.real2.pk].image_approved)
        self.assertEqual(0.6, td[self.real2.pk].classifications_ratio)
        self.assertAlmostEqual(109.444,
                               td[self.real2.pk].geolocation_accuracy_ft,
                               places=3)
        self.assertEqual(False, td[self.real2.pk].actionable_submission)
        self.assertEqual(0.6, td[self.real2.pk].classifications_score_ratio)
        self.assertAlmostEqual(0.270,
                               td[self.real2.pk].geolocation_score_ratio,
                               places=3)
        self.assertEqual(0.0, td[self.real2.pk].actionable_score_ratio)
        self.assertAlmostEqual(0.201, td[self.real2.pk].score_ratio, places=3)

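        # Aggregate results: real6's description approval, the overall score, and unmatched odlcs.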
        self.assertEqual(True, td[self.real6.pk].description_approved)
        self.assertAlmostEqual(0.350, d.score_ratio, places=3)
        self.assertEqual(2, d.unmatched_odlc_count)
Code example #2
File: odlc_test.py Project: raptor419/interop
    def test_evaluate_no_submitted_odlcs(self):
        """Tests that evaluation works with no submitted odlcs."""
        e = OdlcEvaluator([], self.real_odlcs)
        d = e.evaluate()

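        # Nothing is matched, yet every real odlc still appears in the evaluation.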
        self.assertEqual(0, d.matched_score_ratio)
        self.assertEqual(0, d.unmatched_odlc_count)
        self.assertEqual(6, len(d.odlcs))
Code example #3
File: odlc_test.py Project: raptor419/interop
    def test_evaluate_no_real_odlcs(self):
        """Tests that evaluation works with no real odlcs."""
        e = OdlcEvaluator(self.submitted_odlcs, [])
        d = e.evaluate()

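        # Every submission goes unmatched, which drives the overall score ratio negative.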
        self.assertEqual(0, d.matched_score_ratio)
        self.assertEqual(7, d.unmatched_odlc_count)
        self.assertAlmostEqual(-0.35, d.score_ratio, places=3)
        self.assertEqual(0, len(d.odlcs))
Code example #4
def generate_feedback(mission_config, user, team_eval):
    """Generates mission feedback for the given team and mission.

    Args:
        mission_config: The mission to evaluate the team against.
        user: The team user object for which to evaluate and provide feedback.
        team_eval: The team evaluation to fill.
    """
    feedback = team_eval.feedback

    # Find the user's flights.
    flight_periods = TakeoffOrLandingEvent.flights(mission_config, user)
    for period in flight_periods:
        if period.duration() is None:
            team_eval.warnings.append('Infinite flight period.')
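    # Deduplicate the telemetry logs within each flight period.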
    uas_period_logs = [
        UasTelemetry.dedupe(logs)
        for logs in UasTelemetry.by_time_period(user, flight_periods)
    ]
    uas_logs = list(itertools.chain.from_iterable(uas_period_logs))
    if not uas_logs:
        team_eval.warnings.append('No UAS telemetry logs.')

    # Determine interop telemetry rates.
    telem_max, telem_avg = UasTelemetry.rates(
        user, flight_periods, time_period_logs=uas_period_logs)
    if telem_max:
        feedback.uas_telemetry_time_max_sec = telem_max
    if telem_avg:
        feedback.uas_telemetry_time_avg_sec = telem_avg

    # Determine if the uas hit the waypoints.
    feedback.waypoints.extend(
        UasTelemetry.satisfied_waypoints(
            mission_config.home_pos,
            mission_config.mission_waypoints.order_by('order'), uas_logs))

    # Evaluate the object detections.
    user_odlcs = Odlc.objects.filter(user=user).filter(
        mission=mission_config.pk).all()
    evaluator = OdlcEvaluator(user_odlcs,
                              mission_config.odlcs.all(), flight_periods)
    feedback.odlc.CopyFrom(evaluator.evaluate())

    # Determine collisions with stationary obstacles.
    for obst in mission_config.stationary_obstacles.all():
        obst_eval = feedback.stationary_obstacles.add()
        obst_eval.id = obst.pk
        obst_eval.hit = obst.evaluate_collision_with_uas(uas_logs)

    # Add judge feedback.
    try:
        judge_feedback = MissionJudgeFeedback.objects.get(
            mission=mission_config.pk, user=user.pk)
        feedback.judge.CopyFrom(judge_feedback.proto())
    except MissionJudgeFeedback.DoesNotExist:
        team_eval.warnings.append('No MissionJudgeFeedback for team.')
Code example #5
File: odlc_test.py Project: raptor419/interop
    def test_match_odlcs(self):
        """Tests that matching odlcs produces maximal matches."""
        e = OdlcEvaluator(self.submitted_odlcs, self.real_odlcs)
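        # The expected match is symmetric: submitted odlcs map to real odlcs and vice versa.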
        self.assertDictEqual({
            self.submit1: self.real1,
            self.submit2: self.real2,
            self.submit5: self.real5,
            self.submit6: self.real6,
            self.real1: self.submit1,
            self.real2: self.submit2,
            self.real5: self.submit5,
            self.real6: self.submit6,
        }, e.match_odlcs(self.submitted_odlcs, self.real_odlcs))
Code example #6
def generate_feedback(mission_config, user, team_eval):
    """Generates mission feedback for the given team and mission.

    Args:
        mission_config: The mission to evaluate the team against.
        user: The team user object for which to evaluate and provide feedback.
        team_eval: The team evaluation to fill.
    """
    feedback = team_eval.feedback

    # Calculate the total mission clock time.
    missions = MissionClockEvent.missions(user)
    mission_clock_time = datetime.timedelta(seconds=0)
    for mission in missions:
        duration = mission.duration()
        if duration is None:
            team_eval.warnings.append('Infinite mission clock.')
        else:
            mission_clock_time += duration
    feedback.mission_clock_time_sec = mission_clock_time.total_seconds()

    # Calculate total time in air.
    flight_periods = TakeoffOrLandingEvent.flights(user)
    if flight_periods:
        flight_time = sum([p.duration() for p in flight_periods],
                          datetime.timedelta())
        feedback.flight_time_sec = flight_time.total_seconds()
    else:
        feedback.flight_time_sec = 0

    # Warn on flight periods that never ended.
    for period in flight_periods:
        if period.duration() is None:
            team_eval.warnings.append('Infinite flight period.')
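    # Deduplicate the telemetry logs within each flight period.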
    uas_period_logs = [
        UasTelemetry.dedupe(logs)
        for logs in UasTelemetry.by_time_period(user, flight_periods)
    ]
    uas_logs = list(itertools.chain.from_iterable(uas_period_logs))
    if not uas_logs:
        team_eval.warnings.append('No UAS telemetry logs.')

    # Determine interop telemetry rates.
    telem_max, telem_avg = UasTelemetry.rates(user,
                                              flight_periods,
                                              time_period_logs=uas_period_logs)
    if telem_max:
        feedback.uas_telemetry_time_max_sec = telem_max
    if telem_avg:
        feedback.uas_telemetry_time_avg_sec = telem_avg

    # Determine if the uas went out of bounds. This must be done for
    # each period individually so time between periods isn't counted as
    # out of bounds time. Note that this calculates reported time out
    # of bounds, not actual or possible time spent out of bounds.
    out_of_bounds = datetime.timedelta(seconds=0)
    feedback.boundary_violations = 0
    for logs in uas_period_logs:
        bv, bt = FlyZone.out_of_bounds(mission_config.fly_zones.all(), logs)
        feedback.boundary_violations += bv
        out_of_bounds += bt
    feedback.out_of_bounds_time_sec = out_of_bounds.total_seconds()

    # Determine if the uas hit the waypoints.
    feedback.waypoints.extend(
        UasTelemetry.satisfied_waypoints(
            mission_config.home_pos,
            mission_config.mission_waypoints.order_by('order'), uas_logs))

    # Evaluate the object detections.
    user_odlcs = Odlc.objects.filter(user=user).all()
    evaluator = OdlcEvaluator(user_odlcs, mission_config.odlcs.all())
    feedback.odlc.CopyFrom(evaluator.evaluate())

    # Determine collisions with stationary and moving obstacles.
    for obst in mission_config.stationary_obstacles.all():
        obst_eval = feedback.stationary_obstacles.add()
        obst_eval.id = obst.pk
        obst_eval.hit = obst.evaluate_collision_with_uas(uas_logs)
    for obst in mission_config.moving_obstacles.all():
        obst_eval = feedback.moving_obstacles.add()
        obst_eval.id = obst.pk
        obst_eval.hit = obst.evaluate_collision_with_uas(uas_logs)

    # Add judge feedback.
    try:
        judge_feedback = MissionJudgeFeedback.objects.get(
            mission=mission_config.pk, user=user.pk)
        feedback.judge.CopyFrom(judge_feedback.proto())
    except MissionJudgeFeedback.DoesNotExist:
        team_eval.warnings.append('No MissionJudgeFeedback for team.')

    # Sanity check mission time.
    judge_mission_clock = (feedback.judge.flight_time_sec +
                           feedback.judge.post_process_time_sec)
    if abs(feedback.mission_clock_time_sec - judge_mission_clock) > 30:
        team_eval.warnings.append(
            'Mission clock differs between interop and judge.')
Code example #7
File: odlc_test.py Project: raptor419/interop
    def test_match_value(self):
        """Tests the match value for two odlcs."""
        e = OdlcEvaluator(self.submitted_odlcs, self.real_odlcs)
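        # Candidate pairings score from no credit (0.0) up to a perfect match (1.0).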
        self.assertAlmostEqual(
            1.0,
            e.evaluate_match(self.submit1, self.real1).score_ratio,
            places=3)
        self.assertAlmostEqual(
            0.174,
            e.evaluate_match(self.submit2, self.real2).score_ratio,
            places=3)
        self.assertAlmostEqual(
            0.0,
            e.evaluate_match(self.submit3, self.real3).score_ratio,
            places=3)
        self.assertAlmostEqual(
            0.0,
            e.evaluate_match(self.submit4, self.real4).score_ratio,
            places=3)
        self.assertAlmostEqual(
            0.3,
            e.evaluate_match(self.submit5, self.real5).score_ratio,
            places=3)
        self.assertAlmostEqual(
            0.7,
            e.evaluate_match(self.submit6, self.real6).score_ratio,
            places=3)
        self.assertAlmostEqual(
            0.240,
            e.evaluate_match(self.submit7, self.real1).score_ratio,
            places=3)

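        # Swapped pairings can still earn substantial partial credit.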
        self.assertAlmostEqual(
            0.814,
            e.evaluate_match(self.submit1, self.real2).score_ratio,
            places=3)
        self.assertAlmostEqual(
            0.32,
            e.evaluate_match(self.submit2, self.real1).score_ratio,
            places=3)