    def test_evaluate_no_submitted_targets(self):
        """Tests that evaluation works with no submitted targets."""
        e = TargetEvaluator([], self.real_targets)
        d = e.evaluate()
        self.assertEqual(0, d.matched_score_ratio)
        self.assertEqual(0, d.unmatched_target_count)
        # An evaluation entry is still reported for each of the six real
        # targets, even though none of them has a matching submission.
        self.assertEqual(6, len(d.targets))
    def test_evaluate_no_real_targets(self):
        """Tests that evaluation works with no real targets."""
        e = TargetEvaluator(self.submitted_targets, [])
        d = e.evaluate()
        self.assertEqual(0, d.matched_score_ratio)
        self.assertEqual(7, d.unmatched_target_count)
        # -0.35 is consistent with an assumed penalty of 0.05 per extra
        # unmatched submission (7 * -0.05).
        self.assertAlmostEqual(-0.35, d.score_ratio, places=3)
        self.assertEqual(0, len(d.targets))
    def test_evaluate(self):
        """Tests that the evaluation is generated correctly."""
        e = TargetEvaluator(self.submitted_targets, self.real_targets)
        d = e.evaluate()
        td = {t.real_target: t for t in d.targets}

        self.assertEqual(self.submit1.pk, td[self.real1.pk].submitted_target)
        self.assertEqual(True, td[self.real1.pk].image_approved)
        self.assertEqual(1.0, td[self.real1.pk].classifications_ratio)
        self.assertEqual(0.0, td[self.real1.pk].geolocation_accuracy_ft)
        self.assertEqual(True, td[self.real1.pk].actionable_submission)
        self.assertEqual(True, td[self.real1.pk].interop_submission)
        self.assertEqual(1.0, td[self.real1.pk].classifications_score_ratio)
        self.assertEqual(1.0, td[self.real1.pk].geolocation_score_ratio)
        self.assertEqual(1.0, td[self.real1.pk].actionable_score_ratio)
        self.assertEqual(1.0, td[self.real1.pk].autonomous_score_ratio)
        self.assertEqual(1.0, td[self.real1.pk].interop_score_ratio)
        self.assertAlmostEqual(1.0, td[self.real1.pk].score_ratio)

        self.assertEqual(self.submit2.pk, td[self.real2.pk].submitted_target)
        self.assertEqual(True, td[self.real2.pk].image_approved)
        self.assertEqual(0.6, td[self.real2.pk].classifications_ratio)
        self.assertAlmostEqual(109.444,
                               td[self.real2.pk].geolocation_accuracy_ft,
                               places=3)
        self.assertEqual(False, td[self.real2.pk].actionable_submission)
        self.assertEqual(False, td[self.real2.pk].interop_submission)
        self.assertEqual(0.6, td[self.real2.pk].classifications_score_ratio)
        self.assertAlmostEqual(0.270,
                               td[self.real2.pk].geolocation_score_ratio,
                               places=3)
        self.assertEqual(0.0, td[self.real2.pk].actionable_score_ratio)
        self.assertEqual(0.0, td[self.real2.pk].interop_score_ratio)
        self.assertAlmostEqual(0.174, td[self.real2.pk].score_ratio, places=3)

        self.assertEqual(True, td[self.real6.pk].description_approved)

        self.assertAlmostEqual(0.262, d.score_ratio, places=3)
        self.assertEqual(2, d.unmatched_target_count)
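    # --- Worked arithmetic (illustrative; the weights below are an
    # assumption inferred from the asserted values, not taken from the
    # evaluator itself) ---
    # The per-target score_ratio values above are consistent with an equal
    # 0.2 weighting of the five component score ratios (classifications,
    # geolocation, actionable, autonomous, interop):
    #   real1: 0.2 * (1.0 + 1.0 + 1.0 + 1.0 + 1.0) = 1.0
    #   real2: 0.2 * (0.6 + 0.270 + 0.0 + 0.0 + 0.0) = 0.174
    #          (taking real2's autonomous ratio as 0, which the test does
    #          not assert)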
def generate_feedback(mission_config, user, team_eval):
    """Generates mission feedback for the given team and mission.

    Args:
        mission_config: The mission to evaluate the team against.
        user: The team user object for which to evaluate and provide
            feedback.
        team_eval: The team evaluation to fill.
    """
    feedback = team_eval.feedback

    # Calculate the total mission clock time.
    missions = MissionClockEvent.missions(user)
    mission_clock_time = datetime.timedelta(seconds=0)
    for mission in missions:
        duration = mission.duration()
        if duration is None:
            team_eval.warnings.append('Infinite mission clock.')
        else:
            mission_clock_time += duration
    feedback.mission_clock_time_sec = mission_clock_time.total_seconds()

    # Calculate the total time in air.
    flight_periods = TakeoffOrLandingEvent.flights(user)
    if flight_periods:
        flight_time = reduce(lambda x, y: x + y,
                             [p.duration() for p in flight_periods])
        feedback.flight_time_sec = flight_time.total_seconds()
    else:
        feedback.flight_time_sec = 0

    # Warn about any open-ended flight periods.
    for period in flight_periods:
        if period.duration() is None:
            team_eval.warnings.append('Infinite flight period.')

    # Gather the user's deduped telemetry logs for each flight period.
    uas_period_logs = [
        UasTelemetry.dedupe(logs)
        for logs in UasTelemetry.by_time_period(user, flight_periods)
    ]
    uas_logs = list(itertools.chain.from_iterable(uas_period_logs))
    if not uas_logs:
        team_eval.warnings.append('No UAS telemetry logs.')

    # Determine interop telemetry rates.
    telem_max, telem_avg = UasTelemetry.rates(
        user, flight_periods, time_period_logs=uas_period_logs)
    if telem_max:
        feedback.uas_telemetry_time_max_sec = telem_max
    if telem_avg:
        feedback.uas_telemetry_time_avg_sec = telem_avg

    # Determine whether the UAS went out of bounds. This must be done for
    # each period individually so time between periods isn't counted as
    # out-of-bounds time. Note that this calculates reported time out of
    # bounds, not actual or possible time spent out of bounds.
    out_of_bounds = datetime.timedelta(seconds=0)
    feedback.boundary_violations = 0
    for logs in uas_period_logs:
        bv, bt = FlyZone.out_of_bounds(mission_config.fly_zones.all(), logs)
        feedback.boundary_violations += bv
        out_of_bounds += bt
    feedback.out_of_bounds_time_sec = out_of_bounds.total_seconds()

    # Determine whether the UAS hit the waypoints.
    feedback.waypoints.extend(
        UasTelemetry.satisfied_waypoints(
            mission_config.home_pos,
            mission_config.mission_waypoints.order_by('order'), uas_logs))

    # Evaluate the targets.
    user_targets = Target.objects.filter(user=user).all()
    evaluator = TargetEvaluator(user_targets, mission_config.targets.all())
    feedback.target.CopyFrom(evaluator.evaluate())

    # Determine collisions with stationary and moving obstacles.
    for obst in mission_config.stationary_obstacles.all():
        obst_eval = feedback.stationary_obstacles.add()
        obst_eval.id = obst.pk
        obst_eval.hit = obst.evaluate_collision_with_uas(uas_logs)
    for obst in mission_config.moving_obstacles.all():
        obst_eval = feedback.moving_obstacles.add()
        obst_eval.id = obst.pk
        obst_eval.hit = obst.evaluate_collision_with_uas(uas_logs)

    # Add judge feedback.
    try:
        judge_feedback = MissionJudgeFeedback.objects.get(
            mission=mission_config.pk, user=user.pk)
        feedback.judge.CopyFrom(judge_feedback.proto())
    except MissionJudgeFeedback.DoesNotExist:
        team_eval.warnings.append('No MissionJudgeFeedback for team.')

    # Sanity check the mission clock against the judge's recorded times.
    judge_mission_clock = (feedback.judge.flight_time_sec +
                           feedback.judge.post_process_time_sec)
    if abs(feedback.mission_clock_time_sec - judge_mission_clock) > 30:
        team_eval.warnings.append(
            'Mission clock differs between interop and judge.')
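# --- Usage sketch (illustrative; not from the original source) ---
# A minimal driver showing how generate_feedback() might be invoked for a
# whole mission. `mission_pb2.MissionEvaluation`, its repeated `teams`
# field, and the `team` name field are assumed proto definitions, and the
# import path below is hypothetical; the real caller may differ.
#
# from auvsi_suas.proto import mission_pb2  # hypothetical import path


def evaluate_teams(mission_config, users):
    """Evaluates each given team user against the mission (sketch)."""
    mission_eval = mission_pb2.MissionEvaluation()  # assumed proto message
    for user in users:
        team_eval = mission_eval.teams.add()  # one TeamEvaluation per team
        team_eval.team = user.username        # assumed field name
        generate_feedback(mission_config, user, team_eval)
    return mission_eval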