def test_evaluation_dict(self):
    """Tests that the evaluation dictionary is generated correctly."""
    e = TargetEvaluator(self.submitted_targets, self.real_targets)
    d = e.evaluation_dict()

    self.assertIn('matched_target_value', d)
    self.assertIn('unmatched_target_count', d)
    self.assertIn('targets', d)

    for t in d['targets'].values():
        keys = ['match_value', 'image_approved', 'classifications',
                'location_accuracy', 'actionable', 'interop_submission']
        for key in keys:
            self.assertIn(key, t)
    for s in self.real_targets:
        self.assertIn(s.pk, d['targets'].keys())

    self.assertAlmostEqual(1.974, d['matched_target_value'], places=3)
    self.assertEqual(1, d['unmatched_target_count'])

    self.assertEqual(self.submit1.pk,
                     d['targets'][self.real1.pk]['submitted_target'])
    self.assertAlmostEqual(1.0, d['targets'][self.real1.pk]['match_value'],
                           places=3)
    self.assertEqual(True, d['targets'][self.real1.pk]['image_approved'])
    self.assertEqual(1.0, d['targets'][self.real1.pk]['classifications'])
    self.assertEqual(0.0, d['targets'][self.real1.pk]['location_accuracy'])
    self.assertEqual(True, d['targets'][self.real1.pk]['actionable'])
    self.assertEqual(True, d['targets'][self.real1.pk]['interop_submission'])

    self.assertEqual(False, d['targets'][self.real2.pk]['actionable'])
    self.assertEqual(False, d['targets'][self.real2.pk]['interop_submission'])

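# Shape of the evaluation dictionary as implied by the assertions in
# test_evaluation_dict above; this is a reconstruction for reference only, and
# the primary-key values (1, 10) are placeholders, not values from the source.
example_evaluation_dict = {
    'matched_target_value': 1.974,
    'unmatched_target_count': 1,
    'targets': {
        1: {  # keyed by the real target's pk
            'submitted_target': 10,  # pk of the matched submitted target
            'match_value': 1.0,
            'image_approved': True,
            'classifications': 1.0,
            'location_accuracy': 0.0,
            'actionable': True,
            'interop_submission': True,
        },
    },
}
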
def test_evaluation_dict_no_real_targets(self):
    """Tests that evaluation_dict works with no real targets."""
    e = TargetEvaluator(self.submitted_targets, [])
    d = e.evaluation_dict()

    self.assertEqual(0, d['matched_target_value'])
    self.assertEqual(5, d['unmatched_target_count'])
    self.assertEqual({}, d['targets'])

def test_evaluate_no_submitted_targets(self):
    """Tests that evaluation works with no submitted targets."""
    e = TargetEvaluator([], self.real_targets)
    d = e.evaluate()

    self.assertEqual(0, d.matched_score_ratio)
    self.assertEqual(0, d.unmatched_target_count)
    self.assertEqual(6, len(d.targets))

def test_evaluate_no_real_targets(self):
    """Tests that evaluation works with no real targets."""
    e = TargetEvaluator(self.submitted_targets, [])
    d = e.evaluate()

    self.assertEqual(0, d.matched_score_ratio)
    self.assertEqual(7, d.unmatched_target_count)
    self.assertAlmostEqual(-0.35, d.score_ratio, places=3)
    self.assertEqual(0, len(d.targets))

def test_evaluation_dict_no_submitted_targets(self):
    """Tests that evaluation_dict works with no submitted targets."""
    e = TargetEvaluator([], self.real_targets)
    d = e.evaluation_dict()

    self.assertEqual(0, d['matched_target_value'])
    self.assertEqual(0, d['unmatched_target_count'])
    for td in d['targets'].values():
        for v in td.values():
            self.assertEqual('', v)

def test_match_value(self):
    """Tests the match value for two targets."""
    e = TargetEvaluator(self.submitted_targets, self.real_targets)

    self.assertAlmostEqual(1, e.match_value(self.submit1, self.real1),
                           places=3)
    self.assertAlmostEqual(0.174, e.match_value(self.submit2, self.real2),
                           places=3)
    self.assertAlmostEqual(0.0, e.match_value(self.submit3, self.real3),
                           places=3)
    self.assertAlmostEqual(0.5, e.match_value(self.submit4, self.real4),
                           places=3)
    self.assertAlmostEqual(0.3, e.match_value(self.submit5, self.real5),
                           places=3)

    self.assertAlmostEqual(0.814, e.match_value(self.submit1, self.real2),
                           places=3)
    self.assertAlmostEqual(0.32, e.match_value(self.submit2, self.real1),
                           places=3)

def test_match_targets(self):
    """Tests that matching targets produce maximal matches."""
    e = TargetEvaluator(self.submitted_targets, self.real_targets)
    self.assertEqual(
        {
            self.submit1: self.real1,
            self.submit2: self.real2,
            self.submit4: self.real4,
            self.submit5: self.real5,
            self.real1: self.submit1,
            self.real2: self.submit2,
            self.real4: self.submit4,
            self.real5: self.submit5,
        }, e.match_targets(self.submitted_targets, self.real_targets))

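# A hedged sketch, not the TargetEvaluator implementation: one way a maximal
# pairing like the one asserted above could be computed is maximum-weight
# bipartite matching over pairwise match values. 'pair_targets' and the
# 'match_value' callback are hypothetical names; assumes networkx >= 2, where
# max_weight_matching returns a set of matched endpoint pairs.
import networkx as nx


def pair_targets(submitted, real, match_value):
    """Returns a dict mapping each matched target to its counterpart."""
    graph = nx.Graph()
    for s in submitted:
        for r in real:
            value = match_value(s, r)
            if value > 0:
                # Tag nodes by side so the bipartite structure is explicit.
                graph.add_edge(('submitted', s), ('real', r), weight=value)
    pairs = {}
    for a, b in nx.max_weight_matching(graph):
        pairs[a[1]] = b[1]
        pairs[b[1]] = a[1]
    return pairs
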
def test_evaluate_match(self):
    """Tests the match evaluation for pairs of targets."""
    e = TargetEvaluator(self.submitted_targets, self.real_targets)

    self.assertAlmostEqual(
        1.0, e.evaluate_match(self.submit1, self.real1).score_ratio, places=3)
    self.assertAlmostEqual(
        0.174, e.evaluate_match(self.submit2, self.real2).score_ratio,
        places=3)
    self.assertAlmostEqual(
        0.0, e.evaluate_match(self.submit3, self.real3).score_ratio, places=3)
    self.assertAlmostEqual(
        0.0, e.evaluate_match(self.submit4, self.real4).score_ratio, places=3)
    self.assertAlmostEqual(
        0.3, e.evaluate_match(self.submit5, self.real5).score_ratio, places=3)
    self.assertAlmostEqual(
        0.7, e.evaluate_match(self.submit6, self.real6).score_ratio, places=3)
    self.assertAlmostEqual(
        0.240, e.evaluate_match(self.submit7, self.real1).score_ratio,
        places=3)

    self.assertAlmostEqual(
        0.814, e.evaluate_match(self.submit1, self.real2).score_ratio,
        places=3)
    self.assertAlmostEqual(
        0.32, e.evaluate_match(self.submit2, self.real1).score_ratio,
        places=3)

def test_evaluate(self):
    """Tests that the evaluation is generated correctly."""
    e = TargetEvaluator(self.submitted_targets, self.real_targets)
    d = e.evaluate()
    td = {t.real_target: t for t in d.targets}

    self.assertEqual(self.submit1.pk, td[self.real1.pk].submitted_target)
    self.assertEqual(True, td[self.real1.pk].image_approved)
    self.assertEqual(1.0, td[self.real1.pk].classifications_ratio)
    self.assertEqual(0.0, td[self.real1.pk].geolocation_accuracy_ft)
    self.assertEqual(True, td[self.real1.pk].actionable_submission)
    self.assertEqual(True, td[self.real1.pk].interop_submission)
    self.assertEqual(1.0, td[self.real1.pk].classifications_score_ratio)
    self.assertEqual(1.0, td[self.real1.pk].geolocation_score_ratio)
    self.assertEqual(1.0, td[self.real1.pk].actionable_score_ratio)
    self.assertEqual(1.0, td[self.real1.pk].autonomous_score_ratio)
    self.assertEqual(1.0, td[self.real1.pk].interop_score_ratio)
    self.assertAlmostEqual(1.0, td[self.real1.pk].score_ratio)

    self.assertEqual(self.submit2.pk, td[self.real2.pk].submitted_target)
    self.assertEqual(True, td[self.real2.pk].image_approved)
    self.assertEqual(0.6, td[self.real2.pk].classifications_ratio)
    self.assertAlmostEqual(109.444, td[self.real2.pk].geolocation_accuracy_ft,
                           places=3)
    self.assertEqual(False, td[self.real2.pk].actionable_submission)
    self.assertEqual(False, td[self.real2.pk].interop_submission)
    self.assertEqual(0.6, td[self.real2.pk].classifications_score_ratio)
    self.assertAlmostEqual(0.270, td[self.real2.pk].geolocation_score_ratio,
                           places=3)
    self.assertEqual(0.0, td[self.real2.pk].actionable_score_ratio)
    self.assertEqual(0.0, td[self.real2.pk].interop_score_ratio)
    self.assertAlmostEqual(0.174, td[self.real2.pk].score_ratio, places=3)

    self.assertEqual(True, td[self.real6.pk].description_approved)

    self.assertAlmostEqual(0.262, d.score_ratio, places=3)
    self.assertEqual(2, d.unmatched_target_count)

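# Debugging aid (sketch): the evaluation returned by evaluate() is a protobuf
# message (it is copied into the feedback proto with CopyFrom in
# generate_feedback below), so it can be dumped as readable text when
# diagnosing a failing assertion. 'dump_evaluation' is a hypothetical helper.
from google.protobuf import text_format


def dump_evaluation(evaluation):
    """Prints a target evaluation proto in text format for inspection."""
    print(text_format.MessageToString(evaluation))
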
def generate_feedback(mission_config, user, team_eval):
    """Generates mission feedback for the given team and mission.

    Args:
        mission_config: The mission to evaluate the team against.
        user: The team user object for which to evaluate and provide feedback.
        team_eval: The team evaluation to fill.
    """
    feedback = team_eval.feedback

    # Calculate the total mission clock time.
    missions = MissionClockEvent.missions(user)
    mission_clock_time = datetime.timedelta(seconds=0)
    for mission in missions:
        duration = mission.duration()
        if duration is None:
            team_eval.warnings.append('Infinite mission clock.')
        else:
            mission_clock_time += duration
    feedback.mission_clock_time_sec = mission_clock_time.total_seconds()

    # Calculate total time in air.
    flight_periods = TakeoffOrLandingEvent.flights(user)
    if flight_periods:
        flight_time = reduce(lambda x, y: x + y,
                             [p.duration() for p in flight_periods])
        feedback.flight_time_sec = flight_time.total_seconds()
    else:
        feedback.flight_time_sec = 0

    # Find the user's flights.
    for period in flight_periods:
        if period.duration() is None:
            team_eval.warnings.append('Infinite flight period.')
    uas_period_logs = [
        UasTelemetry.dedupe(logs)
        for logs in UasTelemetry.by_time_period(user, flight_periods)
    ]
    uas_logs = list(itertools.chain.from_iterable(uas_period_logs))
    if not uas_logs:
        team_eval.warnings.append('No UAS telemetry logs.')

    # Determine interop telemetry rates.
    telem_max, telem_avg = UasTelemetry.rates(
        user, flight_periods, time_period_logs=uas_period_logs)
    if telem_max:
        feedback.uas_telemetry_time_max_sec = telem_max
    if telem_avg:
        feedback.uas_telemetry_time_avg_sec = telem_avg

    # Determine if the uas went out of bounds. This must be done for
    # each period individually so time between periods isn't counted as
    # out of bounds time. Note that this calculates reported time out
    # of bounds, not actual or possible time spent out of bounds.
    out_of_bounds = datetime.timedelta(seconds=0)
    feedback.boundary_violations = 0
    for logs in uas_period_logs:
        bv, bt = FlyZone.out_of_bounds(mission_config.fly_zones.all(), logs)
        feedback.boundary_violations += bv
        out_of_bounds += bt
    feedback.out_of_bounds_time_sec = out_of_bounds.total_seconds()

    # Determine if the uas hit the waypoints.
    feedback.waypoints.extend(
        UasTelemetry.satisfied_waypoints(
            mission_config.home_pos,
            mission_config.mission_waypoints.order_by('order'), uas_logs))

    # Evaluate the targets.
    user_targets = Target.objects.filter(user=user).all()
    evaluator = TargetEvaluator(user_targets, mission_config.targets.all())
    feedback.target.CopyFrom(evaluator.evaluate())

    # Determine collisions with stationary and moving obstacles.
    for obst in mission_config.stationary_obstacles.all():
        obst_eval = feedback.stationary_obstacles.add()
        obst_eval.id = obst.pk
        obst_eval.hit = obst.evaluate_collision_with_uas(uas_logs)
    for obst in mission_config.moving_obstacles.all():
        obst_eval = feedback.moving_obstacles.add()
        obst_eval.id = obst.pk
        obst_eval.hit = obst.evaluate_collision_with_uas(uas_logs)

    # Add judge feedback.
    try:
        judge_feedback = MissionJudgeFeedback.objects.get(
            mission=mission_config.pk, user=user.pk)
        feedback.judge.CopyFrom(judge_feedback.proto())
    except MissionJudgeFeedback.DoesNotExist:
        team_eval.warnings.append('No MissionJudgeFeedback for team.')

    # Sanity check mission time.
    judge_mission_clock = (
        feedback.judge.flight_time_sec + feedback.judge.post_process_time_sec)
    if abs(feedback.mission_clock_time_sec - judge_mission_clock) > 30:
        team_eval.warnings.append(
            'Mission clock differs between interop and judge.')

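# Illustrative driver only, not taken from the source: one way generate_feedback
# might be invoked for every non-superuser team. 'mission_eval' is assumed to
# be a proto with a repeated 'teams' field of team evaluations and a 'team'
# name field; the real aggregate type and field names may differ.
from django.contrib.auth.models import User


def evaluate_all_teams(mission_config, mission_eval):
    """Fills mission_eval with one evaluation per non-superuser team."""
    for user in User.objects.exclude(is_superuser=True):
        team_eval = mission_eval.teams.add()  # hypothetical repeated field
        team_eval.team = user.username  # hypothetical field name
        generate_feedback(mission_config, user, team_eval)
    return mission_eval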