class Test_Boost():
    """Checks boost-pad pickup counts and boost-usage stats on targeted replays."""

    def test_1_small_pad_collected(self, replay_cache):
        # Replay where player 0 grabs exactly one small pad.
        def check(analysis: AnalysisManager):
            stats = analysis.get_protobuf_data().players[0].stats.boost
            assert stats.num_small_boosts == 1

        run_analysis_test_on_replay(check, get_specific_replays()["1_SMALL_PAD"],
                                    cache=replay_cache)

    def test_1_large_pad_collected(self, replay_cache):
        # Replay where player 0 grabs exactly one large pad.
        def check(analysis: AnalysisManager):
            stats = analysis.get_protobuf_data().players[0].stats.boost
            assert stats.num_large_boosts == 1

        run_analysis_test_on_replay(check, get_specific_replays()["1_LARGE_PAD"],
                                    cache=replay_cache)

    def test_0_boost_collected(self, replay_cache):
        # Replay where no pads are touched at all.
        def check(analysis: AnalysisManager):
            stats = analysis.get_protobuf_data().players[0].stats.boost
            assert stats.num_small_boosts == 0
            assert stats.num_large_boosts == 0

        run_analysis_test_on_replay(
            check, get_specific_replays()["0_BOOST_COLLECTED"], cache=replay_cache)

    def test_boost_used(self, replay_cache):
        # Borrow a TestCase instance purely for its assertAlmostEqual helper.
        case = unittest.TestCase('__init__')

        def check(analysis: AnalysisManager, boost_value):
            boost = analysis.get_protobuf_data().players[0].stats.boost
            print("Predicted usage: {}, actual: {}".format(
                boost.boost_usage, boost_value))
            # Usage estimate must land within 1 unit of the known answer.
            case.assertAlmostEqual(boost.boost_usage, boost_value, delta=1)
            # self.assertGreater(boost.average_boost_level, 0)

        run_analysis_test_on_replay(
            check,
            get_specific_replays()["BOOST_USED"] + get_specific_replays()["0_BOOST_USED"],
            answers=get_specific_answers()["BOOST_USED"] + get_specific_answers()["0_BOOST_USED"],
            cache=replay_cache)
def test_boost_used(self):
    """Boost usage should be within 9 units of the recorded answer.

    TODO: Figure out a way to calculate boost in a more accurate manner.
    """
    def check(analysis: AnalysisManager, boost_value):
        boost = analysis.get_protobuf_data().players[0].stats.boost
        self.assertAlmostEqual(boost.boost_usage, boost_value, delta=9)
        print(analysis)

    run_analysis_test_on_replay(
        check,
        get_specific_replays()["BOOST_USED"] + get_specific_replays()["0_BOOST_USED"],
        answers=get_specific_answers()["BOOST_USED"] + get_specific_answers()["0_BOOST_USED"])
def test_hit_pressure(self, replay_cache):
    """Pin per-hit pressure values against a known 7-car RLCS replay."""
    def check(analysis: AnalysisManager, answer):
        hits = analysis.get_protobuf_data().game_stats.hits
        # Golden values, one entry per detected hit, in hit order.
        expected_pressures = [
            100, 100, 100, 100, 0, 93, 100, 100, 94, 0, 0, 95, 100, 100, 0, 0,
            0, 0, 90, 0, 0, 94, 100, 100, 0, 99, 0, 0, 0, 0, 0, 0, 0, 0, 100,
            100, 99, 100, 0, 0, 0, 0, 100, 100, 100, 0, 100, 0, 0, 0, 0, 100,
            100, 100, 100, 0, 100, 0, 94, 0, 96, 0, 100, 100, 100, 0, 0, 0, 0,
            0, 98, 100, 0, 0, 100, 100, 0, 100, 0, 0, 0, 100, 100, 0, 0, 100,
            0, 94, 100, 72, 100, 100, 0, 0, 0, 100, 100, 100, 100, 100, 100,
            0, 0, 0, 0, 0, 0, 0, 94, 0, 100, 97, 100, 100, 0, 100, 0, 0, 0, 0,
            0, 81, 100, 97, 100, 81, 100, 0, 100, 100, 0, 100, 100, 100, 100,
            91, 0, 0, 0, 100, 0, 96, 100, 100, 99, 0, 0, 0, 100, 0, 0, 0, 0,
            0, 100, 0, 99, 100, 98, 0, 94, 0, 100, 0, 100, 100, 0, 100, 100,
            100, 0, 100, 0, 100, 67, 0, 91, 100, 100, 100, 0, 0, 0, 0, 88, 0,
            100, 0, 100, 0, 0
        ]
        # Index explicitly so extra hits still raise IndexError, as before.
        for idx, hit in enumerate(hits):
            assert hit.pressure == expected_pressures[idx]

    # Skip test cache since this test is calculating intensive events.
    run_analysis_test_on_replay(
        check,
        replay_list=get_raw_replays()["OCE_RLCS_7_CARS"],
        answers=get_specific_answers()["CLEARS"],
        calculate_intensive_events=True)
def test_boost_used(self):
    """Boost usage should be within 1 unit of the recorded answer."""
    def check(analysis: AnalysisManager, boost_value):
        boost = analysis.get_protobuf_data().players[0].stats.boost
        print("Predicted usage: {}, actual: {}".format(
            boost.boost_usage, boost_value))
        self.assertAlmostEqual(boost.boost_usage, boost_value, delta=1)
        # self.assertGreater(boost.average_boost_level, 0)

    run_analysis_test_on_replay(
        check,
        get_specific_replays()["BOOST_USED"] + get_specific_replays()["0_BOOST_USED"],
        answers=get_specific_answers()["BOOST_USED"] + get_specific_answers()["0_BOOST_USED"])
def test_num_hits_detected(self):
    """Hit count in the protobuf must match the per-replay answer."""
    def check(analysis: AnalysisManager, answer):
        detected = analysis.get_protobuf_data().game_stats.hits
        self.assertEqual(len(detected), answer)
        print(analysis)

    run_analysis_test_on_replay(check, get_specific_replays()["HITS"],
                                get_specific_answers()["HITS"])
def test_num_hits_detected(self, replay_cache):
    """Hit count in the protobuf must match the per-replay answer."""
    def check(analysis: AnalysisManager, answer):
        detected = analysis.get_protobuf_data().game_stats.hits
        assert len(detected) == answer
        print(analysis)

    run_analysis_test_on_replay(check,
                                replay_list=get_specific_replays()["HITS"],
                                answers=get_specific_answers()["HITS"],
                                cache=replay_cache)
def test_0_used(self):
    """A replay with no boost usage must report exactly the expected value."""
    def check(analysis: AnalysisManager, boost_value):
        boost = analysis.get_protobuf_data().players[0].stats.boost
        self.assertEqual(boost.boost_usage, boost_value)
        print(analysis)

    run_analysis_test_on_replay(check, get_specific_replays()["0_BOOST_USED"],
                                answers=get_specific_answers()["0_BOOST_USED"])
def test_num_clears_detected(self, replay_cache):
    """Verify the analyzer detects the expected number of clears.

    Bug fix: the original inner test counted clears but never compared
    the count to ``answer``, so the loop was dead code and the test could
    never fail. The assertion now matches the sibling tests (e.g. the
    saves test, which asserts its counter against the answer).
    """
    def test(analysis: AnalysisManager, answer):
        proto_game = analysis.get_protobuf_data()
        hits = proto_game.game_stats.hits
        clear_counter = 0
        for hit in hits:
            if hit.clear:
                clear_counter += 1
        # Previously missing — without this the test always passed.
        assert clear_counter == answer

    run_analysis_test_on_replay(
        test, replay_list=get_specific_replays()["CLEARS"],
        answers=get_specific_answers()["CLEARS"],
        cache=replay_cache)
def test_num_aerials_detected(self):
    """Count hits flagged as aerials and compare to the known answer."""
    def check(analysis: AnalysisManager, answer):
        hits = analysis.get_protobuf_data().game_stats.hits
        aerials = sum(1 for hit in hits if hit.aerial)
        self.assertEqual(aerials, answer)

    run_analysis_test_on_replay(check, get_specific_replays()["AERIALS"],
                                get_specific_answers()["AERIALS"])
def test_num_passes_detected(self):
    """Count hits flagged as passes and compare to the known answer."""
    def check(analysis: AnalysisManager, answer):
        hits = analysis.get_protobuf_data().game_stats.hits
        # `pass_` carries a trailing underscore to dodge the Python keyword.
        passes = sum(1 for hit in hits if hit.pass_)
        self.assertEqual(passes, answer)

    run_analysis_test_on_replay(check, get_specific_replays()["PASSES"],
                                get_specific_answers()["PASSES"])
def test_boost_feathered(self):
    """Feathered boost usage must be within 3 percent of the answer."""
    def check(analysis: AnalysisManager, boost_value):
        boost = analysis.get_protobuf_data().players[0].stats.boost
        print("Predicted usage: {}, actual: {}".format(
            boost.boost_usage, boost_value))
        assertNearlyEqual(self, boost.boost_usage, boost_value, percent=3)
        # self.assertGreater(boost.average_boost_level, 0)

    run_analysis_test_on_replay(
        check, get_specific_replays()["BOOST_FEATHERED"],
        answers=get_specific_answers()["BOOST_FEATHERED"])
def test_num_clears_detected(self):
    """Count hits flagged as clears and compare to the known answer."""
    def check(analysis: AnalysisManager, answer):
        hits = analysis.get_protobuf_data().game_stats.hits
        clears = sum(1 for hit in hits if hit.clear)
        self.assertEqual(clears, answer)

    run_analysis_test_on_replay(check, get_specific_replays()["CLEARS"],
                                get_specific_answers()["CLEARS"])
def test_num_saves_detected(self, replay_cache):
    """Count hits flagged as saves and compare to the known answer."""
    def check(analysis: AnalysisManager, answer):
        hits = analysis.get_protobuf_data().game_stats.hits
        saves = sum(1 for hit in hits if hit.save)
        assert saves == answer

    run_analysis_test_on_replay(
        check, replay_list=get_specific_replays()["SAVES"],
        answers=get_specific_answers()["SAVES"],
        cache=replay_cache)
def test_total_clears_detected(self, replay_cache):
    """Per-player total_clears must match a hand-verified lookup table."""
    def check(analysis: AnalysisManager, answer):
        # Steam id -> expected total clears for the OCE RLCS 7-car replay.
        clears_lookup = {
            '76561198204422936': 6,  # Decka
            '76561198050413646': 1,  # Requeim
            '76561198058420486': 6,  # Delusion
            '76561197998103705': 4,  # CJCJ
            '76561198065500375': 1,  # Express
            '76561198173645057': 2,  # Shadey
        }
        proto_game = analysis.get_protobuf_data()
        for pl in proto_game.players:
            expected = clears_lookup[pl.id.id]
            actual = pl.stats.hit_counts.total_clears
            assert expected == actual

    run_analysis_test_on_replay(
        check, replay_list=get_raw_replays()["OCE_RLCS_7_CARS"],
        answers=get_specific_answers()["CLEARS"],
        cache=replay_cache)
def test_boost_feathered(self, replay_cache):
    """Feathered boost usage must be within 3 percent of the answer."""
    # Borrow a TestCase instance purely for its assertion helpers.
    case = unittest.TestCase('__init__')

    def check(analysis: AnalysisManager, boost_value):
        boost = analysis.get_protobuf_data().players[0].stats.boost
        print("Predicted usage: {}, actual: {}".format(
            boost.boost_usage, boost_value))
        assertNearlyEqual(case, boost.boost_usage, boost_value, percent=3)
        # self.assertGreater(boost.average_boost_level, 0)

    run_analysis_test_on_replay(
        check, get_specific_replays()["BOOST_FEATHERED"],
        answers=get_specific_answers()["BOOST_FEATHERED"], cache=replay_cache)

def test_boost_wasted_collection(self, replay_cache):
    """Wasted-collection per player must be within 2 units of the answer.

    NOTE(review): this method defines its check but never passes it to
    run_analysis_test_on_replay, so it currently exercises nothing —
    confirm whether the runner call was lost in an edit. Behavior is
    deliberately kept as-is here.
    """
    case = unittest.TestCase('__init__')

    def check(analysis: AnalysisManager, boost_value):
        proto_game = analysis.get_protobuf_data()
        for index, player in enumerate(proto_game.players):
            wasted_answer = boost_value[index]
            # Answer encodes (collected, remaining) in 0..255 ticks;
            # convert to a percentage of a full boost bar.
            total_wasted = (wasted_answer[0] - (255 - wasted_answer[1])) / 256.0 * 100.0
            boost = player.stats.boost
            case.assertAlmostEqual(boost.wasted_collection, total_wasted, delta=2)
class Test_Boost():
    """Boost pickup, steal, and usage statistics on targeted replays."""

    def test_1_small_pad_collected(self, replay_cache):
        def check(analysis: AnalysisManager):
            proto_game = analysis.get_protobuf_data()
            # Kept for parity with the original: the frame fetch may matter,
            # even though the result is unused here.
            _frames = analysis.get_data_frame()
            stats = proto_game.players[0].stats.boost
            assert stats.num_small_boosts == 1

        run_analysis_test_on_replay(check, get_specific_replays()["1_SMALL_PAD"],
                                    cache=replay_cache)

    def test_1_large_pad_collected(self, replay_cache):
        def check(analysis: AnalysisManager):
            stats = analysis.get_protobuf_data().players[0].stats.boost
            assert stats.num_large_boosts == 1

        run_analysis_test_on_replay(check, get_specific_replays()["1_LARGE_PAD"],
                                    cache=replay_cache)

    def test_1_large_pad_1_small_pad_collected(self, replay_cache):
        def check(analysis: AnalysisManager):
            stats = analysis.get_protobuf_data().players[0].stats.boost
            assert stats.num_large_boosts == 1
            assert stats.num_small_boosts == 1

        run_analysis_test_on_replay(
            check, get_raw_replays()["12_AND_100_BOOST_PADS_0_USED"],
            cache=replay_cache)

    def test_0_boost_collected(self, replay_cache):
        def check(analysis: AnalysisManager):
            stats = analysis.get_protobuf_data().players[0].stats.boost
            assert stats.num_small_boosts == 0
            assert stats.num_large_boosts == 0

        run_analysis_test_on_replay(
            check, get_specific_replays()["0_BOOST_COLLECTED"], cache=replay_cache)

    def test_lots_of_boost_collected(self, replay_cache):
        def check(analysis: AnalysisManager):
            stats = analysis.get_protobuf_data().players[0].stats.boost
            assert [stats.num_small_boosts, stats.num_large_boosts] == [25, 6]

        run_analysis_test_on_replay(check, get_raw_replays()["6_BIG_25_SMALL"],
                                    cache=replay_cache)

    def test_boost_steals(self, replay_cache):
        def check(analysis: AnalysisManager):
            stats = analysis.get_protobuf_data().players[0].stats.boost
            assert stats.num_stolen_boosts == 2

        run_analysis_test_on_replay(check, get_raw_replays()["6_BIG_25_SMALL"],
                                    cache=replay_cache)

    def test_boost_steals_post_goal(self, replay_cache):
        def check(analysis: AnalysisManager):
            proto_game = analysis.get_protobuf_data()
            # Blue player: no pads, no steals, no usage after the goal.
            blue_boost = proto_game.players[0].stats.boost
            assert [
                blue_boost.num_small_boosts, blue_boost.num_large_boosts,
                blue_boost.num_stolen_boosts, blue_boost.boost_usage
            ] == [0, 0, 0, 0]
            # Orange player: three large pads, all stolen, some usage.
            orange_boost = proto_game.players[1].stats.boost
            assert [orange_boost.num_large_boosts,
                    orange_boost.num_stolen_boosts] == [3, 3]
            assert orange_boost.boost_usage > 0

        run_analysis_test_on_replay(
            check, get_raw_replays()["3_STEAL_ORANGE_0_STEAL_BLUE"],
            cache=replay_cache)

    def test_boost_used(self, replay_cache):
        # Borrow a TestCase instance purely for its assertAlmostEqual helper.
        case = unittest.TestCase('__init__')

        def check(analysis: AnalysisManager, boost_value):
            boost = analysis.get_protobuf_data().players[0].stats.boost
            print("Predicted usage: {}, actual: {}".format(
                boost.boost_usage, boost_value))
            case.assertAlmostEqual(boost.boost_usage, boost_value, delta=1)
            # self.assertGreater(boost.average_boost_level, 0)

        run_analysis_test_on_replay(
            case,
            get_specific_replays()["BOOST_USED"] + get_specific_replays()["0_BOOST_USED"],
            answers=get_specific_answers()["BOOST_USED"] + get_specific_answers()["0_BOOST_USED"],
            cache=replay_cache) if False else run_analysis_test_on_replay(
            check,
            get_specific_replays()["BOOST_USED"] + get_specific_replays()["0_BOOST_USED"],
            answers=get_specific_answers()["BOOST_USED"] + get_specific_answers()["0_BOOST_USED"],
            cache=replay_cache)