def scouting_freq_and_cat(replay):
    '''scouting_freq_and_cat takes in a previously loaded replay
    from sc2reader and returns, for each player, the scouting frequency
    (instances per second) and how their scouting behavior is categorized,
    as well as the winner of the game.'''
    r = replay

    try:
        frames = r.frames
        seconds = r.real_length.total_seconds()

        team1_scouting_states, team2_scouting_states = final_scouting_states(r)

        team1_num_times, team1_time = scouting_stats(team1_scouting_states)
        team2_num_times, team2_time = scouting_stats(team2_scouting_states)

        team1_freq = team1_num_times / seconds
        team2_freq = team2_num_times / seconds

        team1_frames = scouting_timeframe_list1(team1_scouting_states)
        team2_frames = scouting_timeframe_list1(team2_scouting_states)

        battles, harassing = battle_detector.buildBattleList(r)

        team1_cat = categorize_player(team1_frames, battles, harassing, frames)
        team2_cat = categorize_player(team2_frames, battles, harassing, frames)

        return team1_freq, team1_cat, team2_freq, team2_cat, r.winner.number

    except:
        print(replay.filename, "contains errors within scouting_detector")
        raise
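
# A minimal usage sketch, not part of the original module: the replay path is
# hypothetical, and it assumes sc2reader is importable (it is used the same way
# in generateFields below). It simply shows how the returned tuple unpacks.
def _demo_scouting_freq_and_cat(pathname="replays/example.SC2Replay"):
    import sc2reader
    r = sc2reader.load_replay(pathname)
    t1_freq, t1_cat, t2_freq, t2_cat, winner = scouting_freq_and_cat(r)
    print("Player 1: %.3f scouting instances/sec, category %s" % (t1_freq, t1_cat))
    print("Player 2: %.3f scouting instances/sec, category %s" % (t2_freq, t2_cat))
    print("Winner: player", winner)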
def scouting_response(replay):
    '''scouting_response takes in a previously loaded replay and returns the
    average interval between scouting and battles for each player. This is
    a start on exploring responses to scouting.'''
    r = replay
    try:
        # scale = 16 * sc2reader.constants.GAME_SPEED_FACTOR[r.expansion][r.speed]
        # it appears the scale is always 22.4 in our dataset, despite documentation to the contrary
        scale = 22.4

        team1_scouting_states, team2_scouting_states = final_scouting_states(r)
        battles, harassing = battle_detector.buildBattleList(r)

        team1_scouting_frames = scouting_timeframe_list1(team1_scouting_states)
        team2_scouting_frames = scouting_timeframe_list1(team2_scouting_states)

        team1_avg = avg_interval_before_battle(team1_scouting_frames, battles,
                                               scale)
        team2_avg = avg_interval_before_battle(team2_scouting_frames, battles,
                                               scale)

        return team1_avg, team2_avg

    except:
        raise
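
# The hard-coded scale above is game frames (game loops) per real-time second;
# the commented-out formula suggests it comes from 16 loops per game-second
# times the 'Faster' speed factor of 1.4, i.e. 22.4. A tiny helper (ours, not
# part of the original code) illustrating the conversion:
def _frames_to_seconds(frame_count, scale=22.4):
    '''Convert a span measured in game frames into real-time seconds.'''
    return frame_count / scale
# e.g. _frames_to_seconds(672) is roughly 30 seconds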
def control_group_stats(replay):
    '''control_group_stats is the main function for this script. It takes in
    a pre-loaded replay using sc2reader and returns the commands per second,
    peacetime macro selection rate, and battletime macro selection rate
    for each player.'''
    r = replay

    p1_cps, p2_cps = commandsPerSecond(r.game_events,
                                       r.real_length.total_seconds())
    battles, harassing = battle_detector.buildBattleList(r)
    p1_peace_rate, p1_battle_rate, p2_peace_rate, p2_battle_rate = macroRates(
        r.game_events, battles, r.frames, r.real_length.total_seconds())

    return p1_cps, p1_peace_rate, p1_battle_rate, p2_cps, p2_peace_rate, p2_battle_rate
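
# Usage sketch (the replay path is hypothetical); the labels restate the
# docstring's description of the six returned values.
def _demo_control_group_stats(pathname="replays/example.SC2Replay"):
    import sc2reader
    r = sc2reader.load_replay(pathname)
    p1_cps, p1_peace, p1_battle, p2_cps, p2_peace, p2_battle = control_group_stats(r)
    print("P1: %.2f commands/sec, macro selection rate %.3f (peace) / %.3f (battle)"
          % (p1_cps, p1_peace, p1_battle))
    print("P2: %.2f commands/sec, macro selection rate %.3f (peace) / %.3f (battle)"
          % (p2_cps, p2_peace, p2_battle))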
def scouting_analysis(replay):
    '''scouting_analysis takes in a previously loaded replay and returns a
    dictionary that contains various metrics of scouting for each player.
    These metrics include the scouting category, whether or not they
    execute an initial scouting, whether they mostly scout their opponent's
    main base, whether they consistently scout new areas of the map, and
    whether they consistently scout between battles.'''
    r = replay
    try:
        team1_scouting_states, team2_scouting_states = final_scouting_states(r)
        battles, harassing = battle_detector.buildBattleList(r)

        frames = r.frames
        team1_initial = hasInitialScouting(team1_scouting_states, frames,
                                           battles)
        team2_initial = hasInitialScouting(team2_scouting_states, frames,
                                           battles)

        team1_frames = scouting_timeframe_list1(team1_scouting_states)
        team2_frames = scouting_timeframe_list1(team2_scouting_states)

        team1_cat = categorize_player(team1_frames, battles, harassing, frames)
        team2_cat = categorize_player(team2_frames, battles, harassing, frames)

        team1_base = scoutsMainBase(team1_scouting_states)
        team2_base = scoutsMainBase(team2_scouting_states)

        team1_newAreas = scoutNewAreas(team1_scouting_states)
        team2_newAreas = scoutNewAreas(team2_scouting_states)

        team1_betweenBattles = scoutBetweenBattles(team1_scouting_states,
                                                   battles, frames)
        team2_betweenBattles = scoutBetweenBattles(team2_scouting_states,
                                                   battles, frames)

        return {
            1: [
                team1_cat, team1_initial, team1_base, team1_newAreas,
                team1_betweenBattles
            ],
            2: [
                team2_cat, team2_initial, team2_base, team2_newAreas,
                team2_betweenBattles
            ]
        }
    except:
        traceback.print_exc()
        print(replay.filename, "contains errors within scouting_detector")
        raise
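
# Sketch of consuming the returned dictionary. The label strings are ours,
# taken from the docstring; only the list order (category, initial scouting,
# main base, new areas, between battles) comes from the return value above.
def _print_scouting_analysis(analysis):
    labels = ["category", "initial scouting", "scouts main base",
              "scouts new areas", "scouts between battles"]
    for player, metrics in analysis.items():
        print("Player", player)
        for label, value in zip(labels, metrics):
            print("  %s: %s" % (label, value))
# e.g. _print_scouting_analysis(scouting_analysis(r))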
def final_scouting_states(replay):
    '''final_scouting_states is the backbone of scouting_detector.py. It does
    all of the necessary error checking and combines the other functions in
    this file to build completed scouting dictionaries, which are then used
    by the higher-level functions above. It takes in a previously loaded
    replay object from sc2reader and returns a completed scouting dictionary
    for each player. The body of this function also documents the order in
    which the scouting dictionaries must be built.'''
    r = replay

    if r.winner is None:
        print(r.filename, "has no winner information")
        raise RuntimeError()

    try:
        # some datafiles did not have a 'Controller' attribute
        if r.attributes[1]["Controller"] == "Computer" or \
                r.attributes[2]["Controller"] == "Computer":
            print(r.filename, "is a player vs. AI game")
            raise RuntimeError()
    except KeyError:
        # replays missing the 'Controller' attribute are treated as invalid
        raise RuntimeError()

    if r.length.seconds < 300:
        print(r.filename, "is shorter than 5 minutes")
        raise RuntimeError()

    if len(r.players) != 2:
        print(r.filename, "is not a 1v1 game")
        raise RuntimeError()

    tracker_events = r.tracker_events
    game_events = r.game_events
    frames = r.frames
    # scale = 16*sc2reader.constants.GAME_SPEED_FACTOR[r.expansion][r.speed]
    # it appears the scale is always 22.4 in our dataset, despite documentation to the contrary
    scale = 22.4

    allEvents = buildEventLists(tracker_events, game_events)
    objects = r.objects.values()
    team1_scouting_states, team2_scouting_states = buildScoutingDictionaries(
        r, allEvents, objects, frames)

    battles, harassing = battle_detector.buildBattleList(r)
    team1_scouting_states = integrateEngagements(team1_scouting_states,
                                                 battles, scale,
                                                 "Engaged in Battle")
    team2_scouting_states = integrateEngagements(team2_scouting_states,
                                                 battles, scale,
                                                 "Engaged in Battle")
    team1_scouting_states = integrateEngagements(team1_scouting_states,
                                                 harassing, scale, "Harassing")
    team2_scouting_states = integrateEngagements(team2_scouting_states,
                                                 harassing, scale, "Harassing")

    team1_scouting_states = checkFirstInstance(team1_scouting_states, scale)
    team2_scouting_states = checkFirstInstance(team2_scouting_states, scale)

    team1_scouting_states = removeEmptyFrames(team1_scouting_states, frames)
    team2_scouting_states = removeEmptyFrames(team2_scouting_states, frames)

    return team1_scouting_states, team2_scouting_states
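
# final_scouting_states raises RuntimeError for replays it rejects (no winner
# information, player vs. AI, shorter than 5 minutes, not 1v1). A hedged
# sketch of how a batch caller might skip those replays; the generator and
# its name are ours, not part of the original code.
def _valid_scouting_states(replays):
    '''Yield (filename, team1_states, team2_states) for accepted replays only.'''
    for r in replays:
        try:
            team1, team2 = final_scouting_states(r)
        except RuntimeError:
            continue  # rejected replay; the reason was already printed above
        yield r.filename, team1, team2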
Example #6
def generateFields(filename):
    '''generateFields takes in the filename of a replay, loads it, gathers the
    necessary statistics, and returns them as a list of fields to be written
    to a csv. The fields are the game id, each player's uid and ranking, and
    the start times and durations (in real-time seconds) of every battle and
    harassment instance detected in the game.'''
    # loading the replay
    try:
        # skipping non-replay files in the directory
        if not filename.endswith("SC2Replay"):
            raise RuntimeError()

        # extracting the game id and adding the correct tag
        # pathname = "practice_replays/" + filename
        pathname = "/Accounts/awb-data/replays/" + filename
        game_id = filename.split("_")[1].split(".")[0]
        if filename.startswith("ggg"):
            game_id = "ggg-" + game_id
        elif filename.startswith("spawningtool"):
            game_id = "st-" + game_id

        # loading the replay
        try:
            r = sc2reader.load_replay(pathname)
            # print(r.filename, r.players[0].highest_league, r.players[1].highest_league)
            if any(v != (0, {}) for v in r.plugin_result.values()):
                print(pathname, r.plugin_result)
        except:
            print(filename,
                  "cannot load using sc2reader due to an internal ValueError")
            traceback.print_exc()
            raise

        # team1_times, team2_times = scouting_detector.scouting_times(r, which)
        team1_rank, team1_rel_rank, team2_rank, team2_rel_rank = scouting_stats.ranking_stats(
            r)
        battles, harassing = battle_detector.buildBattleList(r)

        frames = r.frames
        seconds = r.real_length.total_seconds()
        battle_time = []
        battle_duration = []
        harassing_time = []
        harassing_duration = []
        for battle in battles:
            starttime = (battle[0] / frames) * seconds
            endtime = (battle[1] / frames) * seconds
            duration = endtime - starttime
            battle_duration.append(duration)
            battle_time.append(starttime)

        for harass in harassing:
            starttime = (harass[0] / frames) * seconds
            endtime = (harass[1] / frames) * seconds
            duration = endtime - starttime
            harassing_duration.append(duration)
            harassing_time.append(starttime)

        team1_uid = r.players[0].detail_data['bnet']['uid']
        team2_uid = r.players[1].detail_data['bnet']['uid']

        fields = [game_id, team1_uid, team1_rank, team2_uid, team2_rank,
                  battle_time, battle_duration, harassing_time,
                  harassing_duration]
        return fields

    except KeyboardInterrupt:
        raise
    except:
        print("\nReturning none\n")
        #traceback.print_exc()
        return
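
# Sketch of the csv-writing loop that generateFields is designed to feed. The
# output filename is hypothetical; the directory must match the path
# hard-coded inside generateFields, and the header row mirrors the order of
# the fields list returned above.
def _write_fields_csv(out_path="battle_fields.csv",
                      replay_dir="/Accounts/awb-data/replays/"):
    import csv
    import os
    header = ["game_id", "team1_uid", "team1_rank", "team2_uid", "team2_rank",
              "battle_time", "battle_duration",
              "harassing_time", "harassing_duration"]
    with open(out_path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(header)
        for filename in os.listdir(replay_dir):
            fields = generateFields(filename)
            if fields is not None:  # non-replay files and load errors return None
                writer.writerow(fields)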