Example #1
def cache_match_schedule():
    """Requests the match schedule from TBA and adds it to the cache."""
    # HACK: Only pulls the match schedule once since the caching built
    # into tba_communicator.py is not complete.
    matches = tba_communicator.request_matches()
    for match_data in matches:
        # 'qm' stands for qualification match
        if match_data['comp_level'] == 'qm':
            red_teams = match_data['alliances']['red']['team_keys']
            blue_teams = match_data['alliances']['blue']['team_keys']
            match_number = match_data['match_number']
            # Remove 'frc' from team number
            # (e.g. 'frc1678' -> '1678')
            red_teams = [team[3:] for team in red_teams]
            blue_teams = [team[3:] for team in blue_teams]
            final_match_data = {
                'matchNumber': match_number,
                'redTeams': red_teams,
                'blueTeams': blue_teams,
            }
            with open(
                    utils.create_file_path(
                        f'data/cache/match_schedule/{match_number}.json'),
                    'w') as file:
                json.dump(final_match_data, file)
Example #2
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self._lines:
            return

        try:
            self._file = open(self._filename, 'w')
        except IOError:
            create_file_path(self._filename)
            self._file = open(self._filename, 'w')

        for line in self._lines:
            self._file.write(line + '\n')

        self._file.close()

        if self._executable:
            os.system('chmod a+x %s' % self._filename)
Example #3
    def save_image(self):
        self.__txt_path.setText(self.__txt_path.displayText())
        save_path = self.__txt_path.text()
        created = create_dir_path(save_path)
        if created:
            self.label_log.append("created a directory:\n{}".format(save_path))
        file_path = create_file_path(save_path)
        save_image = self.__canvas.get_current_image()
        save_image.save(file_path)
        file_path_ = os.path.abspath(file_path)
        self.label_log.append("image saved to path:\n{}".format(file_path_))
Example #4
def temp_super_stream_handler(snapshot):
    """Runs when any new tempSuper data is uploaded"""
    data = snapshot['data']
    path = snapshot['path']

    # This occurs when all tempSuper data is updated at once (stream
    # initialization, all tempSuper data deleted, or first
    # tempSuper created)
    if path == '/':
        # This means that all tempSuper data has been wiped and we
        # should wipe our local copy.
        if data is None:
            delete_cache_data_folder('temp_super')
            return
    elif path.count('/') == 1:
        # This is moving the path into the data so it is in the same
        # format as data at the path '/'.  This allows us to use the
        # same code to save the data in our local cache later on.
        # The '[1:]' removes the slash at the beginning of the path
        data = {path[1:]: data}
    # If there is more than 1 slash in the path, the data is multiple
    # children deep.  tempSupers are only one child deep and this will
    # only trigger if invalid data is sent to Firebase.
    else:
        print('Error: Invalid tempSuper data received')
        return

    # This saves each tempSuper data in a separate text file.
    for temp_super_name, temp_super_value in data.items():
        # This means that this tempSuper has been deleted from Firebase
        # and we should delete it from our local copy.
        if temp_super_value is None:
            os.remove(
                utils.create_file_path(
                    f'data/cache/temp_super/{temp_super_name}.txt'))
        else:
            with open(
                    utils.create_file_path(
                        f'data/cache/temp_super/{temp_super_name}.txt'),
                    'w') as file:
                file.write(temp_super_value)
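delete_cache_data_folder is called above but not defined in this snippet; a minimal sketch of what such a helper might look like (an assumption for illustration, not the project's actual code):

import os
import shutil
import utils

def delete_cache_data_folder(folder_name):
    """Hypothetical helper: wipes and recreates a local cache subfolder."""
    folder_path = utils.create_file_path(f'data/cache/{folder_name}', False)
    if os.path.isdir(folder_path):
        shutil.rmtree(folder_path)
    os.makedirs(folder_path, exist_ok=True)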
Example #5
def save_data(file_path, data):
    """Saves data in 'cache' and 'upload_queue' directories.

    file_path is the relative file path to a JSON file from inside the
    'cache' or 'upload_queue' folder. (string)
    data is a dictionary that the JSON file is updated with."""
    # Removes preceding slash
    if file_path[0] == '/':
        file_path = file_path[1:]
    for directory in ['cache', 'upload_queue']:
        absolute_path = utils.create_file_path(f'data/{directory}/{file_path}')
        update_json_file(absolute_path, data)
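A minimal usage sketch (the path and payload here are hypothetical, and update_json_file is assumed to merge the dict into the file as the docstring describes):

# Updates data/cache/timds/1678Q10.json and
# data/upload_queue/timds/1678Q10.json with the given fields.
save_data('/timds/1678Q10.json', {'crossedHabLine': True, 'scoutID': 7})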
Example #6
def logging_setup(path_to_log, log_level, log_filemode):
    if log_level not in TRUE_LOG_LEVELS:
        log_level = 'DEBUG'

    if log_filemode not in TRUE_FILE_MODES:
        log_filemode = 'a'

    path = create_file_path(path_to_log)

    logging.basicConfig(filename=path,
                        level=log_level,
                        filemode=log_filemode,
                        format=MESSAGE_FORMAT_FOR_LOGGER,
                        datefmt=DATE_FORMAT_FOR_LOGGER)
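A usage sketch for logging_setup (the log path is hypothetical, and the exact contents of TRUE_LOG_LEVELS and TRUE_FILE_MODES are not shown above; invalid values simply fall back to 'DEBUG' and append mode):

logging_setup('data/logs/server.log', 'INFO', 'a')
logging.info('Logging configured')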
Example #7
def get_image_paths():
    """Gets dictionary of image paths"""
    csv_rows = dict()
    for team in TEAMS_LIST:
        # Maps the team number to a dict of photo types and their paths.
        csv_rows[team] = {
            'full_robot': '',
            'drivetrain': '',
            'mechanism': [],
        }
    # Iterates through each device in the tablets folder
    for device in os.listdir(utils.create_file_path('data/tablets')):
        # If the device is a phone serial number
        if device not in ['9AQAY1EV7J', '9AMAY1E54G', '9AMAY1E53P']:
            continue
        device_dir = utils.create_file_path(f'data/tablets/{device}/')
        # Iterates through all of the files in the phone's folder
        for file in os.listdir(device_dir):
            # Tries to match the file name with the regular expression
            result = re.fullmatch(PATH_PATTERN, file)
            # If the regular expression matched
            if result:
                # Team number is the result of the first capture group
                team_num = result.group(1)
                if team_num not in TEAMS_LIST:
                    continue
                # Photo type is the result of the second capture group
                photo_type = result.group(2)

                # There can be multiple mechanism photos, so they are handled differently
                if photo_type.startswith('mechanism'):
                    csv_rows[team_num]['mechanism'].append(os.path.join(device_dir, file))
                # Otherwise just add the photo path to its specified place in csv_rows
                else:
                    csv_rows[team_num][photo_type] = os.path.join(device_dir, file)
    return csv_rows
Example #8
def temp_timd_stream_handler(temp_timd_name, temp_timd_value_):
    """Runs when any new tempTIMDs are uploaded"""
    # This means that this tempTIMD has been deleted from Firebase
    # and we should delete our local copy.
    if temp_timd_value_ is None:
        os.remove(
            utils.create_file_path(
                f'data/cache/temp_timds/{temp_timd_name}.txt'))
        # Causes the corresponding TIMD to be recalculated
        register_modified_temp_timd(temp_timd_name)
    else:
        # HACK: Remove trailing '\n' (newlines) in compressed tempTIMD
        # data.  This is a bug in the Scout app.
        temp_timd_value_ = temp_timd_value_.rstrip('\n')
        with open(
                utils.create_file_path(
                    f'data/cache/temp_timds/{temp_timd_name}.txt'),
                'w') as file:
            file.write(temp_timd_value_)
        timd_name = temp_timd_name.split('-')[0]
        # This means an already existing tempTIMD has been modified
        # and needs to be recalculated.
        if temp_timd_name in LATEST_CALCULATIONS_BY_TIMD.get(timd_name, []):
            register_modified_temp_timd(temp_timd_name)
Example #9
def pull_device_data():
    """Pulls tablet data from attached tablets."""
    # Parses 'adb devices' to find attached devices so that we don't try to pull from nothing
    devices = get_attached_devices()
    data = {'qr': [], 'obj_pit': [], 'subj_pit': []}
    if not devices:
        return data

    device_file_paths = []
    device_file_path = utils.create_file_path('data/tablets')
    # Pull all files from the 'Download' folder on the tablet
    pull_device_files(device_file_path, '/storage/emulated/0/Download')
    # Iterates through the 'data/tablets' folder
    for device_dir in os.listdir(device_file_path):
        # If the folder name is a device serial number, it must be a tablet folder
        if device_dir in TABLET_SERIAL_NUMBERS.keys():
            device_file_paths.append(device_dir)
    for device in device_file_paths:
        # Iterate through the downloads folder in the device folder
        download_directory = os.path.join(device_file_path, device)
        for file in os.listdir(download_directory):
            for dataset, pattern in FILENAME_REGEXES.items():
                if re.fullmatch(pattern, file):
                    with open(os.path.join(download_directory,
                                           file)) as data_file:
                        # QR data is just read
                        if dataset == 'qr':
                            file_contents = data_file.read().rstrip('\n')
                        else:
                            file_contents = json.load(data_file)
                        data[dataset].append(file_contents)
                        break  # Filename will only match one regex
    # Add QRs to database and make sure that only QRs that should be decompressed are added to queue
    data['qr'] = qr_code_uploader.upload_qr_codes(data['qr'])
    for dataset in ['obj_pit', 'subj_pit']:
        current_data = local_database_communicator.read_dataset(dataset)
        modified_data = []
        for datapoint in data[dataset]:
            if datapoint in current_data:
                continue
            # Specify query to ensure that each team only has one entry
            local_database_communicator.update_dataset(
                f'raw.{dataset}', datapoint,
                {'team_number': datapoint['team_number']})
            modified_data.append({'team_number': datapoint['team_number']})
        utils.log_info(f'{len(modified_data)} items uploaded to {dataset}')
        data[dataset] = modified_data
    return data
Example #10
def full_data_export():
    """Writes the current export to a timestamped directory. Returns the directory path written"""
    current_time = datetime.datetime.now()
    timestamp_str = current_time.strftime('%Y-%m-%d_%H:%M:%S')
    # Creates directory if it does not exist
    directory_path = utils.create_file_path(f'data/exports/export_{timestamp_str}')
    # Team data
    team_file_path = os.path.join(directory_path, f'team_export_{timestamp_str}.csv')
    export_team_data(team_file_path)
    # Team in match data
    timd_file_path = os.path.join(directory_path, f'timd_export_{timestamp_str}.csv')
    export_tim_data(timd_file_path)
    # TBA match data
    tba_file_path = os.path.join(directory_path, f'tba_export_{timestamp_str}.csv')
    write_tba_data(tba_file_path)
    return directory_path
Example #11
def decompress_temp_timd_headers(compressed_headers):
    """Decompress headers for a single tempTIMD.

    compressed_headers are non-timed data fields."""

    with open(utils.create_file_path('data/assignments/assignments.json'),
              'r') as file:
        file_data = json.load(file)
    # Decompressed scout name to compressed scout name
    scout_name_compression_values = file_data['letters']

    # Reverses key:value pairs to enable accessing decompressed scout
    # name from compressed scout name
    scout_name_compression_values = {letter: scout_name for \
        scout_name, letter in scout_name_compression_values.items()}

    if compressed_headers[-1] == ',':
        # Removes trailing comma.
        compressed_headers = compressed_headers[:-1]

    compressed_headers = compressed_headers.split(',')

    decompressed_headers = {}

    for header in compressed_headers:
        compressed_key = header[0]
        compressed_value = header[1:]
        decompressed_key = TEMP_TIMD_COMPRESSION_KEYS[compressed_key]
        if decompressed_key == 'scoutName':
            # Uses 'scout_name_compression_values' dictionary to decompress scout name
            decompressed_value = scout_name_compression_values[
                compressed_value]
        else:
            decompressed_value = decompress_temp_timd_value(compressed_value)
        decompressed_headers[decompressed_key] = decompressed_value

    return decompressed_headers
Example #12
#!/usr/bin/env python3
# Copyright (c) 2019 FRC Team 1678: Citrus Circuits
"""Changes font size of tablets for app consistency."""
# External imports
import time
# Internal imports
import adb_communicator
import utils


def adb_font_size_enforcer():
    """Enforce tablet font size to 1.30, the largest supported size"""
    devices = adb_communicator.get_attached_devices()
    # Wait for USB connection to initialize
    time.sleep(.1)
    for device in devices:
        # The -s flag specifies the device by its serial number.
        utils.run_command(
            f'adb -s {device} shell settings put system font_scale 1.30',
            return_output=False)


FILE_PATH = utils.create_file_path('data/tablets')
utils.run_command(f'rm -R {FILE_PATH}', True)
utils.run_command('mkdir data/tablets', True)
adb_font_size_enforcer()
Example #13
#!/usr/bin/python3.6
"""Collects and exports all Super Scout pushing battles to a CSV file."""
# External imports
import csv
import os
# Internal imports
import decompressor
import utils

TEMP_SUPER_FILES = os.listdir(utils.create_file_path('data/cache/temp_super'))
# Sorts by match number, since Elo needs to be in chronological order
TEMP_SUPER_FILES.sort(
    key=lambda file_name: int(file_name.split('-')[0].split('Q')[1]))

PUSHING_BATTLES = []
for temp_super_file in TEMP_SUPER_FILES:
    with open(
            utils.create_file_path(f'data/cache/temp_super/{temp_super_file}'),
            'r') as file:
        compressed_temp_super = file.read()
    decompressed_pushing_battles = \
        decompressor.decompress_temp_super_pushing_battles(
            compressed_temp_super)
    PUSHING_BATTLES += decompressed_pushing_battles

# Orders pushing battle keys for CSV export
CSV_HEADERS = ['matchNumber', 'winner', 'loser', 'winMarginIsLarge']

with open(utils.create_file_path('data/exports/pushing-battles.csv'),
          'w') as file:
    CSV_WRITER = csv.DictWriter(file, fieldnames=CSV_HEADERS)
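The snippet ends before any rows are written; a plausible continuation, assuming each decompressed pushing battle is a dict keyed exactly by CSV_HEADERS (a sketch, not the project's actual code):

    CSV_WRITER.writeheader()
    for pushing_battle in PUSHING_BATTLES:
        CSV_WRITER.writerow(pushing_battle)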
Example #14
#!/usr/bin/python3.6
"""Calculate SPRs (Scout Precision Rankings).

Used in consolidation and to identify and address issues with scouting.
These issues are often caused by misunderstanding or actions that have
an ambiguous input.  With SPRs, these questions can be cleared up during
scout training and competition to decrease errors in the future."""
# External imports
import csv
import json
import os
# Internal imports
import decompressor
import utils

TEMP_TIMDS = os.listdir(utils.create_file_path('data/cache/temp_timds'))
TIMDS = os.listdir(utils.create_file_path('data/cache/timds'))

# Scout name to SPR breakdown dictionary
# Example format: 'Sam C': {'placement': {'correct': 3, 'total': 10}}
SPRS = {}


def register_value(scout_name_, data_field_, is_correct):
    """Registers correct or incorrect value in 'SPRS'."""
    if SPRS.get(scout_name_) is None:
        SPRS[scout_name_] = {}
    if SPRS[scout_name_].get(data_field_) is None:
        SPRS[scout_name_][data_field_] = {}
    previous_breakdown = SPRS[scout_name_][data_field_]
    previous_correct = previous_breakdown.get('correct', 0)
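register_value is truncated here; a plausible completion, assuming the 'correct'/'total' counter layout shown in the SPRS comment above (a sketch only):

    previous_total = previous_breakdown.get('total', 0)
    if is_correct:
        previous_breakdown['correct'] = previous_correct + 1
    previous_breakdown['total'] = previous_total + 1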
Example #15
#!/usr/bin/python3.6
"""Calculates points prevented by a defender in all TIMDs and Teams.

TIMD stands for Team in Match Data"""
# External imports
import json
import os
import subprocess
# Internal imports
import utils

# Extracts TIMDs from cache and organizes them by match.
TIMDS_BY_MATCH = {}
for timd in os.listdir(utils.create_file_path('data/cache/timds')):
    with open(utils.create_file_path(f'data/cache/timds/{timd}'), 'r') as file:
        timd_data = json.load(file)
    if timd_data.get('calculatedData') is not None:
        # .split() removes '.json' file ending
        timd_name = timd.split('.')[0]
        match_number = timd_name.split('Q')[1]
        # Creates a blank dictionary for a match if it doesn't exist yet.
        if TIMDS_BY_MATCH.get(match_number) is None:
            TIMDS_BY_MATCH[match_number] = {}
        TIMDS_BY_MATCH[match_number][timd_name] = timd_data

# Teams that have played defense in 1 or more matches
DEFENDER_TEAMS = set()

for match_number, timds in TIMDS_BY_MATCH.items():
    # Pulls the match schedule for a single match from the cache
    with open(
Example #16
"""Sends web requests to The Blue Alliance (TBA) APIv3

Caches data to prevent duplicate data retrieval from the TBA API.

API documentation: https://www.thebluealliance.com/apidocs/v3"""
# External imports
import json
import requests
import time
# Internal imports
import utils

EVENT_CODE = '2019carv'

with open(utils.create_file_path('data/api_keys/tba_key.txt')) as file:
    API_KEY = file.read()
# Removes trailing newline (if it exists) from file data.
# Many file editors will automatically add a newline at the end of files.
API_KEY = API_KEY.rstrip('\n')

def make_request(api_url, show_output=True, acceptable_cache_age=0):
    """Sends a single web request to the TBA API v3 and caches result.

    api_url is the url of the API request (the path after '/api/v3')
    show_output shows print statements about the status of the
    request.  Defaults to True.
    acceptable_cache_age is the maximum age (in seconds) of data that
    can be pulled from the cache.  Pulling from the cache is disabled by
    default."""
    base_url = 'https://www.thebluealliance.com/api/v3/'
    full_url = base_url + api_url
Example #17
def main():
    """
    Main Function.

    Training/Test/Plot
    """
    args = parse_arguments()
    device = torch.device('cuda')

    # remove randomness
    set_random_seed(args.seed)

    # Set Dataset
    if args.dataset == 'mnist':
        dataset = MNIST(path='data/MNIST',
                        n_class=args.n_class,
                        select=args.select,
                        select_novel_classes=args.select_novel_classes)

    elif args.dataset == 'fmnist':
        dataset = FMNIST(path='data/FMNIST',
                         n_class=args.n_class,
                         select=args.select)

    elif args.dataset == 'thyroid':
        dataset = THYROID(path='data/UCI')

    elif args.dataset == 'kddcup':
        dataset = KDDCUP(path='data/UCI')
    else:
        raise ValueError('Unknown dataset')

    checkpoints_dir = create_checkpoints_dir(args.dataset, args.fixed,
                                             args.mulobj, args.num_blocks,
                                             args.hidden_size,
                                             args.code_length, args.estimator)

    # Set Model
    if args.autoencoder is None:
        print('No Autoencoder')
        c, h, w = dataset.shape

        # build Density Estimator
        if args.estimator == 'SOS':
            model = TinvSOS(args.num_blocks, c * h * w,
                            args.hidden_size).cuda()
        # 1-D estimator from LSA
        elif args.estimator == 'EN':
            model = Estimator1D(code_length=c * h * w,
                                fm_list=[32, 32, 32, 32],
                                cpd_channels=100).cuda()
        else:
            raise ValueError('Unknown Estimator')
    else:

        if args.autoencoder == "LSA":
            print(f'Autoencoder:{args.autoencoder}')
            print(f'Density Estimator:{args.estimator}')

            if args.dataset in ['mnist', 'fmnist']:
                model = LSA_MNIST(input_shape=dataset.shape,
                                  code_length=args.code_length,
                                  num_blocks=args.num_blocks,
                                  est_name=args.estimator,
                                  hidden_size=args.hidden_size).cuda()

            elif args.dataset in ['kddcup']:
                model = LSA_KDDCUP(num_blocks=args.num_blocks,
                                   hidden_size=args.hidden_size,
                                   code_length=args.code_length,
                                   est_name=args.estimator).cuda()

            elif args.dataset in ['thyroid']:
                model = LSA_THYROID(num_blocks=args.num_blocks,
                                    hidden_size=args.hidden_size,
                                    code_length=args.code_length,
                                    est_name=args.estimator).cuda()
            else:
                raise ValueError('Unknown Dataset')
        else:
            raise ValueError('Unknown Autoencoder')

    # Result saved path
    result_file_path = create_file_path(args.mulobj, args.fixed,
                                        args.pretrained, model.name,
                                        args.dataset, args.score_normed,
                                        args.num_blocks, args.hidden_size,
                                        args.code_length, args.lam,
                                        args.checkpoint)

    print(checkpoints_dir)

    helper = OneClassTestHelper(dataset=dataset,
                                model=model,
                                score_normed=args.score_normed,
                                lam=args.lam,
                                checkpoints_dir=checkpoints_dir,
                                result_file_path=result_file_path,
                                batch_size=args.batch_size,
                                lr=args.lr,
                                epochs=args.epochs,
                                before_log_epochs=args.before_log_epochs,
                                code_length=args.code_length,
                                mulobj=args.mulobj,
                                test_checkpoint=args.checkpoint,
                                log_step=args.log_step,
                                device=device,
                                fixed=args.fixed,
                                pretrained=args.pretrained,
                                load_lsa=args.load_lsa)

    if args.trainflag:
        cl = args.select
        helper.train_one_class_classification(cl)
    elif args.testflag:
        helper.test_classification()
    elif args.compute_AUROC:
        helper.compute_AUROC(log_step=args.log_step, epoch_max=args.epochs)
    elif args.plot_training_loss_auroc:
        helper.plot_training_loss_auroc(log_step=args.log_step)
    elif args.using_train_set:
        cl = args.select
        helper.test_one_class_classification_with_trainset(cl)
Example #18
            blue_teams = match_data['alliances']['blue']['team_keys']
            match_number = match_data['match_number']
            # Remove 'frc' from team number and convert to integer
            # (e.g. 'frc1678' -> 1678)
            red_teams = [int(team[3:]) for team in red_teams]
            blue_teams = [int(team[3:]) for team in blue_teams]
            FINAL_MATCH_DATA[match_number] = {
                'matchNumber': match_number,
                'redTeams': red_teams,
                'blueTeams': blue_teams,
            }
    FIREBASE_UPLOAD.update({'Matches': FINAL_MATCH_DATA})

if FULL_WIPE is True:
    # Loads scout names from assignment file
    with open(utils.create_file_path('data/assignments/assignments.json'),
              'r') as file:
        SCOUT_NAMES = json.load(file)['letters'].keys()
    FIREBASE_UPLOAD.update({
        'tempTIMDs': None,
        'TIMDs': None,
        'tempSuper': None,
        'scoutManagement': {
            'currentMatchNumber': 1,
            'cycleNumber': 0,
            'availability': {scout: 0
                             for scout in SCOUT_NAMES},
        },
    })

    # Removes 'cache' and 'upload_queue' folders to remove outdated data
Example #19
#!/usr/bin/env python3
# Copyright (c) 2019 FRC Team 1678: Citrus Circuits
"""Sets up a machine for use or testing of the server.

To be run for every new clone of server-2020.
"""
# External imports
import os
import subprocess
import venv
# Internal imports
import utils

# Creates data folder
utils.create_file_path('data/')

# Creates path by joining the base directory and the target directory
TARGET_PATH = utils.create_file_path('.venv', False)

# Only run if file is directly called
if __name__ == '__main__':
    # Create virtual environment
    print('Creating Virtual Environment...')
    # Clear any existing environments with clear=True
    # Install pip directly into installation
    # Set prompt to 'venv' instead of '.venv'
    venv.create(TARGET_PATH, clear=True, with_pip=True, prompt='venv')
    print('Virtual Environment Created')

    # Install pip packages
    # Set create_directories to false to avoid trying to create directory for pip
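The script is cut off at the pip-install step; a hedged sketch of that step under the comments above (the requirements file name and the pip path inside '.venv' are assumptions):

    # Hypothetical continuation: install requirements with the venv's pip.
    # Second argument False: do not create directories for this path.
    PIP_PATH = utils.create_file_path('.venv/bin/pip', False)
    subprocess.run([PIP_PATH, 'install', '-r', 'requirements.txt'], check=True)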
Example #20
    """Removes zeroes from a list, then returns the average of the list.

    lis is the list that is averaged"""
    lis = [item for item in lis if item != 0]
    if lis == []:
        return None
    else:
        return sum(lis) / len(lis)


# Uses default Firebase URL
# DB stands for database
DB = firebase_communicator.configure_firebase()

# List of files (tempSuper datas) in the 'temp_super' cache directory.
TEMP_SUPER_FILES = os.listdir(utils.create_file_path('data/cache/temp_super'))

# Match number (string) to list of tempSuper files for that match
FILES_BY_MATCH = {}
for file_name in TEMP_SUPER_FILES:
    # tempSuper naming format:
    # S!Q{match_number}-{alliance_color}
    # (e.g. S!Q3-B is the blue alliance in match 3)
    match_number = file_name.split('-')[0].split('Q')[1]
    if FILES_BY_MATCH.get(match_number) is None:
        FILES_BY_MATCH[match_number] = []
    FILES_BY_MATCH[match_number].append(file_name)

for match_number, files in FILES_BY_MATCH.items():
    compressed_data = {}
    for temp_super_file in files:
        win = 2 if calculated_data['redPredictedScore'] > \
            calculated_data.get('bluePredictedScore', 0) else 0
        total = win + calculated_data['redChanceClimbRP'] + \
            calculated_data['redChanceRocketRP']
        return total
    else:
        win = 2 if calculated_data['bluePredictedScore'] > \
            calculated_data.get('redPredictedScore', 0) else 0
        total = win + calculated_data['blueChanceClimbRP'] + \
            calculated_data['blueChanceRocketRP']
        return total


# Gathers the calculated data from all the teams.
TEAMS = {}
for team in os.listdir(utils.create_file_path('data/cache/teams')):
    with open(utils.create_file_path(f'data/cache/teams/{team}')) as file:
        team_data = json.load(file)
    # Checks if the team has calculated data before considering them for
    # predictions.
    if team_data.get('calculatedData') is not None:
    # '.split()' removes '.json' file ending
        TEAMS[team.split('.')[0]] = team_data

# Gathers the matches in the competition. These matches are cached from
# the tba match schedule when the server first runs.
MATCH_SCHEDULE = {}
for match in os.listdir(utils.create_file_path('data/cache/match_schedule')):
    with open(utils.create_file_path(
            f'data/cache/match_schedule/{match}')) as file:
        match_data = json.load(file)
Example #22

FINAL_DATA = {}

# Firebase key names to the equivalent local cache key names
FIREBASE_TO_CACHE_KEY = {
    'TIMDs': 'timds',
    'Teams': 'teams',
    'Matches': 'matches',
}

FILES_TO_REMOVE = []

for firebase_key, cache_key in FIREBASE_TO_CACHE_KEY.items():
    for file in os.listdir(
            utils.create_file_path(f'data/upload_queue/{cache_key}')):
        file_path = utils.create_file_path(
            f'data/upload_queue/{cache_key}/{file}')
        # Collects and adds the data from a single file to 'FINAL_DATA'
        FINAL_DATA.update(collect_file_data(file_path, firebase_key))

        FILES_TO_REMOVE.append(file_path)

# Before sending the data, iterates through all of it and removes any
# NaNs (Not a Number) in the data.  (Relies on NaN != NaN)
# HACK: NaNs should be handled during calculation.
for path, value in FINAL_DATA.items():
    if path.split('/')[-1] == 'timeline':
        for action in value:
            for key, value_ in action.items():
                if isinstance(value_, float) and value_ != value_:
Example #23
TEAM_DATA_DB_PATHS = [
    'raw.obj_pit',
    'raw.subj_pit',
    'processed.calc_obj_team',
    'processed.calc_subj_team',
    'processed.calc_tba_team'
]
TIM_DATA_DB_PATHS = [
    'processed.calc_obj_tim',
    'processed.calc_tba_tim'
]
DB_PATH_TO_SCHEMA_FILE = {
    'raw.obj_pit': 'schema/obj_pit_collection_schema.yml',
    'raw.subj_pit': 'schema/subj_pit_collection_schema.yml',
    'processed.calc_obj_team': 'schema/calc_obj_team_schema.yml',
    'processed.calc_subj_team': 'schema/calc_subj_team_schema.yml',
    'processed.calc_tba_team': 'schema/calc_tba_team_schema.yml',
    'processed.calc_obj_tim': 'schema/calc_obj_tim_schema.yml',
    'processed.calc_tba_tim': 'schema/calc_tba_tim_schema.yml'
}

if __name__ == '__main__':
    EXPORT_PATH = full_data_export()
    LATEST_PATH = utils.create_file_path('data/exports/latest_export', False)
    # Remove latest export directory if it exists
    if os.path.exists(LATEST_PATH):
        os.remove(LATEST_PATH)
    # Symlink the latest_export directory to the export that was just made
    os.symlink(EXPORT_PATH, LATEST_PATH)
Example #24
def make_request(api_url, show_output=True, acceptable_cache_age=0):
    """Sends a single web request to the TBA API v3 and caches result.

    api_url is the url of the API request (the path after '/api/v3')
    show_output shows print statements about the status of the
    request.  Defaults to True.
    acceptable_cache_age is the maximum age (in seconds) of data that
    can be pulled from the cache.  Pulling from the cache is disabled by
    default."""
    base_url = 'https://www.thebluealliance.com/api/v3/'
    full_url = base_url + api_url
    request_headers = {'X-TBA-Auth-Key': API_KEY}

    # This cache is used with TBA's 'Last-Modified' and
    # 'If-Modified-Since' headers to prevent duplicate data downloads.
    # If the data has not changed since the last request, it will be
    # pulled from the cache.
    # Documentation of the 'Last-Modified' and 'If-Modified-Since' headers:
    # https://www.thebluealliance.com/apidocs#apiv3
    try:
        with open(utils.create_file_path('data/cache/tba/tba.json'), 'r') as file_:
            cached_requests = json.load(file_)
    except FileNotFoundError:
        cached_requests = {}

    # Returns the cached request if it was pulled within the
    # 'acceptable_cache_age' limit.
    last_request_time = cached_requests.get(api_url, {}).get('last_requested', 0)
    if (time.time() - last_request_time) < acceptable_cache_age:
        return cached_requests[api_url]['data']

    # 'cache_last_modified' is the time that the data in the cache was
    # published to TBA's API.
    cache_last_modified = cached_requests.get(api_url, {}).get('last_modified')
    if cache_last_modified is not None:
        request_headers['If-Modified-Since'] = cache_last_modified

    if show_output is True:
        print(f'Retrieving data from TBA...\nURL: {api_url}')
    while True:
        try:
            request_time = time.time()
            request = requests.get(full_url, headers=request_headers)
        except requests.exceptions.ConnectionError:
            # Errors will always be shown, even if 'show_output' is False.
            print('Error: No internet connection.  Trying again in 3 seconds...')
        else:
            if show_output is True:
                print('TBA data successfully retrieved.')
            break
        time.sleep(3)

    # A 304 status code means the data was not modified since our last
    # request, and we can pull it from the cache.
    if request.status_code == 304:
        return cached_requests[api_url]['data']
    # A 200 status code means the request was successful
    elif request.status_code == 200:
        # Updates local cache
        cached_requests[api_url] = {
            'last_requested': request_time,
            'last_modified': request.headers['Last-Modified'],
            'data': request.json(),
        }
        with open(utils.create_file_path('data/cache/tba/tba.json'), 'w') as file_:
            json.dump(cached_requests, file_)
        return request.json()
    else:
        # Errors will always be shown, even if 'show_output' is False.
        print(f'Request failed with status code {request.status_code}')
        return {}
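A usage sketch for make_request (the endpoint shown is the standard TBA API v3 match-list route for an event key such as EVENT_CODE from the earlier snippet; the 60-second cache window is only an illustration):

MATCHES = make_request(f'event/{EVENT_CODE}/matches/simple',
                       acceptable_cache_age=60)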
Example #25
                if {
                        'match_number': match,
                        'scout_id': id_
                } not in items_to_ignore:
                    utils.log_warning(
                        f'Duplicate Scout ID {id_} for Match {match}')
            else:
                unique_scout_ids.append(id_)
        # Scout IDs are from 1-18 inclusive
        for id_ in range(1, 19):
            if id_ not in unique_scout_ids:
                if {
                        'match_number': match,
                        'scout_id': id_
                } not in items_to_ignore:
                    utils.log_warning(
                        f'Scout ID {id_} missing from Match {match}')


# Load latest match collection compression QR code schema
SCHEMA = utils.read_schema('schema/match_collection_qr_schema.yml')

MISSING_TIM_IGNORE_FILE_PATH = utils.create_file_path(
    'data/missing_tim_ignore.yml')
_GENERIC_DATA_FIELDS = _get_data_fields('generic_data')
OBJECTIVE_QR_FIELDS = _GENERIC_DATA_FIELDS.union(
    _get_data_fields('objective_tim'))
SUBJECTIVE_QR_FIELDS = _GENERIC_DATA_FIELDS.union(
    _get_data_fields('subjective_aim'))
_TIMELINE_FIELDS = get_timeline_info()
Example #26
    def train(self):
        """ Training model. """

        # switch to train
        self.net.train()

        self.log.writeline('# Start training.', False)
        plot_point = 0  # for tensorboard

        # loop epoch
        for ep in range(self.max_epoch):
            total_loss = 0  # total loss
            total_acc = 0  # total accuracy

            self.log.writeline(f'----- Epoch: {ep + 1} -----')

            subdivision = self.tms.subdivision

            # batch in one epoch (outer tqdm)
            outer_pbar = tqdm(self.train_loader, total=len(self.train_loader),
                              ncols=100, bar_format='{l_bar}{bar:30}{r_bar}')
            outer_pbar.set_description('TRAIN')

            # batch process
            for batch_idx, items in enumerate(outer_pbar):
                imgs: torch.Tensor
                labels: torch.Tensor
                paths: torch.Tensor

                imgs, labels, paths = items

                self.optimizer.zero_grad()  # init gradient

                batch_size = len(imgs)  # batch size
                batch_result = torch.tensor([])  # all result of one batch
                batch_loss = 0  # total loss of one batch

                # generate arithmetic progression of mini batch boundaries
                sep = np.linspace(0, batch_size, subdivision + 1, dtype=int)

                # mini batch process
                for sd in range(subdivision):
                    n, m = sep[sd], sep[sd + 1]  # cutout data (N ~ M)
                    mb_imgs = imgs[n:m].to(self.device)
                    mb_labels = labels[n:m].to(self.device)

                    mb_result = self.net(mb_imgs)  # data into model
                    loss = self.criterion(mb_result, mb_labels)  # calculate loss
                    loss.backward()  # calculate gradient (back propagation)

                    # concatenate result
                    batch_result = torch.cat((batch_result, mb_result.cpu()), dim=0)

                    batch_loss += float(loss.item())  # add loss value
                # end of this mini batch

                self.optimizer.step()  # update parameters
                loss_val = batch_loss / subdivision  # calc avg loss value

                # tensorboard log
                self.writer.add_scalar('data/loss', loss_val, plot_point)
                plot_point += 1

                # label
                predicted = torch.max(batch_result.data, 1)[1].cpu()  # predict
                labels = labels.cpu()
                self.all_pred = torch.cat((self.all_pred, predicted), dim=0)
                self.all_label = torch.cat((self.all_label, labels), dim=0)

                predicted = predicted.numpy()  # predict
                label_ans = labels.numpy()  # correct answer

                # cls_bool = [label_ans[i] for i, x in enumerate(pred_bool) if not x]

                pred_bool = (label_ans == predicted)  # matching
                # index of mistake prediction
                false_step = [idx for idx, x in enumerate(pred_bool) if not x]

                # save image of mistake prediction
                for idx in false_step:
                    fp = self.false_paths[ep]
                    name = Path(str(paths[idx])).name

                    img_path = Path(fp, f'batch_{batch_idx}-{name}')
                    img_path.parent.mkdir(parents=True, exist_ok=True)

                    save_image(imgs[idx], str(img_path))  # save

                # count of matched label
                acc_cnt = pred_bool.sum()

                # calc total
                total_acc += acc_cnt
                total_loss += loss_val * batch_size

                acc = acc_cnt / batch_size  # accuracy

                # for tqdm message
                outer_pbar.set_postfix(
                    ordered_dict=OrderedDict(loss=f'{loss_val:<.6f}', acc=f'{acc:<.6f}'))

                # for log
                ss = f'loss: {loss_val:<.6f} / acc: {acc:<.6f}'
                ss += f'\n  -> ans   : {label_ans}'
                ss += f'\n  -> result: {predicted}'

                self.log.writeline(ss, debug_ok=False)

                # break
                # end of this batch

            # add confusion matrix to tensorboard
            cm = calc_confusion_matrix(self.all_label, self.all_pred, len(self.classes))
            fig = plot_confusion_matrix(cm, list(self.classes.values()))
            add_to_tensorboard(self.writer, fig, 'confusion matrix', ep)

            # calculate total loss / accuracy
            size = len(self.train_loader.dataset)
            total_loss = total_loss / size
            total_acc = total_acc / size

            # for log
            self.log.writeline('\n---------------', debug_ok=False)
            self.log.writeline(f'Total loss: {total_loss}', debug_ok=False)
            self.log.writeline(f'Total acc: {total_acc}', debug_ok=False)
            self.log.writeline('---------------\n', debug_ok=False)

            # for tqdm
            print(f'  Total loss: {total_loss}')
            print(f'  Total acc: {total_acc}\n')

            # for tensorboard
            self.writer.add_scalar('data/total_acc', total_acc, ep)
            self.writer.add_scalar('data/total_loss', total_loss, ep)

            # exec test cycle
            if self.test_schedule[ep]:
                self.test_model.test(epoch=ep)

            # save pth cycle
            if self.pth_save_schedule[ep]:
                save_path = ul.create_file_path(
                    self.pth_save_path, '', head=f'epoch{ep + 1}', ext='pth')

                progress = ul.ProgressLog(f'Saving model to \'{save_path}\'')
                self.save_model(save_path)  # save
                progress.complete()

                # log
                self.log.writeline(f'# Saved model to \'{save_path}\'', debug_ok=False)

            # break
            # end of this epoch

        # export as json
        # self.writer.export_scalars_to_json(f'{self.tms.config_path}/all_scalars.json')
        self.writer.close()
Example #27
    '015d172c98041412': 'Scout 16',
    '015d188421480008': 'Scout 17',
    '015d2568753c0200': 'Scout 18',
    # Fire tablets without cases (Backups 1-5)
    'G000H40563460VSC': 'Backup 1',
    'G000H4056383066L': 'Backup 2',
    'G000H40563460T65': 'Backup 3',
    'G000H404610600EK': 'Backup 4',
    'G0K0KH02623400GT': 'Backup 5',
    # Super scout tablets
    'redacted': 'Red Super',
    'redacted': 'Blue Super',
    'redacted': 'Purple Super',
}

ASSIGNMENT_FILE_PATH = utils.create_file_path(
    'data/assignments/assignments.txt')

# List of devices to which 'assignments.txt' has already been sent
DEVICES_WITH_FILE = []


def validate_file(device_id):
    """Validates that the assignment file was successfully transfered.

    Compares the assignments.txt on the tablet to the locally stored
    assignments.txt file.

    device_id is the serial number of the device"""
    # Reads the server version of assignments.txt
    with open(ASSIGNMENT_FILE_PATH, 'r') as file:
        computer_data = file.read()
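validate_file is cut off after reading the local file; a hedged sketch of the comparison step (the on-tablet path, and run_command returning output when return_output=True, are assumptions):

    # Hypothetical continuation: read the tablet's copy over adb and compare.
    tablet_data = utils.run_command(
        f'adb -s {device_id} shell cat /storage/emulated/0/Download/assignments.txt',
        return_output=True)
    return tablet_data == computer_data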
Example #28
import utils

# Uses default firebase URL
# DB stands for database
DB = firebase_communicator.configure_firebase()

if len(sys.argv) == 2:
    CYCLE_NUMBER = sys.argv[1]
else:
    print('Error: Cycle number not being passed as an argument. Exiting...')
    sys.exit(0)

# Each scout name is associated with a letter (for compression).
# This opens the JSON file that stores the letters and loads the dict
# that is used to swap names with letters.
with open(utils.create_file_path('data/assignments/assignments.json'),
          'r') as file:
    LETTERS = json.load(file)['letters']

AVAILABILITY = DB.child('scoutManagement/availability').get().val()
AVAILABLE_SCOUTS = [
    scout for scout, availability in AVAILABILITY.items() if availability == 1
]

# The base assignment string
ASSIGNMENT_STRING = f'{CYCLE_NUMBER}_{firebase_communicator.URL}|'

with open(utils.create_file_path('data/sprs/sprs.json'), 'r') as file:
    SPRS = json.load(file)

# Sorts scouts from best SPR to worst SPR
Example #29
                continue
            # Join list returned by update_array with existing write operations
            write_operations.extend(update_array(path, changed_documents))
    # Write changes to database
    # Ordered must be true because we pull outdated data before pushing new data
    # Throws error on lost connection
    try:
        if write_operations:
            CLOUD_DB.competitions.bulk_write(write_operations, ordered=True)
    except pymongo.errors.AutoReconnect:
        utils.log_warning('Cloud Database Write Timeout.')
        return None
    return 0


def add_competition_cloud(tba_event_key):
    """Adds competition document to cloud database."""
    local_database_communicator.add_competition(CLOUD_DB, tba_event_key)


# Connect to cloud database
with open(utils.create_file_path('data/api_keys/cloud_password.txt')) as file:
    CLOUD_PASSWORD = file.read().rstrip('\n')
DB_ADDRESS = f'mongodb+srv://server:{CLOUD_PASSWORD}@scouting-system-3das1.gcp.mongodb.net/test?retryWrites=true&w=majority'
CLOUD_DB = pymongo.MongoClient(DB_ADDRESS).scouting_system_cloud
# Creates cloud database indexes (if they don't already exist)
CLOUD_DB.competitions.create_indexes([
    pymongo.IndexModel('tba_event_key', unique=True),
    pymongo.IndexModel('year', unique=False)
])
Example #30
        for team, data in TEAMS.items()
    }

    mean = numpy.mean(list(averages.values()))
    sd = numpy.std(list(averages.values()))
    for team, average in averages.items():
        if sd == 0.0:
            TEAMS[team]['calculatedData'][team_zscore_field] = 0.0
        else:
            TEAMS[team]['calculatedData'][team_zscore_field] = (average -
                                                                mean) / sd


# Gathers the calculated data from all the teams.
TEAMS = {}
for team in os.listdir(utils.create_file_path('data/cache/teams')):
    with open(utils.create_file_path(f'data/cache/teams/{team}')) as file:
        team_data = json.load(file)
    if team_data.get('calculatedData') is not None:
        # '.split()' removes '.json' file ending
        TEAMS[team.split('.')[0]] = team_data

# Each Z-Score data field to the average data field it is calculated from.
SUPER_ZSCORE_DATA_FIELDS = {
    'agilityZScore': 'avgAgility',
    'speedZScore': 'avgSpeed',
}

# Calculates zscores for teams based on data fields in 'SUPER_ZSCORE_DATA_FIELDS'
for zscore_name, average_name in SUPER_ZSCORE_DATA_FIELDS.items():
    calculate_zscores(average_name, zscore_name)
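A quick numeric sanity check of the z-score formula above, using toy averages (hypothetical team numbers and values):

import numpy

toy_averages = {'1678': 6.0, '254': 4.0, '971': 2.0}
toy_mean = numpy.mean(list(toy_averages.values()))  # 4.0
toy_sd = numpy.std(list(toy_averages.values()))     # population std, ~1.633
print((toy_averages['1678'] - toy_mean) / toy_sd)   # ~1.22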
Example #31
            final_match_data = {
                'matchNumber': match_number,
                'redTeams': red_teams,
                'blueTeams': blue_teams,
            }
            with open(
                    utils.create_file_path(
                        f'data/cache/match_schedule/{match_number}.json'),
                    'w') as file:
                json.dump(final_match_data, file)


# Deletes the entire 'cache' directory to remove any old data.
# Checks if the directory exists before trying to delete it to avoid
# causing an error.
if os.path.isdir(utils.create_file_path('data/cache', False)):
    shutil.rmtree(utils.create_file_path('data/cache', False))

# Detects when CTRL+C is pressed, then runs handle_ctrl_c
signal.signal(signal.SIGINT, handle_ctrl_c)

# Creates all the database streams and stores them in global dict.
STREAMS = create_streams()

# In order to make match calculations, the match schedule must be taken
# from TBA and put into the cache.
cache_match_schedule()

# Wipes 'temp_timds' cache folder
delete_cache_data_folder('temp_timds')
# Stores the keys of cached 'tempTIMDs'