Example #1
def upload_qr_codes(qr_codes):
    """Uploads QR codes into the current competition document.

    Prevents duplicate QR codes from being uploaded to the database.
    qr_codes is a list of QR code strings to upload.
    """
    # Loads the schema, which stores the starting character for each QR code type
    schema = utils.read_schema('schema/match_collection_qr_schema.yml')

    # Acquires current qr data using local_database_communicator.py
    qr_data = local_database_communicator.read_dataset('raw.qr')

    # Creates a set to store QR codes
    # This is a set in order to prevent addition of duplicate qr codes
    qr = set()

    for qr_code in qr_codes:
        # Skips QR codes that are already in the database
        if qr_code in qr_data:
            pass
        # Checks to make sure the qr is valid by checking its starting character. If the starting
        # character doesn't match either of the options, the QR is printed out.
        elif not (qr_code.startswith(
                schema['subjective_aim']['_start_character'])
                  or qr_code.startswith(
                      schema['objective_tim']['_start_character'])):
            utils.log_warning(f'Invalid QR code not uploaded: "{qr_code}"')
        else:
            qr.add(qr_code)

    # Adds the QR codes to the local database if the set isn't empty
    if qr:
        local_database_communicator.append_to_dataset('raw.qr', list(qr))

    return qr
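
# A minimal usage sketch; the QR strings are hypothetical, with '+' and '*'
# standing in for whatever start characters the schema defines.
new_codes = upload_qr_codes(['+A1B2C3', '*D4E5F6'])
print(f'{len(new_codes)} new QR codes uploaded')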
Example #2
def tba_request(api_url):
    """Sends a single web request to the TBA API v3 api_url is the suffix of the API request URL

    (the part after '/api/v3').
    """
    utils.log_info(f'tba request from {api_url} started')
    full_url = f'https://www.thebluealliance.com/api/v3/{api_url}'
    request_headers = {'X-TBA-Auth-Key': API_KEY}
    cached = local_database_communicator.select_tba_cache(api_url)
    # Check if cache exists
    if cached:
        cached = cached[api_url]
        request_headers['If-Modified-Since'] = cached['timestamp']
    print(f'Retrieving data from {full_url}')
    try:
        request = requests.get(full_url, headers=request_headers)
    except requests.exceptions.ConnectionError:
        utils.log_warning('Error: No internet connection.')
        return None
    utils.log_info(f'tba request from {api_url} finished')
    # A 200 status code means the request was successful
    # 304 means that data was not modified since the last timestamp
    # specified in request_headers['If-Modified-Since']
    if request.status_code == 304:
        return cached['data']
    if request.status_code == 200:
        formatted_data = {
            'timestamp': request.headers['Last-Modified'],
            'data': request.json()
        }
        local_database_communicator.overwrite_tba_data(formatted_data, api_url)
        return request.json()
    raise Warning(f'Request failed with status code {request.status_code}')
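
# A minimal usage sketch, assuming API_KEY holds a valid TBA auth key and
# utils.TBA_EVENT_KEY holds an event key.
matches = tba_request(f'event/{utils.TBA_EVENT_KEY}/matches')
if matches is not None:
    print(f'Fetched {len(matches)} matches')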
Example #3
def update_array(path, change_list):
    """Updates an array of embedded documents. Return 0 on success, 1 if connection was lost."""
    write_operations = []
    # Return blank list if there are no changes at this path
    if not change_list:
        return write_operations
    # Remove documents to be updated
    write_operations.append(
        pymongo.UpdateOne({'tba_event_key': utils.TBA_EVENT_KEY},
                          {'$pull': {
                              path: {
                                  '$or': change_list
                              }
                          }}))
    # Select documents to add
    filter_change_list = []
    for change in change_list:
        equals = []
        for key, value in change.items():
            equals.append({'$eq': [f'$$item.{key}', value]})
        filter_change_list.append({'$and': equals})

    to_add = local_database_communicator.DB.competitions.aggregate([{
        '$match': {
            'tba_event_key': utils.TBA_EVENT_KEY
        }
    }, {
        '$project': {
            path: {
                '$filter': {
                    'input': f'${path}',
                    'as': 'item',
                    'cond': {
                        '$or': filter_change_list
                    }
                }
            }
        }
    }])
    # Aggregate returns a cursor object, so it must be converted to a list. `tba_event_key` is
    # guaranteed to be unique, so there will always be exactly one result.
    to_add = list(to_add)[0]
    # Remove `_id` so the only item is the array nested in the directory structure
    to_add.pop('_id')
    # Remove nesting, making `to_add` only a list of changed documents
    while isinstance(to_add, dict):
        to_add = to_add[[*to_add.keys()][0]]
    # No data matched or dataset does not exist, so warn & return blank list
    if to_add is None:
        utils.log_warning(f'No data matched, or the dataset at {path} does not exist.')
        return []
    write_operations.append(
        pymongo.UpdateOne({'tba_event_key': utils.TBA_EVENT_KEY},
                          {'$push': {
                              path: {
                                  '$each': to_add
                              }
                          }}))
    return write_operations
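
# A minimal usage sketch; the returned operations are meant to be combined
# into a single bulk write. The change list here is hypothetical.
# Ordered must be true because documents are pulled before being re-pushed.
changes = [{'team_number': 1678, 'match_number': 1}]
operations = update_array('processed.calc_obj_tim', changes)
if operations:
    local_database_communicator.DB.competitions.bulk_write(operations, ordered=True)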
def write_tba_data(path):
    """Writes TBA Data to csv export. Path is a str representing the output absolute file path."""
    data = format_tba_data()
    if not data:
        utils.log_warning('No TBA Data to export')
        return
    field_names = data[0].keys()
    # newline='' prevents the csv module from writing blank lines on Windows
    with open(path, 'w', newline='') as file:
        writer = csv.DictWriter(file, field_names)
        writer.writeheader()
        for row in data:
            writer.writerow(row)
    utils.log_info('Exported TBA Data')
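
# A minimal usage sketch; the output path is hypothetical.
write_tba_data('/tmp/tba_export.csv')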
Example #5
def _pull_file_from_device(self, device_path, host_path):
    if self.adb.run(['pull', device_path, host_path]):
        return True
    # On a non-rooted device, we can't pull /data/app/XXX/base.odex directly.
    # Instead, we can first copy the file to /data/local/tmp, then pull it.
    filename = device_path[device_path.rfind('/')+1:]
    if (self.adb.run(['shell', 'cp', device_path, '/data/local/tmp']) and
            self.adb.run(['pull', '/data/local/tmp/' + filename, host_path])):
        self.adb.run(['shell', 'rm', '/data/local/tmp/' + filename])
        return True
    log_warning('failed to pull %s from device' % device_path)
    return False
def stop_recording(args):
    adb = AdbHelper()
    result = adb.run(['shell', 'pidof', 'simpleperf'])
    if not result:
        log_warning('No simpleperf process on device. The recording has ended.')
    else:
        adb.run(['shell', 'pkill', '-l', '2', 'simpleperf'])
        print('Waiting for simpleperf process to finish...')
        while adb.run(['shell', 'pidof', 'simpleperf']):
            time.sleep(1)
    adb.run(['shell', 'cat', '/data/local/tmp/simpleperf_output'])
    adb.check_run(['pull', '/data/local/tmp/perf.data', args.perf_data_path])
    print('The recording data has been collected in %s.' % args.perf_data_path)
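
# A minimal sketch of invoking stop_recording, assuming an argparse-style
# namespace with a perf_data_path attribute.
import argparse
stop_recording(argparse.Namespace(perf_data_path='perf.data'))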
Example #8
def check_status_consistency(filename, row, i, log):
    """Check that the status is consistent with the requirements."""

    # Checks if Status is one of Accepted, Opened, Rejected
    # and checks for required information if so
    if row["Status"] in ["Accepted", "Opened", "Rejected"]:

        # The project apache/incubator-dubbo was renamed to apache/dubbo,
        # so the Project URL name (old) doesn't match the PR Link name
        # (new), despite them being the same project. This if statement is
        # a workaround for that issue.
        if (row["Project URL"] == "https://github.com/apache/incubator-dubbo"
                and re.sub(r"\/pull\/\d+", "", row["PR Link"]).casefold()
                == "https://github.com/apache/dubbo"):
            pass
        else:
            check_pr_link(filename, row, i, log)

    if row["Status"] in ["InspiredAFix", "Skipped", "MovedOrRenamed"]:

        # Should contain a note
        if row["Notes"] == "":
            log_warning(
                filename,
                log,
                i,
                "Status " + row["Status"] + " should contain a note",
            )
        # If it contains a note, it should be a valid link
        else:
            check_notes(filename, row, i, log)

        # Should contain a PR Link
        if row["Status"] == "InspiredAFix":
            if row["PR Link"] == "":
                log_warning(
                    filename,
                    log,
                    i,
                    "Status " + row["Status"] + " should have a PR Link",
                )
            # If it contains a PR link, it should be a valid one
            else:
                check_pr_link(filename, row, i, log)

    if row["Status"] == "" and row["PR Link"] != "":
        check_pr_link(filename, row, i, log)
        log_std_error(
            filename, log, i, row,
            "Status should not be empty when a PR link is provided.")
Example #9
def check_scout_ids():
    """Checks unconsolidated TIMs in `tim_queue` to see which scouts have not sent data.

    This operation is done by `scout_id` -- if a match is missing data, then the scout_id will not
    have sent data for the match.
    Returns None -- warnings are issued directly through `utils.log_warning`.
    """
    # Load matches or matches and ids to ignore from ignore file
    if os.path.exists(MISSING_TIM_IGNORE_FILE_PATH):
        with open(MISSING_TIM_IGNORE_FILE_PATH) as ignore_file:
            items_to_ignore = yaml.load(ignore_file, Loader=yaml.Loader)
    else:
        items_to_ignore = []
    matches_to_ignore = [
        item['match_number'] for item in items_to_ignore if len(item) == 1
    ]
    tims = local_database_communicator.read_dataset(
        'processed.unconsolidated_obj_tim')
    matches = {}
    for tim in tims:
        match_number = tim['match_number']
        matches[match_number] = matches.get(match_number,
                                            []) + [tim['scout_id']]

    for match, scout_ids in matches.items():
        if match in matches_to_ignore:
            continue
        unique_scout_ids = []
        for id_ in scout_ids:
            if id_ in unique_scout_ids:
                if {
                        'match_number': match,
                        'scout_id': id_
                } not in items_to_ignore:
                    utils.log_warning(
                        f'Duplicate Scout ID {id_} for Match {match}')
            else:
                unique_scout_ids.append(id_)
        # Scout IDs are from 1-18 inclusive
        for id_ in range(1, 19):
            if id_ not in unique_scout_ids:
                if {
                        'match_number': match,
                        'scout_id': id_
                } not in items_to_ignore:
                    utils.log_warning(
                        f'Scout ID {id_} missing from Match {match}')
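
# Inferred from the code above: the ignore file is a YAML list of mappings
# with a match_number and, optionally, a scout_id. A sketch of writing one:
import yaml
items_to_ignore = [
    {'match_number': 5},                  # ignore the whole match
    {'match_number': 12, 'scout_id': 7},  # ignore one scout in one match
]
with open(MISSING_TIM_IGNORE_FILE_PATH, 'w') as ignore_file:
    yaml.dump(items_to_ignore, ignore_file)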
Example #10
def update_dataset(path, new_data, query, competition=utils.TBA_EVENT_KEY):
    """Updates a single dictionary within a dataset, if the query matches a dictionary within a
    dataset, replace the data given, if it does not exist create a new dictionary and add the query
    and data given by function parameter

    'path' is the path to the dataset, (e.g. 'raw.qr', 'processed.calc_obj_tim'), use dot notation.
    'data' is the data to either add, or to replace if the query matches in the dataset, must be a
    dictionary.
    'query' is a query to search through the given datset for the first occurence within a
    dictionary.
    'competition' is the tba event key.
    """
    dataset = read_dataset(path, competition, **query)
    new_document = {}

    # If the query matched a document
    if dataset:
        # New document gets the value of the first dictionary in dataset that matched queries
        new_document = dataset[0]
        for new_datum in new_data:
            # Add each new datum to the new dictionary
            new_document[new_datum] = new_data[new_datum]
        # Delete the dictionary from the dataset
        DB.competitions.update_one({'tba_event_key': competition},
                                   {'$pull': {
                                       path: query
                                   }})
    # If 'new_document' is still empty, the query did not match
    else:
        # Iterate through the keys in query
        for key in query:
            # Add them to the new document
            new_document[key] = query[key]
        # Iterate through new_data
        for datum in new_data:
            # Add the keys to the new document
            if datum in new_document and new_document[datum] != new_data[datum]:
                utils.log_warning('Query and new data are mismatched')
                return None
            new_document[datum] = new_data[datum]
    # Update the dictionary in the dataset
    DB.competitions.update_one({'tba_event_key': competition},
                               {'$push': {
                                   path: new_document
                               }})
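
# A minimal usage sketch: update (or insert) a calculated TIM for one team
# in one match. The field names and numbers are hypothetical.
update_dataset(
    'processed.calc_obj_tim',
    {'total_points': 42},
    {'team_number': 1678, 'match_number': 10},
)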
Example #11
def _annotate_files(self):
    """Annotate Source files: add acc_period/period for each source file.
       1. Annotate java source files, which have $JAVA_SRC_ROOT prefix.
       2. Annotate c++ source files.
    """
    dest_dir = self.config['annotate_dest_dir']
    for key in self.file_periods:
        from_path = key
        if not os.path.isfile(from_path):
            log_warning("can't find source file for path %s" % from_path)
            continue
        if from_path.startswith('/'):
            to_path = os.path.join(dest_dir, from_path[1:])
        elif is_windows() and ':\\' in from_path:
            to_path = os.path.join(dest_dir, from_path.replace(':\\', os.sep))
        else:
            to_path = os.path.join(dest_dir, from_path)
        is_java = from_path.endswith('.java')
        self._annotate_file(from_path, to_path, self.file_periods[key], is_java)
Example #12
def push_changes_to_db(local_change_list, server_restart):
    """Pushes changes to cloud database given the local changes.

    Returns 0 on success, None on failure.
    """
    # List of paths that should be directly added (do not point to a document to be updated)
    direct_push = ['raw.qr']
    # Stores PyMongo UpdateOne objects to be written in a bulk write
    write_operations = []
    for section_name, datafield in local_change_list.items():
        for datafield_name, changed_documents in datafield.items():
            path = '.'.join([section_name, datafield_name])
            # Cloud data should be replaced on server restart, so all existing data should be
            # removed so no outdated data remains
            if server_restart:
                write_operations.append(
                    pymongo.UpdateOne({'tba_event_key': utils.TBA_EVENT_KEY},
                                      {'$set': {
                                          path: []
                                      }}))
            if path in direct_push and changed_documents:
                write_operations.append(
                    pymongo.UpdateOne(
                        {'tba_event_key': utils.TBA_EVENT_KEY},
                        {'$push': {
                            path: {
                                '$each': changed_documents
                            }
                        }}))
                continue
            # Join list returned by update_array with existing write operations
            write_operations.extend(update_array(path, changed_documents))
    # Write changes to database
    # Ordered must be true because we pull outdated data before pushing new data
    # Throws error on lost connection
    try:
        if write_operations:
            CLOUD_DB.competitions.bulk_write(write_operations, ordered=True)
    except pymongo.errors.AutoReconnect:
        utils.log_warning('Cloud Database Write Timeout.')
        return None
    return 0
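
# A minimal usage sketch: push one hypothetical QR string with no server
# restart. The change-list layout mirrors get_empty_modified_data() below.
changes = {'raw': {'qr': ['+A1B2C3']}}
if push_changes_to_db(changes, server_restart=False) is None:
    utils.log_warning('Push failed')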
Example #14
def install_apk(device_serial):
    """Installs chosen APK to either phone or tablet depending on user input.

    Convert serial number to human-readable format.
    """
    device_name = adb_communicator.DEVICE_SERIAL_NUMBERS[device_serial]
    print(f'Loading {LOCAL_FILE_PATH} onto {device_name}')
    # Calls 'adb push' command, which uses the Android Debug Bridge (ADB) to send the APK file
    # The -s flag specifies the device_serial by its serial number
    # return_output=True returns the output of adb
    utils.log_info(f'APK install started on {device_serial}')
    validate = utils.run_command(
        f'adb -s {device_serial} install -r {LOCAL_FILE_PATH}',
        return_output=True)
    # If .apk is loaded successfully, ADB will output a string containing 'Success'
    if 'Success' in validate:
        DEVICES_WITH_APK.append(device_serial)
        print(f'Loaded {LOCAL_FILE_PATH} onto {device_name}')
        utils.log_info(f'APK successfully installed on {device_serial}')
    else:
        utils.log_warning(
            f'Failed Loading {LOCAL_FILE_PATH} onto {device_name}.')
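
# A minimal usage sketch, assuming a serial number that exists in
# adb_communicator.DEVICE_SERIAL_NUMBERS (the value here is made up).
install_apk('HA0XSQFL')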
Example #15
import pymongo
# Internal imports
import adb_communicator
import calculate_obj_team
import calculate_obj_tims
import calculate_tba_tims
import decompressor
import local_database_communicator
import qr_code_uploader
import tba_communicator
import utils

try:
    import cloud_database_communicator
except pymongo.errors.ConfigurationError:
    utils.log_warning('Cloud database import failed. No internet.')


def get_empty_modified_data():
    """Returns empty modified data field."""
    modified_data = {
        'raw': {
            'qr': [],
            'obj_pit': [],
            'subj_pit': []
        },
        'processed': {
            'unconsolidated_obj_tim': [],
            'calc_obj_tim': [],
            'calc_tba_tim': [],
            'subj_aim': [],
Example #16
if device not in DEVICES_WITH_SCHEDULE and SEND_MATCH_SCHEDULE:
    print(
        f'\nAttempting to load {MATCH_SCHEDULE_LOCAL_PATH} onto {device_name}'
    )
    if adb_communicator.push_file(device,
                                  MATCH_SCHEDULE_LOCAL_PATH,
                                  MATCH_SCHEDULE_TABLET_PATH,
                                  validate_file):
        DEVICES_WITH_SCHEDULE.add(device)
        print(
            f'Loaded {MATCH_SCHEDULE_LOCAL_PATH} onto {device_name}'
        )
    else:
        # Give both serial number and device name in warning
        utils.log_warning(
            f'FAILED sending {MATCH_SCHEDULE_LOCAL_PATH} to {device_name} ({device})'
        )
if device not in DEVICES_WITH_LIST:
    print(
        f'\nAttempting to load {TEAM_LIST_LOCAL_PATH} onto {device_name}'
    )
    if adb_communicator.push_file(device, TEAM_LIST_LOCAL_PATH,
                                  TEAM_LIST_TABLET_PATH,
                                  validate_file):
        DEVICES_WITH_LIST.add(device)
        print(f'Loaded {TEAM_LIST_LOCAL_PATH} onto {device_name}')
    else:
        # Give both serial number and device name in warning
        utils.log_warning(
            f'FAILED sending {TEAM_LIST_LOCAL_PATH} to {device_name} ({device})'
        )
Example #17
def update_calc_tba_tims(tims):
    """Returns a list of the TIM calcs that require TBA data to run.

    Reads from the `calc_tba_tim` schema file to get data points that are pulled from TBA.
    """
    # Pull TBA data
    tba_api_url = f'event/{utils.TBA_EVENT_KEY}/matches'
    tba_data = local_database_communicator.select_tba_cache(
        tba_api_url)[tba_api_url]['data']
    # Filter out matches such that we only have quals matches
    # Create dictionary of match_number: match data to allow easier access
    quals_matches = {
        data['match_number']: data
        for data in tba_data if data['comp_level'] == 'qm'
    }
    full_tim_refs = []

    # Get team number and match number for TIMs referenced by `tims`
    # This is needed because TIMs can be passed just using match number
    for tim in tims:
        if 'team_number' in tim:
            # Ref is as specific as possible, refers to one team in one match
            if 'match_number' in tim:
                full_tim_refs.append(tim)
            else:  # Ref refers to all of a team's matches
                for match_num, match_data in quals_matches.items():
                    if tim['team_number'] in get_team_list_from_match(
                            match_data):
                        full_tim_refs.append({
                            'team_number': tim['team_number'],
                            'match_number': match_num
                        })
        # Ref refers to all TIMs from a match
        elif 'match_number' in tim:
            if tim['match_number'] in quals_matches:
                for team in get_team_list_from_match(
                        quals_matches[tim['match_number']]):
                    full_tim_refs.append({
                        'team_number': team,
                        'match_number': tim['match_number']
                    })
            else:
                utils.log_warning(
                    f'Cannot find TBA data from q{tim["match_number"]} in cache'
                )
        else:
            utils.log_warning(f'Invalid TBA TIM ref {tim}')

    output_data = []
    for ref in full_tim_refs:
        out = copy.deepcopy(ref)
        if quals_matches[out['match_number']]['score_breakdown'] is None:
            utils.log_warning(
                f'TBA TIM Calculation on {out["match_number"]} missing match data'
            )
            continue
        # Get robot number (e.g. the 1 in initLineRobot1) and alliance color for the TIM
        number_result = utils.catch_function_errors(
            get_robot_number_and_alliance, out['team_number'],
            quals_matches[out['match_number']])
        # `utils.catch_function_errors` returns `None` for errors, which must be handled before
        # assigning variables to function results.
        if number_result is None:
            continue
        robot_number, alliance = number_result
        for key, values in TBA_SCHEMA['tba'].items():
            filters = copy.deepcopy(values)
            type_ = filters.pop('type')
            if type_ != 'bool':
                utils.log_warning(f'Type {type_} not recognized, skipping...')
                break
            # Iterate over a copy because the loop mutates `filters`
            for name, correct_value in list(filters.items()):
                # Detect entries like initLineRobot, which need a robot number after them
                if name.endswith('Robot'):
                    del filters[name]
                    filters[f'{name}{robot_number}'] = correct_value
            result = utils.catch_function_errors(
                calc_tba_bool, quals_matches[out['match_number']], alliance,
                filters)
            if result is not None:
                out[key] = result
            else:
                break
        else:
            output_data.append(out)
    return output_data
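
# A minimal usage sketch: calculate TBA TIMs for one full match and for every
# match one team played. The match and team numbers are hypothetical.
calcs = update_calc_tba_tims([{'match_number': 10}, {'team_number': 1678}])
print(f'{len(calcs)} TIMs calculated')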