Example #1
    def build_workout_switch_tables(self):
        if self._build_cur_workout.empty or len(self._build_tabs) == 0:
            return

        # Bail out if the current lift has no tab; otherwise find its index
        if self._build_cur_lift not in self._build_tabs:
            return
        lift_name_tab = list(self._build_tabs).index(self._build_cur_lift)

        self.build_tabs.setCurrentIndex(lift_name_tab)

        if self._build_cur_lift in self._build_tabs:
            try:
                model = self._build_tabs[self._build_cur_lift].model()
                self.build_save_changes(model.dataFrame())
                self._build_cur_workout_view = self._build_cur_workout[self._build_cur_workout.Lift == self._build_cur_lift]
                self._build_cur_workout_view = self._build_cur_workout_view[['Lift', 'Weight', 'Reps', 'RPE', 'e1RM']]
                model.setDataFrame(self._build_cur_workout_view)
                model.enableEditing(True)
                model.dataChanged.emit()
                model.layoutChanged.emit()
            except Exception as err:
                log_err(err, "Couldn't emit datachanged signals to table model for {}".format(self._build_cur_lift))
Example #2
    def loop_on_keys(keys):
        for key in keys:
            if key == 'times':
                target['times'] = []

                times1 = cl1.pop(key)
                times2 = cl2.pop(key)

                if len(times1) != len(times2):
                    log_err(
                        f'"{key}" is different for class {CRN}: "{len(times1)}" vs. "{len(times2)}"'
                    )
                    continue

                for time1, time2 in zip(times1, times2):
                    target['times'].append(
                        merge_dicts(time1.copy(), time2.copy(), allowed=allowed))

                continue

            val1 = cl1.pop(key, None)
            val2 = cl2.pop(key, None)

            if val1 is not None and val2 is not None:
                if val1 != val2 and key not in allowed:
                    log_warn(
                        f'"{key}" is different for class {CRN}: "{val1}" vs. "{val2}"'
                    )
                target[key] = val1
            else:
                target[key] = val1 if val1 is not None else val2
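Note that loop_on_keys closes over cl1, cl2, target, CRN, and allowed, so it only makes sense inside an enclosing merge function. A minimal sketch of what that wrapper presumably looks like, inferred from the recursive merge_dicts call above (the signature is an assumption, not confirmed by this example):

def merge_dicts(cl1, cl2, allowed=()):
    # Hypothetical wrapper: merge two class dicts, warning on mismatches
    # outside `allowed`. CRN is read before popping so log lines can cite it.
    cl1, cl2 = cl1.copy(), cl2.copy()
    CRN = cl1.get('CRN') or cl2.get('CRN')
    target = {}

    def loop_on_keys(keys):
        ...  # body as in the example above

    loop_on_keys(list(cl1.keys()))  # list() copy: the loop pops from cl1/cl2
    loop_on_keys(list(cl2.keys()))  # pick up keys present only in cl2
    return target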
Example #3
 def save_bw_temp(self):
     try:
         bak_num = len(self.__bw_bak_files)
         bak_file = tempfile.mkstemp("{}.{}".format(bak_num, "xlsx"), "{}_Tmp_".format(self.bw_name))[1]
         self.weight_log.to_excel(bak_file)
         self.__bw_bak_files.append(bak_file)
     except OSError as e:
         log_err(e, "Could not create temp Weight Log file".format(self.__bw_bak_files[-1]))
Example #4
 def add_bodyweight_entry(self, date, high=None, low=None, bf=None, comments=None):
     try:
         date = pd.Timestamp(date)
     except ValueError as err:
         log_err(err, 'Error converting {} to Timestamp'.format(date))
         return
     for col, val in {'High': high, 'Low': low, 'BF_Per': bf, 'Comments': comments}.items():
         self.weight_log.loc[date, col] = val
     self.calc_weight_data()
Example #5
 def save_wl_temp(self):
     try:
         bak_num = len(self.__wl_bak_files)
         bak_file = tempfile.mkstemp("{}.{}".format(bak_num, "xlsx"), "{}_Tmp_".format(self.wl_name))[1]
         self.workout_log.to_excel(bak_file)
         self.__wl_bak_files.append(bak_file)
     except OSError as e:
         log_err(e, "Could not create Workout Log backup file: {}".format(self.__wl_bak_files[-1]))
Example #6
 def save_lt_temp(self):
     try:
         bak_num = len(self.__lt_bak_files)
         bak_file = tempfile.mkstemp("{}.{}".format(bak_num, "xlsx"), "{}_Tmp_".format(self.lt_name))[1]
         self.lift_types.to_excel(bak_file)
         self.__lt_bak_files.append(bak_file)
     except OSError as e:
         log_err(e, "Could not create temp Lift Type file: {}".format(self.__lt_bak_files[-1]))
Example #7
def _parse_offset(offset):
    """ split the "period" input into {multiplier} {type}, e.g. {3} {days} """
    try:
        per_mult, per_freq = re.match(r'(\d+)?([dwmy])', offset, re.IGNORECASE).groups()
        if per_mult:
            per_mult = int(per_mult)
        return per_mult, per_freq.upper()
    except (AttributeError, ValueError, re.error) as e:
        log_err(e, "Invalid period passed: {}".format(offset))
Example #8
 def get_slot(self, lift):
     """Returns the slot (i.e. 'upper' or 'lower' body exercise) of the given string"""
     try:
         if self.is_valid_lift(lift):
             return self.lift_types.Slot[self.lift_types.Lift == lift].values[0]
         else:
             raise ValueError("'{}' is not a valid lift".format(lift))
     except ValueError as e:
         log_err(e, "Error retrieving slot for lift '{}'".format(lift))
Example #9
 def get_category(self, lift):
     """Returns the category (e.g. 'Squat', 'Bench', etc) of a given string"""
     try:
         if self.is_valid_lift(lift):
             return self.lift_types.Category[self.lift_types.Lift == lift].values[0]
         else:
             raise ValueError("'{}' is not a valid lift".format(lift))
     except ValueError as e:
         log_err(e, "Error retrieving category for lift '{}'".format(lift))
Example #10
 def save(self, data=None):
     if data is None:
         data = self.data
     else:
         self.data = data.copy()
     try:
         data.to_sql('log', self.__sql_engine, if_exists='replace')
     except Exception as e:
         log_err(e, 'Error saving dataframe')
Example #11
 def __import_weight_log(filename, sheet_name=None):
     # TODO: refactor all accessory code into this one call (e.g. calc_)
     try:
         if not sheet_name:
             sheet_name = 0
         weight_log = pd.read_excel(filename, sheet_name=sheet_name, header=0, index_col="Date", parse_dates=[0])
         log_message('Loaded {} records from weight log'.format(len(weight_log)))
         return weight_log
     except Exception as e:
         log_err(e, "Could not import {} database file {}".format(sheet_name, filename))
Example #12
 def __import_lift_types(filename, sheet_name=None):
     # TODO: refactor all accessory code into this one call (e.g. calc_)
     try:
         if not sheet_name:
             sheet_name = 0
         lift_types = pd.read_excel(filename, sheet_name=sheet_name, header=0)
         log_message('Loaded {} lifts from lift types'.format(len(lift_types)))
         return lift_types
     except Exception as e:
         log_err(e, "Could not import {} from database file {}".format(sheet_name, filename))
Example #13
 def plot_ts(self, data, **kwargs):
     # TODO: Add handling for DataFrames
     if isinstance(data, pd.Series):
         try:
             dates = [int(datetime.datetime(x.year, x.month, x.day).timestamp()) for x in data.index]
             self.plot(x=dates, y=data.values, **kwargs)
         except Exception as err:
             log_err(err, "{}\nCould not plot time series".format(data))
     else:
         self.plot(x=data[0], y=data[1], **kwargs)
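A usage sketch, assuming self.plot is a pyqtgraph-style call that accepts x/y arrays (the widget class isn't shown in these examples, so the receiver below is hypothetical):

# Hypothetical usage: plot a weekly bodyweight series; the datetime index is
# converted to unix timestamps so it can feed a numeric x-axis
series = pd.Series([200.5, 199.8, 199.1],
                   index=pd.to_datetime(['2021-01-04', '2021-01-11', '2021-01-18']))
widget.plot_ts(series)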
Example #14
    def __load_data(self):
        self.data = None
        try:
            self.data = pd.read_sql_table('log', self.__sql_engine, index_col='Date', parse_dates=['Date'])
        except Exception as e:
            log_err(e, "Could not read database file")

        # See if the workout log contains data; if not, prepare it to take some
        try:
            log_message('Loaded {} records from db'.format(len(self.data)))
        except TypeError:
            self.__build_dataframe()
Example #15
    def __init__(self, filename):
        if os.path.isfile(filename):
            self.__backup_file(filename)
        else:
            log_message('Database file not found: {}\n'.format(filename) + 'Building a new one from scratch')
            self.__build_dataframe()

        try:
            self.__sql_engine = create_engine('sqlite:///' + filename)
        except Exception as e:
            log_err(e, 'Could not create database engine with: {}'.format(filename))
        self.__load_data()
Example #16
 def save_to_disk(self, filename=None):
     """Saves all data to disk"""
     if not filename:
         filename = self.filename
     try:
         with pd.ExcelWriter(filename) as writer:
             self.workout_log.to_excel(writer, sheet_name=self.wl_name)
             self.weight_log.to_excel(writer, sheet_name=self.bw_name)
             self.lift_types.to_excel(writer, sheet_name=self.lt_name)
     except OSError as e:
         log_err(e, "Error saving database file!")
Example #17
def parse(content):
    '''
    Parse the content from the request into a dict of classes keyed by CRN
    :param content: (html) The html containing the courses
    '''
    soup = BeautifulSoup(content, 'html5lib')

    tables = soup.find_all('table', {'class': 'TblCourses'})
    classes = {}

    for t in tables:
        rows = t.find_all('tr', {'class': 'CourseRow'})

        for tr in rows:
            cols = tr.find_all(lambda tag: tag.name == 'td')

            if cols:
                # The first <td> is a field that is not relevant to us
                # it is either empty or contains a "flag" icon
                cols.pop(0)

                for i, c in enumerate(cols):
                    a = c.find('a')
                    cols[i] = (a.get_text()
                               if a else cols[i].get_text()).strip()

                try:
                    data = dict(zip(HEADERS, cols))

                    if data['CRN'] in classes:
                        continue

                    data['status'] = data['status'].lower()

                    try:
                        data = seatInfoSchema.load(data)
                    except ValidationError as e:
                        log_err('Marshmallow validation failed',
                                details={
                                    'messages': e.messages,
                                    'class': data
                                })
                        continue

                    classes[data['CRN']] = data

                except KeyError:
                    continue

    return classes
Example #18
 def merge_workout_data(self, data, date=None):
     if not date:
         if isinstance(data.index[0], pd.Timestamp):
             date = data.index
         elif 'Date' in data:
             date = data.Date
         else:
             log_err(description='No date found in data')
             return
     data['Date'] = date
     self.workout_log.drop(date, inplace=True, errors='ignore')
     self.workout_log.reset_index(inplace=True)
     self.workout_log = self.workout_log.merge(data, how='outer').set_index('Date')
     self.calc_workout_data()
Example #19
 def _drop_set_selected(self, index):
     if not index.isValid():
         return
     try:
         drop_set_str = index.data().split('(')[0].strip()
         self.drop_set.setText(drop_set_str)
         parser = re.match(r'(\d+)x(\d+)@(\d+(?:\.\d+)?)', drop_set_str)
         self._drop_weight, self._drop_reps, self._drop_rpe = parser.groups()
         self._drop_weight = int(self._drop_weight)
         self._drop_reps = int(self._drop_reps)
         self._drop_rpe = round(float(self._drop_rpe) * 2.0) / 2.0
         self.values_changed.emit()
     except (ValueError, TypeError, AttributeError) as err:
         log_err(err, "Couldn't parse drop_set from RPE table: {}".format(index.data()))
Example #20
def _parse_RPE(line):
    RPE, comment = None, None
    try:
        RPE, comment = rpe_re.match(line).groups()
        if RPE:
            try:
                RPE = round(float(RPE) * 2.0) / 2.0  # snap to the nearest 0.5
                if not (6.5 <= RPE <= 10.0):
                    RPE = None
            except ValueError as e:
                log_err(e, "Invalid RPE passed {}".format(RPE))
    except AttributeError:
        pass  # No biggie, the line didn't match our criteria
    if comment:
        comment = comment.strip(' ()')
    return RPE, comment
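rpe_re isn't defined in these examples; the check below assumes a hypothetical pattern that captures an RPE number plus an optional parenthesized comment. Note that round(x * 2.0) / 2.0 snaps the value to the nearest 0.5:

import re

# Hypothetical pattern, for illustration only
rpe_re = re.compile(r'(?:RPE\s*)?(\d+(?:\.\d+)?)\s*(\(.*\))?')
print(_parse_RPE('RPE 8.7 (felt heavy)'))  # -> (8.5, 'felt heavy')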
Example #21
def run_advanced_scraper(**kwargs):
    try:
        scraper = AdvancedScraper(
            ssb_url=SSB_URL,
            db_dir=DB_DIR,
            cache_dir=CACHE_DIR,
            hooks=FHDAScraperHooks,
            login=login,
            trace=True,
            **kwargs,
        )
        scraper.run()

    except KeyboardInterrupt:
        log_err('Aborted advanced schedule scraper', start='\n')
Example #22
    def save_classes(self, db, depts, classes):
        db_depts = []
        db_courses = []
        db_classes = []

        depts = {k.replace(' ', ''): v for k, v in depts.items()}

        for dept, t in classes.items():
            db_depts.append({
                'id': dept,
                'name': depts[dept],
            })

            for course, section in t.items():
                course_classes = []
                course_titles = set()

                for cl in section.values():
                    try:
                        data = classDataSchema.load(cl)
                        classTimes = [
                            classTimeSchema.load(time) for time in cl['times']
                        ]
                    except MarshValidationError as e:
                        print(e, cl)
                        continue

                    data['times'] = classTimes
                    db_classes.append(data)
                    course_titles.add(data['title'])
                    course_classes.append(data['CRN'])

                if len(course_titles) > 1:
                    log_err(
                        f'Multiple course titles for "{dept} {course}" {str(course_titles)}'
                    )

                db_courses.append({
                    'dept': dept,
                    'course': course,
                    'title': course_titles.pop(),
                    'classes': course_classes
                })

        db.drop_tables()
        db.table('departments').insert_multiple(db_depts)
        db.table('courses').insert_multiple(db_courses)
        db.table('classes').insert_multiple(db_classes)
Example #23
def run_public_schedule_scraper(**kwargs):
    if not kwargs.get('db_dir'):
        kwargs['db_dir'] = DB_DIR

    try:
        scraper = ScheduleScraper(
            ssb_url=SSB_URL,
            cache_dir=CACHE_DIR,
            hooks=FHDAScraperHooks,
            # login=login,
            trace=True,
            **kwargs,
        )
        scraper.run()

    except KeyboardInterrupt:
        log_err('Aborted public schedule scraper', start='\n')
Example #24
    def restore_from_backup(self, backup_filename=None):
        """Deletes the current database file and restores from a previously saved version"""
        if not backup_filename:
            backup_filename = self.filename + '.bak'
        if os.path.isfile(backup_filename):
            try:
                shutil.move(backup_filename, self.filename)
            except OSError as e:
                log_err(e, "Cannot restore from backup file ({})".format(backup_filename))
        else:
            log_err("Could not find backup file ({})".format(backup_filename))

        self.__load_data()

        # TODO: Get rid of these calls
        self.calc_workout_data()
        self.calc_weight_data()
Example #25
def parse_sets(sets, tgt_e1RM=None):
    res = []
    try:
        weight, reps, mult, rpes = re.match(r'(\d+(?:\.\d+)?)?x(\d+)x?(\d+)?@?(.+)?', sets.strip(), re.IGNORECASE).groups()
        if weight:
            weight = round(float(weight))

        reps = int(reps)

        if mult:
            mult = int(mult)
        else:
            mult = 1

        if rpes:
            rpes = rpes.split(',')
            if len(rpes) == 1:
                rpe = round(float(rpes[0].strip()) * 2.0) / 2.0
                rpe_vals = None
            else:
                rpe_vals = []
                for rpe in rpes:
                    rpe = round(float(rpe.strip()) * 2.0) / 2.0
                    if not 6.5 <= rpe <= 10:
                        rpe = None
                    rpe_vals.append(rpe)
        else:
            rpe = None
            rpe_vals = None
        # TODO: Find a way to integrate NSCA tables into this calc
        if rpe_vals and tgt_e1RM:  # given set of x5@7,8,9 or 345x5@7,8,9 -> calculate the weights to use
            for rpe in rpe_vals:
                if (1 <= reps <= 10) and (6.5 <= rpe <= 10.0):
                    percent_mul = rpe_table.loc[reps, rpe]
                    tgt_weight = tgt_e1RM * percent_mul
                    res.append([tgt_weight, reps, rpe])
        elif weight:
            res = [[weight, reps, None] for _ in range(mult - 1)]
            res.append([weight, reps, rpe])
        return res
    except (AttributeError, re.error) as e:
        log_err(e, "Invalid set: {}".format(set))

    return res
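Worked examples of the grammar this regex accepts, roughly {weight}x{reps}[x{sets}][@{rpe,...}]:

parse_sets('345x5@9')    # -> [[345, 5, 9.0]]
parse_sets('225x5x3')    # -> [[225, 5, None]] * 3
parse_sets('x5@7,8,9', tgt_e1RM=400)  # weights back-calculated from the RPE table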
Example #26
def merge_dbs(final: TinyDB, first: TinyDB, second: TinyDB, allowed):
    classes1 = {doc['CRN']: doc for doc in first.table('classes').all()}
    classes2 = {doc['CRN']: doc for doc in second.table('classes').all()}

    classes = []

    for CRN in classes1.keys():
        cl1 = classes1[CRN].copy()
        cl2 = classes2.get(CRN)

        if cl2:
            classes.append(merge_dicts(cl1, cl2.copy(), allowed=allowed))
        else:
            log_err(f'Class {CRN} was only found in one DB!')
            classes.append(cl1)

    final.drop_tables()
    final.table('classes').insert_multiple(classes)
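A usage sketch with hypothetical file names; allowed lists the keys that may legitimately differ between the two snapshots (the key names here are made up):

from tinydb import TinyDB

merge_dbs(TinyDB('merged.json'), TinyDB('da.json'), TinyDB('fh.json'),
          allowed={'seats', 'wait_seats'})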
Example #27
def from_fitnotes(from_file, to_file):
    try:
        fitnotes = pd.read_csv(from_file, index_col=0, parse_dates=True)
        new_log = pd.DataFrame(columns=Backend.excel_format[Backend.wl_name])
        new_log = new_log.set_index('Date')
        new_log.Lift = fitnotes.Exercise
        new_log.Weight = fitnotes["Weight (lbs)"]
        new_log.Reps = fitnotes.Reps
        idx = 0
        for date, line in fitnotes.Comment.items():
            if pd.notna(line):
                RPE, comment = _parse_RPE(line)
                new_log.RPE.iloc[idx] = RPE
                new_log.Comments.iloc[idx] = comment
            idx += 1
        # TODO: parse comments to RPE's
        print(new_log.tail(10))
        new_log.to_excel(to_file)
        log_message("Imported {} lines from {} to {}".format(len(new_log), from_file, to_file))
    except Exception as e:
        log_err(e, "Error import from FitNotes")
Example #28
 def log_add_entry(self):
     try:
         date = pd.Timestamp(self.log_date.date().toPyDate())
         lift = self.log_lift.currentText()
         if not self.db.is_valid_lift(lift):
             raise ValueError('{} is not a valid lift'.format(lift))
         raw_sets = self.log_sets.text().split(',')
         parsed_sets = []
         for raw_set in raw_sets:
             parsed_sets.extend(analysis.parse_sets(raw_set.strip()))
         comments = self.log_comments.text()
         for parsed_set in parsed_sets:
             weight, reps, rpe = parsed_set
             self.db.add_set(date, lift, weight, reps, rpe, comments)
         self.db.calc_workout_data()
         self.log_date_changed(date - pd.Timedelta(days=1))
         self.log_date_changed(date)
         self.wl_data_changed.emit()
         self.log_lift.setCurrentText('')
         self.log_sets.setText('')
     except (ValueError, TypeError) as err:
         log_err(err, "Couldn't parse log entry")
Example #29
    def an_add_analysis(self):
        data = self.db.workout_log
        group = self.an_type.currentText()
        val = self.an_value.currentText()
        try:
            if group == 'Slot':
                data = data[data.Slot == val]
            elif group == 'Category':
                data = data[data.Category == val]
            elif group == 'Lift':
                data = data[data.Lift == val]
            else:
                return
        except (ValueError, TypeError) as err:
            log_err(err, "Invalid analysis type selected: {}, {}".format(group, val))
            return

        analysis_funcs = {'E1RMs': analysis.get_max_e1RM, 'Volume': analysis.get_volume,
                          'Norm V': analysis.calc_normalized_tonnage, 'Fatigue': analysis.get_fatigue}
        selected_func = analysis_funcs[self.an_analysis.currentText()]
        analyzed_data = selected_func(data)
        title = '{}: {} -> {}'.format(self.an_analysis.currentText(), group, val)

        if self.an_mov_avg.isChecked():
            period = self.an_period.value()
            if 'e1rm' in selected_func.__name__:
                analyzed_data = analyzed_data.resample('1D', label='right').max()
            elif 'vol' in selected_func.__name__ or 'fatigue' in selected_func.__name__:
                analyzed_data = analyzed_data.resample('1D', label='right').sum()
                analyzed_data = analyzed_data.fillna(0)

            analyzed_data = analyzed_data.rolling(period, min_periods=1).mean()

            if 'fatigue' in selected_func.__name__:
                analyzed_data *= 7.0
            title += ' ({} day avg)'.format(period)

        self.an_data[title] = analyzed_data
        self.analysis_data_changed.emit()
Example #30
 def make_set(self, lift, weight, reps, date, rpe=None, comments=None):
     res = pd.Series(index=self.excel_format[self.wl_name], dtype=object)
     try:
         if self.is_valid_lift(lift):
             res.Lift = lift
         else:
             raise ValueError('Lift "{}" is not a valid lift'.format(lift))
         if date:
             res.Date = pd.Timestamp(date)
         res.Slot = self.get_slot(lift)
         res.Category = self.get_category(lift)
         res.Weight = int(weight)
         res.Reps = int(reps)
         if rpe:
             res.RPE = round(float(rpe) * 2.0) / 2.0
         else:
             res.RPE = None
         res.e1RM = analysis.calc_e1RM(res.Lift, res.Weight, res.Reps, res.RPE)
         res.Volume = res.Weight * res.Reps
         res.Comments = comments
     except (ValueError, TypeError) as err:
         log_err(err, 'Invalid data provided for set:\n{}'.format((lift, weight, reps, rpe, comments)))
     return res
Example #31
    def __check_for_blanks(self):
        wl_blanks = self.workout_log[['Date', 'Lift', 'Weight', 'Reps']].isnull()
        if wl_blanks.any().any():
            err = ValueError("Missing values for core data in workout log")
            log_err(err, str(wl_blanks))
            raise err

        bw_blanks = self.weight_log['Date'].isnull()
        if bw_blanks.any():
            err = ValueError("Missing values for core data in bodyweight log")
            log_err(err, str(bw_blanks))
            raise err

        lift_blanks = self.lift_types.isnull()
        if lift_blanks.any().any():
            err = ValueError("Missing values for core data in lifts table")
            log_err(err, str(lift_blanks))
            raise err
Example #32
 def log_cal_selection_changed(self):
     try:
         date = pd.Timestamp(self.log_cal.selectedDate().toPyDate())
         self.log_date_changed(date)
     except (ValueError, TypeError) as err:
         log_err(err, "Couldn't convert log_cal's value to a date... skipping")
Example #33
def parse(content, db):
    '''
    Parse takes the content from the request and then populates the database with the data
    :param content: (html) The html containing the courses
    :param db: (TinyDB) the current database
    '''
    soup = BeautifulSoup(content, 'html5lib')

    tables = soup.find_all('table', {'class': 'TblCourses'})
    for t in tables:
        # TODO: verify whether replacing spaces yields correct dept names in all scenarios
        dept = t['dept'].replace(' ', '')
        dept_desc = t['dept-desc']

        db.table('departments').insert({
            'id': dept,
            'name': dept_desc,
        })

        rows = t.find_all('tr', {'class': 'CourseRow'})
        s = defaultdict(lambda: defaultdict(list))
        for tr in rows:
            cols = tr.find_all(lambda tag: tag.name == 'td')

            if cols:
                # The first <td> is a field that is not relevant to us
                # it is either empty or contains a "flag" icon
                cols.pop(0)

                for i, c in enumerate(cols):
                    a = c.find('a')
                    cols[i] = (a.get_text()
                               if a else cols[i].get_text()).strip()

                try:
                    parsed_course = parse_course_str(cols[0])
                    key = parsed_course['course']
                    section = parsed_course['section']
                    data = dict(zip(HEADERS, cols))

                    if parsed_course['dept'] != dept:
                        raise ValidationError(
                            'Departments do not match',
                            f"'{parsed_course['dept']}' != '{dept}'")

                    data['dept'] = dept
                    data['course'] = key
                    data['section'] = section
                    data['status'] = data['status'].lower()
                    data['units'] = data['units'].lstrip()

                    try:
                        data = interimClassDataSchema.load(data)
                    except MarshValidationError as e:
                        print(e.messages, data)
                        continue

                    crn = data['CRN']
                    if s[key][crn]:
                        comb = set(s[key][crn][0].items()) ^ set(data.items())
                        if not comb:
                            continue

                    s[key][crn].append(data)
                except KeyError:
                    continue
                except ValidationError as e:
                    log_err('Unable to parse course - data validation failed',
                            details={
                                'message': e.message,
                                'details': e.details,
                                'course': cols,
                            })
                    print('\n')
                    continue

        for course, section in s.items():
            db.table('courses').insert({
                'dept': dept,
                'course': course,
                'classes': list(section.keys())
            })

            for cl in section.values():
                data = classDataSchema.load(cl[0])
                classTime = [classTimeSchema.load(c) for c in cl]

                check_integrity(cl, data, classTime)

                data['times'] = classTime
                db.table('classes').insert(data)
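The duplicate check above (set(s[key][crn][0].items()) ^ set(data.items())) leans on symmetric difference being empty exactly when two flat dicts are equal; a standalone illustration with made-up rows:

a = {'CRN': '40123', 'status': 'open'}
b = {'CRN': '40123', 'status': 'open'}
c = {'CRN': '40123', 'status': 'full'}
assert not (set(a.items()) ^ set(b.items()))  # identical row -> skipped
assert set(a.items()) ^ set(c.items())        # changed row -> kept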
Example #34
 def __backup_file(filename):
     """Backup the old database file in case anything gets corrupted during runtime"""
     try:
         shutil.copy(filename, filename + '.bak')
     except OSError as e:
         log_err(e, 'Error backing up DB File')
Example #35
    try:
        for ssb_campus in ['MC', 'WVC']:
            scraper = ScheduleScraper(
                ssb_url=SSB_URL,
                db_dir=DB_DIR,
                cache_dir=CACHE_DIR,
                ssb_campus=ssb_campus,
                hooks=WVMScraperHooks,

                # max_terms=4,
                # use_cache=False,
                # start_term='201231',
                trace=True,
            )
            scraper.run()

    except KeyboardInterrupt:
        log_err('Aborted', start='\n')

    db_files = list_dbs(DB_DIR, prefix='sched_')
    termdbs = []

    for filepath in db_files:
        matches = re.search(r'sched_(\w{2,3})_([0-9]{6})_database\.json$',
                            filepath)
        if matches and matches.groups():
            campus, term = matches.groups()
            termdbs.append({'campus': campus, 'code': term})

    with open(join(DB_DIR, 'metadata.json'), 'w') as outfile:
        json.dump({'terms': termdbs}, outfile)
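For reference, the filename shape the metadata pass expects (the path below is a hypothetical example):

m = re.search(r'sched_(\w{2,3})_([0-9]{6})_database\.json$',
              '/dbs/sched_MC_202231_database.json')
print(m.groups())  # -> ('MC', '202231')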