def delete_comment(global_config, competition, match_str, tag):
    session = DbSession.open_db_session(global_config["debriefs_db_name"] + global_config["this_season"])

    DebriefDataModel.deleteDebriefCommentsByTag(session, competition, match_str, tag)
    session.commit()
    return "/debrief/%s/%s" % (competition, match_str)
def process_match_comment_form(global_config, form, competition, match_str, username):
    global_config["logger"].debug("Process Match Comment Form: %s:%s", competition, match_str)

    session = DbSession.open_db_session(global_config["debriefs_db_name"] + global_config["this_season"])

    comment = form[match_comment_label].value
    timestamp = str(int(time.time()))

    DebriefDataModel.addOrUpdateDebriefComment(session, int(match_str), competition, username, timestamp, comment)
    session.commit()
    return "/debrief/%s" % match_str
def isFileProcessed(session, db_name, filepath):
    if db_name == global_config['db_name']:
        is_processed = DataModel.isFileProcessed(session, filepath)
    elif db_name == global_config['issues_db_name']:
        is_processed = IssueTrackerDataModel.isFileProcessed(session, filepath)
    elif db_name == global_config['debriefs_db_name']:
        is_processed = DebriefDataModel.isFileProcessed(session, filepath)
    else:
        # unrecognized database name; treat the file as not yet processed
        is_processed = False
    return is_processed
def isFileProcessed(global_config, session, db_name, filepath):
    if db_name == (global_config["db_name"] + global_config["this_season"]):
        is_processed = DataModel.isFileProcessed(session, filepath)
    elif db_name == (global_config["issues_db_name"] + global_config["this_season"]):
        is_processed = IssueTrackerDataModel.isFileProcessed(session, filepath)
    elif db_name == (global_config["debriefs_db_name"] + global_config["this_season"]):
        is_processed = DebriefDataModel.isFileProcessed(session, filepath)
    else:
        # unrecognized database name; treat the file as not yet processed
        is_processed = False
    return is_processed
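# A minimal sketch (not part of the original module) of how a get_files()-style helper might
# use the isFileProcessed() dispatcher above to skip files that were already ingested. The
# real get_files() used by the processing functions below is defined elsewhere and may take
# different arguments; the helper name here is illustrative only.
def _find_unprocessed_files_sketch(global_config, session, db_name, input_dir, file_regex, recursive=True):
    import os
    matches = []
    for dirpath, dirnames, filenames in os.walk(input_dir):
        for filename in filenames:
            # keep only files whose names match the pattern (e.g. Debrief*.txt) and that
            # have not already been recorded as processed in the corresponding database
            if file_regex.match(filename):
                filepath = os.path.join(dirpath, filename)
                if not isFileProcessed(global_config, session, db_name, filepath):
                    matches.append(filepath)
        if not recursive:
            break
    return matches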
def process_issue_files(global_config, input_dir, recursive=True):
    # Initialize the database session connection
    issues_db_name = global_config['issues_db_name'] + global_config['this_season']
    debrief_db_name = global_config['debriefs_db_name'] + global_config['this_season']
    debrief_session = DbSession.open_db_session(debrief_db_name)
    issues_session = DbSession.open_db_session(issues_db_name)

    # The following regular expression will select all files that conform to
    # the file naming format Issue*.txt. Build a list of all datafiles that match
    # the naming format within the directory passed in via command line
    # arguments.
    file_regex = re.compile('Issue[a-zA-Z0-9_-]+.txt')
    files = get_files(global_config, issues_session, issues_db_name, input_dir, file_regex, recursive)
    files.sort()

    # Process data files
    for data_filename in files:
        print 'processing %s' % data_filename
        try:
            # Initialize the issue_attributes dictionary in preparation for the
            # parsing of the data file
            issue_attributes = {}

            # Parse the data file, storing all the information in the issue_attributes
            # dictionary
            FileParser.FileParser(data_filename).parse(issue_attributes)

            issue = IssueTrackerDataModel.addIssueFromAttributes(issues_session, issue_attributes)
            if issue.debrief_key != None:
                match_str, issue_key = issue.debrief_key.split('_')
                competition = global_config['this_competition'] + global_config['this_season']
                DebriefDataModel.addOrUpdateDebriefIssue(debrief_session, int(match_str), competition,
                                                         issue.issue_id, issue_key)
        except Exception, e:
            # log the exception but continue processing other files
            log_exception(global_config['logger'], e)

        # add the file to the set of processed files so that we don't process it again. Do it outside the
        # try/except block so that we don't try to process a bogus file over and over again.
        IssueTrackerDataModel.addProcessedFile(issues_session, data_filename)
def get_debriefs_home_page(global_config, competition):
    session = DbSession.open_db_session(global_config["debriefs_db_name"] + global_config["this_season"])

    result = ""
    result += "<hr>"

    match_debriefs = DebriefDataModel.getDebriefsInNumericOrder(session, competition)
    result += insert_debrief_table(match_debriefs, competition)
    return result
def get_competition_debriefs(global_config, competition):
    global_config["logger"].debug("GET match debriefs for competition: %s", competition)

    session = DbSession.open_db_session(global_config["debriefs_db_name"] + global_config["this_season"])
    debriefs = DebriefDataModel.getDebriefsInNumericOrder(session, competition)

    web.header("Content-Type", "application/json")
    result = []
    result.append('{ "debriefs": [\n')
    for debrief in debriefs:
        result.append(debrief.json())
        result.append(",\n")
    if len(debriefs) > 0:
        result = result[:-1]
        result.append("\n")
    result.append("]}")
    return "".join(result)
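# get_competition_debriefs() assembles its JSON payload by string concatenation, which relies
# on every debrief.json() fragment being well formed. The variant below is a minimal sketch
# (not part of the original module, name illustrative only) that assumes each debrief.json()
# string parses as a JSON object and lets the standard json module handle commas and quoting:
def get_competition_debriefs_json_sketch(global_config, competition):
    import json
    session = DbSession.open_db_session(global_config["debriefs_db_name"] + global_config["this_season"])
    debriefs = DebriefDataModel.getDebriefsInNumericOrder(session, competition)
    web.header("Content-Type", "application/json")
    # parse each per-debrief fragment back into a dict, then serialize the whole payload at once
    return json.dumps({"debriefs": [json.loads(debrief.json()) for debrief in debriefs]})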
def process_debrief_files(global_config, input_dir, recursive, test):
    # Initialize the database session connections
    issues_db_name = global_config['issues_db_name']
    debrief_db_name = global_config['debriefs_db_name']
    debrief_session = DbSession.open_db_session(debrief_db_name)
    issues_session = DbSession.open_db_session(issues_db_name)

    # Create the database if it doesn't already exist
    #if not os.path.exists('./' + db_name):
    #    DebriefDataModel.create_db_tables(my_db)

    # The following regular expression will select all files that conform to
    # the file naming format Debrief*.txt. Build a list of all datafiles that match
    # the naming format within the directory passed in via command line
    # arguments.
    file_regex = re.compile('Debrief[a-zA-Z0-9_-]+.txt')
    files = get_files(debrief_session, debrief_db_name, input_dir, file_regex, recursive, test)

    # Process data files
    for data_filename in files:
        print 'processing %s' % data_filename
        try:
            # Initialize the debrief_attributes dictionary in preparation for the
            # parsing of the data file
            debrief_attributes = {}

            # Parse the data file, storing all the information in the attributes
            # dictionary
            FileParser.FileParser(data_filename).parse(debrief_attributes)
            DebriefDataModel.addDebriefFromAttributes(debrief_session, debrief_attributes)

            # Also extract the competition name if it has been included in the data file
            if debrief_attributes.has_key('Competition'):
                competition = debrief_attributes['Competition']
            else:
                competition = global_config['this_competition'] + global_config['this_season']

            if competition == None:
                raise Exception('Competition Not Specified!')

            # At competition, we will likely have multiple laptops managing the data, but we want
            # only one machine to be responsible for the issues database. In all likelihood,
            # that machine will be the one in the pits, or possibly the application running
            # in the cloud.
            if global_config['issues_db_master'] == 'Yes':
                match_id = debrief_attributes['Match']
                submitter = debrief_attributes['Scouter']
                timestamp = str(int(time.time()))
                subgroup = 'Unassigned'
                status = 'Open'
                owner = 'Unassigned'

                if debrief_attributes.has_key('Issue1_Summary'):
                    # look to see if there is already a debrief issue, and if so, do not attempt to create/update
                    # an issue, as there are already other issue files that would then conflict with this one
                    issue_key = 'Issue1'
                    if DebriefDataModel.getDebriefIssue(debrief_session, match_id, issue_key) == None:
                        summary = debrief_attributes['Issue1_Summary']
                        if debrief_attributes.has_key('Issue1_Priority'):
                            priority = debrief_attributes['Issue1_Priority']
                        else:
                            priority = 'Priority_3'
                        if debrief_attributes.has_key('Issue1_Taskgroup'):
                            component = debrief_attributes['Issue1_Taskgroup']
                        else:
                            component = ''
                        if debrief_attributes.has_key('Issue1_Description'):
                            description = debrief_attributes['Issue1_Description']
                        else:
                            description = ''

                        debrief_key = str(match_id) + '_' + issue_key
                        issue_id = IssueTrackerDataModel.getIssueId(issues_session, 'Robot')
                        issue = IssueTrackerDataModel.addOrUpdateIssue(issues_session, issue_id, summary, status,
                                                                       priority, subgroup, component, submitter,
                                                                       owner, description, timestamp, debrief_key)
                        if issue != None:
                            issue.create_file('./static/data/%s/ScoutingData' % competition)
                            DebriefDataModel.addOrUpdateDebriefIssue(debrief_session, match_id, competition,
                                                                     issue_id, issue_key)

                if debrief_attributes.has_key('Issue2_Summary'):
                    issue_key = 'Issue2'
                    if DebriefDataModel.getDebriefIssue(debrief_session, match_id, issue_key) == None:
                        summary = debrief_attributes['Issue2_Summary']
                        if debrief_attributes.has_key('Issue2_Priority'):
                            priority = debrief_attributes['Issue2_Priority']
                        else:
                            priority = 'Priority_3'
                        if debrief_attributes.has_key('Issue2_Taskgroup'):
                            component = debrief_attributes['Issue2_Taskgroup']
                        else:
                            component = ''
                        if debrief_attributes.has_key('Issue2_Description'):
                            description = debrief_attributes['Issue2_Description']
                        else:
                            description = ''

                        debrief_key = str(match_id) + '_' + issue_key
                        issue_id = IssueTrackerDataModel.getIssueId(issues_session, 'Robot')
                        issue = IssueTrackerDataModel.addOrUpdateIssue(issues_session, issue_id, summary, status,
                                                                       priority, subgroup, component, submitter,
                                                                       owner, description, timestamp, debrief_key)
                        if issue != None:
                            issue.create_file('./static/data/%s/ScoutingData' % competition)
                            DebriefDataModel.addOrUpdateDebriefIssue(debrief_session, match_id, competition,
                                                                     issue_id, issue_key)

                if debrief_attributes.has_key('Issue3_Summary'):
                    issue_key = 'Issue3'
                    if DebriefDataModel.getDebriefIssue(debrief_session, match_id, issue_key) == None:
                        summary = debrief_attributes['Issue3_Summary']
                        if debrief_attributes.has_key('Issue3_Priority'):
                            priority = debrief_attributes['Issue3_Priority']
                        else:
                            priority = 'Priority_3'
                        if debrief_attributes.has_key('Issue3_Taskgroup'):
                            component = debrief_attributes['Issue3_Taskgroup']
                        else:
                            component = ''
                        if debrief_attributes.has_key('Issue3_Description'):
                            description = debrief_attributes['Issue3_Description']
                        else:
                            description = ''

                        debrief_key = str(match_id) + '_' + issue_key
                        issue_id = IssueTrackerDataModel.getIssueId(issues_session, 'Robot')
                        issue = IssueTrackerDataModel.addOrUpdateIssue(issues_session, issue_id, summary, status,
                                                                       priority, subgroup, component, submitter,
                                                                       owner, description, timestamp, debrief_key)
                        if issue != None:
                            issue.create_file('./static/data/%s/ScoutingData' % competition)
                            DebriefDataModel.addOrUpdateDebriefIssue(debrief_session, match_id, competition,
                                                                     issue_id, issue_key)
        except Exception, e:
            # log the exception but continue processing other files
            log_exception(e)

        # add the file to the set of processed files so that we don't process it again. Do it outside the
        # try/except block so that we don't try to process a bogus file over and over again.
        DebriefDataModel.addProcessedFile(debrief_session, data_filename)
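# For reference, the Debrief*.txt parsing above populates debrief_attributes with keys like
# the ones shown here. The values are purely illustrative placeholders, not taken from any
# real data file; only the key names are grounded in the code above.
example_debrief_attributes = {
    'Competition': 'Example2014',                 # hypothetical competition string
    'Match': '12',                                # match identifier from the data file
    'Scouter': 'ExampleScouter',                  # hypothetical submitter name
    'Issue1_Summary': 'Intake jammed',            # hypothetical issue summary
    'Issue1_Priority': 'Priority_2',
    'Issue1_Taskgroup': 'Drivetrain',
    'Issue1_Description': 'Intake jammed in autonomous and did not recover.',
}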
def process_debrief_files(global_config, input_dir, recursive=True):
    # Initialize the database session connections
    issues_db_name = global_config["issues_db_name"] + global_config["this_season"]
    debrief_db_name = global_config["debriefs_db_name"] + global_config["this_season"]
    debrief_session = DbSession.open_db_session(debrief_db_name)
    issues_session = DbSession.open_db_session(issues_db_name)

    # Create the database if it doesn't already exist
    # if not os.path.exists('./' + db_name):
    #     DebriefDataModel.create_db_tables(my_db)

    # The following regular expression will select all files that conform to
    # the file naming format Debrief*.txt. Build a list of all datafiles that match
    # the naming format within the directory passed in via command line
    # arguments.
    file_regex = re.compile("Debrief[a-zA-Z0-9_-]+.txt")
    files = get_files(global_config, debrief_session, debrief_db_name, input_dir, file_regex, recursive)

    # Process data files
    for data_filename in files:
        print "processing %s" % data_filename
        try:
            # Initialize the debrief_attributes dictionary in preparation for the
            # parsing of the data file
            debrief_attributes = {}

            # Parse the data file, storing all the information in the attributes
            # dictionary
            FileParser.FileParser(data_filename).parse(debrief_attributes)
            DebriefDataModel.addDebriefFromAttributes(debrief_session, debrief_attributes)

            # Also extract the competition name if it has been included in the data file
            if debrief_attributes.has_key("Competition"):
                competition = debrief_attributes["Competition"]
                issue_base_name = WebCommonUtils.split_comp_str(competition)[0]
            else:
                competition = global_config["this_competition"] + global_config["this_season"]
                issue_base_name = global_config["this_competition"]

            if competition == None:
                raise Exception("Competition Not Specified!")

            # At competition, we will likely have multiple laptops managing the data, but we want
            # only one machine to be responsible for the issues database. In all likelihood,
            # that machine will be the one in the pits, or possibly the application running
            # in the cloud.
            if global_config["issues_db_master"] == "Yes":
                match_id = debrief_attributes["Match"]
                submitter = debrief_attributes["Scouter"]
                timestamp = str(int(time.time()))
                subgroup = "Unassigned"
                status = "Open"
                owner = "Unassigned"

                if debrief_attributes.has_key("Issue1_Summary") or debrief_attributes.has_key("Issue1_Description"):
                    # look to see if there is already a debrief issue, and if so, do not attempt to create/update
                    # an issue, as there are already other issue files that would then conflict with this one
                    issue_key = "Issue1"
                    if DebriefDataModel.getDebriefIssue(debrief_session, competition, match_id, issue_key) == None:
                        # if no summary is provided, then use the description as the summary. Likewise, if no
                        # description is provided then use the summary as the description. Keep in mind that we
                        # need at least the summary or description to be provided.
                        if debrief_attributes.has_key("Issue1_Summary"):
                            summary = debrief_attributes["Issue1_Summary"]
                        else:
                            summary = debrief_attributes["Issue1_Description"]
                        if debrief_attributes.has_key("Issue1_Description"):
                            description = debrief_attributes["Issue1_Description"]
                        else:
                            description = debrief_attributes["Issue1_Summary"]

                        if debrief_attributes.has_key("Issue1_Priority"):
                            priority = debrief_attributes["Issue1_Priority"]
                        else:
                            priority = "Priority_3"
                        if debrief_attributes.has_key("Issue1_Taskgroup"):
                            component = debrief_attributes["Issue1_Taskgroup"]
                        else:
                            component = ""

                        debrief_key = str(match_id) + "_" + issue_key
                        issue_id = IssueTrackerDataModel.getIssueId(issues_session, issue_base_name)
                        issue = IssueTrackerDataModel.addOrUpdateIssue(issues_session, issue_id, summary, status,
                                                                       priority, subgroup, component, submitter,
                                                                       owner, description, timestamp, debrief_key)
                        if issue != None:
                            issue.create_file("./static/data/%s/ScoutingData" % competition)
                            DebriefDataModel.addOrUpdateDebriefIssue(debrief_session, match_id, competition,
                                                                     issue_id, issue_key)

                if debrief_attributes.has_key("Issue2_Summary") or debrief_attributes.has_key("Issue2_Description"):
                    # look to see if there is already a debrief issue, and if so, do not attempt to create/update
                    # an issue, as there are already other issue files that would then conflict with this one
                    issue_key = "Issue2"
                    if DebriefDataModel.getDebriefIssue(debrief_session, competition, match_id, issue_key) == None:
                        # if no summary is provided, then use the description as the summary. Likewise, if no
                        # description is provided then use the summary as the description. Keep in mind that we
                        # need at least the summary or description to be provided.
                        if debrief_attributes.has_key("Issue2_Summary"):
                            summary = debrief_attributes["Issue2_Summary"]
                        else:
                            summary = debrief_attributes["Issue2_Description"]
                        if debrief_attributes.has_key("Issue2_Description"):
                            description = debrief_attributes["Issue2_Description"]
                        else:
                            description = debrief_attributes["Issue2_Summary"]

                        if debrief_attributes.has_key("Issue2_Priority"):
                            priority = debrief_attributes["Issue2_Priority"]
                        else:
                            priority = "Priority_3"
                        if debrief_attributes.has_key("Issue2_Taskgroup"):
                            component = debrief_attributes["Issue2_Taskgroup"]
                        else:
                            component = ""

                        debrief_key = str(match_id) + "_" + issue_key
                        issue_id = IssueTrackerDataModel.getIssueId(issues_session, issue_base_name)
                        issue = IssueTrackerDataModel.addOrUpdateIssue(issues_session, issue_id, summary, status,
                                                                       priority, subgroup, component, submitter,
                                                                       owner, description, timestamp, debrief_key)
                        if issue != None:
                            issue.create_file("./static/data/%s/ScoutingData" % competition)
                            DebriefDataModel.addOrUpdateDebriefIssue(debrief_session, match_id, competition,
                                                                     issue_id, issue_key)

                if debrief_attributes.has_key("Issue3_Summary") or debrief_attributes.has_key("Issue3_Description"):
                    # look to see if there is already a debrief issue, and if so, do not attempt to create/update
                    # an issue, as there are already other issue files that would then conflict with this one
                    issue_key = "Issue3"
                    if DebriefDataModel.getDebriefIssue(debrief_session, competition, match_id, issue_key) == None:
                        # if no summary is provided, then use the description as the summary. Likewise, if no
                        # description is provided then use the summary as the description. Keep in mind that we
                        # need at least the summary or description to be provided.
                        if debrief_attributes.has_key("Issue3_Summary"):
                            summary = debrief_attributes["Issue3_Summary"]
                        else:
                            summary = debrief_attributes["Issue3_Description"]
                        if debrief_attributes.has_key("Issue3_Description"):
                            description = debrief_attributes["Issue3_Description"]
                        else:
                            description = debrief_attributes["Issue3_Summary"]

                        if debrief_attributes.has_key("Issue3_Priority"):
                            priority = debrief_attributes["Issue3_Priority"]
                        else:
                            priority = "Priority_3"
                        if debrief_attributes.has_key("Issue3_Taskgroup"):
                            component = debrief_attributes["Issue3_Taskgroup"]
                        else:
                            component = ""

                        debrief_key = str(match_id) + "_" + issue_key
                        issue_id = IssueTrackerDataModel.getIssueId(issues_session, issue_base_name)
                        issue = IssueTrackerDataModel.addOrUpdateIssue(issues_session, issue_id, summary, status,
                                                                       priority, subgroup, component, submitter,
                                                                       owner, description, timestamp, debrief_key)
                        if issue != None:
                            issue.create_file("./static/data/%s/ScoutingData" % competition)
                            DebriefDataModel.addOrUpdateDebriefIssue(debrief_session, match_id, competition,
                                                                     issue_id, issue_key)
        except Exception, e:
            # log the exception but continue processing other files
            log_exception(global_config["logger"], e)

        # add the file to the set of processed files so that we don't process it again. Do it outside the
        # try/except block so that we don't try to process a bogus file over and over again.
        DebriefDataModel.addProcessedFile(debrief_session, data_filename)
def get_debrief_page(global_config, competition, match_str, allow_update=False):
    session = DbSession.open_db_session(global_config["debriefs_db_name"] + global_config["this_season"])
    debrief = DebriefDataModel.getDebrief(session, competition, int(match_str))
    debrief_issues = DebriefDataModel.getDebriefIssues(session, competition, int(match_str))
    issues_session = DbSession.open_db_session(global_config["issues_db_name"] + global_config["this_season"])

    if debrief != None:
        result = ""
        result += "<hr>"

        table_str = "<h4>Match Info</h4>"
        table_str += "<ul>"
        table_str += '<table border="1" cellspacing="5">'
        table_str += "<tr>"
        table_str += "<td>Summary</td>"
        table_str += "<td>" + debrief.summary + "</td>"
        table_str += "</tr>"
        table_str += "<tr>"
        table_str += "<td>Description</td>"
        table_str += "<td>" + debrief.description + "</td>"
        table_str += "</tr>"
        table_str += "</table>"
        table_str += "</ul>"

        table_str += "<h4>Reported Issues From Match</h4>"
        table_str += "<ul>"
        table_str += '<table border="1" cellspacing="5">'
        for issue in debrief_issues:
            table_str += "<tr>"
            table_str += "<td>" + issue.priority + "</td>"
            table_str += '<td><a href="/issue/' + str(issue.issue_id) + '">' + str(issue.issue_id) + "</a></td>"
            # look up the full issue record so its summary can be shown alongside the link
            tracker_issue = IssueTrackerDataModel.getIssue(issues_session, issue.issue_id)
            if tracker_issue:
                table_str += "<td>" + tracker_issue.summary + "</td>"
            table_str += "</tr>"
        table_str += "</table>"
        table_str += "</ul>"
        result += table_str

        result += "<br>"
        result += "<hr>"
        result += '<a href="/debriefcomment/' + competition + "/" + match_str + '"> Comment On This Match</a>'
        result += "<br>"
        result += "<hr>"

        result += "<h3>Comments</h3>"
        comments = DebriefDataModel.getDebriefComments(session, competition, int(match_str))
        if len(comments) > 0:
            table_str = "<ul>"
            table_str += '<table border="1" cellspacing="5">'
            table_str += "<tr>"
            table_str += "<th>Timestamp</th>"
            table_str += "<th>Commented By</th>"
            table_str += "<th>Comment</th>"
            if allow_update == True:
                table_str += "<th>Delete</th>"
            table_str += "</tr>"
            for comment in comments:
                table_str += "<tr>"
                table_str += ("<td>" + time.strftime("%b %d, %Y %I:%M:%S %p", time.localtime(float(comment.tag)))
                              + "</td>")
                table_str += "<td>" + comment.submitter + "</td>"
                table_str += "<td>" + comment.data + "</td>"
                if allow_update == True:
                    table_str += ('<td><a href="/deletecomment/debrief/' + competition + "/" + match_str + "/"
                                  + comment.tag + '">Delete</a></td>')
                table_str += "</tr>"
            table_str += "</table>"
            table_str += "</ul>"
            result += table_str
            result += "<hr>"
        return result
    else:
        return None
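# get_debrief_page() interpolates stored text (summaries, descriptions, comments) directly
# into HTML. The helper below is a small sketch (not part of the original module, name
# illustrative only) of the kind of escaping that could be applied to each interpolated
# value before concatenation, using the Python 2 standard library:
def _escape_cell_sketch(value):
    import cgi
    # cgi.escape with quote=True encodes &, <, > and double quotes so stored text cannot
    # break out of the surrounding table cell markup
    return cgi.escape(value if value is not None else "", quote=True)

# e.g. table_str += "<td>" + _escape_cell_sketch(debrief.summary) + "</td>"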