def iter_datatable(session, url, **kwargs):
    """Iterate over a paginated Blackboard datatable.

    Yields the list of column keys, then every row from every page, and
    finally the last Response object, whose ``.history`` is patched to
    hold all earlier responses (redirects and previous pages).

    Keyword argument ``edit_mode`` (popped here; default False) makes the
    session ensure edit mode on the first response.  All remaining kwargs
    are forwarded to parse_datatable().

    Raises ValueError if a later page reports different column keys than
    page 1.
    """
    url += '&numResults=1000&startIndex=0'
    l = blackboard.slowlog()
    response = session.get(url)
    if kwargs.pop('edit_mode', False):
        response = session.ensure_edit_mode(response)
    history = list(response.history) + [response]
    document = html5lib.parse(response.content, encoding=response.encoding)
    keys, rows = parse_datatable(response, document, **kwargs)
    next_id = 'listContainer_nextpage_top'
    next_o = document.find('.//h:a[@id="%s"]' % next_id, NS)
    # BUGFIX: log before yielding.  A generator is suspended at each
    # yield, so invoking the slowlog timer after the yields would charge
    # the consumer's row-processing time to "fetching the datatable".
    if next_o is None:
        l("Fetching datatable took %.1f s")
    else:
        l("Fetching datatable page 1 took %.1f s")
    yield keys
    yield from rows
    page_number = 1
    while next_o:
        page_number += 1
        url = urljoin(response.url, next_o.get('href'))
        l = blackboard.slowlog()
        response = session.get(url)
        l("Fetching datatable page %d took %.4f s", page_number)
        history += list(response.history) + [response]
        document = html5lib.parse(response.content,
                                  encoding=response.encoding)
        keys_, rows = parse_datatable(response, document, **kwargs)
        if keys != keys_:
            raise ValueError(
                "Page %d keys (%r) do not match page 1 keys (%r)" %
                (page_number, keys_, keys))
        next_o = document.find('.//h:a[@id="%s"]' % next_id, NS)
        yield from rows
    # Exclude the final response itself from its own history.
    response.history = history[:-1]
    yield response
def iter_datatable(session, url, **kwargs):
    """Generate the contents of a paginated Blackboard datatable.

    Yields the column keys first, then each row of every page, and
    finally the last Response (with ``.history`` patched to contain all
    prior responses).  The ``edit_mode`` kwarg is popped and handled
    here; everything else is forwarded to parse_datatable().

    Raises ValueError if a later page's keys differ from page 1's.
    """
    url += '&numResults=1000&startIndex=0'
    timer = blackboard.slowlog()
    response = session.get(url)
    if kwargs.pop('edit_mode', False):
        response = session.ensure_edit_mode(response)
    timer("Fetching datatable page 1 took %.4f s")
    history = list(response.history) + [response]
    doc = html5lib.parse(response.content, encoding=response.encoding)
    keys, rows = parse_datatable(response, doc, **kwargs)
    yield keys
    yield from rows
    next_link = doc.find('.//h:a[@id="listContainer_nextpage_top"]', NS)
    page = 1
    while next_link:
        page += 1
        next_url = urljoin(response.url, next_link.get('href'))
        timer = blackboard.slowlog()
        response = session.get(next_url)
        timer("Fetching datatable page %d took %.4f s", page)
        history.extend(response.history)
        history.append(response)
        doc = html5lib.parse(response.content, encoding=response.encoding)
        page_keys, rows = parse_datatable(response, doc, **kwargs)
        if page_keys != keys:
            raise ValueError(
                "Page %d keys (%r) do not match page 1 keys (%r)" %
                (page, page_keys, keys))
        next_link = doc.find('.//h:a[@id="listContainer_nextpage_top"]', NS)
        yield from rows
    # The final response must not list itself in its history.
    response.history = history[:-1]
    yield response
def fetch_overview(session):
    """Fetch gradebook information. Returns (assignments, students)."""
    assert isinstance(session, BlackboardSession)
    url = ('https://bb.au.dk/webapps/gradebook/do/instructor/getJSONData' +
           '?course_id=%s' % session.course_id)
    timer = blackboard.slowlog()
    response = session.get(url)
    timer("Fetching gradebook took %.1f s")
    try:
        book = response.json()
    except JSONDecodeError:
        raise ParserError("Couldn't decode JSON", response)
    if 'cachedBook' in book:
        book = book['cachedBook']
    try:
        columns = book['colDefs']
    except KeyError:
        raise ParserError("No colDefs", response)
    # Keep only the column definitions that represent assignments.
    assignments = {
        column['id']: column
        for column in columns
        if column.get('src') == 'resource/x-bb-assignment'
    }
    users = {}
    for row in book['rows']:
        uid = row[0]['uid']
        avail = row[0]['avail']
        cells = {cell['c']: cell for cell in row if 'c' in cell}
        data = {cell['c']: cell['v'] for cell in row if 'v' in cell}
        user_assignments = {}
        for assignment_id in assignments:
            if assignment_id not in cells:
                continue
            cell = cells[assignment_id]
            user_assignments[assignment_id] = {
                'score': cell['v'],
                'needs_grading': bool(cell.get('ng')),
                'attempts': None,
            }
        users[uid] = dict(
            first_name=data['FN'],
            last_name=data['LN'],
            username=data['UN'],
            student_number=data['SI'],
            last_access=data['LA'],
            id=uid,
            available=avail,
            assignments=user_assignments,
        )
    return assignments, users
def dwr_get_attempts_info(session, attempts, batch_size=20):
    """Fetch info for all *attempts*, issuing one request per batch of
    at most *batch_size* attempts, and return the concatenated results."""
    results = []
    total = len(attempts)
    for start in range(0, total, batch_size):
        stop = min(total, start + batch_size)
        timer = blackboard.slowlog()
        batch = attempts[start:stop]
        results.extend(
            dwr_get_attempts_info_single_request(session, batch))
        # %% keeps the %.1f placeholder intact for the slowlog timer.
        timer("Fetching %d attempt lists took %%.1f s" % (stop - start))
    return results
def fetch_rubric(session, assoc_id, rubric_object):
    """Fetch a rubric's grading grid.

    Returns a dict with the rubric's id, title, column headers, and rows;
    each row is a dict with id, title and its cells (id, desc, percentage).

    Raises ParserError when an expected id/element is missing from the page.
    """
    rubric_id = rubric_object['id']
    rubric_title = rubric_object['title']
    prefix = 'BBFETCH'
    url = (
        'https://bb.au.dk/webapps/rubric/do/course/gradeRubric' +
        '?mode=grid&isPopup=true&rubricCount=1&prefix=%s' % prefix +
        '&course_id=%s' % session.course_id +
        '&maxValue=1.0&rubricId=%s' % rubric_id +
        '&viewOnly=false&displayGrades=true&type=grading' +
        '&rubricAssoId=%s' % assoc_id)
    # NOTE(review): slowlog() presumably returns a timer callable that fills
    # in the elapsed seconds for the %.1f placeholder — confirm in blackboard.
    l = blackboard.slowlog()
    response = session.get(url)
    l("Fetching attempt rubric took %.1f s")
    document = html5lib.parse(response.content, encoding=response.encoding)

    def is_desc(div_element):
        # A cell's description <div> is the u_controlsWrapper that is
        # neither a radio label nor the feedback area.
        classes = (div_element.get('class') or '').split()
        return ('u_controlsWrapper' in classes and
                'radioLabel' not in classes and
                'feedback' not in classes)

    table = document.find(
        './/h:table[@id="%s_rubricGradingTable"]' % prefix, NS)
    # First header cell is the row-title column; skip it.
    column_headers = list(map(
        element_text_content,
        table.findall('./h:thead/h:tr/h:th', NS)[1:]))
    rubric_rows = []
    row_tags = table.findall('./h:tbody/h:tr', NS)
    for row in row_tags:
        row_id = row.get('rubricrowid')
        if row_id is None:
            raise ParserError("Could not get rubric row id", response)
        row_title = element_text_content(row.find('./h:th', NS))
        row_cells = row.findall('./h:td', NS)
        if len(row_cells) != len(column_headers):
            raise ParserError("Number of row cells does not equal " +
                              "number of table header cells", response)
        rubric_row_cells = []
        for cell in row_cells:
            cell_id = cell.get('rubriccellid')
            if cell_id is None:
                raise ParserError("Could not get rubric cell id", response)
            cell_container = cell.find(
                './h:div[@class="rubricCellContainer"]', NS)
            cell_percentage_element = cell_container.find(
                './/h:input[@class="selectedPercentField"]', NS)
            if cell_percentage_element is None:
                raise ParserError("No selectedPercentField", response)
            percentage = form_field_value(cell_percentage_element)
            # Exactly one direct child <div> must match is_desc().
            desc = list(filter(is_desc, cell_container.findall('./h:div', NS)))
            if len(desc) != 1:
                raise ParserError("Could not get description", response)
            else:
                desc_text = element_text_content(desc[0])
            rubric_row_cells.append(dict(
                id=cell_id, desc=desc_text, percentage=percentage))
        rubric_rows.append(dict(
            id=row_id, title=row_title, cells=rubric_row_cells))
    return dict(id=rubric_id, title=rubric_title,
                columns=column_headers, rows=rubric_rows)
def fetch_attempt(session, attempt_id, is_group_assignment):
    """Fetch a single (group or individual) assignment attempt.

    Returns a dict with keys: submission, comments, files, feedback,
    feedbackfiles, score, grading_notes, rubric_data.

    Raises ParserError when the page does not have the expected structure.
    """
    assert isinstance(session, BlackboardSession)
    # Group and individual attempts use different query parameters.
    if is_group_assignment:
        url = ('https://bb.au.dk/webapps/assignment/' +
               'gradeAssignmentRedirector' +
               '?course_id=%s' % session.course_id +
               '&groupAttemptId=%s' % attempt_id)
    else:
        url = ('https://bb.au.dk/webapps/assignment/' +
               'gradeAssignmentRedirector' +
               '?course_id=%s' % session.course_id +
               '&attempt_id=%s' % attempt_id)
    l = blackboard.slowlog()
    response = session.get(url)
    l("Fetching attempt took %.1f s")
    document = html5lib.parse(response.content, encoding=response.encoding)
    # Optional free-text submission, converted to Markdown if present.
    submission_text = document.find(
        './/h:div[@id="submissionTextView"]', NS)
    if submission_text is not None:
        submission_text = element_to_markdown(submission_text)
    # Optional comments: each vtbegenerated <div> becomes one Markdown
    # chunk; the container existing but holding none is a parse error.
    comments = document.find(
        './/h:div[@id="currentAttempt_comments"]', NS)
    if comments is not None:
        xpath = './/h:div[@class="vtbegenerated"]'
        comments = [
            element_to_markdown(e)
            for e in comments.findall(xpath, NS)
        ]
        if not comments:
            raise blackboard.ParserError(
                "Page contains currentAttempt_comments, " +
                "but it contains no comments", response)
        comments = '\n\n'.join(comments)
    # Collect uploaded files with their download links.
    files = []
    submission_list = document.find(
        './/h:ul[@id="currentAttempt_submissionList"]', NS)
    if submission_list is None:
        raise ParserError("No currentAttempt_submissionList", response)
    for submission in submission_list:
        filename = element_text_content(submission)
        download_button = submission.find(
            './/h:a[@class="dwnldBtn"]', NS)
        if download_button is not None:
            download_link = urljoin(
                response.url, download_button.get('href'))
            files.append(
                dict(filename=filename, download_link=download_link))
        else:
            s = 'currentAttempt_attemptFilesubmissionText'
            a = submission.find(
                './/h:a[@id="' + s + '"]', NS)
            if a is not None:
                # This <li> is for the submission_text
                if not submission_text:
                    raise blackboard.ParserError(
                        "%r in file list, but no " % (filename,) +
                        "accompanying submission text contents",
                        response)
            else:
                raise blackboard.ParserError(
                    "No download link for file %r" % (filename,),
                    response)
    # Score: absent input means no score; an unparseable non-empty value
    # is an error, while an empty value quietly becomes None.
    score_input = document.find(
        './/h:input[@id="currentAttempt_grade"]', NS)
    if score_input is None:
        score = None
    else:
        score = form_field_value(score_input)
        try:
            score = float(score)
        except ValueError:
            if score:
                raise blackboard.ParserError(
                    "Couldn't parse currentAttempt_grade: %r" % (score,),
                    response)
            score = None
    # Feedback text; convert to Markdown if it looks like HTML.
    feedbacktext_input = document.find(
        './/*[@id="feedbacktext"]', NS)
    if feedbacktext_input is None:
        feedback = ''
    else:
        feedback = form_field_value(feedbacktext_input)
        if '<' in feedback:
            feedback = html_to_markdown(feedback)
    gradingNotestext_input = document.find(
        './/*[@id="gradingNotestext"]', NS)
    if gradingNotestext_input is None:
        grading_notes = ''
    else:
        grading_notes = form_field_value(gradingNotestext_input)
    # Instructor-attached feedback files: each table row must contain a link.
    feedbackfiles_rows = document.find(
        './/h:tbody[@id="feedbackFiles_table_body"]', NS)
    feedbackfiles = []
    for i, row in enumerate(feedbackfiles_rows or []):
        try:
            link = row.findall('.//h:a', NS)[0]
        except IndexError:
            raise blackboard.ParserError(
                "feedbackFiles_table_body row %s: no link" % i, response)
        download_link = urljoin(
            response.url, link.get('href'))
        filename = element_text_content(link)
        feedbackfiles.append(
            dict(filename=filename, download_link=download_link))
    # Rubric data is only present for group assignments: a URL-quoted JSON
    # blob in a hidden input named <attempt_id>_rubricEvaluation.
    rubric_data = None
    if is_group_assignment:
        rubric_input = document.find(
            './/h:input[@id="%s_rubricEvaluation"]' % attempt_id, NS)
        if rubric_input is not None:
            rubric_data_str = form_field_value(rubric_input)
            try:
                rubric_data = json.loads(unquote(rubric_data_str))
            except JSONDecodeError:
                raise ParserError("Couldn't decode JSON", response)
            t1 = 'blackboard.platform.gradebook2.GroupAttempt'
            t2 = 'blackboard.plugin.rubric.api.core.data.EvaluationEntity'
            if rubric_data['evalDataType'] == t1:
                # GroupAttempt evaluations must reference this attempt.
                if rubric_data['evalEntityId'] != attempt_id:
                    raise ParserError(
                        "evalEntityId is %r, expected %r" %
                        (rubric_data['evalEntityId'], attempt_id),
                        response)
            elif rubric_data['evalDataType'] == t2:
                # Seems to indicate an already filled-out rubric
                pass
            else:
                raise ParserError(
                    "Unknown evalDataType %r" % rubric_data['evalDataType'],
                    response)
    return dict(
        submission=submission_text,
        comments=comments,
        files=files,
        feedback=feedback,
        feedbackfiles=feedbackfiles,
        score=score,
        grading_notes=grading_notes,
        rubric_data=rubric_data,
    )
def fetch_rubric(session, assoc_id, rubric_object):
    """Fetch the grading grid of a rubric.

    Returns a dict with the rubric's id, title, column headers and rows;
    each row dict holds its id, title and cells (id, desc, percentage).

    Raises ParserError when an expected id/element is missing.
    """
    rubric_id = rubric_object['id']
    rubric_title = rubric_object['title']
    prefix = 'BBFETCH'
    url = ('https://%s/webapps/rubric/do/course/gradeRubric' % DOMAIN +
           '?mode=grid&isPopup=true&rubricCount=1&prefix=%s' % prefix +
           '&course_id=%s' % session.course_id +
           '&maxValue=1.0&rubricId=%s' % rubric_id +
           '&viewOnly=false&displayGrades=true&type=grading' +
           '&rubricAssoId=%s' % assoc_id)
    timer = blackboard.slowlog()
    response = session.get(url)
    timer("Fetching attempt rubric took %.1f s")
    doc = html5lib.parse(response.content,
                         transport_encoding=response.encoding)

    def is_description(div):
        # The description <div> is the u_controlsWrapper that is neither
        # a radio label nor the feedback area.
        classes = set((div.get('class') or '').split())
        return ('u_controlsWrapper' in classes and
                not classes & {'radioLabel', 'feedback'})

    table = doc.find('.//h:table[@id="%s_rubricGradingTable"]' % prefix, NS)
    # The first header cell is the row-title column; drop it.
    headers = [element_text_content(th)
               for th in table.findall('./h:thead/h:tr/h:th', NS)[1:]]
    parsed_rows = []
    for tr in table.findall('./h:tbody/h:tr', NS):
        row_id = tr.get('rubricrowid')
        if row_id is None:
            raise ParserError("Could not get rubric row id", response)
        row_title = element_text_content(tr.find('./h:th', NS))
        tds = tr.findall('./h:td', NS)
        if len(tds) != len(headers):
            raise ParserError(
                "Number of row cells does not equal " +
                "number of table header cells", response)
        parsed_cells = []
        for td in tds:
            cell_id = td.get('rubriccellid')
            if cell_id is None:
                raise ParserError("Could not get rubric cell id", response)
            container = td.find('./h:div[@class="rubricCellContainer"]', NS)
            percent_input = container.find(
                './/h:input[@class="selectedPercentField"]', NS)
            if percent_input is None:
                raise ParserError("No selectedPercentField", response)
            percentage = form_field_value(percent_input)
            # Exactly one direct child <div> must be the description.
            descriptions = [div for div in container.findall('./h:div', NS)
                            if is_description(div)]
            if len(descriptions) != 1:
                raise ParserError("Could not get description", response)
            parsed_cells.append(
                dict(id=cell_id,
                     desc=element_text_content(descriptions[0]),
                     percentage=percentage))
        parsed_rows.append(
            dict(id=row_id, title=row_title, cells=parsed_cells))
    return dict(id=rubric_id, title=rubric_title,
                columns=headers, rows=parsed_rows)
def fetch_attempt(session, attempt_id, is_group_assignment):
    """Fetch a single (group or individual) assignment attempt.

    Returns a dict with keys: submission, comments, files, feedback,
    feedbackfiles, score, grading_notes, rubric_data.

    Raises NotYetSubmitted if the attempt page says it has not been
    submitted yet, and ParserError when the page does not have the
    expected structure.
    """
    assert isinstance(session, BlackboardSession)
    # Group and individual attempts use different query parameters.
    if is_group_assignment:
        url = ('https://%s/webapps/assignment/' % DOMAIN +
               'gradeAssignmentRedirector' +
               '?course_id=%s' % session.course_id +
               '&groupAttemptId=%s' % attempt_id)
    else:
        url = ('https://%s/webapps/assignment/' % DOMAIN +
               'gradeAssignmentRedirector' +
               '?course_id=%s' % session.course_id +
               '&attempt_id=%s' % attempt_id)
    l = blackboard.slowlog()
    response = session.get(url)
    l("Fetching attempt took %.1f s")
    document = html5lib.parse(response.content,
                              transport_encoding=response.encoding)
    # A missing currentAttempt container means either "not submitted yet"
    # (detectable by a fixed message in the page) or a parse failure.
    currentAttempt_container = document.find(
        './/h:div[@id="currentAttempt"]', NS)
    if currentAttempt_container is None:
        not_yet_submitted = (
            'This attempt has not yet been submitted and ' +
            'is not available to view at present.')
        if not_yet_submitted in response.text:
            raise NotYetSubmitted
        raise blackboard.ParserError('No <div id="currentAttempt">',
                                     response=response)
    # Optional free-text submission, converted to Markdown if present.
    submission_text = document.find('.//h:div[@id="submissionTextView"]', NS)
    if submission_text is not None:
        submission_text = element_to_markdown(submission_text)
    # Optional comments: each vtbegenerated <div> becomes one Markdown
    # chunk; the container existing but holding none is a parse error.
    comments = document.find('.//h:div[@id="currentAttempt_comments"]', NS)
    if comments is not None:
        xpath = './/h:div[@class="vtbegenerated"]'
        comments = [
            element_to_markdown(e)
            for e in comments.findall(xpath, NS)
        ]
        if not comments:
            raise blackboard.ParserError(
                "Page contains currentAttempt_comments, " +
                "but it contains no comments", response)
        comments = '\n\n'.join(comments)
    # Collect uploaded files with their download links.  A missing file
    # list is tolerated here (unlike older revisions): warn and continue.
    files = []
    submission_list = document.find(
        './/h:ul[@id="currentAttempt_submissionList"]', NS)
    if submission_list is None:
        if comments is None and submission_text is None:
            logger.warning("The submission is completely empty.")
        elif submission_text is None:
            logger.warning(
                "No submission; the student only uploaded a comment.")
        else:
            logger.warning("The student only uploaded a text submission.")
        submission_list = ()
    for submission in submission_list:
        filename = element_text_content(submission)
        download_button = submission.find('.//h:a[@class="dwnldBtn"]', NS)
        if download_button is not None:
            download_link = urljoin(response.url,
                                    download_button.get('href'))
            files.append(dict(filename=filename,
                              download_link=download_link))
        else:
            s = 'currentAttempt_attemptFilesubmissionText'
            a = submission.find('.//h:a[@id="' + s + '"]', NS)
            if a is not None:
                # This <li> is for the submission_text
                if not submission_text:
                    raise blackboard.ParserError(
                        "%r in file list, but no " % (filename, ) +
                        "accompanying submission text contents",
                        response)
            else:
                raise blackboard.ParserError(
                    "No download link for file %r" % (filename, ),
                    response)
    # Score: absent input means no score; an unparseable non-empty value
    # is an error, while an empty value quietly becomes None.
    score_input = document.find('.//h:input[@id="currentAttempt_grade"]', NS)
    if score_input is None:
        score = None
    else:
        score = form_field_value(score_input)
        try:
            score = float(score)
        except ValueError:
            if score:
                raise blackboard.ParserError(
                    "Couldn't parse currentAttempt_grade: %r" % (score, ),
                    response)
            score = None
    # Feedback text; convert to Markdown if it looks like HTML.
    feedbacktext_input = document.find('.//*[@id="feedbacktext"]', NS)
    if feedbacktext_input is None:
        feedback = ''
    else:
        feedback = form_field_value(feedbacktext_input)
        if '<' in feedback:
            feedback = html_to_markdown(feedback)
    gradingNotestext_input = document.find('.//*[@id="gradingNotestext"]', NS)
    if gradingNotestext_input is None:
        grading_notes = ''
    else:
        grading_notes = form_field_value(gradingNotestext_input)
    # Instructor-attached feedback files: each table row must contain a link.
    feedbackfiles_rows = document.find(
        './/h:tbody[@id="feedbackFiles_table_body"]', NS)
    feedbackfiles = []
    for i, row in enumerate(feedbackfiles_rows or []):
        try:
            link = row.findall('.//h:a', NS)[0]
        except IndexError:
            raise blackboard.ParserError(
                "feedbackFiles_table_body row %s: no link" % i, response)
        download_link = urljoin(response.url, link.get('href'))
        filename = element_text_content(link)
        feedbackfiles.append(
            dict(filename=filename, download_link=download_link))
    # Rubric data is only present for group assignments: a URL-quoted JSON
    # blob in a hidden input named <attempt_id>_rubricEvaluation.
    rubric_data = None
    if is_group_assignment:
        rubric_input = document.find(
            './/h:input[@id="%s_rubricEvaluation"]' % attempt_id, NS)
        if rubric_input is not None:
            rubric_data_str = form_field_value(rubric_input)
            try:
                rubric_data = json.loads(unquote(rubric_data_str))
            except JSONDecodeError:
                raise ParserError("Couldn't decode JSON", response)
            t1 = 'blackboard.platform.gradebook2.GroupAttempt'
            t2 = 'blackboard.plugin.rubric.api.core.data.EvaluationEntity'
            if rubric_data['evalDataType'] == t1:
                # GroupAttempt evaluations must reference this attempt.
                if rubric_data['evalEntityId'] != attempt_id:
                    raise ParserError(
                        "evalEntityId is %r, expected %r" %
                        (rubric_data['evalEntityId'], attempt_id),
                        response)
            elif rubric_data['evalDataType'] == t2:
                # Seems to indicate an already filled-out rubric
                pass
            else:
                raise ParserError(
                    "Unknown evalDataType %r" % rubric_data['evalDataType'],
                    response)
    return dict(
        submission=submission_text,
        comments=comments,
        files=files,
        feedback=feedback,
        feedbackfiles=feedbackfiles,
        score=score,
        grading_notes=grading_notes,
        rubric_data=rubric_data,
    )