def make_exports_html():
    """Make top-level HTML page for exports directory that contains
    links to individual student pages.
    """
    # Create HTML output stream
    html_output = cStringIO.StringIO()
    begin_exports_html(html_output)
    write_html_time_stamp(html_output)

    # Walk course folders in exports directory
    course_folder_list = get_subdirs(path_consts.EXPORTS_FOLDER)
    for course_folder in course_folder_list:
        write_course_html(html_output, course_folder)

        # Walk student folders
        html_output.write('<DIV CLASS="studentlist"><UL>\n')
        student_folders_list = get_subdirs(
            os.path.join(path_consts.EXPORTS_FOLDER, course_folder))
        for student_folder in student_folders_list:
            student_folder_link = urllib.quote('/'.join(
                (course_folder, student_folder, INDEX_FILE)))
            html_output.write('<LI><A HREF="%s">%s</A></LI>\n' %
                              (student_folder_link, student_folder))
        html_output.write('</UL></DIV>\n')

    end_html(html_output)

    # We're ready to output the HTML page.
    html_file_name = os.path.join(path_consts.EXPORTS_FOLDER, INDEX_FILE)
    script_logging.log_status('Making %s' % html_file_name)
    with open(html_file_name, 'w') as ofile:
        ofile.write(html_output.getvalue())
    html_output.close()  # We're done with the output buffer
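
# get_subdirs is defined elsewhere in this module. A minimal sketch of what
# it presumably does -- list the immediate subdirectories of a folder, which
# the page generators here treat as course & student folders:
#
#     def get_subdirs(folder):
#         return sorted(d for d in os.listdir(folder)
#                       if os.path.isdir(os.path.join(folder, d)))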
def download_all_media_recordings(doDownload=True):
    """Download all media recording submissions by parsing the exports folder.

    The doDownload parameter lets us run this without actually downloading
    files, which is useful during development.
    """
    # Walk exports directory looking for submissions csv files.
    for (folder_path, dir_list, file_list) in os.walk(path_consts.EXPORTS_FOLDER):
        if path_consts.SUBMISSIONS_FILE_NAME in file_list:

            # Walk through all the student's submission attachments.
            submissions_csv_file = os.path.join(
                folder_path, path_consts.SUBMISSIONS_FILE_NAME)
            submissions = export_student_artifacts.load_csv_file(
                submissions_csv_file)
            for submission in submissions[1:]:  # Skip 1st row, which is the header.

                # Look for media_recording submissions
                if submission.submission_type == 'media_recording':
                    target_file_path = os.path.join(folder_path,
                                                    submission.media_file)
                    if doDownload:
                        http_downloader.download(submission.media_url,
                                                 target_file_path)
                    else:
                        script_logging.log_status(
                            'Download %s to %s' %
                            (submission.media_url, target_file_path))
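
# Usage sketch: a dry run logs what would be fetched without touching the
# network, which is handy for checking the csv parsing first:
#
#     download_all_media_recordings(doDownload=False)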
def load_errors():
    pattern = re.compile(REGEX_PATTERN)
    err_list = []  # Accumulate the download errors

    file_name = script_logging.ERR_FILE_NAME
    if os.path.isfile(file_name):
        with open(file_name, 'r') as f:
            for line in f:
                match = pattern.match(line)
                if match:
                    err_list.append((match.group(1), match.group(2)))

    # Let's see what download errors we found
    if len(err_list) > 0:
        # This process might still generate errors, so save the current
        # error log before starting a new one.
        cp_err_log()
        script_logging.clear_error_log()
        for err in err_list:
            # Retry the download
            download_file = err[0]
            download_url = err[1]
            http_downloader.download(download_url, download_file)
    else:
        script_logging.log_status('No download errors to retry.')
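
# REGEX_PATTERN is defined elsewhere in this module. Given the error-log
# format written by http_downloader.download --
#     Error_http_downloader '<target_file_path>', '<download_url>'
# -- a plausible definition (an assumption, not the verified original) is:
#
#     REGEX_PATTERN = r"Error_http_downloader '(.+)', '(.+)'"
#
# Group 1 is the target file path and group 2 is the download URL, matching
# the err tuple unpacking above.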
def dump_all_json():
    """Retrieve all relevant artifact data from Canvas API & store to files."""
    # Set up process logging.
    # Existence of error log file can tell us if errors occurred.
    script_logging.clear_status_log()
    script_logging.clear_error_log()

    # Pull list of courses
    courses = canvas_data.pull_courses()

    # If there are course ID parameters, just load the specified courses
    if len(sys.argv) > 1:
        course_id_list = map(int, sys.argv[1:])
        courses = [
            course for course in courses if course['id'] in course_id_list
        ]

    script_logging.log_status('Storing courses JSON to %s' % COURSES_FILE_NAME)
    with open(COURSES_FILE_NAME, 'w') as f:
        json.dump(courses, f, indent=2)

    for course in courses:
        course_id = course['id']

        # Pull students in each course
        students = canvas_data.pull_course_students(course_id)
        dump_json(students, STUDENTS_FILE_NAME, course_id, 'course students')

        # Pull users for each course.
        # We'll need this to look up comment submitters.
        users = canvas_data.pull_course_users(course_id)
        dump_json(users, USERS_FILE_NAME, course_id, 'course users')

        # Pull assignments for each course
        assignments = canvas_data.pull_assignments(course_id)
        dump_json(assignments, ASSIGNMENTS_FILE_NAME, course_id,
                  'course assignments')

        # Pull submissions for each assignment
        for assignment in assignments:
            assignment_id = assignment["id"]
            submissions = canvas_data.pull_submissions_with_comments(
                course_id, assignment_id)
            dump_json(submissions, SUBMISSIONS_FILE_NAME, assignment_id,
                      'assignment submissions')
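
# Hypothetical invocation, assuming this module calls dump_all_json() when
# run as a script -- limit the dump to two course IDs:
#
#     python json_artifacts.py 101 202
#
# sys.argv[1:] == ['101', '202'], so only those two courses are stored.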
def dump_json(json_data, file_name_template, record_id, content_description):
    """Helper function to output JSON data to an external file.

    json_data -- JSON collection to write to external file.
    file_name_template -- Template for building file name. Expected to
        contain a string substitution element, whose value is specified by
        the record_id parameter. e.g. 'json/students_%s.json'
    record_id -- Value to use in substitution to build output file name.
        e.g. 39310000000000056
    content_description -- String that describes the data being written to
        file. Used to create status log message.
    """
    file_name = file_name_template % record_id
    script_logging.log_status('Storing %s JSON to %s' %
                              (content_description, file_name))
    with open(file_name, 'w') as f:
        json.dump(json_data, f, indent=2)
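
# Example call, mirroring the docstring values: writes the collection to
# json/students_39310000000000056.json and logs
# "Storing course students JSON to json/students_39310000000000056.json":
#
#     dump_json(students, 'json/students_%s.json', 39310000000000056,
#               'course students')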
def download(download_url, target_file_path):
    """Download the file at the given URL to the target file path.

    If something bad happens, just skip the download & log it as an error.
    """
    try:
        script_logging.log_status('Download file %s' % target_file_path)
        script_logging.log_status('Download URL %s' % download_url)

        attachment_resp = requests.get(
            download_url,
            timeout=REQUEST_TIMEOUT,
            stream=True)  # Don't download content immediately
        script_logging.log_status('Status code: %s' %
                                  attachment_resp.status_code)
        script_logging.log_status('Headers: %s' % attachment_resp.headers)

        # Save attachment file to target file path.
        # Note: accessing .content reads the entire response body into
        # memory, so stream=True only defers the read until this point.
        with open(target_file_path, "wb") as ofile:
            ofile.write(attachment_resp.content)
        script_logging.log_status('Done downloading %s' % target_file_path)

    except Exception as e:
        # Escape single quotes in names so the error log entries can be
        # parsed back out for retries.
        escaped_file = target_file_path.replace("'", "\\'")
        escaped_url = download_url.replace("'", "\\'")
        script_logging.log_error("Error_http_downloader '%s', '%s'" %
                                 (escaped_file, escaped_url))
        script_logging.log_error("Error object: " + str(e))
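
# If memory use on large media files becomes a concern, the save step can
# write the streamed response in pieces instead of via .content, which
# buffers the whole body. A sketch under the same names, using the
# requests iter_content API:
def save_response_in_chunks(attachment_resp, target_file_path):
    """Write a streamed requests response to disk in 8 KB chunks."""
    with open(target_file_path, "wb") as ofile:
        for chunk in attachment_resp.iter_content(chunk_size=8192):
            if chunk:  # Skip keep-alive chunks
                ofile.write(chunk)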
def write_csv_file(file_path, csv_headers, csv_data):
    """Output student data to a csv file.

    file_path -- Full path name for output file.
    csv_headers -- Tuple containing csv headers.
    csv_data -- List of tuples containing csv data rows.
    """
    script_logging.log_status('Writing %s' % file_path)

    # 12.15.2017 The stdlib csv writer can't handle unicode characters,
    # so write through a unicode-aware csv writer that accepts an
    # encoding argument.
    with io.FileIO(file_path, 'w') as f:
        w = csv.writer(f, encoding='utf-8')
        w.writerow(csv_headers)
        w.writerows(csv_data)
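
# The encoding argument above implies a unicode-aware csv writer: the
# Python 2 stdlib csv.writer has no encoding parameter. Presumably this
# module imports the unicodecsv package under the csv name, e.g.:
#
#     import io
#     import unicodecsv as csv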
def retrieve_json():
    """Retrieve JSON data from Canvas we'll need to download student artifacts."""
    # Download the JSON files to a working directory.
    if not os.path.exists(JSON_FOLDER):
        os.makedirs(JSON_FOLDER)
    make_time_stamp_file()
    dump_all_json()

    # Report counts of what we got back.
    courses = load_courses_json()
    script_logging.log_status("Course count: %s" % len(courses))
    for course in courses:
        course_id = course['id']

        students = load_students_json(course_id)
        script_logging.log_status("Student count for course %s: %s" %
                                  (course_id, len(students)))

        users = load_users_json(course_id)
        script_logging.log_status("User count for course %s: %s" %
                                  (course_id, len(users)))

        assignments = load_assignments_json(course_id)
        script_logging.log_status("Assignment count for course %s: %s" %
                                  (course_id, len(assignments)))

        for assignment in assignments:
            assignment_id = assignment['id']
            submissions = load_submissions_json(assignment_id)
            script_logging.log_status(
                "Submission count for assignment %s: %s" %
                (assignment_id, len(submissions)))

    script_logging.log_status("Time stamp: %s" % get_time_stamp())
"""This script runs all the scripts in sequence to fully populate the
artifacts export folder from the Canvas API.

11.29.2016 tps
09.02.2017 tps Download media recording submissions.
09.24.2018 tps Retry download errors.
"""
import json_artifacts
import export_student_artifacts
import download_attachments
import download_media_recordings
import make_html
import script_logging
import retry_download_errors

script_logging.clear_logs()

script_logging.log_status('Retrieve Canvas JSON data')
json_artifacts.retrieve_json()

script_logging.log_status('Create student folders')
export_student_artifacts.write_student_folders()

script_logging.log_status('Download attachments')
download_attachments.download_all_attachments()

script_logging.log_status('Download media recordings')
download_media_recordings.download_all_media_recordings()

script_logging.log_status('Generate HTML')
make_html.make_index_pages()

script_logging.log_status('Retry download errors')
retry_download_errors.load_errors()
"""For testing, this script runs all the scripts in sequence to fully populate artifacts export folder from Canvas API, except for downloading attachments & media recordings. 08.31.2018 tps """ import json_artifacts import export_student_artifacts # import download_attachments # import download_media_recordings import make_html import script_logging script_logging.clear_logs() script_logging.log_status('Retrieve Canvas JSON data') json_artifacts.retrieve_json() script_logging.log_status('Create student folders') export_student_artifacts.write_student_folders() # script_logging.log_status('Download attachments') # download_attachments.download_all_attachments() # script_logging.log_status('Download media recordings') # download_media_recordings.download_all_media_recordings() script_logging.log_status('Generate HTML') make_html.make_index_pages()
def make_student_html():
    """Create HTML page for each student folder."""
    # Walk course folders in exports directory
    course_folder_list = get_subdirs(path_consts.EXPORTS_FOLDER)
    for course_folder in course_folder_list:

        # Walk student folders. Folder names are same as user names.
        course_folder_path = os.path.join(path_consts.EXPORTS_FOLDER,
                                          course_folder)
        student_folders_list = get_subdirs(course_folder_path)

        # Walk folder list by index number, so we can use the index
        # to generate next/prev navigation links for each page.
        for i in range(0, len(student_folders_list)):
            student_folder = student_folders_list[i]

            # Build paths we need for the student folder.
            student_folder_path = os.path.join(course_folder_path,
                                               student_folder)
            student_html_file = os.path.join(student_folder_path, INDEX_FILE)
            script_logging.log_status('Making %s' % student_html_file)

            # Start generating HTML for the student folder.
            html_output = StringIO.StringIO()  # Because cStringIO doesn't handle unicode data.
            begin_student_html(html_output, student_folder)

            # Generate navigation links
            write_html_student_navigation(html_output, i, student_folders_list)

            # Display when the data was exported.
            write_html_time_stamp(html_output)

            # Read in submission comments file.
            # We'll look for places to insert its contents as we step
            # through the submissions.
            comments_list = export_student_artifacts.load_csv_file(
                os.path.join(student_folder_path,
                             path_consts.COMMENTS_FILE_NAME))

            # Read in rubric assessments file, same as above.
            rubric_assessments_list = export_student_artifacts.load_csv_file(
                os.path.join(student_folder_path,
                             path_consts.RUBRIC_ASSESSMENTS_FILE_NAME))

            # Read in attachments file, same as above.
            attachments_list = export_student_artifacts.load_csv_file(
                os.path.join(student_folder_path,
                             path_consts.ATTACHMENTS_FILE_NAME))

            # Parse student submissions file.
            # Split list into header & data rows.
            submissions_list = export_student_artifacts.load_csv_file(
                os.path.join(student_folder_path,
                             path_consts.SUBMISSIONS_FILE_NAME))
            csv_headers = submissions_list[0]
            submissions_list = submissions_list[1:]

            # The last 3 columns are for media recording fields.
            # Replace them with a single column with a link to the
            # media recording download.
            csv_headers = list(csv_headers[:10])
            csv_headers.append('media_recording')

            # Add a column for attachments.
            csv_headers.append('attachments')

            # Add a column for submission comments.
            csv_headers.append('comments')

            # Add a column for grade.
            csv_headers.append('grade')

            # Add a column for rubric assessments.
            csv_headers.append('rubric_assessments')

            write_begin_html_table(html_output, csv_headers)
            for row in submissions_list:
                write_begin_html_table_row(html_output)

                # 1st 5 elements are just data to display.
                write_html_table_data(html_output, row[:5])

                # Next column is an HTML assignment description.
                write_html_table_data_as_html(html_output, row[5:6])

                # Next 3 elements are just data to display.
                write_html_table_data(html_output, row[6:9])

                # Next element is for URL submission.
                write_html_table_data_link(html_output, row[9], 'Online URL')

                # Next element links to the media recording download,
                # labeled with the media type.
                write_html_table_data_link(html_output, row[10], row[11])

                # Look for comments & attachments for this submission
                # in the other csv files.
                submission_id = row.submission_id

                # See if there are any attachments.
                # Doing this the dumb way, by simply looping through
                # all attachments, looking for ones that match.
                relevant_attachments = [attachments_list[0]]  # Include column headers
                for attachment in attachments_list[1:]:  # Skip header row
                    if submission_id == attachment.submission_id:
                        relevant_attachments.append(attachment)
                write_html_attachments_table(html_output, relevant_attachments)

                # See if there are any submission comments.
                # Doing this the dumb way, by simply looping through
                # all submission comments, looking for ones that match.
                relevant_comments = [comments_list[0]]  # We still need the headers.
                for comment in comments_list[1:]:  # Skip header row
                    if submission_id == comment.submission_id:
                        relevant_comments.append(comment)
                write_html_comments_table(html_output, relevant_comments)

                # Next column is for grade.
                write_html_table_data(html_output, row[13:14])

                # 01.16.2019 tps Render rubric assessments, if any.
                # Loop through all rubric assessments, looking for ones that match.
                relevant_rubric_assessments = [rubric_assessments_list[0]]  # We still need the headers.
                for rubric_assessment in rubric_assessments_list[1:]:  # Skip header row
                    if submission_id == rubric_assessment.submission_id:
                        relevant_rubric_assessments.append(rubric_assessment)
                write_html_comments_table(html_output,
                                          relevant_rubric_assessments)

                write_end_html_table_row(html_output)
            write_end_html_table(html_output)

            write_html_csv_links(html_output)  # Add links to underlying csv files.

            # We're done writing HTML for one student.
            end_html(html_output)

            # Output the student HTML file.
            with io.open(student_html_file, 'w', encoding='utf-8') as ofile:
                ofile.write(html_output.getvalue())
            html_output.close()  # We're done with the output buffer
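
# The per-row scans above are O(rows x attachments); if the csv files grow
# large, grouping each file by submission_id up front makes the per-row
# lookups constant time. A sketch, assuming the same namedtuple-style rows:
from collections import defaultdict

def group_by_submission(rows):
    """Index csv rows (header row + namedtuple data rows) by submission_id."""
    groups = defaultdict(list)
    for row in rows[1:]:  # Skip header row
        groups[row.submission_id].append(row)
    return groups

# Built once per student, each table row then does a constant-time lookup:
#
#     attachment_groups = group_by_submission(attachments_list)
#     relevant_attachments = ([attachments_list[0]] +
#                             attachment_groups.get(submission_id, []))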
def write_student_folders():
    """Create student data folders from JSON data files."""
    # Make sure the top-level exports folder exists.
    if not os.path.isdir(path_consts.EXPORTS_FOLDER):
        os.mkdir(path_consts.EXPORTS_FOLDER)

    # Include time stamp file in exports folder.
    with open(path_consts.TIME_STAMP_FILE_NAME, 'w') as f:
        f.write(json_artifacts.get_time_stamp())

    # Walk through exports for each course.
    courses = json_artifacts.load_courses_json()
    for course in courses:

        # Course data needed for export records
        course_id = course['id']
        course_name = course['name']
        course_code = course['course_code']
        course_start_at = course['start_at']

        # Dictionary for resolving user IDs of students & commenters
        # to their user names.
        user_lookup = json_artifacts.create_user_lookup(
            json_artifacts.load_users_json(course_id))

        # Create dictionary collection used to accumulate submissions by student.
        students_dict = {}
        students = json_artifacts.load_students_json(course_id)
        for student in students:
            # Derive a user name to associate with the student record,
            # instead of using the internal Canvas ID.
            # We might not be able to derive a user name from the student
            # record, in which case we have to skip it.
            user_name = get_user_name(student)
            if user_name is None:
                continue

            # Structure to accumulate submission data for each student in course.
            students_dict[student['id']] = {
                DICT_KEY_USER_NAME: user_name,
                DICT_KEY_SUBMISSIONS: [],
                DICT_KEY_COMMENTS: [],
                DICT_KEY_ATTACHMENTS: [],
                DICT_KEY_RUBRIC_ASSESSMENTS: []
            }

        # Walk through assignments in the course.
        assignments = json_artifacts.load_assignments_json(course_id)
        for assignment in assignments:
            # Assume assignments are ordered by position.

            # Assignment data needed for export records.
            assignment_id = assignment['id']
            assignment_name = assignment['name']

            # Encode weird unicode characters as utf-8.
            # The description string might be null.
            assignment_description = assignment['description']
            if assignment_description is None:
                assignment_description = ""
            assignment_description = assignment_description.encode("utf-8")

            # Walk through submissions for the assignment.
            submissions = json_artifacts.load_submissions_json(assignment_id)
            for submission in submissions:

                # Submission data needed for export records.
                submission_id = submission['id']
                submitter_id = submission['user_id']
""" # submission_rubric_pts = None # 01.16.2019 tps No longer used to generate HTML pages # submission_rubric_comments = None # 01.16.2019 tps No longer used to generate HTML pages # if 'rubric_assessment' in submission: # # script_logging.log_status( # # '**** Found rubric of type %s of length %s for course %s, assignment %s, submission %s, user %s' # # % (type(submission['rubric_assessment']), len(submission['rubric_assessment']), course_id, assignment_id, submission_id, submitter_id)) # # rubric_dict = submission['rubric_assessment'] # # rubric_element = rubric_dict[rubric_dict.keys()[0]] # # if 'points' in rubric_element: # # submission_rubric_pts = rubric_element['points'] # # if rubric_element['comments'] is not None: # # submission_rubric_comments = rubric_element['comments'] # # # script_logging.log_status( # # '**** Found rubric points %s comments %s' # # % (submission_rubric_pts, submission_rubric_comments)) # # # 01.16.2019 tps Gather multiple rubric assessments into a flat data structure # # The rubric description & ratings come from the assignment record. # # The keys in the rubric assessment dictionary can be matched to IDs of # # rubric objects in the assignment record. # rubric_list = assignment['rubric'] # for rubric_id, assessment in rubric_dict.iteritems(): # Loop through assessments # # # There might not be a "points" field in the assessment, in which case # # there is nothing for us to report. # if 'points' not in assessment: # continue # # # The "comments" value might be null, in which case use an empty string. # rubric_assessment_comments = '' # if assessment['comments'] is not None: # rubric_assessment_comments = assessment['comments'] # # # Find the rubric description # rubric = next(( x for x in rubric_list if x['id'] == rubric_id), None) # # # Find the corresponding assessment rating # # There may not be corresponding rating object for the points value # rating = next(( x for x in rubric['ratings'] if x['points'] == assessment['points']), None) # rating_description = '' # if rating is not None: # rating_description = rating['description'] # # rubric_assessment = ( # submission_id, # rubric['description'], # assessment['points'], # rating_description, # rubric_assessment_comments.encode("utf-8") # ) # students_dict[submitter_id][DICT_KEY_RUBRIC_ASSESSMENTS].append(rubric_assessment) # We've found cases where the submitter is "Test Student", # which is a student created by Canvas for impersonation purposes. # Since this is not a real student, it won't be found in the student # collection, & it should be OK to skip these submissions. # We've found cases where the submitter is a student with pending # enrollment & the user object retrieved from Canvas doesn't # have email login ID. This means it won't be found in the # student_dict collection, & it should be OK to skip it. if (submitter_id in user_lookup) and (submitter_id in students_dict): submission_user = user_lookup[submitter_id] submission_user_name = get_user_name(submission_user) submitted_at = submission['submitted_at'] submission_type = submission['submission_type'] submission_body = submission['body'] if submission_body is not None: # Encode weird unicode characters as utf-8 submission_body = submission_body.encode("utf-8") submission_url = submission['url'] # 09.02.2017 tps # Collect data for media_recording submission, which needs to be # downloaded separately. # Assume that all the media recordings are video/mp4 files. 
                    submission_media_file = None  # Name for downloaded file
                    submission_media_type = None
                    submission_media_url = None
                    if submission_type == 'media_recording':
                        media_comment = submission['media_comment']
                        submission_media_file = media_comment['media_id'] + '.mp4'
                        submission_media_type = media_comment['media_type']
                        submission_media_url = media_comment['url']

                    # Gather student's submission data.
                    submission_data = (
                        submission_id,
                        submission_user_name,
                        course_name,
                        course_start_at,
                        assignment_name,
                        assignment_description,
                        submitted_at,
                        submission_type,
                        submission_body,
                        submission_url,
                        submission_media_file,
                        submission_media_type,
                        submission_media_url,
                        submission['grade'])
                    students_dict[submitter_id][DICT_KEY_SUBMISSIONS].append(
                        submission_data)

                    # Gather submission comments, if any.
                    submission_comments = submission[DICT_KEY_COMMENTS]
                    for submission_comment in submission_comments:

                        # Extract items we need for export records.
                        comment_created_at = submission_comment['created_at']
                        comment = submission_comment['comment'].encode("utf-8")

                        # 01.11.2019 tps We can always retrieve the author's
                        # display name from the submission record.
                        author_user_name = submission_comment['author']['display_name']

                        # 01.11.2019 tps We'd prefer to show the user's login
                        # name, but we've seen cases where the comment author
                        # was dropped from the course, in which case there is
                        # no user record for them.
                        comment_author_id = submission_comment['author_id']
                        if comment_author_id in user_lookup:
                            commenter = user_lookup[comment_author_id]
                            author_user_name = get_user_name(commenter)
                        else:
                            script_logging.log_error(
                                'Encountered submission comment in course %s (%s), assignment %s (%s), submitted by user %s (%s) who is not enrolled in the course'
                                % (course_name, course_id, assignment_name,
                                   assignment_id, author_user_name,
                                   comment_author_id))

                        # Accumulate submission comments for the student.
                        comment_data = (submission_id, comment_created_at,
                                        comment, author_user_name)
                        students_dict[submitter_id][DICT_KEY_COMMENTS].append(
                            comment_data)

                    # Gather submission attachments, if any.
                    if 'attachments' in submission:
                        for submission_attachment in submission['attachments']:
                            # Accumulate submission attachment data for the student.
                            # 05.08.2019 tps If there is a thumbnail for the
                            # attachment, make up a name for its thumbnail file,
                            # since Canvas doesn't provide names for thumbnail
                            # files. Assume all thumbnails are PNG images.
                            thumbnail_file = None
                            if submission_attachment['thumbnail_url']:
                                thumbnail_file = 'thumb' + str(
                                    hash(submission_attachment['thumbnail_url'])) + '.png'

                            attachment_data = (
                                submission_id,
                                submission_attachment['url'],
                                submission_attachment['filename'],
                                submission_attachment['display_name'],
                                submission_attachment['thumbnail_url'],
                                thumbnail_file)
                            students_dict[submitter_id][DICT_KEY_ATTACHMENTS].append(
                                attachment_data)

                    # 01.16.2019 tps Gather multiple rubric assessments into
                    # a flat data structure. A submission's rubric assessments
                    # arrive as a dictionary keyed by rubric criterion ID
                    # (e.g. "_4848"). The rubric description & ratings come
                    # from the assignment record: the keys in the rubric
                    # assessment dictionary can be matched to IDs of rubric
                    # objects in the assignment record.
                    if 'rubric_assessment' in submission:
                        rubric_dict = submission['rubric_assessment']
                        rubric_list = assignment['rubric']
                        for rubric_id, assessment in rubric_dict.iteritems():

                            # There might not be a "points" field in the
                            # assessment, in which case there is nothing
                            # for us to report.
                            if 'points' not in assessment:
                                continue

                            # The "comments" value might be null, in which
                            # case use an empty string.
                            rubric_assessment_comments = ''
                            if assessment['comments'] is not None:
                                rubric_assessment_comments = assessment['comments']

                            # Find the rubric description.
                            rubric = next(
                                (x for x in rubric_list if x['id'] == rubric_id),
                                None)

                            # Guard against a criterion ID with no matching
                            # rubric object in the assignment record.
                            if rubric is None:
                                continue

                            # Find the corresponding assessment rating.
                            # There may not be a corresponding rating object
                            # for the points value.
                            rating = next(
                                (x for x in rubric['ratings']
                                 if x['points'] == assessment['points']), None)
                            rating_description = ''
                            if rating is not None:
                                rating_description = rating['description']

                            rubric_assessment = (
                                submission_id,
                                rubric['description'],
                                assessment['points'],
                                rating_description,
                                rubric_assessment_comments.encode("utf-8"))
                            students_dict[submitter_id][
                                DICT_KEY_RUBRIC_ASSESSMENTS].append(
                                    rubric_assessment)

                else:
                    # Mention that we skipped a submission.
                    script_logging.log_status(
                        'Skipped submission for course %s, assignment %s, submission %s, user %s'
                        % (course_id, assignment_id, submission_id,
                           submitter_id))

        # Make a file directory to hold course data, named after the course.
        course_folder_name = os.path.join(path_consts.EXPORTS_FOLDER,
                                          course_name)
        if not os.path.isdir(course_folder_name):
            os.mkdir(course_folder_name)

        # We're ready to output all the course's student data csv files.
        for user_id, student_data in students_dict.items():

            # Each student gets their own folder, named by their user name.
            user_name = student_data[DICT_KEY_USER_NAME]
            student_folder_name = os.path.join(course_folder_name, user_name)
            if not os.path.isdir(student_folder_name):
                os.mkdir(student_folder_name)

            # Write submissions file.
            file_path = os.path.join(student_folder_name,
                                     path_consts.SUBMISSIONS_FILE_NAME)
            write_csv_file(file_path, SUBMISSION_HEADERS,
                           student_data[DICT_KEY_SUBMISSIONS])

            # Write submission comments file.
            file_path = os.path.join(student_folder_name,
                                     path_consts.COMMENTS_FILE_NAME)
            write_csv_file(file_path, SUBMISSION_COMMENT_HEADERS,
                           student_data[DICT_KEY_COMMENTS])

            # Write submission attachments file.
            file_path = os.path.join(student_folder_name,
                                     path_consts.ATTACHMENTS_FILE_NAME)
            write_csv_file(file_path, SUBMISSION_ATTACHMENT_HEADERS,
                           student_data[DICT_KEY_ATTACHMENTS])

            # Write rubric assessments file.
            file_path = os.path.join(student_folder_name,
                                     path_consts.RUBRIC_ASSESSMENTS_FILE_NAME)
            write_csv_file(file_path, SUBMISSION_RUBRIC_ASSESSMENT_HEADERS,
                           student_data[DICT_KEY_RUBRIC_ASSESSMENTS])
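
# For reference, the rubric_assessment shape the loop above consumes, as
# documented in the 08.02.2018 notes that accompanied the original
# implementation:
#
#     "rubric_assessment": {
#         "_4848": {           # Keyed by rubric criterion ID
#             "points": 3.0,   # The 'points' field may be absent
#             "comments": ""   # The 'comments' value may be null
#         }
#     }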
def cp_err_log():
    """Save a copy of the current error log before retrying downloads."""
    err_log_file = script_logging.ERR_FILE_NAME
    err_log_cp = make_cp_file_name()
    script_logging.log_status('Copying error log %s to %s' %
                              (err_log_file, err_log_cp))
    shutil.copy(err_log_file, err_log_cp)
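
# make_cp_file_name is defined elsewhere in this module. A hypothetical
# sketch of its intent -- produce a unique, timestamped copy name so
# successive retry passes don't overwrite previously saved error logs:
#
#     def make_cp_file_name():
#         return '%s.%s' % (script_logging.ERR_FILE_NAME,
#                           time.strftime('%Y%m%d%H%M%S'))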