def evaluate_cv_course(label_type, k=5, label_col="label_type", raw_data_dir="morf-data/", course_col="course",
                       fold_col="fold_num", pred_cols=("prob", "pred"), user_col="userID"):
    """
    Fetch metrics by first averaging over folds within each course, then returning results by course.
    :param label_type: label type defined by user.
    :param k: number of cross-validation folds.
    :param label_col: column containing labels.
    :param raw_data_dir: path to directory in each raw data bucket containing course-level directories.
    :param course_col: column containing course identifier.
    :param fold_col: column containing the fold number.
    :param pred_cols: user-supplied prediction columns; these columns will be checked for missing values and to
        ensure they contain values for every user in the course.
    :param user_col: column containing user ID for predictions.
    :return: None.
    """
    job_config = MorfJobConfig(CONFIG_FILENAME)
    job_config.update_mode(mode)
    check_label_type(label_type)
    raw_data_buckets = job_config.raw_data_buckets
    proc_data_bucket = job_config.proc_data_bucket
    s3 = job_config.initialize_s3()
    # clear any preexisting data for this user/job/mode
    clear_s3_subdirectory(job_config)
    course_data = []
    for raw_data_bucket in raw_data_buckets:
        pred_file = generate_archive_filename(job_config, mode="test", extension="csv")
        pred_key = make_s3_key_path(job_config, pred_file, mode="test")
        # download course prediction and label files, fetch classification metrics at course level
        with tempfile.TemporaryDirectory(dir=os.getcwd()) as working_dir:
            pred_csv = download_from_s3(proc_data_bucket, pred_key, s3, working_dir, job_config=job_config)
            # set mode to cv to fetch correct labels for sessions even if they are train/test sessions
            job_config.update_mode("cv")
            label_csv = initialize_labels(job_config, raw_data_bucket, None, None, label_type, working_dir,
                                          raw_data_dir, level="all")
            pred_df = pd.read_csv(pred_csv)
            lab_df = pd.read_csv(label_csv, dtype=object)
            pred_lab_df = pd.merge(lab_df, pred_df, how="left", on=[user_col, course_col])
            check_dataframe_complete(pred_lab_df, job_config, columns=list(pred_cols))
            for course in fetch_complete_courses(job_config, data_bucket=raw_data_bucket, data_dir=raw_data_dir,
                                                 n_train=1):
                fold_metrics_list = list()
                for fold_num in range(1, k + 1):
                    fold_metrics_df = fetch_binary_classification_metrics(
                        job_config, pred_lab_df[pred_lab_df[fold_col] == fold_num], course)
                    fold_metrics_list.append(fold_metrics_df)
                assert len(fold_metrics_list) == k, \
                    "something is wrong; number of folds doesn't match. Try running the job again from scratch."
                course_metrics_df = pd.concat(fold_metrics_list).mean()
                course_metrics_df[course_col] = course
                course_data.append(course_metrics_df)
    job_config.update_mode(mode)
    master_metrics_df = pd.concat(course_data, axis=1).T
    # reorder dataframe so the course name is the first column
    cols = list(master_metrics_df)
    cols.insert(0, cols.pop(cols.index(course_col)))
    master_metrics_df = master_metrics_df.loc[:, cols]
    csv_fp = generate_archive_filename(job_config, extension="csv")
    master_metrics_df[course_col] = hash_df_column(master_metrics_df[course_col], job_config.user_id,
                                                   job_config.hash_secret)
    master_metrics_df.to_csv(csv_fp, index=False, header=True)
    upload_key = make_s3_key_path(job_config, mode="test", filename=csv_fp)
    upload_file_to_s3(csv_fp, bucket=proc_data_bucket, key=upload_key)
    os.remove(csv_fp)
    return
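

# Minimal, self-contained sketch (not part of the MORF API) of the fold-averaging pattern used in
# evaluate_cv_course() above: one single-row DataFrame of metrics per fold, averaged into one row per
# course, with the course column moved to the front. The metric names and course value are toy
# placeholders, not guaranteed outputs of fetch_binary_classification_metrics().
def _example_average_fold_metrics():
    """Toy illustration of averaging per-fold metric rows into a single course-level row."""
    import pandas as pd
    fold_dfs = [pd.DataFrame({"auc": [0.71], "accuracy": [0.80]}),
                pd.DataFrame({"auc": [0.75], "accuracy": [0.82]})]
    course_metrics = pd.concat(fold_dfs).mean()      # Series: mean of each metric over folds
    course_metrics["course"] = "example-course"
    master = pd.concat([course_metrics], axis=1).T   # one row per course
    cols = list(master)
    cols.insert(0, cols.pop(cols.index("course")))   # move the course column to the front
    return master.loc[:, cols]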
def evaluate_prule_session():
    """
    Perform statistical testing for prule analysis.
    :return: None.
    """
    raw_data_dir = "morf-data/"
    job_config = MorfJobConfig(CONFIG_FILENAME)
    job_config.update_mode(mode)
    logger = set_logger_handlers(module_logger, job_config)
    raw_data_buckets = job_config.raw_data_buckets
    proc_data_bucket = job_config.proc_data_bucket
    prule_file = job_config.prule_url
    s3 = job_config.initialize_s3()
    # clear any preexisting data for this user/job/mode
    clear_s3_subdirectory(job_config)
    with tempfile.TemporaryDirectory(dir=os.getcwd()) as working_dir:
        input_dir, output_dir = initialize_input_output_dirs(working_dir)
        # pull extraction results from every course into working_dir
        for raw_data_bucket in raw_data_buckets:
            for course in fetch_courses(job_config, raw_data_bucket):
                for session in fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course,
                                              fetch_all_sessions=True):
                    if session in fetch_sessions(job_config, raw_data_bucket, raw_data_dir, course):
                        # session is a non-holdout session
                        fetch_mode = "extract"
                    else:
                        fetch_mode = "extract-holdout"
                    feat_file = generate_archive_filename(job_config, course=course, session=session, mode=fetch_mode)
                    feat_key = make_s3_key_path(job_config, filename=feat_file, course=course, session=session,
                                                mode=fetch_mode)
                    feat_local_fp = download_from_s3(proc_data_bucket, feat_key, s3, input_dir, job_config=job_config)
                    unarchive_file(feat_local_fp, input_dir)
        docker_image_fp = urlparse(job_config.prule_evaluate_image).path
        docker_image_dir = os.path.dirname(docker_image_fp)
        docker_image_name = os.path.basename(docker_image_fp)
        image_uuid = load_docker_image(docker_image_dir, job_config, logger, image_name=docker_image_name)
        # create a directory for the prule file and copy it in; this will be mounted to the docker image
        prule_dir = os.path.join(working_dir, "prule")
        os.makedirs(prule_dir)
        shutil.copy(urlparse(prule_file).path, prule_dir)
        cmd = "{} run --network=\"none\" --rm=true --volume={}:/input --volume={}:/output --volume={}:/prule {}".format(
            job_config.docker_exec, input_dir, output_dir, prule_dir, image_uuid)
        subprocess.call(cmd, shell=True)
        # rename result file and upload results to s3
        final_output_file = os.path.join(output_dir, "output.csv")
        final_output_archive_name = generate_archive_filename(job_config, extension="csv")
        final_output_archive_fp = os.path.join(output_dir, final_output_archive_name)
        os.rename(final_output_file, final_output_archive_fp)
        output_key = make_s3_key_path(job_config, filename=final_output_archive_name, mode="test")
        upload_file_to_s3(final_output_archive_fp, proc_data_bucket, output_key, job_config, remove_on_success=True)
    return
def test_all(label_type):
    """
    Test a single overall model on the entire dataset using the Docker image.
    :param label_type: label type defined by user.
    :return: None.
    """
    level = "all"
    job_config = MorfJobConfig(CONFIG_FILENAME)
    job_config.update_mode(mode)
    check_label_type(label_type)
    # clear any preexisting data for this user/job/mode
    clear_s3_subdirectory(job_config)
    run_image(job_config, job_config.raw_data_buckets, level=level, label_type=label_type)
    # fetch archived result file and push csv result back to s3, mimicking the session- and course-level workflow
    result_file = collect_all_results(job_config)
    upload_key = make_s3_key_path(job_config, filename=generate_archive_filename(job_config, extension="csv"))
    upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
    os.remove(result_file)
    send_email_alert(job_config)
    return
def evaluate_course(label_type, label_col="label_type", raw_data_dir="morf-data/", course_col="course",
                    pred_cols=("prob", "pred"), user_col="userID", labels_file="labels-test.csv"):
    """
    Fetch metrics by course.
    :param label_type: label type defined by user.
    :param label_col: column containing labels.
    :param raw_data_dir: path to directory in each raw data bucket containing course-level directories.
    :param course_col: column containing course identifier.
    :param pred_cols: user-supplied prediction columns; these columns will be checked for missing values and to
        ensure they contain values for every user in the course.
    :param user_col: column containing user ID for predictions.
    :param labels_file: name of csv file containing labels.
    :return: None.
    """
    job_config = MorfJobConfig(CONFIG_FILENAME)
    job_config.update_mode(mode)
    check_label_type(label_type)
    raw_data_buckets = job_config.raw_data_buckets
    proc_data_bucket = job_config.proc_data_bucket
    s3 = job_config.initialize_s3()
    # clear any preexisting data for this user/job/mode
    clear_s3_subdirectory(job_config)
    course_data = []
    for raw_data_bucket in raw_data_buckets:
        pred_file = generate_archive_filename(job_config, mode="test", extension="csv")
        pred_key = "{}/{}/{}/{}".format(job_config.user_id, job_config.job_id, "test", pred_file)
        label_key = raw_data_dir + labels_file
        # download course prediction and label files, fetch classification metrics at course level
        with tempfile.TemporaryDirectory(dir=os.getcwd()) as working_dir:
            download_from_s3(proc_data_bucket, pred_key, s3, working_dir, job_config=job_config)
            download_from_s3(raw_data_bucket, label_key, s3, working_dir, job_config=job_config)
            pred_df = pd.read_csv(os.path.join(working_dir, pred_file))
            lab_df = pd.read_csv(os.path.join(working_dir, labels_file), dtype=object)
            lab_df = lab_df[lab_df[label_col] == label_type].copy()
            pred_lab_df = pd.merge(lab_df, pred_df, how="left", on=[user_col, course_col])
            check_dataframe_complete(pred_lab_df, job_config, columns=pred_cols)
            for course in fetch_complete_courses(job_config, data_bucket=raw_data_bucket, data_dir=raw_data_dir,
                                                 n_train=1):
                course_metrics_df = fetch_binary_classification_metrics(job_config, pred_lab_df, course)
                course_data.append(course_metrics_df)
    master_metrics_df = pd.concat(course_data).reset_index().rename(columns={"index": course_col})
    csv_fp = generate_archive_filename(job_config, extension="csv")
    master_metrics_df[course_col] = hash_df_column(master_metrics_df[course_col], job_config.user_id,
                                                   job_config.hash_secret)
    master_metrics_df.to_csv(csv_fp, index=False, header=True)
    upload_key = make_s3_key_path(job_config, mode="test", filename=csv_fp)
    upload_file_to_s3(csv_fp, bucket=proc_data_bucket, key=upload_key)
    os.remove(csv_fp)
    return
def collect_course_cv_results(job_config, k=5, raw_data_dir="morf-data/"):
    """
    Iterate through course-level directories in each raw data bucket, download the per-fold test results,
    add columns for course and fold number, and concatenate into a single 'master' csv.
    :param job_config: MorfJobConfig object.
    :param k: number of cross-validation folds.
    :param raw_data_dir: path to directory in each raw data bucket containing course-level directories.
    :return: path to the master csv.
    """
    logger = set_logger_handlers(module_logger, job_config)
    raw_data_buckets = job_config.raw_data_buckets
    mode = job_config.mode
    pred_df_list = list()
    for raw_data_bucket in raw_data_buckets:
        for course in fetch_complete_courses(job_config, raw_data_bucket):
            with tempfile.TemporaryDirectory(dir=os.getcwd()) as working_dir:
                for fold_num in range(1, k + 1):
                    logger.info("fetching {} results for course {} fold {}".format(mode, course, fold_num))
                    try:
                        fold_csv_name = "{}_{}_test.csv".format(course, fold_num)
                        key = make_s3_key_path(job_config, course, fold_csv_name, mode="test")
                        pred_fp = download_from_s3(job_config.proc_data_bucket, key, job_config.initialize_s3(),
                                                   working_dir, dest_filename=fold_csv_name)
                        pred_df = pd.read_csv(pred_fp, dtype=object)
                        pred_df["course"] = course
                        pred_df["fold_num"] = str(fold_num)
                        pred_df_list.append(pred_df)
                    except Exception as e:
                        logger.warning("exception occurred: {}".format(e))
                        continue
    master_feat_df = pd.concat(pred_df_list)
    csv_fp = generate_archive_filename(job_config, mode="test", extension="csv")
    master_feat_df.to_csv(csv_fp, index=False, header=True)
    return csv_fp
def test_course(label_type, raw_data_dir="morf-data/", multithread=True):
    """
    Test one model per course using the Docker image.
    :param label_type: label type provided by user.
    :param raw_data_dir: path to directory in all data buckets where course-level directories are located; this
        should be uniform for every raw data bucket.
    :param multithread: whether to run the job in parallel (multithread=False can be useful for debugging).
    :return: None.
    """
    level = "course"
    job_config = MorfJobConfig(CONFIG_FILENAME)
    job_config.update_mode(mode)
    logger = set_logger_handlers(module_logger, job_config)
    check_label_type(label_type)
    # clear any preexisting data for this user/job/mode
    clear_s3_subdirectory(job_config)
    if multithread:
        num_cores = job_config.max_num_cores
    else:
        num_cores = 1
    # for each bucket, call job_runner once per course with --mode=test and --level=course
    for raw_data_bucket in job_config.raw_data_buckets:
        logger.info("processing bucket {}".format(raw_data_bucket))
        courses = fetch_complete_courses(job_config, raw_data_bucket, raw_data_dir)
        reslist = []
        with Pool(num_cores) as pool:
            for course in courses:
                poolres = pool.apply_async(run_image, [job_config, raw_data_bucket, course, None, level, label_type])
                reslist.append(poolres)
            pool.close()
            pool.join()
        for res in reslist:
            logger.info(res.get())
    result_file = collect_course_results(job_config)
    upload_key = make_s3_key_path(job_config, filename=generate_archive_filename(job_config, extension="csv"))
    upload_file_to_s3(result_file, bucket=job_config.proc_data_bucket, key=upload_key)
    os.remove(result_file)
    send_email_alert(job_config)
    return
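

# Minimal, self-contained sketch of the Pool.apply_async() pattern used in test_course() and
# cross_validate_course(): submit one task per item, close and join the pool, then collect results
# via .get(), which re-raises any exception from the worker. The worker here is a toy stand-in
# (len), not run_image(); on platforms that use the "spawn" start method, call this under an
# `if __name__ == "__main__":` guard.
def _example_pool_pattern(words=("alpha", "beta", "gamma"), num_cores=2):
    from multiprocessing import Pool

    results = []
    with Pool(num_cores) as pool:
        async_results = [pool.apply_async(len, [w]) for w in words]  # toy worker in place of run_image()
        pool.close()
        pool.join()                    # wait for all workers before collecting results
        for res in async_results:
            results.append(res.get())  # .get() re-raises any worker exception
    return results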
def cross_validate_course(label_type, k=5, multithread=True):
    """
    Compute k-fold cross-validation across courses.
    :param label_type: label type defined by user.
    :param k: number of folds.
    :param multithread: whether to run the job in parallel (multithread=False can be useful for debugging).
    :return: None.
    """
    # todo: call to create_course_folds() goes here
    job_config = MorfJobConfig(CONFIG_FILENAME)
    job_config.update_mode(mode)
    # clear previous test results
    clear_s3_subdirectory(job_config, mode="test")
    # directory the function is called from; should contain the docker image
    docker_image_dir = os.getcwd()
    logger = set_logger_handlers(module_logger, job_config)
    if multithread:
        num_cores = job_config.max_num_cores
    else:
        num_cores = 1
    logger.info("conducting cross validation")
    for raw_data_bucket in job_config.raw_data_buckets:
        reslist = []
        with Pool(num_cores) as pool:
            for course in fetch_complete_courses(job_config, raw_data_bucket):
                for fold_num in range(1, k + 1):
                    poolres = pool.apply_async(execute_image_for_cv,
                                               [job_config, raw_data_bucket, course, fold_num, docker_image_dir,
                                                label_type])
                    reslist.append(poolres)
            pool.close()
            pool.join()
        for res in reslist:
            logger.info(res.get())
    test_csv_fp = collect_course_cv_results(job_config)
    pred_key = make_s3_key_path(job_config, os.path.basename(test_csv_fp), mode="test")
    upload_file_to_s3(test_csv_fp, job_config.proc_data_bucket, pred_key, job_config, remove_on_success=True)
    return
def cross_validate_session(label_type, k=5, multithread=True, raw_data_dir="morf-data/"):
    """
    Compute k-fold cross-validation across sessions.
    :param label_type: type of outcome label to use.
    :param k: number of folds.
    :param multithread: whether to run the job in parallel (not yet used).
    :param raw_data_dir: path to directory in each raw data bucket containing course-level directories.
    :return: None.
    """
    raise NotImplementedError  # not yet implemented; the code below is a work in progress
    # todo: call to create_session_folds() goes here
    job_config = MorfJobConfig(CONFIG_FILENAME)
    job_config.update_mode(mode)
    logger = set_logger_handlers(module_logger, job_config)
    # clear any preexisting data for this user/job/mode
    # clear_s3_subdirectory(job_config)
    if multithread:
        num_cores = job_config.max_num_cores
    else:
        num_cores = 1
    logger.info("conducting cross validation")
    with Pool(num_cores) as pool:
        for raw_data_bucket in job_config.raw_data_buckets:
            for course in fetch_complete_courses(job_config, raw_data_bucket):
                for session in fetch_sessions(job_config, raw_data_bucket, data_dir=raw_data_dir, course=course,
                                              fetch_all_sessions=True):
                    for fold_num in range(1, k + 1):
                        with tempfile.TemporaryDirectory(dir=job_config.local_working_directory) as working_dir:
                            # get fold train data
                            input_dir, output_dir = initialize_input_output_dirs(working_dir)
                            session_input_dir = os.path.join(input_dir, course, session)
                            session_output_dir = os.path.join(output_dir, course, session)
                            trainkey = make_s3_key_path(job_config, course,
                                                        make_feature_csv_name(course, session, fold_num, "train"),
                                                        session)
                            train_data_path = download_from_s3(job_config.proc_data_bucket, trainkey,
                                                               job_config.initialize_s3(), dir=session_input_dir,
                                                               job_config=job_config)
                            testkey = make_s3_key_path(job_config, course,
                                                       make_feature_csv_name(course, session, fold_num, "test"),
                                                       session)
                            test_data_path = download_from_s3(job_config.proc_data_bucket, testkey,
                                                              job_config.initialize_s3(), dir=session_input_dir,
                                                              job_config=job_config)
                            # get labels
                            initialize_labels(job_config, raw_data_bucket, course, session, label_type,
                                              session_input_dir, raw_data_dir)
                            # todo: run docker image with mode == cv
                            # todo: upload results
        pool.close()
        pool.join()
    return
def make_folds(job_config, raw_data_bucket, course, k, label_type, raw_data_dir="morf-data/"):
    """
    Utility function called by create_course_folds to create the cross-validation folds for a single course.
    :param job_config: MorfJobConfig object.
    :param raw_data_bucket: bucket containing raw data for the course.
    :param course: course identifier.
    :param k: number of folds.
    :param label_type: type of outcome label to use.
    :param raw_data_dir: path to directory in raw_data_bucket containing course-level directories.
    :return: None.
    """
    logger = set_logger_handlers(module_logger, job_config)
    user_id_col = "userID"
    label_col = "label_value"
    logger.info("creating cross-validation folds for course {}".format(course))
    with tempfile.TemporaryDirectory(dir=job_config.local_working_directory) as working_dir:
        input_dir, output_dir = initialize_input_output_dirs(working_dir)
        # download feature and label data for each session of the course
        for session in fetch_sessions(job_config, raw_data_bucket, data_dir=raw_data_dir, course=course,
                                      fetch_all_sessions=True):
            download_train_test_data(job_config, raw_data_bucket, raw_data_dir, course, session, input_dir,
                                     label_type=label_type)
        # merge session-level features and labels to ensure the course-level splits are correct
        feat_csv_path = aggregate_session_input_data("features", os.path.join(input_dir, course))
        label_csv_path = aggregate_session_input_data("labels", os.path.join(input_dir, course))
        feat_df = pd.read_csv(feat_csv_path, dtype=object)
        label_df = pd.read_csv(label_csv_path, dtype=object)
        feat_label_df = pd.merge(feat_df, label_df, on=user_id_col)
        if feat_df.shape[0] != label_df.shape[0]:
            logger.error("number of observations in extracted features and labels do not match for course {}; "
                         "features contains {} and labels contains {} observations"
                         .format(course, feat_df.shape[0], label_df.shape[0]))
        # create the folds
        logger.info("creating cv splits with k = {} for course {}".format(k, course))
        skf = StratifiedKFold(n_splits=k, shuffle=True)
        folds = skf.split(np.zeros(feat_label_df.shape[0]), feat_label_df.label_value)
        for fold_num, train_test_indices in enumerate(folds, 1):
            # write each fold's train/test data to csv and push to s3
            train_index, test_index = train_test_indices
            train_df = feat_label_df.loc[train_index, ].drop(label_col, axis=1)
            test_df = feat_label_df.loc[test_index, ].drop(label_col, axis=1)
            train_df_name = os.path.join(working_dir, make_feature_csv_name(course, fold_num, "train"))
            test_df_name = os.path.join(working_dir, make_feature_csv_name(course, fold_num, "test"))
            train_df.to_csv(train_df_name, index=False)
            test_df.to_csv(test_df_name, index=False)
            # upload to s3
            try:
                train_key = make_s3_key_path(job_config, course, os.path.basename(train_df_name))
                upload_file_to_s3(train_df_name, job_config.proc_data_bucket, train_key, job_config,
                                  remove_on_success=True)
                test_key = make_s3_key_path(job_config, course, os.path.basename(test_df_name))
                upload_file_to_s3(test_df_name, job_config.proc_data_bucket, test_key, job_config,
                                  remove_on_success=True)
            except Exception as e:
                logger.warning("exception occurred while uploading cv results: {}".format(e))
    return
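

# Minimal, self-contained sketch of the StratifiedKFold splitting pattern used in make_folds() and
# create_session_folds(): folds are numbered from 1 and the label column is dropped from the train/test
# feature frames before they are written out. The "userID" and "label_value" column names come from the
# functions above; "feat_1" and the values are toy data.
def _example_stratified_folds(k=2):
    import numpy as np
    import pandas as pd
    from sklearn.model_selection import StratifiedKFold

    feat_label_df = pd.DataFrame({"userID": list(range(8)),
                                  "feat_1": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
                                  "label_value": [0, 1, 0, 1, 0, 1, 0, 1]})
    skf = StratifiedKFold(n_splits=k, shuffle=True)
    folds = skf.split(np.zeros(feat_label_df.shape[0]), feat_label_df.label_value)
    for fold_num, (train_index, test_index) in enumerate(folds, 1):
        train_df = feat_label_df.loc[train_index, ].drop("label_value", axis=1)
        test_df = feat_label_df.loc[test_index, ].drop("label_value", axis=1)
        print(fold_num, train_df.shape, test_df.shape)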
def execute_image_for_cv(job_config, raw_data_bucket, course, fold_num, docker_image_dir, label_type,
                         raw_data_dir="morf-data/"):
    """
    Run the user-provided Docker image for a single course/fold of course-level cross-validation.
    :param job_config: MorfJobConfig object.
    :param raw_data_bucket: bucket containing raw data for the course.
    :param course: course identifier.
    :param fold_num: fold number to execute.
    :param docker_image_dir: directory containing the docker image.
    :param label_type: type of outcome label to use.
    :param raw_data_dir: path to directory in raw_data_bucket containing course-level directories.
    :return: None.
    """
    user_id_col = "userID"
    logger = set_logger_handlers(module_logger, job_config)
    with tempfile.TemporaryDirectory(dir=job_config.local_working_directory) as working_dir:
        input_dir, output_dir = initialize_input_output_dirs(working_dir)
        # get fold train and test data
        course_input_dir = os.path.join(input_dir, course)
        trainkey = make_s3_key_path(job_config, course, make_feature_csv_name(course, fold_num, "train"))
        train_data_path = download_from_s3(job_config.proc_data_bucket, trainkey, job_config.initialize_s3(),
                                           dir=course_input_dir, job_config=job_config)
        testkey = make_s3_key_path(job_config, course, make_feature_csv_name(course, fold_num, "test"))
        test_data_path = download_from_s3(job_config.proc_data_bucket, testkey, job_config.initialize_s3(),
                                          dir=course_input_dir, job_config=job_config)
        # get labels for the training users
        train_users = pd.read_csv(train_data_path)[user_id_col]
        train_labels_path = initialize_cv_labels(job_config, train_users, raw_data_bucket, course, label_type,
                                                 input_dir, raw_data_dir, fold_num, "train", level="course")
        # run docker image with mode == cv
        image_uuid = load_docker_image(docker_image_dir, job_config, logger)
        cmd = make_docker_run_command(job_config, job_config.docker_exec, input_dir, output_dir, image_uuid, course,
                                      None, mode, job_config.client_args) + " --fold_num {}".format(fold_num)
        execute_and_log_output(cmd, logger)
        # upload results
        pred_csv = os.path.join(output_dir, "{}_{}_test.csv".format(course, fold_num))
        pred_key = make_s3_key_path(job_config, course, os.path.basename(pred_csv), mode="test")
        upload_file_to_s3(pred_csv, job_config.proc_data_bucket, pred_key, job_config, remove_on_success=True)
    return
def create_session_folds(label_type, k=5, multithread=True, raw_data_dir="morf-data/"):
    """
    From extract and extract-holdout data, create k randomized folds for each session and archive results to s3.
    :param label_type: type of outcome label to use.
    :param k: number of folds.
    :param multithread: logical indicating whether multiple cores should be used (if available).
    :param raw_data_dir: name of the subfolder in the s3 buckets containing raw data.
    :return: None.
    """
    user_id_col = "userID"
    label_col = "label_value"
    job_config = MorfJobConfig(CONFIG_FILENAME)
    job_config.update_mode(mode)
    logger = set_logger_handlers(module_logger, job_config)
    # clear any preexisting data for this user/job/mode
    clear_s3_subdirectory(job_config)
    if multithread:
        num_cores = job_config.max_num_cores
    else:
        num_cores = 1
    logger.info("creating cross-validation folds")
    with Pool(num_cores) as pool:
        for raw_data_bucket in job_config.raw_data_buckets:
            for course in fetch_complete_courses(job_config, raw_data_bucket):
                for session in fetch_sessions(job_config, raw_data_bucket, data_dir=raw_data_dir, course=course,
                                              fetch_all_sessions=True):
                    # todo: call make_folds() here via apply_async(); currently this is not parallelized!
                    with tempfile.TemporaryDirectory(dir=job_config.local_working_directory) as working_dir:
                        input_dir, output_dir = initialize_input_output_dirs(working_dir)
                        # get the session feature and label data
                        download_train_test_data(job_config, raw_data_bucket, raw_data_dir, course, session,
                                                 input_dir, label_type=label_type)
                        feature_file = os.path.join(input_dir, course, session, make_feature_csv_name(course, session))
                        label_file = os.path.join(input_dir, course, session, make_label_csv_name(course, session))
                        feat_df = pd.read_csv(feature_file, dtype=object)
                        label_df = pd.read_csv(label_file, dtype=object)
                        # merge features and labels to ensure splits are correct
                        feat_label_df = pd.merge(feat_df, label_df, on=user_id_col)
                        assert feat_df.shape[0] == label_df.shape[0], \
                            "features and labels must contain same number of observations"
                        # create the folds
                        logger.info("creating cv splits with k = {} course {} session {}".format(k, course, session))
                        skf = StratifiedKFold(n_splits=k, shuffle=True)
                        folds = skf.split(np.zeros(feat_df.shape[0]), feat_label_df.label_value)
                        for fold_num, train_test_indices in enumerate(folds, 1):
                            # write each fold's train/test data to csv and push to s3
                            train_index, test_index = train_test_indices
                            train_df = feat_label_df.loc[train_index, ].drop(label_col, axis=1)
                            test_df = feat_label_df.loc[test_index, ].drop(label_col, axis=1)
                            train_df_name = os.path.join(working_dir,
                                                         make_feature_csv_name(course, session, fold_num, "train"))
                            test_df_name = os.path.join(working_dir,
                                                        make_feature_csv_name(course, session, fold_num, "test"))
                            train_df.to_csv(train_df_name, index=False)
                            test_df.to_csv(test_df_name, index=False)
                            # upload to s3
                            try:
                                train_key = make_s3_key_path(job_config, course, os.path.basename(train_df_name),
                                                             session)
                                upload_file_to_s3(train_df_name, job_config.proc_data_bucket, train_key, job_config,
                                                  remove_on_success=True)
                                test_key = make_s3_key_path(job_config, course, os.path.basename(test_df_name),
                                                            session)
                                upload_file_to_s3(test_df_name, job_config.proc_data_bucket, test_key, job_config,
                                                  remove_on_success=True)
                            except Exception as e:
                                logger.warning("exception occurred while uploading cv results: {}".format(e))
        pool.close()
        pool.join()
    return
def send_success_email(job_config, emailaddr_from="*****@*****.**"):
    """
    Send an email alert with the job results attached.
    Modified substantially from:
    http://blog.vero4ka.info/blog/2016/10/26/how-to-send-an-email-with-attachment-via-amazon-ses-in-python/
    https://gist.github.com/yosemitebandit/2883593
    :param job_config: MorfJobConfig object.
    :param emailaddr_from: address to send email from (string).
    :return: None.
    """
    aws_access_key_id = job_config.aws_access_key_id
    aws_secret_access_key = job_config.aws_secret_access_key
    proc_data_bucket = job_config.proc_data_bucket
    job_id = job_config.job_id
    user_id = job_config.user_id
    emailaddr_to = job_config.email_to
    status = job_config.status
    # set mode to test so that the correct key path is used to fetch results
    job_config.update_mode("test")
    results_file_name = "morf-results.csv"
    s3 = boto3.client("s3", aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)
    # fetch model evaluation results
    attachment_basename = generate_archive_filename(job_config, mode="evaluate", extension="csv")
    key = make_s3_key_path(job_config, filename=attachment_basename)
    attachment_filepath = download_from_s3(proc_data_bucket, key, s3)
    with open(attachment_filepath) as f:
        data = f.read()
    output = io.StringIO(data)
    # build the email
    subject_text = construct_message_subject(job_config)
    msg = MIMEMultipart()
    msg["Subject"] = subject_text
    msg["From"] = emailaddr_from
    msg["To"] = emailaddr_to
    # what a recipient sees if they don't use an email reader
    msg.preamble = "Multipart message.\n"
    # the body
    body_text = construct_message_body(job_config)
    body = MIMEText(body_text)
    msg.attach(body)
    # the attachment
    part = MIMEApplication(output.getvalue())
    part.add_header("Content-Disposition", "attachment", filename=results_file_name)
    part.add_header("Content-Type", "application/vnd.ms-excel; charset=UTF-8")
    msg.attach(part)
    # connect to Amazon SES
    ses = boto3.client("ses", region_name="us-east-1", aws_access_key_id=aws_access_key_id,
                       aws_secret_access_key=aws_secret_access_key)
    # send the email
    try:
        ses.send_raw_email(Source=emailaddr_from, Destinations=[emailaddr_to, emailaddr_from],
                           RawMessage={"Data": msg.as_string()})
        print("[INFO] email notification sent to {}".format(emailaddr_to))
    except Exception as e:
        print("[WARNING] error sending email to {}: {}".format(emailaddr_to, e))
    return
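

# Minimal, self-contained sketch of the MIME assembly used in send_success_email(): a multipart message
# with a plain-text body and a CSV attachment, built with the standard-library email package. The
# addresses, subject, and CSV contents are placeholders, and no mail is sent here (that is what the SES
# client call above does).
def _example_build_results_email(csv_text="userID,prob,pred\n1,0.9,1\n"):
    from email.mime.application import MIMEApplication
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    msg = MIMEMultipart()
    msg["Subject"] = "MORF job complete"
    msg["From"] = "sender@example.com"
    msg["To"] = "recipient@example.com"
    msg.attach(MIMEText("Your job has finished; results are attached."))  # plain-text body
    part = MIMEApplication(csv_text)                                       # the CSV attachment
    part.add_header("Content-Disposition", "attachment", filename="morf-results.csv")
    msg.attach(part)
    return msg.as_string()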