Exemplo n.º 1
0
def main(term, cr, cr_type, prog, track, email_type, f_name):
    '''Write a semicolon-delimited CSV of faculty email addresses.

    Falls back to the guessed current term and a default file name when
    they are not supplied; the output lands in a temp_email_lists folder
    under the faculty directory, stamped with today's date.
    '''
    locator = FileLocator()
    # Default the term to a guess based on the term descriptions
    if not term:
        term = dpu.guess_current_term(dpu.get_term_descriptions())
    # Default output file name
    f_name = f_name or 'faculty'
    # Collect the matching email addresses
    emails = dpu.get_faculty_emails(term,
                                    course=cr,
                                    course_type=cr_type,
                                    program=prog,
                                    faculty_track=track,
                                    email_type=email_type)
    # Destination folder; create it if it does not yet exist
    output_folder = os.path.abspath(
        os.path.join(os.sep, locator.faculty, 'temp_email_lists'))
    dpu.ensure_dir(output_folder)
    # File name carries today's date for easy identification
    today = datetime.strftime(datetime.today(), '%Y-%m-%d')
    output_file = os.path.abspath(
        os.path.join(os.sep, output_folder,
                     'emails_{0}_{1}.csv'.format(f_name, today)))
    # One address per line, semicolon-delimited
    np.savetxt(output_file, emails, delimiter=";", fmt='%s')
Exemplo n.º 2
0
def main(term, campus):
    '''Build the clinical-review spreadsheet for a term and campus.

    Guesses the current term when none is given, filters the schedule to
    clinical courses on the requested campus(es), attaches the previous
    term's enrollment numbers, and writes the result to an Excel file in
    the schedule's Charts folder.
    '''

    # Get Term Descriptions
    TermDescriptions = dpu.get_term_descriptions()

    # If a term is not passed, guess current
    if not term:
        term = dpu.guess_current_term(TermDescriptions)
    # Ensure term is type str (isinstance is the idiomatic type check)
    if not isinstance(term, str):
        term = str(term)

    # If a campus is not passed, use both
    if not campus:
        campus = ['MENP', 'RFU']
    else:
        # In schedule, 'LPC' is represented by 'MENP'
        if campus == 'LPC':
            campus = 'MENP'
        # Convert string to list so code execution can be parallel
        campus = [campus]

    # Get schedule
    schedule = dpu.get_schedule(term, TermDescriptions)

    # Filter out non-clinical courses
    schedule = filter_schedule_for_cln(schedule, campus)

    # Reorder and filter out unnecessary data
    schedule = schedule[[
        'Term', 'Cr', 'Sec', 'Time', 'Clinical Site', 'Unit', 'Max Cap',
        'Confirmed', 'Faculty'
    ]]

    # Get previous clinical roster (terms step by 5)
    prev_term = str(int(term) - 5)
    try:
        prev_cln_roster = dpu.get_cln(prev_term, TermDescriptions)
    except Exception:
        # Previous roster missing: fall back one more term.
        # (Narrowed from a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit.)
        prev_term = str(int(prev_term) - 5)
        prev_cln_roster = dpu.get_cln(prev_term, TermDescriptions)
    # Get previous enrollment numbers
    schedule['Prev Cr Enrl'] = schedule.apply(get_prev_enrl,
                                              axis=1,
                                              args=(prev_cln_roster,
                                                    prev_term))

    # Get output folder
    output_folder = os.path.join(
        dpu.get_dir_of_schedule(term, TermDescriptions), 'Charts')

    # Output file name
    output_file = os.path.join(output_folder,
                               'Clinical Review {}.xlsx'.format(term))

    # Output result
    schedule.to_excel(output_file, index=False)
def main(term):
    '''Email clinical preceptors for the given (or guessed) term.'''
    # Module-level list other helpers use to track completed sends
    global jobs_completed
    jobs_completed = []

    # Fall back to a guess at the current term
    # NOTE(review): called without a term-descriptions argument here,
    # unlike sibling examples — confirm guess_current_term's signature.
    if not term:
        term = dpu.guess_current_term()
    # Load the clinical roster along with the workbook it came from
    roster, workbook = dpu.get_cln(term, dpu.get_term_descriptions())
    # Keep only rows that have a preceptor email at all...
    roster.dropna(subset=['Preceptor Email'], inplace=True)
    # ...whose email is not a placeholder...
    roster = roster[~roster['Preceptor Email'].isin(['TBD', 'TBA'])]
    # ...and whose letter was not already sent (here or by RFU)
    roster = roster[roster['Preceptor Letter Sent'].isnull()]
    # Send one email per remaining row
    roster.apply(email_preceptor, axis=1)
    # Record today's date in the Excel file for the preceptors emailed
    save_excel_changes(workbook)
Exemplo n.º 4
0
def main(prev_date):
    '''Build change-tracking compliance reports for students and faculty.

    Compares the most recent Castle Branch compliance downloads against a
    previous pair (either the supplied prev_date or the second-most-recent
    download), merges in student/faculty rosters and contact data, then
    writes a student report and a faculty report to disk.

    prev_date, when given, must be a %Y-%m-%d string for which both a
    Compliant and a Noncompliant download already exist.
    '''
    # Call to FileLocator class
    FL = FileLocator()

    # Check if a prev_date was supplied
    if prev_date:
        # Test for proper date formatting
        try:
            datetime.strptime(prev_date, '%Y-%m-%d')
        except ValueError:
            print(
                'Date provided was not in a valid format. Please retry using %Y-%m-d format (e.g. 2018-01-24).'
            )
            # Bail out: continuing with a malformed date would only fail
            # later with a misleading "files do not exist" error.
            return

        # Gather absolute paths to previous
        nc_prev = os.path.abspath(
            os.path.join(os.sep, FL.health_req_report, 'Downloaded Reports',
                         'Noncompliant ' + prev_date + '.csv'))
        cc_prev = os.path.abspath(
            os.path.join(os.sep, FL.health_req_report, 'Downloaded Reports',
                         'Compliant ' + prev_date + '.csv'))
        # Make sure both files exist
        if os.path.exists(nc_prev) and os.path.exists(cc_prev):
            num_files = 1
        else:
            raise ValueError(
                'Date provided is in a valid format, but compliance files do not exist using that date.'
            )
    else:
        num_files = 2
    print()
    # Get the latest reports
    noncompliant_files = dpu.get_latest(FL.health_req_cb_downloads,
                                        'Noncompliant',
                                        num_files=num_files)
    compliant_files = dpu.get_latest(FL.health_req_cb_downloads,
                                     'Compliant',
                                     num_files=num_files)

    if prev_date:
        # Add previous files
        noncompliant_files = [noncompliant_files, nc_prev]
        compliant_files = [compliant_files, cc_prev]
        if noncompliant_files[0] == noncompliant_files[1] or compliant_files[
                0] == compliant_files[1]:
            # Raising a plain string is a TypeError in Python 3;
            # raise a real exception carrying the intended message.
            raise ValueError(
                'Previous date provided is same as date of most recent compliance files. Download more recent reports and try again.'
            )
    else:
        # NOTE: rstrip strips a *character set* ('.', 'c', 's', 'v'), not a
        # suffix; safe here only because the name ends in a digit date.
        prev_date = noncompliant_files[1].rstrip('.csv')[-10:]

    # Get the two most recent reports
    noncompliant_curr = read_cb(noncompliant_files[0])
    noncompliant_prev = read_cb(noncompliant_files[1])
    compliant_curr = read_cb(compliant_files[0])
    compliant_prev = read_cb(compliant_files[1])
    # Get change logs: rows present in only one of (curr, prev).
    # (drop(columns=...) replaces the deprecated positional-axis form.)
    noncompliant_changelog = pd.merge(
        noncompliant_curr,
        noncompliant_prev,
        on=noncompliant_curr.columns.tolist(),
        how='outer',
        indicator=True).query("_merge != 'both'").drop(columns='_merge')
    compliant_changelog = pd.merge(
        compliant_curr,
        compliant_prev,
        on=compliant_curr.columns.tolist(),
        how='outer',
        indicator=True).query("_merge != 'both'").drop(columns='_merge')
    # Get next action date file
    nad_file = dpu.get_latest(FL.health_req_cb_downloads,
                              'Next_Action_Date',
                              num_files=1)
    to_datetime = lambda d: datetime.strptime(d, '%m/%d/%Y')
    to_string = lambda d: datetime.strftime(d, '%m/%d/%Y')
    next_action_date = pd.read_csv(nad_file,
                                   header=0,
                                   converters={
                                       'Order Submission Date': to_datetime,
                                       'Requirement Due Date': to_datetime
                                   })
    next_action_date.rename(columns={'Email Address': 'Email'}, inplace=True)
    # Drop all but earliest next requirement.
    # (The original chained inplace=True onto sort_values' temporary copy,
    # so next_action_date itself was never actually de-duplicated.)
    next_action_date = next_action_date.sort_values(
        by='Requirement Due Date').drop_duplicates(subset='Email',
                                                   keep='first')
    # Put back as string
    next_action_date['Requirement Due Date'] = next_action_date[
        'Requirement Due Date'].apply(lambda x: to_string(x))

    # Get the latest student list
    students = pd.read_excel(dpu.get_latest(FL.students, 'Student List'),
                             header=0,
                             converters={
                                 'Emplid': str,
                                 'Admit Term': str,
                                 'Latest Term Enrl': str,
                                 'Run Term': str,
                             })
    students.drop_duplicates(subset='Emplid', inplace=True)
    # Get the faculty list
    faculty = pd.read_excel(os.path.join(FL.faculty, 'Employee List.xlsx'),
                            header=0,
                            converters={
                                'Empl ID': str,
                            })
    # Ignore all but necessary faculty columns
    faculty = faculty[[
        'Empl ID', 'Last-First', 'Primary Email', 'Secondary Email',
        'Cell Phone'
    ]]
    # Get term descriptions
    TermDescriptions = dpu.get_term_descriptions()
    # Get current term
    current_term = dpu.guess_current_term(TermDescriptions)
    # Figure out next term value (terms step by 5)
    next_term = str(int(current_term) + 5)
    # Get internship roster
    internships = dpu.get_cln(current_term, TermDescriptions)
    # Try to get next term's roster, if exists
    try:
        cln_2 = dpu.get_cln(next_term, TermDescriptions)
        # pd.concat takes a list of frames; the original passed cln_2 as
        # the (invalid) second positional argument, which always raised
        # inside the bare except and silently skipped this merge.
        internships = pd.concat([internships, cln_2])
    except Exception:
        # Next term's roster may simply not exist yet; best-effort only.
        pass
    # Drop all but required information
    internships = internships[[
        'Term', 'Cr', 'Clinical Site', 'Unit', 'Date', 'Student ID'
    ]].copy(deep=True)
    internships = internships[internships['Cr'] == '443']
    # Get schedule
    global schedule
    schedule = dpu.get_schedule(current_term, TermDescriptions)
    # Get clinical roster
    roster = clean_student_roster(current_term, internships)
    # Merge with student data
    roster = pd.merge(roster,
                      students[[
                          'Emplid', 'Last Name', 'First Name', 'Maj Desc',
                          'Campus', 'Email', 'Best Phone'
                      ]],
                      how='left',
                      on='Emplid')
    # Combine with faculty data
    # List of the required columns
    req_list = ['Last-First', 'Primary Email', 'Secondary Email', 'Cell Phone']
    # Naming convention for suffixes
    name_con = ['1_curr', '2_curr', '1_next', '2_next']
    # Iterate through suffixes (i.e., through instructors)
    for suffix in name_con:
        # Create new column names
        inst = 'Instructor' + '_' + suffix
        new_cols = [inst + ' ' + x for x in req_list]
        # Apply search function
        roster[new_cols] = roster.apply(instructor_contact_info,
                                        axis=1,
                                        args=(suffix, faculty, req_list,
                                              schedule))
    # Drop Faculty ID Fields
    id_fields = ['Faculty_ID_' + suffix for suffix in name_con]
    roster.drop(labels=id_fields, axis=1, inplace=True)
    # Combine with schedule data
    # List of the required columns
    req_list = ['Clinical Site', 'Unit']
    # Iterate through suffixes
    for suffix in name_con:
        # Create new column names
        new_cols = [x + '_' + suffix for x in req_list]
        # Apply search function
        roster[new_cols] = roster.apply(clinical_info,
                                        axis=1,
                                        args=(suffix, req_list, schedule,
                                              internships))
    # Gather historical student data
    historical_students = get_historical_student_data(FL.hist_students,
                                                      students)

    # Get a faculty roster for health_req report
    faculty_roster = clean_faculty_roster(current_term)
    # Merge with faculty contact info
    faculty_roster = pd.merge(faculty_roster,
                              faculty,
                              left_on='Emplid',
                              right_on='Empl ID')
    # Separate out names ('Last-First' is formatted "Last, First")
    faculty_roster['Last Name'] = faculty_roster['Last-First'].apply(
        lambda x: x.split(', ')[0])
    faculty_roster['First Name'] = faculty_roster['Last-First'].apply(
        lambda x: x.split(', ')[1])

    # Collection of tracker names
    all_trackers = np.concatenate(
        (compliant_curr['To-Do List Name'].unique(),
         noncompliant_curr['To-Do List Name'].unique()))
    all_trackers = np.unique(all_trackers)
    # Breakdown into types
    dna_trackers = []
    student_trackers = []
    faculty_trackers = []
    for tracker in all_trackers:
        if 'Disclosure & Authorization' in tracker:
            dna_trackers.append(tracker)
        elif 'DE34' in tracker:
            faculty_trackers.append(tracker)
        elif 'DE69' in tracker:
            student_trackers.append(tracker)
    '''Here we attempt to connect all students in the clinical roster with
    an account in Castle Branch. To do so, we make a dictionary lookup.
    # dict[Emplid] = (Last Name, First Name, Email)
    '''
    cb_to_dpu = {}
    # Attempt to match students, starting with full accounts
    # Sometimes the dna account is all we get, even though they have full
    roster.apply(
        match_students,
        axis=1,
        args=(noncompliant_curr[noncompliant_curr['To-Do List Name'].isin(
            student_trackers)], cb_to_dpu),
        historic=historical_students)
    roster.apply(match_students,
                 axis=1,
                 args=(compliant_curr[compliant_curr['To-Do List Name'].isin(
                     student_trackers)], cb_to_dpu),
                 historic=historical_students)
    roster.apply(match_students,
                 axis=1,
                 args=(noncompliant_curr[
                     noncompliant_curr['To-Do List Name'].isin(dna_trackers)],
                       cb_to_dpu))
    roster.apply(match_students,
                 axis=1,
                 args=(compliant_curr[compliant_curr['To-Do List Name'].isin(
                     dna_trackers)], cb_to_dpu))
    # Faculty matching
    cb_to_fac = {}
    faculty_roster.apply(
        match_faculty,
        axis=1,
        args=(noncompliant_curr[noncompliant_curr['To-Do List Name'].isin(
            faculty_trackers)], cb_to_fac))
    faculty_roster.apply(
        match_faculty,
        axis=1,
        args=(compliant_curr[compliant_curr['To-Do List Name'].isin(
            faculty_trackers)], cb_to_fac))
    faculty_roster.apply(
        match_faculty,
        axis=1,
        args=(noncompliant_curr[noncompliant_curr['To-Do List Name'].isin(
            student_trackers)], cb_to_fac))
    faculty_roster.apply(
        match_faculty,
        axis=1,
        args=(compliant_curr[compliant_curr['To-Do List Name'].isin(
            student_trackers)], cb_to_fac))

    # New column names
    fields = [
        'Changed Since ' + prev_date, 'Compliant', 'Requirements Incomplete',
        'Next Due', 'Next Due Date'
    ]
    # Gather compliance status
    roster[fields] = roster.apply(determine_status,
                                  axis=1,
                                  args=(cb_to_dpu, noncompliant_curr,
                                        compliant_curr, noncompliant_changelog,
                                        compliant_changelog, student_trackers,
                                        dna_trackers, next_action_date),
                                  account='student')
    # Gather compliance status
    faculty_roster[fields] = faculty_roster.apply(
        determine_status,
        axis=1,
        args=(cb_to_fac, noncompliant_curr, compliant_curr,
              noncompliant_changelog, compliant_changelog, faculty_trackers,
              student_trackers, next_action_date),
        account='faculty')

    # Archive old reports that were created by this script
    for cr_report in ['student_report', 'faculty_report']:
        dpu.archive_old_reports(FL.health_req_report, cr_report, 'Archived')
    # Archive old reports downloaded from Castle Branch
    for dl_report in ['Compliant', 'Noncompliant', 'Next_Action_Date']:
        dpu.archive_old_reports(FL.health_req_cb_downloads,
                                dl_report,
                                'Archive',
                                keep_min=3)

    # Gather column names
    faculty_cols, student_cols = prepare_columns(prev_date)
    # Output to file
    date_of_current = noncompliant_files[0].rstrip('.csv')[-10:]
    date_of_current = datetime.strptime(date_of_current, '%Y-%m-%d')
    output_report(roster, student_cols, 'student_report', date_of_current,
                  FL.health_req_report)
    output_report(faculty_roster, faculty_cols, 'faculty_report',
                  date_of_current, FL.health_req_report)
Exemplo n.º 5
0
def main(api_key, term, cr, prog, sites, f_name):
    '''Build an HTML map of students, clinical sites, DePaul campuses, and
    faculty for a term, updating the location database along the way.

    Requires a Google Maps API key. Filters students/courses by the
    optional program and course arguments; `sites` may name a text file
    of site names (one per line), otherwise all IL hospitals are used.
    '''
    # Ensure API key (raising a plain string is a TypeError in Python 3)
    if not api_key:
        raise ValueError(
            "You did not specify an API_KEY and there is no key saved in your environment variables. Use --api-key or -a to specify key."
        )

    # Initialize File Locator
    FL = FileLocator()

    # Create a PoolManager that verifies certificates when making requests
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                               ca_certs=certifi.where())

    # Initialize the googlemaps Client with API key
    gmaps = googlemaps.Client(key=api_key)

    # Open connection to database
    db_path = os.path.join(os.path.sep, FL.location, 'location.db')
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()

    print('Updating student data...', end='', flush=True)
    # Gather student data
    student_list = dpu.get_student_list()
    # Drop duplicated student IDs - this would only cause problems
    student_list.drop_duplicates(subset='Emplid', inplace=True)
    # Update student data
    student_ids = student_list['Emplid'].unique()
    update_data(student_list, 'students', student_ids, student_df_fields,
                student_db_fields, 'Emplid', cursor, conn, http, api_key)
    print('complete!')
    print('Updating site data...', end='', flush=True)
    # Gather site data
    # Data currently stored in ARC database
    arc_path = os.path.join(os.path.sep, FL.arc, 'Backend',
                            'AffiliationAgreements_Backend.accdb')
    access_conn_str = (
        r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)}; DBQ=%s;' %
        arc_path)
    # Connect and open cursor
    cnxn = pyodbc.connect(access_conn_str)
    crsr = cnxn.cursor()
    # Get all site data
    sites_cols = [
        'site_id', 'Name', 'Address1', 'Address2', 'City', 'State', 'Postal',
        'Type'
    ]
    sql = "SELECT * FROM sites"
    crsr.execute(sql)
    sites_df = pd.DataFrame.from_records(crsr.fetchall(), columns=sites_cols)
    sites_df['site_id'] = sites_df['site_id'].astype('str')
    site_ids = sites_df['site_id'].unique()
    all_site_names = sites_df[sites_df['Type'] ==
                              'Hospital']['Name'].unique().tolist()
    # Close Access connection
    crsr.close()
    cnxn.close()
    # Update data
    update_data(sites_df, 'locations', site_ids, site_df_fields,
                site_db_fields, 'site_id', cursor, conn, http, api_key)
    print('complete!')
    print('Updating faculty data...', end='', flush=True)
    # Gather Faculty Data
    faculty_list = dpu.get_employee_list()
    # Drop inactive and staff
    faculty_list = faculty_list[(faculty_list['Status'] == 'Active')
                                & (faculty_list['Track'] != 'Staff')]
    # Drop faculty without ID number
    faculty_list.dropna(subset=['Empl ID'], inplace=True)
    # Rename address column
    faculty_list.rename(columns={'Address': 'Address1'}, inplace=True)
    faculty_ids = faculty_list['Empl ID'].unique()
    # Update data
    update_data(faculty_list, 'faculty', faculty_ids, faculty_df_fields,
                faculty_db_fields, 'Empl ID', cursor, conn, http, api_key)
    print('complete!')

    print('Gathering student and faculty roster...', end='', flush=True)
    # If no term provided, guess the current term
    TermDescriptions = dpu.get_term_descriptions()
    if not term:
        term = dpu.guess_current_term(TermDescriptions)

    # Gather schedule
    schedule = dpu.get_schedule(term, TermDescriptions)
    # Filter schedule
    if prog:
        schedule = schedule[schedule['Program'] == prog].copy(deep=True)
        if prog == 'MENP':
            MS_progs = ['BS-Health Sciences Combined', 'MS-Generalist Nursing']
            possible_ids = student_list[(student_list['Campus'] == 'LPC') & (
                student_list['Maj Desc'].isin(MS_progs))]['Emplid'].unique(
                ).tolist()
        elif prog == 'RFU':
            possible_ids = student_list[student_list['Campus'] ==
                                        'RFU']['Emplid'].unique().tolist()
        elif prog == 'DNP':
            possible_ids = student_list[
                student_list['Maj Desc'] ==
                'Doctor of Nursing Practice']['Emplid'].unique().tolist()
        elif prog == 'RN-MS':
            RN_progs = ['BS-Nursing  RN to MS', 'MS-Nursing RN to MS']
            possible_ids = student_list[student_list['Maj Desc'].isin(
                RN_progs)]['Emplid'].unique().tolist()
        else:
            # Previously an unknown program fell through and caused a
            # NameError on possible_ids further below; fail clearly here.
            raise ValueError('Unknown program: {}'.format(prog))
    else:
        schedule = schedule[schedule['Program'].isin(['MENP', 'RFU'])]
        MS_progs = ['BS-Health Sciences Combined', 'MS-Generalist Nursing']
        possible_ids = student_list[student_list['Maj Desc'].isin(
            MS_progs)]['Emplid'].unique().tolist()
    if cr:
        possible_courses = [cr]
    else:
        possible_courses = schedule['Cr'].unique().tolist()

    # Need to gather roster data (student IDs and faculty IDs only) as array
    roster = dpu.get_student_roster()
    # Filter roster
    cln_types = ['PRA', 'CLN']
    roster = roster[(roster['Term'] == term)
                    & (roster['Student ID'].isin(possible_ids)) &
                    (roster['Role'] == 'PI') &
                    (roster['Cr'].isin(possible_courses)) &
                    ((roster['Type'].isin(cln_types)) |
                     ((roster['Cr'] == '301') &
                      (roster['Type'] == 'LAB')))].copy(deep=True)
    # Takes care of 301 issue (due to changing course times)
    roster = roster.sort_values(
        by=['Faculty_ID',
            'Start Date'], ascending=[True, False]).drop_duplicates(
                subset=['Term', 'Student ID', 'Cr', 'Sec']).copy(deep=True)
    print('complete!')
    # Gather student and faculty IDs
    requested_students = np.unique(roster['Student ID'].values)
    requested_faculty = np.unique(roster['Faculty_ID'].values)

    # All DePaul Campuses
    sql = "SELECT id FROM locations WHERE type = 'DePaul'"
    cursor.execute(sql)
    requested_dpu = np.ravel(cursor.fetchall())

    print('Gathering sites...', end='', flush=True)
    # Gather requested sites
    if sites:
        try:
            with open(sites, 'r') as infile:
                site_names = infile.readlines()
            site_names = [x.strip() for x in site_names]
        except OSError as err:
            # Raising a plain string is a TypeError in Python 3; raise a
            # real exception and chain the underlying I/O error.
            raise ValueError('Could not open file {}'.format(sites)) from err
        # Find best string match for each site name provided in file
        best_matches = []
        for site_name in site_names:
            best_matches.append(
                dpu.find_best_string_match(site_name, all_site_names))
        best_matches = list(set(best_matches))
        requested_sites = sites_df[sites_df['Name'].isin(
            best_matches)]['site_id'].unique().tolist()
    # Default to all hospitals in IL
    else:
        requested_sites = sites_df[(sites_df['State'] == 'IL') & (
            sites_df['Type'] == 'Hospital')]['site_id'].unique().tolist()
    print('complete!')
    print('Updating student travel times...', end='', flush=True)
    # Update all the possible travel times
    for student_id, site_id in product(requested_students, requested_sites):
        _, _ = access_or_update_travel_time(student_id, site_id, cursor, gmaps)
    print('complete!')
    # Then pull the students, sites
    sql = "SELECT * FROM students WHERE id IN ({seq})".format(
        seq=','.join(['?'] * len(requested_students)))
    cursor.execute(sql, requested_students)
    students = pd.DataFrame(cursor.fetchall(), columns=student_cols)

    sql = "SELECT * FROM locations WHERE id IN ({seq})".format(
        seq=','.join(['?'] * len(requested_sites)))
    cursor.execute(sql, requested_sites)
    sites = pd.DataFrame(cursor.fetchall(), columns=site_cols)

    sql = "SELECT * FROM locations WHERE id IN ({seq})".format(
        seq=','.join(['?'] * len(requested_dpu)))
    cursor.execute(sql, requested_dpu)
    depaul = pd.DataFrame(cursor.fetchall(), columns=site_cols)

    sql = "SELECT * FROM faculty WHERE id IN ({seq})".format(
        seq=','.join(['?'] * len(requested_faculty)))
    cursor.execute(sql, requested_faculty)
    faculty = pd.DataFrame(cursor.fetchall(), columns=faculty_cols)
    print('Building the chart...', end='', flush=True)
    # Plot the map
    if not f_name:
        f_name = 'location_chart'
    today = datetime.strftime(datetime.today(), '%Y-%m-%d')
    f_name = os.path.join(os.path.sep, FL.location,
                          '{} {}.html'.format(f_name, today))
    plot_map(cursor,
             student_list=students,
             site_list=sites,
             dpu_list=depaul,
             fac_list=faculty,
             outfile=f_name)
    print('complete!')
    # Close cursor, commit, and close connection
    cursor.close()
    conn.commit()
    conn.close()
def main(term):
    '''Build the clinical roster report for a term.

    Joins the student roster with schedule, student, and faculty data,
    then writes a "Clinical Roster" report into the term's academic-year/
    quarter output folder. If no term is given, the current term is
    guessed from the term descriptions.
    '''
    # Initialize File Locator
    FL = FileLocator()
    # Gather term descriptions
    TermDescriptions = dpu.get_term_descriptions()
    # If term not given, guess
    if not term:
        term = dpu.guess_current_term(TermDescriptions)

    # Gather faculty; rename columns up front so later merges produce
    # unambiguous "Faculty ..." column names
    Faculty = dpu.get_employee_list()
    Faculty.rename(columns={
        'Empl ID': 'Faculty ID',
        'Last Name': 'Faculty Last Name',
        'First Name': 'Faculty First Name',
        'Last-First': 'Faculty Full Name',
        'Primary Email': 'Faculty Primary Email',
        'Secondary Email': 'Faculty Secondary Email',
        'Cell Phone': 'Faculty Phone'
    },
                   inplace=True)
    # Gather student list; same renaming scheme ("Student ..." columns)
    students = dpu.get_student_list()
    students.rename(columns={
        'Emplid': 'Student ID',
        'Student Name': 'Student Full Name',
        'Last Name': 'Student Last Name',
        'First Name': 'Student First Name',
        'Email': 'Student Email',
        'Best Phone': 'Student Phone'
    },
                    inplace=True)
    # Gather schedule
    schedule = dpu.get_schedule(term, TermDescriptions)
    # Filter schedule to the MENP and RFU programs only
    schedule = schedule[schedule['Program'].isin(['MENP', 'RFU'])]

    # Gather Roster
    roster = dpu.get_student_roster()
    roster.rename(columns={'Faculty_ID': 'Faculty ID'}, inplace=True)
    # Filter roster: this term, primary instructors, courses present in
    # the filtered schedule, and clinical-type sections (PRA/CLN) --
    # plus NSG 301 LAB sections, which count as clinical here
    cln_types = ['PRA', 'CLN']
    MENP_courses = schedule['Cr'].unique().tolist()
    roster = roster[(roster['Term'] == term) & (roster['Role'] == 'PI') &
                    (roster['Cr'].isin(MENP_courses)) &
                    ((roster['Type'].isin(cln_types)) |
                     ((roster['Cr'] == '301') &
                      (roster['Type'] == 'LAB')))].copy(deep=True)
    # Takes care of 301 issue (due to changing course times): keep only
    # the most recent Start Date row per (Term, Student, Cr, Sec)
    roster = roster.sort_values(
        by=['Faculty ID',
            'Start Date'], ascending=[True, False]).drop_duplicates(
                subset=['Term', 'Student ID', 'Cr', 'Sec']).copy(deep=True)
    # Gather string-formatted and concatenated dates and times
    # (assumes apply_dates_times returns a two-element Series per row --
    # verify against its definition)
    roster[['Dates', 'Times']] = roster.apply(apply_dates_times, axis=1)
    # Drop columns no longer needed after the date/time consolidation
    roster.drop(labels=[
        'Student Name', 'Student Major', 'Subject', 'Type', 'Class Nbr',
        'Role', 'Mode', 'Start Date', 'End Date', 'Pat', 'Mtg Start', 'Mtg End'
    ],
                axis=1,
                inplace=True)

    # Merge together: schedule info by (Cr, Sec), then student contact
    # info by Student ID, then faculty contact info by Faculty ID
    roster = roster.merge(schedule[[
        'Cr', 'Sec', 'Title', 'Clinical Site', 'Unit', 'Max Cap', 'Confirmed'
    ]],
                          how='left',
                          on=['Cr', 'Sec'])
    roster = roster.merge(students[[
        'Student ID', 'Student Last Name', 'Student First Name',
        'Student Full Name', 'Student Email', 'Student Phone', 'Campus'
    ]],
                          how='left',
                          on='Student ID')
    roster = roster.merge(Faculty[[
        'Faculty ID', 'Faculty Last Name', 'Faculty First Name',
        'Faculty Full Name', 'Faculty Primary Email',
        'Faculty Secondary Email', 'Faculty Phone'
    ]],
                          how='left',
                          on='Faculty ID')

    # Column names and order for the final report
    column_names = [
        'Term', 'Campus', 'Cr', 'Sec', 'Clinical Site', 'Unit', 'Dates',
        'Times', 'Student ID', 'Student Last Name', 'Student First Name',
        'Student Full Name', 'Student Email', 'Student Phone', 'Faculty ID',
        'Faculty Last Name', 'Faculty First Name', 'Faculty Full Name',
        'Faculty Primary Email', 'Faculty Secondary Email', 'Faculty Phone',
        'Title', 'Max Cap', 'Confirmed'
    ]

    # Gather date: parsed from the trailing date in the newest roster
    # file's name (rstrip works here only because dates end in digits)
    date_of_report = datetime.strptime(
        dpu.get_latest(FL.rosters, 'NSG_STDNT_ROSTER').rstrip('.xlsx')[-10:],
        '%Y-%m-%d')

    # Ensure Output path
    starting_path = FL.cln_roster
    # Gather academic year and quarter
    ay = TermDescriptions[TermDescriptions['Term'] ==
                          term]['Academic Year'].item()
    q = TermDescriptions[TermDescriptions['Term'] == term]['Quarter'].item()
    # Update file path
    output_path = os.path.join(starting_path, ay, q)
    dpu.ensure_dir(output_path)

    # Output file
    output_report(roster, column_names, 'Clinical Roster', date_of_report,
                  output_path)
Exemplo n.º 7
0
def main(faculty_id, cr, cr_type, mode, num_recent):
    '''Main function call.

    Search the historical class-schedule data for the most recent
    offering(s) of a course and/or the most recent courses taught by a
    given faculty member, printing matches in reverse-chronological order.

    Parameters
    ----------
    faculty_id : str or None
        Employee ID to search for. At least one of faculty_id / cr required.
    cr : str or None
        Course catalog number (e.g. '301') to search for.
    cr_type : str or None
        Optional course component filter (e.g. 'LEC', 'LAB').
    mode : str or None
        Optional instruction-mode filter.
    num_recent : int or None
        How many recent terms to report; defaults to 1.

    Raises
    ------
    ValueError
        If neither faculty_id nor cr is supplied, or no records match.
    '''
    # Term number <-> description lookup table
    TermDescriptions = dpu.get_term_descriptions()

    # Download historical course info
    course_data = dpu.get_class_data()
    # Keep only primary instructors
    course_data = course_data[course_data['Role'] == 'PI'].copy(deep=True)
    # Takes care of 301 issue (due to changing course times): keep only the
    # most recent start date per (Term, Class Nbr)
    course_data = (course_data
                   .sort_values(by=['ID', 'Start Date'],
                                ascending=[True, False])
                   .drop_duplicates(subset=['Term', 'Class Nbr'])
                   .copy(deep=True))

    # Range of terms covered by the downloaded report
    min_term = course_data['Term'].min()
    min_long_term = TermDescriptions[TermDescriptions['Term'] == min_term]['Long Description'].item()
    max_term = course_data['Term'].max()
    max_long_term = TermDescriptions[TermDescriptions['Term'] == max_term]['Long Description'].item()

    Faculty = dpu.get_employee_list()
    if faculty_id:
        # Get faculty name for print statements
        first = Faculty[Faculty['Empl ID'] == faculty_id]['First Name'].item()
        last = Faculty[Faculty['Empl ID'] == faculty_id]['Last Name'].item()
        print(f'Searching ID {faculty_id} between {min_long_term} and {max_long_term}.')
    else:
        if not cr:
            # BUG FIX: raising a bare string is a TypeError in Python 3;
            # raise a proper exception instance instead.
            raise ValueError('Must submit either a faculty ID or a course '
                             'for which to perform the search.')
        print(f'Searching for last offering of NSG {cr} between {min_long_term} and {max_long_term}.')

    # If report appears to be out of date, suggest so to user
    curr_term = dpu.guess_current_term(TermDescriptions)
    if int(curr_term) > int(max_term):
        print('Your report may be out of date. To update or increase range of search, download a new NSG_CLASS_SCHEDULE_MULTI_TERM report.')

    # Apply the requested filters (each is optional)
    if faculty_id:
        course_data = course_data[course_data['ID'] == faculty_id].copy(deep=True)
    if cr:
        course_data = course_data[course_data['Catalog'] == cr].copy(deep=True)
    if cr_type:
        course_data = course_data[course_data['Component'] == cr_type].copy(deep=True)
    if mode:
        course_data = course_data[course_data['Mode'] == mode].copy(deep=True)

    # Test if any records found
    if len(course_data) == 0:
        # BUG FIX: same as above — raise an exception instance, not a str.
        raise ValueError('No records found. To increase range of search, '
                         'download a new NSG_CLASS_SCHEDULE_MULTI_TERM report.')

    # Most recent term first; within a term, order by course then section
    course_data.sort_values(by=['Term', 'Catalog', 'Section'],
                            ascending=[False, True, True], inplace=True)

    # Unique terms, sorted most recent first
    unique_terms = sorted(course_data['Term'].unique().tolist(), reverse=True)

    if faculty_id and cr:
        print(f'NSG {cr} was last taught by {first} {last} on... (in reverse order)')
    elif not cr:
        print(f'Last course taught by {first} {last} on... (in reverse order)')
    elif not faculty_id:
        print(f'NSG {cr} was last taught on... (in reverse order)')
    # Default to return a single term of info
    if not num_recent:
        num_recent = 1

    # Iterate through the most recent terms. Slicing caps the iteration at
    # the number of terms actually present — avoids the IndexError the old
    # counter-based while loop hit when num_recent exceeded len(unique_terms).
    for temp_term in unique_terms[:num_recent]:
        long_term = TermDescriptions[TermDescriptions['Term'] == temp_term]['Long Description'].item()
        # Filter for that term and print each matching section
        temp_courses = course_data[course_data['Term'] == temp_term]
        temp_courses.apply(print_row, axis=1, args=(long_term, Faculty))
Exemplo n.º 8
0
def main(term):
    '''Main function.

    Build, for every course that has a course coordinator, an Excel file
    per program listing that course's clinical/lab sections with faculty
    contact information, then email each coordinator their file(s).

    Parameters
    ----------
    term : str or None
        Term code; when falsy, the current term is guessed from the
        term-description table.
    '''
    FL = FileLocator()

    # Lookup tables: term descriptions and the faculty roster
    TermDescriptions = dpu.get_term_descriptions()
    Faculty = dpu.get_employee_list()

    # Gather schedule based on term (guess term if not provided)
    if not term:
        term = dpu.guess_current_term(TermDescriptions)
    schedule = dpu.get_schedule(term, TermDescriptions)

    # Output location and the columns included in each coordinator file
    output_path = os.path.join(os.path.sep, FL.schedule, 'Template', 'Course Coordinators')
    output_fields = ['Term', 'NSG', 'Cr', 'Sec', 'Time', 'Clinical Site', 'Unit', 'Faculty', 'Fac Conf', 'Primary Email', 'Secondary Email', 'Cell Phone']
    contact_fields = ['Primary Email', 'Secondary Email', 'Cell Phone']

    # Remove any courses in the wrong term or in DNP or RN to MS program
    schedule = schedule[(schedule['Term'] == term) & (schedule['Program'].isin(['MENP', 'RFU']))]

    # Collect every course and every faculty member that has a coordinator
    # (find_coord mutates the two sets passed via args)
    coord_courses = set()
    coord_faculty = set()
    schedule.apply(find_coord, axis=1, args=(coord_courses, coord_faculty))
    coord_courses = list(coord_courses)
    coord_faculty = list(coord_faculty)

    # Drop courses that have no coordinator
    schedule = schedule[schedule['Cr'].isin(coord_courses)]

    # Clear out files generated by previous runs
    dpu.ensure_empty_dir(output_path)

    # Map contact information for each faculty member onto the schedule
    for field in contact_fields:
        schedule[field] = schedule.apply(map_contact, axis=1, args=(field, Faculty))

    # One output file per (course, program) pair
    for course in coord_courses:
        coord_programs = set()
        schedule.apply(find_sections, axis=1, args=(course, coord_programs))
        coord_programs = list(coord_programs)
        for program in coord_programs:
            # Get coordinator faculty name
            faculty = schedule[(schedule['Cr'] == course) & (schedule['Type'] == 'COORD') & (schedule['Program'] == program)]['Faculty'].astype(list).values[0]
            # Return only the labs or clns that match the program
            # (i.e. everything that is neither the COORD row nor a lecture)
            schedule_out = schedule[(schedule['Cr'] == course) & (schedule['Type'] != 'COORD') & (schedule['Type'] != 'LEC') & (schedule['Program'] == program)].copy(deep=True)
            # Keep only the needed columns
            schedule_out = schedule_out[output_fields]
            # Build output filename
            f_name = '{0} - {1} - {2}.xlsx'.format(faculty, course, program)
            output_file = os.path.join(os.path.sep, output_path, f_name)
            # Output file of lab/cln sections
            schedule_out.to_excel(output_file, index=False)

    # Email each coordinator their file(s)
    for coordinator in coord_faculty:
        # Ignore undefined faculty placeholders
        if coordinator not in ['TBA', 'TBD']:
            # Prefer the primary email; fall back to the secondary one when
            # the primary lookup fails (e.g. .item() on an empty selection).
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and programming errors.
            try:
                recipient = Faculty[Faculty['Last-First'] == coordinator]['Primary Email'].item()
            except Exception:
                recipient = Faculty[Faculty['Last-First'] == coordinator]['Secondary Email'].item()
            # Write subject
            subject = 'Course Coordinator: Your Clinical or Lab Faculty'
            # Write body. NOTE(review): assumes coordinator is formatted
            # 'Last, First' (matching the 'Last-First' roster column) —
            # the split below extracts the first name.
            body = '''Dear {},
            
            Attached please find a list of your clinical or lab courses along with faculty contact info. If there is no confirmation date listed for a faculty member (field = 'Fac Conf'), we do not recommend contacting that person at this time. They have likely only given us a tentative 'yes.'
            
            This is an automated email - if something looks wrong, please reply and let me know.'''.format(coordinator.split(',')[1].strip())
            # Attach every generated file whose name contains this coordinator
            attachments = []
            for file in os.listdir(output_path):
                file_path = os.path.join(output_path, file)
                if coordinator in file_path:
                    attachments.append(file_path)
            # Send email; report (but don't abort on) per-recipient failures
            try:
                send_email(recipient, subject, body, attachments)
            except Exception as e:
                print('Exception occured for {0}: {1}'.format(coordinator, e))