Example 1
def agentweekly():
    user, password, imap_url = emailcredentials()
    mail = imaplib.IMAP4_SSL(imap_url)
    mail.login(user, password)
    mail.select('"Agent Weekly"')

    eml = search('Subject', 'UMRF Agent Stats for the week', mail)[0].split()
    for i in eml[-1:]:
        result, data = mail.fetch(i, '(RFC822)')
        emlhtml = get_body(email.message_from_bytes(data[0][1]))
        soup = BeautifulSoup(emlhtml, 'lxml')
        dt = re.findall('from ([0-9-]+)', soup.text)[0]

        #prep/clean data
        dfraw = pd.read_html(emlhtml, header=0)[0]
        #dfraw.insert(8, 'Ticket %',((dfraw['IncidentsCreated']/dfraw['CallsHandled'])*100).replace([np.inf,-np.inf], np.nan).round(2))
        dfraw['WeekStart'] = dt
        dfraw['FCR %'] = dfraw['FCR %'].replace({
            '%': ''
        }, regex=True).astype('float')
        dfraw['LoggedOnTime'] = dfraw['LoggedOnTime'].apply(
            lambda x: float(convtime(x)[0]))
        dfraw['AvailTime'] = dfraw['AvailTime'].apply(
            lambda x: float(convtime(x)[0]))
        dfraw['NotReadyTime'] = dfraw['NotReadyTime'].apply(
            lambda x: float(convtime(x)[0]))
        dfraw['ASA'] = dfraw['ASA'].apply(lambda x: float(convtime(x)[1]))
        dfraw['AHT'] = dfraw['AHT'].apply(lambda x: float(convtime(x)[1]))
        dfraw.rename(columns={
            'LoggedOnTime': 'LoggedOnTime (hrs)',
            'AvailTime': 'AvailTime (hrs)',
            'NotReadyTime': 'NotReadyTime (hrs)',
            'ASA': 'ASA (min)',
            'AHT': 'AHT (min)',
        },
                     inplace=True)
        dfraw['IncidentsCreated'] = dfraw['IncidentsCreated'].mask(
            (dfraw['IncidentsCreated'] == 0) & (dfraw['CallsHandled'] > 0))
    dfweeklyeml = dfraw
    return dfweeklyeml, dt
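Note: convtime() is not defined anywhere on this page. Judging from the calls above, where index 0 feeds the hour-based columns and index 1 feeds ASA/AHT in minutes, a minimal sketch could look like the following; the 'HH:MM:SS' input format is an assumption, not something confirmed by the original script.

def convtime(t):
    # Hypothetical helper: convert an 'HH:MM:SS' duration string (assumed
    # format) into (hours, minutes) as floats. Index 0 is used for the
    # LoggedOnTime/AvailTime/NotReadyTime columns, index 1 for ASA/AHT.
    h, m, s = (int(part) for part in str(t).split(':'))
    total_seconds = h * 3600 + m * 60 + s
    return round(total_seconds / 3600, 2), round(total_seconds / 60, 2)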
Example 2
def rejectedincidents():
    # Yesterday's date in the DD-Mon-YYYY form that the IMAP 'ON' search expects.
    yesterday = datetime.strftime(datetime.today() - timedelta(1), '%d-%b-%Y')
    print(yesterday)

    conn = sqlite3.connect('UMRF_Incidents_SQL.sqlite')
    conn2 = sqlite3.connect('emplistmanual.sqlite')
    dflist = []
    
    user, password, imap_url = emailcredentials()
    mail = imaplib.IMAP4_SSL(imap_url)
    mail.login(user,password)
    mail.select('"Rejected Incidents"')
    
    eml = search('ON', "{}".format(yesterday), mail)[0].split()
    for i in eml:
        result, data = mail.fetch(i,'(RFC822)')
        emlbytes = get_body(email.message_from_bytes(data[0][1]))
        try:
            s = str(emlbytes, 'windows-1252')
            incidentNum = re.findall('Incident # (INC[0-9]+)', s)[0]
        except:
            print('multipart email: trying base64 decoding')
        try:
            b = email.message_from_bytes(data[0][1])
            if b.is_multipart():
                for part in b.walk():
                    ctype = part.get_content_type()
                    cdispo = str(part.get('Content-Disposition'))
            
                    # skip any text/plain (txt) attachments
                    if ctype == 'text/plain' and 'attachment' not in cdispo:
                        s = part.get_payload(decode=True)  # decode
                        break
            else:
                s = b.get_payload(decode=True)
        except:
            print('could not read, fetching incident number from subject...')
            s = str(data[0][1])[2:-1]
        if isinstance(s, bytes):
            s = str(s, 'windows-1252')
        
        incidentNum = re.findall('Incident (INC[0-9]+)', s)[0]
        sql = '''SELECT * FROM AllData
                WHERE number IS "{}"'''.format(incidentNum)
        dfraw = pd.read_sql_query(sql,conn)
        df = dfraw.replace({'\r\n':'. ','\n':' '}, regex=True)
        df = df.filter(['number','sys_created_by','opened_by','opened_at','company','assignment_group','reassignment_count','u_resolving_level','resolved_by','short_description','description','comments_and_work_notes','close_notes'])
        df = df.rename(columns={'sys_created_by':'Emp_ID','reassignment_count':'reassigned','u_resolving_level':'resolving_lvl'})
        #df = df.drop(columns=['comments_and_work_notes','comments'])
        #print(df)
        dflist.append(df)
        
    if len(dflist) > 1:
        df = pd.concat(dflist, axis=0)
        df = df.apply(pd.to_numeric, errors='ignore')
    elif len(dflist) == 1:
        df = dflist[0].apply(pd.to_numeric, errors='ignore')
    elif len(dflist) == 0:
        print('No Rejected Incidents!')
    try:
        df.to_html('rejected_incidents_yest.html',index=False)
        dfempcount = pd.read_sql_query('''SELECT * FROM "EmpCount"''', conn, index_col=None)
        dfcount = df.filter(['number','Emp_ID'])
        dfempcount = pd.concat([dfempcount, dfcount], sort=False)
        dfempcount = dfempcount.drop_duplicates(subset=['number'])
        dfempcount.to_sql('EmpCount', conn, if_exists='replace',index=False)
        df = df.drop(columns=['Emp_ID'])
        dfemp = pd.read_sql_query('''SELECT * FROM info''', conn2)
        df_emp = dfemp.filter(['email2','position'])
        df_emp = df_emp[~(df_emp['position'] == 'Agent')]
        eml = df_emp['email2'].values.tolist()
        html = StringIO()
        df.to_html(buf=html)
        html.seek(0)
        dfhtml = pd.read_html(html, index_col=0)[0]
        print(eml)
        html = dfhtml.to_html(border='border',index=False)
        
        text = '''This is an automated email. If you have any questions, concerns, or feedback please email Kyle Lynch at [email protected]'''
                
        #send_mail(user, eml,'Rejected Incidents Report for {}'.format(yesterday), text=text, html=html)
    except:
        print('No Rejected Incidents!')
    print('UMRF_Rejected_Incidents.py completed successfully!')
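Note: send_mail() (commented out above) is also not shown on this page. A minimal sketch that matches the commented-out call send_mail(user, eml, 'Rejected Incidents Report for ...', text=text, html=html), using the standard smtplib/email.mime modules, might look like the following; the SMTP host and port are placeholders, not values from the original script.

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText


def send_mail(sender, recipients, subject, text='', html=None,
              smtp_host='localhost', smtp_port=25):
    # Hypothetical implementation; smtp_host/smtp_port stand in for the
    # real mail relay used by the original script.
    msg = MIMEMultipart('alternative')
    msg['From'] = sender
    msg['To'] = ', '.join(recipients)
    msg['Subject'] = subject
    msg.attach(MIMEText(text, 'plain'))
    if html:
        msg.attach(MIMEText(html, 'html'))
    with smtplib.SMTP(smtp_host, smtp_port) as server:
        server.send_message(msg)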
Example 3
# -*- coding: utf-8 -*-
import imaplib, email
import sqlite3
import calendar
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import os
import re
from io import StringIO
from datetime import date, datetime, timedelta
from sensitive import emailcredentials

pd.set_option('display.max_columns', 100)

user, password, imap_url = emailcredentials()


def get_body(msg):
    # For multipart messages, recurse into the second MIME part (typically the
    # HTML alternative); otherwise return the decoded payload bytes.
    if msg.is_multipart():
        return get_body(msg.get_payload(1))
    else:
        return msg.get_payload(None, True)


def search(key, value, conn):
    # Run an IMAP SEARCH on the selected mailbox, e.g. search('Subject', 'FW: ...', mail),
    # and return the raw search data (a one-element list of space-separated message ids as bytes).
    result, data = conn.search(None, key, '"{}"'.format(value))
    return data


def get_emails(result_bytes):
    msgs = []
    for num in result_bytes[0].split():
        # Assumes an open IMAP connection named `mail` is in scope, as in the
        # other examples; fetch each message id and collect the raw data.
        result, data = mail.fetch(num, '(RFC822)')
        msgs.append(data)
    return msgs
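Note: the sensitive module imported above is not included on this page. All the examples need from emailcredentials() is a (user, password, imap_url) tuple, so a stand-in could read the values from environment variables; the variable names below are invented for this sketch.

import os


def emailcredentials():
    # Hypothetical stand-in for sensitive.emailcredentials(); the environment
    # variable names are made up for this sketch.
    user = os.environ['REPORT_EMAIL_USER']
    password = os.environ['REPORT_EMAIL_PASSWORD']
    imap_url = os.environ['REPORT_IMAP_URL']
    return user, password, imap_url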
Example 4
def agentdailycsv():
    conn = sqlite3.connect('UMRF_Incidents_SQL.sqlite')
    conn2 = sqlite3.connect('UMRF_SQL_Daily.sqlite')
    conn3 = sqlite3.connect('emplistmanual.sqlite')

    user, password, imap_url = emailcredentials()
    mail = imaplib.IMAP4_SSL(imap_url)
    mail.login(user, password)
    mail.select('"Agent Daily"')

    dfemp = pd.DataFrame(columns=['FirstName', 'LastName'])
    dfdaily = pd.DataFrame()
    datelist = pd.read_sql_query('''SELECT DISTINCT "Date" FROM "AllData"''',
                                 conn2)['Date'].tolist()

    eml = search('Subject', 'FW: UMRF Agent Stats', mail)[0].split()
    for i in eml[-5:]:
        result, data = mail.fetch(i, '(RFC822)')
        emlhtml = get_body(email.message_from_bytes(data[0][1]))
        soup = BeautifulSoup(emlhtml, 'lxml')
        dt = re.findall('from ([0-9-]+)', soup.text)[0]
        if dt not in datelist:
            print(dt, 'not in')
            datelist.append(dt)

            #prep/clean data
            dfraw = pd.read_html(emlhtml,
                                 header=0)[0].set_index(['Employee Number'])
            dfraw['IncidentsCreated'] = dfraw['IncidentsCreated'].mask(
                (dfraw['IncidentsCreated'] == 0) & (dfraw['CallsHandled'] > 0))
            dfraw['Date'] = dt
            dfraw['FCR %'] = dfraw['FCR %'].replace({
                '%': ''
            }, regex=True).astype('float')
            dfraw['LoggedOnTime'] = dfraw['LoggedOnTime'].apply(
                lambda x: float(convtime(x)[0]))
            dfraw['AvailTime'] = dfraw['AvailTime'].apply(
                lambda x: float(convtime(x)[0]))
            dfraw['NotReadyTime'] = dfraw['NotReadyTime'].apply(
                lambda x: float(convtime(x)[0]))
            dfraw['ASA'] = dfraw['ASA'].apply(lambda x: float(convtime(x)[1]))
            dfraw['AHT'] = dfraw['AHT'].apply(lambda x: float(convtime(x)[1]))
            dfraw.rename(columns={
                'LoggedOnTime': 'LoggedOnTime (hrs)',
                'AvailTime': 'AvailTime (hrs)',
                'NotReadyTime': 'NotReadyTime (hrs)',
                'ASA': 'ASA (min)',
                'AHT': 'AHT (min)',
            },
                         inplace=True)
            dftemp = dfraw.filter(items=['FirstName', 'LastName'])
            dfemp = pd.concat([dfemp, dftemp])
            dfemp.drop_duplicates(inplace=True)
            dfemp.index.rename('Employee Number', inplace=True)

            yesterday = (datetime.strptime(dt,
                                           '%Y-%m-%d')).strftime('%Y-%m-%d')

            dfraw2 = pd.read_sql_query(
                '''SELECT * FROM AllData WHERE Date IS "{0}"'''.format(
                    yesterday), conn)
            df_inc = dfraw2.filter(
                ['sys_created_by', 'opened_by', 'resolved_by'])
            s_open = df_inc['opened_by'].value_counts()
            s_res = df_inc['resolved_by'].value_counts()

            dfemp = pd.read_sql_query('''SELECT * FROM info''', conn3)
            df_relate = dfemp.filter(['fedex_id', 'first_name', 'last_name'])
            df_relate['opened_by'] = df_relate['first_name'] + ' ' + df_relate[
                'last_name']
            df_relate = df_relate.drop(
                columns=['first_name', 'last_name']).set_index('opened_by')

            df = pd.concat([df_relate, s_open, s_res], axis=1, sort=True)
            df['opened_by'] = df['opened_by'].fillna(0)
            df['resolved_by'] = df['resolved_by'].fillna(0)

            df = df.dropna().astype(int)
            df['FCR %'] = (df['resolved_by'] / df['opened_by']).round(4) * 100
            df = df.rename(
                columns={
                    'fedex_id': 'Employee Number',
                    'opened_by': 'IncidentsCreated',
                    'resolved_by': 'IncidentsCreatedAndResolved'
                }).reset_index().set_index('Employee Number')

            df_call = dfraw.drop(columns=[
                'IncidentsCreated', 'IncidentsCreatedAndResolved', 'FCR %'
            ])

            df_day = pd.concat([df, df_call], axis=1,
                               sort=True).dropna().reset_index(drop=False)
            df_day['FirstName'] = df_day['index'].str.split().str[0]
            df_day['LastName'] = df_day['index'].str.split().str[-1]
            #df_week['FirstName'] = df_week['index'].str.extract('([a-zA-Z-]+)\s[\sa-zA-Z-]+')
            #df_week['LastName'] = df_week['index'].str.extract('[a-zA-Z-]+\s([\sa-zA-Z-]+)')
            df_day = df_day.drop(columns=['index'])
            df_day['Ticket %'] = (
                (df_day['IncidentsCreated'] / df_day['CallsHandled']) *
                100).replace([np.inf, -np.inf], np.nan).round(2)
            df_day = df_day.filter([
                'Employee Number', 'LastName', 'FirstName',
                'Number of Surveys', 'Avg. Survey Score (100)',
                'Avg. Survey Score (5)', 'IncidentsCreated',
                'IncidentsCreatedAndResolved', 'FCR %', 'Ticket %',
                'IncidentsResolved', 'LoggedOnTime (hrs)', 'AvailTime (hrs)',
                'NotReadyTime (hrs)', 'NR%', 'CallsAnswered', 'ACH%',
                'ASA (min)', 'CallsHandled', 'AHT (min)', 'AbandonRingCalls',
                'AbandonHoldCalls', 'AbandonHoldOutCalls', 'ShortCalls', 'Date'
            ])
            df_day = df_day.sort_values(by=['LastName']).reset_index(drop=True)

            dfemp = dfemp.filter(['fedex_id', 'position'])
            dfemp = dfemp[dfemp['position'] == 'Agent']
            df_daily = pd.merge(
                df_day,
                dfemp,
                left_on='Employee Number',
                right_on='fedex_id',
                sort=False).drop(columns=['fedex_id', 'position'])
            df_daily['FCR %'] = df_daily['FCR %'].mask(
                (df_daily['FCR %'] == 0) & ~(df_daily['IncidentsCreated'] > 0))
            dfdaily = pd.concat([dfdaily, df_daily])
            #print(dfdaily)
        else:
            print(dt, 'already in')
            continue
    dfdaily.to_sql('AllData', conn2, if_exists='append', index=False)
    print('UMRF_Agent_Daily Completed Successfully!')
Example 5
def callpatternsql():
    conn = sqlite3.connect('UMRF_Call_Pattern.sqlite')
    dflist = []
    days = {
        'Monday': 0,
        'Tuesday': 1,
        'Wednesday': 2,
        'Thursday': 3,
        'Friday': 4,
        'Saturday': 5,
        'Sunday': 6
    }
    try:
        datelist = pd.read_sql_query(
            '''SELECT DISTINCT "Date" FROM "AllData"''',
            conn)['Date'].tolist()
    except:
        datelist = []
    user, password, imap_url = emailcredentials()
    mail = imaplib.IMAP4_SSL(imap_url)
    mail.login(user, password)
    mail.select('"Call Pattern"')
    eml = search('Subject', 'FW: UMRF Calls Arrival Pattern for',
                 mail)[0].split()
    for i in eml[-5:]:
        #for i in eml :
        result, data = mail.fetch(i, '(RFC822)')
        emlhtml = get_body(email.message_from_bytes(data[0][1]))

        #extract data
        soup = BeautifulSoup(emlhtml, 'lxml')
        dt = re.findall('for ([0-9-]+)', soup.text)[0]
        year, month, day = dt.split('-')
        day = calendar.day_name[calendar.weekday(int(year), int(month),
                                                 int(day))]
        if dt not in datelist:
            print(dt, 'not in')
            datelist.append(dt)
            #prep/clean data
            dfraw = pd.read_html(emlhtml, header=0)[0]
            dfraw = dfraw.iloc[14:40].reset_index(drop=True)
            dfraw.replace(
                {
                    '! ': '',
                    '!': '',
                    '< /td>': '',
                    '<tr>': '',
                    '< td>': '',
                    '< tr>': '',
                    r'A\sM': 'AM',
                    r'\s': '',
                    r'[0-9]+\s': ''
                },
                regex=True,
                inplace=True)
            if 'Overflow Calls' not in dfraw:
                dfraw['Overflow Calls'] = dfraw['Calls Offered'].astype(
                    'float') - dfraw['ACD Calls'].astype('float')

            if dfraw['Overflow Calls'].isnull().values.any():
                nanindex = dfraw[dfraw.isnull().any(axis=1)]
                for i in nanindex.index.values:
                    dfraw1 = dfraw[['Time Interval']]
                    dfraw2 = dfraw[[
                        'SL Abandoned', 'Abandoned Calls', 'Calls Offered',
                        'ACD Calls', 'Overflow Calls'
                    ]]
                    dfraw2.iloc[i] = dfraw2.iloc[i].shift(1)
                    dfraw = pd.concat([dfraw1, dfraw2], axis=1, sort=False)
                    dfraw['SL Abandoned'] = dfraw['SL Abandoned'].fillna(0)
            dfraw['Day'] = day
            dfraw = dfraw.reset_index()
            dfraw = dfraw.rename(columns={'Time Interval': 'Time Int'})
            dfraw['Time Interval'] = pd.date_range(
                start='{} 07:30:00'.format(dt),
                end='{} 20:00:00'.format(dt),
                freq='30T').astype('str')
            df = dfraw.set_index(['Time Interval'])
            df = df.drop(columns=['index'])
            df['Date'] = dt
            df['rank'] = df['Day'].map(days)
            dflist.append(df)
        else:
            print(dt, 'already in')
            continue
    if len(dflist) > 1:
        dfall = pd.concat(dflist, axis=0).sort_values(by=['Date'],
                                                      kind='mergesort')
        dfall = dfall.apply(pd.to_numeric, errors='ignore')
        dfall.to_sql('AllData', conn, if_exists='append')
    elif len(dflist) == 1:
        df = dflist[0].apply(pd.to_numeric, errors='ignore')
        df.to_sql('AllData', conn, if_exists='append')
    elif len(dflist) == 0:
        print('Already up to date!')
    conn.commit()
    conn.close()
Example 6
def agentdailycsv():

    conn = sqlite3.connect('UMRF_Incidents_SQL.sqlite')
    dflist = []
    days = {
        'Monday': 0,
        'Tuesday': 1,
        'Wednesday': 2,
        'Thursday': 3,
        'Friday': 4,
        'Saturday': 5,
        'Sunday': 6
    }
    try:
        datelist = pd.read_sql_query(
            '''SELECT DISTINCT "Date" FROM "AllData"''',
            conn)['Date'].tolist()
    except:
        datelist = []

    user, password, imap_url = emailcredentials()
    mail = imaplib.IMAP4_SSL(imap_url)
    mail.login(user, password)
    mail.select('"Incidents Daily"')

    eml = search('Subject', 'FW: UMRF_Incidents', mail)[0].split()
    for i in eml[-5:]:
        result, data = mail.fetch(i, '(RFC822)')
        emlbytes = get_body(email.message_from_bytes(data[0][1]))
        #print(emlbytes)
        s = str(emlbytes, 'windows-1252')
        #print(s)
        data = StringIO(s)
        df = pd.read_csv(data)
        df = df.filter([
            'number', 'opened_at', 'u_affected_user', 'short_description',
            'state', 'assignment_group', 'assigned_to', 'caller_id.user_name',
            'category', 'business_service', 'work_notes', 'urgency',
            'severity', 'resolved_at', 'close_notes', 'reopen_count',
            'reassignment_count', 'priority', 'parent_incident',
            'incident_state', 'impact', 'calendar_duration', 'description',
            'sys_created_by', 'sys_created_on', 'correlation_display',
            'location', 'comments_and_work_notes', 'closed_at', 'comments',
            'active', 'resolved_by', 'opened_by', 'cmdb_ci', 'company',
            'u_resolving_level', 'u_process_feedback',
            'u_process_feedback_notes'
        ])
        dt = df.at[2, 'opened_at'][:10]
        year, month, day = dt.split('-')
        day = calendar.day_name[calendar.weekday(int(year), int(month),
                                                 int(day))]
        if dt not in datelist:
            print(dt, 'not in')
            datelist.append(dt)
            df['Date'] = dt
            dflist.append(df)
        else:
            print(dt, 'already in')
            continue
    if len(dflist) > 1:
        dfall = pd.concat(dflist, axis=0).sort_values(by=['Date'],
                                                      kind='mergesort')
        dfall = dfall.apply(pd.to_numeric, errors='ignore')
        dfall.to_sql('AllData', conn, if_exists='append')
    elif len(dflist) == 1:
        df = dflist[0].apply(pd.to_numeric, errors='ignore')
        df.to_sql('AllData', conn, if_exists='append')
    elif len(dflist) == 0:
        print('Already up to date!')
    print('UMRF_Incident_Daily_SQL.py completed successfully!')