def import_updates_to_sql():
    create_tables()
    now = datetime.datetime.now()

    #
    # Import Delivery
    #
    wb, ws = open_wb(app_cfg['XLS_AS_DELIVERY_STATUS'])
    my_csv = xlrd_wb_to_csv(wb, ws)

    my_new_list = []
    for my_row in my_csv:
        my_row.insert(0, '')
        my_new_list.append(my_row)

    push_list_to_csv(my_new_list, 'csv_services.csv')
    load_infile('services', 'csv_services.csv')

    #
    # Import Subscriptions
    #
    wb, ws = open_wb(app_cfg['XLS_SUBSCRIPTIONS'])
    my_csv = xlrd_wb_to_csv(wb, ws)

    my_new_list = []
    for my_row in my_csv:
        my_row.insert(0, '')
        my_new_list.append(my_row)

    push_list_to_csv(my_new_list, 'csv_subscriptions.csv')
    load_infile('subscriptions', 'csv_subscriptions.csv')

    #
    # Import Bookings
    #
    wb, ws = open_wb(app_cfg['XLS_BOOKINGS'])
    my_csv = xlrd_wb_to_csv(wb, ws)

    my_new_list = []
    last_col = len(my_csv[0])

    for my_row in my_csv:
        # Add some useful columns
        my_row.insert(0, '')
        my_row.insert(last_col+1, 'hash')
        my_row.insert(last_col+2, now)

        my_new_list.append(my_row)

    push_list_to_csv(my_new_list, 'csv_bookings.csv')
    load_infile('bookings', 'csv_bookings.csv')

    return
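
# load_infile() is defined elsewhere in this project and is not shown here.
# Below is a minimal, hypothetical sketch of what such a helper might look
# like, assuming a MySQL backend reached through mysql.connector and tables
# whose first column is an auto-increment id (which would explain the empty
# leading column prepended to every CSV row above). Connection settings,
# credentials and escaping are illustrative only, not taken from this project.
import mysql.connector


def load_infile_sketch(table_name, csv_file):
    conn = mysql.connector.connect(host='localhost', user='ta_user',
                                   password='secret', database='ta_db',
                                   allow_local_infile=True)
    cur = conn.cursor()
    # Bulk-load the CSV; the empty first field lands in the id column
    cur.execute(
        "LOAD DATA LOCAL INFILE '" + csv_file + "' "
        "INTO TABLE " + table_name + " "
        "FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '\"' "
        "LINES TERMINATED BY '\\n'")
    conn.commit()
    cur.close()
    conn.close()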
Example No. 2
def build_customer_list():
    my_map = build_sheet_map(app_cfg['XLS_BOOKINGS'], sheet_map,
                             'XLS_BOOKINGS')

    wb_bookings, sheet_bookings = open_wb(app_cfg['XLS_BOOKINGS'], 'updates')
    customer_list = []
    col_num_end_customer = -1
    col_num_erp_customer = -1

    #
    # First find the column numbers for these column names in the book
    #
    for val in my_map:
        if val[0] == 'ERP End Customer Name':
            col_num_erp_customer = val[2]
        elif val[0] == 'End Customer Global Ultimate Name':
            col_num_end_customer = val[2]

    #
    # Main loop of bookings excel data
    #
    top_row = []
    for i in range(sheet_bookings.nrows):
        if i == 0:
            # Grab these values to make column headings
            top_row_erp_val = sheet_bookings.cell_value(
                i, col_num_erp_customer)
            top_row_end_val = sheet_bookings.cell_value(
                i, col_num_end_customer)
            top_row = [top_row_erp_val, top_row_end_val]
            continue

        # Capture both of the Customer names
        customer_name_erp = sheet_bookings.cell_value(i, col_num_erp_customer)
        customer_name_end = sheet_bookings.cell_value(i, col_num_end_customer)
        customer_list.append((customer_name_erp, customer_name_end))

    # Create a simple customer_list list of tuples
    # Contains a full set of unique sorted customer names
    # customer_list = [(erp_customer_name,end_customer_ultimate), (CustA,CustA)]
    customer_list = set(customer_list)

    # Convert the SET to a LIST so we can sort it
    customer_list = list(customer_list)

    # Sort the LIST
    customer_list.sort(key=lambda tup: tup[0])

    # Convert the customer name tuples to a list
    tmp_list = []
    for customer in customer_list:
        tmp_list.append(list(customer))
    customer_list = tmp_list

    # Place column headings at the top
    customer_list.insert(0, top_row)

    return customer_list
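
# The set -> list -> sort -> list-of-lists sequence above can be collapsed
# into a single expression. A small self-contained sketch (customer names are
# made up):
def dedupe_and_sort_customers(pairs):
    """Return unique (erp_name, end_customer) pairs as sorted lists."""
    return [list(t) for t in sorted(set(pairs), key=lambda tup: tup[0])]


# dedupe_and_sort_customers([('CustB', 'CustB Global'),
#                            ('CustA', 'CustA Global'),
#                            ('CustB', 'CustB Global')])
# -> [['CustA', 'CustA Global'], ['CustB', 'CustB Global']]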
Example No. 3
def get_customer():
    print('Hello from main.py')

    # Try creating a Test_Table in the mySQL DB from Models.py
    create_tables("Test_Table")

    # Try retrieving a SmartSheet
    ss_test_list = get_list_from_ss('Tetration SKUs')
    print('Rows in my SmartSheet', len(ss_test_list))

    # Try writing an xlsx file to local storage
    push_list_to_xls(ss_test_list, 'my_test_sheet.xlsx')

    # Try opening an xlsx file from local storage
    wb, ws = open_wb('my_test_sheet.xlsx')
    return 'Hello from Main.py: I read: ' + str(len(ss_test_list)) + ' rows from SmartSheets'
Example No. 4
def get_customer():
    print('Hello from main.py')
    # print('EXECUTING pre run checks')
    # pre_run_file_checks()
    # print('DONE EXECUTING pre run checks')

    # Try creating a Test_Table in the mySQL DB from Models.py
    create_tables("Test_Table")
    create_tables("Bookings")

    # Try retrieving a SmartSheet
    ss_test_list = get_list_from_ss('Tetration SKUs')
    print('Rows in my SmartSheet', len(ss_test_list))

    # Try writing an xlsx file to local storage
    push_list_to_xls(ss_test_list, 'my_test_sheet.xlsx')

    # Try opening an xlsx file from local storage
    wb, ws = open_wb('my_test_sheet.xlsx')
    print("Opened my_test_sheet and it has:", ws.nrows, " rows")

    return 'Hello from Main.py: I read: ' + str(
        len(ss_test_list)) + ' rows from SmartSheets'
Example No. 5
def build_sheet_map(file_name, my_map, tag, run_dir=app_cfg["UPDATES_DIR"]):
    print('MAPPING>>>>>>>>>> ', run_dir + '\\' + file_name)

    # First look at the tag and decide if we are looking at
    # A local excel sheet or Smart Sheet
    if tag[:2] == 'SS':
        # Get the list of columns
        my_sheet = Ssheet(file_name, True)
        my_columns = my_sheet.columns

        # Loop across the Smart Sheet columns
        for ss_col in range(len(my_columns)):
            ss_col_num = my_sheet.columns[ss_col]['index']
            ss_col_name = my_sheet.columns[ss_col]['title']

            # Loop across the sheet map and look for a match
            for idx, val in enumerate(my_map):
                col_name = val[0]
                if col_name == ss_col_name and val[1] == tag:
                    # We have a match on the source col name and file tag
                    val[2] = ss_col_num

    elif tag[:3] == 'XLS':
        workbook, sheet = open_wb(file_name, run_dir)
        # Loop across all column headings in the bookings file and
        # Find the column number that matches the col_name in my_dict
        for wb_col_num in range(sheet.ncols):
            for idx, val in enumerate(my_map):
                col_name = val[0]
                if col_name == sheet.cell_value(0,
                                                wb_col_num) and val[1] == tag:
                    val[2] = wb_col_num
    else:
        print('Missing Map TAG')
        exit()

    return my_map
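
# Judging by how build_sheet_map() reads and writes entries (and how the map
# is used later), each sheet_map row appears to be a small list of the form
#     [source_column_name, file_tag, resolved_column_number, new_column_name]
# with the column number left at -1 until the mapping step fills it in.
# A hypothetical usage sketch; the entries below are illustrative, not the
# project's real sheet_map:
example_sheet_map = [
    ['ERP End Customer Name', 'XLS_BOOKINGS', -1, ''],
    ['Bundle Product ID', 'XLS_BOOKINGS', -1, ''],
    ['Sales Level 1', 'XLS_BOOKINGS', -1, '*DELETE*'],
]
# resolved = build_sheet_map(app_cfg['XLS_BOOKINGS'], example_sheet_map,
#                            'XLS_BOOKINGS')
# Afterwards entry[2] holds the matching column index in the workbook,
# or stays -1 for columns that only exist in the output sheet.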
def process_delivery(run_dir=app_cfg["UPDATES_DIR"]):
    print('MAPPING>>>>>>>>>> ',
          run_dir + '\\' + app_cfg['XLS_AS_DELIVERY_STATUS'])

    # Open up the subscription excel workbooks
    wb, sheet = open_wb(app_cfg['XLS_AS_DELIVERY_STATUS'], run_dir)

    # Get the renewal columns we are looking for
    my_map = build_sheet_map(app_cfg['XLS_AS_DELIVERY_STATUS'], sheet_map,
                             'XLS_AS_DELIVERY_STATUS', run_dir)

    # List comprehension replacement for above
    # Strip out the columns from the sheet map that we don't need
    my_map = [x for x in my_map if x[1] == 'XLS_AS_DELIVERY_STATUS']

    # Create a simple column name dict
    col_nums = {
        sheet.cell_value(0, col_num): col_num
        for col_num in range(0, sheet.ncols)
    }
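    # col_nums maps header text to its column index,
    # e.g. {'End Customer': 3, 'Project ID': 7, ...} (index values illustrative)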

    # Loop over all of the delivery records
    # Build a dict of {customer:[delivery stuff blah blah ]}
    my_dict = {}
    for row_num in range(1, sheet.nrows):
        customer = sheet.cell_value(row_num, col_nums['End Customer'])
        if customer in my_dict:
            tmp_record = []
            tmp_records = my_dict[customer]
        else:
            tmp_record = []
            tmp_records = []

        # Loop over the map and gather the columns we need
        for col_map in my_map:
            my_cell = sheet.cell_value(row_num, col_map[2])

            # Is this cell a Date type (3) ?
            # If so format as a M/D/Y
            if sheet.cell_type(row_num, col_map[2]) == 3:
                my_cell = datetime.datetime(
                    *xlrd.xldate_as_tuple(my_cell, wb.datemode))
                my_cell = my_cell.strftime('%m-%d-%Y')

            tmp_record.append(my_cell)

        tmp_records.append(tmp_record)
        my_dict[customer] = tmp_records

    #
    # # Sort each customers delivery status
    # #
    # delivery_dict = {}
    # # print('diag1',my_dict['BLUE CROSS & BLUE SHIELD OF ALABAMA'])
    # # exit()
    # # ['08-20-2018', '12', '08-20-2019', 72.0, 1500.0, 'Sub170034', 'ACTIVE']
    #
    # for customer, renewals in my_dict.items():
    #     # Sort this customers renewal records by date order
    #     renewals.sort(key=lambda x: datetime.datetime.strptime(x[0], '%m-%d-%Y'))
    #     # sorted_dict[customer] = renewals
    #     #
    #     # print('\t', customer, ' has', len(renewals), ' records')
    #     # print('\t\t', renewals)
    #     # print ('---------------------')
    #     # time.sleep(1)
    #
    #     next_renewal_date = renewals[0][0]
    #     next_renewal_rev = 0
    #     next_renewal_qtr = renewals[0][2]
    #     for renewal_rec in renewals:
    #         if renewal_rec[0] == next_renewal_date:
    #             # Tally this renewal record and get the next
    #             # print (type(renewal_rec[4]), renewal_rec[4])
    #             # time.sleep(1)
    #             next_renewal_rev = renewal_rec[4] + next_renewal_rev
    #         elif renewal_rec[0] != next_renewal_date:
    #             # Record these summarized values
    #             summarized_rec.append([next_renewal_date, next_renewal_rev, next_renewal_qtr])
    #             # Reset these values and get the next renewal date for this customer
    #             next_renewal_date = renewal_rec[0]
    #             next_renewal_rev = renewal_rec[1]
    #             next_renewal_qtr = renewal_rec[2]
    #
    #         # Check to see if this is the last renewal record
    #         # If so exit the loop
    #         if renewals.index(renewal_rec) == len(renewals)-1:
    #             break
    #
    #     summarized_rec.append([next_renewal_date, next_renewal_rev, next_renewal_qtr])
    #     summarized_dict[customer] = summarized_rec
    #     summarized_rec = []

    return my_dict
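
# The cell_type == 3 branch above is the usual xlrd idiom for turning an
# Excel date serial into a Python datetime. A self-contained sketch of just
# that conversion, assuming an already-open xlrd workbook and sheet:
import datetime

import xlrd


def cell_as_date_string(wb, sheet, row_num, col_num, fmt='%m-%d-%Y'):
    """Return the cell value, formatted as a date string if it is a date cell."""
    value = sheet.cell_value(row_num, col_num)
    if sheet.cell_type(row_num, col_num) == xlrd.XL_CELL_DATE:  # type code 3
        value = datetime.datetime(*xlrd.xldate_as_tuple(value, wb.datemode))
        value = value.strftime(fmt)
    return value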
Example No. 7
def process_bookings():
    # Go to Smartsheets and build these two dicts to use as reference lookups
    # team_dict: {'sales_levels 1-6':[('PSS','TSA')]}
    # sku_dict: {sku : [sku_type, sku_description]}
    team_dict = build_coverage_dict()
    sku_dict = build_sku_dict()

    #
    # Open up the bookings excel workbooks
    #
    wb_bookings, sheet_bookings = open_wb(app_cfg['XLS_BOOKINGS'], 'updates')

    # From the current up to date bookings file build a simple list
    # that describes the format of the output file we are creating
    # and the columns we need to add (ie PSS, TSA, Renewal Dates)

    my_sheet_map = build_sheet_map(app_cfg['XLS_BOOKINGS'], sheet_map,
                                   'XLS_BOOKINGS')
    print('sheet_map ', id(sheet_map))
    print('my_sheet_map ', id(my_sheet_map))
    #
    # init a bunch of variables we need for the main loop
    #
    order_header_row = []
    order_rows = []
    order_row = []
    trash_rows = []

    dest_col_nums = {}
    src_col_nums = {}

    # Build a dict of source sheet {'col_name' : src_col_num}
    # Build a dict of destination sheet {'col_name' : dest_col_num}
    # Build the header row for the output file
    for idx, val in enumerate(my_sheet_map):
        # Add to the col_num dict of col_names
        dest_col_nums[val[0]] = idx
        src_col_nums[val[0]] = val[2]
        order_header_row.append(val[0])

    # Initialize the order_row and trash_row lists
    order_rows.append(order_header_row)
    trash_rows.append(sheet_bookings.row_values(0))

    print('There are ', sheet_bookings.nrows, ' rows in Raw Bookings')

    #
    # Main loop of over raw bookings excel data
    #
    # This loop will build two lists:
    # 1. Interesting orders based on SKUs (order_rows)
    # 2. Trash orders SKUs we don't care about (trash_rows)
    # As determined by the sku_dict
    # We will also assign team coverage to the rows in both lists
    #
    for i in range(1, sheet_bookings.nrows):

        # Is this SKU of interest ?
        sku = sheet_bookings.cell_value(i, src_col_nums['Bundle Product ID'])

        if sku in sku_dict:
            # Let's make a row for this order
            # Since it has an "interesting" sku
            customer = sheet_bookings.cell_value(
                i, src_col_nums['ERP End Customer Name'])
            order_row = []
            sales_level = ''
            sales_level_cntr = 0

            # Grab SKU data from the SKU dict
            sku_type = sku_dict[sku][0]
            sku_desc = sku_dict[sku][1]
            sku_sensor_cnt = sku_dict[sku][2]

            # Walk across the sheet_map columns
            # to build this output row cell by cell
            for val in my_sheet_map:
                col_name = val[0]  # Source Sheet Column Name
                col_idx = val[2]  # Source Sheet Column Number

                # If this is a 'Sales Level X' column then
                # Capture its value until we get to level 6
                # then do a team lookup
                if col_name[:-2] == 'Sales Level':
                    sales_level = sales_level + sheet_bookings.cell_value(
                        i, col_idx) + ','
                    sales_level_cntr += 1

                    if sales_level_cntr == 6:
                        # We have collected all 6 sales levels
                        # Now go to find_team to do the lookup
                        sales_level = sales_level[:-1]
                        sales_team = find_team(team_dict, sales_level)
                        pss = sales_team[0]
                        tsa = sales_team[1]
                        order_row[dest_col_nums['pss']] = pss
                        order_row[dest_col_nums['tsa']] = tsa

                if col_idx != -1:
                    # OK we have a cell that we need from the raw bookings
                    # sheet we need so grab it
                    order_row.append(sheet_bookings.cell_value(i, col_idx))
                elif col_name == 'Product Description':
                    # Add in the Product Description
                    order_row.append(sku_desc)
                elif col_name == 'Product Type':
                    # Add in the Product Type
                    order_row.append(sku_type)
                elif col_name == 'Sensor Count':
                    # Add in the Sensor Count
                    order_row.append(sku_sensor_cnt)
                else:
                    # this cell is assigned a -1 in the sheet_map
                    # so assign a blank as a placeholder for now
                    order_row.append('')

            # Done with all the columns in this row
            # Log this row for BOTH customer names and orders
            # Go to next row of the raw bookings data
            order_rows.append(order_row)

        else:
            # The SKU was not interesting so let's trash it
            trash_rows.append(sheet_bookings.row_values(i))

    print('Extracted ', len(order_rows),
          ' rows of interesting SKUs from Raw Bookings')
    print('Trashed ', len(trash_rows),
          ' rows of trash SKUs from Raw Bookings')
    #
    # End of main loop
    #

    #
    # Renewal Analysis
    #
    renewal_dict = process_renewals()
    for order_row in order_rows[1:]:
        customer = order_row[dest_col_nums['ERP End Customer Name']]
        if customer in renewal_dict:
            next_renewal_date = datetime.datetime.strptime(
                renewal_dict[customer][0][0], '%m-%d-%Y')
            next_renewal_rev = renewal_dict[customer][0][1]
            next_renewal_qtr = renewal_dict[customer][0][2]

            order_row[dest_col_nums['Renewal Date']] = next_renewal_date
            order_row[dest_col_nums['Product Bookings']] = next_renewal_rev
            order_row[dest_col_nums['Fiscal Quarter ID']] = next_renewal_qtr

            if len(renewal_dict[customer]) > 1:
                renewal_comments = '+' + str(len(renewal_dict[customer]) -
                                             1) + ' more renewal(s)'
                order_row[dest_col_nums['Renewal Comments']] = renewal_comments

    # Now we build an order dict
    # Let's organize it like this:
    # order_dict: {cust_name:[[order1],[order2],[orderN]]}
    order_dict = {}
    orders = []
    order = []

    for idx, order_row in enumerate(order_rows):
        if idx == 0:
            continue
        customer = order_row[0]
        orders = []

        # Is this customer in the order dict ?
        if customer in order_dict:
            orders = order_dict[customer]
            orders.append(order_row)
            order_dict[customer] = orders
        else:
            orders.append(order_row)
            order_dict[customer] = orders

    # Create a simple customer_list
    # Contains a full set of unique sorted customer names
    # Example: customer_list = [[erp_customer_name,end_customer_ultimate], [CustA,CustA]]
    customer_list = build_customer_list()
    print('There are ', len(customer_list), ' unique Customer Names')

    # Clean up order_dict to remove:
    # 1.  +/- zero sum orders
    # 2. zero revenue orders
    order_dict, customer_platforms = cleanup_orders(customer_list, order_dict,
                                                    my_sheet_map)

    #
    # Create a summary order file out of the order_dict
    #
    summary_order_rows = [order_header_row]
    for key, val in order_dict.items():
        for my_row in val:
            summary_order_rows.append(my_row)
    print(len(summary_order_rows), ' scrubbed rows remain after removing "noise"')

    #
    # Push our lists to an excel file
    #
    # push_list_to_xls(customer_platforms, 'jim ')
    print('order summary name ', app_cfg['XLS_ORDER_SUMMARY'])

    push_list_to_xls(summary_order_rows, app_cfg['XLS_ORDER_SUMMARY'],
                     'updates')
    push_list_to_xls(order_rows, app_cfg['XLS_ORDER_DETAIL'], 'updates')
    push_list_to_xls(customer_list, app_cfg['XLS_CUSTOMER'], 'updates')
    push_list_to_xls(trash_rows, app_cfg['XLS_BOOKINGS_TRASH'], 'updates')

    # exit()
    #
    # Push our lists to a smart sheet
    #
    # push_xls_to_ss(wb_file, app_cfg['XLS_ORDER_SUMMARY'])
    # push_xls_to_ss(wb_file, app_cfg['XLS_ORDER_DETAIL'])
    # push_xls_to_ss(wb_file, app_cfg['XLS_CUSTOMER'])
    # exit()
    return
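
# find_team() and build_coverage_dict() live elsewhere in this project. From
# the call sites above, a comma-joined "Sales Level 1..6" string keys into
# team_dict and the result is unpacked as (pss, tsa). A minimal hypothetical
# sketch of that lookup (the dict contents shown are made up):
def find_team_sketch(team_dict, sales_level,
                     default=('Unassigned', 'Unassigned')):
    """Return a (pss, tsa) tuple for a 'lev1,lev2,...,lev6' key, if mapped."""
    teams = team_dict.get(sales_level)
    if teams:
        return teams[0]  # first (PSS, TSA) pair registered for this coverage
    return default


# team_dict = {'AMERICAS,US COMMERCIAL,EAST,AREA1,REGION1,TEAM1':
#              [('Pat PSS', 'Terry TSA')]}
# pss, tsa = find_team_sketch(team_dict,
#                             'AMERICAS,US COMMERCIAL,EAST,AREA1,REGION1,TEAM1')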
Example No. 8
from my_app.func_lib.open_wb import open_wb
from my_app.func_lib.push_list_to_xls import push_list_to_xls
from my_app.func_lib.push_xlrd_to_xls import push_xlrd_to_xls
from my_app.func_lib.build_sku_dict import build_sku_dict
from my_app.settings import app_cfg
from my_app.Customer import Customer
import xlrd
from datetime import datetime
import time

as_wb, as_ws = open_wb(app_cfg['TESTING_TA_AS_FIXED_SKU_RAW'])
cust_wb, cust_ws = open_wb(app_cfg['TESTING_BOOKINGS_RAW_WITH_SO'])
sub_wb, sub_ws = open_wb(app_cfg['TESTING_RAW_SUBSCRIPTIONS'])

print("AS Fixed SKUs Rows:", as_ws.nrows)
print('Bookings Rows:', cust_ws.nrows)
print('Subscription Rows:', sub_ws.nrows)

#
# Create a SKU Filter
#
# Options Are: Product / Software / Service / SaaS / *
sku_filter_val = '*'
tmp_dict = build_sku_dict()
sku_filter_dict = {}

for key, val in tmp_dict.items():
    if val[0] == sku_filter_val:
        sku_filter_dict[key] = val
    elif sku_filter_val == '*':
        # Selects ALL Interesting SKUs
        sku_filter_dict[key] = val
Example No. 9
def main():
    as_wb, as_ws = open_wb(app_cfg['TESTING_TA_AS_FIXED_SKU_RAW'])
    cust_wb, cust_ws = open_wb(app_cfg['TESTING_BOOKINGS_RAW_WITH_SO'])
    sub_wb, sub_ws = open_wb(app_cfg['TESTING_RAW_SUBSCRIPTIONS'])

    print()
    print('RAW Input Data')
    print("\tAS Fixed SKUs Rows:", as_ws.nrows)
    print('\tBookings Rows:', cust_ws.nrows)
    print('\tSubscription Rows:', sub_ws.nrows)

    #
    # Build a Team Dict
    #
    team_dict = build_coverage_dict()

    #
    # Create a SKU Filter
    #
    # Options Are: Product / Software / Service / SaaS / All SKUs
    sku_filter_val = 'All SKUs'
    tmp_dict = build_sku_dict()
    sku_filter_dict = {}

    for key, val in tmp_dict.items():
        if val[0] == sku_filter_val:
            sku_filter_dict[key] = val
        elif sku_filter_val == 'All SKUs':
            # Selects ALL Interesting SKUs
            sku_filter_dict[key] = val

    print()
    print('SKU Filter set to:', sku_filter_val)
    print()

    #
    # Build a xref dict of valid customer ids for lookup by SO and ERP Name
    #
    xref_cust_name = {}
    xref_so = {}
    for row_num in range(1, cust_ws.nrows):
        cust_id = cust_ws.cell_value(row_num, 15)
        cust_erp_name = cust_ws.cell_value(row_num, 13)
        cust_so = cust_ws.cell_value(row_num, 11)

        # Only add valid ID/Name Pairs to the reference
        if cust_id == '-999' or cust_id == '':
            continue

        if cust_erp_name not in xref_cust_name:
            xref_cust_name[cust_erp_name] = cust_id
            if (cust_so, cust_erp_name) not in xref_so:
                xref_so[(cust_so, cust_erp_name)] = cust_id

    #
    # Process Main Bookings File
    #
    cntr = 0
    cust_db = {}
    cust_alias_db = {}
    so_dict = {}

    #
    # Main loop over the bookings data starts here
    #
    for row_num in range(1, cust_ws.nrows):
        # Gather the fields we want
        cust_id = cust_ws.cell_value(row_num, 15)
        cust_erp_name = cust_ws.cell_value(row_num, 13)
        cust_ultimate_name = cust_ws.cell_value(row_num, 14)
        cust_so = cust_ws.cell_value(row_num, 11)
        cust_sku = cust_ws.cell_value(row_num, 19)
        cust_sales_lev_1 = cust_ws.cell_value(row_num, 3)
        cust_sales_lev_2 = cust_ws.cell_value(row_num, 4)
        cust_sales_lev_3 = cust_ws.cell_value(row_num, 5)
        cust_sales_lev_4 = cust_ws.cell_value(row_num, 6)
        cust_sales_lev_5 = cust_ws.cell_value(row_num, 7)
        cust_sales_lev_6 = cust_ws.cell_value(row_num, 8)
        cust_acct_mgr = cust_ws.cell_value(row_num, 9)

        # Record this SO number in a simple dict {so: ((cust_id, cust_erp_name), ...)}
        if cust_so not in so_dict:
            so_dict[cust_so] = ((cust_id, cust_erp_name), )
        else:
            so_dict[cust_so] = so_dict[cust_so] + ((cust_id, cust_erp_name), )

        # Do we have a missing or bad cust_id try to look one up
        if cust_id == '' or cust_id == '-999':
            if cust_erp_name in xref_cust_name:
                cust_id = xref_cust_name[cust_erp_name]

            if (cust_so, cust_erp_name) in xref_so:
                cust_id = xref_so[(cust_so, cust_erp_name)]

            # If id is still bad flag cust_id as UNKNOWN
            if cust_id == '' or cust_id == '-999':
                cust_id = 'UNKNOWN'

        #
        # Check cust_db
        # {cust_id: Customer_obj}
        #
        # Is this a new cust_id ?
        if cust_id not in cust_db:
            # Create a new cust_id object and basic record
            cust_db[cust_id] = Customer(cust_id)
            cust_db[cust_id].sales_lev_1 = cust_sales_lev_1
            cust_db[cust_id].sales_lev_2 = cust_sales_lev_2
            cust_db[cust_id].sales_lev_3 = cust_sales_lev_3
            cust_db[cust_id].sales_lev_4 = cust_sales_lev_4
            cust_db[cust_id].sales_lev_5 = cust_sales_lev_5
            cust_db[cust_id].sales_lev_6 = cust_sales_lev_6
            sales_level = cust_sales_lev_1 + ',' + cust_sales_lev_2 + ',' + cust_sales_lev_3 + ',' + \
                cust_sales_lev_4 + ',' + cust_sales_lev_5 + ',' + cust_sales_lev_6
            sales_team = find_team(team_dict, sales_level)
            pss = sales_team[0]
            tsa = sales_team[1]
            cust_db[cust_id].pss = pss
            cust_db[cust_id].tsa = tsa
            cust_db[cust_id].am = cust_acct_mgr

        # Is this a SKU we want if so add_order
        if cust_sku in sku_filter_dict:
            cust_db[cust_id].add_order(cust_so, cust_sku)

        # Add this customer_erp_name as an alias to the customer object
        cust_db[cust_id].add_alias(cust_erp_name)

        # Add this name to an easy alias lookup dict
        if cust_erp_name not in cust_alias_db:
            cust_alias_db[cust_erp_name] = cust_id

    print('Unique Customer IDs with filter of', " '" + sku_filter_val + "' :",
          len(cust_db))
    print("Customer Unique Customer Names: ", len(cust_alias_db))
    print("Unique Sales Order Numbers: ", len(so_dict))

    # A quick check on customer ids -
    id_list = [['Customer ID', 'Customer Aliases']]
    for cust_id, cust_obj in cust_db.items():
        alias_list = []
        alias_str = ''
        cust_aliases = cust_obj.aliases
        for cust_alias in cust_aliases:
            alias_list.append(cust_alias)
            alias_str = alias_str + cust_alias + ' : '
        alias_str = alias_str[:-3]
        id_list.append([cust_id, alias_str])

    push_list_to_xls(id_list, 'log_Unique_Cust_IDs.xlsx')

    # # Display Customer IDs and Aliases
    # for cust_id, cust_obj in cust_db.items():
    #     if len(cust_obj.aliases) > 1:
    #         print()
    #         print('Customer ID', cust_id, ' has the following aliases')
    #         for name in cust_obj.aliases:
    #             print('\t\t', name)
    #             time.sleep(1)

    # # Display Sales Order info
    # for cust_id, cust_obj in cust_db.items():
    #     if len(cust_obj.orders) > 1:
    #         print()
    #         print('Customer ID', cust_id, cust_obj.aliases, ' has the following orders')
    #         for my_order, my_skus in cust_obj.orders.items():
    #             print('\t', 'SO Num:', my_order, 'SKUs', my_skus)
    #             time.sleep(1)

    #
    # Process the AS-F SKU file - match bookings SOs against (AS SO / PID) numbers
    # and make a list of tuples for each cust_id
    #
    as_db = {}
    so_status_list = [[
        'AS SO Number', 'AS Customer Name', "AS PID", 'Duplicate ?',
        'Match in Booking ?'
    ]]
    as_zombie_so = []
    as_so_found_cntr = 0
    as_so_not_found_cntr = 0
    as_so_duplicate_cntr = 0
    as_so_unique_cntr = 0
    for row_num in range(1, as_ws.nrows):
        my_as_info_list = []
        # Gather the fields we want
        as_pid = as_ws.cell_value(row_num, 0)
        as_cust_name = as_ws.cell_value(row_num, 2)
        as_so = as_ws.cell_value(row_num, 19)

        # Just a check
        if as_so in as_db:
            dupe = 'Duplicate SO'
            as_so_duplicate_cntr += 1
        else:
            dupe = 'Unique SO'
            as_so_unique_cntr += 1

        if as_so not in as_db:
            my_as_info_list.append((as_pid, as_cust_name))
            as_db[as_so] = my_as_info_list
        else:
            my_as_info_list = as_db[as_so]
            add_it = True
            for info in my_as_info_list:
                if info == (as_pid, as_cust_name):
                    add_it = False
                    break
            if add_it:
                my_as_info_list.append((as_pid, as_cust_name))
                as_db[as_so] = my_as_info_list

        # Checks
        if as_so not in so_dict:
            so_status_list.append(
                [as_so, as_cust_name, as_pid, dupe, 'NOT in Bookings'])
            as_zombie_so.append([as_so, as_cust_name, as_pid])
            as_so_not_found_cntr += 1
        else:
            so_status_list.append(
                [as_so, as_cust_name, as_pid, dupe, 'FOUND in Bookings'])
            as_so_found_cntr += 1

    push_list_to_xls(so_status_list, 'log_AS SO_Status_List.xlsx')
    print('AS SO NOT Found (Zombies):', as_so_not_found_cntr)
    print('AS SO Found:', as_so_found_cntr)
    print('\t AS SO Totals:', as_so_found_cntr + as_so_not_found_cntr)
    print()
    print('AS SO Duplicate:', as_so_duplicate_cntr)
    print('AS SO Unique:', as_so_unique_cntr)

    #
    # Update the cust_db objects with the AS data from as_db
    #
    found_list = []
    for cust_id, cust_obj in cust_db.items():
        for so, skus in cust_obj.orders.items():
            if so in as_db:
                found_list.append(so)
                cust_obj.add_as_pid(so, as_db[so])

    print('Updated cust_db with: ', len(found_list), ' AS SOs')

    #
    # Process Subscriptions and add to Customer Objects
    #
    for row_num in range(1, sub_ws.nrows):
        # Gather the fields we want
        sub_cust_name = sub_ws.cell_value(row_num, 2)
        sub_id = sub_ws.cell_value(row_num, 4)
        sub_start_date = sub_ws.cell_value(row_num, 6)
        sub_renew_date = sub_ws.cell_value(row_num, 8)
        sub_renew_status = sub_ws.cell_value(row_num, 5)
        sub_monthly_rev = sub_ws.cell_value(row_num, 10)

        year, month, day, hour, minute, second = xlrd.xldate_as_tuple(
            sub_start_date, sub_wb.datemode)
        sub_start_date = datetime(year, month, day)

        year, month, day, hour, minute, second = xlrd.xldate_as_tuple(
            sub_renew_date, sub_wb.datemode)
        sub_renew_date = datetime(year, month, day)

        if sub_cust_name in cust_alias_db:
            cust_id = cust_alias_db[sub_cust_name]
            cust_obj = cust_db[cust_id]
            sub_info = [
                sub_id, sub_cust_name, sub_start_date, sub_renew_date,
                sub_renew_status, sub_monthly_rev
            ]
            cust_obj.add_sub_id(sub_info)

    #
    # Make the Magic List
    #
    magic_list = []
    header_row = [
        'Customer ID', 'AS SO', 'AS PID', 'AS Customer Name', 'PSS', 'TSA',
        'AM', 'Upcoming Renewal Info' + ' \n' +
        'Sub ID - Start Date - Renewal Date - Days to Renew - Annual Rev',
        ' Next Renewal Date', 'Days to Renew', 'Subscription Status',
        'AS Delivery Mgr', 'AS Tracking Status', 'AS Tracking Sub Status',
        'AS Tracking Comments'
    ]
    magic_list.append(header_row)
    print(magic_list)
    x = 0
    today = datetime.today()

    for cust_id, cust_obj in cust_db.items():
        cust_aliases = cust_obj.aliases
        as_pids = cust_obj.as_pids
        sub_ids = cust_obj.subs
        pss = cust_obj.pss
        tsa = cust_obj.tsa
        am = cust_obj.am

        if len(as_pids) == 0:
            # No AS PID info available
            sub_summary, next_renewal_date, days_to_renew, sub_renew_status = process_sub_info(
                cust_obj.subs)
            magic_row = [
                cust_id, '', 'AS Info Unavailable', cust_aliases[0], pss, tsa,
                am, sub_summary, next_renewal_date, days_to_renew,
                sub_renew_status, '', '', '', ''
            ]
            magic_list.append(magic_row)
        else:
            # Let's look at the AS PIDs in cust_obj
            for so, as_pid_info in as_pids.items():
                # We will make a row for each AS SO
                for as_detail in as_pid_info:
                    magic_row = []
                    as_so = so
                    as_pid = as_detail[0]
                    as_cust_name = as_detail[1]

                    sub_summary, next_renewal_date, days_to_renew, sub_renew_status = process_sub_info(
                        cust_obj.subs)

                    # Go get additional AS Info
                    as_tracking_status = ''
                    as_tracking_sub_status = ''
                    as_tracking_comments = ''
                    as_dm = ''
                    for row_num in range(1, as_ws.nrows):
                        if as_pid == as_ws.cell_value(row_num, 0):
                            as_dm = as_ws.cell_value(row_num, 1)
                            as_tracking_status = as_ws.cell_value(row_num, 7)
                            as_tracking_sub_status = as_ws.cell_value(
                                row_num, 8)
                            as_tracking_comments = as_ws.cell_value(row_num, 9)
                            break

                    magic_row = [
                        cust_id, so, as_pid, as_cust_name, pss, tsa, am,
                        sub_summary, next_renewal_date, days_to_renew,
                        sub_renew_status, as_dm, as_tracking_status,
                        as_tracking_sub_status, as_tracking_comments
                    ]

                    magic_list.append(magic_row)

    print(len(magic_list))
    print(x)
    push_list_to_xls(magic_list, 'magic.xlsx')
    return
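
# process_sub_info() is defined elsewhere. From its call sites it takes the
# customer's list of sub_info records ([sub_id, name, start_date, renew_date,
# status, monthly_rev]) and returns (sub_summary, next_renewal_date,
# days_to_renew, sub_renew_status). A hypothetical sketch, assuming "next"
# means the earliest renewal date and annual revenue is 12x the monthly figure:
from datetime import datetime


def process_sub_info_sketch(subs):
    if not subs:
        return 'No subscriptions', '', '', ''
    subs = sorted(subs, key=lambda s: s[3])  # earliest renewal date first
    sub_id, _name, start, renew, status, monthly = subs[0]
    days_to_renew = (renew - datetime.today()).days
    summary = '{} - {:%m-%d-%Y} - {:%m-%d-%Y} - {} - {:,.0f}'.format(
        sub_id, start, renew, days_to_renew, float(monthly) * 12)
    return summary, renew, days_to_renew, status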
Example No. 10
            # Add the new cell to the new row
            my_new_row[col_num] = new_cell_value

        # Join list_to_hash into one big string (str_to_hash)
        str_to_hash = ''.join(list_to_hash)

        # Create a 32 character hash value and place it in the Hash Column
        hash_result = (hashlib.md5(str_to_hash.encode('utf-8')).hexdigest())

        tie_breaker = 1
        if hash_result in hash_dict:
            tie_breaker = hash_dict[hash_result] + 1
            hash_dict[hash_result] = tie_breaker
        else:
            hash_dict[hash_result] = tie_breaker

        my_new_row[date_added_col] = date_added
        my_new_row[hash_col] = hash_result + '-' + str(tie_breaker)

    return my_new_sheet


if __name__ == "__main__" and __package__ is None:
    xlrd_wb, xlrd_ws = open_wb('tmp_TA Master Bookings.xlsx')
    # wb, ws = open_wb('tmp_Master Renewals.xlsx')
    # xlrd_wb, xlrd_ws = open_wb('tmp_TA Customer List.xlsx')

    a_sheet = add_hash_to_xls(xlrd_wb, xlrd_ws)
    push_list_to_xls(a_sheet, 'tmp_TA Master Bookings_hashed.xlsx')
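
# The snippet above is cut off at the top, but the hashing part is complete:
# the interesting cells of each row are joined into one string, hashed with
# MD5, and a tie-breaker counter is appended so identical rows still get
# unique keys. A self-contained sketch of just that idea:
import hashlib


def hash_rows(rows):
    """Return one 'md5hex-N' key per row; N disambiguates duplicate rows."""
    hash_dict = {}
    keys = []
    for row in rows:
        str_to_hash = ''.join(str(cell) for cell in row)
        hash_result = hashlib.md5(str_to_hash.encode('utf-8')).hexdigest()
        tie_breaker = hash_dict.get(hash_result, 0) + 1
        hash_dict[hash_result] = tie_breaker
        keys.append(hash_result + '-' + str(tie_breaker))
    return keys


# hash_rows([['a', 1], ['a', 1]]) -> ['<md5 of "a1">-1', '<md5 of "a1">-2']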
Example No. 11
    print()
    print('CREATING IN DIRECTORY >>>>>>>>>> ', path_to_run_dir)
    print('CREATING SHEET >>>>>>>>>> ', csv_output)
    print()

    # for x, my_row in enumerate(my_list):
    with open(path_to_file, 'w', newline='') as writeFile:
        writer = csv.writer(writeFile)
        writer.writerows(my_list)

    return


if __name__ == "__main__" and __package__ is None:
    wb, ws = open_wb(app_cfg['XLS_BOOKINGS'])
    # wb, ws = open_wb(app_cfg['XLS_AS_DELIVERY_STATUS'])
    # wb, ws = open_wb(app_cfg['XLS_SUBSCRIPTIONS'])
    create_tables('Bookings')

    my_csv_list = xlrd_wb_to_csv(wb, ws)
    my_list = xlrd_wb_to_list(wb, ws)

    print(len(my_csv_list))
    print(len(my_list))

    push_list_to_xls(my_list, 'my_xls.xlsx')
    push_list_to_csv(my_csv_list, 'my_csv.csv')

    exit()
Example No. 12
def pre_run_file_checks(run_dir=app_cfg['UPDATES_SUB_DIR']):
    home = os.path.join(app_cfg['HOME'], app_cfg['MOUNT_POINT'],
                        app_cfg['MY_APP_DIR'])
    working_dir = app_cfg['WORKING_SUB_DIR']
    update_dir = app_cfg['UPDATES_SUB_DIR']
    archive_dir = app_cfg['ARCHIVES_SUB_DIR']

    # Check that all key directories exist
    path_to_main_dir = (os.path.join(home))
    if not os.path.exists(path_to_main_dir):
        print(path_to_main_dir, "MAIN_DIR does NOT Exist !")
        exit()

    path_to_run_dir = (os.path.join(home, run_dir))
    if not os.path.exists(path_to_run_dir):
        print(path_to_run_dir, " WORKING_DIR does NOT Exist !")
        exit()

    path_to_updates = (os.path.join(home, update_dir))
    if not os.path.exists(path_to_updates):
        print(path_to_updates, "UPDATES_SUB_DIR does NOT Exist !")
        exit()

    path_to_archives = (os.path.join(home, archive_dir))
    if not os.path.exists(path_to_archives):
        print(path_to_archives, "ARCHIVE_SUB_DIR does NOT Exist !")
        exit()

    # OK directories are there any files ?
    if not os.listdir(path_to_run_dir):
        print('Directory', path_to_run_dir, 'contains NO files')
        exit()

    #  Get the required Files to begin processing from app_cfg (settings.py)
    files_needed = {}
    # Do we have RAW files to process ?
    for var in app_cfg:
        if var.find('RAW') != -1:
            # Look for any config var containing the word 'RAW' and assume its file is 'Missing'
            files_needed[app_cfg[var]] = 'Missing'

    # See if the files_needed are present and have consistent dates (date_list)
    run_files = os.listdir(path_to_run_dir)
    date_list = []
    for file_needed, status in files_needed.items():
        for run_file in run_files:
            date_tag = run_file[-13:-13 + 8]  # Grab the date if any
            run_file = run_file[:len(run_file) -
                                14]  # Grab the name without the date
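            # e.g. with an original run_file of 'TA Bookings 01-31-19.xlsx'
            # (hypothetical name): the date slice yields '01-31-19' and the
            # name slice yields 'TA Bookings'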
            if run_file == file_needed:
                date_list.append(date_tag)  # Grab the date
                files_needed[file_needed] = 'Found'
                break

    # All time stamps the same ?
    base_date = date_list[0]
    for date_stamp in date_list:
        if date_stamp != base_date:
            print('ERROR: Inconsistent date stamp(s) found')
            exit()

    # Do we have all the files we need ?
    for file_name, status in files_needed.items():
        if status != 'Found':
            print("ERROR: Filename ", "'" + file_name,
                  "MM-DD-YY'  is missing from directory", "'" + run_dir + "'")
            exit()

    # Read the config_dict.json file
    try:
        with open(os.path.join(path_to_run_dir,
                               app_cfg['META_DATA_FILE'])) as json_input:
            config_dict = json.load(json_input)
        print(config_dict)
        print(type(config_dict))
        print(config_dict['last_run_dir'])
    except Exception:
        print('No config_dict file found.')

    # Since we have a consistent date, (re)create the metadata JSON file
    # and put the time stamp in it
    config_dict = {
        'data_time_stamp': base_date,
        'last_run_dir': path_to_run_dir,
        'files_scrubbed': 'never'
    }
    with open(os.path.join(path_to_run_dir, app_cfg['META_DATA_FILE']),
              'w') as json_output:
        json.dump(config_dict, json_output)

    # Delete all previous tmp_ files
    for file_name in run_files:
        if file_name[0:4] == 'tmp_':
            os.remove(os.path.join(path_to_run_dir, file_name))

    # Here is what we have - All things should be in place
    print('Our directories:')
    print('\tPath to Main Dir:', path_to_main_dir)
    print('\tPath to Updates Dir:', path_to_updates)
    print('\tPath to Archives Dir:', path_to_archives)
    print('\tPath to Run Dir:', path_to_run_dir)

    # Process the RAW data (Renewals and Bookings)
    # Clean up rows, combine multiple Bookings files, add custom table names
    processing_date = date_list[0]
    file_paths = []
    bookings = []
    subscriptions = []
    as_status = []
    print()
    print('We are processing files:')

    # We need to make a sku_filter_dict here
    tmp_dict = build_sku_dict()
    sku_filter_dict = {}
    for key, val in tmp_dict.items():
        if val[0] == 'Service':
            sku_filter_dict[key] = val

    # Main loop to process files
    for file_name in files_needed:
        file_path = file_name + ' ' + processing_date + '.xlsx'
        file_path = os.path.join(path_to_run_dir, file_path)

        file_paths.append(file_path)

        my_wb, my_ws = open_wb(file_name + ' ' + processing_date + '.xlsx',
                               run_dir)
        print('\t\t', file_name + '', processing_date + '.xlsx', ' has ',
              my_ws.nrows, ' rows and ', my_ws.ncols, 'columns')

        if file_name.find('Bookings') != -1:
            # For the Bookings start_row is here
            start_row = 3
            start_col = 1
            for row in range(start_row, my_ws.nrows):
                bookings.append(my_ws.row_slice(row, start_col))

        elif file_name.find('Subscriptions') != -1:
            # This raw sheet starts on row num 0
            for row in range(0, my_ws.nrows):
                subscriptions.append(my_ws.row_slice(row))

        elif file_name.find('AS Delivery Status') != -1:
            # This AS-F raw sheet starts on row num 0
            # Grab the header row
            as_status.append(my_ws.row_slice(0))
            for row in range(1, my_ws.nrows):
                # Check to see if this is a TA SKU
                if my_ws.cell_value(row, 14) in sku_filter_dict:
                    as_status.append(my_ws.row_slice(row))

    # For the Subscriptions sheet we need to convert
    # col 6 & 8 to DATE from STR
    # col 10 (monthly rev) to FLOAT from STR
    #
    subscriptions_scrubbed = []

    for row_num, my_row in enumerate(subscriptions):
        my_new_row = []

        for col_num, my_cell in enumerate(my_row):
            if row_num == 0:
                # Is this the header row ?
                my_new_row.append(my_cell.value)
                continue
            if col_num == 9 or col_num == 11:
                tmp_val = datetime.strptime(my_cell.value, '%d %b %Y')
                # tmp_val = datetime.strptime(my_cell.value, '%m/%d/%Y')
            elif col_num == 13:
                tmp_val = my_cell.value
                try:
                    tmp_val = float(tmp_val)
                except ValueError:
                    tmp_val = 0
            else:
                tmp_val = my_cell.value

            my_new_row.append(tmp_val)
        subscriptions_scrubbed.append(my_new_row)

    # Now Scrub AS Delivery Info
    as_status_scrubbed = []
    for row_num, my_row in enumerate(as_status):
        my_new_row = []
        for col_num, my_cell in enumerate(my_row):
            if row_num == 0:
                # Is this the header row ?
                my_new_row.append(my_cell.value)
                continue
            if col_num == 0:  # PID
                tmp_val = str(int(my_cell.value))
            elif col_num == 19:  # SO Number
                tmp_val = str(int(my_cell.value))
            elif col_num == 26:  # Project Start Date
                tmp_val = datetime(
                    *xlrd.xldate_as_tuple(my_cell.value, my_wb.datemode))
            elif col_num == 27:  # Scheduled End Date
                tmp_val = datetime(
                    *xlrd.xldate_as_tuple(my_cell.value, my_wb.datemode))
            elif col_num == 28:  # Project Creation Date
                tmp_val = datetime(
                    *xlrd.xldate_as_tuple(my_cell.value, my_wb.datemode))
            else:
                tmp_val = my_cell.value

            my_new_row.append(tmp_val)
        as_status_scrubbed.append(my_new_row)

    # Now Scrub Bookings Data
    bookings_scrubbed = []
    for row_num, my_row in enumerate(bookings):
        my_new_row = []
        for col_num, my_cell in enumerate(my_row):
            if row_num == 0:
                # Is this the header row ?
                my_new_row.append(my_cell.value)
                continue

            if col_num == 0 or col_num == 2 or \
                    col_num == 11:  # Fiscal Year / Fiscal Period / SO
                tmp_val = str(int(my_cell.value))
            elif col_num == 15:  # Customer ID
                try:
                    tmp_val = str(int(my_cell.value))
                except ValueError:
                    tmp_val = '-999'
            else:
                tmp_val = my_cell.value

            my_new_row.append(tmp_val)
        bookings_scrubbed.append(my_new_row)

    #
    # Push the lists out to an Excel File
    #
    push_list_to_xls(bookings_scrubbed, app_cfg['XLS_BOOKINGS'], run_dir,
                     'ta_bookings')
    push_list_to_xls(subscriptions_scrubbed, app_cfg['XLS_SUBSCRIPTIONS'],
                     run_dir, 'ta_subscriptions')
    push_list_to_xls(as_status_scrubbed, app_cfg['XLS_AS_DELIVERY_STATUS'],
                     run_dir, 'ta_delivery')
    # push_xlrd_to_xls(as_status, app_cfg['XLS_AS_DELIVERY_STATUS'], run_dir, 'ta_delivery')

    print('We have ', len(bookings), 'bookings line items')
    print('We have ', len(as_status), 'AS-Fixed SKU line items')
    print('We have ', len(subscriptions), 'subscription line items')

    with open(os.path.join(path_to_run_dir,
                           app_cfg['META_DATA_FILE'])) as json_input:
        config_dict = json.load(json_input)
    config_dict['files_scrubbed'] = 'phase_1'
    with open(os.path.join(path_to_run_dir, app_cfg['META_DATA_FILE']),
              'w') as json_output:
        json.dump(config_dict, json_output)

    print('pre run file checks DONE!')
    return
Example No. 13
def build_dashboard():
    exit()
    from my_app.func_lib.sheet_desc import sheet_map
    #
    # Open the order summary
    #
    wb_orders, sheet_orders = open_wb(app_cfg['XLS_ORDER_SUMMARY'], 'updates')

    # wb_orders, sheet_orders = open_wb('tmp_TA Scrubbed Orders_as_of ' + app_cfg['PROD_DATE'])

    # Loop over the orders XLS worksheet
    # Create a simple list of orders with NO headers
    order_list = []
    for row_num in range(1, sheet_orders.nrows):  # Skip the header row start at 1
        tmp_record = []
        for col_num in range(sheet_orders.ncols):
            my_cell = sheet_orders.cell_value(row_num, col_num)

            # If we just read a date save it as a datetime
            if sheet_orders.cell_type(row_num, col_num) == 3:
                my_cell = datetime.datetime(*xlrd.xldate_as_tuple(my_cell, wb_orders.datemode))
            tmp_record.append(my_cell)
        order_list.append(tmp_record)

    # Create a dict of customer orders
    customer_order_dict = create_customer_order_dict(order_list)
    print()
    print('We have summarized ', len(order_list), ' interesting line items into')
    print(len(customer_order_dict), ' unique customers')
    print()
    # Build Sheet Maps
    sheet_map = build_sheet_map(app_cfg['SS_CX'], sheet_map, 'SS_CX')
    sheet_map = build_sheet_map(app_cfg['SS_AS'], sheet_map, 'SS_AS')
    sheet_map = build_sheet_map(app_cfg['SS_SAAS'], sheet_map, 'SS_SAAS')

    #
    # Get dict updates from linked sheets CX/AS/SAAS
    #
    cx_dict = get_linked_sheet_update(sheet_map, 'SS_CX', sheet_keys)
    as_dict = get_linked_sheet_update(sheet_map, 'SS_AS', sheet_keys)
    saas_dict = get_linked_sheet_update(sheet_map, 'SS_SAAS', sheet_keys)

    # print(cx_dict)
    # exit()
    print()
    print('We have CX Updates: ', len(cx_dict))
    print('We have AS Updates: ', len(as_dict))
    print('We have SAAS Updates: ', len(saas_dict))
    print()

    # Create Platform dict for platform lookup
    tmp_dict = build_sku_dict()
    platform_dict = {}
    for key, val in tmp_dict.items():
        if val[0] == 'Product' or val[0] == 'SaaS':
            platform_dict[key] = val[1]

    #
    # Init Main Loop Variables
    #
    new_rows = []
    new_row = []
    bookings_col_num = -1
    sensor_col_num = -1
    svc_bookings_col_num = -1
    platform_type_col_num = -1
    sku_col_num = -1
    my_col_idx = {}

    # Create top row for the dashboard
    # also make a dict (my_col_idx) of {column names : column number}
    for col_idx, col in enumerate(sheet_map):
        new_row.append(col[0])
        my_col_idx[col[0]] = col_idx
    new_rows.append(new_row)

    #
    # Main loop
    #
    for customer, orders in customer_order_dict.items():
        new_row = []
        order = []
        orders_found = len(orders)

        # Default Values
        bookings_total = 0
        sensor_count = 0
        service_bookings = 0
        platform_type = 'Not Identified'

        saas_status = 'No Status'
        cx_contact = 'None assigned'
        cx_status = 'No Update'
        as_pm = ''
        as_cse1 = ''
        as_cse2 = ''
        as_complete = ''  # 'Project Status/PM Completion'
        as_comments = ''  # 'Delivery Comments'

        #
        # Get update from linked sheets (if any)
        #
        if customer in saas_dict:
            saas_status = saas_dict[customer][0]
            if saas_status is True:
                saas_status = 'Provision Complete'
            else:
                saas_status = 'Provision NOT Complete'
        else:
            saas_status = 'No Status'

        if customer in cx_dict:
            cx_contact = cx_dict[customer][0]
            cx_status = cx_dict[customer][1]
        else:
            cx_contact = 'None assigned'
            cx_status = 'No Update'

        if customer in as_dict:
            if as_dict[customer][0] == '':
                as_pm = 'None Assigned'
            else:
                as_pm = as_dict[customer][0]

            if as_dict[customer][1] == '':
                as_cse1 = 'None Assigned'
            else:
                as_cse1 = as_dict[customer][1]

            if as_dict[customer][2] == '':
                as_cse2 = 'None Assigned'
            else:
                as_cse2 = as_dict[customer][2]

            if as_dict[customer][3] == '':
                as_complete = 'No Update'
            else:
                # 'Project Status/PM Completion'
                as_complete = as_dict[customer][3]

            if as_dict[customer][4] == '':
                as_comments = 'No Comments'
            else:
                as_comments = as_dict[customer][4]

        #
        # Loop over this customers orders
        # Create one summary row for this customer
        # Total things
        # Build a list of things that may change order to order (ie Renewal Dates, Customer Names)
        #
        platform_count = 0
        for order_idx, order in enumerate(orders):
            # calculate totals in this loop (ie total_books, sensor count etc)
            bookings_total = bookings_total + order[my_col_idx['Total Bookings']]
            sensor_count = sensor_count + order[my_col_idx['Sensor Count']]

            if order[my_col_idx['Product Type']] == 'Service':
                service_bookings = service_bookings + order[my_col_idx['Total Bookings']]

            if order[my_col_idx['Bundle Product ID']] in platform_dict:
                platform_count += 1
                platform_type = platform_dict[order[my_col_idx['Bundle Product ID']]]
                if platform_count > 1:
                    platform_type = platform_type + ' plus ' + str(platform_count-1)

        #
        # Modify/Update this record as needed and then add to the new_rows
        #
        order[my_col_idx['Total Bookings']] = bookings_total
        order[my_col_idx['Sensor Count']] = sensor_count
        order[my_col_idx['Service Bookings']] = service_bookings

        order[my_col_idx['CSM']] = cx_contact
        order[my_col_idx['Comments']] = cx_status

        order[my_col_idx['Project Manager']] = as_pm
        order[my_col_idx['AS Engineer 1']] = as_cse1
        order[my_col_idx['AS Engineer 2']] = as_cse2
        order[my_col_idx['Project Status/PM Completion']] = as_complete  # 'Project Status/PM Completion'
        order[my_col_idx['Delivery Comments']] = as_comments

        order[my_col_idx['Provisioning completed']] = saas_status

        order[my_col_idx['Product Description']] = platform_type

        order[my_col_idx['Orders Found']] = orders_found

        new_rows.append(order)
    #
    # End of main loop
    #

    # Do some clean up and ready for output
    #
    # Rename the columns as per the sheet map
    cols_to_delete = []
    for idx, map_info in enumerate(sheet_map):
        if map_info[3] != '':
            if map_info[3] == '*DELETE*':
                # Put the columns to delete in a list
                cols_to_delete.append(idx)
            else:
                # Rename to the new column name
                new_rows[0][idx] = map_info[3]

    # Loop over the new_rows and
    # delete columns we don't need as per the sheet_map
    for col_idx in sorted(cols_to_delete, reverse=True):
        for row_idx, my_row in enumerate(new_rows):
            del new_rows[row_idx][col_idx]

    #
    # Write the Dashboard to an Excel File
    #
    push_list_to_xls(new_rows, app_cfg['XLS_DASHBOARD'], 'updates')
    # push_xls_to_ss(app_cfg['XLS_DASHBOARD']+'_as_of_01_31_2019.xlsx', 'jims dash')

    return
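
# Deleting columns in descending index order (as done above) matters: removing
# a lower index first would shift the remaining columns left and invalidate
# the other saved indices. A tiny self-contained illustration:
def drop_columns(rows, cols_to_delete):
    """Delete the given column indices from every row, highest index first."""
    for col_idx in sorted(cols_to_delete, reverse=True):
        for row in rows:
            del row[col_idx]
    return rows


# drop_columns([['a', 'b', 'c', 'd']], [1, 3]) -> [['a', 'c']]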
Example No. 14
def phase_2(run_dir=app_cfg['UPDATES_DIR']):
    home = app_cfg['HOME']
    working_dir = app_cfg['WORKING_DIR']
    path_to_run_dir = (os.path.join(home, working_dir, run_dir))

    bookings_path = os.path.join(path_to_run_dir, app_cfg['XLS_BOOKINGS'])

    # Read the config_dict.json file
    with open(os.path.join(path_to_run_dir, app_cfg['META_DATA_FILE'])) as json_input:
        config_dict = json.load(json_input)
    data_time_stamp = datetime.datetime.strptime(config_dict['data_time_stamp'], '%m-%d-%y')
    last_run_dir = config_dict['last_run_dir']

    print("Run Date: ", data_time_stamp, type(data_time_stamp))
    print('Run Directory:', last_run_dir)
    print(bookings_path)

    # Go to Smartsheets and build these two dicts to use as reference lookups
    # team_dict: {'sales_levels 1-6':[('PSS','TSA')]}
    # sku_dict: {sku : [sku_type, sku_description]}
    team_dict = build_coverage_dict()
    sku_dict = build_sku_dict()

    #
    # Open up the bookings excel workbooks
    #
    wb_bookings, sheet_bookings = open_wb(app_cfg['XLS_BOOKINGS'], run_dir)

    # From the current up to date bookings file build a simple list
    # that describes the format of the output file we are creating
    # and the columns we need to add (ie PSS, TSA, Renewal Dates)

    my_sheet_map = build_sheet_map(app_cfg['XLS_BOOKINGS'], sheet_map,
                                   'XLS_BOOKINGS', run_dir)
    #
    # init a bunch of variables we need for the main loop
    #
    order_header_row = []
    order_rows = []
    order_row = []
    trash_rows = []

    dest_col_nums = {}
    src_col_nums = {}

    # Build a dict of source sheet {'col_name' : src_col_num}
    # Build a dict of destination sheet {'col_name' : dest_col_num}
    # Build the header row for the output file
    for idx, val in enumerate(my_sheet_map):
        # Add to the col_num dict of col_names
        dest_col_nums[val[0]] = idx
        src_col_nums[val[0]] = val[2]
        order_header_row.append(val[0])

    # Initialize the order_row and trash_row lists
    order_rows.append(order_header_row)
    trash_rows.append(sheet_bookings.row_values(0))

    print('There are ', sheet_bookings.nrows, ' rows in Raw Bookings')

    #
    # Main loop of over raw bookings excel data
    #
    # This loop will build two lists:
    # 1. Interesting orders based on SKUs (order_rows)
    # 2. Trash orders SKUs we don't care about (trash_rows)
    # As determined by the sku_dict
    # We will also assign team coverage to the rows in both lists
    #
    for i in range(1, sheet_bookings.nrows):

        # Is this SKU of interest ?
        sku = sheet_bookings.cell_value(i, src_col_nums['Bundle Product ID'])

        if sku in sku_dict:
            # Let's make a row for this order
            # Since it has an "interesting" sku
            customer = sheet_bookings.cell_value(i, src_col_nums['ERP End Customer Name'])
            order_row = []
            sales_level = ''
            sales_level_cntr = 0

            # Grab SKU data from the SKU dict
            sku_type = sku_dict[sku][0]
            sku_desc = sku_dict[sku][1]
            sku_sensor_cnt = sku_dict[sku][2]

            # Walk across the sheet_map columns
            # to build this output row cell by cell
            for val in my_sheet_map:
                col_name = val[0]  # Source Sheet Column Name
                col_idx = val[2]  # Source Sheet Column Number

                # If this is a 'Sales Level X' column then
                # Capture its value until we get to level 6
                # then do a team lookup
                if col_name[:-2] == 'Sales Level':
                    sales_level = sales_level + sheet_bookings.cell_value(i, col_idx) + ','
                    sales_level_cntr += 1

                    if sales_level_cntr == 6:
                        # We have collected all 6 sales levels
                        # Now go to find_team to do the lookup
                        sales_level = sales_level[:-1]
                        sales_team = find_team(team_dict, sales_level)
                        pss = sales_team[0]
                        tsa = sales_team[1]
                        order_row[dest_col_nums['pss']] = pss
                        order_row[dest_col_nums['tsa']] = tsa

                if col_idx != -1:
                    # OK we have a cell that we need from the raw bookings
                    # sheet we need so grab it
                    order_row.append(sheet_bookings.cell_value(i, col_idx))
                elif col_name == 'Product Description':
                    # Add in the Product Description
                    order_row.append(sku_desc)
                elif col_name == 'Product Type':
                    # Add in the Product Type
                    order_row.append(sku_type)
                elif col_name == 'Sensor Count':
                    # Add in the Sensor Count
                    order_row.append(sku_sensor_cnt)
                else:
                    # this cell is assigned a -1 in the sheet_map
                    # so assign a blank as a placeholder for now
                    order_row.append('')

            # Done with all the columns in this row
            # Record the completed order row, then move on
            # to the next row of the raw bookings data
            order_rows.append(order_row)

        else:
            # The SKU was not interesting so let's trash it
            trash_rows.append(sheet_bookings.row_values(i))

    print('Extracted ', len(order_rows), ' rows of interesting SKUs from Raw Bookings')
    print('Trashed ', len(trash_rows), ' rows of trash SKUs from Raw Bookings')
    #
    # End of main loop
    #
    push_list_to_xls(order_rows, 'jim.xlsx')

    #
    # Subscription Analysis
    #
    no_match = [['No Match Found in Subscription update']]
    no_match_cntr = 0
    match_cntr = 0
    subs_sorted_dict, subs_summary_dict = process_subs(run_dir)
    for order_row in order_rows[1:]:
        customer = order_row[dest_col_nums['ERP End Customer Name']]
        if customer in subs_sorted_dict:
            match_cntr += 1
            sub_start_date = datetime.datetime.strptime(subs_sorted_dict[customer][0][0], '%m-%d-%Y')
            sub_initial_term = subs_sorted_dict[customer][0][1]
            sub_renewal_date = datetime.datetime.strptime(subs_sorted_dict[customer][0][2], '%m-%d-%Y')
            sub_days_to_renew = subs_sorted_dict[customer][0][3]
            sub_monthly_charge = subs_sorted_dict[customer][0][4]
            sub_id = subs_sorted_dict[customer][0][5]
            sub_status = subs_sorted_dict[customer][0][6]

            order_row[dest_col_nums['Start Date']] = sub_start_date
            order_row[dest_col_nums['Initial Term']] = sub_initial_term
            order_row[dest_col_nums['Renewal Date']] = sub_renewal_date
            order_row[dest_col_nums['Days Until Renewal']] = sub_days_to_renew
            order_row[dest_col_nums['Monthly Charge']] = sub_monthly_charge
            order_row[dest_col_nums['Subscription ID']] = sub_id
            order_row[dest_col_nums['Status']] = sub_status

            if len(subs_sorted_dict[customer]) > 1:
                renewal_comments = '+' + str(len(subs_sorted_dict[customer])-1) + ' more subscription(s)'
                order_row[dest_col_nums['Subscription Comments']] = renewal_comments
        else:
            got_one = False
            for x in no_match:
                if x[0].lower() in customer.lower():
                    got_one = True
                    break
            if got_one is False:
                no_match_cntr += 1
                no_match.append([customer])

    push_list_to_xls(order_rows, 'jim1.xlsx')
    push_list_to_xls(no_match, 'subscription misses.xlsx')

    #
    # AS Delivery Analysis
    #
    as_dict = process_delivery(run_dir)
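    # as_dict maps customer -> list of AS delivery records; the index
    # positions (0..18) used below are assumed to follow the layout produced by process_delivery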
    print(as_dict)
    print(len(as_dict))
    for order_row in order_rows[1:]:
        customer = order_row[dest_col_nums['ERP End Customer Name']]
        if customer in as_dict:
            match_cntr += 1
            # as_customer = as_dict[customer][0][0]
            # as_pid = as_dict[customer][0][1]
            # as_dm = as_dict[customer][0][2]
            # as_start_date = datetime.datetime.strptime(as_dict[customer][0][3], '%m-%d-%Y')
            #
            # order_row[dest_col_nums['End Customer']] = as_customer
            # order_row[dest_col_nums['PID']] = as_pid
            # order_row[dest_col_nums['Delivery Manager']] = as_dm
            # order_row[dest_col_nums['Project Scheduled Start Date']] = as_start_date

            order_row[dest_col_nums['PID']] = as_dict[customer][0][0]
            order_row[dest_col_nums['Delivery Manager']] = as_dict[customer][0][1]
            order_row[dest_col_nums['Delivery PM']] = as_dict[customer][0][2]
            order_row[dest_col_nums['Tracking status']] = as_dict[customer][0][3]
            order_row[dest_col_nums['Tracking Sub-status']] = as_dict[customer][0][4]
            order_row[dest_col_nums['Comments']] = as_dict[customer][0][5]
            order_row[dest_col_nums['Project Scheduled Start Date']] = datetime.datetime.strptime(as_dict[customer][0][6], '%m-%d-%Y')
            order_row[dest_col_nums['Scheduled End Date']] = datetime.datetime.strptime(as_dict[customer][0][7], '%m-%d-%Y')
            order_row[dest_col_nums['Project Creation Date']] = datetime.datetime.strptime(as_dict[customer][0][8], '%m-%d-%Y')
            # order_row[dest_col_nums['Project Closed Date']] = datetime.datetime.strptime(as_dict[customer][0][9], '%m-%d-%Y')
            order_row[dest_col_nums['Traffic lights (account team)']] = as_dict[customer][0][10]
            order_row[dest_col_nums['Tracking Responsible']] = as_dict[customer][0][11]
            order_row[dest_col_nums['ExecutiveSummary']] = as_dict[customer][0][12]
            order_row[dest_col_nums['Critalpath']] = as_dict[customer][0][13]
            order_row[dest_col_nums['IsssuesRisks']] = as_dict[customer][0][14]
            order_row[dest_col_nums['ActivitiesCurrent']] = as_dict[customer][0][15]
            order_row[dest_col_nums['ActivitiesNext']] = as_dict[customer][0][16]
            order_row[dest_col_nums['LastUpdate']] = as_dict[customer][0][17]
            order_row[dest_col_nums['SO']] = as_dict[customer][0][18]

        else:
            got_one = False
            for x in no_match:
                if x[0].lower() in customer.lower():
                    got_one = True
                    break
            if got_one is False:
                no_match_cntr += 1
                no_match.append([customer])

    #
    # End of Construction Zone
    #

    # Now build an order dict, organized as:
    # order_dict: {cust_name: [[order1], [order2], [orderN]]}
    order_dict = {}

    for order_row in order_rows[1:]:  # Skip the header row
        customer = order_row[0]
        # Append this order to the customer's order list, creating the list if needed
        order_dict.setdefault(customer, []).append(order_row)

    # Create a simple customer_list
    # Contains a full set of unique sorted customer names
    # Example: customer_list = [[erp_customer_name,end_customer_ultimate], [CustA,CustA]]
    customer_list = build_customer_list(run_dir)
    print('There are ', len(customer_list), ' unique Customer Names')

    # Clean up order_dict to remove:
    # 1.  +/- zero sum orders
    # 2. zero revenue orders
    order_dict, customer_platforms = cleanup_orders(customer_list, order_dict, my_sheet_map)

    #
    # Create a summary order file out of the order_dict
    #
    summary_order_rows = [order_header_row]
    for key, val in order_dict.items():
        for my_row in val:
            summary_order_rows.append(my_row)
    print(len(summary_order_rows), ' scrubbed rows remain after removing "noise"')

    #
    # Push our lists to an excel file
    #
    # push_list_to_xls(customer_platforms, 'jim ')
    print('order summary name ', app_cfg['XLS_ORDER_SUMMARY'])

    push_list_to_xls(summary_order_rows, app_cfg['XLS_ORDER_SUMMARY'],
                     run_dir, 'ta_summary_orders')
    push_list_to_xls(order_rows, app_cfg['XLS_ORDER_DETAIL'], run_dir, 'ta_order_detail')
    push_list_to_xls(customer_list, app_cfg['XLS_CUSTOMER'], run_dir, 'ta_customers')
    push_list_to_xls(trash_rows, app_cfg['XLS_BOOKINGS_TRASH'], run_dir, 'ta_trash_rows')

    # exit()
    #
    # Push our lists to a smart sheet
    #
    # push_xls_to_ss(wb_file, app_cfg['XLS_ORDER_SUMMARY'])
    # push_xls_to_ss(wb_file, app_cfg['XLS_ORDER_DETAIL'])
    # push_xls_to_ss(wb_file, app_cfg['XLS_CUSTOMER'])
    # exit()
    return
Exemplo n.º 15
0
def main():
    as_wb, as_ws = open_wb(app_cfg['XLS_AS_DELIVERY_STATUS'])
    cust_wb, cust_ws = open_wb(app_cfg['XLS_BOOKINGS'])
    sub_wb, sub_ws = open_wb(app_cfg['XLS_SUBSCRIPTIONS'])

    print()
    print('RAW Input Data')
    print("\tAS Fixed SKUs Rows:", as_ws.nrows)
    print('\tBookings Rows:', cust_ws.nrows)
    print('\tSubscription Rows:', sub_ws.nrows)

    #
    # Build a Team Dict
    #
    team_dict = build_coverage_dict()
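    # team_dict maps a 'lev1,...,lev6' sales-hierarchy string to a
    # (pss, tsa) coverage pair (shape inferred from the find_team usage below)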

    #
    # Create a SKU Filter
    #
    # Options Are: Product / Software / Service / SaaS / All SKUs
    sku_filter_val = 'All SKUs'
    tmp_dict = build_sku_dict()
    sku_filter_dict = {}

    for key, val in tmp_dict.items():
        if val[0] == sku_filter_val:
            sku_filter_dict[key] = val
        elif sku_filter_val == 'All SKUs':
            # Selects ALL Interesting SKUs
            sku_filter_dict[key] = val

    print()
    print('SKU Filter set to:', sku_filter_val)
    print()

    #
    # Build a xref dict of valid customer ids for lookup by SO and ERP Name
    #
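    # Shapes (values illustrative):
    #   xref_cust_name = {'ACME CORP': '123456'}
    #   xref_so = {('SO98765', 'ACME CORP'): '123456'}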
    xref_cust_name = {}
    xref_so = {}
    for row_num in range(1, cust_ws.nrows):
        cust_id = cust_ws.cell_value(row_num, 15)
        cust_erp_name = cust_ws.cell_value(row_num, 13)
        cust_so = cust_ws.cell_value(row_num, 11)

        # Only add valid ID/Name Pairs to the reference
        if cust_id == '-999' or cust_id == '':
            continue

        if cust_erp_name not in xref_cust_name:
            xref_cust_name[cust_erp_name] = cust_id
            if (cust_so, cust_erp_name) not in xref_so:
                xref_so[(cust_so, cust_erp_name)] = cust_id

    #
    # Process Main Bookings File
    #
    cntr = 0
    cust_db = {}
    cust_alias_db = {}
    so_dict = {}

    #
    # Main loop over the bookings data starts here
    #
    for row_num in range(1, cust_ws.nrows):
        # Gather the fields we want
        cust_id = cust_ws.cell_value(row_num, 15)
        cust_erp_name = cust_ws.cell_value(row_num, 13)
        cust_ultimate_name = cust_ws.cell_value(row_num, 14)
        cust_so = cust_ws.cell_value(row_num, 11)
        cust_sku = cust_ws.cell_value(row_num, 19)
        cust_sales_lev_1 = cust_ws.cell_value(row_num, 3)
        cust_sales_lev_2 = cust_ws.cell_value(row_num, 4)
        cust_sales_lev_3 = cust_ws.cell_value(row_num, 5)
        cust_sales_lev_4 = cust_ws.cell_value(row_num, 6)
        cust_sales_lev_5 = cust_ws.cell_value(row_num, 7)
        cust_sales_lev_6 = cust_ws.cell_value(row_num, 8)
        cust_acct_mgr = cust_ws.cell_value(row_num, 9)

        # Record this SO number in a simple dict {so: ((cust_id, cust_erp_name, cust_sku), ...)}
        if cust_so not in so_dict:
            # so_dict[cust_so] = ((cust_id, cust_erp_name),)
            so_dict[cust_so] = ((cust_id, cust_erp_name, cust_sku), )
        else:
            # so_dict[cust_so] = so_dict[cust_so] + ((cust_id, cust_erp_name),)
            so_dict[cust_so] = so_dict[cust_so] + (
                (cust_id, cust_erp_name, cust_sku), )

        # Do we have a missing or bad cust_id try to look one up
        if cust_id == '' or cust_id == '-999':
            if cust_erp_name in xref_cust_name:
                cust_id = xref_cust_name[cust_erp_name]

            if (cust_so, cust_erp_name) in xref_so:
                cust_id = xref_so[(cust_so, cust_erp_name)]

            # If id is still bad flag cust_id as UNKNOWN
            if cust_id == '' or cust_id == '-999':
                cust_id = 'UNKNOWN'

        #
        # Check cust_db
        # {cust_id: Customer_obj}
        #
        # Is this a new cust_id ?
        if cust_id not in cust_db:
            # Create a new cust_id object and basic record
            cust_db[cust_id] = Customer(cust_id)
            cust_db[cust_id].sales_lev_1 = cust_sales_lev_1
            cust_db[cust_id].sales_lev_2 = cust_sales_lev_2
            cust_db[cust_id].sales_lev_3 = cust_sales_lev_3
            cust_db[cust_id].sales_lev_4 = cust_sales_lev_4
            cust_db[cust_id].sales_lev_5 = cust_sales_lev_5
            cust_db[cust_id].sales_lev_6 = cust_sales_lev_6
            sales_level = cust_sales_lev_1 + ',' + cust_sales_lev_2 + ',' + cust_sales_lev_3 + ',' + \
                cust_sales_lev_4 + ',' + cust_sales_lev_5 + ',' + cust_sales_lev_6
            sales_team = find_team(team_dict, sales_level)
            pss = sales_team[0]
            tsa = sales_team[1]
            cust_db[cust_id].pss = pss
            cust_db[cust_id].tsa = tsa
            cust_db[cust_id].am = cust_acct_mgr

        # Is this a SKU we want if so add_order
        if cust_sku in sku_filter_dict:
            cust_db[cust_id].add_order(cust_so, cust_sku)

        # Add this customer_erp_name as an alias to the customer object
        cust_db[cust_id].add_alias(cust_erp_name)

        # Add this name to an easy alias lookup dict
        if cust_erp_name not in cust_alias_db:
            cust_alias_db[cust_erp_name] = cust_id

    print('Unique Customer IDs with filter of', " '" + sku_filter_val + "' :",
          len(cust_db))
    print("Customer Unique Customer Names: ", len(cust_alias_db))
    print("Unique Sales Order Numbers: ", len(so_dict))

    # A quick check on customer IDs and their aliases
    id_list = [['Customer ID', 'Customer Aliases']]
    for cust_id, cust_obj in cust_db.items():
        alias_list = []
        alias_str = ''
        cust_aliases = cust_obj.aliases
        for cust_alias in cust_aliases:
            alias_list.append(cust_alias)
            alias_str = alias_str + cust_alias + ' : '
        alias_str = alias_str[:-3]
        id_list.append([cust_id, alias_str])

    push_list_to_xls(id_list, 'log_Unique_Cust_IDs.xlsx')

    #
    # Get SAAS Data from the BU Sheet
    #
    # saas_rows = get_list_from_ss(app_cfg['SS_SAAS'])
    # test_list = []
    #
    # for x in saas_rows:
    #     for y in x:
    #         print(type(y), y)
    #     time.sleep(.4)
    # exit()
    #
    # for row_num in range(1, len(saas_rows)):
    #     try:
    #         tmp_val = [saas_rows[row_num][1], str(int(saas_rows[row_num][2]))]
    #     except ValueError:
    #         tmp_val = ['Bad Data in row ' + str(row_num), saas_rows[row_num][1]]
    #
    #     test_list.append(tmp_val)
    # push_list_to_xls(test_list, 'saas_status.xlsx')
    #
    # saas_status_list = [['status', 'cust name', 'saas so', 'cust id']]
    # for row in test_list:
    #     saas_name = row[0]
    #     saas_so = row[1]
    #     saas_status = ''
    #     saas_cust_id = ''
    #
    #     if saas_name.find('Bad Data') != -1:
    #         saas_status = 'Bad Data in SaaS Sheet', saas_name, saas_so, saas_cust_id
    #     else:
    #         if saas_name in cust_alias_db:
    #             saas_cust_id = cust_alias_db[saas_name]
    #             saas_status = 'Matched Data with SaaS Sheet', saas_name, saas_so, saas_cust_id
    #         else:
    #             saas_status = 'No Matching Data from SaaS Sheet', saas_name, saas_so, saas_cust_id
    #
    #     saas_status_list.append(saas_status)
    # push_list_to_xls(saas_status_list, 'log_saas_data_matches.xlsx')

    # # Display Customer IDs and Aliases
    # for cust_id, cust_obj in cust_db.items():
    #     if len(cust_obj.aliases) > 1:
    #         print()
    #         print('Customer ID', cust_id, ' has the following aliases')
    #         for name in cust_obj.aliases:
    #             print('\t\t', name)
    #             time.sleep(1)

    # # Display Sales Order info
    # for cust_id, cust_obj in cust_db.items():
    #     if len(cust_obj.orders) > 1:
    #         print()
    #         print('Customer ID', cust_id, cust_obj.aliases, ' has the following orders')
    #         for my_order, my_skus in cust_obj.orders.items():
    #             print('\t', 'SO Num:', my_order, 'SKUs', my_skus)
    #             time.sleep(1)

    #
    # Process AS AS-F SKU File - match bookings SO and (AS SO / PID) numbers
    # and make a list of tuples for each cust_id
    #
    as_db = {}
    so_status_list = [[
        'AS SO Number', 'AS Customer Name', "AS PID", 'Duplicate ?',
        'Match in Booking ?'
    ]]
    as_zombie_so = []
    as_so_found_cntr = 0
    as_so_not_found_cntr = 0
    as_so_duplicate_cntr = 0
    as_so_unique_cntr = 0
    for row_num in range(1, as_ws.nrows):
        my_as_info_list = []
        # Gather the fields we want
        as_pid = as_ws.cell_value(row_num, 0)
        as_cust_name = as_ws.cell_value(row_num, 2)
        as_so = as_ws.cell_value(row_num, 19)

        # Just a check
        if as_so in as_db:
            dupe = 'Duplicate SO'
            as_so_duplicate_cntr += 1
        else:
            dupe = 'Unique SO'
            as_so_unique_cntr += 1

        if as_so not in as_db:
            my_as_info_list.append((as_pid, as_cust_name))
            as_db[as_so] = my_as_info_list
        else:
            my_as_info_list = as_db[as_so]
            add_it = True
            for info in my_as_info_list:
                if info == (as_pid, as_cust_name):
                    add_it = False
                    break
            if add_it:
                my_as_info_list.append((as_pid, as_cust_name))
                as_db[as_so] = my_as_info_list

        # Checks
        if as_so not in so_dict:
            so_status_list.append(
                [as_so, as_cust_name, as_pid, dupe, 'NOT in Bookings'])
            as_zombie_so.append([as_so, as_cust_name, as_pid])
            as_so_not_found_cntr += 1
        else:
            so_status_list.append(
                [as_so, as_cust_name, as_pid, dupe, 'FOUND in Bookings'])
            as_so_found_cntr += 1

    push_list_to_xls(so_status_list, 'log_AS SO_Status_List.xlsx')
    print('AS SO NOT Found (Zombies):', as_so_not_found_cntr)
    print('AS SO Found:', as_so_found_cntr)
    print('\t AS SO Totals:', as_so_found_cntr + as_so_not_found_cntr)
    print()
    print('AS SO Duplicate:', as_so_duplicate_cntr)
    print('AS SO Unique:', as_so_unique_cntr)
    print('len of as_db', len(as_db))

    #
    # Update the cust_db objects with the AS data from as_db
    #
    found_list = 0
    as_zombies = [[
        'AS SO', 'AS PID', 'AS Customer Name', 'Possible Match', 'Ratio'
    ]]
    for as_so, as_info in as_db.items():
        # as_info is a list of (as_pid, as_cust_name) tuples for this SO
        as_cust_name = as_info[0][1]

        if as_so in so_dict:
            cust_id = so_dict[as_so][0][0]
            cust_obj = cust_db[cust_id]
            found_list = found_list + len(as_info)
            cust_obj.add_as_pid(as_so, as_info)
        else:
            # This AS SO is NOT in the main so_dict, so try to find the
            # customer_id by matching as_cust_name against the customer alias dict
            if as_cust_name in cust_alias_db:
                cust_id = cust_alias_db[as_cust_name]
                cust_obj = cust_db[cust_id]
                found_list = found_list + len(as_info)
                cust_obj.add_as_pid(as_so, as_info)
            else:
                # do a fuzzy match search against all customer aliases
                best_match = 0
                for k, v in cust_alias_db.items():
                    match_ratio = fuzz.ratio(as_cust_name, k)
                    if match_ratio > best_match:
                        possible_cust = k
                        best_match = match_ratio

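                # fuzz.ratio returns a 0-100 similarity score; keep the
                # best-scoring alias as the likely match (assumes at least
                # one alias scores above 0 so possible_cust gets set)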
                cust_id = cust_alias_db[possible_cust]
                cust_obj = cust_db[cust_id]
                found_list = found_list + len(as_info)
                cust_obj.add_as_pid(as_so, as_info)

                as_zombies.append([
                    as_so, as_info[0][0], as_info[0][1], possible_cust,
                    best_match
                ])
                print('\tNOT FOUND Customer ID for: ', as_cust_name)

    push_list_to_xls(as_zombies, 'tmp_zombies.xlsx')
    print('Updated cust_db with: ', found_list, ' AS SOs')

    #
    # Process Subscriptions and add to Customer Objects
    #
    for row_num in range(1, sub_ws.nrows):
        # Gather the fields we want
        sub_cust_name = sub_ws.cell_value(row_num, 2)
        sub_id = sub_ws.cell_value(row_num, 4)
        sub_start_date = sub_ws.cell_value(row_num, 6)
        sub_renew_date = sub_ws.cell_value(row_num, 8)
        sub_renew_status = sub_ws.cell_value(row_num, 5)
        sub_monthly_rev = sub_ws.cell_value(row_num, 10)

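        # xlrd stores Excel dates as serial-number floats; xldate_as_tuple()
        # converts them using the workbook's datemode (1900 vs. 1904 epoch)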
        year, month, day, hour, minute, second = xlrd.xldate_as_tuple(
            sub_start_date, sub_wb.datemode)
        sub_start_date = datetime(year, month, day)

        year, month, day, hour, minute, second = xlrd.xldate_as_tuple(
            sub_renew_date, sub_wb.datemode)
        sub_renew_date = datetime(year, month, day)

        if sub_cust_name in cust_alias_db:
            cust_id = cust_alias_db[sub_cust_name]
            cust_obj = cust_db[cust_id]
            sub_info = [
                sub_id, sub_cust_name, sub_start_date, sub_renew_date,
                sub_renew_status, sub_monthly_rev
            ]
            cust_obj.add_sub_id(sub_info)

    #
    # Make the Magic List
    #
    magic_list = []
    header_row = [
        'Customer ID', 'AS SO', 'AS PID', 'AS Customer Name', 'Sales Level 1',
        'Sales Level 2', 'PSS', 'TSA', 'AM', 'Subscription History' + ' \n' +
        'Sub ID - Start Date - Renewal Date - Days to Renew - Annual Rev',
        'Sub 1st Billing Date', 'Next Renewal Date', 'Days to Renew',
        'Next Renewal Monthly Rev', 'Sub Current Status', 'AS Delivery Mgr',
        'AS Tracking Status', 'AS Tracking Sub Status', 'AS Tracking Comments',
        'AS SKU', 'AS Project Creation Date', 'AS Project Start Date',
        'AS Scheduled End Date',
        'Days from 1st Sub Billing to AS Project Start'
    ]
    magic_list.append(header_row)
    print(magic_list)
    x = 0
    today = datetime.today()

    for cust_id, cust_obj in cust_db.items():
        cust_aliases = cust_obj.aliases
        as_pids = cust_obj.as_pids
        sub_ids = cust_obj.subs
        pss = cust_obj.pss
        tsa = cust_obj.tsa
        am = cust_obj.am
        sales_lev1 = cust_obj.sales_lev_1
        sales_lev2 = cust_obj.sales_lev_2

        if len(as_pids) == 0:
            # No AS PID info available
            sub_summary, billing_start_date, next_renewal_date, days_to_renew, renewal_rev, sub_renew_status = process_sub_info(
                cust_obj.subs)
            magic_row = [
                cust_id, '', 'AS Info Unavailable', cust_aliases[0],
                sales_lev1, sales_lev2, pss, tsa, am, sub_summary,
                billing_start_date, next_renewal_date, days_to_renew,
                renewal_rev, sub_renew_status, '', '', '', '', '', '', '', '',
                ''
            ]
            magic_list.append(magic_row)
        else:
            # Let's look at the AS PIDs in cust_obj
            for so, as_pid_info in as_pids.items():
                # We will make a row for each AS SO
                for as_detail in as_pid_info:
                    magic_row = []
                    as_so = so
                    as_pid = as_detail[0]
                    as_cust_name = as_detail[1]

                    sub_summary, billing_start_date, next_renewal_date, days_to_renew, renewal_rev, sub_renew_status = process_sub_info(
                        cust_obj.subs)

                    # Go get additional AS Info
                    as_tracking_status = ''
                    as_tracking_sub_status = ''
                    as_tracking_comments = ''
                    as_dm = ''
                    as_project_start = ''
                    as_scheduled_end = ''
                    as_project_created = ''
                    as_sku = ''

                    for row_num in range(1, as_ws.nrows):
                        if as_pid == as_ws.cell_value(row_num, 0):
                            as_dm = as_ws.cell_value(row_num, 1)
                            as_tracking_status = as_ws.cell_value(row_num, 7)
                            as_tracking_sub_status = as_ws.cell_value(
                                row_num, 8)
                            as_tracking_comments = as_ws.cell_value(row_num, 9)
                            as_sku = as_ws.cell_value(row_num, 14)
                            as_project_start = as_ws.cell_value(row_num, 26)
                            as_scheduled_end = as_ws.cell_value(row_num, 27)
                            as_project_created = as_ws.cell_value(row_num, 28)

                            year, month, day, hour, minute, second = xlrd.xldate_as_tuple(
                                as_project_start, as_wb.datemode)
                            as_project_start = datetime(year, month, day)

                            year, month, day, hour, minute, second = xlrd.xldate_as_tuple(
                                as_scheduled_end, as_wb.datemode)
                            as_scheduled_end = datetime(year, month, day)

                            year, month, day, hour, minute, second = xlrd.xldate_as_tuple(
                                as_project_created, as_wb.datemode)
                            as_project_created = datetime(year, month, day)
                            break

                    if isinstance(billing_start_date, datetime) and isinstance(
                            as_project_start, datetime):
                        time_to_service = billing_start_date - as_project_start
                    else:
                        time_to_service = ''

                    magic_row = [
                        cust_id, so, as_pid, as_cust_name, sales_lev1,
                        sales_lev2, pss, tsa, am, sub_summary,
                        billing_start_date, next_renewal_date, days_to_renew,
                        renewal_rev, sub_renew_status, as_dm,
                        as_tracking_status, as_tracking_sub_status,
                        as_tracking_comments, as_sku, as_project_created,
                        as_project_start, as_scheduled_end, time_to_service
                    ]

                    magic_list.append(magic_row)

    # print(len(magic_list))
    # print(x)
    # for my_row in magic_list:
    #     for my_col in my_row:
    #         print (my_col, type(my_col))
    #     time.sleep(.1)
    # exit()
    push_list_to_xls(magic_list, 'magic.xlsx')

    #
    # Make a NEW customer list
    #
    cust_as_of = 201910
    new_cust_list = [[
        'Booking Period', 'Customer ID', 'Customer Name', 'PSS', 'TSA', 'AM'
    ]]
    for row_num in range(1, cust_ws.nrows):
        cust_id = cust_ws.cell_value(row_num, 15)
        if cust_id in cust_db:
            booking_period = cust_ws.cell_value(row_num, 2)
            cust_name = cust_ws.cell_value(row_num, 13)
            pss = cust_db[cust_id].pss
            tsa = cust_db[cust_id].tsa
            am = cust_db[cust_id].am
            if int(booking_period) >= cust_as_of:
                new_cust_list.append(
                    [booking_period, cust_id, cust_name, pss, tsa, am])

    push_list_to_xls(new_cust_list, 'tmp_New_Customer_list.xlsx')

    return
Exemplo n.º 16
0
from my_app.func_lib.open_wb import open_wb
from my_app.func_lib.add_hash_to_xls import add_hash_to_xls
from my_app.func_lib.push_list_to_xls import push_list_to_xls
from datetime import datetime

#
# db.create_all()
#

Customers.__table__.drop(db.session.bind)
Customers.__table__.create(db.session.bind)

now = datetime.now()

# Add a hash to this sheet
xlrd_wb, xlrd_ws = open_wb('tmp_TA Customer List.xlsx')
a_sheet = add_hash_to_xls(xlrd_wb, xlrd_ws)
push_list_to_xls(a_sheet, 'tmp_TA Customer List_hashed.xlsx')

# Now open the sheet that includes a unique hash value
xlrd_wb, xlrd_ws = open_wb('tmp_TA Customer List_hashed.xlsx')

# Loop over the sheet starting row 1 to exclude headers
for row_num in range(1, xlrd_ws.nrows):
    a_cust = Customers()
    a_cust.customer_ultimate_name = xlrd_ws.cell_value(row_num, 0)
    a_cust.customer_erp_name = xlrd_ws.cell_value(row_num, 1)
    a_cust.date_added = now
    a_cust.hash_value = xlrd_ws.cell_value(row_num, 2)

    db.session.add(a_cust)
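
# A commit is presumably needed to persist the inserted rows; the snippet
# ends before one, so a minimal, assumed finish would be:
# db.session.commit()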
Exemplo n.º 17
0
def refresh_data():
    # This function retrieves data from the update_dir
    # It looks for file(s) named in the format:
    #   'FY17 TA Master Bookings as of 02-25-19.xlsx'
    #   'TA Renewal Dates as of 02-25-19.xlsx'
    # It preps them and creates all supporting files in the update dir
    # ALL workbooks created will begin with tmp_XXXXX.xlsx
    # We then create a dated archive directory and move a set of all files to the archive & working dirs

    home = app_cfg['HOME']
    working_dir = app_cfg['WORKING_DIR']
    update_dir = app_cfg['UPDATES_DIR']
    archive_dir = app_cfg['ARCHIVES_DIR']

    path_to_main_dir = (os.path.join(home, working_dir))
    path_to_updates = (os.path.join(home, working_dir, update_dir))
    path_to_archives = (os.path.join(home, working_dir, archive_dir))

    update_files = os.listdir(path_to_updates)
    bookings = []
    start_row = 0
    as_of_date = ''

    print(path_to_main_dir)
    print(path_to_updates)
    print(path_to_archives)
    print(app_cfg['PROD_DATE'])
    print(app_cfg['UPDATE_DATE'])

    # Look in the "ta_data_updates" dir
    # this is where we place newly updated sheets to be put into production
    if len(update_files) == 0:
        # NO update files exist so log an error and return
        print('ERROR: No Update files exist in:', path_to_updates)
        return
    else:
        for file in update_files:
            # When we find a "Master Bookings" file
            # Add the rows to the "bookings" list
            if file.find('Master Bookings') != -1:
                wb, ws = open_wb(file, 'updates')
                as_of_date = file[-13:-13 + 8]
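                # Filenames end in ' MM-DD-YY.xlsx' (13 chars), so this
                # slice ([-13:-5]) pulls out just the 'MM-DD-YY' date stamp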

                if start_row == 0:
                    # For the first workbook include the header row
                    start_row = 2
                elif start_row == 2:
                    # For subsequent workbooks skip the header
                    start_row = 3

                for row in range(start_row, ws.nrows):
                    bookings.append(ws.row_values(row))
    print('as of', as_of_date)

    # We have now created the bookings list so let's write it out
    # and rename it with the current as_of_date
    print('New Master Bookings has ', len(bookings), ' line items')
    push_list_to_xls(bookings, 'tmp_working_bookings', 'updates')
    os.rename(
        os.path.join(path_to_updates, 'tmp_working_bookings'),
        os.path.join(path_to_updates,
                     'tmp_Master Bookings as of ' + as_of_date + '.xlsx'))

    # Create a workbook of filtered AS SKUs only
    as_bookings = get_as_skus(bookings)
    push_list_to_xls(as_bookings, 'tmp_working_as_bookings', 'updates')
    os.rename(
        os.path.join(path_to_updates, 'tmp_working_as_bookings'),
        os.path.join(path_to_updates,
                     'tmp_TA AS SKUs as of ' + as_of_date + '.xlsx'))

    # process_bookings
    print('Before init', app_cfg['XLS_BOOKINGS'])
    init_settings()
    print('after init', app_cfg['XLS_BOOKINGS'])
    process_bookings()

    # build_dashboard
    build_dashboard()

    # Make an archive directory where we need to place these update files
    os.mkdir(os.path.join(path_to_archives, as_of_date + " Updates"))
    archive_folder_path = os.path.join(path_to_archives,
                                       as_of_date + " Updates")
    print(archive_folder_path)

    # Delete all current working files from the working directory stamped with del_date
    files = os.listdir(path_to_main_dir)
    del_date = ''
    for file in files:
        if file.find('Master Bookings') != -1:
            del_date = file[-13:-13 + 8]
            break

    for file in files:
        if file[-13:-13 + 8] == del_date:
            print('Deleting file', file)
            os.remove(os.path.join(path_to_main_dir, file))

    # Copy all of the new files to the working directory as well
    main_files = os.listdir(path_to_updates)
    for file in main_files:
        copyfile(os.path.join(path_to_updates, file),
                 os.path.join(path_to_main_dir, file))

    # Move all updates to the archive directory
    update_files = os.listdir(path_to_updates)
    for file in update_files:
        print(file)
        os.rename(os.path.join(path_to_updates, file),
                  os.path.join(archive_folder_path, file))

    print('All data files have been refreshed and archived !')
    print('Before init', app_cfg['XLS_BOOKINGS'])
    init_settings()
    print('after init', app_cfg['XLS_BOOKINGS'])
    return
Exemplo n.º 18
0
def process_renewals():
    # Open up the renewals excel workbooks
    wb, sheet = open_wb(app_cfg['XLS_RENEWALS'], 'updates')

    # Get the renewal columns we are looking for
    my_map = build_sheet_map(app_cfg['XLS_RENEWALS'], sheet_map,
                             'XLS_RENEWALS')

    print('sheet_map ', id(sheet_map))
    print('my map ', id(my_map))

    # Strip out the sheet-map entries that don't belong to this workbook
    my_map = [x for x in my_map if x[1] == 'XLS_RENEWALS']

    # Create a simple column name dict
    col_nums = {
        sheet.cell_value(0, col_num): col_num
        for col_num in range(0, sheet.ncols)
    }
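    # e.g. (column names illustrative): {'End Customer': 0, 'Renewal Date': 4, ...}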

    # Loop over all of the renewal records
    # Build a dict of {customer: [renewal record, renewal record, ...]}
    my_dict = {}
    for row_num in range(1, sheet.nrows):
        customer = sheet.cell_value(row_num, col_nums['End Customer'])
        if customer in my_dict:
            tmp_record = []
            tmp_records = my_dict[customer]
        else:
            tmp_record = []
            tmp_records = []

        # Loop over my_map and gather the columns we need
        for col_map in my_map:
            my_cell = sheet.cell_value(row_num, col_map[2])

            # Is this cell a Date type (xlrd type 3)?
            # If so, reformat it as an MM-DD-YYYY string
            if sheet.cell_type(row_num, col_map[2]) == 3:
                my_cell = datetime.datetime(
                    *xlrd.xldate_as_tuple(my_cell, wb.datemode))
                my_cell = my_cell.strftime('%m-%d-%Y')

            tmp_record.append(my_cell)

        tmp_records.append(tmp_record)
        my_dict[customer] = tmp_records

    #
    # Sort each customer's renewal dates
    #
    sorted_dict = {}
    summarized_dict = {}
    summarized_rec = []

    for customer, renewals in my_dict.items():
        # Sort this customer's renewal records into chronological order
        # (parse the 'MM-DD-YYYY' strings so the sort isn't merely lexical)
        renewals.sort(
            key=lambda x: datetime.datetime.strptime(x[0], '%m-%d-%Y'))
        sorted_dict[customer] = renewals

        next_renewal_date = renewals[0][0]
        next_renewal_rev = 0
        next_renewal_qtr = renewals[0][2]
        for renewal_rec in renewals:
            if renewal_rec[0] == next_renewal_date:
                # Tally this renewal record and get the next
                next_renewal_rev = float(renewal_rec[1] + next_renewal_rev)
            elif renewal_rec[0] != next_renewal_date:
                # Record these summarized values
                summarized_rec.append(
                    [next_renewal_date, next_renewal_rev, next_renewal_qtr])
                # Reset these values and get the next renewal date for this customer
                next_renewal_date = renewal_rec[0]
                next_renewal_rev = renewal_rec[1]
                next_renewal_qtr = renewal_rec[2]

            # Check to see if this is the last renewal record
            # If so exit the loop
            if renewals.index(renewal_rec) == len(renewals) - 1:
                break

        summarized_rec.append(
            [next_renewal_date, next_renewal_rev, next_renewal_qtr])
        summarized_dict[customer] = summarized_rec
        summarized_rec = []

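    # Illustrative example (hypothetical values): per-customer records of
    #   [['03-01-2019', 100.0, 'Q3'], ['03-01-2019', 50.0, 'Q3'], ['06-01-2019', 75.0, 'Q4']]
    # summarize to
    #   [['03-01-2019', 150.0, 'Q3'], ['06-01-2019', 75.0, 'Q4']]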
    # print(sorted_dict['FIRST NATIONAL BANK OF SOUTHERN AFRICA LTD'])
    # print(summarized_dict['SPECTRUM HEALTH SYSTEM'])
    # print (len(summarized_dict['SPECTRUM HEALTH SYSTEM']))
    return summarized_dict
Exemplo n.º 19
0
    match = False
    ratios = fuzz.ratio(name1, name2)
    if 85 <= ratios < 100:
        print(name1, '/ \t', name2, ratios)
        match = True

    #time.sleep(1)

    return match


if __name__ == "__main__":
    list_a = ['jim']
    list_b = ['jim', 'gym', 'ang', 'jime']
    my_file = 'C:/Users/jpisano/ta_adoption_data/ta_data_updates/tmp_TA Customer List.xlsx'
    my_wb, my_ws = open_wb(my_file)

    sheet1 = my_wb.sheet_by_name('Sheet1')
    sheet2 = my_wb.sheet_by_name('Sheet2')

    unique_names = []
    jims_list = []
    duplicate_names = []
    aka_list = []
    aka = {}

    for my_row in range(1, sheet1.nrows):
        duplicate_names.append(sheet1.cell_value(my_row, 0))

    for my_row in range(0, sheet2.nrows):
        unique_names.append(sheet2.cell_value(my_row, 0))
Exemplo n.º 20
0
def file_checks(run_dir=app_cfg['UPDATES_DIR']):
    home = app_cfg['HOME']
    working_dir = app_cfg['WORKING_DIR']
    update_dir = app_cfg['UPDATES_DIR']
    archive_dir = app_cfg['ARCHIVES_DIR']

    # Check that all key directories exist
    path_to_main_dir = (os.path.join(home, working_dir))
    if not os.path.exists(path_to_main_dir):
        print(path_to_main_dir, " does NOT Exist !")
        exit()

    path_to_run_dir = (os.path.join(home, working_dir, run_dir))

    if not os.path.exists(path_to_run_dir):
        print(path_to_run_dir, " does NOT Exist !")
        exit()

    path_to_updates = (os.path.join(home, working_dir, update_dir))
    if not os.path.exists(path_to_updates):
        print(path_to_updates, " does NOT Exist !")
        exit()

    path_to_archives = (os.path.join(home, working_dir, archive_dir))
    if not os.path.exists(path_to_archives):
        print(path_to_archives, " does NOT Exist !")
        exit()

    # OK directories are there any files ?
    if not os.listdir(path_to_run_dir):
        print('Directory', path_to_run_dir, 'contains NO files')
        exit()

    #  Get the required Files to begin processing from app_cfg (settings.py)
    files_needed = {}
    # Do we have RAW files to process ?
    for var in app_cfg:
        if var.find('RAW') != -1:
            # Look for any config var containing the word 'RAW' and assume it is 'Missing'
            files_needed[app_cfg[var]] = 'Missing'

    # See if the files_needed are present and have consistent date stamps (date_list)
    run_files = os.listdir(path_to_run_dir)
    date_list = []
    for file_needed, status in files_needed.items():
        for run_file in run_files:
            date_tag = run_file[-13:-13 + 8]  # Grab the date if any
            run_file = run_file[:len(run_file) -
                                14]  # Grab the name without the date
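            # Run files are named '<base name> MM-DD-YY.xlsx'; the last 13
            # chars hold 'MM-DD-YY.xlsx', so stripping 14 leaves the base name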
            if run_file == file_needed:
                date_list.append(date_tag)  # Grab the date
                files_needed[file_needed] = 'Found'
                break

    # All time stamps the same ?
    base_date = date_list[0]
    for date_stamp in date_list:
        if date_stamp != base_date:
            print('ERROR: Inconsistent date stamp found')
            exit()

    # Do we have all the files we need ?
    for file_name, status in files_needed.items():
        if status != 'Found':
            print('ERROR: File ', file_name, 'is missing')
            exit()

    # Read the config_dict.json file
    # with open(os.path.join(path_to_run_dir, app_cfg['META_DATA_FILE'])) as json_input:
    #     config_dict = json.load(json_input)
    # print(config_dict)

    # Since we have a consistent date then Create the json file for config_data.json.
    # Put the time_stamp in it
    config_dict = {
        'data_time_stamp': base_date,
        'last_run_dir': path_to_run_dir
    }
    with open(os.path.join(path_to_run_dir, app_cfg['META_DATA_FILE']),
              'w') as json_output:
        json.dump(config_dict, json_output)

    # Delete all previous tmp_ files
    for file_name in run_files:
        if file_name[0:4] == 'tmp_':
            os.remove(os.path.join(path_to_run_dir, file_name))

    # Here is what we have - All things should be in place
    print('Our directories:')
    print('\tPath to Main Dir:', path_to_main_dir)
    print('\tPath to Updates Dir:', path_to_updates)
    print('\tPath to Archives Dir:', path_to_archives)
    print('\tPath to Run Dir:', path_to_run_dir)

    # Process the RAW data (Renewals and Bookings)
    # Clean up rows, combine multiple Bookings files, add custom table names
    processing_date = date_list[0]
    file_paths = []
    bookings = []
    renewals = []
    start_row = 0
    print()
    print('We are processing files:')

    for file_name in files_needed:
        file_path = file_name + ' ' + processing_date + '.xlsx'
        file_path = os.path.join(path_to_run_dir, file_path)

        file_paths.append(file_path)

        my_wb, my_ws = open_wb(file_name + ' ' + processing_date + '.xlsx',
                               run_dir)
        # my_wb = xlrd.open_workbook(file_path)
        # my_ws = my_wb.sheet_by_index(0)
        print('\t\t', file_name, processing_date + '.xlsx', ' has ',
              my_ws.nrows, ' rows and ', my_ws.ncols, 'columns')

        if file_name.find('Bookings') != -1:
            if start_row == 0:
                # For the first workbook include the header row
                start_row = 2
            elif start_row == 2:
                # For subsequent workbooks skip the header
                start_row = 3
            for row in range(start_row, my_ws.nrows):
                bookings.append(my_ws.row_slice(row))

        elif file_name.find('Renewals') != -1:
            for row in range(2, my_ws.nrows):
                renewals.append(my_ws.row_slice(row))

    # Push the lists out to an Excel File
    push_xlrd_to_xls(bookings, app_cfg['XLS_BOOKINGS'], run_dir, 'ta_bookings')

    as_bookings = get_as_skus(bookings)
    push_xlrd_to_xls(as_bookings, app_cfg['XLS_AS_SKUS'], run_dir,
                     'as_bookings')

    push_xlrd_to_xls(renewals, app_cfg['XLS_RENEWALS'], run_dir, 'ta_renewals')

    print('We have ', len(bookings), 'bookings line items')
    print('We have ', len(as_bookings), 'Services line items')
    print('We have ', len(renewals), 'renewal line items')

    return
Exemplo n.º 21
0
from my_app.settings import app_cfg
from my_app.func_lib.open_wb import open_wb
from my_app.func_lib.push_list_to_xls import push_list_to_xls
from my_app.func_lib.get_list_from_ss import get_list_from_ss
from my_app.func_lib.push_xls_to_ss import push_xls_to_ss

# push_xls_to_ss('mailer names.xlsx', 'CTUG mailer')
# jim = get_list_from_ss('Tetration Coverage Map')
# print (jim)
#
# exit()

# Feed this a list like this
# Chris Mchenry (chmchenr) <*****@*****.**>; Gordon Hirst (ghirst) <*****@*****.**>;
#
wb, ws = open_wb('mailer scrub.xlsx')

raw = ws.cell_value(0, 0)
names = []
name = ''
for c in raw:
    if c != ';':
        name = name + c
    else:
        # Trim surrounding whitespace before saving the name
        names.append(name.strip())
        name = ''

word = ''
Exemplo n.º 22
0
def process_subs(run_dir=app_cfg["UPDATES_DIR"]):
    print('MAPPING>>>>>>>>>> ', run_dir + '\\' + app_cfg['XLS_SUBSCRIPTIONS'])
    # Open up the subscription excel workbooks

    wb, sheet = open_wb(app_cfg['XLS_SUBSCRIPTIONS'], run_dir)

    # Get the renewal columns we are looking for
    my_map = build_sheet_map(app_cfg['XLS_SUBSCRIPTIONS'], sheet_map,
                             'XLS_SUBSCRIPTIONS', run_dir)

    print('sheet_map ', id(sheet_map))
    print('my map ', id(my_map))

    # Strip out the sheet-map entries that don't belong to this workbook
    my_map = [x for x in my_map if x[1] == 'XLS_SUBSCRIPTIONS']

    # Create a simple column name dict
    col_nums = {
        sheet.cell_value(0, col_num): col_num
        for col_num in range(0, sheet.ncols)
    }

    # Loop over all of the subscription records
    # Build a dict of {customer: [subscription record, subscription record, ...]}
    my_dict = {}
    for row_num in range(1, sheet.nrows):
        customer = sheet.cell_value(row_num, col_nums['End Customer'])
        if customer in my_dict:
            tmp_record = []
            tmp_records = my_dict[customer]
        else:
            tmp_record = []
            tmp_records = []

        # Loop over my_map and gather the columns we need
        for col_map in my_map:
            my_cell = sheet.cell_value(row_num, col_map[2])

            # Is this cell a Date type (xlrd type 3)?
            # If so, reformat it as an MM-DD-YYYY string
            if sheet.cell_type(row_num, col_map[2]) == 3:
                my_cell = datetime.datetime(
                    *xlrd.xldate_as_tuple(my_cell, wb.datemode))
                my_cell = my_cell.strftime('%m-%d-%Y')

            tmp_record.append(my_cell)

        tmp_records.append(tmp_record)
        my_dict[customer] = tmp_records

    #
    # Sort each customer's renewal dates
    #
    sorted_dict = {}
    summarized_dict = {}
    summarized_rec = []
    # print('diag1',my_dict['BLUE CROSS & BLUE SHIELD OF ALABAMA'])
    # exit()
    # ['08-20-2018', '12', '08-20-2019', 72.0, 1500.0, 'Sub170034', 'ACTIVE']

    for customer, renewals in my_dict.items():
        # Sort this customer's renewal records into date order
        renewals.sort(
            key=lambda x: datetime.datetime.strptime(x[0], '%m-%d-%Y'))
        sorted_dict[customer] = renewals
        #
        # print('\t', customer, ' has', len(renewals), ' records')
        # print('\t\t', renewals)
        # print ('---------------------')
        # time.sleep(1)

        next_renewal_date = renewals[0][0]
        next_renewal_rev = 0
        next_renewal_qtr = renewals[0][2]
        for renewal_rec in renewals:
            if renewal_rec[0] == next_renewal_date:
                # Tally this renewal record and get the next
                # print (type(renewal_rec[4]), renewal_rec[4])
                # time.sleep(1)
                next_renewal_rev = renewal_rec[4] + next_renewal_rev
            elif renewal_rec[0] != next_renewal_date:
                # Record these summarized values
                summarized_rec.append(
                    [next_renewal_date, next_renewal_rev, next_renewal_qtr])
                # Reset these values and get the next renewal date for this customer
                next_renewal_date = renewal_rec[0]
                next_renewal_rev = renewal_rec[4]
                next_renewal_qtr = renewal_rec[2]

            # Check to see if this is the last renewal record
            # If so exit the loop
            if renewals.index(renewal_rec) == len(renewals) - 1:
                break

        summarized_rec.append(
            [next_renewal_date, next_renewal_rev, next_renewal_qtr])
        summarized_dict[customer] = summarized_rec
        summarized_rec = []

    # print(sorted_dict['FIRST NATIONAL BANK OF SOUTHERN AFRICA LTD'])
    # print(summarized_dict['SPECTRUM HEALTH SYSTEM'])
    # print (len(summarized_dict['SPECTRUM HEALTH SYSTEM']))
    return sorted_dict, summarized_dict
Exemplo n.º 23
0
            elif src_cell_type == xlrd.XL_CELL_BLANK:
                print("\t\tBlank", my_ws.cell_value(my_row, my_col[2]),
                      ' needs to be a ' + dest_cell_type)

            elif src_cell_type == xlrd.XL_CELL_BOOLEAN:
                print("\t\tBoolean", my_ws.cell_value(my_row, my_col[2]),
                      ' needs to be a ' + dest_cell_type)

            elif src_cell_type == xlrd.XL_CELL_EMPTY:
                print("\t\tEmpty", my_ws.cell_value(my_row, my_col[2]),
                      ' needs to be a ' + dest_cell_type)

            elif src_cell_type == xlrd.XL_CELL_ERROR:
                print("\t\tError", my_ws.cell_value(my_row, my_col[2]),
                      ' needs to be a ' + dest_cell_type)
            time.sleep(0.5)
            print(type(dest_cell_val), dest_cell_val)
            list_of_row.append(dest_cell_val)
        print(list_of_row)

        list_of_rows.append(list_of_row)
        print(list_of_rows)
        time.sleep(2)
    return list_of_rows


if __name__ == "__main__":
    path_to_scrub = 'C:/Users/jpisano/ta_adoption_data/ta_data_updates/TA Master Subscriptions as of 06-12-19.xlsx'
    my_path, my_file = ntpath.split(path_to_scrub)
    wb, ws = open_wb(my_file)
    data_scrubber(ws, path_to_scrub)