Example No. 1
"""
#NEW Damages script to be used until efinity supplier communications are automated.
#This script reads all damaged stock processed by Spree QC teams. 
#Script must be scheduled to run on a daily basis at 23:45.

import pandas as pd
from pandas import DataFrame
from datetime import date, timedelta
from pandas import ExcelWriter
import MyFunx, gdocs

today = date.today() 

#Import Efinity bad stock
pw = 'Spr33Pops101'
PODetail = MyFunx.sql_import("vw_PurchaseOrderItems","PurchaseOrderDate", pw)
PODetail = PODetail[['PurchaseOrderNumber','SimpleSKU','SimpleName','SupplierName','Email','AlternateEmail']]

Stock = MyFunx.sql_import("vw_Inventory","Date", pw)
QCed = Stock[['Date','BrandName','SimpleSKU','QualityControlBadQuantity']]
QCed.rename(columns={'Date': 'Date QCed','QualityControlBadQuantity':'QC_Damaged'}, inplace=True)
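
#MyFunx.sql_import is a project-specific helper that is not shown on this page. As a
#hedged sketch only, assuming it wraps pyodbc against SQL Server (server, database and
#user below are placeholders, not the real configuration), it might look like this:
import pyodbc

def sql_import_sketch(view, date_cols, pw):
    #Load the whole view into a DataFrame, parsing the named column(s) as dates
    conn = pyodbc.connect('DRIVER={SQL Server};SERVER=<server>;DATABASE=<db>;UID=<user>;PWD=' + pw)
    return pd.read_sql('SELECT * FROM ' + view, conn, parse_dates=date_cols)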

#Reading from Oversupply google doc    
c = gdocs.authenticate_gdocs()
sheet = c.open('Oversupply')
ws = sheet.worksheet('Sheet1')
if ws.cell(2,1).value == "":
    print "No oversupply scanned"
    OS = DataFrame(columns = ['SKU','Oversupply'])
    l = 0
else:
Example No. 2
writer.save()

#Deleting all data from google doc
clean1 = worksht.range('A2:C' + str(lgth + 5))
for cl in clean1:
    cl.value = ""
worksht.update_cells(clean1)
    
clean2 = ws.range('A2:C' + str(l + 5))
for cl in clean2:
    cl.value = ""
ws.update_cells(clean2)

#Email Spree output file 
doc_name = 'Daily Damages & Oversupply ' 
part = '03_Damages_OS\\Damages_OS ' + str(today) + '.xlsx'
message = 'Daily Damages and Oversupply'
maillist = 'MailList_Damages.txt'
MyFunx.send_message(doc_name, message, part, maillist)
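
#MyFunx.send_message is a project-specific helper not shown on this page. Purely as a
#hedged sketch (the SMTP host, sender address and maillist format below are assumptions,
#not the real configuration), it might wrap smtplib roughly like this:
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication

def send_message_sketch(subject, body, attachment_path, maillist_file):
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg.attach(MIMEText(body))
    #Attach the Excel output file
    with open(attachment_path, 'rb') as f:
        msg.attach(MIMEApplication(f.read(), Name=attachment_path))
    #Assume one recipient address per line in the maillist file
    recipients = [r.strip() for r in open(maillist_file) if r.strip()]
    server = smtplib.SMTP('mail.example.com')
    server.sendmail('sender@example.com', recipients, msg.as_string())
    server.quit()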

#Create rolling damages doc
dmgs.columns = ['SKU','Reason for damage','QC Responsible','Date'] 
dmgmrg = pd.merge(dmgs, BPdetail, on='SKU', how = 'left')
dmgmrg = dmgmrg[['Date','Order ID','SKU','Reason for damage','QC Responsible']]
dmgmrg.to_csv('RollingDamages.txt', sep=';',index=False, encoding = 'utf-8')

#Create rolling oversupply doc
Oversup = DayCount[["Date","Supplier","PO","SKU","Oversupply"]]
Oversup = Oversup[Oversup.Oversupply.notnull()]
Oversup.to_csv('RollingOversupply.txt', sep=';',index=False, encoding = 'utf-8')
Example No. 3
#Split entries with multiple POs
x = df['POs'].str.split('\n|/ |, | ').apply(Series, 1).stack()
x = x.map(lambda y: y.strip(" ()#abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-&:.%"))
x.index = x.index.droplevel(-1)
x.name = 'POs'
del df['POs']
m = df.join(x)
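
#Toy illustration (not part of the original script): the split/apply(Series)/stack
#chain above turns one row whose 'POs' cell lists several PO numbers into one row
#per PO, repeating the other columns.
from pandas import DataFrame, Series
toy = DataFrame({'Event': ['Damage'], 'POs': ['1234, 5678']})
t = toy['POs'].str.split(', ').apply(Series, 1).stack()
t.index = t.index.droplevel(-1)  #re-align to the original row index
t.name = 'POs'
del toy['POs']
toy = toy.join(t)  #-> two rows: ('Damage', '1234') and ('Damage', '5678')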

#Create Supplier Contacts output files
S = m[['Date','Event','POs','Supplier','Brand','Client name','Client email']]
S = S[S['POs'].notnull()]
path = '03_Damages_OS'
DataName = S
DocName = 'Supplier Contacts'
DaysCounting = 70
MyFunx.data_history( DataName, DocName, DaysCounting, path )

#Select tomorrow's date (on Fridays, 'tomorrow' is the following Monday)
dates = []
for d in m['Date']:
    if today.weekday() == 4:
        if d.date() == today + timedelta(3):
            d = d.strftime('%Y/%m/%d')
        else:
            d = 0
    elif d.date() == today + timedelta(1):
        d = d.strftime('%Y/%m/%d')
    else:
        d = 0
    dates.append(d)
m["Date"] = dates
Example No. 4
import pandas as pd
from datetime import datetime
from pandas import ExcelWriter
import gspread
import MyFunx

team = raw_input('What is your Team Letter (A, B, C, S (S = SAMPLES))? ')
shift = datetime.today().strftime('%Y-%m-%d %H:%M')

#Import Brightpearl Detail Report
columns = ["Order ID", "Ref", "Status", "Contact", "SKU", "Name", "Quantity"]
BP = pd.read_csv('BPdetail.csv', header = 0, usecols = columns)
    
#Lulu1 = pd.ExcelFile('Z:\\Python Scripts\\00_UPDATE\\Lulu1.xlsx')
#Planned = Lulu1.parse('Sheet4', skiprows = 0, index = None, parse_cols = (36,46))

#Connect to MSSQL to get Lulu data
pw = "Spr33Pops101"
Lulu =  MyFunx.sql_import("vw_ProcurementPipeline","ActualGoLiveDate",pw)
Planned = Lulu[['SimpleSKU','ProcurementProductCategoryL3']]
Planned.drop_duplicates(inplace = True, take_last = True)

#Reading from StockCount google docs
c = gspread.Client(auth=('*****@*****.**', 'spreeapp'))
c.login()
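
#gdocs.authenticate_gdocs (used by the other scripts on this page) and the tuple-based
#gspread.Client login above are project-specific / legacy gspread. As a hedged sketch
#only, a helper like authenticate_gdocs might wrap the service-account flow below
#(the key file name and scope are assumptions, not the real configuration):
from oauth2client.service_account import ServiceAccountCredentials

def authenticate_gdocs_sketch():
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name('gdocs-key.json', scope)
    return gspread.authorize(creds)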

#==============================================================================
# Details for SAMPLES processing
#==============================================================================
if team == 'S':
    sht = c.open('SamplesCount')
    worksheet = sht.worksheet('Team' + team)
       
    #Parameters for total samples count doc
Example No. 5
def InboundData(lastmonth, nextmonth, today):
    
    import numpy as np
    import pandas as pd
    from pandas import DataFrame
    import gspread
    import MyFunx, gdocs    
    
    #==============================================================================
    # Import from all required data sources
    #==============================================================================
    #Import Brightpearl Detail Report data
    columns = [u"Order ID", u"Ref", u"SKU", u"Status", u"Quantity"]
    BPdetail = pd.read_csv('BPdetail.csv', header = 0, usecols = columns, encoding = 'iso-8859-1')
    BPdetail['Order ID'] = BPdetail['Order ID'].map(lambda x: unicode(x))
    BPdetail.rename(columns={'Order ID': u'POs', 'Quantity': u'BP Qty'}, inplace=True)
    BPdet = BPdetail[BPdetail['Status'].str.contains('Cancel PO')==False]
    
    BPdet = pd.pivot_table(BPdet, values = [u'BP Qty'], index = [u'SKU',u'POs',u'Ref',u'Status'], aggfunc=np.sum)
    BPdet.reset_index(inplace=True)
       
    CancelledPOs = BPdetail[BPdetail['Status']=='Cancel PO']
    CancelledPOs = CancelledPOs.groupby('POs').agg({'SKU':'count'})
    
    #Import Brightpearl PO Report data
    columns = ["Order ID", "Delivery due"]
    BPreport = pd.read_csv('BPreport.csv', header = 0, usecols = columns, parse_dates = [1])
    BPreport = BPreport.dropna(axis = 0,how = 'all') #removes empty rows
    BPreport['Order ID'] = BPreport['Order ID'].map(lambda x: x.strip('PO#')) #removes text in front of PO number
    BPreport.rename(columns={'Order ID': 'POs', 'Delivery due':'DeliveryDue'}, inplace=True)
    
    BP = pd.merge(BPdet, BPreport, on = 'POs', how = 'left', sort = False)
    
    #Import Epping Receiving Report data
    c = gdocs.authenticate_gdocs()
    
    sht = c.open('Epping Receiving Report')
    worksheet = sht.worksheet('Booked')
    
    info = worksheet.get_all_values()
    headers = info.pop(0)
    B_R = DataFrame(data = info, columns = headers)
    
    Bookd = B_R[[u'POs',u'Date booked']]
    Bookd = Bookd.replace('',np.nan)
    Bookd = Bookd.dropna(subset = [u'Date booked'], thresh = 1)
    Bookd = Bookd.drop_duplicates(subset = [u'POs'], take_last = False)
    Bookd = Bookd.loc[Bookd.POs != ""]
    Bookd[u'Date booked'] = pd.to_datetime(Bookd[u'Date booked'], infer_datetime_format = True)
    
    Receivd = B_R[['POs', 'Partial delivery', 'Date received']]
    Receivd = Receivd.replace('',np.nan)
    Receivd = Receivd.dropna(subset = ['Date received'], thresh = 1)
    Receivd = Receivd.drop_duplicates(subset = ['POs'], take_last = True)
    Receivd = Receivd.loc[Receivd.POs != ""]
    Receivd['Date received'] = pd.to_datetime(Receivd['Date received'], infer_datetime_format = True)
    
    #Import Rolling Stock data
    Stock = pd.ExcelFile('Z:\\SUPPLY CHAIN\\Python Scripts\\02_StockCount\\Rolling Stock.xlsx')
    QCed = Stock.parse('Sheet1', skiprows = 0, index = None, parse_cols = (1,3,4,5), encoding = 'utf-8')
    QCed.rename(columns={'Date': u'LastQCed', 'PO':u'POs','ProductID':u'SKU'}, inplace=True)
    poqc = [unicode(p) for p in QCed['POs']]
    QCed['POs'] = poqc
    QCed = QCed.groupby(['POs','SKU']).agg({'Qty Counted':np.sum, 'LastQCed':np.max})
    QCed.reset_index(inplace=True)
    
    #Import Rolling Damages
    Damages = pd.ExcelFile('03_Damages_OS\\Rolling Damages.xlsx')
    Damages = Damages.parse('Sheet1', skiprows = 0, index = None, encoding = 'utf-8')
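    #value_counts counts how many rows each SKU has in Rolling Damages; that row count
    #is used as the 'Qty Damaged' per SKU below.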
    SKU = Damages['SKU'].value_counts()
    Damagd = DataFrame(data = SKU)
    Damagd.reset_index(level=0, inplace=True)
    Damagd.columns = [u'SKU', u'Qty Damaged']
    
    #Import Lulu Assortment Plans
    ## table = "vw_ProcurementPipeline"
    ## dateparse = "ActualGoLiveDate"
    
    pw = raw_input("Enter SQL Server database password: ")
    Lulu = MyFunx.sql_import("vw_ProcurementPipeline",["ActualGoLiveDate","PlannedGoLiveDate"], pw)
    Planned = Lulu[['PlannedGoLiveDayOfWeek','PlannedGoLiveMonth','PlannedGoLiveYear','PlannedGoLiveDate','BuyerPlanName','BuyerPlanStatus','EmployeeFirstName','PlannedUnitCostExclTax','PlannedTotalQuantity','PlannedTotalCostExclTax','SimpleSKU','SimpleName','ConfigName','ConfigSKU','ProcurementStatus','ProcurementProductCategoryL3','ActualGoLiveDate','Supplier','Designer','EANNumber','BarCode']]
    #Merge EAN, BarCode information with SKU
    Planned.loc[Planned.EANNumber == "",'EANNumber'] = None
    SKU = Planned['EANNumber'].combine_first(Planned['SimpleSKU'])
    Planned.loc[:,'SKU'] = Planned['BarCode'].combine_first(SKU)
    Planned.drop_duplicates(subset = ['SKU','PlannedGoLiveMonth'], inplace = True, take_last = True)
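    #Toy illustration (not part of the original logic) of the combine_first precedence
    #used above: BarCode wins where present, then EANNumber, then SimpleSKU.
    _bar = pd.Series(['BAR1', None, None])
    _ean = pd.Series([None, 'EAN2', 'EAN3'])
    _sku = pd.Series(['SKU1', 'SKU2', 'SKU3'])
    _demo = _bar.combine_first(_ean.combine_first(_sku))  #-> BAR1, EAN2, EAN3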
    if lastmonth in (11, 12):
        Planned = Planned[((Planned.PlannedGoLiveMonth >= lastmonth) & (Planned.PlannedGoLiveYear == today.year)) | ((Planned.PlannedGoLiveMonth <= nextmonth) & (Planned.PlannedGoLiveYear == today.year))]
    else:    
        Planned = Planned[((Planned.PlannedGoLiveMonth >= lastmonth) & (Planned.PlannedGoLiveYear == today.year)) & ((Planned.PlannedGoLiveMonth <= nextmonth) & (Planned.PlannedGoLiveYear == today.year))]
    Planned.rename(columns={'PlannedGoLiveDayOfWeek':'GLDay','PlannedGoLiveMonth':'GLMonth','PlannedGoLiveYear':'GLYear','PlannedGoLiveDate':'GLDate', 'EmployeeFirstName':'Buyer','ProcurementProductCategoryL3':'Category', 'PlannedUnitCostExclTax':'UnitCost','PlannedTotalQuantity':'TotalUnits','PlannedTotalCostExclTax':'TotalCost'}, inplace=True)
    Planned = Planned[Planned['TotalCost'] > 0]
    Planned.loc[Planned.ProcurementStatus == 'Deleted', ['TotalUnits','TotalCost']] = ""
    
    #Import Dynaman IBOI1003 Inbound Order Received Messages
    ## table = "vw_WarehouseInboundItemsReceived"
    ## dateparse = "Timestamp"
    
    IBOI1003 =  MyFunx.sql_import("vw_WarehouseInboundItemsReceived","Timestamp",pw)
    IBOI1003 = IBOI1003[['MessageReference','ItemCode','QuantityReceived','Timestamp']]
    TaknIn = IBOI1003.groupby(['MessageReference','ItemCode']).agg({'QuantityReceived':np.sum, 'Timestamp':np.max})
    TaknIn.reset_index(inplace=True)
    TaknIn.columns = ['POs','SKU','OTDLastReceived','Qty Received']
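    #References of 7 or more characters are replaced with 0 and then filtered out,
    #keeping only short, plain PO numbers.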
    TaknIn['POs'] = TaknIn['POs'].apply(lambda x: x if len(x) < 7 else 0)
    TaknIn = TaknIn[TaknIn['POs'] != 0]
    
    #Import Dynaman ITMI1002 
    ## table = "vw_WarehouseStockAvailability"
    ## dateparse = "Timestamp"
    
    ITMI1002 =  MyFunx.sql_import("vw_WarehouseStockAvailability","Timestamp",pw)
    ITMI1002 = ITMI1002[['ITEM_CODE','QTY']]
    PutAway = pd.pivot_table(ITMI1002, values = ['QTY'], index = ['ITEM_CODE'], aggfunc=np.sum)
    PutAway.reset_index(inplace=True)
    PutAway.columns = ['SKU','Qty PutAway']
    
    Merge = pd.merge(Planned, BP, on = 'SKU', how = 'left', sort = False)
    Merge1 = pd.merge(Merge, Bookd, on = 'POs', how = 'left', sort = False)
    Merge2 = pd.merge(Merge1, Receivd, on = 'POs', how = 'left', sort = False)
    Merge3 = pd.merge(Merge2, QCed, on = ['SKU','POs'], how = 'left', sort = False)
    Merge4 = pd.merge(Merge3, Damagd, on = 'SKU', how = 'left', sort = False)
    Merge5 = pd.merge(Merge4, TaknIn, on = ['SKU','POs'], how = 'left', sort = False)
    Visibility = pd.merge(Merge5, PutAway, on = 'SKU', how = 'left')
    Visibility.drop_duplicates(inplace = True)
    #remove duplicates that are in Draft PO status, assuming these POs are outdated
    Visibility.loc[Visibility['Ref'].str.contains("sample|Sample|SAMPLE|samples|Samples|OS|Os|OVERSUPPLY|fraud")==True,['TotalUnits','TotalCost','Qty PutAway']] = ""
    Visibility['Oversupply'] = ""
    Visibility.loc[Visibility['Ref'].str.contains("OS|Os|OVERSUPPLY")==True, 'Oversupply'] = Visibility['BP Qty']    
    V0 = Visibility.loc[Visibility.TotalUnits == "",]
    Vn0 = Visibility.loc[Visibility.TotalUnits != "",]
    Va = Vn0[Vn0.duplicated(subset = ['SKU'], take_last = False)==False]
    Vb = Vn0[Vn0.duplicated(subset = ['SKU'], take_last = False)==True]
    Vb.loc[:, ['TotalUnits','TotalCost','Qty PutAway']] = ""    
    Vc = Vb[Vb['Status']!='Draft PO'] 
    Visibility = Va.append([Vc,V0], ignore_index = True)
    Visibility.replace("", np.nan, inplace = True)
    
    return Visibility
Example No. 6
#Split entries with multiple POs
x = df['POs'].str.split('\n|/ |, | ').apply(Series, 1).stack()
x = x.map(lambda y: y.strip(" ()#abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-&:.%"))
x.index = x.index.droplevel(-1)
x.name = 'POs'
del df['POs']
m = df.join(x)

#Create Supplier Contacts output files
S = m[['Date','Event','POs','Supplier','Brand','Client name','Client email']]
S = S[S['POs'].notnull()]
path = '03_Damages_OS'
DataName = S
DocName = 'Supplier Contacts'
DaysCounting = 70
MyFunx.data_history( DataName, DocName, DaysCounting, path )

#Select tomorrow's date (on Fridays, 'tomorrow' is the following Monday)
dates = []
for d in m['Date']:
    if today.weekday() == 4:
        if d.date() == today + timedelta(3):
            d = d.strftime('%Y/%m/%d')
        else:
            d = 0
    elif d.date() == today + timedelta(1):
        d = d.strftime('%Y/%m/%d')
    else:
        d = 0
    dates.append(d)
m["Date"] = dates

#Clean up date and time values
Example No. 7
#This script keeps an audit trail of samples sent from WH to Sample Room, Studio and back to WH

import pandas as pd
from pandas import DataFrame
from datetime import date, datetime, timedelta
from pandas import ExcelWriter
import MyFunx, gdocs

shift = datetime.today().strftime('%Y-%m-%d %H:%M')
today = date.today() 
#==============================================================================
# Read Samples Plan master data
#==============================================================================

pw = "Spr33Pops101"
Lulu =  MyFunx.sql_import("vw_ProcurementPipeline","ActualGoLiveDate", pw)
Planned = Lulu[['PlannedGoLiveDayOfWeek','PlannedGoLiveMonth','PlannedGoLiveYear','BuyerPlanName','BuyerPlanStatus','EmployeeFirstName','PlannedUnitCostExclTax','PlannedTotalQuantity','PlannedTotalCostExclTax','SimpleSKU','SimpleName','ConfigName','ConfigSKU','ProcurementStatus','ProcurementProductCategoryL3','ActualGoLiveDate','Supplier','Designer','EANNumber','BarCode']]
Planned.rename(columns = {'PlannedGoLiveDayOfWeek':'GLDay','PlannedGoLiveMonth':'GLMonth','PlannedGoLiveYear':'GLYear','EmployeeFirstName':'Buyer','SimpleSKU':'ProductID'}, inplace = True)
Planned.drop_duplicates(subset = ['ProductID','GLMonth'], inplace = True, take_last = True)
Planned = Planned[Planned['PlannedTotalCostExclTax'] > 0]

Stock = pd.ExcelFile('Z:\\SUPPLY CHAIN\\Python Scripts\\02_StockCount\\Rolling Stock.xlsx')
QCed = Stock.parse('Sheet1', skiprows = 0, index = None, parse_cols = (0,1,4), parse_dates = True)
QCed.rename(columns={'Date': 'Date QCed'}, inplace=True)

SamplesPlan = pd.merge(Planned, QCed, on = 'ProductID', how = 'left', sort = False)
SamplesPlan.drop_duplicates(subset = ['ConfigSKU'],take_last = True, inplace = True)
SamplesPlan = SamplesPlan[(SamplesPlan.GLMonth >= today.month - 2) & (SamplesPlan.GLMonth <= today.month + 2) & (SamplesPlan.GLYear == today.year)]
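#Note that the GLMonth window above is plain month arithmetic combined with GLYear == today.year,
#so it only covers plans within the current calendar year and does not wrap across a year boundary.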

#==============================================================================
# Read data from Google docs